Don't use quality identifier in title searches

pull/1316/head
Ruud committed 12 years ago
commit 61a0bb8ec6
6 changed files:

  1. couchpotato/core/providers/nzb/binsearch/main.py (5 changes)
  2. couchpotato/core/providers/nzb/nzbclub/main.py (6 changes)
  3. couchpotato/core/providers/nzb/nzbindex/main.py (2 changes)
  4. couchpotato/core/providers/torrent/scenehd/main.py (2 changes)
  5. couchpotato/core/providers/torrent/thepiratebay/main.py (94 changes)
  6. couchpotato/core/providers/torrent/torrentleech/main.py (2 changes)
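
To make the intent of the commit concrete, here is an illustrative before/after of the search query the title-based providers build. The movie dict, title and quality values below are made-up examples; only the formatting expressions come from the diffs that follow.

    # Illustrative values only; the real dicts are built by CouchPotato's
    # library and quality plugins.
    movie = {'library': {'year': 2010}}
    title = 'Inception'
    quality = {'identifier': '720p'}

    # Before: the quality identifier was appended to the search query.
    old_q = '"%s %s" %s' % (title, movie['library']['year'], quality.get('identifier'))
    # '"Inception 2010" 720p'

    # After: only the quoted title and year are searched for.
    new_q = '"%s %s"' % (title, movie['library']['year'])
    # '"Inception 2010"'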

couchpotato/core/providers/nzb/binsearch/main.py (5 changes)

@@ -22,11 +22,10 @@ class BinSearch(NZBProvider):
     def _search(self, movie, quality, results):
 
-        q = '%s %s' % (movie['library']['identifier'], quality.get('identifier'))
-
         arguments = tryUrlencode({
-            'q': q,
+            'q': movie['library']['identifier'],
             'm': 'n',
-            'max': 250,
+            'max': 400,
             'adv_age': Env.setting('retention', 'nzb'),
             'adv_sort': 'date',
             'adv_col': 'on',

couchpotato/core/providers/nzb/nzbclub/main.py (6 changes)

@@ -20,13 +20,13 @@ class NZBClub(NZBProvider, RSS):
     def _searchOnTitle(self, title, movie, quality, results):
 
-        q = '"%s %s" %s' % (title, movie['library']['year'], quality.get('identifier'))
+        q = '"%s %s"' % (title, movie['library']['year'])
 
         params = tryUrlencode({
             'q': q,
-            'ig': '1',
+            'ig': 1,
             'rpp': 200,
-            'st': 1,
+            'st': 5,
             'sp': 1,
             'ns': 1,
         })

couchpotato/core/providers/nzb/nzbindex/main.py (2 changes)

@@ -23,7 +23,7 @@ class NzbIndex(NZBProvider, RSS):
     def _searchOnTitle(self, title, movie, quality, results):
 
-        q = '"%s" %s %s' % (title, movie['library']['year'], quality.get('identifier'))
+        q = '"%s %s"' % (title, movie['library']['year'])
         arguments = tryUrlencode({
             'q': q,
             'age': Env.setting('retention', 'nzb'),

couchpotato/core/providers/torrent/scenehd/main.py (2 changes)

@@ -22,7 +22,7 @@ class SceneHD(TorrentProvider):
     def _searchOnTitle(self, title, movie, quality, results):
 
-        q = '"%s %s" %s' % (simplifyString(title), movie['library']['year'], quality.get('identifier'))
+        q = '"%s %s"' % (simplifyString(title), movie['library']['year'])
         arguments = tryUrlencode({
             'search': q,
         })

couchpotato/core/providers/torrent/thepiratebay/main.py (94 changes)

@@ -15,7 +15,7 @@ class ThePirateBay(TorrentMagnetProvider):
     urls = {
         'detail': '%s/torrent/%s',
-        'search': '%s/search/%s/0/7/%d'
+        'search': '%s/search/%s/%s/7/%d'
     }
 
     cat_ids = [
@@ -45,52 +45,66 @@ class ThePirateBay(TorrentMagnetProvider):
     def _searchOnTitle(self, title, movie, quality, results):
 
-        search_url = self.urls['search'] % (self.getDomain(), tryUrlencode(title + ' ' + quality['identifier']), self.getCatId(quality['identifier'])[0])
-
-        data = self.getHTMLData(search_url)
-
-        if data:
-            try:
-                soup = BeautifulSoup(data)
-                results_table = soup.find('table', attrs = {'id': 'searchResult'})
-                if not results_table:
-                    return
-
-                entries = results_table.find_all('tr')
-                for result in entries[2:]:
-                    link = result.find(href = re.compile('torrent\/\d+\/'))
-                    download = result.find(href = re.compile('magnet:'))
-
-                    try:
-                        size = re.search('Size (?P<size>.+),', unicode(result.select('font.detDesc')[0])).group('size')
-                    except:
-                        continue
-
-                    if link and download:
-
-                        def extra_score(item):
-                            trusted = (0, 10)[result.find('img', alt = re.compile('Trusted')) != None]
-                            vip = (0, 20)[result.find('img', alt = re.compile('VIP')) != None]
-                            confirmed = (0, 30)[result.find('img', alt = re.compile('Helpers')) != None]
-                            moderated = (0, 50)[result.find('img', alt = re.compile('Moderator')) != None]
-
-                            return confirmed + trusted + vip + moderated
-
-                        results.append({
-                            'id': re.search('/(?P<id>\d+)/', link['href']).group('id'),
-                            'name': link.string,
-                            'url': download['href'],
-                            'detail_url': self.getDomain(link['href']),
-                            'size': self.parseSize(size),
-                            'seeders': tryInt(result.find_all('td')[2].string),
-                            'leechers': tryInt(result.find_all('td')[3].string),
-                            'extra_score': extra_score,
-                            'get_more_info': self.getMoreInfo
-                        })
-
-            except:
-                log.error('Failed getting results from %s: %s', (self.getName(), traceback.format_exc()))
+        page = 0
+        total_pages = 1
+
+        while page < total_pages:
+
+            search_url = self.urls['search'] % (self.getDomain(), tryUrlencode('"%s %s"' % (title, movie['library']['year'])), page, self.getCatId(quality['identifier'])[0])
+
+            data = self.getHTMLData(search_url)
+
+            if data:
+                try:
+                    soup = BeautifulSoup(data)
+                    results_table = soup.find('table', attrs = {'id': 'searchResult'})
+                    if not results_table:
+                        return
+
+                    try:
+                        total_pages = len(soup.find('div', attrs = {'align': 'center'}).find_all('a'))
+                        page += 1
+                    except:
+                        pass
+
+                    print total_pages, page
+
+                    entries = results_table.find_all('tr')
+                    for result in entries[2:]:
+                        link = result.find(href = re.compile('torrent\/\d+\/'))
+                        download = result.find(href = re.compile('magnet:'))
+
+                        try:
+                            size = re.search('Size (?P<size>.+),', unicode(result.select('font.detDesc')[0])).group('size')
+                        except:
+                            continue
+
+                        if link and download:
+
+                            def extra_score(item):
+                                trusted = (0, 10)[result.find('img', alt = re.compile('Trusted')) != None]
+                                vip = (0, 20)[result.find('img', alt = re.compile('VIP')) != None]
+                                confirmed = (0, 30)[result.find('img', alt = re.compile('Helpers')) != None]
+                                moderated = (0, 50)[result.find('img', alt = re.compile('Moderator')) != None]
+
+                                return confirmed + trusted + vip + moderated
+
+                            results.append({
+                                'id': re.search('/(?P<id>\d+)/', link['href']).group('id'),
+                                'name': link.string,
+                                'url': download['href'],
+                                'detail_url': self.getDomain(link['href']),
+                                'size': self.parseSize(size),
+                                'seeders': tryInt(result.find_all('td')[2].string),
+                                'leechers': tryInt(result.find_all('td')[3].string),
+                                'extra_score': extra_score,
+                                'get_more_info': self.getMoreInfo
+                            })
+
+                except:
+                    log.error('Failed getting results from %s: %s', (self.getName(), traceback.format_exc()))
 
     def isEnabled(self):
         return super(ThePirateBay, self).isEnabled() and self.getDomain()
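
Besides dropping the quality identifier, the ThePirateBay hunk above starts fetching results page by page. A minimal sketch of that paging pattern, with a hypothetical fetch_page() standing in for the provider's getHTMLData() call plus BeautifulSoup parsing:

    # Sketch of the paging loop shown above; fetch_page() is hypothetical and
    # returns (rows, pager_link_count) for a zero-based page number.
    def collect_all_pages(fetch_page):
        page = 0
        total_pages = 1  # assume a single page until a pager is seen
        rows = []

        while page < total_pages:
            page_rows, pager_link_count = fetch_page(page)
            if not page_rows:
                break

            # The provider derives total_pages from the number of links in the
            # pager div of each response; mirror that here.
            if pager_link_count:
                total_pages = pager_link_count

            rows.extend(page_rows)
            page += 1

        return rows

    # Example with two fake pages of results.
    pages = {0: (['row1', 'row2'], 2), 1: (['row3'], 2)}
    all_rows = collect_all_pages(lambda p: pages.get(p, ([], 0)))
    # all_rows == ['row1', 'row2', 'row3']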

couchpotato/core/providers/torrent/torrentleech/main.py (2 changes)

@@ -34,7 +34,7 @@ class TorrentLeech(TorrentProvider):
     def _searchOnTitle(self, title, movie, quality, results):
 
-        url = self.urls['search'] % (tryUrlencode(title.replace(':', '') + ' ' + quality['identifier']), self.getCatId(quality['identifier'])[0])
+        url = self.urls['search'] % (tryUrlencode('%s %s' % (title.replace(':', ''), movie['library']['year'])), self.getCatId(quality['identifier'])[0])
 
         data = self.getHTMLData(url, opener = self.login_opener)
 
         if data:
