Browse Source

Don't use quality identifier in title searches

pull/1316/head
Ruud 12 years ago
parent
commit
61a0bb8ec6
  1. 5
      couchpotato/core/providers/nzb/binsearch/main.py
  2. 6
      couchpotato/core/providers/nzb/nzbclub/main.py
  3. 2
      couchpotato/core/providers/nzb/nzbindex/main.py
  4. 2
      couchpotato/core/providers/torrent/scenehd/main.py
  5. 94
      couchpotato/core/providers/torrent/thepiratebay/main.py
  6. 2
      couchpotato/core/providers/torrent/torrentleech/main.py

5
couchpotato/core/providers/nzb/binsearch/main.py

@@ -22,11 +22,10 @@ class BinSearch(NZBProvider):
def _search(self, movie, quality, results):
q = '%s %s' % (movie['library']['identifier'], quality.get('identifier'))
arguments = tryUrlencode({
'q': q,
'q': movie['library']['identifier'],
'm': 'n',
'max': 250,
'max': 400,
'adv_age': Env.setting('retention', 'nzb'),
'adv_sort': 'date',
'adv_col': 'on',

6
couchpotato/core/providers/nzb/nzbclub/main.py

@@ -20,13 +20,13 @@ class NZBClub(NZBProvider, RSS):
def _searchOnTitle(self, title, movie, quality, results):
q = '"%s %s" %s' % (title, movie['library']['year'], quality.get('identifier'))
q = '"%s %s"' % (title, movie['library']['year'])
params = tryUrlencode({
'q': q,
'ig': '1',
'ig': 1,
'rpp': 200,
'st': 1,
'st': 5,
'sp': 1,
'ns': 1,
})

2
couchpotato/core/providers/nzb/nzbindex/main.py

@@ -23,7 +23,7 @@ class NzbIndex(NZBProvider, RSS):
def _searchOnTitle(self, title, movie, quality, results):
q = '"%s" %s %s' % (title, movie['library']['year'], quality.get('identifier'))
q = '"%s %s"' % (title, movie['library']['year'])
arguments = tryUrlencode({
'q': q,
'age': Env.setting('retention', 'nzb'),

2
couchpotato/core/providers/torrent/scenehd/main.py

@@ -22,7 +22,7 @@ class SceneHD(TorrentProvider):
def _searchOnTitle(self, title, movie, quality, results):
q = '"%s %s" %s' % (simplifyString(title), movie['library']['year'], quality.get('identifier'))
q = '"%s %s"' % (simplifyString(title), movie['library']['year'])
arguments = tryUrlencode({
'search': q,
})

94
couchpotato/core/providers/torrent/thepiratebay/main.py

@@ -15,7 +15,7 @@ class ThePirateBay(TorrentMagnetProvider):
urls = {
'detail': '%s/torrent/%s',
'search': '%s/search/%s/0/7/%d'
'search': '%s/search/%s/%s/7/%d'
}
cat_ids = [
@@ -45,52 +45,66 @@ class ThePirateBay(TorrentMagnetProvider):
def _searchOnTitle(self, title, movie, quality, results):
search_url = self.urls['search'] % (self.getDomain(), tryUrlencode(title + ' ' + quality['identifier']), self.getCatId(quality['identifier'])[0])
page = 0
total_pages = 1
data = self.getHTMLData(search_url)
while page < total_pages:
if data:
try:
soup = BeautifulSoup(data)
results_table = soup.find('table', attrs = {'id': 'searchResult'})
search_url = self.urls['search'] % (self.getDomain(), tryUrlencode('"%s %s"' % (title, movie['library']['year'])), page, self.getCatId(quality['identifier'])[0])
if not results_table:
return
data = self.getHTMLData(search_url)
entries = results_table.find_all('tr')
for result in entries[2:]:
link = result.find(href = re.compile('torrent\/\d+\/'))
download = result.find(href = re.compile('magnet:'))
if data:
try:
soup = BeautifulSoup(data)
results_table = soup.find('table', attrs = {'id': 'searchResult'})
if not results_table:
return
try:
size = re.search('Size (?P<size>.+),', unicode(result.select('font.detDesc')[0])).group('size')
total_pages = len(soup.find('div', attrs = {'align': 'center'}).find_all('a'))
page += 1
except:
continue
if link and download:
def extra_score(item):
trusted = (0, 10)[result.find('img', alt = re.compile('Trusted')) != None]
vip = (0, 20)[result.find('img', alt = re.compile('VIP')) != None]
confirmed = (0, 30)[result.find('img', alt = re.compile('Helpers')) != None]
moderated = (0, 50)[result.find('img', alt = re.compile('Moderator')) != None]
return confirmed + trusted + vip + moderated
results.append({
'id': re.search('/(?P<id>\d+)/', link['href']).group('id'),
'name': link.string,
'url': download['href'],
'detail_url': self.getDomain(link['href']),
'size': self.parseSize(size),
'seeders': tryInt(result.find_all('td')[2].string),
'leechers': tryInt(result.find_all('td')[3].string),
'extra_score': extra_score,
'get_more_info': self.getMoreInfo
})
except:
log.error('Failed getting results from %s: %s', (self.getName(), traceback.format_exc()))
pass
print total_pages, page
entries = results_table.find_all('tr')
for result in entries[2:]:
link = result.find(href = re.compile('torrent\/\d+\/'))
download = result.find(href = re.compile('magnet:'))
try:
size = re.search('Size (?P<size>.+),', unicode(result.select('font.detDesc')[0])).group('size')
except:
continue
if link and download:
def extra_score(item):
trusted = (0, 10)[result.find('img', alt = re.compile('Trusted')) != None]
vip = (0, 20)[result.find('img', alt = re.compile('VIP')) != None]
confirmed = (0, 30)[result.find('img', alt = re.compile('Helpers')) != None]
moderated = (0, 50)[result.find('img', alt = re.compile('Moderator')) != None]
return confirmed + trusted + vip + moderated
results.append({
'id': re.search('/(?P<id>\d+)/', link['href']).group('id'),
'name': link.string,
'url': download['href'],
'detail_url': self.getDomain(link['href']),
'size': self.parseSize(size),
'seeders': tryInt(result.find_all('td')[2].string),
'leechers': tryInt(result.find_all('td')[3].string),
'extra_score': extra_score,
'get_more_info': self.getMoreInfo
})
except:
log.error('Failed getting results from %s: %s', (self.getName(), traceback.format_exc()))
def isEnabled(self):
return super(ThePirateBay, self).isEnabled() and self.getDomain()

2
couchpotato/core/providers/torrent/torrentleech/main.py

@@ -34,7 +34,7 @@ class TorrentLeech(TorrentProvider):
def _searchOnTitle(self, title, movie, quality, results):
url = self.urls['search'] % (tryUrlencode(title.replace(':', '') + ' ' + quality['identifier']), self.getCatId(quality['identifier'])[0])
url = self.urls['search'] % (tryUrlencode('%s %s' % (title.replace(':', ''), movie['library']['year'])), self.getCatId(quality['identifier'])[0])
data = self.getHTMLData(url, opener = self.login_opener)
if data:

Loading…
Cancel
Save