from bs4 import BeautifulSoup
from couchpotato.core.event import fireEvent
from couchpotato.core.helpers.encoding import toUnicode, tryUrlencode, \
    simplifyString
from couchpotato.core.helpers.rss import RSS
from couchpotato.core.helpers.variable import tryInt, getTitle
from couchpotato.core.logger import CPLog
from couchpotato.core.providers.nzb.base import NZBProvider
from couchpotato.environment import Env
from dateutil.parser import parse
import time
import xml.etree.ElementTree as XMLTree

log = CPLog(__name__)
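
# NZB provider that queries NZBClub's public RSS search feed.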

class NZBClub(NZBProvider, RSS):

    urls = {
        'search': 'https://www.nzbclub.com/nzbfeed.aspx?%s',
    }

    http_time_between_calls = 3 # seconds
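
    # Search NZBClub's RSS feed for the movie (quoted title + year + quality),
    # strip globally ignored words from the query, and only keep releases that
    # pass the searcher's correct_movie check.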
    def search(self, movie, quality):

        results = []
        if self.isDisabled():
            return results

        q = '"%s" %s %s' % (simplifyString(getTitle(movie['library'])), movie['library']['year'], quality.get('identifier'))
        for ignored in Env.setting('ignored_words', 'searcher').split(','):
            q = '%s -%s' % (q, ignored.strip())

        params = {
            'q': q,
            'ig': '1',
            'rpp': 200,
            'st': 1,
            'sp': 1,
            'ns': 1,
        }

        cache_key = 'nzbclub.%s.%s.%s' % (movie['library']['identifier'], quality.get('identifier'), q)
        data = self.getCache(cache_key, self.urls['search'] % tryUrlencode(params))
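        # Parse the RSS feed and turn each <item> into a potential result.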
        if data:
            try:
                try:
                    data = XMLTree.fromstring(data)
                    nzbs = self.getElements(data, 'channel/item')
                except Exception as e:
                    log.debug('%s, %s' % (self.getName(), e))
                    return results

                for nzb in nzbs:

                    nzbclub_id = tryInt(self.getTextElement(nzb, "link").split('/nzb_view/')[1].split('/')[0])
                    enclosure = self.getElement(nzb, "enclosure").attrib
                    size = enclosure['length']
                    date = self.getTextElement(nzb, "pubDate")

                    def extra_check(item):
                        full_description = self.getCache('nzbclub.%s' % nzbclub_id, item['detail_url'], cache_timeout = 25920000)

                        if 'ARCHIVE inside ARCHIVE' in full_description:
                            log.info('Wrong: Seems to be passworded files: %s' % item['name'])
                            return False

                        return True
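
                    # Collect the fields the searcher and downloader expect for this release.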
                    new = {
                        'id': nzbclub_id,
                        'type': 'nzb',
                        'provider': self.getName(),
                        'name': toUnicode(self.getTextElement(nzb, "title")),
                        'age': self.calculateAge(int(time.mktime(parse(date).timetuple()))),
                        'size': tryInt(size) / 1024 / 1024,
                        'url': enclosure['url'].replace(' ', '_'),
                        'download': self.download,
                        'detail_url': self.getTextElement(nzb, "link"),
                        'description': '',
                        'get_more_info': self.getMoreInfo,
                        'extra_check': extra_check
                    }
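
                    # Let the searcher verify the release actually matches this movie
                    # before scoring it and adding it to the results.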
                    is_correct_movie = fireEvent('searcher.correct_movie',
                        nzb = new, movie = movie, quality = quality,
                        imdb_results = False, single_category = False, single = True)

                    if is_correct_movie:
                        new['score'] = fireEvent('score.calculate', new, movie, single = True)
                        results.append(new)
                        self.found(new)

                return results
            except SyntaxError:
                log.error('Failed to parse XML response from NZBClub')

        return results
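
    # Fetch the NZBClub detail page for a result and copy the NFO contents
    # into the item's description.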
    def getMoreInfo(self, item):
        full_description = self.getCache('nzbclub.%s' % item['id'], item['detail_url'], cache_timeout = 25920000)
        html = BeautifulSoup(full_description)
        nfo_pre = html.find('pre', attrs = {'class': 'nfo'})
        description = toUnicode(nfo_pre.text) if nfo_pre else ''

        item['description'] = description
        return item
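
    # Reject releases whose detail page is flagged as an archive inside an
    # archive, which usually indicates passworded files.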
    def extraCheck(self, item):
        full_description = self.getCache('nzbclub.%s' % item['id'], item['detail_url'], cache_timeout = 25920000)

        if 'ARCHIVE inside ARCHIVE' in full_description:
            log.info('Wrong: Seems to be passworded files: %s' % item['name'])
            return False

        return True