diff --git a/.gitignore b/.gitignore index 1d21d2e..78fda6d 100644 --- a/.gitignore +++ b/.gitignore @@ -14,3 +14,5 @@ nosetests.xml # Visual Studio /.vs + +.DS_Store diff --git a/couchpotato/core/downloaders/sabnzbd.py b/couchpotato/core/downloaders/sabnzbd.py index 0f82618..47c94ad 100644 --- a/couchpotato/core/downloaders/sabnzbd.py +++ b/couchpotato/core/downloaders/sabnzbd.py @@ -100,7 +100,7 @@ class Sabnzbd(DownloaderBase): # the version check will work even with wrong api key, so we need the next check as well sab_data = self.call({ - 'mode': 'qstatus', + 'mode': 'queue', }) if not sab_data: return False diff --git a/couchpotato/core/downloaders/transmission.py b/couchpotato/core/downloaders/transmission.py index f40b955..4bb7dbe 100644 --- a/couchpotato/core/downloaders/transmission.py +++ b/couchpotato/core/downloaders/transmission.py @@ -143,12 +143,21 @@ class Transmission(DownloaderBase): log.debug('name=%s / id=%s / downloadDir=%s / hashString=%s / percentDone=%s / status=%s / isStalled=%s / eta=%s / uploadRatio=%s / isFinished=%s / incomplete-dir-enabled=%s / incomplete-dir=%s', (torrent['name'], torrent['id'], torrent['downloadDir'], torrent['hashString'], torrent['percentDone'], torrent['status'], torrent.get('isStalled', 'N/A'), torrent['eta'], torrent['uploadRatio'], torrent['isFinished'], session['incomplete-dir-enabled'], session['incomplete-dir'])) + """ + https://trac.transmissionbt.com/browser/branches/2.8x/libtransmission/transmission.h#L1853 + 0 = Torrent is stopped + 1 = Queued to check files + 2 = Checking files + 3 = Queued to download + 4 = Downloading + 5 = Queued to seed + 6 = Seeding + """ + status = 'busy' if torrent.get('isStalled') and not torrent['percentDone'] == 1 and self.conf('stalled_as_failed'): status = 'failed' - elif torrent['status'] == 0 and torrent['percentDone'] == 1: - status = 'completed' - elif torrent['status'] == 16 and torrent['percentDone'] == 1: + elif torrent['status'] == 0 and torrent['percentDone'] == 1 
and torrent['isFinished']: status = 'completed' elif torrent['status'] in [5, 6]: status = 'seeding' diff --git a/couchpotato/core/media/_base/providers/torrent/bithdtv.py b/couchpotato/core/media/_base/providers/torrent/bithdtv.py index 149d7c1..a3eb1d9 100644 --- a/couchpotato/core/media/_base/providers/torrent/bithdtv.py +++ b/couchpotato/core/media/_base/providers/torrent/bithdtv.py @@ -13,9 +13,6 @@ log = CPLog(__name__) class Base(TorrentProvider): urls = { - 'test': 'https://www.bit-hdtv.com/', - 'login': 'https://www.bit-hdtv.com/takelogin.php', - 'login_check': 'https://www.bit-hdtv.com/messages.php', 'detail': 'https://www.bit-hdtv.com/details.php?id=%s', 'search': 'https://www.bit-hdtv.com/torrents.php?', 'download': 'https://www.bit-hdtv.com/download.php?id=%s', @@ -31,7 +28,7 @@ class Base(TorrentProvider): url = "%s&%s" % (self.urls['search'], query) - data = self.getHTMLData(url) + data = self.getHTMLData(url, headers = self.getRequestHeaders()) if data: # Remove BiT-HDTV's output garbage so outdated BS4 versions successfully parse the HTML @@ -42,11 +39,12 @@ class Base(TorrentProvider): html = BeautifulSoup(data, 'html.parser') try: - result_tables = html.find_all('table', attrs = {'width': '750', 'class': ''}) + result_tables = html.find_all('table', attrs = {'width': '800', 'class': ''}) if result_tables is None: return - result_table = result_tables[1] + # Take first result + result_table = result_tables[0] if result_table is None: return @@ -72,10 +70,10 @@ class Base(TorrentProvider): except: log.error('Failed getting results from %s: %s', (self.getName(), traceback.format_exc())) - def getLoginParams(self): + def getRequestHeaders(self): + cookies = 'h_sl={};h_sp={};h_su={}'.format(self.conf('cookiesettingsl') or '', self.conf('cookiesettingsp') or '', self.conf('cookiesettingsu') or '') return { - 'username': self.conf('username'), - 'password': self.conf('password'), + 'Cookie': cookies } def getMoreInfo(self, item): @@ -87,11 +85,13 @@ 
class Base(TorrentProvider): item['description'] = description return item - def loginSuccess(self, output): - return 'logout.php' in output.lower() - - loginCheckSuccess = loginSuccess + def download(self, url = '', nzb_id = ''): + try: + return self.urlopen(url, headers=self.getRequestHeaders()) + except: + log.error('Failed getting release from %s: %s', (self.getName(), traceback.format_exc())) + return 'try_next' config = [{ 'name': 'bithdtv', @@ -110,13 +110,22 @@ config = [{ 'default': False, }, { - 'name': 'username', + 'name': 'cookiesettingsl', + 'label': 'Cookies (h_sl)', + 'default': '', + 'description': 'Cookie h_sl from session', + }, + { + 'name': 'cookiesettingsp', + 'label': 'Cookies (h_sp)', 'default': '', + 'description': 'Cookie h_sp from session', }, { - 'name': 'password', + 'name': 'cookiesettingsu', + 'label': 'Cookies (h_su)', 'default': '', - 'type': 'password', + 'description': 'Cookie h_su from session', }, { 'name': 'seed_ratio', diff --git a/couchpotato/core/media/_base/providers/torrent/passthepopcorn.py b/couchpotato/core/media/_base/providers/torrent/passthepopcorn.py index c96afd1..6a13ff4 100644 --- a/couchpotato/core/media/_base/providers/torrent/passthepopcorn.py +++ b/couchpotato/core/media/_base/providers/torrent/passthepopcorn.py @@ -73,6 +73,8 @@ class Base(TorrentProvider): torrentdesc += ' Scene' if self.conf('prefer_scene'): torrentscore += 2000 + if self.conf('no_scene'): + torrentscore -= 2000 if 'RemasterTitle' in torrent and torrent['RemasterTitle']: torrentdesc += self.htmlToASCII(' %s' % torrent['RemasterTitle']) @@ -258,6 +260,14 @@ config = [{ 'description': 'Favors scene-releases over non-scene releases.' }, { + 'name': 'no_scene', + 'advanced': True, + 'type': 'bool', + 'label': 'Reject scene', + 'default': 0, + 'description': 'Reject scene-releases over non-scene releases.' 
+ }, + { 'name': 'require_approval', 'advanced': True, 'type': 'bool', diff --git a/couchpotato/core/media/_base/providers/torrent/torrentz.py b/couchpotato/core/media/_base/providers/torrent/torrentz.py index 8412a8d..96e8025 100644 --- a/couchpotato/core/media/_base/providers/torrent/torrentz.py +++ b/couchpotato/core/media/_base/providers/torrent/torrentz.py @@ -15,25 +15,19 @@ log = CPLog(__name__) class Base(TorrentMagnetProvider, RSS): urls = { - 'detail': 'https://torrentz.eu/%s', - 'search': 'https://torrentz.eu/feed?q=%s', - 'verified_search': 'https://torrentz.eu/feed_verified?q=%s' + 'detail': 'https://torrentz2.eu/%s', + 'search': 'https://torrentz2.eu/feed?f=%s' } http_time_between_calls = 0 def _searchOnTitle(self, title, media, quality, results): - search_url = self.urls['verified_search'] if self.conf('verified_only') else self.urls['search'] + search_url = self.urls['search'] # Create search parameters search_params = self.buildUrl(title, media, quality) - smin = quality.get('size_min') - smax = quality.get('size_max') - if smin and smax: - search_params += ' size %sm - %sm' % (smin, smax) - min_seeds = tryInt(self.conf('minimal_seeds')) if min_seeds: search_params += ' seed > %s' % (min_seeds - 1) @@ -52,17 +46,24 @@ class Base(TorrentMagnetProvider, RSS): magnet = splitString(detail_url, '/')[-1] magnet_url = 'magnet:?xt=urn:btih:%s&dn=%s&tr=%s' % (magnet.upper(), tryUrlencode(name), tryUrlencode('udp://tracker.openbittorrent.com/announce')) - reg = re.search('Size: (?P<size>\d+) MB Seeds: (?P<seeds>[\d,]+) Peers: (?P<peers>[\d,]+)', six.text_type(description)) + reg = re.search('Size: (?P<size>\d+) (?P<unit>[KMG]B) Seeds: (?P<seeds>[\d,]+) Peers: (?P<peers>[\d,]+)', six.text_type(description)) size = reg.group('size') + unit = reg.group('unit') seeds = reg.group('seeds').replace(',', '') peers = reg.group('peers').replace(',', '') + multiplier = 1 + if unit == 'GB': + multiplier = 1000 + elif unit == 'KB': + multiplier = 0 + results.append({ 'id': magnet, 'name': six.text_type(name), 'url': 
magnet_url, 'detail_url': detail_url, - 'size': tryInt(size), + 'size': tryInt(size)*multiplier, 'seeders': tryInt(seeds), 'leechers': tryInt(peers), }) @@ -78,7 +79,7 @@ config = [{ 'tab': 'searcher', 'list': 'torrent_providers', 'name': 'Torrentz', - 'description': 'Torrentz is a free, fast and powerful meta-search engine. Torrentz', + 'description': 'Torrentz.eu was a free, fast and powerful meta-search engine combining results from dozens of search engines, Torrentz2.eu is trying to replace it. Torrentz2', 'wizard': True, 'icon': 'iVBORw0KGgoAAAANSUhEUgAAABAAAAAQCAYAAAAf8/9hAAAAQklEQVQ4y2NgAALjtJn/ycEMlGiGG0IVAxiwAKzOxaKGARcgxgC8YNSAwWoAzuRMjgsIugqfAUR5CZcBRIcHsWEAADSA96Ig020yAAAAAElFTkSuQmCC', 'options': [ @@ -88,13 +89,6 @@ config = [{ 'default': True }, { - 'name': 'verified_only', - 'type': 'bool', - 'default': True, - 'advanced': True, - 'description': 'Only search verified releases', - }, - { 'name': 'minimal_seeds', 'type': 'int', 'default': 1, diff --git a/couchpotato/core/media/_base/providers/torrent/yts.py b/couchpotato/core/media/_base/providers/torrent/yts.py index 188f9e5..674adc3 100644 --- a/couchpotato/core/media/_base/providers/torrent/yts.py +++ b/couchpotato/core/media/_base/providers/torrent/yts.py @@ -11,8 +11,8 @@ class Base(TorrentMagnetProvider): # Only qualities allowed: 720p/1080p/3D - the rest will fail. 
# All YTS.ag torrents are verified urls = { - 'detail': 'https://yts.ag/api#list_movies', - 'search': 'https://yts.ag/api/v2/list_movies.json?query_term=%s&limit=%s&page=%s' + 'detail': 'https://yts.am/api#list_movies', + 'search': 'https://yts.am/api/v2/list_movies.json?query_term=%s&limit=%s&page=%s' } def _search(self, movie, quality, results): diff --git a/couchpotato/core/media/movie/_base/static/details.js b/couchpotato/core/media/movie/_base/static/details.js index 127e260..dd156ba 100644 --- a/couchpotato/core/media/movie/_base/static/details.js +++ b/couchpotato/core/media/movie/_base/static/details.js @@ -77,7 +77,6 @@ var MovieDetails = new Class({ 'class': parent.get('title') == t ? 'icon-ok' : '' })); }); - }, addSection: function(name, section_el){ @@ -101,7 +100,7 @@ var MovieDetails = new Class({ var self = this; self.el.addClass('show'); - + document.onkeyup = self.keyup.bind(self); //if(!App.mobile_screen){ // $(self.content).getElements('> .head, > .section').each(function(section, nr){ // dynamics.css(section, { @@ -130,12 +129,19 @@ var MovieDetails = new Class({ }, + keyup: function(e) { + if (e.keyCode == 27 /* Esc */) { + this.close(); + } + }, + close: function(){ var self = this; var ended = function() { self.el.dispose(); self.overlay.removeEventListener('transitionend', ended); + document.onkeyup = null; }; self.overlay.addEventListener('transitionend', ended, false); @@ -165,5 +171,4 @@ var MovieDetails = new Class({ App.removeEvent('history.push', self.outer_click); } - }); diff --git a/couchpotato/core/media/movie/providers/automation/letterboxd.py b/couchpotato/core/media/movie/providers/automation/letterboxd.py index e1fcddb..072c416 100644 --- a/couchpotato/core/media/movie/providers/automation/letterboxd.py +++ b/couchpotato/core/media/movie/providers/automation/letterboxd.py @@ -13,7 +13,7 @@ autoload = 'Letterboxd' class Letterboxd(Automation): - url = 'http://letterboxd.com/%s/watchlist/' + url = 
'http://letterboxd.com/%s/watchlist/page/%d/' pattern = re.compile(r'(.*)\((\d*)\)') interval = 1800 @@ -46,18 +46,30 @@ class Letterboxd(Automation): if not enablers[index]: continue - soup = BeautifulSoup(self.getHTMLData(self.url % username)) + soup = BeautifulSoup(self.getHTMLData(self.url % (username, 1))) - for movie in soup.find_all('li', attrs = {'class': 'poster-container'}): - img = movie.find('img', movie) - title = img.get('alt') + pagination = soup.find_all('li', attrs={'class': 'paginate-page'}) + number_of_pages = tryInt(pagination[-1].find('a').get_text()) if pagination else 1 + pages = range(1, number_of_pages) - movies.append({ - 'title': title - }) + for page in pages: + soup = BeautifulSoup(self.getHTMLData(self.url % (username, page))) + movies += self.getMoviesFromHTML(soup) return movies + def getMoviesFromHTML(self, html): + movies = [] + + for movie in html.find_all('li', attrs={'class': 'poster-container'}): + img = movie.find('img') + title = img.get('alt') + + movies.append({ + 'title': title + }) + + return movies config = [{ 'name': 'letterboxd', diff --git a/couchpotato/core/media/movie/providers/metadata/xbmc.py b/couchpotato/core/media/movie/providers/metadata/xbmc.py index 3031403..acca4aa 100644 --- a/couchpotato/core/media/movie/providers/metadata/xbmc.py +++ b/couchpotato/core/media/movie/providers/metadata/xbmc.py @@ -3,6 +3,7 @@ import os import re import traceback import xml.dom.minidom +import time from couchpotato.core.media.movie.providers.metadata.base import MovieMetaData from couchpotato.core.helpers.encoding import toUnicode @@ -92,7 +93,7 @@ class XBMC(MovieMetaData): pass # Other values - types = ['year', 'originaltitle:original_title', 'outline', 'plot', 'tagline', 'premiered:released'] + types = ['year', 'originaltitle:original_title', 'outline', 'plot', 'tagline'] for type in types: if ':' in type: @@ -107,6 +108,14 @@ class XBMC(MovieMetaData): except: pass + # Release date + try: + if movie_info.get('released'): 
+ el = SubElement(nfoxml, 'premiered') + el.text = time.strftime('%Y-%m-%d', time.strptime(movie_info.get('released'), '%d %b %Y')) + except: + log.debug('Failed to parse release date %s: %s', (movie_info.get('released'), traceback.format_exc())) + # Rating for rating_type in ['imdb', 'rotten', 'tmdb']: try: diff --git a/couchpotato/core/media/movie/providers/nzb/binsearch.py b/couchpotato/core/media/movie/providers/nzb/binsearch.py index d6f4852..b3e59c0 100644 --- a/couchpotato/core/media/movie/providers/nzb/binsearch.py +++ b/couchpotato/core/media/movie/providers/nzb/binsearch.py @@ -21,7 +21,7 @@ class BinSearch(MovieProvider, Base): 'adv_sort': 'date', 'adv_col': 'on', 'adv_nfo': 'on', - 'minsize': quality.get('size_min'), - 'maxsize': quality.get('size_max'), + 'xminsize': quality.get('size_min'), + 'xmaxsize': quality.get('size_max'), }) return query diff --git a/couchpotato/core/media/movie/providers/torrent/alpharatio.py b/couchpotato/core/media/movie/providers/torrent/alpharatio.py index e7f39c1..762ef47 100644 --- a/couchpotato/core/media/movie/providers/torrent/alpharatio.py +++ b/couchpotato/core/media/movie/providers/torrent/alpharatio.py @@ -19,7 +19,7 @@ class AlphaRatio(MovieProvider, Base): cat_ids = [ ([7, 9], ['bd50']), - ([7, 9], ['720p', '1080p']), + ([7, 9], ['720p', '1080p', '2160p']), ([6, 8], ['dvdr']), ([6, 8], ['brrip', 'dvdrip']), ] diff --git a/couchpotato/core/media/movie/providers/torrent/passthepopcorn.py b/couchpotato/core/media/movie/providers/torrent/passthepopcorn.py index 5e92eef..b2edf4d 100644 --- a/couchpotato/core/media/movie/providers/torrent/passthepopcorn.py +++ b/couchpotato/core/media/movie/providers/torrent/passthepopcorn.py @@ -10,7 +10,7 @@ autoload = 'PassThePopcorn' class PassThePopcorn(MovieProvider, Base): quality_search_params = { - '2160p': {'resolution': '4K'}, + '2160p': {'resolution': '2160p'}, 'bd50': {'media': 'Blu-ray', 'format': 'BD50'}, '1080p': {'resolution': '1080p'}, '720p': {'resolution': '720p'}, 
@@ -25,7 +25,7 @@ class PassThePopcorn(MovieProvider, Base): } post_search_filters = { - '2160p': {'Resolution': ['4K']}, + '2160p': {'Resolution': ['2160p']}, 'bd50': {'Codec': ['BD50']}, '1080p': {'Resolution': ['1080p']}, '720p': {'Resolution': ['720p']}, diff --git a/couchpotato/core/media/movie/providers/torrent/torrentleech.py b/couchpotato/core/media/movie/providers/torrent/torrentleech.py index eea74f8..bfa5cd1 100644 --- a/couchpotato/core/media/movie/providers/torrent/torrentleech.py +++ b/couchpotato/core/media/movie/providers/torrent/torrentleech.py @@ -11,12 +11,14 @@ autoload = 'TorrentLeech' class TorrentLeech(MovieProvider, Base): cat_ids = [ - ([13], ['720p', '1080p', 'bd50']), + ([41, 47], ['2160p']), + ([13, 14, 37, 43], ['720p', '1080p']), + ([13], ['bd50']), ([8], ['cam']), ([9], ['ts', 'tc']), - ([10], ['r5', 'scr']), + ([10, 11, 37], ['r5', 'scr']), ([11], ['dvdrip']), - ([13, 14], ['brrip']), + ([13, 14, 37, 43], ['brrip']), ([12], ['dvdr']), ] diff --git a/couchpotato/core/media/movie/providers/torrent/torrentz.py b/couchpotato/core/media/movie/providers/torrent/torrentz.py index 011ec43..d1294e6 100644 --- a/couchpotato/core/media/movie/providers/torrent/torrentz.py +++ b/couchpotato/core/media/movie/providers/torrent/torrentz.py @@ -11,4 +11,4 @@ autoload = 'Torrentz' class Torrentz(MovieProvider, Base): def buildUrl(self, title, media, quality): - return tryUrlencode('"%s %s"' % (title, media['info']['year'])) \ No newline at end of file + return tryUrlencode('%s %s' % (title, media['info']['year'])) diff --git a/couchpotato/core/plugins/renamer.py b/couchpotato/core/plugins/renamer.py index 4163383..43f3d5e 100755 --- a/couchpotato/core/plugins/renamer.py +++ b/couchpotato/core/plugins/renamer.py @@ -839,7 +839,7 @@ Remove it if you want it to be renamed (again, or at least let it try again) if use_default: move_type = self.conf('default_file_action') - if move_type not in ['copy', 'link']: + if move_type not in ['copy', 'link', 
'symlink_reversed']: try: log.info('Moving "%s" to "%s"', (old, dest)) shutil.move(old, dest) @@ -856,6 +856,16 @@ Remove it if you want it to be renamed (again, or at least let it try again) elif move_type == 'copy': log.info('Copying "%s" to "%s"', (old, dest)) shutil.copy(old, dest) + elif move_type == 'symlink_reversed': + log.info('Reverse symlink "%s" to "%s"', (old, dest)) + try: + shutil.move(old, dest) + except: + log.error('Moving "%s" to "%s" went wrong: %s', (old, dest, traceback.format_exc())) + try: + symlink(dest, old) + except: + log.error('Error while linking "%s" back to "%s": %s', (dest, old, traceback.format_exc())) else: log.info('Linking "%s" to "%s"', (old, dest)) # First try to hardlink @@ -863,7 +873,7 @@ Remove it if you want it to be renamed (again, or at least let it try again) log.debug('Hardlinking file "%s" to "%s"...', (old, dest)) link(old, dest) except: - # Try to simlink next + # Try to symlink next log.debug('Couldn\'t hardlink file "%s" to "%s". Symlinking instead. 
Error: %s.', (old, dest, traceback.format_exc())) shutil.copy(old, dest) try: @@ -1115,10 +1125,10 @@ Remove it if you want it to be renamed (again, or at least let it try again) for release_download in scan_releases: # Ask the renamer to scan the item if release_download['scan']: - if release_download['pause'] and self.conf('file_action') == 'link': + if release_download['pause'] and self.conf('file_action') in ['link', "symlink_reversed"]: fireEvent('download.pause', release_download = release_download, pause = True, single = True) self.scan(release_download = release_download) - if release_download['pause'] and self.conf('file_action') == 'link': + if release_download['pause'] and self.conf('file_action') in ['link', "symlink_reversed"]: fireEvent('download.pause', release_download = release_download, pause = False, single = True) if release_download['process_complete']: # First make sure the files were successfully processed @@ -1171,7 +1181,7 @@ Remove it if you want it to be renamed (again, or at least let it try again) return src in group['before_rename'] def moveTypeIsLinked(self): - return self.conf('default_file_action') in ['copy', 'link'] + return self.conf('default_file_action') in ['copy', 'link', "symlink_reversed"] def statusInfoComplete(self, release_download): return release_download.get('id') and release_download.get('downloader') and release_download.get('folder') @@ -1507,9 +1517,9 @@ config = [{ 'label': 'Default File Action', 'default': 'move', 'type': 'dropdown', - 'values': [('Link', 'link'), ('Copy', 'copy'), ('Move', 'move')], + 'values': [('Link', 'link'), ('Copy', 'copy'), ('Move', 'move'), ('Reverse Symlink', 'symlink_reversed')], 'description': ('Link, Copy or Move after download completed.', - 'Link first tries hard link, then sym link and falls back to Copy.'), + 'Link first tries hard link, then sym link and falls back to Copy. 
Reverse Symlink moves the file and creates symlink to it in the original location'), 'advanced': True, }, { @@ -1517,7 +1527,7 @@ config = [{ 'label': 'Torrent File Action', 'default': 'link', 'type': 'dropdown', - 'values': [('Link', 'link'), ('Copy', 'copy'), ('Move', 'move')], + 'values': [('Link', 'link'), ('Copy', 'copy'), ('Move', 'move'), ('Reverse Symlink', 'symlink_reversed')], 'description': 'See above. It is prefered to use link when downloading torrents as it will save you space, while still being able to seed.', 'advanced': True, }, diff --git a/couchpotato/static/images/icons/dark/safari.svg b/couchpotato/static/images/icons/dark/safari.svg new file mode 100644 index 0000000..89b5092 --- /dev/null +++ b/couchpotato/static/images/icons/dark/safari.svg @@ -0,0 +1 @@ + \ No newline at end of file diff --git a/couchpotato/static/images/icons/safari.svg b/couchpotato/static/images/icons/safari.svg new file mode 100644 index 0000000..89b5092 --- /dev/null +++ b/couchpotato/static/images/icons/safari.svg @@ -0,0 +1 @@ + \ No newline at end of file diff --git a/couchpotato/static/scripts/combined.plugins.min.js b/couchpotato/static/scripts/combined.plugins.min.js index b93d282..1862ca9 100644 --- a/couchpotato/static/scripts/combined.plugins.min.js +++ b/couchpotato/static/scripts/combined.plugins.min.js @@ -382,16 +382,23 @@ var MovieDetails = new Class({ open: function() { var self = this; self.el.addClass("show"); + document.onkeyup = self.keyup.bind(self); self.outer_click = function() { self.close(); }; App.addEvent("history.push", self.outer_click); }, + keyup: function(e) { + if (e.keyCode == 27) { + this.close(); + } + }, close: function() { var self = this; var ended = function() { self.el.dispose(); self.overlay.removeEventListener("transitionend", ended); + document.onkeyup = null; }; self.overlay.addEventListener("transitionend", ended, false); self.el.removeClass("show"); @@ -3416,7 +3423,7 @@ var QualityBase = new Class({ try { return 
this.qualities.filter(function(q) { return q.identifier == identifier; - }).pick(); + }).pick() || {}; } catch (e) {} return {}; }, diff --git a/couchpotato/templates/index.html b/couchpotato/templates/index.html index 927b2d0..4020773 100644 --- a/couchpotato/templates/index.html +++ b/couchpotato/templates/index.html @@ -15,6 +15,9 @@ + + + diff --git a/libs/subliminal/services/__init__.py b/libs/subliminal/services/__init__.py index b82b309..b169aaf 100755 --- a/libs/subliminal/services/__init__.py +++ b/libs/subliminal/services/__init__.py @@ -183,16 +183,21 @@ class ServiceBase(object): return False return True - def download_file(self, url, filepath): + def download_file(self, url, filepath, data=None): """Attempt to download a file and remove it in case of failure :param string url: URL to download :param string filepath: destination path + :param string data: data to add to the post request """ logger.info(u'Downloading %s in %s' % (url, filepath)) try: - r = self.session.get(url, timeout = 10, headers = {'Referer': url, 'User-Agent': self.user_agent}) + headers = {'Referer': url, 'User-Agent': self.user_agent} + if data: + r = self.session.post(url, data=data, timeout=10, headers=headers) + else: + r = self.session.get(url, timeout=10, headers=headers) with open(filepath, 'wb') as f: f.write(r.content) except Exception as e: @@ -202,18 +207,23 @@ class ServiceBase(object): raise DownloadFailedError(str(e)) logger.debug(u'Download finished') - def download_zip_file(self, url, filepath): + def download_zip_file(self, url, filepath, data=None): """Attempt to download a zip file and extract any subtitle file from it, if any. This cleans up after itself if anything fails. 
:param string url: URL of the zip file to download :param string filepath: destination path for the subtitle + :param string data: data to add to the post request """ logger.info(u'Downloading %s in %s' % (url, filepath)) try: zippath = filepath + '.zip' - r = self.session.get(url, timeout = 10, headers = {'Referer': url, 'User-Agent': self.user_agent}) + headers = {'Referer': url, 'User-Agent': self.user_agent} + if data: + r = self.session.post(url, data=data, timeout=10, headers=headers) + else: + r = self.session.get(url, timeout=10, headers=headers) with open(zippath, 'wb') as f: f.write(r.content) if not zipfile.is_zipfile(zippath): diff --git a/libs/subliminal/services/subscenter.py b/libs/subliminal/services/subscenter.py index 258edad..7125f92 100644 --- a/libs/subliminal/services/subscenter.py +++ b/libs/subliminal/services/subscenter.py @@ -16,124 +16,147 @@ # You should have received a copy of the GNU Lesser General Public License # along with subliminal. If not, see . from . import ServiceBase -from ..exceptions import DownloadFailedError, ServiceError +from ..exceptions import ServiceError from ..language import language_set from ..subtitles import get_subtitle_path, ResultSubtitle from ..videos import Episode, Movie -from ..utils import to_unicode, get_keywords -from bs4 import BeautifulSoup +from ..utils import to_unicode + import bisect -import json import logging +from urllib import urlencode + logger = logging.getLogger(__name__) class Subscenter(ServiceBase): - server = 'http://www.subscenter.info/he/' - api_based = False + server = 'http://www.cinemast.org/he/cinemast/api/' + api_based = True languages = language_set(['he']) videos = [Episode, Movie] require_video = False - def _search_url_title(self, title, kind): - """Search the URL title for the given `title`. - - :param str title: title to search for. - :param str kind: kind of the title, ``movie`` or ``series``. - :return: the URL version of the title. 
- :rtype: str or None - """ - # make the search - logger.info('Searching title name for %r', title) - r = self.session.get(self.server + 'subtitle/search/', params={'q': title}, allow_redirects=False, timeout=10) - r.raise_for_status() + default_username = 'subliminal@gmail.com' + default_password = 'subliminal' - # if redirected, get the url title from the Location header - if r.is_redirect: - parts = r.headers['Location'].split('/') + def __init__(self, config=None): + super(Subscenter, self).__init__(config) + self.token = None + self.user_id = None - # check kind - if parts[-3] == kind: - return parts[-2] + def init(self): + super(Subscenter, self).init() + logger.debug('Logging in') + url = self.server_url + 'login/' - return None + # actual login + data = {'username': self.default_username, 'password': self.default_password} + r = self.session.post(url, data=urlencode(data), allow_redirects=False, timeout=10) - # otherwise, get the first valid suggestion - soup = BeautifulSoup(r.content, ['lxml', 'html.parser']) - suggestions = soup.select('#processes div.generalWindowTop a') - logger.debug('Found %d suggestions', len(suggestions)) - for suggestion in suggestions: - parts = suggestion.attrs['href'].split('/') + if r.status_code != 200: + raise ServiceError('Login failed') + + try: + result = r.json() + if 'token' not in result: + raise ServiceError('Login failed') - # check kind - if parts[-3] == kind: - return parts[-2] + logger.info('Logged in') + self.user_id = r.json().get('user') + self.token = r.json().get('token') + except ValueError: + raise ServiceError('Login failed') + + def terminate(self): + super(Subscenter, self).terminate() + if self.token or self.user_id: + logger.info('Logged out') + self.token = None + self.user_id = None def list_checked(self, video, languages): series = None season = None episode = None title = video.title + year = video.year if isinstance(video, Episode): series = video.series season = video.season episode = 
video.episode - return self.query(video.path or video.release, languages, get_keywords(video.guess), series, season, - episode, title) + return self.query(video.path or video.release, languages, series, season, episode, title, year) - def query(self, filepath, languages=None, keywords=None, series=None, season=None, episode=None, title=None): + def query(self, filepath, languages=None, series=None, season=None, episode=None, title=None, year=None): logger.debug(u'Getting subtitles for {0} season {1} episode {2} with languages {3}'.format( series, season, episode, languages)) - # Set the correct parameters depending on the kind. - if series and season and episode: - url_series = self._search_url_title(series, 'series') - url = self.server + 'cst/data/series/sb/{}/{}/{}/'.format(url_series, season, episode) + + query = { + 'user': self.user_id, + 'token': self.token + } + + # episode + if season and episode: + query['q'] = series + query['type'] = 'series' + query['season'] = season + query['episode'] = episode elif title: - url_title = self._search_url_title(title, 'movie') - url = self.server + 'cst/data/movie/sb/{}/'.format(url_title) + query['q'] = title + query['type'] = 'movies' + if year: + query['year_start'] = year - 1 + query['year_end'] = year else: raise ServiceError('One or more parameters are missing') - logger.debug('Searching subtitles for title {0}, season {1}, episode {2}'.format(title, season, episode)) - response = self.session.get(url) - if response.status_code != 200: - raise ServiceError('Request failed with status code {0}'.format(response.status_code)) - # Loop over results. 
- subtitles = dict() - response_json = json.loads(response.content) - for language_code, language_data in response_json.items(): - language_object = self.get_language(language_code) - if language_object in self.languages and language_object in languages: - for quality_data in language_data.values(): - for quality, subtitles_data in quality_data.items(): - for subtitle_item in subtitles_data.values(): - # Read the item. - subtitle_id = subtitle_item['id'] - subtitle_key = subtitle_item['key'] - subtitle_version = subtitle_item['h_version'] - release = subtitle_item['subtitle_version'] - subtitle_path = get_subtitle_path(filepath, language_object, self.config.multi) - download_link = self.server_url + 'subtitle/download/{0}/{1}/?v={2}&key={3}'.format( - language_code, subtitle_id, subtitle_version, subtitle_key) - # Add the release and increment downloaded count if we already have the subtitle. - if subtitle_id in subtitles: - logger.debug('Found additional release {0} for subtitle {1}'.format( - release, subtitle_id)) - bisect.insort_left(subtitles[subtitle_id].release, release) # Deterministic order. - continue - # Otherwise create it. 
- subtitle = ResultSubtitle(subtitle_path, language_object, self.__class__.__name__.lower(), - download_link, release=to_unicode(release)) - logger.debug('Found subtitle %r', subtitle) - subtitles[subtitle_id] = subtitle + + # get the list of subtitles + logger.debug('Getting the list of subtitles') + url = self.server_url + 'search/' + r = self.session.post(url, data=urlencode(query)) + r.raise_for_status() + + try: + results = r.json() + except ValueError: + return {} + + # loop over results + subtitles = {} + for group_data in results.get('data', []): + for language_code, subtitles_data in group_data.get('subtitles', {}).items(): + language_object = self.get_language(language_code) + + for subtitle_item in subtitles_data: + # read the item + subtitle_id = subtitle_item['id'] + subtitle_key = subtitle_item['key'] + release = subtitle_item['version'] + + subtitle_path = get_subtitle_path(filepath, language_object, self.config.multi) + download_link = self.server_url + 'subtitle/download/{0}/?v={1}&key={2}&sub_id={3}'.format( + language_code, release, subtitle_key, subtitle_id) + # Add the release and increment downloaded count if we already have the subtitle. + if subtitle_id in subtitles: + logger.debug('Found additional release {0} for subtitle {1}'.format( + release, subtitle_id)) + bisect.insort_left(subtitles[subtitle_id].release, release) # Deterministic order. + continue + # Otherwise create it. + subtitle = ResultSubtitle(subtitle_path, language_object, self.__class__.__name__.lower(), + download_link, release=to_unicode(release)) + logger.debug('Found subtitle %r', subtitle) + subtitles[subtitle_id] = subtitle + return subtitles.values() def download(self, subtitle): - try: - self.download_zip_file(subtitle.link, subtitle.path) - except DownloadFailedError: - # If no zip file was retrieved, daily downloads limit has exceeded. 
- raise ServiceError('Daily limit exceeded') + data = { + 'user': self.user_id, + 'token': self.token + } + self.download_zip_file(subtitle.link, subtitle.path, data=urlencode(data)) return subtitle