diff --git a/couchpotato/__init__.py b/couchpotato/__init__.py index 8dc691d..04756fa 100644 --- a/couchpotato/__init__.py +++ b/couchpotato/__init__.py @@ -1,31 +1,47 @@ from couchpotato.api import api_docs, api_docs_missing, api -from couchpotato.core.auth import requires_auth from couchpotato.core.event import fireEvent -from couchpotato.core.helpers.variable import md5 +from couchpotato.core.helpers.variable import md5, tryInt from couchpotato.core.logger import CPLog from couchpotato.environment import Env -from sqlalchemy.engine import create_engine -from sqlalchemy.orm import scoped_session -from sqlalchemy.orm.session import sessionmaker from tornado import template -from tornado.web import RequestHandler +from tornado.web import RequestHandler, authenticated import os import time +import traceback log = CPLog(__name__) + views = {} template_loader = template.Loader(os.path.join(os.path.dirname(__file__), 'templates')) + +class BaseHandler(RequestHandler): + + def get_current_user(self): + username = Env.setting('username') + password = Env.setting('password') + + if username or password: + return self.get_secure_cookie('user') + else: # Login when no username or password are set + return True + # Main web handler -@requires_auth -class WebHandler(RequestHandler): +class WebHandler(BaseHandler): + + @authenticated def get(self, route, *args, **kwargs): route = route.strip('/') if not views.get(route): page_not_found(self) return - self.write(views[route]()) + + try: + self.write(views[route]()) + except: + log.error("Failed doing web request '%s': %s", (route, traceback.format_exc())) + self.write({'success': False, 'error': 'Failed returning results'}) def addView(route, func, static = False): views[route] = func @@ -58,16 +74,54 @@ addView('docs', apiDocs) class KeyHandler(RequestHandler): def get(self, *args, **kwargs): api = None + + try: + username = Env.setting('username') + password = Env.setting('password') + + if (self.get_argument('u') == md5(username) or not username) and (self.get_argument('p') == password or not password): + api = Env.setting('api_key') + + self.write({ + 'success': api is not None, + 'api_key': api + }) + except: + log.error('Failed doing key request: %s', (traceback.format_exc())) + self.write({'success': False, 'error': 'Failed returning results'}) + + +class LoginHandler(BaseHandler): + + def get(self, *args, **kwargs): + + if self.get_current_user(): + self.redirect(Env.get('web_base')) + else: + self.write(template_loader.load('login.html').generate(sep = os.sep, fireEvent = fireEvent, Env = Env)) + + def post(self, *args, **kwargs): + + api = None + username = Env.setting('username') password = Env.setting('password') - if (self.get_argument('u') == md5(username) or not username) and (self.get_argument('p') == password or not password): + if (self.get_argument('username') == username or not username) and (md5(self.get_argument('password')) == password or not password): api = Env.setting('api_key') - self.write({ - 'success': api is not None, - 'api_key': api - }) + if api: + remember_me = tryInt(self.get_argument('remember_me', default = 0)) + self.set_secure_cookie('user', api, expires_days = 30 if remember_me > 0 else None) + + self.redirect(Env.get('web_base')) + +class LogoutHandler(BaseHandler): + + def get(self, *args, **kwargs): + self.clear_cookie('user') + self.redirect('%slogin/' % Env.get('web_base')) + def page_not_found(rh): index_url = Env.get('web_base') diff --git a/couchpotato/api.py b/couchpotato/api.py index 77957f1..a9f449b 
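# The handlers above replace the old HTTP basic-auth decorator with Tornado's
# signed-cookie login (get_secure_cookie / set_secure_cookie plus @authenticated).
# A minimal, self-contained sketch of the pieces they rely on; the handler names
# and routes here are illustrative only, not CouchPotato's actual wiring.
import tornado.ioloop
import tornado.web

class BaseHandler(tornado.web.RequestHandler):
    def get_current_user(self):
        # Returns None when the signed cookie is missing or invalid, which
        # makes @authenticated redirect the request to login_url.
        return self.get_secure_cookie('user')

class MainHandler(BaseHandler):
    @tornado.web.authenticated
    def get(self):
        self.write('Hello %s' % self.current_user)

class LoginHandler(BaseHandler):
    def get(self):
        self.write('<form method="post"><input name="name"><input type="submit"></form>')

    def post(self):
        # expires_days = None gives a session cookie, mirroring the
        # "remember_me" switch in the diff above.
        self.set_secure_cookie('user', self.get_argument('name'), expires_days = None)
        self.redirect('/')

application = tornado.web.Application(
    [(r'/', MainHandler), (r'/login/?', LoginHandler)],
    cookie_secret = 'change-me',   # required for set/get_secure_cookie
    login_url = '/login/',         # where @authenticated sends anonymous users
)

if __name__ == '__main__':
    application.listen(8888)
    tornado.ioloop.IOLoop.instance().start()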
100644 --- a/couchpotato/api.py +++ b/couchpotato/api.py @@ -1,4 +1,5 @@ from couchpotato.core.helpers.request import getParams +from couchpotato.core.logger import CPLog from functools import wraps from threading import Thread from tornado.gen import coroutine @@ -6,8 +7,12 @@ from tornado.web import RequestHandler, asynchronous import json import threading import tornado +import traceback import urllib +log = CPLog(__name__) + + api = {} api_locks = {} api_nonblock = {} @@ -41,7 +46,11 @@ class NonBlockHandler(RequestHandler): if self.request.connection.stream.closed(): return - self.write(response) + try: + self.finish(response) + except: + log.error('Failed doing nonblock request: %s', (traceback.format_exc())) + self.finish({'success': False, 'error': 'Failed returning results'}) def on_connection_close(self): @@ -70,33 +79,43 @@ class ApiHandler(RequestHandler): api_locks[route].acquire() - kwargs = {} - for x in self.request.arguments: - kwargs[x] = urllib.unquote(self.get_argument(x)) - - # Split array arguments - kwargs = getParams(kwargs) - - # Remove t random string - try: del kwargs['t'] - except: pass - - # Add async callback handler - @run_async - def run_handler(callback): - result = api[route](**kwargs) - callback(result) - result = yield tornado.gen.Task(run_handler) - - # Check JSONP callback - jsonp_callback = self.get_argument('callback_func', default = None) - - if jsonp_callback: - self.write(str(jsonp_callback) + '(' + json.dumps(result) + ')') - elif isinstance(result, (tuple)) and result[0] == 'redirect': - self.redirect(result[1]) - else: - self.write(result) + try: + + kwargs = {} + for x in self.request.arguments: + kwargs[x] = urllib.unquote(self.get_argument(x)) + + # Split array arguments + kwargs = getParams(kwargs) + + # Remove t random string + try: del kwargs['t'] + except: pass + + # Add async callback handler + @run_async + def run_handler(callback): + try: + result = api[route](**kwargs) + callback(result) + except: + log.error('Failed doing api request "%s": %s', (route, traceback.format_exc())) + callback({'success': False, 'error': 'Failed returning results'}) + result = yield tornado.gen.Task(run_handler) + + # Check JSONP callback + jsonp_callback = self.get_argument('callback_func', default = None) + + if jsonp_callback: + self.write(str(jsonp_callback) + '(' + json.dumps(result) + ')') + elif isinstance(result, tuple) and result[0] == 'redirect': + self.redirect(result[1]) + else: + self.write(result) + + except: + log.error('Failed doing api request "%s": %s', (route, traceback.format_exc())) + self.write({'success': False, 'error': 'Failed returning results'}) api_locks[route].release() diff --git a/couchpotato/core/_base/clientscript/main.py b/couchpotato/core/_base/clientscript/main.py index fece6fa..1b7f163 100644 --- a/couchpotato/core/_base/clientscript/main.py +++ b/couchpotato/core/_base/clientscript/main.py @@ -6,6 +6,7 @@ from couchpotato.core.plugins.base import Plugin from couchpotato.environment import Env from minify.cssmin import cssmin from minify.jsmin import jsmin +from tornado.web import StaticFileHandler import os import re import traceback @@ -80,7 +81,7 @@ class ClientScript(Plugin): for static_type in self.core_static: for rel_path in self.core_static.get(static_type): file_path = os.path.join(Env.get('app_dir'), 'couchpotato', 'static', rel_path) - core_url = 'api/%s/static/%s?%s' % (Env.setting('api_key'), rel_path, tryInt(os.path.getmtime(file_path))) + core_url = 'static/%s' % rel_path if static_type == 'script': 
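# The ApiHandler changes above (couchpotato/api.py) run the blocking
# api[route](**kwargs) call on a worker thread and hand the result back through
# a callback that the coroutine yields on via gen.Task (a Tornado 3-era API).
# The run_async decorator itself is not shown in the hunk; this is a minimal
# stand-alone sketch of that thread-plus-callback pattern, not the exact
# CouchPotato implementation.
from functools import wraps
from threading import Thread
import Queue  # Python 2, matching this code base
import time

def run_async(func):
    # Run the wrapped function on a daemon thread; the caller supplies a
    # callback that receives the result when the work finishes.
    @wraps(func)
    def async_func(*args, **kwargs):
        t = Thread(target = func, args = args, kwargs = kwargs)
        t.daemon = True
        t.start()
        return t
    return async_func

@run_async
def run_handler(callback):
    time.sleep(1)                      # stand-in for api[route](**kwargs)
    callback({'success': True})

if __name__ == '__main__':
    results = Queue.Queue()
    run_handler(results.put)           # the callback just hands the result back
    print results.get()                # blocks until the worker thread is done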
self.registerScript(core_url, file_path, position = 'front') @@ -90,6 +91,13 @@ class ClientScript(Plugin): def minify(self): + # Create cache dir + cache = Env.get('cache_dir') + parent_dir = os.path.join(cache, 'minified') + self.makeDir(parent_dir) + + Env.get('app').add_handlers(".*$", [(Env.get('web_base') + 'minified/(.*)', StaticFileHandler, {'path': parent_dir})]) + for file_type in ['style', 'script']: ext = 'js' if file_type is 'script' else 'css' positions = self.paths.get(file_type, {}) @@ -100,8 +108,8 @@ class ClientScript(Plugin): def _minify(self, file_type, files, position, out): cache = Env.get('cache_dir') - out_name = 'minified_' + out - out = os.path.join(cache, out_name) + out_name = out + out = os.path.join(cache, 'minified', out_name) raw = [] for file_path in files: @@ -111,7 +119,7 @@ class ClientScript(Plugin): data = jsmin(f) else: data = self.prefix(f) - data = cssmin(f) + data = cssmin(data) data = data.replace('../images/', '../static/images/') data = data.replace('../fonts/', '../static/fonts/') data = data.replace('../../static/', '../static/') # Replace inside plugins @@ -131,7 +139,7 @@ class ClientScript(Plugin): if not self.minified[file_type].get(position): self.minified[file_type][position] = [] - minified_url = 'api/%s/file.cache/%s?%s' % (Env.setting('api_key'), out_name, tryInt(os.path.getmtime(out))) + minified_url = 'minified/%s?%s' % (out_name, tryInt(os.path.getmtime(out))) self.minified[file_type][position].append(minified_url) def getStyles(self, *args, **kwargs): @@ -165,6 +173,8 @@ class ClientScript(Plugin): def register(self, api_path, file_path, type, location): + api_path = '%s?%s' % (api_path, tryInt(os.path.getmtime(file_path))) + if not self.urls[type].get(location): self.urls[type][location] = [] self.urls[type][location].append(api_path) diff --git a/couchpotato/core/_base/updater/main.py b/couchpotato/core/_base/updater/main.py index 38b7d36..f3b4b19 100644 --- a/couchpotato/core/_base/updater/main.py +++ b/couchpotato/core/_base/updater/main.py @@ -132,6 +132,7 @@ class BaseUpdater(Plugin): update_failed = False update_version = None last_check = 0 + auto_register_static = False def doUpdate(self): pass diff --git a/couchpotato/core/auth.py b/couchpotato/core/auth.py deleted file mode 100644 index e58016b..0000000 --- a/couchpotato/core/auth.py +++ /dev/null @@ -1,40 +0,0 @@ -from couchpotato.core.helpers.variable import md5 -from couchpotato.environment import Env -import base64 - -def check_auth(username, password): - return username == Env.setting('username') and password == Env.setting('password') - -def requires_auth(handler_class): - - def wrap_execute(handler_execute): - - def require_basic_auth(handler, kwargs): - if Env.setting('username') and Env.setting('password'): - - auth_header = handler.request.headers.get('Authorization') - auth_decoded = base64.decodestring(auth_header[6:]) if auth_header else None - if auth_decoded: - username, password = auth_decoded.split(':', 2) - - if auth_header is None or not auth_header.startswith('Basic ') or (not check_auth(username.decode('latin'), md5(password.decode('latin')))): - handler.set_status(401) - handler.set_header('WWW-Authenticate', 'Basic realm="CouchPotato Login"') - handler._transforms = [] - handler.finish() - - return False - - return True - - def _execute(self, transforms, *args, **kwargs): - - if not require_basic_auth(self, kwargs): - return False - return handler_execute(self, transforms, *args, **kwargs) - - return _execute - - handler_class._execute = 
wrap_execute(handler_class._execute) - - return handler_class diff --git a/couchpotato/core/downloaders/base.py b/couchpotato/core/downloaders/base.py index cc0d59e..08be4bd 100644 --- a/couchpotato/core/downloaders/base.py +++ b/couchpotato/core/downloaders/base.py @@ -49,7 +49,10 @@ class Downloader(Provider): return [] - def _download(self, data = {}, movie = {}, manual = False, filedata = None): + def _download(self, data = None, movie = None, manual = False, filedata = None): + if not movie: movie = {} + if not data: data = {} + if self.isDisabled(manual, data): return return self.download(data = data, movie = movie, filedata = filedata) @@ -119,7 +122,7 @@ class Downloader(Provider): except: log.debug('Torrent hash "%s" wasn\'t found on: %s', (torrent_hash, source)) - log.error('Failed converting magnet url to torrent: %s', (torrent_hash)) + log.error('Failed converting magnet url to torrent: %s', torrent_hash) return False def downloadReturnId(self, download_id): @@ -128,18 +131,24 @@ class Downloader(Provider): 'id': download_id } - def isDisabled(self, manual, data): + def isDisabled(self, manual = False, data = None): + if not data: data = {} + return not self.isEnabled(manual, data) - def _isEnabled(self, manual, data = {}): + def _isEnabled(self, manual, data = None): + if not data: data = {} + if not self.isEnabled(manual, data): return return True - def isEnabled(self, manual, data = {}): + def isEnabled(self, manual = False, data = None): + if not data: data = {} + d_manual = self.conf('manual', default = False) return super(Downloader, self).isEnabled() and \ - ((d_manual and manual) or (d_manual is False)) and \ + (d_manual and manual or d_manual is False) and \ (not data or self.isCorrectProtocol(data.get('protocol'))) def _pause(self, item, pause = True): diff --git a/couchpotato/core/downloaders/blackhole/main.py b/couchpotato/core/downloaders/blackhole/main.py index 9d2a526..9a5a621 100644 --- a/couchpotato/core/downloaders/blackhole/main.py +++ b/couchpotato/core/downloaders/blackhole/main.py @@ -12,7 +12,9 @@ class Blackhole(Downloader): protocol = ['nzb', 'torrent', 'torrent_magnet'] - def download(self, data = {}, movie = {}, filedata = None): + def download(self, data = None, movie = None, filedata = None): + if not movie: movie = {} + if not data: data = {} directory = self.conf('directory') if not directory or not os.path.isdir(directory): @@ -62,7 +64,8 @@ class Blackhole(Downloader): else: return ['nzb'] - def isEnabled(self, manual, data = {}): + def isEnabled(self, manual = False, data = None): + if not data: data = {} for_protocol = ['both'] if data and 'torrent' in data.get('protocol'): for_protocol.append('torrent') diff --git a/couchpotato/core/downloaders/deluge/main.py b/couchpotato/core/downloaders/deluge/main.py index 6a9eb3c..580ed7f 100644 --- a/couchpotato/core/downloaders/deluge/main.py +++ b/couchpotato/core/downloaders/deluge/main.py @@ -54,7 +54,7 @@ class Deluge(Downloader): if self.conf('completed_directory'): if os.path.isdir(self.conf('completed_directory')): - options['move_completed'] = 1 + options['move_completed'] = 1 options['move_completed_path'] = self.conf('completed_directory') else: log.error('Download directory from Deluge settings: %s doesn\'t exist', self.conf('directory')) @@ -96,7 +96,7 @@ class Deluge(Downloader): queue = self.drpc.get_alltorrents() - if not (queue): + if not queue: log.debug('Nothing in queue or error') return False diff --git a/couchpotato/core/downloaders/nzbget/main.py 
b/couchpotato/core/downloaders/nzbget/main.py index 04f68f4..b7cf026 100644 --- a/couchpotato/core/downloaders/nzbget/main.py +++ b/couchpotato/core/downloaders/nzbget/main.py @@ -19,7 +19,9 @@ class NZBGet(Downloader): url = 'http://%(username)s:%(password)s@%(host)s/xmlrpc' - def download(self, data = {}, movie = {}, filedata = None): + def download(self, data = None, movie = None, filedata = None): + if not movie: movie = {} + if not data: data = {} if not filedata: log.error('Unable to get NZB file: %s', traceback.format_exc()) @@ -140,7 +142,7 @@ class NZBGet(Downloader): statuses.append({ 'id': nzb_id, 'name': item['NZBFilename'], - 'status': 'completed' if item['ParStatus'] == 'SUCCESS' and item['ScriptStatus'] == 'SUCCESS' else 'failed', + 'status': 'completed' if item['ParStatus'] in ['SUCCESS','NONE'] and item['ScriptStatus'] in ['SUCCESS','NONE'] else 'failed', 'original_status': item['ParStatus'] + ', ' + item['ScriptStatus'], 'timeleft': str(timedelta(seconds = 0)), 'folder': ss(item['DestDir']) @@ -172,11 +174,16 @@ class NZBGet(Downloader): try: history = rpc.history() + nzb_id = None + path = None + for hist in history: - if hist['Parameters'] and hist['Parameters']['couchpotato'] and hist['Parameters']['couchpotato'] == item['id']: - nzb_id = hist['ID'] - path = hist['DestDir'] - if rpc.editqueue('HistoryDelete', 0, "", [tryInt(nzb_id)]): + for param in hist['Parameters']: + if param['Name'] == 'couchpotato' and param['Value'] == item['id']: + nzb_id = hist['ID'] + path = hist['DestDir'] + + if nzb_id and path and rpc.editqueue('HistoryDelete', 0, "", [tryInt(nzb_id)]): shutil.rmtree(path, True) except: log.error('Failed deleting: %s', traceback.format_exc(0)) diff --git a/couchpotato/core/downloaders/nzbvortex/main.py b/couchpotato/core/downloaders/nzbvortex/main.py index 2944c32..a652f11 100644 --- a/couchpotato/core/downloaders/nzbvortex/main.py +++ b/couchpotato/core/downloaders/nzbvortex/main.py @@ -23,7 +23,9 @@ class NZBVortex(Downloader): api_level = None session_id = None - def download(self, data = {}, movie = {}, filedata = None): + def download(self, data = None, movie = None, filedata = None): + if not movie: movie = {} + if not data: data = {} # Send the nzb try: @@ -97,9 +99,10 @@ class NZBVortex(Downloader): return False - def call(self, call, parameters = {}, repeat = False, auth = True, *args, **kwargs): + def call(self, call, parameters = None, repeat = False, auth = True, *args, **kwargs): # Login first + if not parameters: parameters = {} if not self.session_id and auth: self.login() @@ -122,7 +125,7 @@ class NZBVortex(Downloader): # Try login and do again if not repeat: self.login() - return self.call(call, parameters = parameters, repeat = True, *args, **kwargs) + return self.call(call, parameters = parameters, repeat = True, **kwargs) log.error('Failed to parsing %s: %s', (self.getName(), traceback.format_exc())) except: @@ -148,7 +151,8 @@ class NZBVortex(Downloader): return self.api_level - def isEnabled(self, manual, data): + def isEnabled(self, manual = False, data = None): + if not data: data = {} return super(NZBVortex, self).isEnabled(manual, data) and self.getApiLevel() diff --git a/couchpotato/core/downloaders/pneumatic/main.py b/couchpotato/core/downloaders/pneumatic/main.py index 25923e0..643350e 100644 --- a/couchpotato/core/downloaders/pneumatic/main.py +++ b/couchpotato/core/downloaders/pneumatic/main.py @@ -12,7 +12,9 @@ class Pneumatic(Downloader): protocol = ['nzb'] strm_syntax = 
'plugin://plugin.program.pneumatic/?mode=strm&type=add_file&nzb=%s&nzbname=%s' - def download(self, data = {}, movie = {}, filedata = None): + def download(self, data = None, movie = None, filedata = None): + if not movie: movie = {} + if not data: data = {} directory = self.conf('directory') if not directory or not os.path.isdir(directory): diff --git a/couchpotato/core/downloaders/rtorrent/main.py b/couchpotato/core/downloaders/rtorrent/main.py index 680c44a..161c671 100755 --- a/couchpotato/core/downloaders/rtorrent/main.py +++ b/couchpotato/core/downloaders/rtorrent/main.py @@ -1,20 +1,19 @@ from base64 import b16encode, b32decode -from datetime import timedelta -from hashlib import sha1 -import shutil -from couchpotato.core.helpers.encoding import ss -from rtorrent.err import MethodError - from bencode import bencode, bdecode from couchpotato.core.downloaders.base import Downloader, StatusList +from couchpotato.core.helpers.encoding import ss from couchpotato.core.logger import CPLog +from datetime import timedelta +from hashlib import sha1 from rtorrent import RTorrent - +from rtorrent.err import MethodError +import shutil log = CPLog(__name__) class rTorrent(Downloader): + protocol = ['torrent', 'torrent_magnet'] rt = None @@ -194,7 +193,7 @@ class rTorrent(Downloader): if torrent is None: return False - torrent.erase() # just removes the torrent, doesn't delete data + torrent.erase() # just removes the torrent, doesn't delete data if delete_files: shutil.rmtree(item['folder'], True) diff --git a/couchpotato/core/downloaders/sabnzbd/main.py b/couchpotato/core/downloaders/sabnzbd/main.py index 68bbd26..08ee409 100644 --- a/couchpotato/core/downloaders/sabnzbd/main.py +++ b/couchpotato/core/downloaders/sabnzbd/main.py @@ -15,7 +15,9 @@ class Sabnzbd(Downloader): protocol = ['nzb'] - def download(self, data = {}, movie = {}, filedata = None): + def download(self, data = None, movie = None, filedata = None): + if not movie: movie = {} + if not data: data = {} log.info('Sending "%s" to SABnzbd.', data.get('name')) @@ -26,9 +28,10 @@ class Sabnzbd(Downloader): 'priority': self.conf('priority'), } + nzb_filename = None if filedata: if len(filedata) < 50: - log.error('No proper nzb available: %s', (filedata)) + log.error('No proper nzb available: %s', filedata) return False # If it's a .rar, it adds the .rar extension, otherwise it stays .nzb @@ -38,7 +41,7 @@ class Sabnzbd(Downloader): req_params['name'] = data.get('url') try: - if req_params.get('mode') is 'addfile': + if nzb_filename and req_params.get('mode') is 'addfile': sab_data = self.call(req_params, params = {'nzbfile': (ss(nzb_filename), filedata)}, multipart = True) else: sab_data = self.call(req_params) diff --git a/couchpotato/core/downloaders/synology/main.py b/couchpotato/core/downloaders/synology/main.py index 362577f..d5082c7 100644 --- a/couchpotato/core/downloaders/synology/main.py +++ b/couchpotato/core/downloaders/synology/main.py @@ -12,7 +12,9 @@ class Synology(Downloader): protocol = ['nzb', 'torrent', 'torrent_magnet'] log = CPLog(__name__) - def download(self, data, movie, filedata = None): + def download(self, data = None, movie = None, filedata = None): + if not movie: movie = {} + if not data: data = {} response = False log.error('Sending "%s" (%s) to Synology.', (data['name'], data['protocol'])) @@ -49,7 +51,9 @@ class Synology(Downloader): else: return ['nzb'] - def isEnabled(self, manual, data = {}): + def isEnabled(self, manual = False, data = None): + if not data: data = {} + for_protocol = ['both'] if 
data and 'torrent' in data.get('protocol'): for_protocol.append('torrent') @@ -61,7 +65,7 @@ class Synology(Downloader): class SynologyRPC(object): - '''SynologyRPC lite library''' + """SynologyRPC lite library""" def __init__(self, host = 'localhost', port = 5000, username = None, password = None): @@ -98,7 +102,7 @@ class SynologyRPC(object): req = requests.post(url, data = args, files = files) req.raise_for_status() response = json.loads(req.text) - if response['success'] == True: + if response['success']: log.info('Synology action successfull') return response except requests.ConnectionError, err: @@ -111,11 +115,11 @@ class SynologyRPC(object): return response def create_task(self, url = None, filename = None, filedata = None): - ''' Creates new download task in Synology DownloadStation. Either specify + """ Creates new download task in Synology DownloadStation. Either specify url or pair (filename, filedata). Returns True if task was created, False otherwise - ''' + """ result = False # login if self._login(): diff --git a/couchpotato/core/downloaders/transmission/main.py b/couchpotato/core/downloaders/transmission/main.py index be7f2f7..5ff33c0 100644 --- a/couchpotato/core/downloaders/transmission/main.py +++ b/couchpotato/core/downloaders/transmission/main.py @@ -44,8 +44,9 @@ class Transmission(Downloader): return False # Set parameters for adding torrent - params = {} - params['paused'] = self.conf('paused', default = False) + params = { + 'paused': self.conf('paused', default = False) + } if self.conf('directory'): if os.path.isdir(self.conf('directory')): @@ -135,11 +136,11 @@ class Transmission(Downloader): def removeFailed(self, item): log.info('%s failed downloading, deleting...', item['name']) - return self.trpc.remove_torrent(self, item['hashString'], True) + return self.trpc.remove_torrent(item['hashString'], True) def processComplete(self, item, delete_files = False): log.debug('Requesting Transmission to remove the torrent %s%s.', (item['name'], ' and cleanup the downloaded files' if delete_files else '')) - return self.trpc.remove_torrent(self, item['hashString'], delete_files) + return self.trpc.remove_torrent(item['hashString'], delete_files) class TransmissionRPC(object): diff --git a/couchpotato/core/downloaders/utorrent/main.py b/couchpotato/core/downloaders/utorrent/main.py index 59e9a4a..d933007 100644 --- a/couchpotato/core/downloaders/utorrent/main.py +++ b/couchpotato/core/downloaders/utorrent/main.py @@ -1,5 +1,5 @@ from base64 import b16encode, b32decode -from bencode import bencode, bdecode +from bencode import bencode as benc, bdecode from couchpotato.core.downloaders.base import Downloader, StatusList from couchpotato.core.helpers.encoding import isInt, ss from couchpotato.core.helpers.variable import tryInt, tryFloat @@ -36,7 +36,9 @@ class uTorrent(Downloader): return self.utorrent_api - def download(self, data, movie, filedata = None): + def download(self, data = None, movie = None, filedata = None): + if not movie: movie = {} + if not data: data = {} log.debug('Sending "%s" (%s) to uTorrent.', (data.get('name'), data.get('protocol'))) @@ -74,7 +76,7 @@ class uTorrent(Downloader): torrent_params['trackers'] = '%0D%0A%0D%0A'.join(self.torrent_trackers) else: info = bdecode(filedata)["info"] - torrent_hash = sha1(bencode(info)).hexdigest().upper() + torrent_hash = sha1(benc(info)).hexdigest().upper() torrent_filename = self.createFileName(data, filedata, movie) if data.get('seed_ratio'): @@ -280,7 +282,9 @@ class uTorrentAPI(object): return 
settings_dict - def set_settings(self, settings_dict = {}): + def set_settings(self, settings_dict = None): + if not settings_dict: settings_dict = {} + for key in settings_dict: if isinstance(settings_dict[key], bool): settings_dict[key] = 1 if settings_dict[key] else 0 diff --git a/couchpotato/core/event.py b/couchpotato/core/event.py index 0e0b4a7..7b01fbd 100644 --- a/couchpotato/core/event.py +++ b/couchpotato/core/event.py @@ -21,9 +21,11 @@ def addEvent(name, handler, priority = 100): def createHandle(*args, **kwargs): + h = None try: # Open handler has_parent = hasattr(handler, 'im_self') + parent = None if has_parent: parent = handler.im_self bc = hasattr(parent, 'beforeCall') @@ -33,7 +35,7 @@ def addEvent(name, handler, priority = 100): h = runHandler(name, handler, *args, **kwargs) # Close handler - if has_parent: + if parent and has_parent: ac = hasattr(parent, 'afterCall') if ac: parent.afterCall(handler) except: @@ -53,11 +55,6 @@ def removeEvent(name, handler): def fireEvent(name, *args, **kwargs): if not events.has_key(name): return - e = Event(name = name, threads = 10, asynch = kwargs.get('async', False), exc_info = True, traceback = True, lock = threading.RLock()) - - for event in events[name]: - e.handle(event['handler'], priority = event['priority']) - #log.debug('Firing event %s', name) try: @@ -67,7 +64,6 @@ def fireEvent(name, *args, **kwargs): 'single': False, # Return single handler 'merge': False, # Merge items 'in_order': False, # Fire them in specific order, waits for the other to finish - 'async': False } # Do options @@ -78,12 +74,32 @@ def fireEvent(name, *args, **kwargs): options[x] = val except: pass - # Make sure only 1 event is fired at a time when order is wanted - kwargs['event_order_lock'] = threading.RLock() if options['in_order'] or options['single'] else None - kwargs['event_return_on_result'] = options['single'] + if len(events[name]) == 1: + + single = None + try: + single = events[name][0]['handler'](*args, **kwargs) + except: + log.error('Failed running single event: %s', traceback.format_exc()) + + # Don't load thread for single event + result = { + 'single': (single is not None, single), + } + + else: + + e = Event(name = name, threads = 10, exc_info = True, traceback = True, lock = threading.RLock()) + + for event in events[name]: + e.handle(event['handler'], priority = event['priority']) + + # Make sure only 1 event is fired at a time when order is wanted + kwargs['event_order_lock'] = threading.RLock() if options['in_order'] or options['single'] else None + kwargs['event_return_on_result'] = options['single'] - # Fire - result = e(*args, **kwargs) + # Fire + result = e(*args, **kwargs) if options['single'] and not options['merge']: results = None diff --git a/couchpotato/core/helpers/encoding.py b/couchpotato/core/helpers/encoding.py index 9b753db..6e86444 100644 --- a/couchpotato/core/helpers/encoding.py +++ b/couchpotato/core/helpers/encoding.py @@ -63,7 +63,7 @@ def stripAccents(s): def tryUrlencode(s): new = u'' - if isinstance(s, (dict)): + if isinstance(s, dict): for key, value in s.iteritems(): new += u'&%s=%s' % (key, tryUrlencode(value)) diff --git a/couchpotato/core/helpers/request.py b/couchpotato/core/helpers/request.py index c224979..888e63f 100644 --- a/couchpotato/core/helpers/request.py +++ b/couchpotato/core/helpers/request.py @@ -8,7 +8,7 @@ def getParams(params): reg = re.compile('^[a-z0-9_\.]+$') - current = temp = {} + temp = {} for param, value in sorted(params.iteritems()): nest = re.split("([\[\]]+)", param) diff 
--git a/couchpotato/core/helpers/rss.py b/couchpotato/core/helpers/rss.py index d88fdb5..b840d86 100644 --- a/couchpotato/core/helpers/rss.py +++ b/couchpotato/core/helpers/rss.py @@ -6,7 +6,7 @@ log = CPLog(__name__) class RSS(object): def getTextElements(self, xml, path): - ''' Find elements and return tree''' + """ Find elements and return tree""" textelements = [] try: @@ -28,7 +28,7 @@ class RSS(object): return elements def getElement(self, xml, path): - ''' Find element and return text''' + """ Find element and return text""" try: return xml.find(path) @@ -36,7 +36,7 @@ class RSS(object): return def getTextElement(self, xml, path): - ''' Find element and return text''' + """ Find element and return text""" try: return xml.find(path).text diff --git a/couchpotato/core/helpers/variable.py b/couchpotato/core/helpers/variable.py index e6c9f84..8f393d0 100644 --- a/couchpotato/core/helpers/variable.py +++ b/couchpotato/core/helpers/variable.py @@ -106,6 +106,11 @@ def md5(text): def sha1(text): return hashlib.sha1(text).hexdigest() +def isLocalIP(ip): + ip = ip.lstrip('htps:/') + regex = '/(^127\.)|(^192\.168\.)|(^10\.)|(^172\.1[6-9]\.)|(^172\.2[0-9]\.)|(^172\.3[0-1]\.)|(^::1)$/' + return re.search(regex, ip) is not None or 'localhost' in ip or ip[:4] == '127.' + def getExt(filename): return os.path.splitext(filename)[1][1:] @@ -113,8 +118,8 @@ def cleanHost(host): if not host.startswith(('http://', 'https://')): host = 'http://' + host - if not host.endswith('/'): - host += '/' + host = host.rstrip('/') + host += '/' return host @@ -174,11 +179,11 @@ def getTitle(library_dict): def possibleTitles(raw_title): - titles = [] - - titles.append(toSafeString(raw_title).lower()) - titles.append(raw_title.lower()) - titles.append(simplifyString(raw_title)) + titles = [ + toSafeString(raw_title).lower(), + raw_title.lower(), + simplifyString(raw_title) + ] # replace some chars new_title = raw_title.replace('&', 'and') diff --git a/couchpotato/core/loader.py b/couchpotato/core/loader.py index 745c75d..2016d28 100644 --- a/couchpotato/core/loader.py +++ b/couchpotato/core/loader.py @@ -66,7 +66,7 @@ class Loader(object): self.loadPlugins(m, plugin.get('name')) except ImportError as e: # todo:: subclass ImportError for missing requirements. - if (e.message.lower().startswith("missing")): + if e.message.lower().startswith("missing"): log.error(e.message) pass # todo:: this needs to be more descriptive. 
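# Most of the signature changes throughout the downloader and notification
# diffs swap "data = {}" defaults for "data = None" plus an explicit fallback.
# Python evaluates default values once, at function definition, so a mutable
# default is shared across calls. A minimal illustration, unrelated to any
# CouchPotato class:
def download_bad(data = {}):
    data['seen'] = data.get('seen', 0) + 1
    return data

print download_bad()   # {'seen': 1}
print download_bad()   # {'seen': 2} -- state leaked from the first call

def download_good(data = None):
    if not data: data = {}
    data['seen'] = data.get('seen', 0) + 1
    return data

print download_good()  # {'seen': 1}
print download_good()  # {'seen': 1} -- fresh dict on every call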
@@ -91,7 +91,7 @@ class Loader(object): for cur_file in glob.glob(os.path.join(dir_name, '*')): name = os.path.basename(cur_file) - if os.path.isdir(os.path.join(dir_name, name)) and name != 'static': + if os.path.isdir(os.path.join(dir_name, name)) and name != 'static' and os.path.isfile(os.path.join(cur_file, '__init__.py')): module_name = '%s.%s' % (module, name) self.addModule(priority, plugin_type, module_name, name) @@ -122,7 +122,7 @@ class Loader(object): try: module.start() return True - except Exception, e: + except: log.error('Failed loading plugin "%s": %s', (module.__file__, traceback.format_exc())) return False diff --git a/couchpotato/core/media/__init__.py b/couchpotato/core/media/__init__.py index 8187f98..1cef967 100644 --- a/couchpotato/core/media/__init__.py +++ b/couchpotato/core/media/__init__.py @@ -1,5 +1,4 @@ from couchpotato.core.event import addEvent -from couchpotato.core.logger import CPLog from couchpotato.core.plugins.base import Plugin diff --git a/couchpotato/core/media/_base/searcher/__init__.py b/couchpotato/core/media/_base/searcher/__init__.py index f3d764d..0fb6cc0 100644 --- a/couchpotato/core/media/_base/searcher/__init__.py +++ b/couchpotato/core/media/_base/searcher/__init__.py @@ -1,5 +1,4 @@ from .main import Searcher -import random def start(): return Searcher() diff --git a/couchpotato/core/media/_base/searcher/base.py b/couchpotato/core/media/_base/searcher/base.py index ab29439..368c6e2 100644 --- a/couchpotato/core/media/_base/searcher/base.py +++ b/couchpotato/core/media/_base/searcher/base.py @@ -1,4 +1,3 @@ -from couchpotato.api import addApiView from couchpotato.core.event import addEvent, fireEvent from couchpotato.core.logger import CPLog from couchpotato.core.plugins.base import Plugin @@ -19,12 +18,10 @@ class SearcherBase(Plugin): self.initCron() - - """ Set the searcher cronjob - Make sure to reset cronjob after setting has changed - - """ def initCron(self): + """ Set the searcher cronjob + Make sure to reset cronjob after setting has changed + """ _type = self.getType() @@ -38,14 +35,11 @@ class SearcherBase(Plugin): addEvent('setting.save.%s_searcher.cron_hour.after' % _type, setCrons) addEvent('setting.save.%s_searcher.cron_minute.after' % _type, setCrons) - - """ Return progress of current searcher - - """ def getProgress(self, **kwargs): + """ Return progress of current searcher""" - progress = {} - progress[self.getType()] = self.in_progress + progress = { + self.getType(): self.in_progress + } return progress - diff --git a/couchpotato/core/media/_base/searcher/main.py b/couchpotato/core/media/_base/searcher/main.py index b07279d..7bff261 100644 --- a/couchpotato/core/media/_base/searcher/main.py +++ b/couchpotato/core/media/_base/searcher/main.py @@ -51,6 +51,10 @@ class Searcher(SearcherBase): def download(self, data, movie, manual = False): + if not data.get('protocol'): + data['protocol'] = data['type'] + data['type'] = 'movie' + # Test to see if any downloaders are enabled for this type downloader_enabled = fireEvent('download.enabled', manual, data, single = True) @@ -122,7 +126,7 @@ class Searcher(SearcherBase): return True - log.info('Tried to download, but none of the "%s" downloaders are enabled or gave an error', (data.get('protocol', ''))) + log.info('Tried to download, but none of the "%s" downloaders are enabled or gave an error', (data.get('protocol'))) return False @@ -146,7 +150,8 @@ class Searcher(SearcherBase): return search_protocols - def containsOtherQuality(self, nzb, movie_year = None, 
preferred_quality = {}): + def containsOtherQuality(self, nzb, movie_year = None, preferred_quality = None): + if not preferred_quality: preferred_quality = {} name = nzb['name'] size = nzb.get('size', 0) @@ -173,10 +178,10 @@ class Searcher(SearcherBase): year_name = fireEvent('scanner.name_year', name, single = True) if len(found) == 0 and movie_year < datetime.datetime.now().year - 3 and not year_name.get('year', None): if size > 3000: # Assume dvdr - log.info('Quality was missing in name, assuming it\'s a DVD-R based on the size: %s', (size)) + log.info('Quality was missing in name, assuming it\'s a DVD-R based on the size: %s', size) found['dvdr'] = True else: # Assume dvdrip - log.info('Quality was missing in name, assuming it\'s a DVD-Rip based on the size: %s', (size)) + log.info('Quality was missing in name, assuming it\'s a DVD-Rip based on the size: %s', size) found['dvdrip'] = True # Allow other qualities @@ -191,6 +196,7 @@ class Searcher(SearcherBase): if not isinstance(haystack, (list, tuple, set)): haystack = [haystack] + year_name = {} for string in haystack: year_name = fireEvent('scanner.name_year', string, single = True) diff --git a/couchpotato/core/media/movie/_base/main.py b/couchpotato/core/media/movie/_base/main.py index 68f6f3f..1bca3d9 100644 --- a/couchpotato/core/media/movie/_base/main.py +++ b/couchpotato/core/media/movie/_base/main.py @@ -2,10 +2,11 @@ from couchpotato import get_session from couchpotato.api import addApiView from couchpotato.core.event import fireEvent, fireEventAsync, addEvent from couchpotato.core.helpers.encoding import toUnicode, simplifyString -from couchpotato.core.helpers.variable import getImdb, splitString, tryInt +from couchpotato.core.helpers.variable import getImdb, splitString, tryInt, \ + mergeDicts from couchpotato.core.logger import CPLog from couchpotato.core.media.movie import MovieTypeBase -from couchpotato.core.settings.model import Library, LibraryTitle, Media, \ +from couchpotato.core.settings.model import Library, LibraryTitle, Movie, \ Release from couchpotato.environment import Env from sqlalchemy.orm import joinedload_all @@ -167,19 +168,33 @@ class MovieBase(MovieTypeBase): if release_status and not isinstance(release_status, (list, tuple)): release_status = [release_status] - q = db.query(Media) \ - .outerjoin(Media.releases, Media.library, Library.titles) \ - .filter(LibraryTitle.default == True) \ - .group_by(Media.id) + # query movie ids + q = db.query(Movie) \ + .with_entities(Movie.id) \ + .group_by(Movie.id) # Filter on movie status if status and len(status) > 0: - q = q.filter(or_(*[Media.status.has(identifier = s) for s in status])) + statuses = fireEvent('status.get', status, single = len(status) > 1) + statuses = [s.get('id') for s in statuses] + + q = q.filter(Movie.status_id.in_(statuses)) # Filter on release status if release_status and len(release_status) > 0: - q = q.filter(or_(*[Release.status.has(identifier = s) for s in release_status])) + q = q.join(Movie.releases) + + statuses = fireEvent('status.get', release_status, single = len(release_status) > 1) + statuses = [s.get('id') for s in statuses] + + q = q.filter(Release.status_id.in_(statuses)) + # Only join when searching / ordering + if starts_with or search or order != 'release_order': + q = q.join(Movie.library, Library.titles) \ + .filter(LibraryTitle.default == True) + + # Add search filters filter_or = [] if starts_with: starts_with = toUnicode(starts_with.lower()) @@ -194,48 +209,79 @@ class MovieBase(MovieTypeBase): if search: 
filter_or.append(LibraryTitle.simple_title.like('%%' + search + '%%')) - if filter_or: + if len(filter_or) > 0: q = q.filter(or_(*filter_or)) total_count = q.count() + if total_count == 0: + return 0, [] if order == 'release_order': q = q.order_by(desc(Release.last_edit)) else: q = q.order_by(asc(LibraryTitle.simple_title)) - q = q.subquery() - q2 = db.query(Media).join((q, q.c.id == Media.id)) \ - .options(joinedload_all('releases.files')) \ - .options(joinedload_all('releases.info')) \ - .options(joinedload_all('profile.types')) \ + if limit_offset: + splt = splitString(limit_offset) if isinstance(limit_offset, (str, unicode)) else limit_offset + limit = splt[0] + offset = 0 if len(splt) is 1 else splt[1] + q = q.limit(limit).offset(offset) + + # Get all movie_ids in sorted order + movie_ids = [m.id for m in q.all()] + + # List release statuses + releases = db.query(Release) \ + .filter(Release.movie_id.in_(movie_ids)) \ + .all() + + release_statuses = dict((m, set()) for m in movie_ids) + releases_count = dict((m, 0) for m in movie_ids) + for release in releases: + release_statuses[release.movie_id].add('%d,%d' % (release.status_id, release.quality_id)) + releases_count[release.movie_id] += 1 + + # Get main movie data + q2 = db.query(Movie) \ .options(joinedload_all('library.titles')) \ .options(joinedload_all('library.files')) \ .options(joinedload_all('status')) \ .options(joinedload_all('files')) - if limit_offset: - splt = splitString(limit_offset) if isinstance(limit_offset, (str, unicode)) else limit_offset - limit = splt[0] - offset = 0 if len(splt) is 1 else splt[1] - q2 = q2.limit(limit).offset(offset) + q2 = q2.filter(Movie.id.in_(movie_ids)) results = q2.all() - movies = [] + + # Create dict by movie id + movie_dict = {} for movie in results: - movies.append(movie.to_dict({ - 'profile': {'types': {}}, - 'releases': {'files':{}, 'info': {}}, + movie_dict[movie.id] = movie + + # List movies based on movie_ids order + movies = [] + for movie_id in movie_ids: + + releases = [] + for r in release_statuses.get(movie_id): + x = splitString(r) + releases.append({'status_id': x[0], 'quality_id': x[1]}) + + # Merge releases with movie dict + movies.append(mergeDicts(movie_dict[movie_id].to_dict({ 'library': {'titles': {}, 'files':{}}, 'files': {}, + }), { + 'releases': releases, + 'releases_count': releases_count.get(movie_id), })) db.expire_all() - return (total_count, movies) + return total_count, movies def availableChars(self, status = None, release_status = None): - chars = '' + status = status or [] + release_status = release_status or [] db = get_session() @@ -245,38 +291,53 @@ class MovieBase(MovieTypeBase): if release_status and not isinstance(release_status, (list, tuple)): release_status = [release_status] - q = db.query(Media) \ - .outerjoin(Media.releases, Media.library, Library.titles, Media.status) \ - .options(joinedload_all('library.titles')) + q = db.query(Movie) # Filter on movie status if status and len(status) > 0: - q = q.filter(or_(*[Media.status.has(identifier = s) for s in status])) + statuses = fireEvent('status.get', status, single = len(release_status) > 1) + statuses = [s.get('id') for s in statuses] + + q = q.filter(Movie.status_id.in_(statuses)) # Filter on release status if release_status and len(release_status) > 0: - q = q.filter(or_(*[Release.status.has(identifier = s) for s in release_status])) - results = q.all() + statuses = fireEvent('status.get', release_status, single = len(release_status) > 1) + statuses = [s.get('id') for s in statuses] - for 
movie in results: - if movie.library.titles: - char = movie.library.titles[0].simple_title[0] + q = q.join(Movie.releases) \ + .filter(Release.status_id.in_(statuses)) + + q = q.join(Library, LibraryTitle) \ + .with_entities(LibraryTitle.simple_title) \ + .filter(LibraryTitle.default == True) + + titles = q.all() + + chars = set() + for title in titles: + try: + char = title[0][0] char = char if char in ascii_lowercase else '#' - if char not in chars: - chars += str(char) + chars.add(str(char)) + except: + log.error('Failed getting title for %s', title.libraries_id) + + if len(chars) == 25: + break db.expire_all() - return ''.join(sorted(chars, key = str.lower)) + return ''.join(sorted(chars)) def listView(self, **kwargs): - status = splitString(kwargs.get('status', None)) - release_status = splitString(kwargs.get('release_status', None)) - limit_offset = kwargs.get('limit_offset', None) - starts_with = kwargs.get('starts_with', None) - search = kwargs.get('search', None) - order = kwargs.get('order', None) + status = splitString(kwargs.get('status')) + release_status = splitString(kwargs.get('release_status')) + limit_offset = kwargs.get('limit_offset') + starts_with = kwargs.get('starts_with') + search = kwargs.get('search') + order = kwargs.get('order') total_movies, movies = self.list( status = status, @@ -311,7 +372,7 @@ class MovieBase(MovieTypeBase): db = get_session() for x in splitString(id): - movie = db.query(Media).filter_by(id = x).first() + movie = db.query(Movie).filter_by(id = x).first() if movie: @@ -347,7 +408,9 @@ class MovieBase(MovieTypeBase): 'movies': movies, } - def add(self, params = {}, force_readd = True, search_after = True, update_library = False, status_id = None): + def add(self, params = None, force_readd = True, search_after = True, update_library = False, status_id = None): + if not params: params = {} + if not params.get('identifier'): msg = 'Can\'t add movie without imdb identifier.' 
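# The list() rewrite earlier in this file pages over bare movie ids first, then
# loads the releases and full rows for just those ids and merges them in
# Python, preserving the paged order. A minimal SQLAlchemy sketch of that
# two-phase pattern; the Movie/Release models below are stand-ins, not the real
# couchpotato.core.settings.model classes.
from sqlalchemy import Column, ForeignKey, Integer, String, create_engine
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy.orm import sessionmaker

Base = declarative_base()

class Movie(Base):
    __tablename__ = 'movie'
    id = Column(Integer, primary_key = True)
    title = Column(String)

class Release(Base):
    __tablename__ = 'release'
    id = Column(Integer, primary_key = True)
    movie_id = Column(Integer, ForeignKey('movie.id'))
    status_id = Column(Integer)

engine = create_engine('sqlite://')
Base.metadata.create_all(engine)
db = sessionmaker(bind = engine)()
db.add_all([Movie(id = i, title = 'Movie %d' % i) for i in range(1, 6)])
db.add_all([Release(movie_id = 1, status_id = 4), Release(movie_id = 3, status_id = 2)])
db.commit()

# Phase 1: ids only, with limit/offset applied here instead of on a joined query
movie_ids = [m.id for m in db.query(Movie).with_entities(Movie.id)
             .order_by(Movie.title).limit(3).offset(0).all()]

# Phase 2: releases for just those movies, counted per movie in Python
releases_count = dict((m_id, 0) for m_id in movie_ids)
for release in db.query(Release).filter(Release.movie_id.in_(movie_ids)).all():
    releases_count[release.movie_id] += 1

# Phase 3: full movie rows, re-ordered to match the paged id list and merged
movie_dict = dict((m.id, m) for m in db.query(Movie).filter(Movie.id.in_(movie_ids)).all())
movies = [{'title': movie_dict[m_id].title, 'releases_count': releases_count[m_id]}
          for m_id in movie_ids]
print movies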
log.error(msg) @@ -372,15 +435,15 @@ class MovieBase(MovieTypeBase): fireEvent('status.get', ['active', 'snatched', 'ignored', 'done', 'downloaded'], single = True) default_profile = fireEvent('profile.default', single = True) - cat_id = params.get('category_id', None) + cat_id = params.get('category_id') db = get_session() - m = db.query(Media).filter_by(library_id = library.get('id')).first() + m = db.query(Movie).filter_by(library_id = library.get('id')).first() added = True do_search = False search_after = search_after and self.conf('search_on_add', section = 'moviesearcher') if not m: - m = Media( + m = Movie( library_id = library.get('id'), profile_id = params.get('profile_id', default_profile.get('id')), status_id = status_id if status_id else status_active.get('id'), @@ -457,13 +520,13 @@ class MovieBase(MovieTypeBase): ids = splitString(id) for movie_id in ids: - m = db.query(Media).filter_by(id = movie_id).first() + m = db.query(Movie).filter_by(id = movie_id).first() if not m: continue m.profile_id = kwargs.get('profile_id') - cat_id = kwargs.get('category_id', None) + cat_id = kwargs.get('category_id') if cat_id is not None: m.category_id = tryInt(cat_id) if tryInt(cat_id) > 0 else None @@ -504,7 +567,7 @@ class MovieBase(MovieTypeBase): db = get_session() - movie = db.query(Media).filter_by(id = movie_id).first() + movie = db.query(Movie).filter_by(id = movie_id).first() if movie: deleted = False if delete_from == 'all': @@ -518,7 +581,7 @@ class MovieBase(MovieTypeBase): total_deleted = 0 new_movie_status = None for release in movie.releases: - if delete_from in ['wanted', 'snatched']: + if delete_from in ['wanted', 'snatched', 'late']: if release.status_id != done_status.get('id'): db.delete(release) total_deleted += 1 @@ -554,12 +617,12 @@ class MovieBase(MovieTypeBase): db = get_session() - m = db.query(Media).filter_by(id = movie_id).first() + m = db.query(Movie).filter_by(id = movie_id).first() if not m or len(m.library.titles) == 0: log.debug('Can\'t restatus movie, doesn\'t seem to exist.') return False - log.debug('Changing status for %s', (m.library.titles[0].title)) + log.debug('Changing status for %s', m.library.titles[0].title) if not m.profile: m.status_id = done_status.get('id') else: @@ -580,7 +643,7 @@ class MovieBase(MovieTypeBase): def onComplete(): db = get_session() - movie = db.query(Media).filter_by(id = movie_id).first() + movie = db.query(Movie).filter_by(id = movie_id).first() fireEventAsync('movie.searcher.single', movie.to_dict(self.default_dict), on_complete = self.createNotifyFront(movie_id)) db.expire_all() @@ -591,7 +654,7 @@ class MovieBase(MovieTypeBase): def notifyFront(): db = get_session() - movie = db.query(Media).filter_by(id = movie_id).first() + movie = db.query(Movie).filter_by(id = movie_id).first() fireEvent('notify.frontend', type = 'movie.update.%s' % movie.id, data = movie.to_dict(self.default_dict)) db.expire_all() diff --git a/couchpotato/core/media/movie/_base/static/list.js b/couchpotato/core/media/movie/_base/static/list.js index 1b11fab..341d234 100644 --- a/couchpotato/core/media/movie/_base/static/list.js +++ b/couchpotato/core/media/movie/_base/static/list.js @@ -273,8 +273,25 @@ var MovieList = new Class({ }) ).addClass('search'); + var available_chars; self.filter_menu.addEvent('open', function(){ self.navigation_search_input.focus(); + + // Get available chars and highlight + if(!available_chars && (self.navigation.isDisplayed() || self.navigation.isVisible())) + Api.request('movie.available_chars', { + 'data': 
Object.merge({ + 'status': self.options.status + }, self.filter), + 'onSuccess': function(json){ + available_chars = json.chars + + json.chars.split('').each(function(c){ + self.letters[c.capitalize()].addClass('available') + }) + + } + }); }); self.filter_menu.addLink( @@ -311,21 +328,6 @@ var MovieList = new Class({ }).inject(self.navigation_alpha); }); - // Get available chars and highlight - if(self.navigation.isDisplayed() || self.navigation.isVisible()) - Api.request('movie.available_chars', { - 'data': Object.merge({ - 'status': self.options.status - }, self.filter), - 'onSuccess': function(json){ - - json.chars.split('').each(function(c){ - self.letters[c.capitalize()].addClass('available') - }) - - } - }); - // Add menu or hide if (self.options.menu.length > 0) self.options.menu.each(function(menu_item){ @@ -566,7 +568,7 @@ var MovieList = new Class({ } self.store(json.movies); - self.addMovies(json.movies, json.total); + self.addMovies(json.movies, json.total || json.movies.length); if(self.scrollspy) { self.load_more.set('text', 'load more movies'); self.scrollspy.start(); diff --git a/couchpotato/core/media/movie/_base/static/movie.actions.js b/couchpotato/core/media/movie/_base/static/movie.actions.js index ea6f00f..7d8c37f 100644 --- a/couchpotato/core/media/movie/_base/static/movie.actions.js +++ b/couchpotato/core/media/movie/_base/static/movie.actions.js @@ -124,6 +124,46 @@ MA.Release = new Class({ else self.showHelper(); + App.addEvent('movie.searcher.ended.'+self.movie.data.id, function(notification){ + self.releases = null; + if(self.options_container){ + self.options_container.destroy(); + self.options_container = null; + } + }); + + }, + + show: function(e){ + var self = this; + if(e) + (e).preventDefault(); + + if(self.releases) + self.createReleases(); + else { + + self.movie.busy(true); + + Api.request('release.for_movie', { + 'data': { + 'id': self.movie.data.id + }, + 'onComplete': function(json){ + self.movie.busy(false, 1); + + if(json && json.releases){ + self.releases = json.releases; + self.createReleases(); + } + else + alert('Something went wrong, check the logs.'); + } + }); + + } + + }, createReleases: function(){ @@ -145,7 +185,7 @@ MA.Release = new Class({ new Element('span.provider', {'text': 'Provider'}) ).inject(self.release_container) - self.movie.data.releases.sortBy('-info.score').each(function(release){ + self.releases.each(function(release){ var status = Status.get(release.status_id), quality = Quality.getProfile(release.quality_id) || {}, @@ -211,13 +251,11 @@ MA.Release = new Class({ } }); - if(self.last_release){ + if(self.last_release) self.release_container.getElement('#release_'+self.last_release.id).addClass('last_release'); - } - if(self.next_release){ + if(self.next_release) self.release_container.getElement('#release_'+self.next_release.id).addClass('next_release'); - } if(self.next_release || (self.last_release && ['ignored', 'failed'].indexOf(self.last_release.status.identifier) === false)){ @@ -230,7 +268,9 @@ MA.Release = new Class({ self.last_release ? new Element('a.button.orange', { 'text': 'the same release again', 'events': { - 'click': self.trySameRelease.bind(self) + 'click': function(){ + self.download(self.last_release); + } } }) : null, self.next_release && self.last_release ? new Element('span.or', { @@ -239,7 +279,9 @@ MA.Release = new Class({ self.next_release ? [new Element('a.button.green', { 'text': self.last_release ? 
'another release' : 'the best release', 'events': { - 'click': self.tryNextRelease.bind(self) + 'click': function(){ + self.download(self.next_release); + } } }), new Element('span.or', { @@ -248,18 +290,15 @@ MA.Release = new Class({ ) } - } - - }, + self.last_release = null; + self.next_release = null; - show: function(e){ - var self = this; - if(e) - (e).preventDefault(); + } - self.createReleases(); + // Show it self.options_container.inject(self.movie, 'top'); self.movie.slide('in', self.options_container); + }, showHelper: function(e){ @@ -267,15 +306,29 @@ MA.Release = new Class({ if(e) (e).preventDefault(); - self.createReleases(); + var has_available = false, + has_snatched = false; - if(self.next_release || (self.last_release && ['ignored', 'failed'].indexOf(self.last_release.status.identifier) === false)){ + self.movie.data.releases.each(function(release){ + if(has_available && has_snatched) return; + + var status = Status.get(release.status_id); + + if(['snatched', 'downloaded', 'seeding'].contains(status.identifier)) + has_snatched = true; + + if(['available'].contains(status.identifier)) + has_available = true; + + }); + + if(has_available || has_snatched){ self.trynext_container = new Element('div.buttons.trynext').inject(self.movie.info_container); self.trynext_container.adopt( - self.next_release ? [new Element('a.icon2.readd', { - 'text': self.last_release ? 'Download another release' : 'Download the best release', + has_available ? [new Element('a.icon2.readd', { + 'text': has_snatched ? 'Download another release' : 'Download the best release', 'events': { 'click': self.tryNextRelease.bind(self) } @@ -291,24 +344,7 @@ MA.Release = new Class({ new Element('a.icon2.completed', { 'text': 'mark this movie done', 'events': { - 'click': function(){ - Api.request('movie.delete', { - 'data': { - 'id': self.movie.get('id'), - 'delete_from': 'wanted' - }, - 'onComplete': function(){ - var movie = $(self.movie); - movie.set('tween', { - 'duration': 300, - 'onComplete': function(){ - self.movie.destroy() - } - }); - movie.tween('height', 0); - } - }); - } + 'click': self.markMovieDone.bind(self) } }) ) @@ -326,14 +362,14 @@ MA.Release = new Class({ var release_el = self.release_container.getElement('#release_'+release.id), icon = release_el.getElement('.download.icon2'); - self.movie.busy(true); + icon.addClass('icon spinner').removeClass('download'); Api.request('release.download', { 'data': { 'id': release.id }, 'onComplete': function(json){ - self.movie.busy(false); + icon.removeClass('icon spinner'); if(json.success) icon.addClass('completed'); @@ -365,24 +401,36 @@ MA.Release = new Class({ }, - tryNextRelease: function(movie_id){ + markMovieDone: function(){ var self = this; - self.createReleases(); - - if(self.last_release) - self.ignore(self.last_release); - - if(self.next_release) - self.download(self.next_release); + Api.request('movie.delete', { + 'data': { + 'id': self.movie.get('id'), + 'delete_from': 'wanted' + }, + 'onComplete': function(){ + var movie = $(self.movie); + movie.set('tween', { + 'duration': 300, + 'onComplete': function(){ + self.movie.destroy() + } + }); + movie.tween('height', 0); + } + }); }, - trySameRelease: function(movie_id){ + tryNextRelease: function(movie_id){ var self = this; - if(self.last_release) - self.download(self.last_release); + Api.request('movie.searcher.try_next', { + 'data': { + 'id': self.movie.get('id') + } + }); } @@ -581,7 +629,7 @@ MA.Edit = new Class({ 'text': profile.label ? 
profile.label : profile.data.label }).inject(self.profile_select); - if(self.movie.profile && self.movie.profile.data && self.movie.profile.data.id == profile_id) + if(self.movie.get('profile_id') == profile_id) self.profile_select.set('value', profile_id); }); @@ -780,16 +828,45 @@ MA.Files = new Class({ self.el = new Element('a.directory', { 'title': 'Available files', 'events': { - 'click': self.showFiles.bind(self) + 'click': self.show.bind(self) } }); }, - showFiles: function(e){ + show: function(e){ var self = this; (e).preventDefault(); + if(self.releases) + self.showFiles(); + else { + + self.movie.busy(true); + + Api.request('release.for_movie', { + 'data': { + 'id': self.movie.data.id + }, + 'onComplete': function(json){ + self.movie.busy(false, 1); + + if(json && json.releases){ + self.releases = json.releases; + self.showFiles(); + } + else + alert('Something went wrong, check the logs.'); + } + }); + + } + + }, + + showFiles: function(){ + var self = this; + if(!self.options_container){ self.options_container = new Element('div.options').adopt( self.files_container = new Element('div.files.table') @@ -802,7 +879,7 @@ MA.Files = new Class({ new Element('span.is_available', {'text': 'Available'}) ).inject(self.files_container) - Array.each(self.movie.data.releases, function(release){ + Array.each(self.releases, function(release){ var rel = new Element('div.release').inject(self.files_container); diff --git a/couchpotato/core/media/movie/_base/static/movie.js b/couchpotato/core/media/movie/_base/static/movie.js index 20956a0..363d860 100644 --- a/couchpotato/core/media/movie/_base/static/movie.js +++ b/couchpotato/core/media/movie/_base/static/movie.js @@ -58,7 +58,7 @@ var Movie = new Class({ }) }, - busy: function(set_busy){ + busy: function(set_busy, timeout){ var self = this; if(!set_busy){ @@ -72,9 +72,9 @@ var Movie = new Class({ self.spinner.el.destroy(); self.spinner = null; self.mask = null; - }, 400); + }, timeout || 400); } - }, 1000) + }, timeout || 1000) } else if(!self.spinner) { self.createMask(); @@ -179,20 +179,21 @@ var Movie = new Class({ }); // Add releases - self.data.releases.each(function(release){ - - var q = self.quality.getElement('.q_id'+ release.quality_id), - status = Status.get(release.status_id); - - if(!q && (status.identifier == 'snatched' || status.identifier == 'done')) - var q = self.addQuality(release.quality_id) - - if (status && q && !q.hasClass(status.identifier)){ - q.addClass(status.identifier); - q.set('title', (q.get('title') ? q.get('title') : '') + ' status: '+ status.label) - } - - }); + if(self.data.releases) + self.data.releases.each(function(release){ + + var q = self.quality.getElement('.q_id'+ release.quality_id), + status = Status.get(release.status_id); + + if(!q && (status.identifier == 'snatched' || status.identifier == 'done')) + var q = self.addQuality(release.quality_id) + + if (status && q && !q.hasClass(status.identifier)){ + q.addClass(status.identifier); + q.set('title', (q.get('title') ? 
q.get('title') : '') + ' status: '+ status.label) + } + + }); Object.each(self.options.actions, function(action, key){ self.action[key.toLowerCase()] = action = new self.options.actions[key](self) diff --git a/couchpotato/core/media/movie/_base/static/search.js b/couchpotato/core/media/movie/_base/static/search.js index e7fff40..0c9e08b 100644 --- a/couchpotato/core/media/movie/_base/static/search.js +++ b/couchpotato/core/media/movie/_base/static/search.js @@ -326,10 +326,10 @@ Block.Search.Item = new Class({ self.options_el.grab( new Element('div', { - 'class': self.info.in_wanted && self.info.in_wanted.profile || in_library ? 'in_library_wanted' : '' + 'class': self.info.in_wanted && self.info.in_wanted.profile_id || in_library ? 'in_library_wanted' : '' }).adopt( - self.info.in_wanted && self.info.in_wanted.profile ? new Element('span.in_wanted', { - 'text': 'Already in wanted list: ' + self.info.in_wanted.profile.label + self.info.in_wanted && self.info.in_wanted.profile_id ? new Element('span.in_wanted', { + 'text': 'Already in wanted list: ' + Quality.getProfile(self.info.in_wanted.profile_id).get('label') }) : (in_library ? new Element('span.in_library', { 'text': 'Already in library: ' + in_library.join(', ') }) : null), @@ -390,7 +390,7 @@ Block.Search.Item = new Class({ self.options_el.addClass('set'); if(categories.length == 0 && self.title_select.getElements('option').length == 1 && profiles.length == 1 && - !(self.info.in_wanted && self.info.in_wanted.profile || in_library)) + !(self.info.in_wanted && self.info.in_wanted.profile_id || in_library)) self.add(); } diff --git a/couchpotato/core/media/movie/library/movie/main.py b/couchpotato/core/media/movie/library/movie/main.py index be4b85e..6975f73 100644 --- a/couchpotato/core/media/movie/library/movie/main.py +++ b/couchpotato/core/media/movie/library/movie/main.py @@ -2,8 +2,8 @@ from couchpotato import get_session from couchpotato.core.event import addEvent, fireEventAsync, fireEvent from couchpotato.core.helpers.encoding import toUnicode, simplifyString from couchpotato.core.logger import CPLog -from couchpotato.core.settings.model import Library, LibraryTitle, File from couchpotato.core.media._base.library import LibraryBase +from couchpotato.core.settings.model import Library, LibraryTitle, File from string import ascii_letters import time import traceback @@ -71,6 +71,7 @@ class MovieLibraryPlugin(LibraryBase): library = db.query(Library).filter_by(identifier = identifier).first() done_status = fireEvent('status.get', 'done', single = True) + library_dict = None if library: library_dict = library.to_dict(self.default_dict) diff --git a/couchpotato/core/media/movie/searcher/main.py b/couchpotato/core/media/movie/searcher/main.py index 4f37e0b..37571fb 100644 --- a/couchpotato/core/media/movie/searcher/main.py +++ b/couchpotato/core/media/movie/searcher/main.py @@ -115,7 +115,7 @@ class MovieSearcher(SearcherBase, MovieTypeBase): self.in_progress = False - def single(self, movie, search_protocols = None): + def single(self, movie, search_protocols = None, manual = False): # Find out search type try: @@ -126,7 +126,7 @@ class MovieSearcher(SearcherBase, MovieTypeBase): done_status = fireEvent('status.get', 'done', single = True) - if not movie['profile'] or movie['status_id'] == done_status.get('id'): + if not movie['profile'] or (movie['status_id'] == done_status.get('id') and not manual): log.debug('Movie doesn\'t have a profile or already done, assuming in manage tab.') return @@ -237,7 +237,7 @@ class 
MovieSearcher(SearcherBase, MovieTypeBase): log.info('Ignored, score to low: %s', nzb['name']) continue - downloaded = fireEvent('searcher.download', data = nzb, movie = movie, single = True) + downloaded = fireEvent('searcher.download', data = nzb, movie = movie, manual = manual, single = True) if downloaded is True: ret = True break @@ -403,7 +403,7 @@ class MovieSearcher(SearcherBase, MovieTypeBase): def tryNextReleaseView(self, id = None, **kwargs): - trynext = self.tryNextRelease(id) + trynext = self.tryNextRelease(id, manual = True) return { 'success': trynext @@ -411,14 +411,14 @@ class MovieSearcher(SearcherBase, MovieTypeBase): def tryNextRelease(self, movie_id, manual = False): - snatched_status, ignored_status = fireEvent('status.get', ['snatched', 'ignored'], single = True) + snatched_status, done_status, ignored_status = fireEvent('status.get', ['snatched', 'done', 'ignored'], single = True) try: db = get_session() - rels = db.query(Release).filter_by( - status_id = snatched_status.get('id'), - movie_id = movie_id - ).all() + rels = db.query(Release) \ + .filter_by(movie_id = movie_id) \ + .filter(Release.status_id.in_([snatched_status.get('id'), done_status.get('id')])) \ + .all() for rel in rels: rel.status_id = ignored_status.get('id') @@ -426,7 +426,7 @@ class MovieSearcher(SearcherBase, MovieTypeBase): movie_dict = fireEvent('movie.get', movie_id, single = True) log.info('Trying next release for: %s', getTitle(movie_dict['library'])) - fireEvent('movie.searcher.single', movie_dict) + fireEvent('movie.searcher.single', movie_dict, manual = manual) return True diff --git a/couchpotato/core/notifications/base.py b/couchpotato/core/notifications/base.py index 7418e1a..4c0d099 100644 --- a/couchpotato/core/notifications/base.py +++ b/couchpotato/core/notifications/base.py @@ -32,7 +32,9 @@ class Notification(Provider): addEvent(listener, self.createNotifyHandler(listener)) def createNotifyHandler(self, listener): - def notify(message = None, group = {}, data = None): + def notify(message = None, group = None, data = None): + if not group: group = {} + if not self.conf('on_snatch', default = True) and listener == 'movie.snatched': return return self._notify(message = message, data = data if data else group, listener = listener) @@ -45,9 +47,10 @@ class Notification(Provider): def _notify(self, *args, **kwargs): if self.isEnabled(): return self.notify(*args, **kwargs) + return False - def notify(self, message = '', data = {}, listener = None): - pass + def notify(self, message = '', data = None, listener = None): + if not data: data = {} def test(self, **kwargs): diff --git a/couchpotato/core/notifications/boxcar/main.py b/couchpotato/core/notifications/boxcar/main.py index b30d487..0fca749 100644 --- a/couchpotato/core/notifications/boxcar/main.py +++ b/couchpotato/core/notifications/boxcar/main.py @@ -10,7 +10,8 @@ class Boxcar(Notification): url = 'https://boxcar.io/devices/providers/7MNNXY3UIzVBwvzkKwkC/notifications' - def notify(self, message = '', data = {}, listener = None): + def notify(self, message = '', data = None, listener = None): + if not data: data = {} try: message = message.strip() diff --git a/couchpotato/core/notifications/core/main.py b/couchpotato/core/notifications/core/main.py index 21cd197..a9a20b0 100644 --- a/couchpotato/core/notifications/core/main.py +++ b/couchpotato/core/notifications/core/main.py @@ -128,7 +128,8 @@ class CoreNotifier(Notification): Env.prop(prop_name, value = last_check) - def notify(self, message = '', data = {}, listener = 
None): + def notify(self, message = '', data = None, listener = None): + if not data: data = {} db = get_session() @@ -149,7 +150,8 @@ class CoreNotifier(Notification): return True - def frontend(self, type = 'notification', data = {}, message = None): + def frontend(self, type = 'notification', data = None, message = None): + if not data: data = {} log.debug('Notifying frontend') diff --git a/couchpotato/core/notifications/email/main.py b/couchpotato/core/notifications/email/main.py index 21fcf15..f94688d 100644 --- a/couchpotato/core/notifications/email/main.py +++ b/couchpotato/core/notifications/email/main.py @@ -11,7 +11,8 @@ log = CPLog(__name__) class Email(Notification): - def notify(self, message = '', data = {}, listener = None): + def notify(self, message = '', data = None, listener = None): + if not data: data = {} # Extract all the settings from settings from_address = self.conf('from') diff --git a/couchpotato/core/notifications/growl/main.py b/couchpotato/core/notifications/growl/main.py index caad661..dabeea0 100644 --- a/couchpotato/core/notifications/growl/main.py +++ b/couchpotato/core/notifications/growl/main.py @@ -43,7 +43,8 @@ class Growl(Notification): else: log.error('Failed register of growl: %s', traceback.format_exc()) - def notify(self, message = '', data = {}, listener = None): + def notify(self, message = '', data = None, listener = None): + if not data: data = {} self.register() diff --git a/couchpotato/core/notifications/nmj/main.py b/couchpotato/core/notifications/nmj/main.py index 695f53b..1479fb1 100644 --- a/couchpotato/core/notifications/nmj/main.py +++ b/couchpotato/core/notifications/nmj/main.py @@ -23,16 +23,15 @@ class NMJ(Notification): def autoConfig(self, host = 'localhost', **kwargs): - database = '' mount = '' try: terminal = telnetlib.Telnet(host) except Exception: - log.error('Warning: unable to get a telnet session to %s', (host)) + log.error('Warning: unable to get a telnet session to %s', host) return self.failed() - log.debug('Connected to %s via telnet', (host)) + log.debug('Connected to %s via telnet', host) terminal.read_until('sh-3.00# ') terminal.write('cat /tmp/source\n') terminal.write('cat /tmp/netshare\n') @@ -46,7 +45,7 @@ class NMJ(Notification): device = match.group(2) log.info('Found NMJ database %s on device %s', (database, device)) else: - log.error('Could not get current NMJ database on %s, NMJ is probably not running!', (host)) + log.error('Could not get current NMJ database on %s, NMJ is probably not running!', host) return self.failed() if device.startswith('NETWORK_SHARE/'): @@ -54,7 +53,7 @@ class NMJ(Notification): if match: mount = match.group().replace('127.0.0.1', host) - log.info('Found mounting url on the Popcorn Hour in configuration: %s', (mount)) + log.info('Found mounting url on the Popcorn Hour in configuration: %s', mount) else: log.error('Detected a network share on the Popcorn Hour, but could not get the mounting url') return self.failed() @@ -65,17 +64,18 @@ class NMJ(Notification): 'mount': mount, } - def addToLibrary(self, message = None, group = {}): + def addToLibrary(self, message = None, group = None): if self.isDisabled(): return + if not group: group = {} host = self.conf('host') mount = self.conf('mount') database = self.conf('database') if mount: - log.debug('Try to mount network drive via url: %s', (mount)) + log.debug('Try to mount network drive via url: %s', mount) try: - data = self.urlopen(mount) + self.urlopen(mount) except: return False @@ -98,11 +98,11 @@ class NMJ(Notification): et = 
etree.fromstring(response) result = et.findtext('returnValue') except SyntaxError, e: - log.error('Unable to parse XML returned from the Popcorn Hour: %s', (e)) + log.error('Unable to parse XML returned from the Popcorn Hour: %s', e) return False if int(result) > 0: - log.error('Popcorn Hour returned an errorcode: %s', (result)) + log.error('Popcorn Hour returned an errorcode: %s', result) return False else: log.info('NMJ started background scan') diff --git a/couchpotato/core/notifications/notifo/main.py b/couchpotato/core/notifications/notifo/main.py index 6e4d7ad..2d56ed7 100644 --- a/couchpotato/core/notifications/notifo/main.py +++ b/couchpotato/core/notifications/notifo/main.py @@ -12,7 +12,8 @@ class Notifo(Notification): url = 'https://api.notifo.com/v1/send_notification' - def notify(self, message = '', data = {}, listener = None): + def notify(self, message = '', data = None, listener = None): + if not data: data = {} try: params = { diff --git a/couchpotato/core/notifications/notifymyandroid/main.py b/couchpotato/core/notifications/notifymyandroid/main.py index 2c4ac90..92e5956 100644 --- a/couchpotato/core/notifications/notifymyandroid/main.py +++ b/couchpotato/core/notifications/notifymyandroid/main.py @@ -8,19 +8,17 @@ log = CPLog(__name__) class NotifyMyAndroid(Notification): - def notify(self, message = '', data = {}, listener = None): + def notify(self, message = '', data = None, listener = None): + if not data: data = {} nma = pynma.PyNMA() keys = splitString(self.conf('api_key')) nma.addkey(keys) nma.developerkey(self.conf('dev_key')) - # hacky fix for the event type - # as it seems to be part of the message now - self.event = message.split(' ')[0] response = nma.push( application = self.default_title, - event = self.event, + event = message.split(' ')[0], description = message, priority = self.conf('priority'), batch_mode = len(keys) > 1 diff --git a/couchpotato/core/notifications/plex/main.py b/couchpotato/core/notifications/plex/main.py index 02c9b30..f6088f5 100644 --- a/couchpotato/core/notifications/plex/main.py +++ b/couchpotato/core/notifications/plex/main.py @@ -17,8 +17,9 @@ class Plex(Notification): super(Plex, self).__init__() addEvent('renamer.after', self.addToLibrary) - def addToLibrary(self, message = None, group = {}): + def addToLibrary(self, message = None, group = None): if self.isDisabled(): return + if not group: group = {} log.info('Sending notification to Plex') hosts = self.getHosts(port = 32400) @@ -37,7 +38,7 @@ class Plex(Notification): for s in sections: if s.getAttribute('type') in source_type: url = refresh_url % s.getAttribute('key') - x = self.urlopen(url) + self.urlopen(url) except: log.error('Plex library update failed for %s, Media Server not running: %s', (host, traceback.format_exc(1))) @@ -45,7 +46,8 @@ class Plex(Notification): return True - def notify(self, message = '', data = {}, listener = None): + def notify(self, message = '', data = None, listener = None): + if not data: data = {} hosts = self.getHosts(port = 3000) successful = 0 diff --git a/couchpotato/core/notifications/prowl/main.py b/couchpotato/core/notifications/prowl/main.py index e5c4678..a8a3dda 100644 --- a/couchpotato/core/notifications/prowl/main.py +++ b/couchpotato/core/notifications/prowl/main.py @@ -12,7 +12,8 @@ class Prowl(Notification): 'api': 'https://api.prowlapp.com/publicapi/add' } - def notify(self, message = '', data = {}, listener = None): + def notify(self, message = '', data = None, listener = None): + if not data: data = {} data = { 'apikey': 
self.conf('api_key'), diff --git a/couchpotato/core/notifications/pushalot/main.py b/couchpotato/core/notifications/pushalot/main.py index 4c5e76c..4e3b6e7 100644 --- a/couchpotato/core/notifications/pushalot/main.py +++ b/couchpotato/core/notifications/pushalot/main.py @@ -11,7 +11,8 @@ class Pushalot(Notification): 'api': 'https://pushalot.com/api/sendmessage' } - def notify(self, message = '', data = {}, listener = None): + def notify(self, message = '', data = None, listener = None): + if not data: data = {} data = { 'AuthorizationToken': self.conf('auth_token'), diff --git a/couchpotato/core/notifications/pushover/main.py b/couchpotato/core/notifications/pushover/main.py index ea5e774..76f730b 100644 --- a/couchpotato/core/notifications/pushover/main.py +++ b/couchpotato/core/notifications/pushover/main.py @@ -11,7 +11,8 @@ class Pushover(Notification): app_token = 'YkxHMYDZp285L265L3IwH3LmzkTaCy' - def notify(self, message = '', data = {}, listener = None): + def notify(self, message = '', data = None, listener = None): + if not data: data = {} http_handler = HTTPSConnection("api.pushover.net:443") diff --git a/couchpotato/core/notifications/synoindex/main.py b/couchpotato/core/notifications/synoindex/main.py index 315520e..0f7775d 100644 --- a/couchpotato/core/notifications/synoindex/main.py +++ b/couchpotato/core/notifications/synoindex/main.py @@ -15,8 +15,9 @@ class Synoindex(Notification): super(Synoindex, self).__init__() addEvent('renamer.after', self.addToLibrary) - def addToLibrary(self, message = None, group = {}): + def addToLibrary(self, message = None, group = None): if self.isDisabled(): return + if not group: group = {} command = [self.index_path, '-A', group.get('destination_dir')] log.info('Executing synoindex command: %s ', command) @@ -27,9 +28,8 @@ class Synoindex(Notification): return True except OSError, e: log.error('Unable to run synoindex: %s', e) - return False - return True + return False def test(self, **kwargs): return { diff --git a/couchpotato/core/notifications/toasty/main.py b/couchpotato/core/notifications/toasty/main.py index 79b021e..c65b6b4 100644 --- a/couchpotato/core/notifications/toasty/main.py +++ b/couchpotato/core/notifications/toasty/main.py @@ -11,7 +11,8 @@ class Toasty(Notification): 'api': 'http://api.supertoasty.com/notify/%s?%s' } - def notify(self, message = '', data = {}, listener = None): + def notify(self, message = '', data = None, listener = None): + if not data: data = {} data = { 'title': self.default_title, diff --git a/couchpotato/core/notifications/trakt/main.py b/couchpotato/core/notifications/trakt/main.py index 86d4708..99d5553 100644 --- a/couchpotato/core/notifications/trakt/main.py +++ b/couchpotato/core/notifications/trakt/main.py @@ -13,7 +13,8 @@ class Trakt(Notification): listen_to = ['movie.downloaded'] - def notify(self, message = '', data = {}, listener = None): + def notify(self, message = '', data = None, listener = None): + if not data: data = {} post_data = { 'username': self.conf('automation_username'), diff --git a/couchpotato/core/notifications/twitter/main.py b/couchpotato/core/notifications/twitter/main.py index facc36b..ad4fc31 100644 --- a/couchpotato/core/notifications/twitter/main.py +++ b/couchpotato/core/notifications/twitter/main.py @@ -4,7 +4,8 @@ from couchpotato.core.helpers.variable import cleanHost from couchpotato.core.logger import CPLog from couchpotato.core.notifications.base import Notification from couchpotato.environment import Env -from pytwitter import Api, parse_qsl +from 
pytwitter import Api +from urlparse import parse_qsl import oauth2 log = CPLog(__name__) @@ -29,7 +30,8 @@ class Twitter(Notification): addApiView('notify.%s.auth_url' % self.getName().lower(), self.getAuthorizationUrl) addApiView('notify.%s.credentials' % self.getName().lower(), self.getCredentials) - def notify(self, message = '', data = {}, listener = None): + def notify(self, message = '', data = None, listener = None): + if not data: data = {} api = Api(self.consumer_key, self.consumer_secret, self.conf('access_token_key'), self.conf('access_token_secret')) diff --git a/couchpotato/core/notifications/xbmc/main.py b/couchpotato/core/notifications/xbmc/main.py index 34a9c1d..dc185c4 100755 --- a/couchpotato/core/notifications/xbmc/main.py +++ b/couchpotato/core/notifications/xbmc/main.py @@ -17,7 +17,8 @@ class XBMC(Notification): use_json_notifications = {} http_time_between_calls = 0 - def notify(self, message = '', data = {}, listener = None): + def notify(self, message = '', data = None, listener = None): + if not data: data = {} hosts = splitString(self.conf('host')) @@ -53,9 +54,9 @@ class XBMC(Notification): try: for result in response: - if (result.get('result') and result['result'] == 'OK'): + if result.get('result') and result['result'] == 'OK': successful += 1 - elif (result.get('error')): + elif result.get('error'): log.error('XBMC error; %s: %s (%s)', (result['id'], result['error']['message'], result['error']['code'])) except: @@ -72,7 +73,7 @@ class XBMC(Notification): ('JSONRPC.Version', {}) ]) for result in response: - if (result.get('result') and type(result['result']['version']).__name__ == 'int'): + if result.get('result') and type(result['result']['version']).__name__ == 'int': # only v2 and v4 return an int object # v6 (as of XBMC v12(Frodo)) is required to send notifications xbmc_rpc_version = str(result['result']['version']) @@ -85,15 +86,15 @@ class XBMC(Notification): # send the text message resp = self.notifyXBMCnoJSON(host, {'title':self.default_title, 'message':message}) for result in resp: - if (result.get('result') and result['result'] == 'OK'): + if result.get('result') and result['result'] == 'OK': log.debug('Message delivered successfully!') success = True break - elif (result.get('error')): + elif result.get('error'): log.error('XBMC error; %s: %s (%s)', (result['id'], result['error']['message'], result['error']['code'])) break - elif (result.get('result') and type(result['result']['version']).__name__ == 'dict'): + elif result.get('result') and type(result['result']['version']).__name__ == 'dict': # XBMC JSON-RPC v6 returns an array object containing # major, minor and patch number xbmc_rpc_version = str(result['result']['version']['major']) @@ -108,16 +109,16 @@ class XBMC(Notification): # send the text message resp = self.request(host, [('GUI.ShowNotification', {'title':self.default_title, 'message':message, 'image': self.getNotificationImage('small')})]) for result in resp: - if (result.get('result') and result['result'] == 'OK'): + if result.get('result') and result['result'] == 'OK': log.debug('Message delivered successfully!') success = True break - elif (result.get('error')): + elif result.get('error'): log.error('XBMC error; %s: %s (%s)', (result['id'], result['error']['message'], result['error']['code'])) break # error getting version info (we do have contact with XBMC though) - elif (result.get('error')): + elif result.get('error'): log.error('XBMC error; %s: %s (%s)', (result['id'], result['error']['message'], result['error']['code'])) 
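The recurring change in the notification hunks above, swapping `data = {}` and `group = {}` defaults for `None` plus an `if not data: data = {}` guard, avoids Python's shared-mutable-default pitfall. A minimal, self-contained illustration of the difference; the `notify_broken`/`notify_fixed` functions and the `sent` counter are made up for the demo, not part of the patch:

```python
# Mutable default arguments are created once, when the function is defined,
# so every call that omits `data` shares the same dict object.
def notify_broken(message='', data={}):
    data.setdefault('sent', 0)
    data['sent'] += 1
    return data

print(notify_broken('a'))  # {'sent': 1}
print(notify_broken('b'))  # {'sent': 2}  <- state leaked from the previous call

# The pattern used throughout this diff: default to None and build a fresh
# dict inside the function body.
def notify_fixed(message='', data=None):
    if not data:
        data = {}
    data.setdefault('sent', 0)
    data['sent'] += 1
    return data

print(notify_fixed('a'))  # {'sent': 1}
print(notify_fixed('b'))  # {'sent': 1}
```

The same guard is why the Plex and Synoindex `addToLibrary(message = None, group = None)` handlers above only build the dict after the `isDisabled()` check.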
log.debug('Use JSON notifications: %s ', self.use_json_notifications) diff --git a/couchpotato/core/plugins/base.py b/couchpotato/core/plugins/base.py index 89ef29b..b9ec0c0 100644 --- a/couchpotato/core/plugins/base.py +++ b/couchpotato/core/plugins/base.py @@ -2,7 +2,7 @@ from StringIO import StringIO from couchpotato.core.event import fireEvent, addEvent from couchpotato.core.helpers.encoding import tryUrlencode, ss, toSafeString, \ toUnicode -from couchpotato.core.helpers.variable import getExt, md5 +from couchpotato.core.helpers.variable import getExt, md5, isLocalIP from couchpotato.core.logger import CPLog from couchpotato.environment import Env from multipartpost import MultipartPostHandler @@ -26,11 +26,13 @@ log = CPLog(__name__) class Plugin(object): _class_name = None + plugin_path = None enabled_option = 'enabled' auto_register_static = True _needs_shutdown = False + _running = None user_agent = 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10.8; rv:24.0) Gecko/20130519 Firefox/24.0' http_last_use = {} @@ -81,7 +83,7 @@ class Plugin(object): class_name = re.sub('([a-z0-9])([A-Z])', r'\1_\2', s1).lower() # View path - path = 'api/%s/static/%s/' % (Env.setting('api_key'), class_name) + path = 'static/plugin/%s/' % (class_name) # Add handler to Tornado Env.get('app').add_handlers(".*$", [(Env.get('web_base') + path + '(.*)', StaticFileHandler, {'path': static_folder})]) @@ -140,7 +142,7 @@ class Plugin(object): if self.http_failed_disabled[host] > (time.time() - 900): log.info2('Disabled calls to %s for 15 minutes because so many failed requests.', host) if not show_error: - raise + raise Exception('Disabled calls to %s for 15 minutes because so many failed requests') else: return '' else: @@ -203,7 +205,7 @@ class Plugin(object): self.http_failed_request[host] += 1 # Disable temporarily - if self.http_failed_request[host] > 5: + if self.http_failed_request[host] > 5 and not isLocalIP(host): self.http_failed_disabled[host] = time.time() except: @@ -257,8 +259,8 @@ class Plugin(object): def getCache(self, cache_key, url = None, **kwargs): - cache_key = md5(ss(cache_key)) - cache = Env.get('cache').get(cache_key) + cache_key_md5 = md5(ss(cache_key)) + cache = Env.get('cache').get(cache_key_md5) if cache: if not Env.get('dev'): log.debug('Getting cache %s', cache_key) return cache @@ -282,8 +284,9 @@ class Plugin(object): return '' def setCache(self, cache_key, value, timeout = 300): + cache_key_md5 = md5(ss(cache_key)) log.debug('Setting cache %s', cache_key) - Env.get('cache').set(cache_key, value, timeout) + Env.get('cache').set(cache_key_md5, value, timeout) return value def createNzbName(self, data, movie): @@ -292,9 +295,9 @@ class Plugin(object): def createFileName(self, data, filedata, movie): name = os.path.join(self.createNzbName(data, movie)) - if data.get('type') == 'nzb' and 'DOCTYPE nzb' not in filedata and '' not in filedata: + if data.get('protocol') == 'nzb' and 'DOCTYPE nzb' not in filedata and '' not in filedata: return '%s.%s' % (name, 'rar') - return '%s.%s' % (name, data.get('type')) + return '%s.%s' % (name, data.get('protocol')) def cpTag(self, movie): if Env.setting('enabled', 'renamer'): @@ -306,4 +309,4 @@ class Plugin(object): return not self.isEnabled() def isEnabled(self): - return self.conf(self.enabled_option) or self.conf(self.enabled_option) == None + return self.conf(self.enabled_option) or self.conf(self.enabled_option) is None diff --git a/couchpotato/core/plugins/browser/main.py b/couchpotato/core/plugins/browser/main.py index 6b989a0..380e682 100644 
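The `getCache`/`setCache` change in `plugins/base.py` above hashes the key only at the storage boundary (`cache_key_md5 = md5(ss(cache_key))`), so debug logs keep the readable key while the cache backend always sees the hashed one; it is also why the suggestion plugin later in this diff can drop its manual `md5(ss('suggestion_cached'))` calls. A rough sketch of the idea, assuming a dict-backed stand-in for `Env.get('cache')`:

```python
import hashlib

def md5(text):
    # Rough equivalent of couchpotato.core.helpers.variable.md5.
    return hashlib.md5(text.encode('utf-8')).hexdigest()

class SimpleCache(object):
    """Stand-in for Env.get('cache'); a plain dict instead of a real backend."""

    def __init__(self):
        self._store = {}

    def set(self, cache_key, value, timeout=300):
        # Hash only at the storage boundary, so callers and log lines keep
        # the human-readable key while the backend sees a fixed-length one.
        # (timeout is ignored in this sketch.)
        self._store[md5(cache_key)] = value
        return value

    def get(self, cache_key):
        return self._store.get(md5(cache_key))

cache = SimpleCache()
cache.set('suggestion_cached', ['tt1375666', 'tt0816692'])
print(cache.get('suggestion_cached'))
```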
--- a/couchpotato/core/plugins/browser/main.py +++ b/couchpotato/core/plugins/browser/main.py @@ -12,7 +12,7 @@ if os.name == 'nt': except: # todo:: subclass ImportError for missing dependencies, vs. broken plugins? raise ImportError("Missing the win32file module, which is a part of the prerequisite \ - pywin32 package. You can get it from http://sourceforge.net/projects/pywin32/files/pywin32/"); + pywin32 package. You can get it from http://sourceforge.net/projects/pywin32/files/pywin32/") else: import win32file #@UnresolvedImport diff --git a/couchpotato/core/plugins/dashboard/main.py b/couchpotato/core/plugins/dashboard/main.py index 8e96d2d..2da4d8c 100644 --- a/couchpotato/core/plugins/dashboard/main.py +++ b/couchpotato/core/plugins/dashboard/main.py @@ -4,8 +4,9 @@ from couchpotato.core.event import fireEvent from couchpotato.core.helpers.variable import splitString, tryInt from couchpotato.core.logger import CPLog from couchpotato.core.plugins.base import Plugin -from couchpotato.core.settings.model import Media +from couchpotato.core.settings.model import Movie, Library, LibraryTitle from sqlalchemy.orm import joinedload_all +from sqlalchemy.sql.expression import asc import random as rndm import time @@ -40,67 +41,81 @@ class Dashboard(Plugin): profile_pre[profile.get('id')] = contains - # Get all active movies - active_status, snatched_status, downloaded_status, available_status = fireEvent('status.get', ['active', 'snatched', 'downloaded', 'available'], single = True) - subq = db.query(Media).filter(Media.status_id == active_status.get('id')).subquery() - - q = db.query(Media).join((subq, subq.c.id == Media.id)) \ - .options(joinedload_all('releases')) \ - .options(joinedload_all('profile')) \ - .options(joinedload_all('library.titles')) \ - .options(joinedload_all('library.files')) \ - .options(joinedload_all('status')) \ - .options(joinedload_all('files')) - # Add limit limit = 12 if limit_offset: splt = splitString(limit_offset) if isinstance(limit_offset, (str, unicode)) else limit_offset limit = tryInt(splt[0]) - all_movies = q.all() + # Get all active movies + active_status = fireEvent('status.get', ['active'], single = True) + q = db.query(Movie) \ + .join(Library) \ + .filter(Movie.status_id == active_status.get('id')) \ + .with_entities(Movie.id, Movie.profile_id, Library.info, Library.year) \ + .group_by(Movie.id) + + if not random: + q = q.join(LibraryTitle) \ + .filter(LibraryTitle.default == True) \ + .order_by(asc(LibraryTitle.simple_title)) + + active = q.all() + movies = [] - if random: - rndm.shuffle(all_movies) + if len(active) > 0: + + # Do the shuffle + if random: + rndm.shuffle(active) + + movie_ids = [] + for movie in active: + movie_id, profile_id, info, year = movie + + pp = profile_pre.get(profile_id) + if not pp: continue + + eta = info.get('release_date', {}) or {} + coming_soon = False + + # Theater quality + if pp.get('theater') and fireEvent('movie.searcher.could_be_released', True, eta, year, single = True): + coming_soon = True + elif pp.get('dvd') and fireEvent('movie.searcher.could_be_released', False, eta, year, single = True): + coming_soon = True + + if coming_soon: + + # Don't list older movies + if ((not late and (not eta.get('dvd') and not eta.get('theater') or eta.get('dvd') and eta.get('dvd') > (now - 2419200))) or + (late and (eta.get('dvd', 0) > 0 or eta.get('theater')) and eta.get('dvd') < (now - 2419200))): + movie_ids.append(movie_id) + + if len(movie_ids) >= limit: + break + + if len(movie_ids) > 0: + + # Get all movie information 
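The "don't list older movies" test in the dashboard hunk above compares DVD dates against `now - 2419200`; that constant is simply four weeks in seconds. A small self-contained check of the arithmetic, with `is_recent` as a readable mirror of the inline condition and made-up `eta` dicts:

```python
import time

FOUR_WEEKS = 4 * 7 * 24 * 60 * 60
print(FOUR_WEEKS)  # 2419200, the constant used in the dashboard filter

def is_recent(eta, late=False, now=None):
    # Readable mirror of the inline condition: without `late`, keep movies
    # with no known dates yet or a DVD date inside the last four weeks;
    # with `late`, keep only the ones whose DVD date is older than that.
    now = now if now is not None else time.time()
    if not late:
        return (not eta.get('dvd') and not eta.get('theater')) or \
               (eta.get('dvd') and eta.get('dvd') > (now - FOUR_WEEKS))
    return (eta.get('dvd', 0) > 0 or eta.get('theater')) and \
           eta.get('dvd', 0) < (now - FOUR_WEEKS)

now = time.time()
print(is_recent({'dvd': now - 86400}, now=now))                   # True: DVD out yesterday
print(is_recent({'dvd': now - 90 * 86400}, now=now))              # False: months old
print(is_recent({'dvd': now - 90 * 86400}, late=True, now=now))   # True: goes in the "late" list
```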
+ movies_raw = db.query(Movie) \ + .options(joinedload_all('library.titles')) \ + .options(joinedload_all('library.files')) \ + .options(joinedload_all('files')) \ + .filter(Movie.id.in_(movie_ids)) \ + .all() + + # Create dict by movie id + movie_dict = {} + for movie in movies_raw: + movie_dict[movie.id] = movie + + for movie_id in movie_ids: + movies.append(movie_dict[movie_id].to_dict({ + 'library': {'titles': {}, 'files':{}}, + 'files': {}, + })) - movies = [] - for movie in all_movies: - pp = profile_pre.get(movie.profile.id) - eta = movie.library.info.get('release_date', {}) or {} - coming_soon = False - - # Theater quality - if pp.get('theater') and fireEvent('movie.searcher.could_be_released', True, eta, movie.library.year, single = True): - coming_soon = True - if pp.get('dvd') and fireEvent('movie.searcher.could_be_released', False, eta, movie.library.year, single = True): - coming_soon = True - - # Skip if movie is snatched/downloaded/available - skip = False - for release in movie.releases: - if release.status_id in [snatched_status.get('id'), downloaded_status.get('id'), available_status.get('id')]: - skip = True - break - if skip: - continue - - if coming_soon: - temp = movie.to_dict({ - 'profile': {'types': {}}, - 'releases': {'files':{}, 'info': {}}, - 'library': {'titles': {}, 'files':{}}, - 'files': {}, - }) - - # Don't list older movies - if ((not late and ((not eta.get('dvd') and not eta.get('theater')) or (eta.get('dvd') and eta.get('dvd') > (now - 2419200)))) or \ - (late and (eta.get('dvd', 0) > 0 or eta.get('theater')) and eta.get('dvd') < (now - 2419200))): - movies.append(temp) - - if len(movies) >= limit: - break - - db.expire_all() return { 'success': True, 'empty': len(movies) == 0, diff --git a/couchpotato/core/plugins/file/main.py b/couchpotato/core/plugins/file/main.py index cdd67f5..238bc76 100644 --- a/couchpotato/core/plugins/file/main.py +++ b/couchpotato/core/plugins/file/main.py @@ -71,7 +71,7 @@ class FileManager(Plugin): db = get_session() for root, dirs, walk_files in os.walk(Env.get('cache_dir')): for filename in walk_files: - if root == python_cache or 'minified' in filename or 'version' in filename or 'temp_updater' in root: continue + if root == python_cache or 'minified' in root or 'version' in filename or 'temp_updater' in root: continue file_path = os.path.join(root, filename) f = db.query(File).filter(File.path == toUnicode(file_path)).first() if not f: @@ -83,7 +83,8 @@ class FileManager(Plugin): Env.get('app').add_handlers(".*$", [('%s%s' % (Env.get('api_base'), route), StaticFileHandler, {'path': Env.get('cache_dir')})]) - def download(self, url = '', dest = None, overwrite = False, urlopen_kwargs = {}): + def download(self, url = '', dest = None, overwrite = False, urlopen_kwargs = None): + if not urlopen_kwargs: urlopen_kwargs = {} if not dest: # to Cache dest = os.path.join(Env.get('cache_dir'), '%s.%s' % (md5(url), getExt(url))) @@ -100,7 +101,9 @@ class FileManager(Plugin): self.createFile(dest, filedata, binary = True) return dest - def add(self, path = '', part = 1, type_tuple = (), available = 1, properties = {}): + def add(self, path = '', part = 1, type_tuple = (), available = 1, properties = None): + if not properties: properties = {} + type_id = self.getType(type_tuple).get('id') db = get_session() diff --git a/couchpotato/core/plugins/log/main.py b/couchpotato/core/plugins/log/main.py index 18a78b9..dc8f740 100644 --- a/couchpotato/core/plugins/log/main.py +++ b/couchpotato/core/plugins/log/main.py @@ -90,7 +90,6 @@ class 
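Taken as a whole, the dashboard rewrite above replaces one eager-loaded query over every active movie with two passes: a cheap `with_entities(...)` scan that returns only ids plus the fields the coming-soon test needs, then a fully loaded query restricted to the ids that survived. A compressed sketch of that pattern against a throwaway schema; the `Movie`/`Library` models, columns and session setup are simplified stand-ins, and `joinedload` stands in for the diff's `joinedload_all`:

```python
from sqlalchemy import Column, ForeignKey, Integer, create_engine
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy.orm import joinedload, relationship, sessionmaker

Base = declarative_base()

class Library(Base):
    __tablename__ = 'library'
    id = Column(Integer, primary_key=True)
    year = Column(Integer)

class Movie(Base):
    __tablename__ = 'movie'
    id = Column(Integer, primary_key=True)
    profile_id = Column(Integer)
    library_id = Column(Integer, ForeignKey('library.id'))
    library = relationship(Library)

engine = create_engine('sqlite://')
Base.metadata.create_all(engine)
db = sessionmaker(bind=engine)()
db.add_all([Movie(profile_id=1, library=Library(year=2010 + i)) for i in range(3)])
db.commit()

# Pass 1: cheap scan, only the columns needed to decide what is "coming soon".
candidates = db.query(Movie) \
    .join(Library) \
    .with_entities(Movie.id, Movie.profile_id, Library.year) \
    .all()
movie_ids = [m_id for m_id, profile_id, year in candidates if year >= 2011][:12]

# Pass 2: hydrate only the survivors, eager-loading what the frontend needs.
movies = db.query(Movie) \
    .options(joinedload(Movie.library)) \
    .filter(Movie.id.in_(movie_ids)) \
    .all()
print([(m.id, m.library.year) for m in movies])
```

The point of the split is that the expensive eager load only ever touches at most `limit` rows, however large the library is.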
Logging(Plugin): if not os.path.isfile(path): break - reversed_lines = [] f = open(path, 'r') reversed_lines = toUnicode(f.read()).split('[0m\n') reversed_lines.reverse() diff --git a/couchpotato/core/plugins/manage/main.py b/couchpotato/core/plugins/manage/main.py index 516cb88..702b129 100644 --- a/couchpotato/core/plugins/manage/main.py +++ b/couchpotato/core/plugins/manage/main.py @@ -26,7 +26,8 @@ class Manage(Plugin): addEvent('manage.diskspace', self.getDiskSpace) # Add files after renaming - def after_rename(message = None, group = {}): + def after_rename(message = None, group = None): + if not group: group = {} return self.scanFilesToLibrary(folder = group['destination_dir'], files = group['renamed_files']) addEvent('renamer.after', after_rename, priority = 110) @@ -117,7 +118,9 @@ class Manage(Plugin): fireEvent('movie.delete', movie_id = done_movie['id'], delete_from = 'all') else: - for release in done_movie.get('releases', []): + releases = fireEvent('release.for_movie', id = done_movie.get('id'), single = True) + + for release in releases: if len(release.get('files', [])) == 0: fireEvent('release.delete', release['id']) else: @@ -128,9 +131,9 @@ class Manage(Plugin): break # Check if there are duplicate releases (different quality) use the last one, delete the rest - if len(done_movie.get('releases', [])) > 1: + if len(releases) > 1: used_files = {} - for release in done_movie.get('releases', []): + for release in releases: for release_file in release.get('files', []): already_used = used_files.get(release_file['path']) @@ -169,6 +172,7 @@ class Manage(Plugin): self.in_progress = False def createAddToLibrary(self, folder, added_identifiers = []): + def addToLibrary(group, total_found, to_go): if self.in_progress[folder]['total'] is None: self.in_progress[folder] = { @@ -184,7 +188,7 @@ class Manage(Plugin): fireEvent('release.add', group = group) fireEventAsync('library.update.movie', identifier = identifier, on_complete = self.createAfterUpdate(folder, identifier)) else: - self.in_progress[folder]['to_go'] = self.in_progress[folder]['to_go'] - 1 + self.in_progress[folder]['to_go'] -= 1 return addToLibrary @@ -195,7 +199,7 @@ class Manage(Plugin): if not self.in_progress or self.shuttingDown(): return - self.in_progress[folder]['to_go'] = self.in_progress[folder]['to_go'] - 1 + self.in_progress[folder]['to_go'] -= 1 total = self.in_progress[folder]['total'] movie_dict = fireEvent('movie.get', identifier, single = True) diff --git a/couchpotato/core/plugins/profile/main.py b/couchpotato/core/plugins/profile/main.py index c265fa9..9ff3ead 100644 --- a/couchpotato/core/plugins/profile/main.py +++ b/couchpotato/core/plugins/profile/main.py @@ -5,6 +5,7 @@ from couchpotato.core.helpers.encoding import toUnicode from couchpotato.core.logger import CPLog from couchpotato.core.plugins.base import Plugin from couchpotato.core.settings.model import Profile, ProfileType, Media +from sqlalchemy.orm import joinedload_all log = CPLog(__name__) @@ -55,7 +56,9 @@ class ProfilePlugin(Plugin): def all(self): db = get_session() - profiles = db.query(Profile).all() + profiles = db.query(Profile) \ + .options(joinedload_all('types')) \ + .all() temp = [] for profile in profiles: @@ -104,7 +107,9 @@ class ProfilePlugin(Plugin): def default(self): db = get_session() - default = db.query(Profile).first() + default = db.query(Profile) \ + .options(joinedload_all('types')) \ + .first() default_dict = default.to_dict(self.to_dict) db.expire_all() @@ -155,7 +160,7 @@ class ProfilePlugin(Plugin): def 
fill(self): - db = get_session(); + db = get_session() profiles = [{ 'label': 'Best', diff --git a/couchpotato/core/plugins/quality/main.py b/couchpotato/core/plugins/quality/main.py index 67c7f00..1149c03 100644 --- a/couchpotato/core/plugins/quality/main.py +++ b/couchpotato/core/plugins/quality/main.py @@ -102,7 +102,7 @@ class QualityPlugin(Plugin): def fill(self): - db = get_session(); + db = get_session() order = 0 for q in self.qualities: @@ -152,39 +152,41 @@ class QualityPlugin(Plugin): return True - def guess(self, files, extra = {}): + def guess(self, files, extra = None): + if not extra: extra = {} # Create hash for cache - hash = md5(str([f.replace('.' + getExt(f), '') for f in files])) - cached = self.getCache(hash) - if cached and extra is {}: return cached + cache_key = md5(str([f.replace('.' + getExt(f), '') for f in files])) + cached = self.getCache(cache_key) + if cached and len(extra) == 0: return cached + qualities = self.all() for cur_file in files: words = re.split('\W+', cur_file.lower()) found = {} - for quality in self.all(): + for quality in qualities: contains = self.containsTag(quality, words, cur_file) if contains: found[quality['identifier']] = True - for quality in self.all(): + for quality in qualities: # Check identifier if quality['identifier'] in words: if len(found) == 0 or len(found) == 1 and found.get(quality['identifier']): log.debug('Found via identifier "%s" in %s', (quality['identifier'], cur_file)) - return self.setCache(hash, quality) + return self.setCache(cache_key, quality) # Check alt and tags contains = self.containsTag(quality, words, cur_file) if contains: - return self.setCache(hash, quality) + return self.setCache(cache_key, quality) # Try again with loose testing - quality = self.guessLoose(hash, files = files, extra = extra) + quality = self.guessLoose(cache_key, files = files, extra = extra) if quality: - return self.setCache(hash, quality) + return self.setCache(cache_key, quality) log.debug('Could not identify quality for: %s', files) return None @@ -204,7 +206,7 @@ class QualityPlugin(Plugin): return - def guessLoose(self, hash, files = None, extra = None): + def guessLoose(self, cache_key, files = None, extra = None): if extra: for quality in self.all(): @@ -212,15 +214,15 @@ class QualityPlugin(Plugin): # Check width resolution, range 20 if quality.get('width') and (quality.get('width') - 20) <= extra.get('resolution_width', 0) <= (quality.get('width') + 20): log.debug('Found %s via resolution_width: %s == %s', (quality['identifier'], quality.get('width'), extra.get('resolution_width', 0))) - return self.setCache(hash, quality) + return self.setCache(cache_key, quality) # Check height resolution, range 20 if quality.get('height') and (quality.get('height') - 20) <= extra.get('resolution_height', 0) <= (quality.get('height') + 20): log.debug('Found %s via resolution_height: %s == %s', (quality['identifier'], quality.get('height'), extra.get('resolution_height', 0))) - return self.setCache(hash, quality) + return self.setCache(cache_key, quality) if 480 <= extra.get('resolution_width', 0) <= 720: log.debug('Found as dvdrip') - return self.setCache(hash, self.single('dvdrip')) + return self.setCache(cache_key, self.single('dvdrip')) return None diff --git a/couchpotato/core/plugins/release/main.py b/couchpotato/core/plugins/release/main.py index 34668cc..d1fbbd6 100644 --- a/couchpotato/core/plugins/release/main.py +++ b/couchpotato/core/plugins/release/main.py @@ -6,8 +6,10 @@ from couchpotato.core.logger import CPLog from 
couchpotato.core.plugins.base import Plugin from couchpotato.core.plugins.scanner.main import Scanner from couchpotato.core.settings.model import File, Release as Relea, Media +from sqlalchemy.orm import joinedload_all from sqlalchemy.sql.expression import and_, or_ import os +import traceback log = CPLog(__name__) @@ -35,7 +37,14 @@ class Release(Plugin): 'id': {'type': 'id', 'desc': 'ID of the release object in release-table'} } }) + addApiView('release.for_movie', self.forMovieView, docs = { + 'desc': 'Returns all releases for a movie. Ordered by score(desc)', + 'params': { + 'id': {'type': 'id', 'desc': 'ID of the movie'} + } + }) + addEvent('release.for_movie', self.forMovie) addEvent('release.delete', self.delete) addEvent('release.clean', self.clean) @@ -88,8 +97,8 @@ class Release(Plugin): added_files = db.query(File).filter(or_(*[File.id == x for x in added_files])).all() rel.files.extend(added_files) db.commit() - except Exception, e: - log.debug('Failed to attach "%s" to release: %s', (cur_file, e)) + except: + log.debug('Failed to attach "%s" to release: %s', (added_files, traceback.format_exc())) fireEvent('movie.restatus', movie.id) @@ -174,7 +183,11 @@ class Release(Plugin): # Get matching provider provider = fireEvent('provider.belongs_to', item['url'], provider = item.get('provider'), single = True) - if item['protocol'] != 'torrent_magnet': + if not item.get('protocol'): + item['protocol'] = item['type'] + item['type'] = 'movie' + + if item.get('protocol') != 'torrent_magnet': item['download'] = provider.loginDownload if provider.urls.get('login') else provider.download success = fireEvent('searcher.download', data = item, movie = rel.movie.to_dict({ @@ -203,3 +216,28 @@ class Release(Plugin): return { 'success': False } + + def forMovie(self, id = None): + + db = get_session() + + releases_raw = db.query(Relea) \ + .options(joinedload_all('info')) \ + .options(joinedload_all('files')) \ + .filter(Relea.movie_id == id) \ + .all() + + releases = [r.to_dict({'info':{}, 'files':{}}) for r in releases_raw] + releases = sorted(releases, key = lambda k: k['info'].get('score', 0), reverse = True) + + return releases + + def forMovieView(self, id = None, **kwargs): + + releases = self.forMovie(id) + + return { + 'releases': releases, + 'success': True + } + diff --git a/couchpotato/core/plugins/renamer/main.py b/couchpotato/core/plugins/renamer/main.py index 0a11a5d..82434f5 100644 --- a/couchpotato/core/plugins/renamer/main.py +++ b/couchpotato/core/plugins/renamer/main.py @@ -9,8 +9,7 @@ from couchpotato.core.plugins.base import Plugin from couchpotato.core.settings.model import Library, File, Profile, Release, \ ReleaseInfo from couchpotato.environment import Env -from unrar2 import RarFile, RarInfo -from unrar2.rar_exceptions import * +from unrar2 import RarFile import errno import fnmatch import os @@ -62,10 +61,10 @@ class Renamer(Plugin): def scanView(self, **kwargs): - async = tryInt(kwargs.get('async', None)) - movie_folder = kwargs.get('movie_folder', None) - downloader = kwargs.get('downloader', None) - download_id = kwargs.get('download_id', None) + async = tryInt(kwargs.get('async', 0)) + movie_folder = kwargs.get('movie_folder') + downloader = kwargs.get('downloader') + download_id = kwargs.get('download_id') download_info = {'folder': movie_folder} if movie_folder else None if download_info: @@ -98,7 +97,7 @@ class Renamer(Plugin): elif self.conf('from') in self.conf('to'): log.error('The "to" can\'t be inside of the "from" folder. 
You\'ll get an infinite loop.') return - elif (movie_folder and movie_folder in [self.conf('to'), self.conf('from')]): + elif movie_folder and movie_folder in [self.conf('to'), self.conf('from')]: log.error('The "to" and "from" folders can\'t be inside of or the same as the provided movie folder.') return @@ -131,8 +130,8 @@ class Renamer(Plugin): # Unpack any archives extr_files = None if self.conf('unrar'): - folder, movie_folder, files, extr_files = self.extractFiles(folder = folder, movie_folder = movie_folder, files = files, \ - cleanup = self.conf('cleanup') and not self.downloadIsTorrent(download_info)) + folder, movie_folder, files, extr_files = self.extractFiles(folder = folder, movie_folder = movie_folder, files = files, + cleanup = self.conf('cleanup') and not self.downloadIsTorrent(download_info)) groups = fireEvent('scanner.scan', folder = folder if folder else self.conf('from'), files = files, download_info = download_info, return_ignored = False, single = True) @@ -347,7 +346,7 @@ class Renamer(Plugin): profile = db.query(Profile).filter_by(core = True, label = group['meta_data']['quality']['label']).first() fireEvent('movie.add', params = {'identifier': group['library']['identifier'], 'profile_id': profile.id}, search_after = False) db.expire_all() - library = db.query(Library).filter_by(identifier = group['library']['identifier']).first() + library_ent = db.query(Library).filter_by(identifier = group['library']['identifier']).first() for movie in library_ent.media: @@ -496,7 +495,9 @@ class Renamer(Plugin): self.renaming_started = False - def getRenameExtras(self, extra_type = '', replacements = {}, folder_name = '', file_name = '', destination = '', group = {}, current_file = '', remove_multiple = False): + def getRenameExtras(self, extra_type = '', replacements = None, folder_name = '', file_name = '', destination = '', group = None, current_file = '', remove_multiple = False): + if not group: group = {} + if not replacements: replacements = {} replacements = replacements.copy() rename_files = {} @@ -517,7 +518,7 @@ class Renamer(Plugin): def tagDir(self, group, tag): ignore_file = None - if isinstance(group, (dict)): + if isinstance(group, dict): for movie_file in sorted(list(group['files']['movie'])): ignore_file = '%s.%s.ignore' % (os.path.splitext(movie_file)[0], tag) break @@ -603,9 +604,9 @@ Remove it if you want it to be renamed (again, or at least let it try again) return True def doReplace(self, string, replacements, remove_multiple = False): - ''' + """ replace confignames with the real thing - ''' + """ replacements = replacements.copy() if remove_multiple: @@ -844,11 +845,12 @@ Remove it if you want it to be renamed (again, or at least let it try again) def statusInfoComplete(self, item): return item['id'] and item['downloader'] and item['folder'] - + def movieInFromFolder(self, movie_folder): return movie_folder and self.conf('from') in movie_folder or not movie_folder - def extractFiles(self, folder = None, movie_folder = None, files = [], cleanup = False): + def extractFiles(self, folder = None, movie_folder = None, files = None, cleanup = False): + if not files: files = [] # RegEx for finding rar files archive_regex = '(?P^(?P(?:(?!\.part\d+\.rar$).)*)\.(?:(?:part0*1\.)?rar)$)' @@ -873,7 +875,7 @@ Remove it if you want it to be renamed (again, or at least let it try again) #Extract all found archives for archive in archives: # Check if it has already been processed by CPS - if (self.hastagDir(os.path.dirname(archive['file']))): + if 
self.hastagDir(os.path.dirname(archive['file'])): continue # Find all related archive files @@ -942,7 +944,7 @@ Remove it if you want it to be renamed (again, or at least let it try again) self.makeDir(os.path.dirname(move_to)) self.moveFile(leftoverfile, move_to, cleanup) except Exception, e: - log.error('Failed moving left over file %s to %s: %s %s',(leftoverfile, move_to, e, traceback.format_exc())) + log.error('Failed moving left over file %s to %s: %s %s', (leftoverfile, move_to, e, traceback.format_exc())) # As we probably tried to overwrite the nfo file, check if it exists and then remove the original if os.path.isfile(move_to): if cleanup: @@ -965,9 +967,9 @@ Remove it if you want it to be renamed (again, or at least let it try again) if extr_files: files.extend(extr_files) - # Cleanup files and folder if movie_folder was not provided + # Cleanup files and folder if movie_folder was not provided if not movie_folder: files = [] folder = None - return (folder, movie_folder, files, extr_files) + return folder, movie_folder, files, extr_files diff --git a/couchpotato/core/plugins/scanner/main.py b/couchpotato/core/plugins/scanner/main.py index 7e34588..706f6c7 100644 --- a/couchpotato/core/plugins/scanner/main.py +++ b/couchpotato/core/plugins/scanner/main.py @@ -429,7 +429,7 @@ class Scanner(Plugin): if len(processed_movies) > 0: log.info('Found %s movies in the folder %s', (len(processed_movies), folder)) else: - log.debug('Found no movies in the folder %s', (folder)) + log.debug('Found no movies in the folder %s', folder) return processed_movies @@ -508,6 +508,7 @@ class Scanner(Plugin): detected_languages = {} # Subliminal scanner + paths = None try: paths = group['files']['movie'] scan_result = [] @@ -560,12 +561,14 @@ class Scanner(Plugin): break # Check and see if nfo contains the imdb-id + nfo_file = None if not imdb_id: try: - for nfo_file in files['nfo']: - imdb_id = getImdb(nfo_file) + for nf in files['nfo']: + imdb_id = getImdb(nf) if imdb_id: - log.debug('Found movie via nfo file: %s', nfo_file) + log.debug('Found movie via nfo file: %s', nf) + nfo_file = nf break except: pass @@ -585,26 +588,16 @@ class Scanner(Plugin): # Check if path is already in db if not imdb_id: db = get_session() - for cur_file in files['movie']: - f = db.query(File).filter_by(path = toUnicode(cur_file)).first() + for cf in files['movie']: + f = db.query(File).filter_by(path = toUnicode(cf)).first() try: imdb_id = f.library[0].identifier - log.debug('Found movie via database: %s', cur_file) + log.debug('Found movie via database: %s', cf) + cur_file = cf break except: pass - # Search based on OpenSubtitleHash - if not imdb_id and not group['is_dvd']: - for cur_file in files['movie']: - movie = fireEvent('movie.by_hash', file = cur_file, merge = True) - - if len(movie) > 0: - imdb_id = movie[0].get('imdb') - if imdb_id: - log.debug('Found movie via OpenSubtitleHash: %s', cur_file) - break - # Search based on identifiers if not imdb_id: for identifier in group['identifiers']: @@ -691,10 +684,9 @@ class Scanner(Plugin): return getExt(s.lower()) in ['jpg', 'jpeg', 'png', 'gif', 'bmp', 'tbn'] files = set(filter(test, files)) - images = {} - - # Fanart - images['backdrop'] = set(filter(lambda s: re.search('(^|[\W_])fanart|backdrop\d*[\W_]', s.lower()) and self.filesizeBetween(s, 0, 5), files)) + images = { + 'backdrop': set(filter(lambda s: re.search('(^|[\W_])fanart|backdrop\d*[\W_]', s.lower()) and self.filesizeBetween(s, 0, 5), files)) + } # Rest images['rest'] = files - images['backdrop'] diff --git 
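The scanner hunk above now remembers which `.nfo` file actually produced the IMDB id (`nfo_file = nf`). Finding that id comes down to searching the file's text for a `tt` + digits token; below is a stand-alone stand-in for the `getImdb` helper (the real one lives in CouchPotato's helpers), with a hypothetical file list for the usage part:

```python
import re

def get_imdb(path):
    # Minimal stand-in for CouchPotato's getImdb helper: return the first
    # IMDB identifier (tt + digits) found in the file, or None.
    try:
        with open(path, 'r') as nfo:
            match = re.search(r'(tt\d{7,8})', nfo.read())
            return match.group(1) if match else None
    except IOError:
        return None

# Remember which nfo actually produced the id, like the scanner hunk above does.
imdb_id = nfo_file = None
for nf in ['movie.nfo', 'movie-sample.nfo']:
    imdb_id = get_imdb(nf)
    if imdb_id:
        nfo_file = nf
        break
print('%s (from %s)' % (imdb_id, nfo_file))
```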
a/couchpotato/core/plugins/score/main.py b/couchpotato/core/plugins/score/main.py index cc87c9a..5f9da1a 100644 --- a/couchpotato/core/plugins/score/main.py +++ b/couchpotato/core/plugins/score/main.py @@ -17,7 +17,7 @@ class Score(Plugin): addEvent('score.calculate', self.calculate) def calculate(self, nzb, movie): - ''' Calculate the score of a NZB, used for sorting later ''' + """ Calculate the score of a NZB, used for sorting later """ # Merge global and category preferred_words = splitString(Env.setting('preferred_words', section = 'searcher').lower()) diff --git a/couchpotato/core/plugins/score/scores.py b/couchpotato/core/plugins/score/scores.py index a95b0a4..6aa0b46 100644 --- a/couchpotato/core/plugins/score/scores.py +++ b/couchpotato/core/plugins/score/scores.py @@ -1,6 +1,6 @@ from couchpotato.core.event import fireEvent from couchpotato.core.helpers.encoding import simplifyString -from couchpotato.core.helpers.variable import tryInt, splitString +from couchpotato.core.helpers.variable import tryInt from couchpotato.environment import Env import re @@ -24,7 +24,7 @@ name_scores = [ def nameScore(name, year, preferred_words): - ''' Calculate score for words in the NZB name ''' + """ Calculate score for words in the NZB name """ score = 0 name = name.lower() @@ -34,11 +34,11 @@ def nameScore(name, year, preferred_words): v = value.split(':') add = int(v.pop()) if v.pop() in name: - score = score + add + score += add # points if the year is correct if str(year) in name: - score = score + 5 + score += 5 # Contains preferred word nzb_words = re.split('\W+', simplifyString(name)) diff --git a/couchpotato/core/plugins/status/main.py b/couchpotato/core/plugins/status/main.py index 8db2bf7..7546c65 100644 --- a/couchpotato/core/plugins/status/main.py +++ b/couchpotato/core/plugins/status/main.py @@ -75,7 +75,7 @@ class StatusPlugin(Plugin): def get(self, identifiers): - if not isinstance(identifiers, (list)): + if not isinstance(identifiers, list): identifiers = [identifiers] db = get_session() diff --git a/couchpotato/core/plugins/subtitle/main.py b/couchpotato/core/plugins/subtitle/main.py index d7424c7..0b494c1 100644 --- a/couchpotato/core/plugins/subtitle/main.py +++ b/couchpotato/core/plugins/subtitle/main.py @@ -36,13 +36,12 @@ class Subtitle(Plugin): files = [] for file in release.files.filter(FileType.status.has(identifier = 'movie')).all(): - files.append(file.path); + files.append(file.path) # get subtitles for those files subliminal.list_subtitles(files, cache_dir = Env.get('cache_dir'), multi = True, languages = self.getLanguages(), services = self.services) def searchSingle(self, group): - if self.isDisabled(): return try: diff --git a/couchpotato/core/plugins/suggestion/main.py b/couchpotato/core/plugins/suggestion/main.py index 25666af..2cedeba 100644 --- a/couchpotato/core/plugins/suggestion/main.py +++ b/couchpotato/core/plugins/suggestion/main.py @@ -1,13 +1,14 @@ from couchpotato import get_session from couchpotato.api import addApiView from couchpotato.core.event import fireEvent -from couchpotato.core.helpers.encoding import ss -from couchpotato.core.helpers.variable import splitString, md5 +from couchpotato.core.helpers.variable import splitString from couchpotato.core.plugins.base import Plugin -from couchpotato.core.settings.model import Media +from couchpotato.core.settings.model import Movie, Library from couchpotato.environment import Env +from sqlalchemy.orm import joinedload_all from sqlalchemy.sql.expression import or_ + class Suggestion(Plugin): def 
__init__(self): @@ -15,44 +16,53 @@ class Suggestion(Plugin): addApiView('suggestion.view', self.suggestView) addApiView('suggestion.ignore', self.ignoreView) - def suggestView(self, **kwargs): + def suggestView(self, limit = 6, **kwargs): movies = splitString(kwargs.get('movies', '')) ignored = splitString(kwargs.get('ignored', '')) - limit = kwargs.get('limit', 6) - - if not movies or len(movies) == 0: - db = get_session() - active_movies = db.query(Media) \ - .filter(or_(*[Media.status.has(identifier = s) for s in ['active', 'done']])).all() - movies = [x.library.identifier for x in active_movies] - - if not ignored or len(ignored) == 0: - ignored = splitString(Env.prop('suggest_ignore', default = '')) + seen = splitString(kwargs.get('seen', '')) cached_suggestion = self.getCache('suggestion_cached') if cached_suggestion: suggestions = cached_suggestion else: + + if not movies or len(movies) == 0: + db = get_session() + active_movies = db.query(Movie) \ + .options(joinedload_all('library')) \ + .filter(or_(*[Movie.status.has(identifier = s) for s in ['active', 'done']])).all() + movies = [x.library.identifier for x in active_movies] + + if not ignored or len(ignored) == 0: + ignored = splitString(Env.prop('suggest_ignore', default = '')) + if not seen or len(seen) == 0: + movies.extend(splitString(Env.prop('suggest_seen', default = ''))) + suggestions = fireEvent('movie.suggest', movies = movies, ignore = ignored, single = True) - self.setCache(md5(ss('suggestion_cached')), suggestions, timeout = 6048000) # Cache for 10 weeks + self.setCache('suggestion_cached', suggestions, timeout = 6048000) # Cache for 10 weeks return { 'success': True, 'count': len(suggestions), - 'suggestions': suggestions[:limit] + 'suggestions': suggestions[:int(limit)] } - def ignoreView(self, imdb = None, limit = 6, remove_only = False, **kwargs): + def ignoreView(self, imdb = None, limit = 6, remove_only = False, mark_seen = False, **kwargs): ignored = splitString(Env.prop('suggest_ignore', default = '')) + seen = splitString(Env.prop('suggest_seen', default = '')) + new_suggestions = [] if imdb: - if not remove_only: + if mark_seen: + seen.append(imdb) + Env.prop('suggest_seen', ','.join(set(seen))) + elif not remove_only: ignored.append(imdb) Env.prop('suggest_ignore', ','.join(set(ignored))) - new_suggestions = self.updateSuggestionCache(ignore_imdb = imdb, limit = limit, ignored = ignored) + new_suggestions = self.updateSuggestionCache(ignore_imdb = imdb, limit = limit, ignored = ignored, seen = seen) return { 'result': True, @@ -60,12 +70,13 @@ class Suggestion(Plugin): 'suggestions': new_suggestions[limit - 1:limit] } - def updateSuggestionCache(self, ignore_imdb = None, limit = 6, ignored = None): + def updateSuggestionCache(self, ignore_imdb = None, limit = 6, ignored = None, seen = None): # Combine with previous suggestion_cache cached_suggestion = self.getCache('suggestion_cached') new_suggestions = [] ignored = [] if not ignored else ignored + seen = [] if not seen else seen if ignore_imdb: for cs in cached_suggestion: @@ -75,10 +86,15 @@ class Suggestion(Plugin): # Get new results and add them if len(new_suggestions) - 1 < limit: + active_status, done_status = fireEvent('status.get', ['active', 'done'], single = True) + db = get_session() - active_movies = db.query(Media) \ - .filter(or_(*[Media.status.has(identifier = s) for s in ['active', 'done']])).all() - movies = [x.library.identifier for x in active_movies] + active_movies = db.query(Movie) \ + .join(Library) \ + 
.with_entities(Library.identifier) \ + .filter(Movie.status_id.in_([active_status.get('id'), done_status.get('id')])).all() + movies = [x[0] for x in active_movies] + movies.extend(seen) ignored.extend([x.get('imdb') for x in cached_suggestion]) suggestions = fireEvent('movie.suggest', movies = movies, ignore = list(set(ignored)), single = True) @@ -86,6 +102,6 @@ class Suggestion(Plugin): if suggestions: new_suggestions.extend(suggestions) - self.setCache(md5(ss('suggestion_cached')), new_suggestions, timeout = 6048000) + self.setCache('suggestion_cached', new_suggestions, timeout = 6048000) return new_suggestions diff --git a/couchpotato/core/plugins/suggestion/static/suggest.css b/couchpotato/core/plugins/suggestion/static/suggest.css index 2b05abf..c321ca2 100644 --- a/couchpotato/core/plugins/suggestion/static/suggest.css +++ b/couchpotato/core/plugins/suggestion/static/suggest.css @@ -105,7 +105,7 @@ bottom: 10px; right: 10px; display: none; - width: 120px; + width: 140px; } .suggestions .movie_result:hover .actions { display: block; diff --git a/couchpotato/core/plugins/suggestion/static/suggest.js b/couchpotato/core/plugins/suggestion/static/suggest.js index 16feca9..e622671 100644 --- a/couchpotato/core/plugins/suggestion/static/suggest.js +++ b/couchpotato/core/plugins/suggestion/static/suggest.js @@ -26,6 +26,20 @@ var SuggestList = new Class({ 'onComplete': self.fill.bind(self) }); + }, + 'click:relay(a.eye-open)': function(e, el){ + (e).stop(); + + $(el).getParent('.movie_result').destroy(); + + Api.request('suggestion.ignore', { + 'data': { + 'imdb': el.get('data-seen'), + 'mark_seen': 1 + }, + 'onComplete': self.fill.bind(self) + }); + } } }).grab( @@ -43,7 +57,7 @@ var SuggestList = new Class({ fill: function(json){ var self = this; - + if(!json) return; Object.each(json.suggestions, function(movie){ @@ -69,6 +83,10 @@ var SuggestList = new Class({ new Element('a.delete.icon2', { 'title': 'Don\'t suggest this movie again', 'data-ignore': movie.imdb + }), + new Element('a.eye-open.icon2', { + 'title': 'Seen it, like it, don\'t add', + 'data-seen': movie.imdb }) ) ); @@ -89,6 +107,8 @@ var SuggestList = new Class({ }); + self.fireEvent('loaded'); + }, afterAdded: function(m, movie){ diff --git a/couchpotato/core/plugins/trailer/main.py b/couchpotato/core/plugins/trailer/main.py index 1a8955f..e27e3f9 100644 --- a/couchpotato/core/plugins/trailer/main.py +++ b/couchpotato/core/plugins/trailer/main.py @@ -12,8 +12,8 @@ class Trailer(Plugin): def __init__(self): addEvent('renamer.after', self.searchSingle) - def searchSingle(self, message = None, group = {}): - + def searchSingle(self, message = None, group = None): + if not group: group = {} if self.isDisabled() or len(group['files']['trailer']) > 0: return trailers = fireEvent('trailer.search', group = group, merge = True) @@ -40,4 +40,3 @@ class Trailer(Plugin): break return True - diff --git a/couchpotato/core/providers/automation/imdb/main.py b/couchpotato/core/providers/automation/imdb/main.py index c4aef7f..e9d14b5 100644 --- a/couchpotato/core/providers/automation/imdb/main.py +++ b/couchpotato/core/providers/automation/imdb/main.py @@ -58,7 +58,7 @@ class IMDBWatchlist(IMDBBase): break except: - log.error('Failed loading IMDB watchlist: %s %s', (url, traceback.format_exc())) + log.error('Failed loading IMDB watchlist: %s %s', (watchlist_url, traceback.format_exc())) return movies diff --git a/couchpotato/core/providers/info/couchpotatoapi/main.py b/couchpotato/core/providers/info/couchpotatoapi/main.py index 
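The suggestion changes above persist the "ignored" and newly added "seen" lists as comma-separated properties, de-duplicated with `set()` on every write, and the new eye-open button in `suggest.js` posts `mark_seen: 1` to feed the seen list. A tiny model of that round-trip; `props` and `split_string` are stand-ins for `Env.prop` and the `splitString` helper:

```python
props = {}  # stand-in for Env.prop(...) storage (normally the settings file)

def split_string(value):
    # Rough equivalent of the splitString helper used in the diff.
    return [x.strip() for x in value.split(',') if x.strip()] if value else []

def mark_seen(imdb):
    seen = split_string(props.get('suggest_seen', ''))
    seen.append(imdb)
    # set() keeps the stored list free of duplicates, the same way the patch
    # handles both 'suggest_seen' and 'suggest_ignore'.
    props['suggest_seen'] = ','.join(set(seen))

mark_seen('tt1375666')
mark_seen('tt1375666')  # marking the same movie twice does not duplicate it
print(props['suggest_seen'])  # tt1375666
```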
cdbc513..ef7db1f 100644 --- a/couchpotato/core/providers/info/couchpotatoapi/main.py +++ b/couchpotato/core/providers/info/couchpotatoapi/main.py @@ -80,7 +80,10 @@ class CouchPotatoApi(MovieProvider): return dates - def getSuggestions(self, movies = [], ignore = []): + def getSuggestions(self, movies = None, ignore = None): + if not ignore: ignore = [] + if not movies: movies = [] + suggestions = self.getJsonData(self.urls['suggest'], params = { 'movies': ','.join(movies), 'ignore': ','.join(ignore), diff --git a/couchpotato/core/providers/info/omdbapi/main.py b/couchpotato/core/providers/info/omdbapi/main.py index 2726ef5..87bb0a7 100755 --- a/couchpotato/core/providers/info/omdbapi/main.py +++ b/couchpotato/core/providers/info/omdbapi/main.py @@ -98,7 +98,7 @@ class OMDBAPI(MovieProvider): 'mpaa': str(movie.get('Rated', '')), 'runtime': self.runtimeToMinutes(movie.get('Runtime', '')), 'released': movie.get('Released'), - 'year': year if isinstance(year, (int)) else None, + 'year': year if isinstance(year, int) else None, 'plot': movie.get('Plot'), 'genres': splitString(movie.get('Genre', '')), 'directors': splitString(movie.get('Director', '')), diff --git a/couchpotato/core/providers/info/themoviedb/main.py b/couchpotato/core/providers/info/themoviedb/main.py index e2ff937..387355f 100644 --- a/couchpotato/core/providers/info/themoviedb/main.py +++ b/couchpotato/core/providers/info/themoviedb/main.py @@ -1,8 +1,9 @@ from couchpotato.core.event import addEvent -from couchpotato.core.helpers.encoding import simplifyString, toUnicode +from couchpotato.core.helpers.encoding import simplifyString, toUnicode, ss +from couchpotato.core.helpers.variable import md5 from couchpotato.core.logger import CPLog from couchpotato.core.providers.info.base import MovieProvider -from themoviedb import tmdb +import tmdb3 import traceback log = CPLog(__name__) @@ -11,47 +12,16 @@ log = CPLog(__name__) class TheMovieDb(MovieProvider): def __init__(self): - addEvent('movie.by_hash', self.byHash) addEvent('movie.search', self.search, priority = 2) addEvent('movie.info', self.getInfo, priority = 2) - addEvent('movie.info_by_tmdb', self.getInfoByTMDBId) + addEvent('movie.info_by_tmdb', self.getInfo) - # Use base wrapper - tmdb.configure(self.conf('api_key')) - - def byHash(self, file): - ''' Find movie by hash ''' - - if self.isDisabled(): - return False - - cache_key = 'tmdb.cache.%s' % simplifyString(file) - results = self.getCache(cache_key) - - if not results: - log.debug('Searching for movie by hash: %s', file) - try: - raw = tmdb.searchByHashingFile(file) - - results = [] - if raw: - try: - results = self.parseMovie(raw) - log.info('Found: %s', results['titles'][0] + ' (' + str(results.get('year', 0)) + ')') - - self.setCache(cache_key, results) - return results - except SyntaxError, e: - log.error('Failed to parse XML response: %s', e) - return False - except: - log.debug('No movies known by hash for: %s', file) - pass - - return results + # Configure TMDB settings + tmdb3.set_key(self.conf('api_key')) + tmdb3.set_cache('null') def search(self, q, limit = 12): - ''' Find movie by name ''' + """ Find movie by name """ if self.isDisabled(): return False @@ -65,7 +35,7 @@ class TheMovieDb(MovieProvider): raw = None try: - raw = tmdb.search(search_string) + raw = tmdb3.searchMovie(search_string) except: log.error('Failed searching TMDB for "%s": %s', (search_string, traceback.format_exc())) @@ -75,7 +45,7 @@ class TheMovieDb(MovieProvider): nr = 0 for movie in raw: - results.append(self.parseMovie(movie)) + 
results.append(self.parseMovie(movie, with_titles = False)) nr += 1 if nr == limit: @@ -100,117 +70,87 @@ class TheMovieDb(MovieProvider): result = self.getCache(cache_key) if not result: - result = {} - movie = None - try: log.debug('Getting info: %s', cache_key) - movie = tmdb.imdbLookup(id = identifier) + movie = tmdb3.Movie(identifier) + result = self.parseMovie(movie) + self.setCache(cache_key, result) except: pass - if movie: - result = self.parseMovie(movie[0]) - self.setCache(cache_key, result) - return result - def getInfoByTMDBId(self, id = None): + def parseMovie(self, movie, with_titles = True): - cache_key = 'tmdb.cache.%s' % id - result = self.getCache(cache_key) + cache_key = 'tmdb.cache.%s' % movie.id + movie_data = self.getCache(cache_key) - if not result: - result = {} - movie = None + if not movie_data: + + # Images + poster = self.getImage(movie, type = 'poster', size = 'poster') + poster_original = self.getImage(movie, type = 'poster', size = 'original') + backdrop_original = self.getImage(movie, type = 'backdrop', size = 'original') + # Genres try: - log.debug('Getting info: %s', cache_key) - movie = tmdb.getMovieInfo(id = id) + genres = [genre.name for genre in movie.genres] except: - pass - - if movie: - result = self.parseMovie(movie) - self.setCache(cache_key, result) - - return result - - def parseMovie(self, movie): - - # Images - poster = self.getImage(movie, type = 'poster', size = 'cover') - #backdrop = self.getImage(movie, type = 'backdrop', size = 'w1280') - poster_original = self.getImage(movie, type = 'poster', size = 'original') - backdrop_original = self.getImage(movie, type = 'backdrop', size = 'original') - - # Genres - try: - genres = self.getCategory(movie, 'genre') - except: - genres = [] - - # 1900 is the same as None - year = str(movie.get('released', 'none'))[:4] - if year == '1900' or year.lower() == 'none': - year = None - - movie_data = { - 'via_tmdb': True, - 'tmdb_id': int(movie.get('id', 0)), - 'titles': [toUnicode(movie.get('name'))], - 'original_title': movie.get('original_name'), - 'images': { - 'poster': [poster] if poster else [], - #'backdrop': [backdrop] if backdrop else [], - 'poster_original': [poster_original] if poster_original else [], - 'backdrop_original': [backdrop_original] if backdrop_original else [], - }, - 'imdb': movie.get('imdb_id'), - 'mpaa': movie.get('certification', ''), - 'runtime': movie.get('runtime'), - 'released': movie.get('released'), - 'year': year, - 'plot': movie.get('overview'), - 'genres': genres, - } - - movie_data = dict((k, v) for k, v in movie_data.iteritems() if v) - - # Add alternative names - for alt in ['original_name', 'alternative_name']: - alt_name = toUnicode(movie.get(alt)) - if alt_name and not alt_name in movie_data['titles'] and alt_name.lower() != 'none' and alt_name != None: - movie_data['titles'].append(alt_name) + genres = [] + + # 1900 is the same as None + year = str(movie.releasedate or '')[:4] + if not movie.releasedate or year == '1900' or year.lower() == 'none': + year = None + + movie_data = { + 'via_tmdb': True, + 'tmdb_id': movie.id, + 'titles': [toUnicode(movie.title)], + 'original_title': movie.originaltitle, + 'images': { + 'poster': [poster] if poster else [], + #'backdrop': [backdrop] if backdrop else [], + 'poster_original': [poster_original] if poster_original else [], + 'backdrop_original': [backdrop_original] if backdrop_original else [], + }, + 'imdb': movie.imdb, + 'runtime': movie.runtime, + 'released': str(movie.releasedate), + 'year': year, + 'plot': 
movie.overview, + 'genres': genres, + } + + movie_data = dict((k, v) for k, v in movie_data.iteritems() if v) + + # Add alternative names + if with_titles: + movie_data['titles'].append(movie.originaltitle) + for alt in movie.alternate_titles: + alt_name = alt.title + if alt_name and not alt_name in movie_data['titles'] and alt_name.lower() != 'none' and alt_name is not None: + movie_data['titles'].append(alt_name) + + movie_data['titles'] = list(set(movie_data['titles'])) + + # Cache movie parsed + self.setCache(cache_key, movie_data) return movie_data - def getImage(self, movie, type = 'poster', size = 'cover'): + def getImage(self, movie, type = 'poster', size = 'poster'): image_url = '' - for image in movie.get('images', []): - if(image.get('type') == type) and image.get(size): - image_url = image.get(size) - break + try: + image_url = getattr(movie, type).geturl(size='original') + except: + log.debug('Failed getting %s.%s for "%s"', (type, size, movie.title)) return image_url - def getCategory(self, movie, type = 'genre'): - - cats = movie.get('categories', {}).get(type) - - categories = [] - for category in cats: - try: - categories.append(category) - except: - pass - - return categories - def isDisabled(self): if self.conf('api_key') == '': log.error('No API key provided.') - True - else: - False + return True + return False diff --git a/couchpotato/core/providers/metadata/base.py b/couchpotato/core/providers/metadata/base.py index d2393dd..f561003 100644 --- a/couchpotato/core/providers/metadata/base.py +++ b/couchpotato/core/providers/metadata/base.py @@ -17,8 +17,9 @@ class MetaDataBase(Plugin): def __init__(self): addEvent('renamer.after', self.create) - def create(self, message = None, group = {}): + def create(self, message = None, group = None): if self.isDisabled(): return + if not group: group = {} log.info('Creating %s metadata.', self.getName()) @@ -48,6 +49,11 @@ class MetaDataBase(Plugin): log.debug('Creating %s file: %s', (file_type, name)) if os.path.isfile(content): shutil.copy2(content, name) + shutil.copyfile(content, name) + + # Try and copy stats seperately + try: shutil.copystat(content, name) + except: pass else: self.createFile(name, content) group['renamed_files'].append(name) @@ -60,7 +66,8 @@ class MetaDataBase(Plugin): except: log.error('Unable to create %s file: %s', (file_type, traceback.format_exc())) - def getRootName(self, data = {}): + def getRootName(self, data = None): + if not data: data = {} return os.path.join(data['destination_dir'], data['filename']) def getFanartName(self, name, root): @@ -72,13 +79,19 @@ class MetaDataBase(Plugin): def getNfoName(self, name, root): return - def getNfo(self, movie_info = {}, data = {}): - return + def getNfo(self, movie_info = None, data = None): + if not data: data = {} + if not movie_info: movie_info = {} - def getThumbnail(self, movie_info = {}, data = {}, wanted_file_type = 'poster_original'): + def getThumbnail(self, movie_info = None, data = None, wanted_file_type = 'poster_original'): + if not data: data = {} + if not movie_info: movie_info = {} file_types = fireEvent('file.types', single = True) - for file_type in file_types: - if file_type.get('identifier') == wanted_file_type: + file_type = {} + + for ft in file_types: + if ft.get('identifier') == wanted_file_type: + file_type = ft break # See if it is in current files @@ -94,5 +107,7 @@ class MetaDataBase(Plugin): except: pass - def getFanart(self, movie_info = {}, data = {}): + def getFanart(self, movie_info = None, data = None): + if not data: 
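The TheMovieDb provider above now talks to TMDB through the pytmdb3 wrapper instead of the removed XML-based libs/themoviedb module. A minimal sketch of the calls it relies on, assuming the tmdb3 package is importable and that the API key and lookup values below (placeholders) are replaced with real ones:

    import tmdb3

    tmdb3.set_key('YOUR_TMDB_API_KEY')   # same setup the provider now does in __init__
    tmdb3.set_cache('null')              # skip tmdb3's own cache; the plugin caches parsed results itself

    # movie.search path: searchMovie() yields Movie objects with attributes
    # (.id, .title, .releasedate, ...) instead of the old parsed-XML dicts.
    for nr, movie in enumerate(tmdb3.searchMovie('The Matrix')):
        print movie.id, movie.title, movie.releasedate
        if nr == 4:
            break

    # movie.info / movie.info_by_tmdb path: look a movie up directly by TMDB id.
    movie = tmdb3.Movie(550)             # any TMDB id
    print movie.title, movie.overview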
data = {} + if not movie_info: movie_info = {} return self.getThumbnail(movie_info = movie_info, data = data, wanted_file_type = 'backdrop_original') diff --git a/couchpotato/core/providers/metadata/xbmc/main.py b/couchpotato/core/providers/metadata/xbmc/main.py index 820df15..e865e2d 100644 --- a/couchpotato/core/providers/metadata/xbmc/main.py +++ b/couchpotato/core/providers/metadata/xbmc/main.py @@ -24,7 +24,9 @@ class XBMC(MetaDataBase): def createMetaName(self, basename, name, root): return os.path.join(root, basename.replace('%s', name)) - def getNfo(self, movie_info = {}, data = {}): + def getNfo(self, movie_info = None, data = None): + if not data: data = {} + if not movie_info: movie_info = {} # return imdb url only if self.conf('meta_url_only'): diff --git a/couchpotato/core/providers/nzb/binsearch/main.py b/couchpotato/core/providers/nzb/binsearch/main.py index 1d86300..dee5fc7 100644 --- a/couchpotato/core/providers/nzb/binsearch/main.py +++ b/couchpotato/core/providers/nzb/binsearch/main.py @@ -86,8 +86,10 @@ class BinSearch(NZBProvider): def download(self, url = '', nzb_id = ''): - params = {'action': 'nzb'} - params[nzb_id] = 'on' + params = { + 'action': 'nzb', + nzb_id: 'on' + } try: return self.urlopen(url, params = params, show_error = False) diff --git a/couchpotato/core/providers/nzb/newznab/main.py b/couchpotato/core/providers/nzb/newznab/main.py index 8eb3e84..02ffcfd 100644 --- a/couchpotato/core/providers/nzb/newznab/main.py +++ b/couchpotato/core/providers/nzb/newznab/main.py @@ -118,7 +118,7 @@ class Newznab(NZBProvider, RSS): return list - def belongsTo(self, url, provider = None): + def belongsTo(self, url, provider = None, host = None): hosts = self.getHosts() diff --git a/couchpotato/core/providers/torrent/publichd/main.py b/couchpotato/core/providers/torrent/publichd/main.py index c93f5cd..7b497fd 100644 --- a/couchpotato/core/providers/torrent/publichd/main.py +++ b/couchpotato/core/providers/torrent/publichd/main.py @@ -68,14 +68,21 @@ class PublicHD(TorrentMagnetProvider): def getMoreInfo(self, item): - try: - full_description = self.getCache('publichd.%s' % item['id'], item['detail_url'], cache_timeout = 25920000) - html = BeautifulSoup(full_description) - nfo_pre = html.find('div', attrs = {'id':'torrmain'}) - description = toUnicode(nfo_pre.text) if nfo_pre else '' - except: - log.error('Failed getting more info for %s', item['name']) - description = '' + cache_key = 'publichd.%s' % item['id'] + description = self.getCache(cache_key) + + if not description: + + try: + full_description = self.urlopen(item['detail_url']) + html = BeautifulSoup(full_description) + nfo_pre = html.find('div', attrs = {'id':'torrmain'}) + description = toUnicode(nfo_pre.text) if nfo_pre else '' + except: + log.error('Failed getting more info for %s', item['name']) + description = '' + + self.setCache(cache_key, description, timeout = 25920000) item['description'] = description return item diff --git a/couchpotato/core/providers/torrent/scenehd/main.py b/couchpotato/core/providers/torrent/scenehd/main.py index f471ec0..2b76e43 100644 --- a/couchpotato/core/providers/torrent/scenehd/main.py +++ b/couchpotato/core/providers/torrent/scenehd/main.py @@ -65,7 +65,7 @@ class SceneHD(TorrentProvider): log.error('Failed getting results from %s: %s', (self.getName(), traceback.format_exc())) - def getLoginParams(self, params): + def getLoginParams(self): return tryUrlencode({ 'username': self.conf('username'), 'password': self.conf('password'), diff --git 
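Both the metadata writer above and the settings backup later in the runner switch from shutil.copy2() to shutil.copyfile() followed by a guarded shutil.copystat(). A small standalone sketch of that pattern; the rationale (a failing stat/timestamp copy should not abort the data copy) is my reading of the change rather than something the patch states:

    import shutil

    def copy_with_best_effort_stats(src, dst):
        shutil.copyfile(src, dst)        # copy the file contents; errors here should surface
        try:
            shutil.copystat(src, dst)    # permissions and timestamps are nice to have only
        except OSError:
            pass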
a/couchpotato/core/providers/torrent/thepiratebay/main.py b/couchpotato/core/providers/torrent/thepiratebay/main.py index 82cbbe9..6aa2216 100644 --- a/couchpotato/core/providers/torrent/thepiratebay/main.py +++ b/couchpotato/core/providers/torrent/thepiratebay/main.py @@ -86,10 +86,10 @@ class ThePirateBay(TorrentMagnetProvider): if link and download: def extra_score(item): - trusted = (0, 10)[result.find('img', alt = re.compile('Trusted')) != None] - vip = (0, 20)[result.find('img', alt = re.compile('VIP')) != None] - confirmed = (0, 30)[result.find('img', alt = re.compile('Helpers')) != None] - moderated = (0, 50)[result.find('img', alt = re.compile('Moderator')) != None] + trusted = (0, 10)[result.find('img', alt = re.compile('Trusted')) is not None] + vip = (0, 20)[result.find('img', alt = re.compile('VIP')) is not None] + confirmed = (0, 30)[result.find('img', alt = re.compile('Helpers')) is not None] + moderated = (0, 50)[result.find('img', alt = re.compile('Moderator')) is not None] return confirmed + trusted + vip + moderated diff --git a/couchpotato/core/providers/userscript/allocine/main.py b/couchpotato/core/providers/userscript/allocine/main.py index 8cc889e..f8ca630 100644 --- a/couchpotato/core/providers/userscript/allocine/main.py +++ b/couchpotato/core/providers/userscript/allocine/main.py @@ -19,9 +19,6 @@ class AlloCine(UserscriptBase): except: return - name = None - year = None - try: start = data.find('') end = data.find('', start) diff --git a/couchpotato/core/providers/userscript/tmdb/main.py b/couchpotato/core/providers/userscript/tmdb/main.py index 6205851..cab38fc 100644 --- a/couchpotato/core/providers/userscript/tmdb/main.py +++ b/couchpotato/core/providers/userscript/tmdb/main.py @@ -9,7 +9,7 @@ class TMDB(UserscriptBase): def getMovie(self, url): match = re.search('(?P\d+)', url) - movie = fireEvent('movie.info_by_tmdb', id = match.group('id'), merge = True) + movie = fireEvent('movie.info_by_tmdb', identifier = match.group('id'), merge = True) if movie['imdb']: return self.getInfo(movie['imdb']) diff --git a/couchpotato/core/settings/__init__.py b/couchpotato/core/settings/__init__.py index e08adb8..61d982f 100644 --- a/couchpotato/core/settings/__init__.py +++ b/couchpotato/core/settings/__init__.py @@ -1,13 +1,10 @@ from __future__ import with_statement from couchpotato.api import addApiView from couchpotato.core.event import addEvent, fireEvent -from couchpotato.core.helpers.encoding import isInt, toUnicode +from couchpotato.core.helpers.encoding import toUnicode from couchpotato.core.helpers.variable import mergeDicts, tryInt, tryFloat from couchpotato.core.settings.model import Properties import ConfigParser -import os.path -import time -import traceback class Settings(object): @@ -75,7 +72,9 @@ class Settings(object): addEvent('settings.register', self.registerDefaults) addEvent('settings.save', self.save) - def registerDefaults(self, section_name, options = {}, save = True): + def registerDefaults(self, section_name, options = None, save = True): + if not options: options = {} + self.addSection(section_name) for option_name, option in options.iteritems(): @@ -92,7 +91,7 @@ class Settings(object): self.setType(section_name, option_name, option.get('type')) if save: - self.save(self) + self.save() def set(self, section, option, value): return self.p.set(section, option, value) diff --git a/couchpotato/core/settings/model.py b/couchpotato/core/settings/model.py index 911f78a..1c2c049 100644 --- a/couchpotato/core/settings/model.py +++ 
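ThePirateBay's extra_score() above keeps its tuple-indexing idiom but now tests with `is not None`; the same `== None` cleanups appear in environment.py below. A tiny standalone illustration of the idiom, which indexes a two-element tuple with a boolean (False selects index 0, True index 1); `found_img` is a stand-in for the BeautifulSoup lookup result:

    found_img = None                        # stand-in for result.find('img', alt = ...)
    vip = (0, 20)[found_img is not None]    # 0 when nothing was found, 20 when a match exists
    assert vip == (20 if found_img is not None else 0)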
b/couchpotato/core/settings/model.py @@ -240,7 +240,10 @@ class Release(Entity): files = ManyToMany('File') info = OneToMany('ReleaseInfo', cascade = 'all, delete-orphan') - def to_dict(self, deep = {}, exclude = []): + def to_dict(self, deep = None, exclude = None): + if not exclude: exclude = [] + if not deep: deep = {} + orig_dict = super(Release, self).to_dict(deep = deep, exclude = exclude) new_info = {} @@ -302,7 +305,10 @@ class Profile(Entity): media = OneToMany('Media') types = OneToMany('ProfileType', cascade = 'all, delete-orphan') - def to_dict(self, deep = {}, exclude = []): + def to_dict(self, deep = None, exclude = None): + if not exclude: exclude = [] + if not deep: deep = {} + orig_dict = super(Profile, self).to_dict(deep = deep, exclude = exclude) orig_dict['core'] = orig_dict.get('core') or False orig_dict['hide'] = orig_dict.get('hide') or False diff --git a/couchpotato/environment.py b/couchpotato/environment.py index ac0f729..0f04d83 100644 --- a/couchpotato/environment.py +++ b/couchpotato/environment.py @@ -74,7 +74,7 @@ class Env(object): s = Env.get('settings') # Return setting - if value == None: + if value is None: return s.get(attr, default = default, section = section, type = type) # Set setting @@ -86,7 +86,7 @@ class Env(object): @staticmethod def prop(identifier, value = None, default = None): s = Env.get('settings') - if value == None: + if value is None: v = s.getProperty(identifier) return v if v else default diff --git a/couchpotato/runner.py b/couchpotato/runner.py index c3783b0..3e1505e 100644 --- a/couchpotato/runner.py +++ b/couchpotato/runner.py @@ -1,6 +1,6 @@ from argparse import ArgumentParser from cache import FileSystemCache -from couchpotato import KeyHandler +from couchpotato import KeyHandler, LoginHandler, LogoutHandler from couchpotato.api import NonBlockHandler, ApiHandler from couchpotato.core.event import fireEventAsync, fireEvent from couchpotato.core.helpers.encoding import toUnicode @@ -91,7 +91,12 @@ def runCouchPotato(options, base_path, args, data_dir = None, log_dir = None, En src_files = [options.config_file, db_path, db_path + '-shm', db_path + '-wal'] for src_file in src_files: if os.path.isfile(src_file): - shutil.copy2(src_file, toUnicode(os.path.join(new_backup, os.path.basename(src_file)))) + dst_file = toUnicode(os.path.join(new_backup, os.path.basename(src_file))) + shutil.copyfile(src_file, dst_file) + + # Try and copy stats seperately + try: shutil.copystat(src_file, dst_file) + except: pass # Remove older backups, keep backups 3 days or at least 3 backups = [] @@ -109,7 +114,7 @@ def runCouchPotato(options, base_path, args, data_dir = None, log_dir = None, En try: if os.path.isfile(file_path): os.remove(file_path) - except Exception, e: + except: raise os.rmdir(backup) @@ -230,10 +235,11 @@ def runCouchPotato(options, base_path, args, data_dir = None, log_dir = None, En log_function = lambda x : None, debug = config['use_reloader'], gzip = True, + cookie_secret = api_key, + login_url = '%slogin/' % web_base, ) Env.set('app', application) - # Request handlers application.add_handlers(".*$", [ (r'%snonblock/(.*)(/?)' % api_base, NonBlockHandler), @@ -243,18 +249,22 @@ def runCouchPotato(options, base_path, args, data_dir = None, log_dir = None, En (r'%sgetkey(/?)' % web_base, KeyHandler), # Get API key (r'%s' % api_base, RedirectHandler, {"url": web_base + 'docs/'}), # API docs + # Login handlers + (r'%slogin(/?)' % web_base, LoginHandler), + (r'%slogout(/?)' % web_base, LogoutHandler), + # Catch all webhandlers 
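The to_dict() signatures just above, like registerDefaults() and the provider/metadata methods earlier in the patch, all replace mutable default arguments ({} or []) with None plus an in-body guard. A short self-contained example of why: default values are evaluated once at definition time, so a mutable default is shared between calls:

    def to_list_bad(item, acc = []):
        acc.append(item)
        return acc

    def to_list_good(item, acc = None):
        if not acc: acc = []                # the guard style used throughout the patch
        acc.append(item)
        return acc

    assert to_list_bad('a') == ['a']
    assert to_list_bad('b') == ['a', 'b']   # state leaked in from the first call
    assert to_list_good('a') == ['a']
    assert to_list_good('b') == ['b']       # a fresh list every call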
(r'%s(.*)(/?)' % web_base, WebHandler), (r'(.*)', WebHandler), ]) # Static paths - static_path = '%sstatic/' % api_base + static_path = '%sstatic/' % web_base for dir_name in ['fonts', 'images', 'scripts', 'style']: application.add_handlers(".*$", [ ('%s%s/(.*)' % (static_path, dir_name), StaticFileHandler, {'path': toUnicode(os.path.join(base_path, 'couchpotato', 'static', dir_name))}) ]) - Env.set('static_path', static_path); + Env.set('static_path', static_path) # Load configs & plugins diff --git a/couchpotato/static/fonts/Lobster-webfont.eot b/couchpotato/static/fonts/Lobster-webfont.eot new file mode 100755 index 0000000..56f66aa Binary files /dev/null and b/couchpotato/static/fonts/Lobster-webfont.eot differ diff --git a/couchpotato/static/fonts/Lobster-webfont.svg b/couchpotato/static/fonts/Lobster-webfont.svg new file mode 100755 index 0000000..e445583 --- /dev/null +++ b/couchpotato/static/fonts/Lobster-webfont.svg @@ -0,0 +1,244 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + \ No newline at end of file diff --git a/couchpotato/static/fonts/Lobster-webfont.ttf b/couchpotato/static/fonts/Lobster-webfont.ttf new file mode 100755 index 0000000..4c46e93 Binary files /dev/null and b/couchpotato/static/fonts/Lobster-webfont.ttf differ diff --git a/couchpotato/static/fonts/Lobster-webfont.woff b/couchpotato/static/fonts/Lobster-webfont.woff new file mode 100755 index 0000000..af59caa Binary files /dev/null and b/couchpotato/static/fonts/Lobster-webfont.woff differ diff --git a/couchpotato/static/scripts/api.js b/couchpotato/static/scripts/api.js index 5e507bc..38d1874 100644 --- a/couchpotato/static/scripts/api.js +++ b/couchpotato/static/scripts/api.js @@ -1,7 +1,7 @@ var ApiClass = new Class({ setup: function(options){ - var self = this + var self = this; self.options = options; }, @@ -13,7 +13,7 @@ var ApiClass = new Class({ return new Request[r_type](Object.merge({ 'callbackKey': 'callback_func', 'method': 'get', - 'url': self.createUrl(type, {'t': randomString()}), + 'url': self.createUrl(type, {'t': randomString()}) }, options)).send() }, @@ -26,4 +26,4 @@ var ApiClass = new Class({ } }); -window.Api = new ApiClass() \ No newline at end of file +window.Api = new ApiClass(); \ No newline at end of file diff --git a/couchpotato/static/scripts/block.js b/couchpotato/static/scripts/block.js index 82193ca..7407b7f 100644 --- a/couchpotato/static/scripts/block.js +++ b/couchpotato/static/scripts/block.js @@ -36,4 +36,4 @@ var BlockBase = new Class({ }); -var Block = BlockBase \ No newline at end of file +var Block = BlockBase; \ No newline at end of file diff --git a/couchpotato/static/scripts/block/menu.js b/couchpotato/static/scripts/block/menu.js index 8d315f5..91e29a2 100644 --- a/couchpotato/static/scripts/block/menu.js +++ b/couchpotato/static/scripts/block/menu.js @@ -18,11 +18,11 @@ Block.Menu = new Class({ self.button = new Element('a.button' + (self.options.button_class ? '.' + self.options.button_class : ''), { 'events': { 'click': function(){ - self.el.toggleClass('show') - self.fireEvent(self.el.hasClass('show') ? 
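The runner hunk that ends above passes two new Tornado application settings: cookie_secret (set to the API key) so Tornado can sign secure cookies, and login_url so tornado.web.authenticated can redirect requests that have no current user to the newly registered LoginHandler route. A minimal sketch of just that wiring, with placeholder values:

    from tornado.web import Application

    web_base = '/'
    application = Application(
        handlers = [],                            # CouchPotato registers its routes later via add_handlers()
        cookie_secret = 'replace-with-api-key',   # secret used to sign secure cookies
        login_url = '%slogin/' % web_base,        # target of the unauthenticated redirect
    )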
'open' : 'close') + self.el.toggleClass('show'); + self.fireEvent(self.el.hasClass('show') ? 'open' : 'close'); if(self.el.hasClass('show')){ - self.el.addEvent('outerClick', self.removeOuterClick.bind(self)) + self.el.addEvent('outerClick', self.removeOuterClick.bind(self)); this.addEvent('outerClick', function(e){ if(e.target.get('tag') != 'input') self.removeOuterClick() @@ -41,7 +41,7 @@ Block.Menu = new Class({ removeOuterClick: function(){ var self = this; - self.el.removeClass('show') + self.el.removeClass('show'); self.el.removeEvents('outerClick'); self.button.removeEvents('outerClick'); @@ -49,8 +49,7 @@ Block.Menu = new Class({ addLink: function(tab, position){ var self = this; - var el = new Element('li').adopt(tab).inject(self.more_option_ul, position || 'bottom'); - return el; + return new Element('li').adopt(tab).inject(self.more_option_ul, position || 'bottom'); } }); \ No newline at end of file diff --git a/couchpotato/static/scripts/block/navigation.js b/couchpotato/static/scripts/block/navigation.js index 8389ff9..f5642df 100644 --- a/couchpotato/static/scripts/block/navigation.js +++ b/couchpotato/static/scripts/block/navigation.js @@ -5,7 +5,6 @@ Block.Navigation = new Class({ create: function(){ var self = this; - var settings_added = false; self.el = new Element('div.navigation').adopt( self.foldout = new Element('a.foldout.icon2.menu', { 'events': { @@ -28,7 +27,7 @@ Block.Navigation = new Class({ 'duration': 100 } }) - ) + ); new ScrollSpy({ min: 400, @@ -58,7 +57,7 @@ Block.Navigation = new Class({ }, - toggleMenu: function(e){ + toggleMenu: function(){ var self = this, body = $(document.body), html = body.getParent(); diff --git a/couchpotato/static/scripts/couchpotato.js b/couchpotato/static/scripts/couchpotato.js index 1c8d460..26178fb 100644 --- a/couchpotato/static/scripts/couchpotato.js +++ b/couchpotato/static/scripts/couchpotato.js @@ -15,7 +15,7 @@ var self = this; self.setOptions(options); - self.c = $(document.body) + self.c = $(document.body); self.route = new Route(self.defaults); @@ -48,7 +48,6 @@ }, pushState: function(e){ - var self = this; if((!e.meta && Browser.Platform.mac) || (!e.control && !Browser.Platform.mac)){ (e).preventDefault(); var url = e.target.get('href'); @@ -112,11 +111,11 @@ 'click': self.shutdownQA.bind(self) } }) - ] + ]; setting_links.each(function(a){ self.block.more.addLink(a) - }) + }); new ScrollSpy({ @@ -134,7 +133,7 @@ var self = this; Object.each(Page, function(page_class, class_name){ - pg = new Page[class_name](self, {}); + var pg = new Page[class_name](self, {}); self.pages[class_name] = pg; $(pg).inject(self.content); @@ -157,7 +156,7 @@ return; if(self.current_page) - self.current_page.hide() + self.current_page.hide(); try { var page = self.pages[page_name] || self.pages.Home; @@ -191,7 +190,7 @@ self.checkAvailable(1000); }, - shutdownQA: function(e){ + shutdownQA: function(){ var self = this; var q = new Question('Are you sure you want to shutdown CouchPotato?', '', [{ @@ -240,7 +239,7 @@ checkForUpdate: function(onComplete){ var self = this; - Updater.check(onComplete) + Updater.check(onComplete); self.blockPage('Please wait. 
If this takes too long, something must have gone wrong.', 'Checking for updates'); self.checkAvailable(3000); @@ -258,7 +257,7 @@ }, 'onSuccess': function(){ if(onAvailable) - onAvailable() + onAvailable(); self.unBlockPage(); self.fireEvent('reload'); } @@ -272,7 +271,6 @@ self.unBlockPage(); - var body = $(document.body); self.mask = new Element('div.mask').adopt( new Element('div').adopt( new Element('h1', {'text': title || 'Unavailable'}), @@ -329,7 +327,7 @@ 'target': '', 'events': { 'click': function(e){ - (e).stop() + (e).stop(); alert('Drag it to your bookmark ;)') } } @@ -352,35 +350,35 @@ var Route = new Class({ params: {}, initialize: function(defaults){ - var self = this + var self = this; self.defaults = defaults }, parse: function(){ var self = this; - var rep = function(pa){ + var rep = function (pa) { return pa.replace(Api.getOption('url'), '/').replace(App.getOption('base_url'), '/') - } + }; - var path = rep(History.getPath()) + var path = rep(History.getPath()); if(path == '/' && location.hash){ path = rep(location.hash.replace('#', '/')) } - self.current = path.replace(/^\/+|\/+$/g, '') - var url = self.current.split('/') + self.current = path.replace(/^\/+|\/+$/g, ''); + var url = self.current.split('/'); - self.page = (url.length > 0) ? url.shift() : self.defaults.page - self.action = (url.length > 0) ? url.shift() : self.defaults.action + self.page = (url.length > 0) ? url.shift() : self.defaults.page; + self.action = (url.length > 0) ? url.shift() : self.defaults.action; self.params = Object.merge({}, self.defaults.params); if(url.length > 1){ - var key + var key; url.each(function(el, nr){ if(nr%2 == 0) - key = el + key = el; else if(key) { - self.params[key] = el + self.params[key] = el; key = null } }) @@ -488,8 +486,8 @@ function randomString(length, extra) { var comparer = function(a, b) { for (var i = 0, l = keyPaths.length; i < l; i++) { - aVal = valueOf(a, keyPaths[i].path); - bVal = valueOf(b, keyPaths[i].path); + var aVal = valueOf(a, keyPaths[i].path), + bVal = valueOf(b, keyPaths[i].path); if (aVal > bVal) return keyPaths[i].sign; if (aVal < bVal) return -keyPaths[i].sign; } @@ -530,4 +528,4 @@ var createSpinner = function(target, options){ }, options); return new Spinner(opts).spin(target); -} \ No newline at end of file +}; diff --git a/couchpotato/static/scripts/page.js b/couchpotato/static/scripts/page.js index 1af800e..58ba5ac 100644 --- a/couchpotato/static/scripts/page.js +++ b/couchpotato/static/scripts/page.js @@ -12,7 +12,7 @@ var PageBase = new Class({ initialize: function(options) { var self = this; - self.setOptions(options) + self.setOptions(options); // Create main page container self.el = new Element('div.page.'+self.name); @@ -74,4 +74,4 @@ var PageBase = new Class({ } }); -var Page = {} +var Page = {}; diff --git a/couchpotato/static/scripts/page/about.js b/couchpotato/static/scripts/page/about.js index ba451c8..f931335 100644 --- a/couchpotato/static/scripts/page/about.js +++ b/couchpotato/static/scripts/page/about.js @@ -13,7 +13,7 @@ var AboutSettingTab = new Class({ addSettings: function(){ var self = this; - self.settings = App.getPage('Settings') + self.settings = App.getPage('Settings'); self.settings.addEvent('create', function(){ var tab = self.settings.createTab('about', { 'label': 'About', @@ -72,7 +72,7 @@ var AboutSettingTab = new Class({ ); if(!self.fillVersion(Updater.getInfo())) - Updater.addEvent('loaded', self.fillVersion.bind(self)) + Updater.addEvent('loaded', self.fillVersion.bind(self)); self.settings.createGroup({ 
'name': 'Help Support CouchPotato' diff --git a/couchpotato/static/scripts/page/home.js b/couchpotato/static/scripts/page/home.js index 01344ad..b93db5b 100644 --- a/couchpotato/static/scripts/page/home.js +++ b/couchpotato/static/scripts/page/home.js @@ -5,7 +5,7 @@ Page.Home = new Class({ name: 'home', title: 'Manage new stuff for things and such', - indexAction: function(param){ + indexAction: function () { var self = this; if(self.soon_list){ @@ -14,10 +14,24 @@ Page.Home = new Class({ self.available_list.update(); self.late_list.update(); - return + return; } - // Snatched + self.chain = new Chain(); + self.chain.chain( + self.createAvailable.bind(self), + self.createSoon.bind(self), + self.createSuggestions.bind(self), + self.createLate.bind(self) + ); + + self.chain.callChain(); + + }, + + createAvailable: function(){ + var self = this; + self.available_list = new MovieList({ 'navigation': false, 'identifier': 'snatched', @@ -40,9 +54,19 @@ Page.Home = new Class({ 'filter': { 'release_status': 'snatched,available' }, - 'limit': null + 'limit': null, + 'onLoaded': function(){ + self.chain.callChain(); + } }); + $(self.available_list).inject(self.el); + + }, + + createSoon: function(){ + var self = this; + // Coming Soon self.soon_list = new MovieList({ 'navigation': false, @@ -50,10 +74,6 @@ Page.Home = new Class({ 'limit': 12, 'title': 'Available soon', 'description': 'These are being searched for and should be available soon as they will be released on DVD in the next few weeks.', - 'on_empty_element': new Element('div').adopt( - new Element('h2', {'text': 'Available soon'}), - new Element('span', {'text': 'There are no movies available soon. Add some movies, so you have something to watch later.'}) - ), 'filter': { 'random': true }, @@ -61,7 +81,10 @@ Page.Home = new Class({ 'load_more': false, 'view': 'thumbs', 'force_view': true, - 'api_call': 'dashboard.soon' + 'api_call': 'dashboard.soon', + 'onLoaded': function(){ + self.chain.callChain(); + } }); // Make all thumbnails the same size @@ -99,10 +122,30 @@ Page.Home = new Class({ images.setStyle('height', highest); }).delay(300); }); + }); + $(self.soon_list).inject(self.el); + + }, + + createSuggestions: function(){ + var self = this; + // Suggest - self.suggestion_list = new SuggestList(); + self.suggestion_list = new SuggestList({ + 'onLoaded': function(){ + self.chain.callChain(); + } + }); + + $(self.suggestion_list).inject(self.el); + + + }, + + createLate: function(){ + var self = this; // Still not available self.late_list = new MovieList({ @@ -110,7 +153,7 @@ Page.Home = new Class({ 'identifier': 'late', 'limit': 50, 'title': 'Still not available', - 'description': 'Try another quality profile or maybe add more providers in Settings.', + 'description': 'Try another quality profile or maybe add more providers in Settings.', 'filter': { 'late': true }, @@ -118,25 +161,14 @@ Page.Home = new Class({ 'load_more': false, 'view': 'list', 'actions': [MA.IMDB, MA.Trailer, MA.Edit, MA.Refresh, MA.Delete], - 'api_call': 'dashboard.soon' + 'api_call': 'dashboard.soon', + 'onLoaded': function(){ + self.chain.callChain(); + } }); - self.el.adopt( - $(self.available_list), - $(self.soon_list), - $(self.suggestion_list), - $(self.late_list) - ); - - // Recent - // Snatched - // Renamed - // Added - - // Free space - - // Shortcuts + $(self.late_list).inject(self.el); } -}) \ No newline at end of file +}); \ No newline at end of file diff --git a/couchpotato/static/scripts/page/manage.js b/couchpotato/static/scripts/page/manage.js 
index 36b1ef8..4827f51 100644 --- a/couchpotato/static/scripts/page/manage.js +++ b/couchpotato/static/scripts/page/manage.js @@ -5,7 +5,7 @@ Page.Manage = new Class({ name: 'manage', title: 'Do stuff to your existing movies!', - indexAction: function(param){ + indexAction: function(){ var self = this; if(!self.list){ @@ -73,7 +73,7 @@ Page.Manage = new Class({ 'data': { 'full': +full } - }) + }); self.startProgressInterval(); @@ -108,7 +108,7 @@ Page.Manage = new Class({ return; if(!self.progress_container) - self.progress_container = new Element('div.progress').inject(self.list.navigation, 'after') + self.progress_container = new Element('div.progress').inject(self.list.navigation, 'after'); self.progress_container.empty(); diff --git a/couchpotato/static/scripts/page/settings.js b/couchpotato/static/scripts/page/settings.js index f760754..68b41d0 100644 --- a/couchpotato/static/scripts/page/settings.js +++ b/couchpotato/static/scripts/page/settings.js @@ -46,16 +46,16 @@ Page.Settings = new Class({ var t = self.tabs[tab_name] || self.tabs[self.action] || self.tabs.general; // Subtab - var subtab = null + var subtab = null; Object.each(self.params, function(param, subtab_name){ subtab = subtab_name; - }) + }); self.el.getElements('li.'+c+' , .tab_content.'+c).each(function(active){ active.removeClass(c); }); - if (t.subtabs[subtab]){ + if(t.subtabs[subtab]){ t.tab[a](c); t.subtabs[subtab].tab[a](c); t.subtabs[subtab].content[a](c); @@ -87,7 +87,7 @@ Page.Settings = new Class({ self.data = json; onComplete(json); } - }) + }); return self.data; }, @@ -139,7 +139,7 @@ Page.Settings = new Class({ Object.each(json.options, function(section, section_name){ section['section_name'] = section_name; options.include(section); - }) + }); options.sort(function(a, b){ return (a.order || 100) - (b.order || 100) @@ -156,13 +156,13 @@ Page.Settings = new Class({ // Create tab if(!self.tabs[group.tab] || !self.tabs[group.tab].groups) self.createTab(group.tab, {}); - var content_container = self.tabs[group.tab].content + var content_container = self.tabs[group.tab].content; // Create subtab if(group.subtab){ - if (!self.tabs[group.tab].subtabs[group.subtab]) + if(!self.tabs[group.tab].subtabs[group.subtab]) self.createSubTab(group.subtab, group, self.tabs[group.tab], group.tab); - var content_container = self.tabs[group.tab].subtabs[group.subtab].content + content_container = self.tabs[group.tab].subtabs[group.subtab].content } if(group.list && !self.lists[group.list]){ @@ -170,12 +170,10 @@ Page.Settings = new Class({ } // Create the group - if(!self.tabs[group.tab].groups[group.name]){ - var group_el = self.createGroup(group) + if(!self.tabs[group.tab].groups[group.name]) + self.tabs[group.tab].groups[group.name] = self.createGroup(group) .inject(group.list ? 
self.lists[group.list] : content_container) .addClass('section_'+section_name); - self.tabs[group.tab].groups[group.name] = group_el; - } // Create list if needed if(group.type && group.type == 'list'){ @@ -208,9 +206,9 @@ Page.Settings = new Class({ var self = this; if(self.tabs[tab_name] && self.tabs[tab_name].tab) - return self.tabs[tab_name].tab + return self.tabs[tab_name].tab; - var label = tab.label || (tab.name || tab_name).capitalize() + var label = tab.label || (tab.name || tab_name).capitalize(); var tab_el = new Element('li.t_'+tab_name).adopt( new Element('a', { 'href': App.createUrl(self.name+'/'+tab_name), @@ -221,14 +219,14 @@ Page.Settings = new Class({ if(!self.tabs[tab_name]) self.tabs[tab_name] = { 'label': label - } + }; self.tabs[tab_name] = Object.merge(self.tabs[tab_name], { 'tab': tab_el, 'subtabs': {}, - 'content': new Element('div.tab_content.tab_'+tab_name).inject(self.containers), + 'content': new Element('div.tab_content.tab_' + tab_name).inject(self.containers), 'groups': {} - }) + }); return self.tabs[tab_name] @@ -238,12 +236,12 @@ Page.Settings = new Class({ var self = this; if(parent_tab.subtabs[tab_name]) - return parent_tab.subtabs[tab_name] + return parent_tab.subtabs[tab_name]; if(!parent_tab.subtabs_el) parent_tab.subtabs_el = new Element('ul.subtabs').inject(parent_tab.tab); - var label = tab.subtab_label || tab_name.replace('_', ' ').capitalize() + var label = tab.subtab_label || tab_name.replace('_', ' ').capitalize(); var tab_el = new Element('li.t_'+tab_name).adopt( new Element('a', { 'href': App.createUrl(self.name+'/'+parent_tab_name+'/'+tab_name), @@ -254,7 +252,7 @@ Page.Settings = new Class({ if(!parent_tab.subtabs[tab_name]) parent_tab.subtabs[tab_name] = { 'label': label - } + }; parent_tab.subtabs[tab_name] = Object.merge(parent_tab.subtabs[tab_name], { 'tab': tab_el, @@ -267,21 +265,17 @@ Page.Settings = new Class({ }, createGroup: function(group){ - var self = this; - - var group_el = new Element('fieldset', { + return new Element('fieldset', { 'class': (group.advanced ? 'inlineLabels advanced' : 'inlineLabels') + ' group_' + (group.name || '') + ' subtab_' + (group.subtab || '') }).adopt( - new Element('h2', { - 'text': group.label || (group.name).capitalize() - }).adopt( - new Element('span.hint', { - 'html': group.description || '' - }) - ) - ) - - return group_el + new Element('h2', { + 'text': group.label || (group.name).capitalize() + }).adopt( + new Element('span.hint', { + 'html': group.description || '' + }) + ) + ); }, createList: function(content_container){ @@ -299,12 +293,12 @@ var OptionBase = new Class({ Implements: [Options, Events], klass: 'textInput', - focused_class : 'focused', + focused_class: 'focused', save_on_change: true, initialize: function(section, name, value, options){ - var self = this - self.setOptions(options) + var self = this; + self.setOptions(options); self.section = section; self.name = name; @@ -330,10 +324,11 @@ var OptionBase = new Class({ */ createBase: function(){ var self = this; - self.el = new Element('div.ctrlHolder.'+self.section + '_' + self.name) + self.el = new Element('div.ctrlHolder.' + self.section + '_' + self.name) }, - create: function(){}, + create: function(){ + }, createLabel: function(){ var self = this; @@ -343,7 +338,7 @@ var OptionBase = new Class({ }, setAdvanced: function(){ - this.el.addClass(this.options.advanced ? 'advanced': '') + this.el.addClass(this.options.advanced ? 
'advanced' : '') }, createHint: function(){ @@ -354,7 +349,8 @@ var OptionBase = new Class({ }).inject(self.el); }, - afterInject: function(){}, + afterInject: function(){ + }, // Element has changed, do something changed: function(){ @@ -407,7 +403,7 @@ var OptionBase = new Class({ postName: function(){ var self = this; - return self.section +'['+self.name+']'; + return self.section + '[' + self.name + ']'; }, getValue: function(){ @@ -427,16 +423,16 @@ var OptionBase = new Class({ toElement: function(){ return this.el; } -}) +}); -var Option = {} +var Option = {}; Option.String = new Class({ Extends: OptionBase, type: 'string', create: function(){ - var self = this + var self = this; self.el.adopt( self.createLabel(), @@ -458,21 +454,21 @@ Option.Dropdown = new Class({ Extends: OptionBase, create: function(){ - var self = this + var self = this; self.el.adopt( self.createLabel(), self.input = new Element('select', { 'name': self.postName() }) - ) + ); Object.each(self.options.values, function(value){ new Element('option', { 'text': value[0], 'value': value[1] }).inject(self.input) - }) + }); self.input.set('value', self.getSettingValue()); @@ -491,7 +487,7 @@ Option.Checkbox = new Class({ create: function(){ var self = this; - var randomId = 'r-'+randomString() + var randomId = 'r-' + randomString(); self.el.adopt( self.createLabel().set('for', randomId), @@ -520,8 +516,8 @@ Option.Password = new Class({ create: function(){ var self = this; - self.parent() - self.input.set('type', 'password') + self.parent(); + self.input.set('type', 'password'); self.input.addEvent('focus', function(){ self.input.set('value', '') @@ -570,9 +566,9 @@ Option.Enabler = new Class({ afterInject: function(){ var self = this; - self.parentFieldset = self.el.getParent('fieldset').addClass('enabler') + self.parentFieldset = self.el.getParent('fieldset').addClass('enabler'); self.parentList = self.parentFieldset.getParent('.option_list'); - self.el.inject(self.parentFieldset, 'top') + self.el.inject(self.parentFieldset, 'top'); self.checkState() } @@ -622,7 +618,7 @@ Option.Directory = new Class({ self.getDirs() }, - previousDirectory: function(e){ + previousDirectory: function(){ var self = this; self.selectDirectory(self.getParentDir()) @@ -697,8 +693,8 @@ Option.Directory = new Class({ self.initial_directory = self.input.get('text'); - self.getDirs() - self.browser.show() + self.getDirs(); + self.browser.show(); self.el.addEvent('outerClick', self.hideBrowser.bind(self)) }, @@ -707,11 +703,11 @@ Option.Directory = new Class({ (e).preventDefault(); if(save) - self.save() + self.save(); else self.input.set('text', self.initial_directory); - self.browser.hide() + self.browser.hide(); self.el.removeEvents('outerClick') }, @@ -732,11 +728,11 @@ Option.Directory = new Class({ var prev_dirname = self.getCurrentDirname(previous_dir); if(previous_dir == json.home) prev_dirname = 'Home'; - else if (previous_dir == '/' && json.platform == 'nt') + else if(previous_dir == '/' && json.platform == 'nt') prev_dirname = 'Computer'; - self.back_button.set('data-value', previous_dir) - self.back_button.set('html', '« '+prev_dirname) + self.back_button.set('data-value', previous_dir); + self.back_button.set('html', '« ' + prev_dirname); self.back_button.show() } else { @@ -798,8 +794,6 @@ Option.Directory = new Class({ }, getCurrentDirname: function(dir){ - var self = this; - var dir_split = dir.split(Api.getOption('path_sep')); return dir_split[dir_split.length-2] || Api.getOption('path_sep') @@ -848,7 +842,7 @@ 
Option.Directories = new Class({ var parent = self.el.getParent('fieldset'); var dirs = parent.getElements('.multi_directory'); if(dirs.length == 0) - $(dir).inject(parent) + $(dir).inject(parent); else $(dir).inject(dirs.getLast(), 'after'); @@ -885,7 +879,7 @@ Option.Directories = new Class({ saveItems: function(){ var self = this; - var dirs = [] + var dirs = []; self.directories.each(function(dir){ if(dir.getValue()){ $(dir).removeClass('is_empty'); @@ -957,7 +951,7 @@ Option.Choice = new Class({ }).inject(self.input, 'after'); self.el.addClass('tag_input'); - var mtches = [] + var mtches = []; if(matches) matches.each(function(match, mnr){ var pos = value.indexOf(match), @@ -1037,7 +1031,7 @@ Option.Choice = new Class({ var prev_index = self.tags.indexOf(from_tag)-1; if(prev_index >= 0) - self.tags[prev_index].selectFrom('right') + self.tags[prev_index].selectFrom('right'); else from_tag.focus(); @@ -1049,7 +1043,7 @@ Option.Choice = new Class({ var next_index = self.tags.indexOf(from_tag)+1; if(next_index < self.tags.length) - self.tags[next_index].selectFrom('left') + self.tags[next_index].selectFrom('left'); else from_tag.focus(); }, @@ -1139,7 +1133,7 @@ Option.Choice.Tag = new Class({ if(e.key == 'left' && current_caret_pos == self.last_caret_pos){ self.fireEvent('goLeft'); } - else if (e.key == 'right' && self.last_caret_pos === current_caret_pos){ + else if(e.key == 'right' && self.last_caret_pos === current_caret_pos){ self.fireEvent('goRight'); } self.last_caret_pos = self.input.getCaretPosition(); @@ -1195,11 +1189,11 @@ Option.Choice.Tag = new Class({ self.fireEvent('goRight'); this.destroy(); } - else if (e.key == 'left'){ + else if(e.key == 'left'){ self.fireEvent('goLeft'); this.destroy(); } - else if (e.key == 'backspace'){ + else if(e.key == 'backspace'){ self.del(); this.destroy(); self.fireEvent('goLeft'); @@ -1213,7 +1207,7 @@ Option.Choice.Tag = new Class({ 'top': -200 } }); - self.el.adopt(temp_input) + self.el.adopt(temp_input); temp_input.focus(); } }, @@ -1266,10 +1260,10 @@ Option.Combined = new Class({ self.fieldset = self.input.getParent('fieldset'); self.combined_list = new Element('div.combined_table').inject(self.fieldset.getElement('h2'), 'after'); - self.values = {} - self.inputs = {} - self.items = [] - self.labels = {} + self.values = {}; + self.inputs = {}; + self.items = []; + self.labels = {}; self.options.combine.each(function(name){ @@ -1277,7 +1271,7 @@ Option.Combined = new Class({ var values = self.inputs[name].get('value').split(','); values.each(function(value, nr){ - if (!self.values[nr]) self.values[nr] = {}; + if(!self.values[nr]) self.values[nr] = {}; self.values[nr][name] = value.trim(); }); @@ -1286,19 +1280,18 @@ Option.Combined = new Class({ }); - var head = new Element('div.head').inject(self.combined_list) + var head = new Element('div.head').inject(self.combined_list); Object.each(self.inputs, function(input, name){ - self.labels[name] = input.getPrevious().get('text') + self.labels[name] = input.getPrevious().get('text'); new Element('abbr', { 'class': name, - 'text': self.labels[name], - //'title': input.getNext().get('text') + 'text': self.labels[name] }).inject(head) - }) + }); - Object.each(self.values, function(item, nr){ + Object.each(self.values, function(item){ self.createItem(item); }); @@ -1316,7 +1309,7 @@ Option.Combined = new Class({ self.items.each(function(ctrl_holder){ var empty_count = 0; self.options.combine.each(function(name){ - var input = ctrl_holder.getElement('input.'+name) + var input = 
ctrl_holder.getElement('input.' + name); if(input.get('value') == '' || input.get('type') == 'checkbox') empty_count++ }); @@ -1338,7 +1331,7 @@ Option.Combined = new Class({ value_empty = 0; self.options.combine.each(function(name){ - var value = values[name] || '' + var value = values[name] || ''; if(name.indexOf('use') != -1){ var checkbox = new Element('input[type=checkbox].inlay.'+name, { @@ -1375,7 +1368,7 @@ Option.Combined = new Class({ 'events': { 'click': self.deleteCombinedItem.bind(self) } - }).inject(item) + }).inject(item); self.items.include(item); @@ -1386,7 +1379,7 @@ Option.Combined = new Class({ var self = this; - var temp = {} + var temp = {}; self.items.each(function(item, nr){ self.options.combine.each(function(name){ var input = item.getElement('input.'+name); diff --git a/couchpotato/static/scripts/page/wanted.js b/couchpotato/static/scripts/page/wanted.js index 6adffbd..98a676c 100644 --- a/couchpotato/static/scripts/page/wanted.js +++ b/couchpotato/static/scripts/page/wanted.js @@ -5,7 +5,7 @@ Page.Wanted = new Class({ name: 'wanted', title: 'Gimmy gimmy gimmy!', - indexAction: function(param){ + indexAction: function(){ var self = this; if(!self.wanted){ @@ -35,7 +35,7 @@ Page.Wanted = new Class({ }, - doFullSearch: function(full){ + doFullSearch: function(){ var self = this; if(!self.search_in_progress){ diff --git a/couchpotato/static/style/api.css b/couchpotato/static/style/api.css index c635409..0c9f0f0 100644 --- a/couchpotato/static/style/api.css +++ b/couchpotato/static/style/api.css @@ -1,6 +1,5 @@ html { - font-size: 12px; - line-height: 1.5; + line-height: 1.5; font-family: "Helvetica Neue", Helvetica, Arial, Geneva, sans-serif; font-size: 14px; } diff --git a/couchpotato/static/style/main.css b/couchpotato/static/style/main.css index ae461c4..0ade518 100644 --- a/couchpotato/static/style/main.css +++ b/couchpotato/static/style/main.css @@ -127,6 +127,8 @@ body > .spinner, .mask{ line-height: 1; border-radius: 2px; cursor: pointer; + border: none; + -webkit-appearance: none; } .button.red { background-color: #ff0000; } .button.green { background-color: #2aa300; } @@ -142,7 +144,7 @@ body > .spinner, .mask{ .icon.download { background-image: url('../images/icon.download.png'); } .icon.edit { background-image: url('../images/icon.edit.png'); } .icon.completed { background-image: url('../images/icon.check.png'); } -.icon.folder { background-image: url('../images/icon.folder.png'); } +.icon.folder { background-image: url('../images/icon.folder.gif'); } .icon.imdb { background-image: url('../images/icon.imdb.png'); } .icon.refresh { background-image: url('../images/icon.refresh.png'); } .icon.readd { background-image: url('../images/icon.readd.png'); } @@ -199,14 +201,22 @@ body > .spinner, .mask{ top: -3px; } .icon2.menu:before { - content: "\e076 \e076 \e076"; + content: "\e076\00a0 \e076\00a0 \e076\00a0"; line-height: 6px; transform: scaleX(2); width: 20px; font-size: 10px; display: inline-block; vertical-align: middle; + word-wrap: break-word; + text-align:center; + margin-left: 5px; } + @media screen and (-webkit-min-device-pixel-ratio:0) { + .icon2.menu:before { + margin-top: -7px; + } + } /*** Navigation ***/ .header { @@ -257,14 +267,14 @@ body > .spinner, .mask{ .header .logo { display: inline-block; - font-size: 1.75em; - padding: 15px 30px 0 15px; + font-size: 3em; + padding: 4px 30px 0 15px; height: 100%; - vertical-align: middle; - border-right: 1px solid rgba(255,255,255,.07); + border-right: 1px solid rgba(255,255,255,.07); color: #FFF; 
font-weight: normal; vertical-align: top; + font-family: Lobster; } @media all and (max-width: 480px) { @@ -275,6 +285,7 @@ body > .spinner, .mask{ .header .logo { padding-top: 7px; border: 0; + font-size: 1.7em; } } @@ -489,7 +500,6 @@ body > .spinner, .mask{ display: block; font-size: .85em; color: #aaa; - text-align: ; } .header .notification_menu li .more { @@ -606,7 +616,7 @@ body > .spinner, .mask{ .onlay, .inlay .selected, .inlay:not(.reversed) > li:hover, .inlay > li.active, .inlay.reversed > li { border-radius:3px; border: 1px solid #252930; - box-shadow: inset 0 1px 0px rgba(255,255,255,0.20); + box-shadow: inset 0 1px 0 rgba(255,255,255,0.20); background: rgb(55,62,74); background-image: linear-gradient( 0, @@ -729,7 +739,7 @@ body > .spinner, .mask{ .more_menu .wrapper li .separator { border-bottom: 1px solid rgba(0,0,0,.1); display: block; - height: 1; + height: 1px; margin: 5px 0; } @@ -790,6 +800,73 @@ body > .spinner, .mask{ right: 0; color: #FFF; } + +/*** Login ***/ +.page.login { + display: block; +} + + .login h1 { + padding: 0 0 10px; + font-size: 60px; + font-family: Lobster; + font-weight: normal; + } + + .login form { + padding: 0; + height: 300px; + width: 400px; + position: fixed; + left: 50%; + top: 50%; + margin: -200px 0 0 -200px; + } + @media all and (max-width: 480px) { + + .login form { + padding: 0; + height: 300px; + width: 90%; + position: absolute; + left: 5%; + top: 10px; + margin: 0; + } + + } + + .page.login .ctrlHolder { + padding: 0; + margin: 0 0 20px; + } + .page.login .ctrlHolder:hover { + background: none; + } + + .page.login input[type=text], + .page.login input[type=password] { + width: 100% !important; + font-size: 25px; + padding: 14px !important; + } + + .page.login .remember_me { + font-size: 15px; + float: left; + width: 150px; + padding: 20px 0; + } + + .page.login .remember_me .check { + margin: 5px 5px 0 0; + } + + .page.login .button { + font-size: 25px; + padding: 20px; + float: right; + } /* Fonts */ @font-face { @@ -848,5 +925,15 @@ body > .spinner, .mask{ url('../fonts/OpenSans-BoldItalic-webfont.svg#OpenSansBoldItalic') format('svg'); font-weight: bold; font-style: italic; +} +@font-face { + font-family: 'Lobster'; + src: url('../fonts/Lobster-webfont.eot'); + src: url('../fonts/Lobster-webfont.eot?#iefix') format('embedded-opentype'), + url('../fonts/Lobster-webfont.woff') format('woff'), + url('../fonts/Lobster-webfont.ttf') format('truetype'), + url('../fonts/Lobster-webfont.svg#lobster_1.4regular') format('svg'); + font-weight: normal; + font-style: normal; } \ No newline at end of file diff --git a/couchpotato/static/style/settings.css b/couchpotato/static/style/settings.css index 132d9c5..61d5239 100644 --- a/couchpotato/static/style/settings.css +++ b/couchpotato/static/style/settings.css @@ -90,7 +90,7 @@ padding: 0 9px 10px 30px; margin: 0; border-bottom: 1px solid #333; - box-shadow: 0 1px 0px rgba(255,255,255, 0.15); + box-shadow: 0 1px 0 rgba(255,255,255, 0.15); } .page fieldset h2 .hint { font-size: 12px; @@ -107,10 +107,8 @@ .page fieldset > .ctrlHolder:first-child { display: block; padding: 0; - width: auto; - margin: 0; position: relative; - margin-bottom: -23px; + margin: 0 0 -23px; border: none; width: 20px; } @@ -132,12 +130,11 @@ .page .ctrlHolder .formHint { width: 47%; margin: -18px 0; - padding: 0; - color: #fff !important; + color: #fff !important; display: inline-block; vertical-align: middle; - padding-left: 2%; - line-height: 14px; + padding: 0 0 0 2%; + line-height: 14px; } .page .check { @@ -219,7 
+216,7 @@ font-weight: bold; border: none; border-top: 1px solid rgba(255,255,255, 0.15); - box-shadow: 0 -1px 0px #333; + box-shadow: 0 -1px 0 #333; margin: 0; padding: 10px 0 5px 25px; } @@ -308,7 +305,7 @@ border-bottom: 6px solid #5c697b; display: block; position: absolute; - width: 0px; + width: 0; margin: -6px 0 0 45%; } @@ -689,7 +686,6 @@ .group_userscript .bookmarklet { display: block; - display: block; float: left; padding: 20px 15px 0 25px; border-radius: 5px; diff --git a/couchpotato/static/style/uniform.generic.css b/couchpotato/static/style/uniform.generic.css index e70a915..8ac4136 100644 --- a/couchpotato/static/style/uniform.generic.css +++ b/couchpotato/static/style/uniform.generic.css @@ -92,9 +92,8 @@ border-radius: 4px; -webkit-border-radius: 4px; -moz-border-radius: 4px; - -o-border-radius: 4px; - -khtml-border-radius: 4px; - } + + } .uniForm #errorMsg h3{} /* Feel free to use a heading level suitable to your page structure */ .uniForm #errorMsg ol{ margin: 0 0 1.5em 0; padding: 0; } .uniForm #errorMsg ol li{ margin: 0 0 3px 1.5em; padding: 7px; background: #f6bec1; position: relative; font-size: .85em; @@ -102,9 +101,8 @@ border-radius: 4px; -webkit-border-radius: 4px; -moz-border-radius: 4px; - -o-border-radius: 4px; - -khtml-border-radius: 4px; - } + + } .uniForm .ctrlHolder.error, .uniForm .ctrlHolder.focused.error{ background: #ffdfdf; border: 1px solid #f3afb5; @@ -112,9 +110,8 @@ border-radius: 4px; -webkit-border-radius: 4px; -moz-border-radius: 4px; - -o-border-radius: 4px; - -khtml-border-radius: 4px; - } + + } .uniForm .ctrlHolder.error input.error, .uniForm .ctrlHolder.error select.error, .uniForm .ctrlHolder.error textarea.error{ color: #af4c4c; margin: 0 0 6px 0; padding: 4px; } @@ -125,9 +122,8 @@ border-radius: 4px; -webkit-border-radius: 4px; -moz-border-radius: 4px; - -o-border-radius: 4px; - -khtml-border-radius: 4px; - } + + } .uniForm #OKMsg p{ margin: 0; } /* ----------------------------------------------------------------------------- */ diff --git a/couchpotato/templates/login.html b/couchpotato/templates/login.html new file mode 100644 index 0000000..3562622 --- /dev/null +++ b/couchpotato/templates/login.html @@ -0,0 +1,38 @@ +{% autoescape None %} + + + + + + + {% for url in fireEvent('clientscript.get_styles', as_html = True, location = 'front', single = True) %} + {% end %} + + {% for url in fireEvent('clientscript.get_scripts', as_html = True, location = 'front', single = True) %} + {% end %} + + + + + + + CouchPotato + + +
+ [markup lost in extraction: the rest of the new login.html, i.e. the "CouchPotato" page heading and the login form (text and password inputs, a remember_me checkbox and a submit button) that the .page.login styles in main.css below target]
+ + \ No newline at end of file diff --git a/libs/themoviedb/__init__.py b/libs/themoviedb/__init__.py deleted file mode 100644 index e69de29..0000000 diff --git a/libs/themoviedb/tmdb.py b/libs/themoviedb/tmdb.py deleted file mode 100644 index 6a0e591..0000000 --- a/libs/themoviedb/tmdb.py +++ /dev/null @@ -1,740 +0,0 @@ -#!/usr/bin/env python -#-*- coding:utf-8 -*- -#author:doganaydin /// forked from dbr/Ben -#project:themoviedb -#repository:http://github.com/doganaydin/themoviedb -#license: LGPLv2 http://www.gnu.org/licenses/lgpl.html - -"""An interface to the themoviedb.org API""" - -__author__ = "doganaydin" -__version__ = "0.5" - - -config = {} - -def configure(api_key): - config['apikey'] = api_key - config['urls'] = {} - config['urls']['movie.search'] = "http://api.themoviedb.org/2.1/Movie.search/en/xml/%(apikey)s/%%s" % (config) - config['urls']['movie.getInfo'] = "http://api.themoviedb.org/2.1/Movie.getInfo/en/xml/%(apikey)s/%%s" % (config) - config['urls']['media.getInfo'] = "http://api.themoviedb.org/2.1/Media.getInfo/en/xml/%(apikey)s/%%s/%%s" % (config) - config['urls']['imdb.lookUp'] = "http://api.themoviedb.org/2.1/Movie.imdbLookup/en/xml/%(apikey)s/%%s" % (config) - config['urls']['movie.browse'] = "http://api.themoviedb.org/2.1/Movie.browse/en-US/xml/%(apikey)s?%%s" % (config) - -import os, struct, urllib, urllib2, xml.etree.cElementTree as ElementTree - -class TmdBaseError(Exception): - pass - -class TmdNoResults(TmdBaseError): - pass - -class TmdHttpError(TmdBaseError): - pass - -class TmdXmlError(TmdBaseError): - pass - -class TmdConfigError(TmdBaseError): - pass - -def opensubtitleHashFile(name): - """Hashes a file using OpenSubtitle's method. - > In natural language it calculates: size + 64bit chksum of the first and - > last 64k (even if they overlap because the file is smaller than 128k). - A slightly more Pythonic version of the Python solution on.. 
- http://trac.opensubtitles.org/projects/opensubtitles/wiki/HashSourceCodes - """ - longlongformat = 'q' - bytesize = struct.calcsize(longlongformat) - - f = open(name, "rb") - - filesize = os.path.getsize(name) - fhash = filesize - - if filesize < 65536 * 2: - raise ValueError("File size must be larger than %s bytes (is %s)" % (65536 * 2, filesize)) - - for x in range(65536 / bytesize): - buf = f.read(bytesize) - (l_value,) = struct.unpack(longlongformat, buf) - fhash += l_value - fhash = fhash & 0xFFFFFFFFFFFFFFFF # to remain as 64bit number - - f.seek(max(0, filesize - 65536), 0) - for x in range(65536 / bytesize): - buf = f.read(bytesize) - (l_value,) = struct.unpack(longlongformat, buf) - fhash += l_value - fhash = fhash & 0xFFFFFFFFFFFFFFFF - - f.close() - return "%016x" % fhash - -class XmlHandler: - """Deals with retrieval of XML files from API""" - def __init__(self, url): - self.url = url - - def _grabUrl(self, url): - try: - urlhandle = urllib2.urlopen(url) - except IOError, errormsg: - raise TmdHttpError(errormsg) - if urlhandle.code >= 400: - raise TmdHttpError("HTTP status code was %d" % urlhandle.code) - return urlhandle.read() - - def getEt(self): - xml = self._grabUrl(self.url) - try: - et = ElementTree.fromstring(xml) - except SyntaxError, errormsg: - raise TmdXmlError(errormsg) - return et - -class SearchResults(list): - """Stores a list of Movie's that matched the search""" - def __repr__(self): - return "" % (list.__repr__(self)) - -class MovieResult(dict): - """A dict containing the information about a specific search result""" - def __repr__(self): - return "" % (self.get("name"), self.get("released")) - - def info(self): - """Performs a MovieDb.getMovieInfo search on the current id, returns - a Movie object - """ - cur_id = self['id'] - info = MovieDb().getMovieInfo(cur_id) - return info - -class Movie(dict): - """A dict containing the information about the film""" - def __repr__(self): - return "" % (self.get("name"), self.get("released")) - -class Categories(dict): - """Stores category information""" - def set(self, category_et): - """Takes an elementtree Element ('category') and stores the url, - using the type and name as the dict key. - For example: - - ..becomes: - categories['genre']['Crime'] = 'http://themoviedb.org/encyclopedia/category/80' - """ - _type = category_et.get("type") - name = category_et.get("name") - url = category_et.get("url") - self.setdefault(_type, {})[name] = url - self[_type][name] = url - -class Studios(dict): - """Stores category information""" - def set(self, studio_et): - """Takes an elementtree Element ('studio') and stores the url, - using the name as the dict key. - For example: - - ..becomes: - studios['name'] = 'http://www.themoviedb.org/encyclopedia/company/20' - """ - name = studio_et.get("name") - url = studio_et.get("url") - self[name] = url - -class Countries(dict): - """Stores country information""" - def set(self, country_et): - """Takes an elementtree Element ('country') and stores the url, - using the name and code as the dict key. 
- For example: - - ..becomes: - countries['code']['name'] = 'http://www.themoviedb.org/encyclopedia/country/223' - """ - code = country_et.get("code") - name = country_et.get("name") - url = country_et.get("url") - self.setdefault(code, {})[name] = url - -class Image(dict): - """Stores image information for a single poster/backdrop (includes - multiple sizes) - """ - def __init__(self, _id, _type, size, url): - self['id'] = _id - self['type'] = _type - - def largest(self): - for csize in ["original", "mid", "cover", "thumb"]: - if csize in self: - return csize - - def __repr__(self): - return "" % (self['type'], self['id']) - -class ImagesList(list): - """Stores a list of Images, and functions to filter "only posters" etc""" - def set(self, image_et): - """Takes an elementtree Element ('image') and stores the url, - along with the type, id and size. - Is a list containing each image as a dictionary (which includes the - various sizes) - For example: - - ..becomes: - images[0] = {'id':4181', 'type': 'poster', 'original': 'http://images.themov...'} - """ - _type = image_et.get("type") - _id = image_et.get("id") - size = image_et.get("size") - url = image_et.get("url") - cur = self.find_by('id', _id) - if len(cur) == 0: - nimg = Image(_id = _id, _type = _type, size = size, url = url) - self.append(nimg) - elif len(cur) == 1: - cur[0][size] = url - else: - raise ValueError("Found more than one poster with id %s, this should never happen" % (_id)) - - def find_by(self, key, value): - ret = [] - for cur in self: - if cur[key] == value: - ret.append(cur) - return ret - - @property - def posters(self): - return self.find_by('type', 'poster') - - @property - def backdrops(self): - return self.find_by('type', 'backdrop') - -class CrewRoleList(dict): - """Stores a list of roles, such as director, actor etc - >>> import tmdb - >>> tmdb.getMovieInfo(550)['cast'].keys()[:5] - ['casting', 'producer', 'author', 'sound editor', 'actor'] - """ - pass - -class CrewList(list): - """Stores list of crew in specific role - >>> import tmdb - >>> tmdb.getMovieInfo(550)['cast']['author'] - [, ] - """ - pass - -class Person(dict): - """Stores information about a specific member of cast""" - def __init__(self, job, _id, name, character, url): - self['job'] = job - self['id'] = _id - self['name'] = name - self['character'] = character - self['url'] = url - - def __repr__(self): - if self['character'] is None or self['character'] == "": - return "<%(job)s (id %(id)s): %(name)s>" % self - else: - return "<%(job)s (id %(id)s): %(name)s (as %(character)s)>" % self - -class MovieDb: - """Main interface to www.themoviedb.com - The search() method searches for the film by title. - The getMovieInfo() method retrieves information about a specific movie using themoviedb id. 
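For comparison with the v3 wrapper added further down in this patch, a rough sketch of how this 2.1 interface was driven (illustrative only; the key is a placeholder, and configure() has to run first because the module-level URL table starts out empty):

from themoviedb import tmdb                   # assumes CouchPotato's libs/ dir is on sys.path

tmdb.configure('legacy-2.1-api-key')          # placeholder key
results = tmdb.search("Fight Club")           # SearchResults list of MovieResult dicts
movie = tmdb.getMovieInfo(results[0]['id'])   # full Movie record for the first hit
print movie['name']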
- """ - def _parseSearchResults(self, movie_element): - cur_movie = MovieResult() - cur_images = ImagesList() - for item in movie_element.getchildren(): - if item.tag.lower() == "images": - for subitem in item.getchildren(): - cur_images.set(subitem) - else: - cur_movie[item.tag] = item.text - cur_movie['images'] = cur_images - return cur_movie - - def _parseMovie(self, movie_element): - cur_movie = Movie() - cur_categories = Categories() - cur_studios = Studios() - cur_countries = Countries() - cur_images = ImagesList() - cur_cast = CrewRoleList() - for item in movie_element.getchildren(): - if item.tag.lower() == "categories": - for subitem in item.getchildren(): - cur_categories.set(subitem) - elif item.tag.lower() == "studios": - for subitem in item.getchildren(): - cur_studios.set(subitem) - elif item.tag.lower() == "countries": - for subitem in item.getchildren(): - cur_countries.set(subitem) - elif item.tag.lower() == "images": - for subitem in item.getchildren(): - cur_images.set(subitem) - elif item.tag.lower() == "cast": - for subitem in item.getchildren(): - job = subitem.get("job").lower() - p = Person( - job = job, - _id = subitem.get("id"), - name = subitem.get("name"), - character = subitem.get("character"), - url = subitem.get("url"), - ) - cur_cast.setdefault(job, CrewList()).append(p) - else: - cur_movie[item.tag] = item.text - - cur_movie['categories'] = cur_categories - cur_movie['studios'] = cur_studios - cur_movie['countries'] = cur_countries - cur_movie['images'] = cur_images - cur_movie['cast'] = cur_cast - return cur_movie - - def search(self, title): - """Searches for a film by its title. - Returns SearchResults (a list) containing all matches (Movie instances) - """ - title = urllib.quote(title.encode("utf-8")) - url = config['urls']['movie.search'] % (title) - etree = XmlHandler(url).getEt() - search_results = SearchResults() - for cur_result in etree.find("movies").findall("movie"): - cur_movie = self._parseSearchResults(cur_result) - search_results.append(cur_movie) - return search_results - - def getMovieInfo(self, id): - """Returns movie info by it's TheMovieDb ID. 
- Returns a Movie instance - """ - url = config['urls']['movie.getInfo'] % (id) - etree = XmlHandler(url).getEt() - moviesTree = etree.find("movies").findall("movie") - - if len(moviesTree) == 0: - raise TmdNoResults("No results for id %s" % id) - return self._parseMovie(moviesTree[0]) - - def mediaGetInfo(self, hash, size): - """Used to retrieve specific information about a movie but instead of - passing a TMDb ID, you pass a file hash and filesize in bytes - """ - url = config['urls']['media.getInfo'] % (hash, size) - etree = XmlHandler(url).getEt() - moviesTree = etree.find("movies").findall("movie") - if len(moviesTree) == 0: - raise TmdNoResults("No results for hash %s" % hash) - return [self._parseMovie(x) for x in moviesTree] - - def imdbLookup(self, id = 0, title = False): - if not config.get('apikey'): - raise TmdConfigError("API Key not set") - if id > 0: - url = config['urls']['imdb.lookUp'] % (id) - else: - _imdb_id = self.search(title)[0]["imdb_id"] - url = config['urls']['imdb.lookUp'] % (_imdb_id) - etree = XmlHandler(url).getEt() - lookup_results = SearchResults() - for cur_lookup in etree.find("movies").findall("movie"): - cur_movie = self._parseSearchResults(cur_lookup) - lookup_results.append(cur_movie) - return lookup_results - -class Browse: - - def __init__(self, params = {}): - """ - tmdb.Browse(params) - default params = {"order_by":"release","order":"desc"} - params = {"query":"some query","release_max":"1991",...} - all posible parameters = http://api.themoviedb.org/2.1/methods/Movie.browse - """ - if "order_by" not in params: - params.update({"order_by":"release"}) - if "order" not in params: - params.update({"order":"desc"}) - - self.params = urllib.urlencode(params) - self.movie = self.look(self.params) - - def look(self, look_for): - url = config['urls']['movie.browse'] % (look_for) - etree = XmlHandler(url).getEt() - look_results = SearchResults() - for cur_lookup in etree.find("movies").findall("movie"): - cur_movie = self._parseSearchResults(cur_lookup) - look_results.append(cur_movie) - return look_results - - def _parseSearchResults(self, movie_element): - cur_movie = MovieResult() - cur_images = ImagesList() - for item in movie_element.getchildren(): - if item.tag.lower() == "images": - for subitem in item.getchildren(): - cur_images.set(subitem) - else: - cur_movie[item.tag] = item.text - cur_movie['images'] = cur_images - return cur_movie - - def getTotal(self): - return len(self.movie) - - def getRating(self, i): - return self.movie[i]["rating"] - - def getVotes(self, i): - return self.movie[i]["votes"] - - def getName(self, i): - return self.movie[i]["name"] - - def getLanguage(self, i): - return self.movie[i]["language"] - - def getCertification(self, i): - return self.movie[i]["certification"] - - def getUrl(self, i): - return self.movie[i]["url"] - - def getOverview(self, i): - return self.movie[i]["overview"] - - def getPopularity(self, i): - return self.movie[i]["popularity"] - - def getOriginalName(self, i): - return self.movie[i]["original_name"] - - def getLastModified(self, i): - return self.movie[i]["last_modified_at"] - - def getImdbId(self, i): - return self.movie[i]["imdb_id"] - - def getReleased(self, i): - return self.movie[i]["released"] - - def getScore(self, i): - return self.movie[i]["score"] - - def getAdult(self, i): - return self.movie[i]["adult"] - - def getVersion(self, i): - return self.movie[i]["version"] - - def getTranslated(self, i): - return self.movie[i]["translated"] - - def getType(self, i): - return 
self.movie[i]["type"] - - def getId(self, i): - return self.movie[i]["id"] - - def getAlternativeName(self, i): - return self.movie[i]["alternative_name"] - - def getPoster(self, i, size): - if size == "thumb" or size == "t": - return self.movie[i]["images"][0]["thumb"] - elif size == "cover" or size == "c": - return self.movie[i]["images"][0]["cover"] - else: - return self.movie[i]["images"][0]["mid"] - - def getBackdrop(self, i, size): - if size == "poster" or size == "p": - return self.movie[i]["images"][1]["poster"] - else: - return self.movie[i]["images"][1]["thumb"] - - - -# Shortcuts for tmdb search method -# using: -# movie = tmdb.tmdb("Sin City") -# print movie.getRating -> 7.0 -class tmdb: - - def __init__(self, name): - """Convenience wrapper for MovieDb.search - so you can do.. - >>> import tmdb - >>> movie = tmdb.tmdb("Fight Club") - >>> ranking = movie.getRanking() or votes = movie.getVotes() - ]> - """ - mdb = MovieDb() - self.movie = mdb.search(name) - - def getTotal(self): - return len(self.movie) - - def getRating(self, i): - return self.movie[i]["rating"] - - def getVotes(self, i): - return self.movie[i]["votes"] - - def getName(self, i): - return self.movie[i]["name"] - - def getLanguage(self, i): - return self.movie[i]["language"] - - def getCertification(self, i): - return self.movie[i]["certification"] - - def getUrl(self, i): - return self.movie[i]["url"] - - def getOverview(self, i): - return self.movie[i]["overview"] - - def getPopularity(self, i): - return self.movie[i]["popularity"] - - def getOriginalName(self, i): - return self.movie[i]["original_name"] - - def getLastModified(self, i): - return self.movie[i]["last_modified_at"] - - def getImdbId(self, i): - return self.movie[i]["imdb_id"] - - def getReleased(self, i): - return self.movie[i]["released"] - - def getScore(self, i): - return self.movie[i]["score"] - - def getAdult(self, i): - return self.movie[i]["adult"] - - def getVersion(self, i): - return self.movie[i]["version"] - - def getTranslated(self, i): - return self.movie[i]["translated"] - - def getType(self, i): - return self.movie[i]["type"] - - def getId(self, i): - return self.movie[i]["id"] - - def getAlternativeName(self, i): - return self.movie[i]["alternative_name"] - - def getPoster(self, i, size): - if size == "thumb" or size == "t": - return self.movie[i]["images"][0]["thumb"] - elif size == "cover" or size == "c": - return self.movie[i]["images"][0]["cover"] - else: - return self.movie[i]["images"][0]["mid"] - - def getBackdrop(self, i, size): - if size == "poster" or size == "p": - return self.movie[i]["images"][1]["poster"] - else: - return self.movie[i]["images"][1]["thumb"] - -# Shortcuts for imdb lookup method -# using: -# movie = tmdb.imdb("Sin City") -# print movie.getRating -> 7.0 -class imdb: - - def __init__(self, id = 0, title = False): - # get first movie if result=0 - """Convenience wrapper for MovieDb.search - so you can do.. 
- >>> import tmdb - >>> movie = tmdb.imdb(title="Fight Club") # or movie = tmdb.imdb(id=imdb_id) - >>> ranking = movie.getRanking() or votes = movie.getVotes() - ]> - """ - self.id = id - self.title = title - self.mdb = MovieDb() - self.movie = self.mdb.imdbLookup(self.id, self.title) - - def getTotal(self): - return len(self.movie) - - def getRuntime(self, i): - return self.movie[i]["runtime"] - - def getCategories(self): - from xml.dom.minidom import parse - adres = config['urls']['imdb.lookUp'] % self.getImdbId() - d = parse(urllib2.urlopen(adres)) - s = d.getElementsByTagName("categories") - ds = [] - for i in range(len(s[0].childNodes)): - if i % 2 > 0: - ds.append(s[0].childNodes[i].getAttribute("name")) - return ds - - def getRating(self, i): - return self.movie[i]["rating"] - - def getVotes(self, i): - return self.movie[i]["votes"] - - def getName(self, i): - return self.movie[i]["name"] - - def getLanguage(self, i): - return self.movie[i]["language"] - - def getCertification(self, i): - return self.movie[i]["certification"] - - def getUrl(self, i): - return self.movie[i]["url"] - - def getOverview(self, i): - return self.movie[i]["overview"] - - def getPopularity(self, i): - return self.movie[i]["popularity"] - - def getOriginalName(self, i): - return self.movie[i]["original_name"] - - def getLastModified(self, i): - return self.movie[i]["last_modified_at"] - - def getImdbId(self, i): - return self.movie[i]["imdb_id"] - - def getReleased(self, i): - return self.movie[i]["released"] - - def getAdult(self, i): - return self.movie[i]["adult"] - - def getVersion(self, i): - return self.movie[i]["version"] - - def getTranslated(self, i): - return self.movie[i]["translated"] - - def getType(self, i): - return self.movie[i]["type"] - - def getId(self, i): - return self.movie[i]["id"] - - def getAlternativeName(self, i): - return self.movie[i]["alternative_name"] - - def getPoster(self, i, size): - poster = [] - if size == "thumb" or size == "t": - _size = "thumb" - elif size == "cover" or size == "c": - _size = "cover" - else: - _size = "mid" - for a in self.movie[i]["images"]: - if a["type"] == "poster": - poster.append(a[_size]) - return poster - del poster - - def getBackdrop(self, i, size): - backdrop = [] - if size == "thumb" or size == "t": - _size = "thumb" - elif size == "cover" or size == "c": - _size = "cover" - else: - _size = "mid" - for a in self.movie[i]["images"]: - if a["type"] == "backdrop": - backdrop.append(a[_size]) - return backdrop - del backdrop - -def imdbLookup(id = 0, title = False): - """Convenience wrapper for Imdb.Lookup - so you can do.. - >>> import tmdb - >>> tmdb.imdbLookup("Fight Club") - ]> - """ - mdb = MovieDb() - return mdb.imdbLookup(id, title) - -def search(name): - """Convenience wrapper for MovieDb.search - so you can do.. - >>> import tmdb - >>> tmdb.search("Fight Club") - ]> - """ - mdb = MovieDb() - return mdb.search(name) - -def getMovieInfo(id): - """Convenience wrapper for MovieDb.search - so you can do.. - >>> import tmdb - >>> tmdb.getMovieInfo(187) - - """ - mdb = MovieDb() - return mdb.getMovieInfo(id) - -def mediaGetInfo(hash, size): - """Convenience wrapper for MovieDb.mediaGetInfo - so you can do.. 
- - >>> import tmdb - >>> tmdb.mediaGetInfo('907172e7fe51ba57', size = 742086656)[0] - - """ - mdb = MovieDb() - return mdb.mediaGetInfo(hash, size) - -def searchByHashingFile(filename): - """Searches for the specified file using the OpenSubtitle hashing method - """ - return mediaGetInfo(opensubtitleHashFile(filename), os.path.size(filename)) - -def main(): - results = search("Fight Club") - searchResult = results[0] - movie = getMovieInfo(searchResult['id']) - print movie['name'] - - print "Producers:" - for prodr in movie['cast']['producer']: - print " " * 4, prodr['name'] - print movie['images'] - for genreName in movie['categories']['genre']: - print "%s (%s)" % (genreName, movie['categories']['genre'][genreName]) - -if __name__ == '__main__': - main() diff --git a/libs/tmdb3/__init__.py b/libs/tmdb3/__init__.py new file mode 100755 index 0000000..92ca551 --- /dev/null +++ b/libs/tmdb3/__init__.py @@ -0,0 +1,11 @@ +#!/usr/bin/env python + +from tmdb_api import Configuration, searchMovie, searchMovieWithYear, \ + searchPerson, searchStudio, searchList, searchCollection, \ + Person, Movie, Collection, Genre, List, __version__ +from request import set_key, set_cache +from locales import get_locale, set_locale +from tmdb_auth import get_session, set_session +from cache_engine import CacheEngine +from tmdb_exceptions import * + diff --git a/libs/tmdb3/cache.py b/libs/tmdb3/cache.py new file mode 100755 index 0000000..3b10677 --- /dev/null +++ b/libs/tmdb3/cache.py @@ -0,0 +1,121 @@ +#!/usr/bin/env python +# -*- coding: utf-8 -*- +#----------------------- +# Name: cache.py +# Python Library +# Author: Raymond Wagner +# Purpose: Caching framework to store TMDb API results +#----------------------- + +from tmdb_exceptions import * +from cache_engine import Engines + +import cache_null +import cache_file + +class Cache( object ): + """ + This class implements a persistent cache, backed in a file specified in + the object creation. The file is protected for safe, concurrent access + by multiple instances using flock. + This cache uses JSON for speed and storage efficiency, so only simple + data types are supported. 
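In practice the class boils down to a put()/get() round trip; a minimal sketch of that usage (illustrative only — the filename and key are arbitrary, and a bare filename lands under /tmp/ on POSIX systems):

from tmdb3.cache import Cache                        # assumes libs/ is on sys.path

c = Cache(filename='example.cache')                  # default 'file' engine
c.put('greeting', {'hello': 'world'}, lifetime=60)   # expires 60 seconds from now
print c.get('greeting')                              # the stored dict, until it expires
print c.get('missing-key')                           # None for absent or expired keys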
+ Data is stored in a simple format {key:(expiretimestamp, data)} + """ + def __init__(self, engine=None, *args, **kwargs): + self._engine = None + self._data = {} + self._age = 0 + self.configure(engine, *args, **kwargs) + + def _import(self, data=None): + if data is None: + data = self._engine.get(self._age) + for obj in sorted(data, key=lambda x: x.creation): + if not obj.expired: + self._data[obj.key] = obj + self._age = max(self._age, obj.creation) + + def _expire(self): + for k,v in self._data.items(): + if v.expired: + del self._data[k] + + def configure(self, engine, *args, **kwargs): + if engine is None: + engine = 'file' + elif engine not in Engines: + raise TMDBCacheError("Invalid cache engine specified: "+engine) + self._engine = Engines[engine](self) + self._engine.configure(*args, **kwargs) + + def put(self, key, data, lifetime=60*60*12): + # pull existing data, so cache will be fresh when written back out + if self._engine is None: + raise TMDBCacheError("No cache engine configured") + self._expire() + self._import(self._engine.put(key, data, lifetime)) + + def get(self, key): + if self._engine is None: + raise TMDBCacheError("No cache engine configured") + self._expire() + if key not in self._data: + self._import() + try: + return self._data[key].data + except: + return None + + def cached(self, callback): + """ + Returns a decorator that uses a callback to specify the key to use + for caching the responses from the decorated function. + """ + return self.Cached(self, callback) + + class Cached( object ): + def __init__(self, cache, callback, func=None, inst=None): + self.cache = cache + self.callback = callback + self.func = func + self.inst = inst + + if func: + self.__module__ = func.__module__ + self.__name__ = func.__name__ + self.__doc__ = func.__doc__ + + def __call__(self, *args, **kwargs): + if self.func is None: # decorator is waiting to be given a function + if len(kwargs) or (len(args) != 1): + raise TMDBCacheError('Cache.Cached decorator must be called '+\ + 'a single callable argument before it '+\ + 'be used.') + elif args[0] is None: + raise TMDBCacheError('Cache.Cached decorator called before '+\ + 'being given a function to wrap.') + elif not callable(args[0]): + raise TMDBCacheError('Cache.Cached must be provided a '+\ + 'callable object.') + return self.__class__(self.cache, self.callback, args[0]) + elif self.inst.lifetime == 0: + return self.func(*args, **kwargs) + else: + key = self.callback() + data = self.cache.get(key) + if data is None: + data = self.func(*args, **kwargs) + if hasattr(self.inst, 'lifetime'): + self.cache.put(key, data, self.inst.lifetime) + else: + self.cache.put(key, data) + return data + + def __get__(self, inst, owner): + if inst is None: + return self + func = self.func.__get__(inst, owner) + callback = self.callback.__get__(inst, owner) + return self.__class__(self.cache, callback, func, inst) + diff --git a/libs/tmdb3/cache_engine.py b/libs/tmdb3/cache_engine.py new file mode 100755 index 0000000..99ad4cd --- /dev/null +++ b/libs/tmdb3/cache_engine.py @@ -0,0 +1,72 @@ +#!/usr/bin/env python +# -*- coding: utf-8 -*- +#----------------------- +# Name: cache_engine.py +# Python Library +# Author: Raymond Wagner +# Purpose: Base cache engine class for collecting registered engines +#----------------------- + +import time +from weakref import ref + +class Engines( object ): + def __init__(self): + self._engines = {} + def register(self, engine): + self._engines[engine.__name__] = engine + self._engines[engine.name] = engine + def 
__getitem__(self, key): + return self._engines[key] + def __contains__(self, key): + return self._engines.__contains__(key) +Engines = Engines() + +class CacheEngineType( type ): + """ + Cache Engine Metaclass that registers new engines against the cache + for named selection and use. + """ + def __init__(mcs, name, bases, attrs): + super(CacheEngineType, mcs).__init__(name, bases, attrs) + if name != 'CacheEngine': + # skip base class + Engines.register(mcs) + +class CacheEngine( object ): + __metaclass__ = CacheEngineType + + name = 'unspecified' + def __init__(self, parent): + self.parent = ref(parent) + def configure(self): + raise RuntimeError + def get(self, date): + raise RuntimeError + def put(self, key, value, lifetime): + raise RuntimeError + def expire(self, key): + raise RuntimeError + +class CacheObject( object ): + """ + Cache object class, containing one stored record. + """ + + def __init__(self, key, data, lifetime=0, creation=None): + self.key = key + self.data = data + self.lifetime = lifetime + self.creation = creation if creation is not None else time.time() + + def __len__(self): + return len(self.data) + + @property + def expired(self): + return (self.remaining == 0) + + @property + def remaining(self): + return max((self.creation + self.lifetime) - time.time(), 0) + diff --git a/libs/tmdb3/cache_file.py b/libs/tmdb3/cache_file.py new file mode 100755 index 0000000..5918071 --- /dev/null +++ b/libs/tmdb3/cache_file.py @@ -0,0 +1,391 @@ +#!/usr/bin/env python +# -*- coding: utf-8 -*- +#----------------------- +# Name: cache_file.py +# Python Library +# Author: Raymond Wagner +# Purpose: Persistant file-backed cache using /tmp/ to share data +# using flock or msvcrt.locking to allow safe concurrent +# access. +#----------------------- + +import struct +import errno +import json +import os +import io + +from cStringIO import StringIO + +from tmdb_exceptions import * +from cache_engine import CacheEngine, CacheObject + +#################### +# Cache File Format +#------------------ +# cache version (2) unsigned short +# slot count (2) unsigned short +# slot 0: timestamp (8) double +# slot 0: lifetime (4) unsigned int +# slot 0: seek point (4) unsigned int +# slot 1: timestamp +# slot 1: lifetime index slots are IDd by their query date and +# slot 1: seek point are filled incrementally forwards. lifetime +# .... is how long after query date before the item +# .... expires, and seek point is the location of the +# slot N-2: timestamp start of data for that entry. 256 empty slots +# slot N-2: lifetime are pre-allocated, allowing fast updates. +# slot N-2: seek point when all slots are filled, the cache file is +# slot N-1: timestamp rewritten from scrach to add more slots. +# slot N-1: lifetime +# slot N-1: seek point +# block 1 (?) ASCII +# block 2 +# .... blocks are just simple ASCII text, generated +# .... as independent objects by the JSON encoder +# block N-2 +# block N-1 +# +#################### + + +def _donothing(*args, **kwargs): + pass + +try: + import fcntl + class Flock( object ): + """ + Context manager to flock file for the duration the object exists. + Referenced file will be automatically unflocked as the interpreter + exits the context. + Supports an optional callback to process the error and optionally + suppress it. 
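Concretely, the manager wraps a read/write of the shared file like the sketch below (illustrative only; the path is arbitrary and the POSIX/fcntl variant is assumed):

import io
from tmdb3.cache_file import Flock   # assumes libs/ is on sys.path

fd = io.open('/tmp/example.lock', 'w+b')
with Flock(fd, Flock.LOCK_EX):       # exclusive lock held for the whole block
    fd.write(b'locked update\n')     # safe against other flock-aware writers
    fd.flush()
# leaving the block unlocks the file, even if an exception was raised
fd.close()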
+ """ + LOCK_EX = fcntl.LOCK_EX + LOCK_SH = fcntl.LOCK_SH + + def __init__(self, fileobj, operation, callback=None): + self.fileobj = fileobj + self.operation = operation + self.callback = callback + def __enter__(self): + fcntl.flock(self.fileobj, self.operation) + def __exit__(self, exc_type, exc_value, exc_tb): + suppress = False + if callable(self.callback): + suppress = self.callback(exc_type, exc_value, exc_tb) + fcntl.flock(self.fileobj, fcntl.LOCK_UN) + return suppress + + def parse_filename(filename): + if '$' in filename: + # replace any environmental variables + filename = os.path.expandvars(filename) + if filename.startswith('~'): + # check for home directory + return os.path.expanduser(filename) + elif filename.startswith('/'): + # check for absolute path + return filename + # return path with temp directory prepended + return '/tmp/' + filename + +except ImportError: + import msvcrt + class Flock( object ): + LOCK_EX = msvcrt.LK_LOCK + LOCK_SH = msvcrt.LK_LOCK + + def __init__(self, fileobj, operation, callback=None): + self.fileobj = fileobj + self.operation = operation + self.callback = callback + def __enter__(self): + self.size = os.path.getsize(self.fileobj.name) + msvcrt.locking(self.fileobj.fileno(), self.operation, self.size) + def __exit__(self, exc_type, exc_value, exc_tb): + suppress = False + if callable(self.callback): + suppress = self.callback(exc_type, exc_value, exc_tb) + msvcrt.locking(self.fileobj.fileno(), msvcrt.LK_UNLCK, self.size) + return suppress + + def parse_filename(filename): + if '%' in filename: + # replace any environmental variables + filename = os.path.expandvars(filename) + if filename.startswith('~'): + # check for home directory + return os.path.expanduser(filename) + elif (ord(filename[0]) in (range(65,91)+range(99,123))) \ + and (filename[1:3] == ':\\'): + # check for absolute drive path (e.g. C:\...) + return filename + elif (filename.count('\\') >= 3) and (filename.startswith('\\\\')): + # check for absolute UNC path (e.g. \\server\...) 
+ return filename + # return path with temp directory prepended + return os.path.expandvars(os.path.join('%TEMP%',filename)) + + +class FileCacheObject( CacheObject ): + _struct = struct.Struct('dII') # double and two ints + # timestamp, lifetime, position + + @classmethod + def fromFile(cls, fd): + dat = cls._struct.unpack(fd.read(cls._struct.size)) + obj = cls(None, None, dat[1], dat[0]) + obj.position = dat[2] + return obj + + def __init__(self, *args, **kwargs): + self._key = None + self._data = None + self._size = None + self._buff = StringIO() + super(FileCacheObject, self).__init__(*args, **kwargs) + + @property + def size(self): + if self._size is None: + self._buff.seek(0,2) + size = self._buff.tell() + if size == 0: + if (self._key is None) or (self._data is None): + raise RuntimeError + json.dump([self.key, self.data], self._buff) + self._size = self._buff.tell() + self._size = size + return self._size + @size.setter + def size(self, value): self._size = value + + @property + def key(self): + if self._key is None: + try: + self._key, self._data = json.loads(self._buff.getvalue()) + except: + pass + return self._key + @key.setter + def key(self, value): self._key = value + + @property + def data(self): + if self._data is None: + self._key, self._data = json.loads(self._buff.getvalue()) + return self._data + @data.setter + def data(self, value): self._data = value + + def load(self, fd): + fd.seek(self.position) + self._buff.seek(0) + self._buff.write(fd.read(self.size)) + + def dumpslot(self, fd): + pos = fd.tell() + fd.write(self._struct.pack(self.creation, self.lifetime, self.position)) + + def dumpdata(self, fd): + self.size + fd.seek(self.position) + fd.write(self._buff.getvalue()) + + +class FileEngine( CacheEngine ): + """Simple file-backed engine.""" + name = 'file' + _struct = struct.Struct('HH') # two shorts for version and count + _version = 2 + + def __init__(self, parent): + super(FileEngine, self).__init__(parent) + self.configure(None) + + def configure(self, filename, preallocate=256): + self.preallocate = preallocate + self.cachefile = filename + self.size = 0 + self.free = 0 + self.age = 0 + + def _init_cache(self): + # only run this once + self._init_cache = _donothing + + if self.cachefile is None: + raise TMDBCacheError("No cache filename given.") + + self.cachefile = parse_filename(self.cachefile) + + try: + # attempt to read existing cache at filename + # handle any errors that occur + self._open('r+b') + # seems to have read fine, make sure we have write access + if not os.access(self.cachefile, os.W_OK): + raise TMDBCacheWriteError(self.cachefile) + + except IOError as e: + if e.errno == errno.ENOENT: + # file does not exist, create a new one + try: + self._open('w+b') + self._write([]) + except IOError as e: + if e.errno == errno.ENOENT: + # directory does not exist + raise TMDBCacheDirectoryError(self.cachefile) + elif e.errno == errno.EACCES: + # user does not have rights to create new file + raise TMDBCacheWriteError(self.cachefile) + else: + # let the unhandled error continue through + raise + elif e.errno == errno.EACCESS: + # file exists, but we do not have permission to access it + raise TMDBCacheReadError(self.cachefile) + else: + # let the unhandled error continue through + raise + + def get(self, date): + self._init_cache() + self._open('r+b') + + with Flock(self.cachefd, Flock.LOCK_SH): # lock for shared access + # return any new objects in the cache + return self._read(date) + + def put(self, key, value, lifetime): + self._init_cache() + 
self._open('r+b') + + with Flock(self.cachefd, Flock.LOCK_EX): # lock for exclusive access + newobjs = self._read(self.age) + newobjs.append(FileCacheObject(key, value, lifetime)) + + # this will cause a new file object to be opened with the proper + # access mode, however the Flock should keep the old object open + # and properly locked + self._open('r+b') + self._write(newobjs) + return newobjs + + def _open(self, mode='r+b'): + # enforce binary operation + try: + if self.cachefd.mode == mode: + # already opened in requested mode, nothing to do + self.cachefd.seek(0) + return + except: pass # catch issue of no cachefile yet opened + self.cachefd = io.open(self.cachefile, mode) + + def _read(self, date): + try: + self.cachefd.seek(0) + version, count = self._struct.unpack(\ + self.cachefd.read(self._struct.size)) + if version != self._version: + # old version, break out and well rewrite when finished + raise Exception + + self.size = count + cache = [] + while count: + # loop through storage definitions + obj = FileCacheObject.fromFile(self.cachefd) + cache.append(obj) + count -= 1 + + except: + # failed to read information, so just discard it and return empty + self.size = 0 + self.free = 0 + return [] + + # get end of file + self.cachefd.seek(0,2) + position = self.cachefd.tell() + newobjs = [] + emptycount = 0 + + # walk backward through all, collecting new content and populating size + while len(cache): + obj = cache.pop() + if obj.creation == 0: + # unused slot, skip + emptycount += 1 + elif obj.expired: + # object has passed expiration date, no sense processing + continue + elif obj.creation > date: + # used slot with new data, process + obj.size, position = position - obj.position, obj.position + newobjs.append(obj) + # update age + self.age = max(self.age, obj.creation) + elif len(newobjs): + # end of new data, break + break + + # walk forward and load new content + for obj in newobjs: + obj.load(self.cachefd) + + self.free = emptycount + return newobjs + + def _write(self, data): + if self.free and (self.size != self.free): + # we only care about the last data point, since the rest are + # already stored in the file + data = data[-1] + + # determine write position of data in cache + self.cachefd.seek(0,2) + end = self.cachefd.tell() + data.position = end + + # write incremental update to free slot + self.cachefd.seek(4 + 16*(self.size-self.free)) + data.dumpslot(self.cachefd) + data.dumpdata(self.cachefd) + + else: + # rewrite cache file from scratch + # pull data from parent cache + data.extend(self.parent()._data.values()) + data.sort(key=lambda x: x.creation) + # write header + size = len(data) + self.preallocate + self.cachefd.seek(0) + self.cachefd.truncate() + self.cachefd.write(self._struct.pack(self._version, size)) + # write storage slot definitions + prev = None + for d in data: + if prev == None: + d.position = 4 + 16*size + else: + d.position = prev.position + prev.size + d.dumpslot(self.cachefd) + prev = d + # fill in allocated slots + for i in range(2**8): + self.cachefd.write(FileCacheObject._struct.pack(0, 0, 0)) + # write stored data + for d in data: + d.dumpdata(self.cachefd) + + self.cachefd.flush() + + def expire(self, key): + pass + + diff --git a/libs/tmdb3/cache_null.py b/libs/tmdb3/cache_null.py new file mode 100755 index 0000000..a59741c --- /dev/null +++ b/libs/tmdb3/cache_null.py @@ -0,0 +1,19 @@ +#!/usr/bin/env python +# -*- coding: utf-8 -*- +#----------------------- +# Name: cache_null.py +# Python Library +# Author: Raymond Wagner +# Purpose: Null 
caching engine for debugging purposes +#----------------------- + +from cache_engine import CacheEngine + +class NullEngine( CacheEngine ): + """Non-caching engine for debugging.""" + name = 'null' + def configure(self): pass + def get(self, date): return [] + def put(self, key, value, lifetime): return [] + def expire(self, key): pass + diff --git a/libs/tmdb3/locales.py b/libs/tmdb3/locales.py new file mode 100755 index 0000000..97efec7 --- /dev/null +++ b/libs/tmdb3/locales.py @@ -0,0 +1,634 @@ +#!/usr/bin/env python +# -*- coding: utf-8 -*- +#----------------------- +# Name: locales.py Stores locale information for filtering results +# Python Library +# Author: Raymond Wagner +#----------------------- + +from tmdb_exceptions import * +import locale + +syslocale = None + +class LocaleBase( object ): + __slots__ = ['__immutable'] + _stored = {} + fallthrough = False + + def __init__(self, *keys): + for key in keys: + self._stored[key.lower()] = self + self.__immutable = True + + def __setattr__(self, key, value): + if getattr(self, '__immutable', False): + raise NotImplementedError(self.__class__.__name__ + + ' does not support modification.') + super(LocaleBase, self).__setattr__(key, value) + + def __delattr__(self, key): + if getattr(self, '__immutable', False): + raise NotImplementedError(self.__class__.__name__ + + ' does not support modification.') + super(LocaleBase, self).__delattr__(key) + + def __lt__(self, other): + return (id(self) != id(other)) and (str(self) > str(other)) + def __gt__(self, other): + return (id(self) != id(other)) and (str(self) < str(other)) + def __eq__(self, other): + return (id(self) == id(other)) or (str(self) == str(other)) + + @classmethod + def getstored(cls, key): + if key is None: + return None + try: + return cls._stored[key.lower()] + except: + raise TMDBLocaleError("'{0}' is not a known valid {1} code."\ + .format(key, cls.__name__)) + +class Language( LocaleBase ): + __slots__ = ['ISO639_1', 'ISO639_2', 'ISO639_2B', 'englishname', + 'nativename'] + _stored = {} + + def __init__(self, iso1, iso2, ename): + self.ISO639_1 = iso1 + self.ISO639_2 = iso2 +# self.ISO639_2B = iso2b + self.englishname = ename +# self.nativename = nname + super(Language, self).__init__(iso1, iso2) + + def __str__(self): + return self.ISO639_1 + + def __repr__(self): + return u"".format(self) + +class Country( LocaleBase ): + __slots__ = ['alpha2', 'name'] + _stored = {} + + def __init__(self, alpha2, name): + self.alpha2 = alpha2 + self.name = name + super(Country, self).__init__(alpha2) + + def __str__(self): + return self.alpha2 + + def __repr__(self): + return u"".format(self) + +class Locale( LocaleBase ): + __slots__ = ['language', 'country', 'encoding'] + + def __init__(self, language, country, encoding): + self.language = Language.getstored(language) + self.country = Country.getstored(country) + self.encoding = encoding if encoding else 'latin-1' + + def __str__(self): + return u"{0}_{1}".format(self.language, self.country) + + def __repr__(self): + return u"".format(self) + + def encode(self, dat): + """Encode using system default encoding for network/file output.""" + try: + return dat.encode(self.encoding) + except AttributeError: + # not a string type, pass along + return dat + except UnicodeDecodeError: + # just return unmodified and hope for the best + return dat + + def decode(self, dat): + """Decode to system default encoding for internal use.""" + try: + return dat.decode(self.encoding) + except AttributeError: + # not a string type, pass along + return 
dat + except UnicodeEncodeError: + # just return unmodified and hope for the best + return dat + +def set_locale(language=None, country=None, fallthrough=False): + global syslocale + LocaleBase.fallthrough = fallthrough + + sysloc, sysenc = locale.getdefaultlocale() + + if (not language) or (not country): + dat = None + if syslocale is not None: + dat = (str(syslocale.language), str(syslocale.country)) + else: + if (sysloc is None) or ('_' not in sysloc): + dat = ('en', 'US') + else: + dat = sysloc.split('_') + if language is None: + language = dat[0] + if country is None: + country = dat[1] + + syslocale = Locale(language, country, sysenc) + +def get_locale(language=-1, country=-1): + """Output locale using provided attributes, or return system locale.""" + global syslocale + # pull existing stored values + if syslocale is None: + loc = Locale(None, None, locale.getdefaultlocale()[1]) + else: + loc = syslocale + + # both options are default, return stored values + if language == country == -1: + return loc + + # supplement default option with stored values + if language == -1: + language = loc.language + elif country == -1: + country = loc.country + return Locale(language, country, loc.encoding) + +######## AUTOGENERATED LANGUAGE AND COUNTRY DATA BELOW HERE ######### + +Language("ab", "abk", u"Abkhazian") +Language("aa", "aar", u"Afar") +Language("af", "afr", u"Afrikaans") +Language("ak", "aka", u"Akan") +Language("sq", "alb/sqi", u"Albanian") +Language("am", "amh", u"Amharic") +Language("ar", "ara", u"Arabic") +Language("an", "arg", u"Aragonese") +Language("hy", "arm/hye", u"Armenian") +Language("as", "asm", u"Assamese") +Language("av", "ava", u"Avaric") +Language("ae", "ave", u"Avestan") +Language("ay", "aym", u"Aymara") +Language("az", "aze", u"Azerbaijani") +Language("bm", "bam", u"Bambara") +Language("ba", "bak", u"Bashkir") +Language("eu", "baq/eus", u"Basque") +Language("be", "bel", u"Belarusian") +Language("bn", "ben", u"Bengali") +Language("bh", "bih", u"Bihari languages") +Language("bi", "bis", u"Bislama") +Language("nb", "nob", u"Bokmål, Norwegian") +Language("bs", "bos", u"Bosnian") +Language("br", "bre", u"Breton") +Language("bg", "bul", u"Bulgarian") +Language("my", "bur/mya", u"Burmese") +Language("es", "spa", u"Castilian") +Language("ca", "cat", u"Catalan") +Language("km", "khm", u"Central Khmer") +Language("ch", "cha", u"Chamorro") +Language("ce", "che", u"Chechen") +Language("ny", "nya", u"Chewa") +Language("ny", "nya", u"Chichewa") +Language("zh", "chi/zho", u"Chinese") +Language("za", "zha", u"Chuang") +Language("cu", "chu", u"Church Slavic") +Language("cu", "chu", u"Church Slavonic") +Language("cv", "chv", u"Chuvash") +Language("kw", "cor", u"Cornish") +Language("co", "cos", u"Corsican") +Language("cr", "cre", u"Cree") +Language("hr", "hrv", u"Croatian") +Language("cs", "cze/ces", u"Czech") +Language("da", "dan", u"Danish") +Language("dv", "div", u"Dhivehi") +Language("dv", "div", u"Divehi") +Language("nl", "dut/nld", u"Dutch") +Language("dz", "dzo", u"Dzongkha") +Language("en", "eng", u"English") +Language("eo", "epo", u"Esperanto") +Language("et", "est", u"Estonian") +Language("ee", "ewe", u"Ewe") +Language("fo", "fao", u"Faroese") +Language("fj", "fij", u"Fijian") +Language("fi", "fin", u"Finnish") +Language("nl", "dut/nld", u"Flemish") +Language("fr", "fre/fra", u"French") +Language("ff", "ful", u"Fulah") +Language("gd", "gla", u"Gaelic") +Language("gl", "glg", u"Galician") +Language("lg", "lug", u"Ganda") +Language("ka", "geo/kat", u"Georgian") +Language("de", 
"ger/deu", u"German") +Language("ki", "kik", u"Gikuyu") +Language("el", "gre/ell", u"Greek, Modern (1453-)") +Language("kl", "kal", u"Greenlandic") +Language("gn", "grn", u"Guarani") +Language("gu", "guj", u"Gujarati") +Language("ht", "hat", u"Haitian") +Language("ht", "hat", u"Haitian Creole") +Language("ha", "hau", u"Hausa") +Language("he", "heb", u"Hebrew") +Language("hz", "her", u"Herero") +Language("hi", "hin", u"Hindi") +Language("ho", "hmo", u"Hiri Motu") +Language("hu", "hun", u"Hungarian") +Language("is", "ice/isl", u"Icelandic") +Language("io", "ido", u"Ido") +Language("ig", "ibo", u"Igbo") +Language("id", "ind", u"Indonesian") +Language("ia", "ina", u"Interlingua (International Auxiliary Language Association)") +Language("ie", "ile", u"Interlingue") +Language("iu", "iku", u"Inuktitut") +Language("ik", "ipk", u"Inupiaq") +Language("ga", "gle", u"Irish") +Language("it", "ita", u"Italian") +Language("ja", "jpn", u"Japanese") +Language("jv", "jav", u"Javanese") +Language("kl", "kal", u"Kalaallisut") +Language("kn", "kan", u"Kannada") +Language("kr", "kau", u"Kanuri") +Language("ks", "kas", u"Kashmiri") +Language("kk", "kaz", u"Kazakh") +Language("ki", "kik", u"Kikuyu") +Language("rw", "kin", u"Kinyarwanda") +Language("ky", "kir", u"Kirghiz") +Language("kv", "kom", u"Komi") +Language("kg", "kon", u"Kongo") +Language("ko", "kor", u"Korean") +Language("kj", "kua", u"Kuanyama") +Language("ku", "kur", u"Kurdish") +Language("kj", "kua", u"Kwanyama") +Language("ky", "kir", u"Kyrgyz") +Language("lo", "lao", u"Lao") +Language("la", "lat", u"Latin") +Language("lv", "lav", u"Latvian") +Language("lb", "ltz", u"Letzeburgesch") +Language("li", "lim", u"Limburgan") +Language("li", "lim", u"Limburger") +Language("li", "lim", u"Limburgish") +Language("ln", "lin", u"Lingala") +Language("lt", "lit", u"Lithuanian") +Language("lu", "lub", u"Luba-Katanga") +Language("lb", "ltz", u"Luxembourgish") +Language("mk", "mac/mkd", u"Macedonian") +Language("mg", "mlg", u"Malagasy") +Language("ms", "may/msa", u"Malay") +Language("ml", "mal", u"Malayalam") +Language("dv", "div", u"Maldivian") +Language("mt", "mlt", u"Maltese") +Language("gv", "glv", u"Manx") +Language("mi", "mao/mri", u"Maori") +Language("mr", "mar", u"Marathi") +Language("mh", "mah", u"Marshallese") +Language("ro", "rum/ron", u"Moldavian") +Language("ro", "rum/ron", u"Moldovan") +Language("mn", "mon", u"Mongolian") +Language("na", "nau", u"Nauru") +Language("nv", "nav", u"Navaho") +Language("nv", "nav", u"Navajo") +Language("nd", "nde", u"Ndebele, North") +Language("nr", "nbl", u"Ndebele, South") +Language("ng", "ndo", u"Ndonga") +Language("ne", "nep", u"Nepali") +Language("nd", "nde", u"North Ndebele") +Language("se", "sme", u"Northern Sami") +Language("no", "nor", u"Norwegian") +Language("nb", "nob", u"Norwegian Bokmål") +Language("nn", "nno", u"Norwegian Nynorsk") +Language("ii", "iii", u"Nuosu") +Language("ny", "nya", u"Nyanja") +Language("nn", "nno", u"Nynorsk, Norwegian") +Language("ie", "ile", u"Occidental") +Language("oc", "oci", u"Occitan (post 1500)") +Language("oj", "oji", u"Ojibwa") +Language("cu", "chu", u"Old Bulgarian") +Language("cu", "chu", u"Old Church Slavonic") +Language("cu", "chu", u"Old Slavonic") +Language("or", "ori", u"Oriya") +Language("om", "orm", u"Oromo") +Language("os", "oss", u"Ossetian") +Language("os", "oss", u"Ossetic") +Language("pi", "pli", u"Pali") +Language("pa", "pan", u"Panjabi") +Language("ps", "pus", u"Pashto") +Language("fa", "per/fas", u"Persian") +Language("pl", "pol", u"Polish") +Language("pt", "por", 
u"Portuguese") +Language("pa", "pan", u"Punjabi") +Language("ps", "pus", u"Pushto") +Language("qu", "que", u"Quechua") +Language("ro", "rum/ron", u"Romanian") +Language("rm", "roh", u"Romansh") +Language("rn", "run", u"Rundi") +Language("ru", "rus", u"Russian") +Language("sm", "smo", u"Samoan") +Language("sg", "sag", u"Sango") +Language("sa", "san", u"Sanskrit") +Language("sc", "srd", u"Sardinian") +Language("gd", "gla", u"Scottish Gaelic") +Language("sr", "srp", u"Serbian") +Language("sn", "sna", u"Shona") +Language("ii", "iii", u"Sichuan Yi") +Language("sd", "snd", u"Sindhi") +Language("si", "sin", u"Sinhala") +Language("si", "sin", u"Sinhalese") +Language("sk", "slo/slk", u"Slovak") +Language("sl", "slv", u"Slovenian") +Language("so", "som", u"Somali") +Language("st", "sot", u"Sotho, Southern") +Language("nr", "nbl", u"South Ndebele") +Language("es", "spa", u"Spanish") +Language("su", "sun", u"Sundanese") +Language("sw", "swa", u"Swahili") +Language("ss", "ssw", u"Swati") +Language("sv", "swe", u"Swedish") +Language("tl", "tgl", u"Tagalog") +Language("ty", "tah", u"Tahitian") +Language("tg", "tgk", u"Tajik") +Language("ta", "tam", u"Tamil") +Language("tt", "tat", u"Tatar") +Language("te", "tel", u"Telugu") +Language("th", "tha", u"Thai") +Language("bo", "tib/bod", u"Tibetan") +Language("ti", "tir", u"Tigrinya") +Language("to", "ton", u"Tonga (Tonga Islands)") +Language("ts", "tso", u"Tsonga") +Language("tn", "tsn", u"Tswana") +Language("tr", "tur", u"Turkish") +Language("tk", "tuk", u"Turkmen") +Language("tw", "twi", u"Twi") +Language("ug", "uig", u"Uighur") +Language("uk", "ukr", u"Ukrainian") +Language("ur", "urd", u"Urdu") +Language("ug", "uig", u"Uyghur") +Language("uz", "uzb", u"Uzbek") +Language("ca", "cat", u"Valencian") +Language("ve", "ven", u"Venda") +Language("vi", "vie", u"Vietnamese") +Language("vo", "vol", u"Volapük") +Language("wa", "wln", u"Walloon") +Language("cy", "wel/cym", u"Welsh") +Language("fy", "fry", u"Western Frisian") +Language("wo", "wol", u"Wolof") +Language("xh", "xho", u"Xhosa") +Language("yi", "yid", u"Yiddish") +Language("yo", "yor", u"Yoruba") +Language("za", "zha", u"Zhuang") +Language("zu", "zul", u"Zulu") +Country("AF", u"AFGHANISTAN") +Country("AX", u"ÅLAND ISLANDS") +Country("AL", u"ALBANIA") +Country("DZ", u"ALGERIA") +Country("AS", u"AMERICAN SAMOA") +Country("AD", u"ANDORRA") +Country("AO", u"ANGOLA") +Country("AI", u"ANGUILLA") +Country("AQ", u"ANTARCTICA") +Country("AG", u"ANTIGUA AND BARBUDA") +Country("AR", u"ARGENTINA") +Country("AM", u"ARMENIA") +Country("AW", u"ARUBA") +Country("AU", u"AUSTRALIA") +Country("AT", u"AUSTRIA") +Country("AZ", u"AZERBAIJAN") +Country("BS", u"BAHAMAS") +Country("BH", u"BAHRAIN") +Country("BD", u"BANGLADESH") +Country("BB", u"BARBADOS") +Country("BY", u"BELARUS") +Country("BE", u"BELGIUM") +Country("BZ", u"BELIZE") +Country("BJ", u"BENIN") +Country("BM", u"BERMUDA") +Country("BT", u"BHUTAN") +Country("BO", u"BOLIVIA, PLURINATIONAL STATE OF") +Country("BQ", u"BONAIRE, SINT EUSTATIUS AND SABA") +Country("BA", u"BOSNIA AND HERZEGOVINA") +Country("BW", u"BOTSWANA") +Country("BV", u"BOUVET ISLAND") +Country("BR", u"BRAZIL") +Country("IO", u"BRITISH INDIAN OCEAN TERRITORY") +Country("BN", u"BRUNEI DARUSSALAM") +Country("BG", u"BULGARIA") +Country("BF", u"BURKINA FASO") +Country("BI", u"BURUNDI") +Country("KH", u"CAMBODIA") +Country("CM", u"CAMEROON") +Country("CA", u"CANADA") +Country("CV", u"CAPE VERDE") +Country("KY", u"CAYMAN ISLANDS") +Country("CF", u"CENTRAL AFRICAN REPUBLIC") +Country("TD", u"CHAD") 
+Country("CL", u"CHILE") +Country("CN", u"CHINA") +Country("CX", u"CHRISTMAS ISLAND") +Country("CC", u"COCOS (KEELING) ISLANDS") +Country("CO", u"COLOMBIA") +Country("KM", u"COMOROS") +Country("CG", u"CONGO") +Country("CD", u"CONGO, THE DEMOCRATIC REPUBLIC OF THE") +Country("CK", u"COOK ISLANDS") +Country("CR", u"COSTA RICA") +Country("CI", u"CÔTE D'IVOIRE") +Country("HR", u"CROATIA") +Country("CU", u"CUBA") +Country("CW", u"CURAÇAO") +Country("CY", u"CYPRUS") +Country("CZ", u"CZECH REPUBLIC") +Country("DK", u"DENMARK") +Country("DJ", u"DJIBOUTI") +Country("DM", u"DOMINICA") +Country("DO", u"DOMINICAN REPUBLIC") +Country("EC", u"ECUADOR") +Country("EG", u"EGYPT") +Country("SV", u"EL SALVADOR") +Country("GQ", u"EQUATORIAL GUINEA") +Country("ER", u"ERITREA") +Country("EE", u"ESTONIA") +Country("ET", u"ETHIOPIA") +Country("FK", u"FALKLAND ISLANDS (MALVINAS)") +Country("FO", u"FAROE ISLANDS") +Country("FJ", u"FIJI") +Country("FI", u"FINLAND") +Country("FR", u"FRANCE") +Country("GF", u"FRENCH GUIANA") +Country("PF", u"FRENCH POLYNESIA") +Country("TF", u"FRENCH SOUTHERN TERRITORIES") +Country("GA", u"GABON") +Country("GM", u"GAMBIA") +Country("GE", u"GEORGIA") +Country("DE", u"GERMANY") +Country("GH", u"GHANA") +Country("GI", u"GIBRALTAR") +Country("GR", u"GREECE") +Country("GL", u"GREENLAND") +Country("GD", u"GRENADA") +Country("GP", u"GUADELOUPE") +Country("GU", u"GUAM") +Country("GT", u"GUATEMALA") +Country("GG", u"GUERNSEY") +Country("GN", u"GUINEA") +Country("GW", u"GUINEA-BISSAU") +Country("GY", u"GUYANA") +Country("HT", u"HAITI") +Country("HM", u"HEARD ISLAND AND MCDONALD ISLANDS") +Country("VA", u"HOLY SEE (VATICAN CITY STATE)") +Country("HN", u"HONDURAS") +Country("HK", u"HONG KONG") +Country("HU", u"HUNGARY") +Country("IS", u"ICELAND") +Country("IN", u"INDIA") +Country("ID", u"INDONESIA") +Country("IR", u"IRAN, ISLAMIC REPUBLIC OF") +Country("IQ", u"IRAQ") +Country("IE", u"IRELAND") +Country("IM", u"ISLE OF MAN") +Country("IL", u"ISRAEL") +Country("IT", u"ITALY") +Country("JM", u"JAMAICA") +Country("JP", u"JAPAN") +Country("JE", u"JERSEY") +Country("JO", u"JORDAN") +Country("KZ", u"KAZAKHSTAN") +Country("KE", u"KENYA") +Country("KI", u"KIRIBATI") +Country("KP", u"KOREA, DEMOCRATIC PEOPLE'S REPUBLIC OF") +Country("KR", u"KOREA, REPUBLIC OF") +Country("KW", u"KUWAIT") +Country("KG", u"KYRGYZSTAN") +Country("LA", u"LAO PEOPLE'S DEMOCRATIC REPUBLIC") +Country("LV", u"LATVIA") +Country("LB", u"LEBANON") +Country("LS", u"LESOTHO") +Country("LR", u"LIBERIA") +Country("LY", u"LIBYA") +Country("LI", u"LIECHTENSTEIN") +Country("LT", u"LITHUANIA") +Country("LU", u"LUXEMBOURG") +Country("MO", u"MACAO") +Country("MK", u"MACEDONIA, THE FORMER YUGOSLAV REPUBLIC OF") +Country("MG", u"MADAGASCAR") +Country("MW", u"MALAWI") +Country("MY", u"MALAYSIA") +Country("MV", u"MALDIVES") +Country("ML", u"MALI") +Country("MT", u"MALTA") +Country("MH", u"MARSHALL ISLANDS") +Country("MQ", u"MARTINIQUE") +Country("MR", u"MAURITANIA") +Country("MU", u"MAURITIUS") +Country("YT", u"MAYOTTE") +Country("MX", u"MEXICO") +Country("FM", u"MICRONESIA, FEDERATED STATES OF") +Country("MD", u"MOLDOVA, REPUBLIC OF") +Country("MC", u"MONACO") +Country("MN", u"MONGOLIA") +Country("ME", u"MONTENEGRO") +Country("MS", u"MONTSERRAT") +Country("MA", u"MOROCCO") +Country("MZ", u"MOZAMBIQUE") +Country("MM", u"MYANMAR") +Country("NA", u"NAMIBIA") +Country("NR", u"NAURU") +Country("NP", u"NEPAL") +Country("NL", u"NETHERLANDS") +Country("NC", u"NEW CALEDONIA") +Country("NZ", u"NEW ZEALAND") +Country("NI", u"NICARAGUA") +Country("NE", 
u"NIGER") +Country("NG", u"NIGERIA") +Country("NU", u"NIUE") +Country("NF", u"NORFOLK ISLAND") +Country("MP", u"NORTHERN MARIANA ISLANDS") +Country("NO", u"NORWAY") +Country("OM", u"OMAN") +Country("PK", u"PAKISTAN") +Country("PW", u"PALAU") +Country("PS", u"PALESTINIAN TERRITORY, OCCUPIED") +Country("PA", u"PANAMA") +Country("PG", u"PAPUA NEW GUINEA") +Country("PY", u"PARAGUAY") +Country("PE", u"PERU") +Country("PH", u"PHILIPPINES") +Country("PN", u"PITCAIRN") +Country("PL", u"POLAND") +Country("PT", u"PORTUGAL") +Country("PR", u"PUERTO RICO") +Country("QA", u"QATAR") +Country("RE", u"RÉUNION") +Country("RO", u"ROMANIA") +Country("RU", u"RUSSIAN FEDERATION") +Country("RW", u"RWANDA") +Country("BL", u"SAINT BARTHÉLEMY") +Country("SH", u"SAINT HELENA, ASCENSION AND TRISTAN DA CUNHA") +Country("KN", u"SAINT KITTS AND NEVIS") +Country("LC", u"SAINT LUCIA") +Country("MF", u"SAINT MARTIN (FRENCH PART)") +Country("PM", u"SAINT PIERRE AND MIQUELON") +Country("VC", u"SAINT VINCENT AND THE GRENADINES") +Country("WS", u"SAMOA") +Country("SM", u"SAN MARINO") +Country("ST", u"SAO TOME AND PRINCIPE") +Country("SA", u"SAUDI ARABIA") +Country("SN", u"SENEGAL") +Country("RS", u"SERBIA") +Country("SC", u"SEYCHELLES") +Country("SL", u"SIERRA LEONE") +Country("SG", u"SINGAPORE") +Country("SX", u"SINT MAARTEN (DUTCH PART)") +Country("SK", u"SLOVAKIA") +Country("SI", u"SLOVENIA") +Country("SB", u"SOLOMON ISLANDS") +Country("SO", u"SOMALIA") +Country("ZA", u"SOUTH AFRICA") +Country("GS", u"SOUTH GEORGIA AND THE SOUTH SANDWICH ISLANDS") +Country("SS", u"SOUTH SUDAN") +Country("ES", u"SPAIN") +Country("LK", u"SRI LANKA") +Country("SD", u"SUDAN") +Country("SR", u"SURINAME") +Country("SJ", u"SVALBARD AND JAN MAYEN") +Country("SZ", u"SWAZILAND") +Country("SE", u"SWEDEN") +Country("CH", u"SWITZERLAND") +Country("SY", u"SYRIAN ARAB REPUBLIC") +Country("TW", u"TAIWAN, PROVINCE OF CHINA") +Country("TJ", u"TAJIKISTAN") +Country("TZ", u"TANZANIA, UNITED REPUBLIC OF") +Country("TH", u"THAILAND") +Country("TL", u"TIMOR-LESTE") +Country("TG", u"TOGO") +Country("TK", u"TOKELAU") +Country("TO", u"TONGA") +Country("TT", u"TRINIDAD AND TOBAGO") +Country("TN", u"TUNISIA") +Country("TR", u"TURKEY") +Country("TM", u"TURKMENISTAN") +Country("TC", u"TURKS AND CAICOS ISLANDS") +Country("TV", u"TUVALU") +Country("UG", u"UGANDA") +Country("UA", u"UKRAINE") +Country("AE", u"UNITED ARAB EMIRATES") +Country("GB", u"UNITED KINGDOM") +Country("US", u"UNITED STATES") +Country("UM", u"UNITED STATES MINOR OUTLYING ISLANDS") +Country("UY", u"URUGUAY") +Country("UZ", u"UZBEKISTAN") +Country("VU", u"VANUATU") +Country("VE", u"VENEZUELA, BOLIVARIAN REPUBLIC OF") +Country("VN", u"VIET NAM") +Country("VG", u"VIRGIN ISLANDS, BRITISH") +Country("VI", u"VIRGIN ISLANDS, U.S.") +Country("WF", u"WALLIS AND FUTUNA") +Country("EH", u"WESTERN SAHARA") +Country("YE", u"YEMEN") +Country("ZM", u"ZAMBIA") +Country("ZW", u"ZIMBABWE") diff --git a/libs/tmdb3/pager.py b/libs/tmdb3/pager.py new file mode 100755 index 0000000..6cb874c --- /dev/null +++ b/libs/tmdb3/pager.py @@ -0,0 +1,109 @@ +#!/usr/bin/env python +# -*- coding: utf-8 -*- +#----------------------- +# Name: pager.py List-like structure designed for handling paged results +# Python Library +# Author: Raymond Wagner +#----------------------- + +from collections import Sequence, Iterator + +class PagedIterator( Iterator ): + def __init__(self, parent): + self._parent = parent + self._index = -1 + self._len = len(parent) + + def __iter__(self): + return self + + def next(self): + self._index += 1 + if 
self._index == self._len: + raise StopIteration + return self._parent[self._index] + +class UnpagedData( object ): + def copy(self): + return self.__class__() + + def __mul__(self, other): + return (self.copy() for a in range(other)) + + def __rmul__(self, other): + return (self.copy() for a in range(other)) + +class PagedList( Sequence ): + """ + List-like object, with support for automatically grabbing additional + pages from a data source. + """ + _iter_class = None + + def __iter__(self): + if self._iter_class is None: + self._iter_class = type(self.__class__.__name__ + 'Iterator', + (PagedIterator,), {}) + return self._iter_class(self) + + def __len__(self): + try: + return self._len + except: + return len(self._data) + + def __init__(self, iterable, pagesize=20): + self._data = list(iterable) + self._pagesize = pagesize + + def __getitem__(self, index): + if isinstance(index, slice): + return [self[x] for x in xrange(*index.indices(len(self)))] + if index >= len(self): + raise IndexError("list index outside range") + if (index >= len(self._data)) \ + or isinstance(self._data[index], UnpagedData): + self._populatepage(index/self._pagesize + 1) + return self._data[index] + + def __setitem__(self, index, value): + raise NotImplementedError + + def __delitem__(self, index): + raise NotImplementedError + + def __contains__(self, item): + raise NotImplementedError + + def _populatepage(self, page): + pagestart = (page-1) * self._pagesize + if len(self._data) < pagestart: + self._data.extend(UnpagedData()*(pagestart-len(self._data))) + if len(self._data) == pagestart: + self._data.extend(self._getpage(page)) + else: + for data in self._getpage(page): + self._data[pagestart] = data + pagestart += 1 + + def _getpage(self, page): + raise NotImplementedError("PagedList._getpage() must be provided "+\ + "by subclass") + +class PagedRequest( PagedList ): + """ + Derived PageList that provides a list-like object with automatic paging + intended for use with search requests. + """ + def __init__(self, request, handler=None): + self._request = request + if handler: self._handler = handler + super(PagedRequest, self).__init__(self._getpage(1), 20) + + def _getpage(self, page): + req = self._request.new(page=page) + res = req.readJSON() + self._len = res['total_results'] + for item in res['results']: + yield self._handler(item) + diff --git a/libs/tmdb3/request.py b/libs/tmdb3/request.py new file mode 100755 index 0000000..109630d --- /dev/null +++ b/libs/tmdb3/request.py @@ -0,0 +1,157 @@ +#!/usr/bin/env python +# -*- coding: utf-8 -*- +#----------------------- +# Name: tmdb_request.py +# Python Library +# Author: Raymond Wagner +# Purpose: Wrapped urllib2.Request class pre-configured for accessing the +# TMDb v3 API +#----------------------- + +from tmdb_exceptions import * +from locales import get_locale +from cache import Cache + +from urllib import urlencode +import urllib2 +import json + +DEBUG = False +cache = Cache(filename='pytmdb3.cache') + +#DEBUG = True +#cache = Cache(engine='null') + +def set_key(key): + """ + Specify the API key to use retrieving data from themoviedb.org. This + key must be set before any calls will function. 
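Putting the request layer together: the key is registered once and every Request built afterwards carries it. A minimal sketch (illustrative only — the key shown is a placeholder and the call needs network access to themoviedb.org):

from tmdb3 import set_key, set_cache          # exported via libs/tmdb3/__init__.py
from tmdb3.request import Request

set_key('0123456789abcdef0123456789abcdef')   # placeholder 32-character hex key
set_cache(engine='null')                      # bypass the file cache for this demo
data = Request('movie/550').readJSON()        # GET /3/movie/550, parsed into a dict
print data['title']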
+ """ + if len(key) != 32: + raise TMDBKeyInvalid("Specified API key must be 128-bit hex") + try: + int(key, 16) + except: + raise TMDBKeyInvalid("Specified API key must be 128-bit hex") + Request._api_key = key + +def set_cache(engine=None, *args, **kwargs): + """Specify caching engine and properties.""" + cache.configure(engine, *args, **kwargs) + +class Request( urllib2.Request ): + _api_key = None + _base_url = "http://api.themoviedb.org/3/" + + @property + def api_key(self): + if self._api_key is None: + raise TMDBKeyMissing("API key must be specified before "+\ + "requests can be made") + return self._api_key + + def __init__(self, url, **kwargs): + """Return a request object, using specified API path and arguments.""" + kwargs['api_key'] = self.api_key + self._url = url.lstrip('/') + self._kwargs = dict([(kwa,kwv) for kwa,kwv in kwargs.items() + if kwv is not None]) + + locale = get_locale() + kwargs = {} + for k,v in self._kwargs.items(): + kwargs[k] = locale.encode(v) + url = '{0}{1}?{2}'.format(self._base_url, self._url, urlencode(kwargs)) + + urllib2.Request.__init__(self, url) + self.add_header('Accept', 'application/json') + self.lifetime = 3600 # 1hr + + def new(self, **kwargs): + """Create a new instance of the request, with tweaked arguments.""" + args = dict(self._kwargs) + for k,v in kwargs.items(): + if v is None: + if k in args: + del args[k] + else: + args[k] = v + obj = self.__class__(self._url, **args) + obj.lifetime = self.lifetime + return obj + + def add_data(self, data): + """Provide data to be sent with POST.""" + urllib2.Request.add_data(self, urlencode(data)) + + def open(self): + """Open a file object to the specified URL.""" + try: + if DEBUG: + print 'loading '+self.get_full_url() + if self.has_data(): + print ' '+self.get_data() + return urllib2.urlopen(self) + except urllib2.HTTPError, e: + raise TMDBHTTPError(e) + + def read(self): + """Return result from specified URL as a string.""" + return self.open().read() + + @cache.cached(urllib2.Request.get_full_url) + def readJSON(self): + """Parse result from specified URL as JSON data.""" + url = self.get_full_url() + try: + # catch HTTP error from open() + data = json.load(self.open()) + except TMDBHTTPError, e: + try: + # try to load whatever was returned + data = json.loads(e.response) + except: + # cannot parse json, just raise existing error + raise e + else: + # response parsed, try to raise error from TMDB + handle_status(data, url) + # no error from TMDB, just raise existing error + raise e + handle_status(data, url) + #if DEBUG: + # import pprint + # pprint.PrettyPrinter().pprint(data) + return data + +status_handlers = { + 1: None, + 2: TMDBRequestInvalid('Invalid service - This service does not exist.'), + 3: TMDBRequestError('Authentication Failed - You do not have '+\ + 'permissions to access this service.'), + 4: TMDBRequestInvalid("Invalid format - This service doesn't exist "+\ + 'in that format.'), + 5: TMDBRequestInvalid('Invalid parameters - Your request parameters '+\ + 'are incorrect.'), + 6: TMDBRequestInvalid('Invalid id - The pre-requisite id is invalid '+\ + 'or not found.'), + 7: TMDBKeyInvalid('Invalid API key - You must be granted a valid key.'), + 8: TMDBRequestError('Duplicate entry - The data you tried to submit '+\ + 'already exists.'), + 9: TMDBOffline('This service is tempirarily offline. Try again later.'), + 10: TMDBKeyRevoked('Suspended API key - Access to your account has been '+\ + 'suspended, contact TMDB.'), + 11: TMDBError('Internal error - Something went wrong. 
Contact TMDb.'), + 12: None, + 13: None, + 14: TMDBRequestError('Authentication Failed.'), + 15: TMDBError('Failed'), + 16: TMDBError('Device Denied'), + 17: TMDBError('Session Denied')} + +def handle_status(data, query): + status = status_handlers[data.get('status_code', 1)] + if status is not None: + status.tmdberrno = data['status_code'] + status.query = query + raise status diff --git a/libs/tmdb3/tmdb_api.py b/libs/tmdb3/tmdb_api.py new file mode 100755 index 0000000..b5cb0a9 --- /dev/null +++ b/libs/tmdb3/tmdb_api.py @@ -0,0 +1,689 @@ +#!/usr/bin/env python +# -*- coding: utf-8 -*- +#----------------------- +# Name: tmdb_api.py Simple-to-use Python interface to TMDB's API v3 +# Python Library +# Author: Raymond Wagner +# Purpose: This Python library is intended to provide a series of classes +# and methods for search and retrieval of text metadata and image +# URLs from TMDB. +# Preliminary API specifications can be found at +# http://help.themoviedb.org/kb/api/about-3 +# License: Creative Commons GNU GPL v2 +# (http://creativecommons.org/licenses/GPL/2.0/) +#----------------------- + +__title__ = "tmdb_api - Simple-to-use Python interface to TMDB's API v3 "+\ + "(www.themoviedb.org)" +__author__ = "Raymond Wagner" +__purpose__ = """ +This Python library is intended to provide a series of classes and methods +for search and retrieval of text metadata and image URLs from TMDB. +Preliminary API specifications can be found at +http://help.themoviedb.org/kb/api/about-3""" + +__version__="v0.6.17" +# 0.1.0 Initial development +# 0.2.0 Add caching mechanism for API queries +# 0.2.1 Temporary work around for broken search paging +# 0.3.0 Rework backend machinery for managing OO interface to results +# 0.3.1 Add collection support +# 0.3.2 Remove MythTV key from results.py +# 0.3.3 Add functional language support +# 0.3.4 Re-enable search paging +# 0.3.5 Add methods for grabbing current, popular, and top rated movies +# 0.3.6 Rework paging mechanism +# 0.3.7 Generalize caching mechanism, and allow controllability +# 0.4.0 Add full locale support (language and country) and optional fall through +# 0.4.1 Add custom classmethod for dealing with IMDB movie IDs +# 0.4.2 Improve cache file selection for Windows systems +# 0.4.3 Add a few missed Person properties +# 0.4.4 Add support for additional Studio information +# 0.4.5 Add locale fallthrough for images and alternate titles +# 0.4.6 Add slice support for search results +# 0.5.0 Rework cache framework and improve file cache performance +# 0.6.0 Add user authentication support +# 0.6.1 Add adult filtering for people searches +# 0.6.2 Add similar movie search for Movie objects +# 0.6.3 Add Studio search +# 0.6.4 Add Genre list and associated Movie search +# 0.6.5 Prevent data from being blanked out by subsequent queries +# 0.6.6 Turn date processing errors into mutable warnings +# 0.6.7 Add support for searching by year +# 0.6.8 Add support for collection images +# 0.6.9 Correct Movie image language filtering +# 0.6.10 Add upcoming movie classmethod +# 0.6.11 Fix URL for top rated Movie query +# 0.6.12 Add support for Movie watchlist query and editing +# 0.6.13 Fix URL for rating Movies +# 0.6.14 Add support for Lists +# 0.6.15 Add ability to search Collections +# 0.6.16 Make absent primary images return None (previously u'') +# 0.6.17 Add userrating/votes to Image, add overview to Collection, remove +# releasedate sorting from Collection Movies + +from request import set_key, Request +from util import Datapoint, Datalist, Datadict, Element, 
NameRepr, SearchRepr +from pager import PagedRequest +from locales import get_locale, set_locale +from tmdb_auth import get_session, set_session +from tmdb_exceptions import * + +import datetime + +DEBUG = False + +def process_date(datestr): + try: + return datetime.date(*[int(x) for x in datestr.split('-')]) + except (TypeError, ValueError): + import sys + import warnings + import traceback + _,_,tb = sys.exc_info() + f,l,_,_ = traceback.extract_tb(tb)[-1] + warnings.warn_explicit(('"{0}" is not a supported date format. ' + 'Please fix upstream data at http://www.themoviedb.org.')\ + .format(datestr), Warning, f, l) + return None + +class Configuration( Element ): + images = Datapoint('images') + def _populate(self): + return Request('configuration') +Configuration = Configuration() + +class Account( NameRepr, Element ): + def _populate(self): + return Request('account', session_id=self._session.sessionid) + + id = Datapoint('id') + adult = Datapoint('include_adult') + country = Datapoint('iso_3166_1') + language = Datapoint('iso_639_1') + name = Datapoint('name') + username = Datapoint('username') + + @property + def locale(self): + return get_locale(self.language, self.country) + +def searchMovie(query, locale=None, adult=False, year=None): + kwargs = {'query':query, 'include_adult':adult} + if year is not None: + try: + kwargs['year'] = year.year + except AttributeError: + kwargs['year'] = year + return MovieSearchResult(Request('search/movie', **kwargs), locale=locale) + +def searchMovieWithYear(query, locale=None, adult=False): + year = None + if (len(query) > 6) and (query[-1] == ')') and (query[-6] == '('): + # simple syntax check, no need for regular expression + try: + year = int(query[-5:-1]) + except ValueError: + pass + else: + if 1885 < year < 2050: + # strip out year from search + query = query[:-7] + else: + # sanity check on resolved year failed, pass through + year = None + return searchMovie(query, locale, adult, year) + +class MovieSearchResult( SearchRepr, PagedRequest ): + """Stores a list of search matches.""" + _name = None + def __init__(self, request, locale=None): + if locale is None: + locale = get_locale() + super(MovieSearchResult, self).__init__( + request.new(language=locale.language), + lambda x: Movie(raw=x, locale=locale)) + +def searchPerson(query, adult=False): + return PeopleSearchResult(Request('search/person', query=query, + include_adult=adult)) + +class PeopleSearchResult( SearchRepr, PagedRequest ): + """Stores a list of search matches.""" + _name = None + def __init__(self, request): + super(PeopleSearchResult, self).__init__(request, + lambda x: Person(raw=x)) + +def searchStudio(query): + return StudioSearchResult(Request('search/company', query=query)) + +class StudioSearchResult( SearchRepr, PagedRequest ): + """Stores a list of search matches.""" + _name = None + def __init__(self, request): + super(StudioSearchResult, self).__init__(request, + lambda x: Studio(raw=x)) + +def searchList(query, adult=False): + ListSearchResult(Request('search/list', query=query, include_adult=adult)) + +class ListSearchResult( SearchRepr, PagedRequest ): + """Stores a list of search matches.""" + _name = None + def __init__(self, request): + super(ListSearchResult, self).__init__(request, + lambda x: List(raw=x)) + +def searchCollection(query, locale=None): + return CollectionSearchResult(Request('search/collection', query=query), + locale=locale) + +class CollectionSearchResult( SearchRepr, PagedRequest ): + """Stores a list of search matches.""" + 
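A rough usage sketch of the search helpers defined above: the key shown is a placeholder (any 32-character hex string passes set_key(), but only a real TMDB key will return data), and the import path assumes CouchPotato's libs/ directory is on sys.path.

    from tmdb3.tmdb_api import set_key, searchMovieWithYear

    set_key('0123456789abcdef0123456789abcdef')   # placeholder 32-character hex key

    results = searchMovieWithYear('Back to the Future (1985)')   # trailing (year) is stripped and sent separately
    print len(results)         # total_results reported by TMDB
    for movie in results:      # PagedRequest fetches further pages as iteration proceeds
        print movie.title, movie.releasedate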
_name=None + def __init__(self, request, locale=None): + if locale is None: + locale = get_locale() + super(CollectionSearchResult, self).__init__( + request.new(language=locale.language), + lambda x: Collection(raw=x, locale=locale)) + +class Image( Element ): + filename = Datapoint('file_path', initarg=1, + handler=lambda x: x.lstrip('/')) + aspectratio = Datapoint('aspect_ratio') + height = Datapoint('height') + width = Datapoint('width') + language = Datapoint('iso_639_1') + userrating = Datapoint('vote_average') + votes = Datapoint('vote_count') + + def sizes(self): + return ['original'] + + def geturl(self, size='original'): + if size not in self.sizes(): + raise TMDBImageSizeError + url = Configuration.images['base_url'].rstrip('/') + return url+'/{0}/{1}'.format(size, self.filename) + + # sort preferring locale's language, but keep remaining ordering consistent + def __lt__(self, other): + return (self.language == self._locale.language) \ + and (self.language != other.language) + def __gt__(self, other): + return (self.language != other.language) \ + and (other.language == self._locale.language) + # direct match for comparison + def __eq__(self, other): + return self.filename == other.filename + # special handling for boolean to see if exists + def __nonzero__(self): + if len(self.filename) == 0: + return False + return True + + def __repr__(self): + # BASE62 encoded filename, no need to worry about unicode + return u"<{0.__class__.__name__} '{0.filename}'>".format(self) + +class Backdrop( Image ): + def sizes(self): + return Configuration.images['backdrop_sizes'] +class Poster( Image ): + def sizes(self): + return Configuration.images['poster_sizes'] +class Profile( Image ): + def sizes(self): + return Configuration.images['profile_sizes'] +class Logo( Image ): + def sizes(self): + return Configuration.images['logo_sizes'] + +class AlternateTitle( Element ): + country = Datapoint('iso_3166_1') + title = Datapoint('title') + + # sort preferring locale's country, but keep remaining ordering consistent + def __lt__(self, other): + return (self.country == self._locale.country) \ + and (self.country != other.country) + def __gt__(self, other): + return (self.country != other.country) \ + and (other.country == self._locale.country) + def __eq__(self, other): + return self.country == other.country + + def __repr__(self): + return u"<{0.__class__.__name__} '{0.title}' ({0.country})>"\ + .format(self).encode('utf-8') + +class Person( Element ): + id = Datapoint('id', initarg=1) + name = Datapoint('name') + biography = Datapoint('biography') + dayofbirth = Datapoint('birthday', default=None, handler=process_date) + dayofdeath = Datapoint('deathday', default=None, handler=process_date) + homepage = Datapoint('homepage') + birthplace = Datapoint('place_of_birth') + profile = Datapoint('profile_path', handler=Profile, \ + raw=False, default=None) + adult = Datapoint('adult') + aliases = Datalist('also_known_as') + + def __repr__(self): + return u"<{0.__class__.__name__} '{0.name}'>"\ + .format(self).encode('utf-8') + + def _populate(self): + return Request('person/{0}'.format(self.id)) + def _populate_credits(self): + return Request('person/{0}/credits'.format(self.id), \ + language=self._locale.language) + def _populate_images(self): + return Request('person/{0}/images'.format(self.id)) + + roles = Datalist('cast', handler=lambda x: ReverseCast(raw=x), \ + poller=_populate_credits) + crew = Datalist('crew', handler=lambda x: ReverseCrew(raw=x), \ + poller=_populate_credits) + profiles = 
Datalist('profiles', handler=Profile, poller=_populate_images) + +class Cast( Person ): + character = Datapoint('character') + order = Datapoint('order') + + def __repr__(self): + return u"<{0.__class__.__name__} '{0.name}' as '{0.character}'>"\ + .format(self).encode('utf-8') + +class Crew( Person ): + job = Datapoint('job') + department = Datapoint('department') + + def __repr__(self): + return u"<{0.__class__.__name__} '{0.name}','{0.job}'>"\ + .format(self).encode('utf-8') + +class Keyword( Element ): + id = Datapoint('id') + name = Datapoint('name') + + def __repr__(self): + return u"<{0.__class__.__name__} {0.name}>".format(self).encode('utf-8') + +class Release( Element ): + certification = Datapoint('certification') + country = Datapoint('iso_3166_1') + releasedate = Datapoint('release_date', handler=process_date) + def __repr__(self): + return u"<{0.__class__.__name__} {0.country}, {0.releasedate}>"\ + .format(self).encode('utf-8') + +class Trailer( Element ): + name = Datapoint('name') + size = Datapoint('size') + source = Datapoint('source') + +class YoutubeTrailer( Trailer ): + def geturl(self): + return "http://www.youtube.com/watch?v={0}".format(self.source) + + def __repr__(self): + # modified BASE64 encoding, no need to worry about unicode + return u"<{0.__class__.__name__} '{0.name}'>".format(self) + +class AppleTrailer( Element ): + name = Datapoint('name') + sources = Datadict('sources', handler=Trailer, attr='size') + + def sizes(self): + return self.sources.keys() + + def geturl(self, size=None): + if size is None: + # sort assuming ###p format for now, take largest resolution + size = str(sorted([int(size[:-1]) for size in self.sources])[-1])+'p' + return self.sources[size].source + + def __repr__(self): + return u"<{0.__class__.__name__} '{0.name}'>".format(self) + +class Translation( Element ): + name = Datapoint('name') + language = Datapoint('iso_639_1') + englishname = Datapoint('english_name') + + def __repr__(self): + return u"<{0.__class__.__name__} '{0.name}' ({0.language})>"\ + .format(self).encode('utf-8') + +class Genre( NameRepr, Element ): + id = Datapoint('id') + name = Datapoint('name') + + def _populate_movies(self): + return Request('genre/{0}/movies'.format(self.id), \ + language=self._locale.language) + + @property + def movies(self): + if 'movies' not in self._data: + search = MovieSearchResult(self._populate_movies(), \ + locale=self._locale) + search._name = "{0.name} Movies".format(self) + self._data['movies'] = search + return self._data['movies'] + + @classmethod + def getAll(cls, locale=None): + class GenreList( Element ): + genres = Datalist('genres', handler=Genre) + def _populate(self): + return Request('genre/list', language=self._locale.language) + return GenreList(locale=locale).genres + + +class Studio( NameRepr, Element ): + id = Datapoint('id', initarg=1) + name = Datapoint('name') + description = Datapoint('description') + headquarters = Datapoint('headquarters') + logo = Datapoint('logo_path', handler=Logo, \ + raw=False, default=None) + # FIXME: manage not-yet-defined handlers in a way that will propogate + # locale information properly + parent = Datapoint('parent_company', \ + handler=lambda x: Studio(raw=x)) + + def _populate(self): + return Request('company/{0}'.format(self.id)) + def _populate_movies(self): + return Request('company/{0}/movies'.format(self.id), \ + language=self._locale.language) + + # FIXME: add a cleaner way of adding types with no additional processing + @property + def movies(self): + if 'movies' not in 
self._data: + search = MovieSearchResult(self._populate_movies(), \ + locale=self._locale) + search._name = "{0.name} Movies".format(self) + self._data['movies'] = search + return self._data['movies'] + +class Country( NameRepr, Element ): + code = Datapoint('iso_3166_1') + name = Datapoint('name') + +class Language( NameRepr, Element ): + code = Datapoint('iso_639_1') + name = Datapoint('name') + +class Movie( Element ): + @classmethod + def latest(cls): + req = Request('latest/movie') + req.lifetime = 600 + return cls(raw=req.readJSON()) + + @classmethod + def nowplaying(cls, locale=None): + res = MovieSearchResult(Request('movie/now-playing'), locale=locale) + res._name = 'Now Playing' + return res + + @classmethod + def mostpopular(cls, locale=None): + res = MovieSearchResult(Request('movie/popular'), locale=locale) + res._name = 'Popular' + return res + + @classmethod + def toprated(cls, locale=None): + res = MovieSearchResult(Request('movie/top_rated'), locale=locale) + res._name = 'Top Rated' + return res + + @classmethod + def upcoming(cls, locale=None): + res = MovieSearchResult(Request('movie/upcoming'), locale=locale) + res._name = 'Upcoming' + return res + + @classmethod + def favorites(cls, session=None): + if session is None: + session = get_session() + account = Account(session=session) + res = MovieSearchResult( + Request('account/{0}/favorite_movies'.format(account.id), + session_id=session.sessionid)) + res._name = "Favorites" + return res + + @classmethod + def ratedmovies(cls, session=None): + if session is None: + session = get_session() + account = Account(session=session) + res = MovieSearchResult( + Request('account/{0}/rated_movies'.format(account.id), + session_id=session.sessionid)) + res._name = "Movies You Rated" + return res + + @classmethod + def watchlist(cls, session=None): + if session is None: + session = get_session() + account = Account(session=session) + res = MovieSearchResult( + Request('account/{0}/movie_watchlist'.format(account.id), + session_id=session.sessionid)) + res._name = "Movies You're Watching" + return res + + @classmethod + def fromIMDB(cls, imdbid, locale=None): + try: + # assume string + if not imdbid.startswith('tt'): + imdbid = "tt{0:0>7}".format(imdbid) + except AttributeError: + # assume integer + imdbid = "tt{0:0>7}".format(imdbid) + if locale is None: + locale = get_locale() + movie = cls(imdbid, locale=locale) + movie._populate() + return movie + + id = Datapoint('id', initarg=1) + title = Datapoint('title') + originaltitle = Datapoint('original_title') + tagline = Datapoint('tagline') + overview = Datapoint('overview') + runtime = Datapoint('runtime') + budget = Datapoint('budget') + revenue = Datapoint('revenue') + releasedate = Datapoint('release_date', handler=process_date) + homepage = Datapoint('homepage') + imdb = Datapoint('imdb_id') + + backdrop = Datapoint('backdrop_path', handler=Backdrop, \ + raw=False, default=None) + poster = Datapoint('poster_path', handler=Poster, \ + raw=False, default=None) + + popularity = Datapoint('popularity') + userrating = Datapoint('vote_average') + votes = Datapoint('vote_count') + + adult = Datapoint('adult') + collection = Datapoint('belongs_to_collection', handler=lambda x: \ + Collection(raw=x)) + genres = Datalist('genres', handler=Genre) + studios = Datalist('production_companies', handler=Studio) + countries = Datalist('production_countries', handler=Country) + languages = Datalist('spoken_languages', handler=Language) + + def _populate(self): + return 
Request('movie/{0}'.format(self.id), \ + language=self._locale.language) + def _populate_titles(self): + kwargs = {} + if not self._locale.fallthrough: + kwargs['country'] = self._locale.country + return Request('movie/{0}/alternative_titles'.format(self.id), **kwargs) + def _populate_cast(self): + return Request('movie/{0}/casts'.format(self.id)) + def _populate_images(self): + kwargs = {} + if not self._locale.fallthrough: + kwargs['language'] = self._locale.language + return Request('movie/{0}/images'.format(self.id), **kwargs) + def _populate_keywords(self): + return Request('movie/{0}/keywords'.format(self.id)) + def _populate_releases(self): + return Request('movie/{0}/releases'.format(self.id)) + def _populate_trailers(self): + return Request('movie/{0}/trailers'.format(self.id), \ + language=self._locale.language) + def _populate_translations(self): + return Request('movie/{0}/translations'.format(self.id)) + + alternate_titles = Datalist('titles', handler=AlternateTitle, \ + poller=_populate_titles, sort=True) + cast = Datalist('cast', handler=Cast, \ + poller=_populate_cast, sort='order') + crew = Datalist('crew', handler=Crew, poller=_populate_cast) + backdrops = Datalist('backdrops', handler=Backdrop, \ + poller=_populate_images, sort=True) + posters = Datalist('posters', handler=Poster, \ + poller=_populate_images, sort=True) + keywords = Datalist('keywords', handler=Keyword, \ + poller=_populate_keywords) + releases = Datadict('countries', handler=Release, \ + poller=_populate_releases, attr='country') + youtube_trailers = Datalist('youtube', handler=YoutubeTrailer, \ + poller=_populate_trailers) + apple_trailers = Datalist('quicktime', handler=AppleTrailer, \ + poller=_populate_trailers) + translations = Datalist('translations', handler=Translation, \ + poller=_populate_translations) + + def setFavorite(self, value): + req = Request('account/{0}/favorite'.format(\ + Account(session=self._session).id), + session_id=self._session.sessionid) + req.add_data({'movie_id':self.id, 'favorite':str(bool(value)).lower()}) + req.lifetime = 0 + req.readJSON() + + def setRating(self, value): + if not (0 <= value <= 10): + raise TMDBError("Ratings must be between '0' and '10'.") + req = Request('movie/{0}/rating'.format(self.id), \ + session_id=self._session.sessionid) + req.lifetime = 0 + req.add_data({'value':value}) + req.readJSON() + + def setWatchlist(self, value): + req = Request('account/{0}/movie_watchlist'.format(\ + Account(session=self._session).id), + session_id=self._session.sessionid) + req.lifetime = 0 + req.add_data({'movie_id':self.id, + 'movie_watchlist':str(bool(value)).lower()}) + req.readJSON() + + def getSimilar(self): + return self.similar + + @property + def similar(self): + res = MovieSearchResult(Request('movie/{0}/similar_movies'\ + .format(self.id)), + locale=self._locale) + res._name = 'Similar to {0}'.format(self._printable_name()) + return res + + @property + def lists(self): + res = ListSearchResult(Request('movie/{0}/lists'.format(self.id))) + res._name = "Lists containing {0}".format(self._printable_name()) + return res + + def _printable_name(self): + if self.title is not None: + s = u"'{0}'".format(self.title) + elif self.originaltitle is not None: + s = u"'{0}'".format(self.originaltitle) + else: + s = u"'No Title'" + if self.releasedate: + s = u"{0} ({1})".format(s, self.releasedate.year) + return s + + def __repr__(self): + return u"<{0} {1}>".format(self.__class__.__name__,\ + self._printable_name()).encode('utf-8') + +class ReverseCast( Movie ): + 
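A brief sketch of how the Movie element above is meant to be driven; the ids are illustrative and set_key() must already have been called, so treat this as a usage outline rather than shipped code:

    from tmdb3.tmdb_api import Movie

    movie = Movie(603)                   # 'id' is the single required init argument (initarg=1)
    print movie.title, movie.runtime     # first attribute access triggers _populate()
    print [g.name for g in movie.genres]

    same = Movie.fromIMDB('tt0133093')   # plain integers are also accepted and tt-prefixed/zero-padded
    print [m.title for m in movie.similar]   # paged 'similar_movies' search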
character = Datapoint('character') + + def __repr__(self): + return u"<{0.__class__.__name__} '{0.character}' on {1}>"\ + .format(self, self._printable_name()).encode('utf-8') + +class ReverseCrew( Movie ): + department = Datapoint('department') + job = Datapoint('job') + + def __repr__(self): + return u"<{0.__class__.__name__} '{0.job}' for {1}>"\ + .format(self, self._printable_name()).encode('utf-8') + +class Collection( NameRepr, Element ): + id = Datapoint('id', initarg=1) + name = Datapoint('name') + backdrop = Datapoint('backdrop_path', handler=Backdrop, \ + raw=False, default=None) + poster = Datapoint('poster_path', handler=Poster, \ + raw=False, default=None) + members = Datalist('parts', handler=Movie) + overview = Datapoint('overview') + + def _populate(self): + return Request('collection/{0}'.format(self.id), \ + language=self._locale.language) + def _populate_images(self): + kwargs = {} + if not self._locale.fallthrough: + kwargs['language'] = self._locale.language + return Request('collection/{0}/images'.format(self.id), **kwargs) + + backdrops = Datalist('backdrops', handler=Backdrop, \ + poller=_populate_images, sort=True) + posters = Datalist('posters', handler=Poster, \ + poller=_populate_images, sort=True) + +class List( NameRepr, Element ): + id = Datapoint('id', initarg=1) + name = Datapoint('name') + author = Datapoint('created_by') + description = Datapoint('description') + favorites = Datapoint('favorite_count') + language = Datapoint('iso_639_1') + count = Datapoint('item_count') + poster = Datapoint('poster_path', handler=Poster, \ + raw=False, default=None) + + members = Datalist('items', handler=Movie) + + def _populate(self): + return Request('list/{0}'.format(self.id)) + diff --git a/libs/tmdb3/tmdb_auth.py b/libs/tmdb3/tmdb_auth.py new file mode 100755 index 0000000..8583b99 --- /dev/null +++ b/libs/tmdb3/tmdb_auth.py @@ -0,0 +1,131 @@ +#!/usr/bin/env python +# -*- coding: utf-8 -*- +#----------------------- +# Name: tmdb_auth.py +# Python Library +# Author: Raymond Wagner +# Purpose: Provide authentication and session services for +# calls against the TMDB v3 API +#----------------------- + +from datetime import timedelta, datetime as _pydatetime, \ + tzinfo as _pytzinfo +import re +class datetime( _pydatetime ): + """Customized datetime class with ISO format parsing.""" + _reiso = re.compile('(?P<year>[0-9]{4})' + '-(?P<month>[0-9]{1,2})' + '-(?P<day>[0-9]{1,2})' + '.' + '(?P<hour>[0-9]{2})' + ':(?P<min>[0-9]{2})' + '(:(?P<sec>[0-9]{2}))?' + '(?P<tz>Z|' + '(?P<tzdirec>[-+])' + '(?P<tzhour>[0-9]{1,2})' + '(:)?' + '(?P<tzmin>[0-9]{2})?' 
+ ')?') + + class _tzinfo( _pytzinfo): + def __init__(self, direc='+', hr=0, min=0): + if direc == '-': + hr = -1*int(hr) + self._offset = timedelta(hours=int(hr), minutes=int(min)) + def utcoffset(self, dt): return self._offset + def tzname(self, dt): return '' + def dst(self, dt): return timedelta(0) + + @classmethod + def fromIso(cls, isotime, sep='T'): + match = cls._reiso.match(isotime) + if match is None: + raise TypeError("time data '%s' does not match ISO 8601 format" \ + % isotime) + + dt = [int(a) for a in match.groups()[:5]] + if match.group('sec') is not None: + dt.append(int(match.group('sec'))) + else: + dt.append(0) + if match.group('tz'): + if match.group('tz') == 'Z': + tz = cls._tzinfo() + elif match.group('tzmin'): + tz = cls._tzinfo(*match.group('tzdirec','tzhour','tzmin')) + else: + tz = cls._tzinfo(*match.group('tzdirec','tzhour')) + dt.append(0) + dt.append(tz) + return cls(*dt) + +from request import Request +from tmdb_exceptions import * + +syssession = None + +def set_session(sessionid): + global syssession + syssession = Session(sessionid) + +def get_session(sessionid=None): + global syssession + if sessionid: + return Session(sessionid) + elif syssession is not None: + return syssession + else: + return Session.new() + +class Session( object ): + + @classmethod + def new(cls): + return cls(None) + + def __init__(self, sessionid): + self.sessionid = sessionid + + @property + def sessionid(self): + if self._sessionid is None: + if self._authtoken is None: + raise TMDBError("No Auth Token to produce Session for") + # TODO: check authtokenexpiration against current time + req = Request('authentication/session/new', \ + request_token=self._authtoken) + req.lifetime = 0 + dat = req.readJSON() + if not dat['success']: + raise TMDBError("Session generation failed") + self._sessionid = dat['session_id'] + return self._sessionid + + @sessionid.setter + def sessionid(self, value): + self._sessionid = value + self._authtoken = None + self._authtokenexpiration = None + if value is None: + self.authenticated = False + else: + self.authenticated = True + + @property + def authtoken(self): + if self.authenticated: + raise TMDBError("Session is already authenticated") + if self._authtoken is None: + req = Request('authentication/token/new') + req.lifetime = 0 + dat = req.readJSON() + if not dat['success']: + raise TMDBError("Auth Token request failed") + self._authtoken = dat['request_token'] + self._authtokenexpiration = datetime.fromIso(dat['expires_at']) + return self._authtoken + + @property + def callbackurl(self): + return "http://www.themoviedb.org/authenticate/"+self._authtoken + diff --git a/libs/tmdb3/tmdb_exceptions.py b/libs/tmdb3/tmdb_exceptions.py new file mode 100755 index 0000000..35e0364 --- /dev/null +++ b/libs/tmdb3/tmdb_exceptions.py @@ -0,0 +1,89 @@ +#!/usr/bin/env python +# -*- coding: utf-8 -*- +#----------------------- +# Name: tmdb_exceptions.py Common exceptions used in tmdbv3 API library +# Python Library +# Author: Raymond Wagner +#----------------------- + +class TMDBError( Exception ): + Error = 0 + KeyError = 10 + KeyMissing = 20 + KeyInvalid = 30 + KeyRevoked = 40 + RequestError = 50 + RequestInvalid = 51 + PagingIssue = 60 + CacheError = 70 + CacheReadError = 71 + CacheWriteError = 72 + CacheDirectoryError = 73 + ImageSizeError = 80 + HTTPError = 90 + Offline = 100 + LocaleError = 110 + + def __init__(self, msg=None, errno=0): + self.errno = errno + if errno == 0: + self.errno = getattr(self, 'TMDB'+self.__class__.__name__, errno) + self.args = 
(msg,) + +class TMDBKeyError( TMDBError ): + pass + +class TMDBKeyMissing( TMDBKeyError ): + pass + +class TMDBKeyInvalid( TMDBKeyError ): + pass + +class TMDBKeyRevoked( TMDBKeyInvalid ): + pass + +class TMDBRequestError( TMDBError ): + pass + +class TMDBRequestInvalid( TMDBRequestError ): + pass + +class TMDBPagingIssue( TMDBRequestError ): + pass + +class TMDBCacheError( TMDBRequestError ): + pass + +class TMDBCacheReadError( TMDBCacheError ): + def __init__(self, filename): + super(TMDBCacheReadError, self).__init__( + "User does not have permission to access cache file: {0}.".format(filename)) + self.filename = filename + +class TMDBCacheWriteError( TMDBCacheError ): + def __init__(self, filename): + super(TMDBCacheWriteError, self).__init__( + "User does not have permission to write cache file: {0}.".format(filename)) + self.filename = filename + +class TMDBCacheDirectoryError( TMDBCacheError ): + def __init__(self, filename): + super(TMDBCacheDirectoryError, self).__init__( + "Directory containing cache file does not exist: {0}.".format(filename)) + self.filename = filename + +class TMDBImageSizeError( TMDBError ): + pass + +class TMDBHTTPError( TMDBError ): + def __init__(self, err): + self.httperrno = err.code + self.response = err.fp.read() + super(TMDBHTTPError, self).__init__(str(err)) + +class TMDBOffline( TMDBError ): + pass + +class TMDBLocaleError( TMDBError ): + pass + diff --git a/libs/tmdb3/util.py b/libs/tmdb3/util.py new file mode 100755 index 0000000..bba9fcc --- /dev/null +++ b/libs/tmdb3/util.py @@ -0,0 +1,366 @@ +#!/usr/bin/env python +# -*- coding: utf-8 -*- +#----------------------- +# Name: util.py Assorted utilities used in tmdb_api +# Python Library +# Author: Raymond Wagner +#----------------------- + +from copy import copy +from locales import get_locale +from tmdb_auth import get_session + +class NameRepr( object ): + """Mixin for __repr__ methods using 'name' attribute.""" + def __repr__(self): + return u"<{0.__class__.__name__} '{0.name}'>"\ + .format(self).encode('utf-8') + +class SearchRepr( object ): + """ + Mixin for __repr__ methods for classes with '_name' and + '_request' attributes. + """ + def __repr__(self): + name = self._name if self._name else self._request._kwargs['query'] + return u"<Search Results: {0}>".format(name).encode('utf-8') + +class Poller( object ): + """ + Wrapper for an optional callable to populate an Element derived class + with raw data, or data from a Request. 
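To show how the exception hierarchy above is intended to surface API failures (Request.readJSON() maps TMDB status codes through handle_status()), here is a hedged sketch; the key is a placeholder that the TMDB server would be expected to reject:

    from tmdb3.request import Request, set_key
    from tmdb3.tmdb_exceptions import TMDBKeyInvalid, TMDBHTTPError, TMDBError

    set_key('0123456789abcdef0123456789abcdef')   # syntactically valid, but not a real key

    try:
        Request('configuration').readJSON()
    except TMDBKeyInvalid, e:
        print 'rejected key, TMDB status', e.tmdberrno   # status 7 mapped by handle_status()
    except TMDBHTTPError, e:
        print 'HTTP failure', e.httperrno                # raw HTTP error, body kept in e.response
    except TMDBError:
        print 'some other TMDB-side failure'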
+ """ + def __init__(self, func, lookup, inst=None): + self.func = func + self.lookup = lookup + self.inst = inst + if func: + # with function, this allows polling data from the API + self.__doc__ = func.__doc__ + self.__name__ = func.__name__ + self.__module__ = func.__module__ + else: + # without function, this is just a dummy poller used for applying + # raw data to a new Element class with the lookup table + self.__name__ = '_populate' + + def __get__(self, inst, owner): + # normal decorator stuff + # return self for a class + # return instantiated copy of self for an object + if inst is None: + return self + func = None + if self.func: + func = self.func.__get__(inst, owner) + return self.__class__(func, self.lookup, inst) + + def __call__(self): + # retrieve data from callable function, and apply + if not callable(self.func): + raise RuntimeError('Poller object called without a source function') + req = self.func() + if (('language' in req._kwargs) or ('country' in req._kwargs)) \ + and self.inst._locale.fallthrough: + # request specifies a locale filter, and fallthrough is enabled + # run a first pass with specified filter + if not self.apply(req.readJSON(), False): + return + # if first pass results in missed data, run a second pass to + # fill in the gaps + self.apply(req.new(language=None, country=None).readJSON()) + # re-apply the filtered first pass data over top the second + # unfiltered set. this is to work around the issue that the + # properties have no way of knowing when they should or + # should not overwrite existing data. the cache engine will + # take care of the duplicate query + self.apply(req.readJSON()) + + def apply(self, data, set_nones=True): + # apply data directly, bypassing callable function + unfilled = False + for k,v in self.lookup.items(): + if (k in data) and \ + ((data[k] is not None) if callable(self.func) else True): + # argument received data, populate it + setattr(self.inst, v, data[k]) + elif v in self.inst._data: + # argument did not receive data, but Element already contains + # some value, so skip this + continue + elif set_nones: + # argument did not receive data, so fill it with None + # to indicate such and prevent a repeat scan + setattr(self.inst, v, None) + else: + # argument does not need data, so ignore it allowing it to + # trigger a later poll. this is intended for use when + # initializing a class with raw data, or when performing a + # first pass through when performing locale fall through + unfilled = True + return unfilled + +class Data( object ): + """ + Basic response definition class + This maps to a single key in a JSON dictionary received from the API + """ + def __init__(self, field, initarg=None, handler=None, poller=None, + raw=True, default=u'', lang=False): + """ + This defines how the dictionary value is to be processed by the poller + field -- defines the dictionary key that filters what data this uses + initarg -- (optional) specifies that this field must be supplied + when creating a new instance of the Element class this + definition is mapped to. Takes an integer for the order + it should be used in the input arguments + handler -- (optional) callable used to process the received value + before being stored in the Element object. + poller -- (optional) callable to be used if data is requested and + this value has not yet been defined. the callable should + return a dictionary of data from a JSON query. 
many + definitions may share a single poller, which will be + and the data used to populate all referenced definitions + based off their defined field + raw -- (optional) if the specified handler is an Element class, + the data will be passed into it using the 'raw' keyword + attribute. setting this to false will force the data to + instead be passed in as the first argument + """ + self.field = field + self.initarg = initarg + self.poller = poller + self.raw = raw + self.default = default + self.sethandler(handler) + + def __get__(self, inst, owner): + if inst is None: + return self + if self.field not in inst._data: + if self.poller is None: + return None + self.poller.__get__(inst, owner)() + return inst._data[self.field] + + def __set__(self, inst, value): + if (value is not None) and (value != ''): + value = self.handler(value) + else: + value = self.default + if isinstance(value, Element): + value._locale = inst._locale + value._session = inst._session + inst._data[self.field] = value + + def sethandler(self, handler): + # ensure handler is always callable, even for passthrough data + if handler is None: + self.handler = lambda x: x + elif isinstance(handler, ElementType) and self.raw: + self.handler = lambda x: handler(raw=x) + else: + self.handler = lambda x: handler(x) + +class Datapoint( Data ): + pass + +class Datalist( Data ): + """ + Response definition class for list data + This maps to a key in a JSON dictionary storing a list of data + """ + def __init__(self, field, handler=None, poller=None, sort=None, raw=True): + """ + This defines how the dictionary value is to be processed by the poller + field -- defines the dictionary key that filters what data this uses + handler -- (optional) callable used to process the received value + before being stored in the Element object. + poller -- (optional) callable to be used if data is requested and + this value has not yet been defined. the callable should + return a dictionary of data from a JSON query. many + definitions may share a single poller, which will be + and the data used to populate all referenced definitions + based off their defined field + sort -- (optional) name of attribute in resultant data to be used + to sort the list after processing. this effectively + a handler be defined to process the data into something + that has attributes + raw -- (optional) if the specified handler is an Element class, + the data will be passed into it using the 'raw' keyword + attribute. setting this to false will force the data to + instead be passed in as the first argument + """ + super(Datalist, self).__init__(field, None, handler, poller, raw) + self.sort = sort + def __set__(self, inst, value): + data = [] + if value: + for val in value: + val = self.handler(val) + if isinstance(val, Element): + val._locale = inst._locale + val._session = inst._session + data.append(val) + if self.sort: + if self.sort is True: + data.sort() + else: + data.sort(key=lambda x: getattr(x, self.sort)) + inst._data[self.field] = data + +class Datadict( Data ): + """ + Response definition class for dictionary data + This maps to a key in a JSON dictionary storing a dictionary of data + """ + def __init__(self, field, handler=None, poller=None, raw=True, + key=None, attr=None): + """ + This defines how the dictionary value is to be processed by the poller + field -- defines the dictionary key that filters what data this uses + handler -- (optional) callable used to process the received value + before being stored in the Element object. 
+ poller -- (optional) callable to be used if data is requested and + this value has not yet been defined. the callable should + return a dictionary of data from a JSON query. many + definitions may share a single poller, which will be + and the data used to populate all referenced definitions + based off their defined field + key -- (optional) name of key in resultant data to be used as + the key in the stored dictionary. if this is not the + field name from the source data is used instead + attr -- (optional) name of attribute in resultant data to be used + as the key in the stored dictionary. if this is not + the field name from the source data is used instead + raw -- (optional) if the specified handler is an Element class, + the data will be passed into it using the 'raw' keyword + attribute. setting this to false will force the data to + instead be passed in as the first argument + """ + if key and attr: + raise TypeError("`key` and `attr` cannot both be defined") + super(Datadict, self).__init__(field, None, handler, poller, raw) + if key: + self.getkey = lambda x: x[key] + elif attr: + self.getkey = lambda x: getattr(x, attr) + else: + raise TypeError("Datadict requires `key` or `attr` be defined "+\ + "for populating the dictionary") + def __set__(self, inst, value): + data = {} + if value: + for val in value: + val = self.handler(val) + if isinstance(val, Element): + val._locale = inst._locale + val._session = inst._session + data[self.getkey(val)] = val + inst._data[self.field] = data + +class ElementType( type ): + """ + MetaClass used to pre-process Element-derived classes and set up the + Data definitions + """ + def __new__(mcs, name, bases, attrs): + # any Data or Poller object defined in parent classes must be cloned + # and processed in this class to function properly + # scan through available bases for all such definitions and insert + # a copy into this class's attributes + # run in reverse order so higher priority values overwrite lower ones + data = {} + pollers = {'_populate':None} + + for base in reversed(bases): + if isinstance(base, mcs): + for k, attr in base.__dict__.items(): + if isinstance(attr, Data): + # extract copies of each defined Data element from + # parent classes + attr = copy(attr) + attr.poller = attr.poller.func + data[k] = attr + elif isinstance(attr, Poller): + # extract copies of each defined Poller function + # from parent classes + pollers[k] = attr.func + for k,attr in attrs.items(): + if isinstance(attr, Data): + data[k] = attr + if '_populate' in attrs: + pollers['_populate'] = attrs['_populate'] + + # process all defined Data attribues, testing for use as an initial + # argument, and building a list of what Pollers are used to populate + # which Data points + pollermap = dict([(k,[]) for k in pollers]) + initargs = [] + for k,v in data.items(): + v.name = k + if v.initarg: + initargs.append(v) + if v.poller: + pn = v.poller.__name__ + if pn not in pollermap: + pollermap[pn] = [] + if pn not in pollers: + pollers[pn] = v.poller + pollermap[pn].append(v) + else: + pollermap['_populate'].append(v) + + # wrap each used poller function with a Poller class, and push into + # the new class attributes + for k,v in pollermap.items(): + if len(v) == 0: + continue + lookup = dict([(attr.field, attr.name) for attr in v]) + poller = Poller(pollers[k], lookup) + attrs[k] = poller + # backfill wrapped Poller into each mapped Data object, and ensure + # the data elements are defined for this new class + for attr in v: + attr.poller = poller + 
attrs[attr.name] = attr + + # build sorted list of arguments used for initialization + attrs['_InitArgs'] = tuple([a.name for a in \ + sorted(initargs, key=lambda x: x.initarg)]) + return type.__new__(mcs, name, bases, attrs) + + def __call__(cls, *args, **kwargs): + obj = cls.__new__(cls) + if ('locale' in kwargs) and (kwargs['locale'] is not None): + obj._locale = kwargs['locale'] + else: + obj._locale = get_locale() + + if 'session' in kwargs: + obj._session = kwargs['session'] + else: + obj._session = get_session() + + obj._data = {} + if 'raw' in kwargs: + # if 'raw' keyword is supplied, create and populate the object manually + if len(args) != 0: + raise TypeError('__init__() takes exactly 2 arguments (1 given)') + obj._populate.apply(kwargs['raw'], False) + else: + # if not, the number of input arguments must exactly match that + # defined by the Data definitions + if len(args) != len(cls._InitArgs): + raise TypeError('__init__() takes exactly {0} arguments ({1} given)'\ + .format(len(cls._InitArgs)+1, len(args)+1)) + for a,v in zip(cls._InitArgs, args): + setattr(obj, a, v) + + obj.__init__() + return obj + +class Element( object ): + __metaclass__ = ElementType + _lang = 'en' +
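Finally, a condensed sketch of how the Datapoint/Poller/ElementType machinery above is consumed by tmdb_api.py. MiniMovie is an invented stand-in, not a class from the diff, and it assumes set_key() has already been called:

    from tmdb3.request import Request
    from tmdb3.util import Element, Datapoint, NameRepr

    class MiniMovie(NameRepr, Element):
        id       = Datapoint('id', initarg=1)   # becomes the required positional argument
        name     = Datapoint('title')           # no dedicated poller, so _populate() fills it
        overview = Datapoint('overview')

        def _populate(self):
            # ElementType wraps this in a Poller; the JSON it returns is mapped
            # onto every Datapoint above via the generated lookup table
            return Request('movie/{0}'.format(self.id),
                           language=self._locale.language)

    m = MiniMovie(603)    # no request yet; 603 is stored via the initarg
    print m.name          # first access fires _populate() and caches the result
    print MiniMovie(raw={'id': 603, 'title': 'The Matrix'}).name   # or seed directly from raw JSON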