From 0b61ec1e13c163978df54dbafee78a63380ec9c7 Mon Sep 17 00:00:00 2001
From: Ruud
Date: Tue, 11 Mar 2014 22:47:42 +0100
Subject: [PATCH] Move plugins to single file

---
 couchpotato/core/plugins/automation.py           |  104 ++
 couchpotato/core/plugins/automation/__init__.py  |   57 -
 couchpotato/core/plugins/automation/main.py      |   49 -
 couchpotato/core/plugins/browser.py              |  108 ++
 couchpotato/core/plugins/browser/__init__.py     |    7 -
 couchpotato/core/plugins/browser/main.py         |  105 --
 couchpotato/core/plugins/category/__init__.py    |    4 +-
 couchpotato/core/plugins/custom.py               |   23 +
 couchpotato/core/plugins/custom/__init__.py      |    7 -
 couchpotato/core/plugins/custom/main.py          |   21 -
 couchpotato/core/plugins/dashboard.py            |   91 ++
 couchpotato/core/plugins/dashboard/__init__.py   |    7 -
 couchpotato/core/plugins/dashboard/main.py       |   89 --
 couchpotato/core/plugins/file.py                 |   79 ++
 couchpotato/core/plugins/file/__init__.py        |    7 -
 couchpotato/core/plugins/file/main.py            |   77 --
 couchpotato/core/plugins/log/__init__.py         |    4 +-
 couchpotato/core/plugins/manage.py               |  309 +++
 couchpotato/core/plugins/manage/__init__.py      |   43 -
 couchpotato/core/plugins/manage/main.py          |  270 -----
 couchpotato/core/plugins/profile/__init__.py     |    4 +-
 couchpotato/core/plugins/quality/__init__.py     |    2 +-
 couchpotato/core/plugins/release/__init__.py     |    4 +-
 couchpotato/core/plugins/renamer.py              | 1363 +++++++++++++++++++++++
 couchpotato/core/plugins/renamer/__init__.py     |  178 ---
 couchpotato/core/plugins/renamer/main.py         | 1189 --------------------
 couchpotato/core/plugins/scanner.py              |  868 +++++++++++++++
 couchpotato/core/plugins/scanner/__init__.py     |    7 -
 couchpotato/core/plugins/scanner/main.py         |  868 ---------------
 couchpotato/core/plugins/score/__init__.py       |    4 +-
 couchpotato/core/plugins/subtitle.py             |   76 ++
 couchpotato/core/plugins/subtitle/__init__.py    |   35 -
 couchpotato/core/plugins/subtitle/main.py        |   49 -
 couchpotato/core/plugins/trailer.py              |   78 ++
 couchpotato/core/plugins/trailer/__init__.py     |   38 -
 couchpotato/core/plugins/trailer/main.py         |   42 -
 couchpotato/core/plugins/userscript/__init__.py  |    4 +-
 couchpotato/core/plugins/wizard/__init__.py      |    2 +-
 38 files changed, 3107 insertions(+), 3165 deletions(-)
 create mode 100644 couchpotato/core/plugins/automation.py
 delete mode 100644 couchpotato/core/plugins/automation/__init__.py
 delete mode 100644 couchpotato/core/plugins/automation/main.py
 create mode 100644 couchpotato/core/plugins/browser.py
 delete mode 100644 couchpotato/core/plugins/browser/__init__.py
 delete mode 100644 couchpotato/core/plugins/browser/main.py
 create mode 100644 couchpotato/core/plugins/custom.py
 delete mode 100644 couchpotato/core/plugins/custom/__init__.py
 delete mode 100644 couchpotato/core/plugins/custom/main.py
 create mode 100644 couchpotato/core/plugins/dashboard.py
 delete mode 100644 couchpotato/core/plugins/dashboard/__init__.py
 delete mode 100644 couchpotato/core/plugins/dashboard/main.py
 create mode 100644 couchpotato/core/plugins/file.py
 delete mode 100644 couchpotato/core/plugins/file/__init__.py
 delete mode 100644 couchpotato/core/plugins/file/main.py
 create mode 100644 couchpotato/core/plugins/manage.py
 delete mode 100644 couchpotato/core/plugins/manage/__init__.py
 delete mode 100644 couchpotato/core/plugins/manage/main.py
 create mode 100644 couchpotato/core/plugins/renamer.py
 delete mode 100755 couchpotato/core/plugins/renamer/__init__.py
 delete mode 100755 couchpotato/core/plugins/renamer/main.py
 create mode 100644 couchpotato/core/plugins/scanner.py
 delete mode 100644 couchpotato/core/plugins/scanner/__init__.py
 delete mode 100644 couchpotato/core/plugins/scanner/main.py
 create mode 100644 couchpotato/core/plugins/subtitle.py
 delete mode 100644 couchpotato/core/plugins/subtitle/__init__.py
 delete mode 100644 couchpotato/core/plugins/subtitle/main.py
 create mode 100644 couchpotato/core/plugins/trailer.py
 delete mode 100644 couchpotato/core/plugins/trailer/__init__.py
 delete mode 100644 couchpotato/core/plugins/trailer/main.py

diff --git a/couchpotato/core/plugins/automation.py b/couchpotato/core/plugins/automation.py
new file mode 100644
index 0000000..e17dc36
--- /dev/null
+++ b/couchpotato/core/plugins/automation.py
@@ -0,0 +1,104 @@
+from couchpotato.core.event import addEvent, fireEvent
+from couchpotato.core.logger import CPLog
+from couchpotato.core.plugins.base import Plugin
+from couchpotato.environment import Env
+
+log = CPLog(__name__)
+
+autoload = 'Automation'
+
+
+class Automation(Plugin):
+
+    def __init__(self):
+
+        addEvent('app.load', self.setCrons)
+
+        if not Env.get('dev'):
+            addEvent('app.load', self.addMovies)
+
+        addEvent('setting.save.automation.hour.after', self.setCrons)
+
+    def setCrons(self):
+        fireEvent('schedule.interval', 'automation.add_movies', self.addMovies, hours = self.conf('hour', default = 12))
+
+    def addMovies(self):
+
+        movies = fireEvent('automation.get_movies', merge = True)
+        movie_ids = []
+
+        for imdb_id in movies:
+
+            if self.shuttingDown():
+                break
+
+            prop_name = 'automation.added.%s' % imdb_id
+            added = Env.prop(prop_name, default = False)
+            if not added:
+                added_movie = fireEvent('movie.add', params = {'identifier': imdb_id}, force_readd = False, search_after = False, update_library = True, single = True)
+                if added_movie:
+                    movie_ids.append(added_movie['id'])
+                Env.prop(prop_name, True)
+
+        for movie_id in movie_ids:
+
+            if self.shuttingDown():
+                break
+
+            movie_dict = fireEvent('media.get', movie_id, single = True)
+            fireEvent('movie.searcher.single', movie_dict)
+
+        return True
+
+
+config = [{
+    'name': 'automation',
+    'order': 101,
+    'groups': [
+        {
+            'tab': 'automation',
+            'name': 'automation',
+            'label': 'Minimal movie requirements',
+            'options': [
+                {
+                    'name': 'year',
+                    'default': 2011,
+                    'type': 'int',
+                },
+                {
+                    'name': 'votes',
+                    'default': 1000,
+                    'type': 'int',
+                },
+                {
+                    'name': 'rating',
+                    'default': 7.0,
+                    'type': 'float',
+                },
+                {
+                    'name': 'hour',
+                    'advanced': True,
+                    'default': 12,
+                    'label': 'Check every',
+                    'type': 'int',
+                    'unit': 'hours',
+                    'description': 'hours',
+                },
+                {
+                    'name': 'required_genres',
+                    'label': 'Required Genres',
+                    'default': '',
+                    'placeholder': 'Example: Action, Crime & Drama',
+                    'description': ('Ignore movies that don\'t contain at least one set of genres.', 'Sets are separated by "," and each word within a set must be separated with "&"')
+                },
+                {
+                    'name': 'ignored_genres',
+                    'label': 'Ignored Genres',
+                    'default': '',
+                    'placeholder': 'Example: Horror, Comedy & Drama & Romance',
+                    'description': 'Ignore movies that contain at least one set of genres. Sets work the same as above.'
+ }, + ], + }, + ], +}] diff --git a/couchpotato/core/plugins/automation/__init__.py b/couchpotato/core/plugins/automation/__init__.py deleted file mode 100644 index 482a009..0000000 --- a/couchpotato/core/plugins/automation/__init__.py +++ /dev/null @@ -1,57 +0,0 @@ -from .main import Automation - - -def start(): - return Automation() - -config = [{ - 'name': 'automation', - 'order': 101, - 'groups': [ - { - 'tab': 'automation', - 'name': 'automation', - 'label': 'Minimal movie requirements', - 'options': [ - { - 'name': 'year', - 'default': 2011, - 'type': 'int', - }, - { - 'name': 'votes', - 'default': 1000, - 'type': 'int', - }, - { - 'name': 'rating', - 'default': 7.0, - 'type': 'float', - }, - { - 'name': 'hour', - 'advanced': True, - 'default': 12, - 'label': 'Check every', - 'type': 'int', - 'unit': 'hours', - 'description': 'hours', - }, - { - 'name': 'required_genres', - 'label': 'Required Genres', - 'default': '', - 'placeholder': 'Example: Action, Crime & Drama', - 'description': ('Ignore movies that don\'t contain at least one set of genres.', 'Sets are separated by "," and each word within a set must be separated with "&"') - }, - { - 'name': 'ignored_genres', - 'label': 'Ignored Genres', - 'default': '', - 'placeholder': 'Example: Horror, Comedy & Drama & Romance', - 'description': 'Ignore movies that contain at least one set of genres. Sets work the same as above.' - }, - ], - }, - ], -}] diff --git a/couchpotato/core/plugins/automation/main.py b/couchpotato/core/plugins/automation/main.py deleted file mode 100644 index 2edcd3b..0000000 --- a/couchpotato/core/plugins/automation/main.py +++ /dev/null @@ -1,49 +0,0 @@ -from couchpotato.core.event import addEvent, fireEvent -from couchpotato.core.logger import CPLog -from couchpotato.core.plugins.base import Plugin -from couchpotato.environment import Env - -log = CPLog(__name__) - - -class Automation(Plugin): - - def __init__(self): - - addEvent('app.load', self.setCrons) - - if not Env.get('dev'): - addEvent('app.load', self.addMovies) - - addEvent('setting.save.automation.hour.after', self.setCrons) - - def setCrons(self): - fireEvent('schedule.interval', 'automation.add_movies', self.addMovies, hours = self.conf('hour', default = 12)) - - def addMovies(self): - - movies = fireEvent('automation.get_movies', merge = True) - movie_ids = [] - - for imdb_id in movies: - - if self.shuttingDown(): - break - - prop_name = 'automation.added.%s' % imdb_id - added = Env.prop(prop_name, default = False) - if not added: - added_movie = fireEvent('movie.add', params = {'identifier': imdb_id}, force_readd = False, search_after = False, update_library = True, single = True) - if added_movie: - movie_ids.append(added_movie['id']) - Env.prop(prop_name, True) - - for movie_id in movie_ids: - - if self.shuttingDown(): - break - - movie_dict = fireEvent('media.get', movie_id, single = True) - fireEvent('movie.searcher.single', movie_dict) - - return True diff --git a/couchpotato/core/plugins/browser.py b/couchpotato/core/plugins/browser.py new file mode 100644 index 0000000..8bfe53c --- /dev/null +++ b/couchpotato/core/plugins/browser.py @@ -0,0 +1,108 @@ +from couchpotato.api import addApiView +from couchpotato.core.helpers.variable import getUserDir +from couchpotato.core.plugins.base import Plugin +import ctypes +import os +import string +import six + +if os.name == 'nt': + import imp + try: + imp.find_module('win32file') + except: + # todo:: subclass ImportError for missing dependencies, vs. broken plugins? 
+ raise ImportError("Missing the win32file module, which is a part of the prerequisite \ + pywin32 package. You can get it from http://sourceforge.net/projects/pywin32/files/pywin32/") + else: + import win32file #@UnresolvedImport + +autoload = 'FileBrowser' + + +class FileBrowser(Plugin): + + def __init__(self): + addApiView('directory.list', self.view, docs = { + 'desc': 'Return the directory list of a given directory', + 'params': { + 'path': {'desc': 'The directory to scan'}, + 'show_hidden': {'desc': 'Also show hidden files'} + }, + 'return': {'type': 'object', 'example': """{ + 'is_root': bool, //is top most folder + 'parent': string, //parent folder of requested path + 'home': string, //user home folder + 'empty': bool, //directory is empty + 'dirs': array, //directory names +}"""} + }) + + def getDirectories(self, path = '/', show_hidden = True): + + # Return driveletters or root if path is empty + if path == '/' or not path or path == '\\': + if os.name == 'nt': + return self.getDriveLetters() + path = '/' + + dirs = [] + for f in os.listdir(path): + p = os.path.join(path, f) + if os.path.isdir(p) and ((self.is_hidden(p) and bool(int(show_hidden))) or not self.is_hidden(p)): + dirs.append(p + os.path.sep) + + return sorted(dirs) + + def getFiles(self): + pass + + def getDriveLetters(self): + + driveletters = [] + for drive in string.ascii_uppercase: + if win32file.GetDriveType(drive + ":") in [win32file.DRIVE_FIXED, win32file.DRIVE_REMOTE, win32file.DRIVE_RAMDISK, win32file.DRIVE_REMOVABLE]: + driveletters.append(drive + ":\\") + + return driveletters + + def view(self, path = '/', show_hidden = True, **kwargs): + + home = getUserDir() + + if not path: + path = home + + try: + dirs = self.getDirectories(path = path, show_hidden = show_hidden) + except: + dirs = [] + + parent = os.path.dirname(path.rstrip(os.path.sep)) + if parent == path.rstrip(os.path.sep): + parent = '/' + elif parent != '/' and parent[-2:] != ':\\': + parent += os.path.sep + + return { + 'is_root': path == '/', + 'empty': len(dirs) == 0, + 'parent': parent, + 'home': home + os.path.sep, + 'platform': os.name, + 'dirs': dirs, + } + + + def is_hidden(self, filepath): + name = os.path.basename(os.path.abspath(filepath)) + return name.startswith('.') or self.has_hidden_attribute(filepath) + + def has_hidden_attribute(self, filepath): + try: + attrs = ctypes.windll.kernel32.GetFileAttributesW(six.text_type(filepath)) #@UndefinedVariable + assert attrs != -1 + result = bool(attrs & 2) + except (AttributeError, AssertionError): + result = False + return result diff --git a/couchpotato/core/plugins/browser/__init__.py b/couchpotato/core/plugins/browser/__init__.py deleted file mode 100644 index fae5065..0000000 --- a/couchpotato/core/plugins/browser/__init__.py +++ /dev/null @@ -1,7 +0,0 @@ -from .main import FileBrowser - - -def start(): - return FileBrowser() - -config = [] diff --git a/couchpotato/core/plugins/browser/main.py b/couchpotato/core/plugins/browser/main.py deleted file mode 100644 index 956a768..0000000 --- a/couchpotato/core/plugins/browser/main.py +++ /dev/null @@ -1,105 +0,0 @@ -from couchpotato.api import addApiView -from couchpotato.core.helpers.variable import getUserDir -from couchpotato.core.plugins.base import Plugin -import ctypes -import os -import string -import six - -if os.name == 'nt': - import imp - try: - imp.find_module('win32file') - except: - # todo:: subclass ImportError for missing dependencies, vs. broken plugins? 
- raise ImportError("Missing the win32file module, which is a part of the prerequisite \ - pywin32 package. You can get it from http://sourceforge.net/projects/pywin32/files/pywin32/") - else: - import win32file #@UnresolvedImport - -class FileBrowser(Plugin): - - def __init__(self): - addApiView('directory.list', self.view, docs = { - 'desc': 'Return the directory list of a given directory', - 'params': { - 'path': {'desc': 'The directory to scan'}, - 'show_hidden': {'desc': 'Also show hidden files'} - }, - 'return': {'type': 'object', 'example': """{ - 'is_root': bool, //is top most folder - 'parent': string, //parent folder of requested path - 'home': string, //user home folder - 'empty': bool, //directory is empty - 'dirs': array, //directory names -}"""} - }) - - def getDirectories(self, path = '/', show_hidden = True): - - # Return driveletters or root if path is empty - if path == '/' or not path or path == '\\': - if os.name == 'nt': - return self.getDriveLetters() - path = '/' - - dirs = [] - for f in os.listdir(path): - p = os.path.join(path, f) - if os.path.isdir(p) and ((self.is_hidden(p) and bool(int(show_hidden))) or not self.is_hidden(p)): - dirs.append(p + os.path.sep) - - return sorted(dirs) - - def getFiles(self): - pass - - def getDriveLetters(self): - - driveletters = [] - for drive in string.ascii_uppercase: - if win32file.GetDriveType(drive + ":") in [win32file.DRIVE_FIXED, win32file.DRIVE_REMOTE, win32file.DRIVE_RAMDISK, win32file.DRIVE_REMOVABLE]: - driveletters.append(drive + ":\\") - - return driveletters - - def view(self, path = '/', show_hidden = True, **kwargs): - - home = getUserDir() - - if not path: - path = home - - try: - dirs = self.getDirectories(path = path, show_hidden = show_hidden) - except: - dirs = [] - - parent = os.path.dirname(path.rstrip(os.path.sep)) - if parent == path.rstrip(os.path.sep): - parent = '/' - elif parent != '/' and parent[-2:] != ':\\': - parent += os.path.sep - - return { - 'is_root': path == '/', - 'empty': len(dirs) == 0, - 'parent': parent, - 'home': home + os.path.sep, - 'platform': os.name, - 'dirs': dirs, - } - - - def is_hidden(self, filepath): - name = os.path.basename(os.path.abspath(filepath)) - return name.startswith('.') or self.has_hidden_attribute(filepath) - - def has_hidden_attribute(self, filepath): - try: - attrs = ctypes.windll.kernel32.GetFileAttributesW(six.text_type(filepath)) #@UndefinedVariable - assert attrs != -1 - result = bool(attrs & 2) - except (AttributeError, AssertionError): - result = False - return result diff --git a/couchpotato/core/plugins/category/__init__.py b/couchpotato/core/plugins/category/__init__.py index dcdae90..d147092 100644 --- a/couchpotato/core/plugins/category/__init__.py +++ b/couchpotato/core/plugins/category/__init__.py @@ -1,7 +1,5 @@ from .main import CategoryPlugin -def start(): +def autoload(): return CategoryPlugin() - -config = [] diff --git a/couchpotato/core/plugins/custom.py b/couchpotato/core/plugins/custom.py new file mode 100644 index 0000000..c9cff3e --- /dev/null +++ b/couchpotato/core/plugins/custom.py @@ -0,0 +1,23 @@ +from couchpotato.core.event import addEvent +from couchpotato.core.logger import CPLog +from couchpotato.core.plugins.base import Plugin +from couchpotato.environment import Env +import os + +log = CPLog(__name__) + +autoload = 'Custom' + + +class Custom(Plugin): + + def __init__(self): + addEvent('app.load', self.createStructure) + + def createStructure(self): + + custom_dir = os.path.join(Env.get('data_dir'), 'custom_plugins') + + if not 
os.path.isdir(custom_dir): + self.makeDir(custom_dir) + self.createFile(os.path.join(custom_dir, '__init__.py'), '# Don\'t remove this file') diff --git a/couchpotato/core/plugins/custom/__init__.py b/couchpotato/core/plugins/custom/__init__.py deleted file mode 100644 index 20a3935..0000000 --- a/couchpotato/core/plugins/custom/__init__.py +++ /dev/null @@ -1,7 +0,0 @@ -from .main import Custom - - -def start(): - return Custom() - -config = [] diff --git a/couchpotato/core/plugins/custom/main.py b/couchpotato/core/plugins/custom/main.py deleted file mode 100644 index a15c915..0000000 --- a/couchpotato/core/plugins/custom/main.py +++ /dev/null @@ -1,21 +0,0 @@ -from couchpotato.core.event import addEvent -from couchpotato.core.logger import CPLog -from couchpotato.core.plugins.base import Plugin -from couchpotato.environment import Env -import os - -log = CPLog(__name__) - - -class Custom(Plugin): - - def __init__(self): - addEvent('app.load', self.createStructure) - - def createStructure(self): - - custom_dir = os.path.join(Env.get('data_dir'), 'custom_plugins') - - if not os.path.isdir(custom_dir): - self.makeDir(custom_dir) - self.createFile(os.path.join(custom_dir, '__init__.py'), '# Don\'t remove this file') diff --git a/couchpotato/core/plugins/dashboard.py b/couchpotato/core/plugins/dashboard.py new file mode 100644 index 0000000..4a9aaf9 --- /dev/null +++ b/couchpotato/core/plugins/dashboard.py @@ -0,0 +1,91 @@ +from datetime import date +from couchpotato import get_db +from couchpotato.api import addApiView +from couchpotato.core.event import fireEvent +from couchpotato.core.helpers.variable import splitString, tryInt +from couchpotato.core.logger import CPLog +from couchpotato.core.plugins.base import Plugin +import random as rndm +import time + +log = CPLog(__name__) + +autoload = 'Dashboard' + + +class Dashboard(Plugin): + + def __init__(self): + addApiView('dashboard.soon', self.getSoonView) + + def getSoonView(self, limit_offset = None, random = False, late = False, **kwargs): + + db = get_db() + now = time.time() + + # Get profiles first, determine pre or post theater + profiles = fireEvent('profile.all', single = True) + pre_releases = fireEvent('quality.pre_releases', single = True) + + # See what the profile contain and cache it + profile_pre = {} + for profile in profiles: + contains = {} + for q_identifier in profile.get('qualities', []): + contains['theater' if q_identifier in pre_releases else 'dvd'] = True + + profile_pre[profile.get('_id')] = contains + + # Add limit + limit = 12 + if limit_offset: + splt = splitString(limit_offset) if isinstance(limit_offset, (str, unicode)) else limit_offset + limit = tryInt(splt[0]) + + # Get all active medias + active_ids = [x['_id'] for x in db.run('media', 'with_status', 'active', with_doc = False)] + + medias = [] + now_year = date.today().year + + if len(active_ids) > 0: + + # Order by title or randomize + if not random: + orders_ids = db.all('media_title') + active_ids = [x['_id'] for x in orders_ids if x['_id'] in active_ids] + else: + rndm.shuffle(active_ids) + + for media_id in active_ids: + media = db.get('id', media_id) + + pp = profile_pre.get(media['profile_id']) + if not pp: continue + + eta = media['info'].get('release_date', {}) or {} + coming_soon = False + + # Theater quality + if pp.get('theater') and fireEvent('movie.searcher.could_be_released', True, eta, media['info']['year'], single = True): + coming_soon = True + elif pp.get('dvd') and fireEvent('movie.searcher.could_be_released', False, eta, 
media['info']['year'], single = True): + coming_soon = True + + if coming_soon: + + # Don't list older movies + if ((not late and (media['info']['year'] >= now_year-1) and (not eta.get('dvd') and not eta.get('theater') or eta.get('dvd') and eta.get('dvd') > (now - 2419200))) or + (late and ((media['info']['year'] < now_year-1) or ((eta.get('dvd', 0) > 0 or eta.get('theater')) and eta.get('dvd') < (now - 2419200))))): + medias.append(media) + + if len(medias) >= limit: + break + + return { + 'success': True, + 'empty': len(medias) == 0, + 'movies': medias, + } + + getLateView = getSoonView diff --git a/couchpotato/core/plugins/dashboard/__init__.py b/couchpotato/core/plugins/dashboard/__init__.py deleted file mode 100644 index c43a44e..0000000 --- a/couchpotato/core/plugins/dashboard/__init__.py +++ /dev/null @@ -1,7 +0,0 @@ -from .main import Dashboard - - -def start(): - return Dashboard() - -config = [] diff --git a/couchpotato/core/plugins/dashboard/main.py b/couchpotato/core/plugins/dashboard/main.py deleted file mode 100644 index d0d3bfe..0000000 --- a/couchpotato/core/plugins/dashboard/main.py +++ /dev/null @@ -1,89 +0,0 @@ -from datetime import date -from couchpotato import get_db -from couchpotato.api import addApiView -from couchpotato.core.event import fireEvent -from couchpotato.core.helpers.variable import splitString, tryInt -from couchpotato.core.logger import CPLog -from couchpotato.core.plugins.base import Plugin -import random as rndm -import time - -log = CPLog(__name__) - - -class Dashboard(Plugin): - - def __init__(self): - addApiView('dashboard.soon', self.getSoonView) - - def getSoonView(self, limit_offset = None, random = False, late = False, **kwargs): - - db = get_db() - now = time.time() - - # Get profiles first, determine pre or post theater - profiles = fireEvent('profile.all', single = True) - pre_releases = fireEvent('quality.pre_releases', single = True) - - # See what the profile contain and cache it - profile_pre = {} - for profile in profiles: - contains = {} - for q_identifier in profile.get('qualities', []): - contains['theater' if q_identifier in pre_releases else 'dvd'] = True - - profile_pre[profile.get('_id')] = contains - - # Add limit - limit = 12 - if limit_offset: - splt = splitString(limit_offset) if isinstance(limit_offset, (str, unicode)) else limit_offset - limit = tryInt(splt[0]) - - # Get all active medias - active_ids = [x['_id'] for x in db.run('media', 'with_status', 'active', with_doc = False)] - - medias = [] - now_year = date.today().year - - if len(active_ids) > 0: - - # Order by title or randomize - if not random: - orders_ids = db.all('media_title') - active_ids = [x['_id'] for x in orders_ids if x['_id'] in active_ids] - else: - rndm.shuffle(active_ids) - - for media_id in active_ids: - media = db.get('id', media_id) - - pp = profile_pre.get(media['profile_id']) - if not pp: continue - - eta = media['info'].get('release_date', {}) or {} - coming_soon = False - - # Theater quality - if pp.get('theater') and fireEvent('movie.searcher.could_be_released', True, eta, media['info']['year'], single = True): - coming_soon = True - elif pp.get('dvd') and fireEvent('movie.searcher.could_be_released', False, eta, media['info']['year'], single = True): - coming_soon = True - - if coming_soon: - - # Don't list older movies - if ((not late and (media['info']['year'] >= now_year-1) and (not eta.get('dvd') and not eta.get('theater') or eta.get('dvd') and eta.get('dvd') > (now - 2419200))) or - (late and ((media['info']['year'] < now_year-1) or 
((eta.get('dvd', 0) > 0 or eta.get('theater')) and eta.get('dvd') < (now - 2419200))))): - medias.append(media) - - if len(medias) >= limit: - break - - return { - 'success': True, - 'empty': len(medias) == 0, - 'movies': medias, - } - - getLateView = getSoonView diff --git a/couchpotato/core/plugins/file.py b/couchpotato/core/plugins/file.py new file mode 100644 index 0000000..4110437 --- /dev/null +++ b/couchpotato/core/plugins/file.py @@ -0,0 +1,79 @@ +from couchpotato import get_db +from couchpotato.api import addApiView +from couchpotato.core.event import addEvent +from couchpotato.core.helpers.encoding import toUnicode +from couchpotato.core.helpers.variable import md5, getExt +from couchpotato.core.logger import CPLog +from couchpotato.core.plugins.base import Plugin +from couchpotato.environment import Env +from scandir import scandir +from tornado.web import StaticFileHandler +import os.path +import time +import traceback + +log = CPLog(__name__) + +autoload = 'FileManager' + + +class FileManager(Plugin): + + def __init__(self): + addEvent('file.download', self.download) + + addApiView('file.cache/(.*)', self.showCacheFile, static = True, docs = { + 'desc': 'Return a file from the cp_data/cache directory', + 'params': { + 'filename': {'desc': 'path/filename of the wanted file'} + }, + 'return': {'type': 'file'} + }) + + addEvent('app.load', self.cleanup) + + def cleanup(self): + + # Wait a bit after starting before cleanup + time.sleep(2) + log.debug('Cleaning up unused files') + + try: + db = get_db() + cache_dir = Env.get('cache_dir') + medias = db.all('media', with_doc = True) + + files = [] + for media in medias: + file_dict = media['doc'].get('files', {}) + for x in file_dict.keys(): + files.extend(file_dict[x]) + + for f in scandir.scandir(cache_dir): + if os.path.splitext(f.name)[1] in ['.png', '.jpg', '.jpeg']: + file_path = os.path.join(cache_dir, f.name) + if toUnicode(file_path) not in files: + os.remove(file_path) + except: + log.error('Failed removing unused file: %s', traceback.format_exc()) + + def showCacheFile(self, route, **kwargs): + Env.get('app').add_handlers(".*$", [('%s%s' % (Env.get('api_base'), route), StaticFileHandler, {'path': Env.get('cache_dir')})]) + + def download(self, url = '', dest = None, overwrite = False, urlopen_kwargs = None): + if not urlopen_kwargs: urlopen_kwargs = {} + + if not dest: # to Cache + dest = os.path.join(Env.get('cache_dir'), '%s.%s' % (md5(url), getExt(url))) + + if not overwrite and os.path.isfile(dest): + return dest + + try: + filedata = self.urlopen(url, **urlopen_kwargs) + except: + log.error('Failed downloading file %s: %s', (url, traceback.format_exc())) + return False + + self.createFile(dest, filedata, binary = True) + return dest diff --git a/couchpotato/core/plugins/file/__init__.py b/couchpotato/core/plugins/file/__init__.py deleted file mode 100644 index 3dced3d..0000000 --- a/couchpotato/core/plugins/file/__init__.py +++ /dev/null @@ -1,7 +0,0 @@ -from .main import FileManager - - -def start(): - return FileManager() - -config = [] diff --git a/couchpotato/core/plugins/file/main.py b/couchpotato/core/plugins/file/main.py deleted file mode 100644 index 1d4313b..0000000 --- a/couchpotato/core/plugins/file/main.py +++ /dev/null @@ -1,77 +0,0 @@ -from couchpotato import get_db -from couchpotato.api import addApiView -from couchpotato.core.event import addEvent -from couchpotato.core.helpers.encoding import toUnicode -from couchpotato.core.helpers.variable import md5, getExt -from couchpotato.core.logger import 
CPLog -from couchpotato.core.plugins.base import Plugin -from couchpotato.environment import Env -from scandir import scandir -from tornado.web import StaticFileHandler -import os.path -import time -import traceback - -log = CPLog(__name__) - - -class FileManager(Plugin): - - def __init__(self): - addEvent('file.download', self.download) - - addApiView('file.cache/(.*)', self.showCacheFile, static = True, docs = { - 'desc': 'Return a file from the cp_data/cache directory', - 'params': { - 'filename': {'desc': 'path/filename of the wanted file'} - }, - 'return': {'type': 'file'} - }) - - addEvent('app.load', self.cleanup) - - def cleanup(self): - - # Wait a bit after starting before cleanup - time.sleep(2) - log.debug('Cleaning up unused files') - - try: - db = get_db() - cache_dir = Env.get('cache_dir') - medias = db.all('media', with_doc = True) - - files = [] - for media in medias: - file_dict = media['doc'].get('files', {}) - for x in file_dict.keys(): - files.extend(file_dict[x]) - - for f in scandir.scandir(cache_dir): - if os.path.splitext(f.name)[1] in ['.png', '.jpg', '.jpeg']: - file_path = os.path.join(cache_dir, f.name) - if toUnicode(file_path) not in files: - os.remove(file_path) - except: - log.error('Failed removing unused file: %s', traceback.format_exc()) - - def showCacheFile(self, route, **kwargs): - Env.get('app').add_handlers(".*$", [('%s%s' % (Env.get('api_base'), route), StaticFileHandler, {'path': Env.get('cache_dir')})]) - - def download(self, url = '', dest = None, overwrite = False, urlopen_kwargs = None): - if not urlopen_kwargs: urlopen_kwargs = {} - - if not dest: # to Cache - dest = os.path.join(Env.get('cache_dir'), '%s.%s' % (md5(url), getExt(url))) - - if not overwrite and os.path.isfile(dest): - return dest - - try: - filedata = self.urlopen(url, **urlopen_kwargs) - except: - log.error('Failed downloading file %s: %s', (url, traceback.format_exc())) - return False - - self.createFile(dest, filedata, binary = True) - return dest diff --git a/couchpotato/core/plugins/log/__init__.py b/couchpotato/core/plugins/log/__init__.py index f5d9d10..3760b56 100644 --- a/couchpotato/core/plugins/log/__init__.py +++ b/couchpotato/core/plugins/log/__init__.py @@ -1,7 +1,5 @@ from .main import Logging -def start(): +def autoload(): return Logging() - -config = [] diff --git a/couchpotato/core/plugins/manage.py b/couchpotato/core/plugins/manage.py new file mode 100644 index 0000000..205de13 --- /dev/null +++ b/couchpotato/core/plugins/manage.py @@ -0,0 +1,309 @@ +from couchpotato import get_db +from couchpotato.api import addApiView +from couchpotato.core.event import fireEvent, addEvent, fireEventAsync +from couchpotato.core.helpers.encoding import sp +from couchpotato.core.helpers.variable import splitString, getTitle, tryInt +from couchpotato.core.logger import CPLog +from couchpotato.core.plugins.base import Plugin +from couchpotato.environment import Env +import ctypes +import os +import sys +import time +import traceback + +log = CPLog(__name__) + +autoload = 'Manage' + + +class Manage(Plugin): + + in_progress = False + + def __init__(self): + + fireEvent('scheduler.interval', identifier = 'manage.update_library', handle = self.updateLibrary, hours = 2) + + addEvent('manage.update', self.updateLibrary) + addEvent('manage.diskspace', self.getDiskSpace) + + # Add files after renaming + def after_rename(message = None, group = None): + if not group: group = {} + return self.scanFilesToLibrary(folder = group['destination_dir'], files = group['renamed_files']) + 
addEvent('renamer.after', after_rename, priority = 110) + + addApiView('manage.update', self.updateLibraryView, docs = { + 'desc': 'Update the library by scanning for new movies', + 'params': { + 'full': {'desc': 'Do a full update or just recently changed/added movies.'}, + } + }) + + addApiView('manage.progress', self.getProgress, docs = { + 'desc': 'Get the progress of current manage update', + 'return': {'type': 'object', 'example': """{ + 'progress': False || object, total & to_go, +}"""}, + }) + + if not Env.get('dev') and self.conf('startup_scan'): + addEvent('app.load', self.updateLibraryQuick) + + def getProgress(self, **kwargs): + return { + 'progress': self.in_progress + } + + def updateLibraryView(self, full = 1, **kwargs): + + fireEventAsync('manage.update', full = True if full == '1' else False) + + return { + 'progress': self.in_progress, + 'success': True + } + + def updateLibraryQuick(self): + return self.updateLibrary(full = False) + + def updateLibrary(self, full = True): + last_update = float(Env.prop('manage.last_update', default = 0)) + + if self.in_progress: + log.info('Already updating library: %s', self.in_progress) + return + elif self.isDisabled() or (last_update > time.time() - 20): + return + + self.in_progress = {} + fireEvent('notify.frontend', type = 'manage.updating', data = True) + + try: + + directories = self.directories() + directories.sort() + added_identifiers = [] + + # Add some progress + for directory in directories: + self.in_progress[os.path.normpath(directory)] = { + 'started': False, + 'eta': -1, + 'total': None, + 'to_go': None, + } + + for directory in directories: + folder = os.path.normpath(directory) + self.in_progress[os.path.normpath(directory)]['started'] = tryInt(time.time()) + + if not os.path.isdir(folder): + if len(directory) > 0: + log.error('Directory doesn\'t exist: %s', folder) + continue + + log.info('Updating manage library: %s', folder) + fireEvent('notify.frontend', type = 'manage.update', data = True, message = 'Scanning for movies in "%s"' % folder) + + onFound = self.createAddToLibrary(folder, added_identifiers) + fireEvent('scanner.scan', folder = folder, simple = True, newer_than = last_update if not full else 0, on_found = onFound, single = True) + + # Break if CP wants to shut down + if self.shuttingDown(): + break + + # If cleanup option is enabled, remove offline files from database + if self.conf('cleanup') and full and not self.shuttingDown(): + + # Get movies with done status + total_movies, done_movies = fireEvent('media.list', types = 'movie', status = 'done', single = True) + + for done_movie in done_movies: + if done_movie['identifier'] not in added_identifiers: + fireEvent('media.delete', media_id = done_movie['_id'], delete_from = 'all') + else: + + db = get_db() + releases = list(db.run('release', 'for_media', done_movie.get('_id'))) + + for release in releases: + if release.get('files'): + for file_type in release.get('files', {}): + for release_file in release['files'][file_type]: + # Remove release not available anymore + if not os.path.isfile(sp(release_file)): + fireEvent('release.clean', release['_id']) + break + + # Check if there are duplicate releases (different quality) use the last one, delete the rest + if len(releases) > 1: + used_files = {} + for release in releases: + for file_type in release.get('files', {}): + for release_file in release['files'][file_type]: + already_used = used_files.get(release_file) + + if already_used: + if already_used.get('last_edit', 0) < release.get('last_edit', 
0): + fireEvent('release.delete', release['_id'], single = True) # delete current one + else: + fireEvent('release.delete', already_used['_id'], single = True) # delete previous one + break + else: + used_files[release_file] = release + del used_files + + Env.prop('manage.last_update', time.time()) + except: + log.error('Failed updating library: %s', (traceback.format_exc())) + + while True and not self.shuttingDown(): + + delete_me = {} + + for folder in self.in_progress: + if self.in_progress[folder]['to_go'] <= 0: + delete_me[folder] = True + + for delete in delete_me: + del self.in_progress[delete] + + if len(self.in_progress) == 0: + break + + time.sleep(1) + + fireEvent('notify.frontend', type = 'manage.updating', data = False) + self.in_progress = False + + def createAddToLibrary(self, folder, added_identifiers = []): + + def addToLibrary(group, total_found, to_go): + if self.in_progress[folder]['total'] is None: + self.in_progress[folder].update({ + 'total': total_found, + 'to_go': total_found, + }) + + if group['media'] and group['identifier']: + added_identifiers.append(group['identifier']) + + # Add it to release and update the info + fireEvent('release.add', group = group, update_info = False) + fireEvent('movie.update_info', identifier = group['identifier'], on_complete = self.createAfterUpdate(folder, group['identifier'])) + else: + self.updateProgress(folder) + + return addToLibrary + + def createAfterUpdate(self, folder, identifier): + + # Notify frontend + def afterUpdate(): + if not self.in_progress or self.shuttingDown(): + return + + self.updateProgress(folder) + total = self.in_progress[folder]['total'] + movie_dict = fireEvent('media.get', identifier, single = True) + + fireEvent('notify.frontend', type = 'movie.added', data = movie_dict, message = None if total > 5 else 'Added "%s" to manage.' 
% getTitle(movie_dict)) + + return afterUpdate + + def updateProgress(self, folder): + + pr = self.in_progress[folder] + pr['to_go'] -= 1 + + avg = (time.time() - pr['started'])/(pr['total'] - pr['to_go']) + pr['eta'] = tryInt(avg * pr['to_go']) + + + def directories(self): + try: + if self.conf('library', default = '').strip(): + return splitString(self.conf('library', default = ''), '::') + except: + pass + + return [] + + def scanFilesToLibrary(self, folder = None, files = None): + + folder = os.path.normpath(folder) + + groups = fireEvent('scanner.scan', folder = folder, files = files, single = True) + + if groups: + for group in groups.values(): + if group.get('media'): + fireEvent('release.add', group = group) + + def getDiskSpace(self): + + free_space = {} + for folder in self.directories(): + + size = None + if os.path.isdir(folder): + if os.name == 'nt': + _, total, free = ctypes.c_ulonglong(), ctypes.c_ulonglong(), \ + ctypes.c_ulonglong() + if sys.version_info >= (3,) or isinstance(folder, unicode): + fun = ctypes.windll.kernel32.GetDiskFreeSpaceExW #@UndefinedVariable + else: + fun = ctypes.windll.kernel32.GetDiskFreeSpaceExA #@UndefinedVariable + ret = fun(folder, ctypes.byref(_), ctypes.byref(total), ctypes.byref(free)) + if ret == 0: + raise ctypes.WinError() + used = total.value - free.value + return [total.value, used, free.value] + else: + s = os.statvfs(folder) + size = [s.f_blocks * s.f_frsize / (1024 * 1024), (s.f_bavail * s.f_frsize) / (1024 * 1024)] + + free_space[folder] = size + + return free_space + + +config = [{ + 'name': 'manage', + 'groups': [ + { + 'tab': 'manage', + 'label': 'Movie Library Manager', + 'description': 'Add your existing movie folders.', + 'options': [ + { + 'name': 'enabled', + 'default': False, + 'type': 'enabler', + }, + { + 'name': 'library', + 'type': 'directories', + 'description': 'Folder where the movies should be moved to.', + }, + { + 'label': 'Cleanup After', + 'name': 'cleanup', + 'type': 'bool', + 'description': 'Remove movie from db if it can\'t be found after re-scan.', + 'default': True, + }, + { + 'label': 'Scan at startup', + 'name': 'startup_scan', + 'type': 'bool', + 'default': True, + 'advanced': True, + 'description': 'Do a quick scan on startup. On slow systems better disable this.', + }, + ], + }, + ], +}] diff --git a/couchpotato/core/plugins/manage/__init__.py b/couchpotato/core/plugins/manage/__init__.py deleted file mode 100644 index c992dee..0000000 --- a/couchpotato/core/plugins/manage/__init__.py +++ /dev/null @@ -1,43 +0,0 @@ -from .main import Manage - - -def start(): - return Manage() - -config = [{ - 'name': 'manage', - 'groups': [ - { - 'tab': 'manage', - 'label': 'Movie Library Manager', - 'description': 'Add your existing movie folders.', - 'options': [ - { - 'name': 'enabled', - 'default': False, - 'type': 'enabler', - }, - { - 'name': 'library', - 'type': 'directories', - 'description': 'Folder where the movies should be moved to.', - }, - { - 'label': 'Cleanup After', - 'name': 'cleanup', - 'type': 'bool', - 'description': 'Remove movie from db if it can\'t be found after re-scan.', - 'default': True, - }, - { - 'label': 'Scan at startup', - 'name': 'startup_scan', - 'type': 'bool', - 'default': True, - 'advanced': True, - 'description': 'Do a quick scan on startup. 
On slow systems better disable this.', - }, - ], - }, - ], -}] diff --git a/couchpotato/core/plugins/manage/main.py b/couchpotato/core/plugins/manage/main.py deleted file mode 100644 index 965bd9f..0000000 --- a/couchpotato/core/plugins/manage/main.py +++ /dev/null @@ -1,270 +0,0 @@ -from couchpotato import get_db -from couchpotato.api import addApiView -from couchpotato.core.event import fireEvent, addEvent, fireEventAsync -from couchpotato.core.helpers.encoding import sp -from couchpotato.core.helpers.variable import splitString, getTitle, tryInt -from couchpotato.core.logger import CPLog -from couchpotato.core.plugins.base import Plugin -from couchpotato.environment import Env -import ctypes -import os -import sys -import time -import traceback - - -log = CPLog(__name__) - - -class Manage(Plugin): - - in_progress = False - - def __init__(self): - - fireEvent('scheduler.interval', identifier = 'manage.update_library', handle = self.updateLibrary, hours = 2) - - addEvent('manage.update', self.updateLibrary) - addEvent('manage.diskspace', self.getDiskSpace) - - # Add files after renaming - def after_rename(message = None, group = None): - if not group: group = {} - return self.scanFilesToLibrary(folder = group['destination_dir'], files = group['renamed_files']) - addEvent('renamer.after', after_rename, priority = 110) - - addApiView('manage.update', self.updateLibraryView, docs = { - 'desc': 'Update the library by scanning for new movies', - 'params': { - 'full': {'desc': 'Do a full update or just recently changed/added movies.'}, - } - }) - - addApiView('manage.progress', self.getProgress, docs = { - 'desc': 'Get the progress of current manage update', - 'return': {'type': 'object', 'example': """{ - 'progress': False || object, total & to_go, -}"""}, - }) - - if not Env.get('dev') and self.conf('startup_scan'): - addEvent('app.load', self.updateLibraryQuick) - - def getProgress(self, **kwargs): - return { - 'progress': self.in_progress - } - - def updateLibraryView(self, full = 1, **kwargs): - - fireEventAsync('manage.update', full = True if full == '1' else False) - - return { - 'progress': self.in_progress, - 'success': True - } - - def updateLibraryQuick(self): - return self.updateLibrary(full = False) - - def updateLibrary(self, full = True): - last_update = float(Env.prop('manage.last_update', default = 0)) - - if self.in_progress: - log.info('Already updating library: %s', self.in_progress) - return - elif self.isDisabled() or (last_update > time.time() - 20): - return - - self.in_progress = {} - fireEvent('notify.frontend', type = 'manage.updating', data = True) - - try: - - directories = self.directories() - directories.sort() - added_identifiers = [] - - # Add some progress - for directory in directories: - self.in_progress[os.path.normpath(directory)] = { - 'started': False, - 'eta': -1, - 'total': None, - 'to_go': None, - } - - for directory in directories: - folder = os.path.normpath(directory) - self.in_progress[os.path.normpath(directory)]['started'] = tryInt(time.time()) - - if not os.path.isdir(folder): - if len(directory) > 0: - log.error('Directory doesn\'t exist: %s', folder) - continue - - log.info('Updating manage library: %s', folder) - fireEvent('notify.frontend', type = 'manage.update', data = True, message = 'Scanning for movies in "%s"' % folder) - - onFound = self.createAddToLibrary(folder, added_identifiers) - fireEvent('scanner.scan', folder = folder, simple = True, newer_than = last_update if not full else 0, on_found = onFound, single = True) - - # Break if 
CP wants to shut down - if self.shuttingDown(): - break - - # If cleanup option is enabled, remove offline files from database - if self.conf('cleanup') and full and not self.shuttingDown(): - - # Get movies with done status - total_movies, done_movies = fireEvent('media.list', types = 'movie', status = 'done', single = True) - - for done_movie in done_movies: - if done_movie['identifier'] not in added_identifiers: - fireEvent('media.delete', media_id = done_movie['_id'], delete_from = 'all') - else: - - db = get_db() - releases = list(db.run('release', 'for_media', done_movie.get('_id'))) - - for release in releases: - if release.get('files'): - for file_type in release.get('files', {}): - for release_file in release['files'][file_type]: - # Remove release not available anymore - if not os.path.isfile(sp(release_file)): - fireEvent('release.clean', release['_id']) - break - - # Check if there are duplicate releases (different quality) use the last one, delete the rest - if len(releases) > 1: - used_files = {} - for release in releases: - for file_type in release.get('files', {}): - for release_file in release['files'][file_type]: - already_used = used_files.get(release_file) - - if already_used: - if already_used.get('last_edit', 0) < release.get('last_edit', 0): - fireEvent('release.delete', release['_id'], single = True) # delete current one - else: - fireEvent('release.delete', already_used['_id'], single = True) # delete previous one - break - else: - used_files[release_file] = release - del used_files - - Env.prop('manage.last_update', time.time()) - except: - log.error('Failed updating library: %s', (traceback.format_exc())) - - while True and not self.shuttingDown(): - - delete_me = {} - - for folder in self.in_progress: - if self.in_progress[folder]['to_go'] <= 0: - delete_me[folder] = True - - for delete in delete_me: - del self.in_progress[delete] - - if len(self.in_progress) == 0: - break - - time.sleep(1) - - fireEvent('notify.frontend', type = 'manage.updating', data = False) - self.in_progress = False - - def createAddToLibrary(self, folder, added_identifiers = []): - - def addToLibrary(group, total_found, to_go): - if self.in_progress[folder]['total'] is None: - self.in_progress[folder].update({ - 'total': total_found, - 'to_go': total_found, - }) - - if group['media'] and group['identifier']: - added_identifiers.append(group['identifier']) - - # Add it to release and update the info - fireEvent('release.add', group = group, update_info = False) - fireEvent('movie.update_info', identifier = group['identifier'], on_complete = self.createAfterUpdate(folder, group['identifier'])) - else: - self.updateProgress(folder) - - return addToLibrary - - def createAfterUpdate(self, folder, identifier): - - # Notify frontend - def afterUpdate(): - if not self.in_progress or self.shuttingDown(): - return - - self.updateProgress(folder) - total = self.in_progress[folder]['total'] - movie_dict = fireEvent('media.get', identifier, single = True) - - fireEvent('notify.frontend', type = 'movie.added', data = movie_dict, message = None if total > 5 else 'Added "%s" to manage.' 
% getTitle(movie_dict)) - - return afterUpdate - - def updateProgress(self, folder): - - pr = self.in_progress[folder] - pr['to_go'] -= 1 - - avg = (time.time() - pr['started'])/(pr['total'] - pr['to_go']) - pr['eta'] = tryInt(avg * pr['to_go']) - - - def directories(self): - try: - if self.conf('library', default = '').strip(): - return splitString(self.conf('library', default = ''), '::') - except: - pass - - return [] - - def scanFilesToLibrary(self, folder = None, files = None): - - folder = os.path.normpath(folder) - - groups = fireEvent('scanner.scan', folder = folder, files = files, single = True) - - if groups: - for group in groups.values(): - if group.get('media'): - fireEvent('release.add', group = group) - - def getDiskSpace(self): - - free_space = {} - for folder in self.directories(): - - size = None - if os.path.isdir(folder): - if os.name == 'nt': - _, total, free = ctypes.c_ulonglong(), ctypes.c_ulonglong(), \ - ctypes.c_ulonglong() - if sys.version_info >= (3,) or isinstance(folder, unicode): - fun = ctypes.windll.kernel32.GetDiskFreeSpaceExW #@UndefinedVariable - else: - fun = ctypes.windll.kernel32.GetDiskFreeSpaceExA #@UndefinedVariable - ret = fun(folder, ctypes.byref(_), ctypes.byref(total), ctypes.byref(free)) - if ret == 0: - raise ctypes.WinError() - used = total.value - free.value - return [total.value, used, free.value] - else: - s = os.statvfs(folder) - size = [s.f_blocks * s.f_frsize / (1024 * 1024), (s.f_bavail * s.f_frsize) / (1024 * 1024)] - - free_space[folder] = size - - return free_space - diff --git a/couchpotato/core/plugins/profile/__init__.py b/couchpotato/core/plugins/profile/__init__.py index c07bc7c..15a74ee 100644 --- a/couchpotato/core/plugins/profile/__init__.py +++ b/couchpotato/core/plugins/profile/__init__.py @@ -1,7 +1,5 @@ from .main import ProfilePlugin -def start(): +def autoload(): return ProfilePlugin() - -config = [] diff --git a/couchpotato/core/plugins/quality/__init__.py b/couchpotato/core/plugins/quality/__init__.py index 2630f1a..fc9cda6 100644 --- a/couchpotato/core/plugins/quality/__init__.py +++ b/couchpotato/core/plugins/quality/__init__.py @@ -1,7 +1,7 @@ from .main import QualityPlugin -def start(): +def autoload(): return QualityPlugin() config = [] diff --git a/couchpotato/core/plugins/release/__init__.py b/couchpotato/core/plugins/release/__init__.py index 08c6a57..e6e60c4 100644 --- a/couchpotato/core/plugins/release/__init__.py +++ b/couchpotato/core/plugins/release/__init__.py @@ -1,7 +1,5 @@ from .main import Release -def start(): +def autoload(): return Release() - -config = [] diff --git a/couchpotato/core/plugins/renamer.py b/couchpotato/core/plugins/renamer.py new file mode 100644 index 0000000..7e4fc99 --- /dev/null +++ b/couchpotato/core/plugins/renamer.py @@ -0,0 +1,1363 @@ +from couchpotato import get_db +from couchpotato.api import addApiView +from couchpotato.core.event import addEvent, fireEvent, fireEventAsync +from couchpotato.core.helpers.encoding import toUnicode, ss, sp +from couchpotato.core.helpers.variable import getExt, mergeDicts, getTitle, \ + getImdb, link, symlink, tryInt, splitString, fnEscape, isSubFolder +from couchpotato.core.logger import CPLog +from couchpotato.core.plugins.base import Plugin +from couchpotato.environment import Env +from scandir import scandir +from unrar2 import RarFile +import fnmatch +import os +import re +import shutil +import time +import traceback +import six +from six.moves import filter + +log = CPLog(__name__) + +autoload = 'Renamer' + + +class Renamer(Plugin): 
+ + renaming_started = False + checking_snatched = False + + def __init__(self): + addApiView('renamer.scan', self.scanView, docs = { + 'desc': 'For the renamer to check for new files to rename in a folder', + 'params': { + 'async': {'desc': 'Optional: Set to 1 if you dont want to fire the renamer.scan asynchronous.'}, + 'media_folder': {'desc': 'Optional: The folder of the media to scan. Keep empty for default renamer folder.'}, + 'files': {'desc': 'Optional: Provide the release files if more releases are in the same media_folder, delimited with a \'|\'. Note that no dedicated release folder is expected for releases with one file.'}, + 'base_folder': {'desc': 'Optional: The folder to find releases in. Leave empty for default folder.'}, + 'downloader': {'desc': 'Optional: The downloader the release has been downloaded with. \'download_id\' is required with this option.'}, + 'download_id': {'desc': 'Optional: The nzb/torrent ID of the release in media_folder. \'downloader\' is required with this option.'}, + 'status': {'desc': 'Optional: The status of the release: \'completed\' (default) or \'seeding\''}, + }, + }) + + addEvent('renamer.scan', self.scan) + addEvent('renamer.check_snatched', self.checkSnatched) + + addEvent('app.load', self.scan) + addEvent('app.load', self.setCrons) + + # Enable / disable interval + addEvent('setting.save.renamer.enabled.after', self.setCrons) + addEvent('setting.save.renamer.run_every.after', self.setCrons) + addEvent('setting.save.renamer.force_every.after', self.setCrons) + + def setCrons(self): + + fireEvent('schedule.remove', 'renamer.check_snatched') + if self.isEnabled() and self.conf('run_every') > 0: + fireEvent('schedule.interval', 'renamer.check_snatched', self.checkSnatched, minutes = self.conf('run_every'), single = True) + + fireEvent('schedule.remove', 'renamer.check_snatched_forced') + if self.isEnabled() and self.conf('force_every') > 0: + fireEvent('schedule.interval', 'renamer.check_snatched_forced', self.scan, hours = self.conf('force_every'), single = True) + + return True + + def scanView(self, **kwargs): + + async = tryInt(kwargs.get('async', 0)) + base_folder = kwargs.get('base_folder') + media_folder = sp(kwargs.get('media_folder')) + + # Backwards compatibility, to be removed after a few versions :) + if not media_folder: + media_folder = sp(kwargs.get('movie_folder')) + + downloader = kwargs.get('downloader') + download_id = kwargs.get('download_id') + files = '|'.join([sp(filename) for filename in splitString(kwargs.get('files'), '|')]) + status = kwargs.get('status', 'completed') + + release_download = None + if not base_folder and media_folder: + release_download = {'folder': media_folder} + release_download.update({'id': download_id, 'downloader': downloader, 'status': status, 'files': files} if download_id else {}) + + fire_handle = fireEvent if not async else fireEventAsync + + fire_handle('renamer.scan', base_folder = base_folder, release_download = release_download) + + return { + 'success': True + } + + def scan(self, base_folder = None, release_download = None): + if not release_download: release_download = {} + + if self.isDisabled(): + return + + if self.renaming_started is True: + log.info('Renamer is already running, if you see this often, check the logs above for errors.') + return + + if not base_folder: + base_folder = self.conf('from') + + from_folder = sp(self.conf('from')) + to_folder = sp(self.conf('to')) + + # Get media folder to process + media_folder = release_download.get('folder') + + # Quality order for 
calculation quality priority + quality_order = fireEvent('quality.order', single = True) + + # Get all folders that should not be processed + no_process = [to_folder] + cat_list = fireEvent('category.all', single = True) or [] + no_process.extend([item['destination'] for item in cat_list]) + try: + if Env.setting('library', section = 'manage').strip(): + no_process.extend([sp(manage_folder) for manage_folder in splitString(Env.setting('library', section = 'manage'), '::')]) + except: + pass + + # Check to see if the no_process folders are inside the "from" folder. + if not os.path.isdir(base_folder) or not os.path.isdir(to_folder): + log.error('Both the "To" and "From" folder have to exist.') + return + else: + for item in no_process: + if isSubFolder(item, base_folder): + log.error('To protect your data, the media libraries can\'t be inside of or the same as the "from" folder.') + return + + # Check to see if the no_process folders are inside the provided media_folder + if media_folder and not os.path.isdir(media_folder): + log.debug('The provided media folder %s does not exist. Trying to find it in the \'from\' folder.', media_folder) + + # Update to the from folder + if len(splitString(release_download.get('files'), '|')) == 1: + new_media_folder = from_folder + else: + new_media_folder = os.path.join(from_folder, os.path.basename(media_folder)) + + if not os.path.isdir(new_media_folder): + log.error('The provided media folder %s does not exist and could also not be found in the \'from\' folder.', media_folder) + return + + # Update the files + new_files = [os.path.join(new_media_folder, os.path.relpath(filename, media_folder)) for filename in splitString(release_download.get('files'), '|')] + if new_files and not os.path.isfile(new_files[0]): + log.error('The provided media folder %s does not exist and its files could also not be found in the \'from\' folder.', media_folder) + return + + # Update release_download info to the from folder + log.debug('Release %s found in the \'from\' folder.', media_folder) + release_download['folder'] = new_media_folder + release_download['files'] = '|'.join(new_files) + media_folder = new_media_folder + + if media_folder: + for item in no_process: + if isSubFolder(item, media_folder): + log.error('To protect your data, the media libraries can\'t be inside of or the same as the provided media folder.') + return + + # Make sure a checkSnatched marked all downloads/seeds as such + if not release_download and self.conf('run_every') > 0: + self.checkSnatched(fire_scan = False) + + self.renaming_started = True + + # make sure the media folder name is included in the search + folder = None + files = [] + if media_folder: + log.info('Scanning media folder %s...', media_folder) + folder = os.path.dirname(media_folder) + + if release_download.get('files', ''): + files = splitString(release_download['files'], '|') + + # If there is only one file in the torrent, the downloader did not create a subfolder + if len(files) == 1: + folder = media_folder + else: + # Get all files from the specified folder + try: + for root, folders, names in scandir.walk(media_folder): + files.extend([sp(os.path.join(root, name)) for name in names]) + except: + log.error('Failed getting files from %s: %s', (media_folder, traceback.format_exc())) + + db = get_db() + + # Extend the download info with info stored in the downloaded release + release_download = self.extendReleaseDownload(release_download) + + # Unpack any archives + extr_files = None + if self.conf('unrar'): + folder, 
media_folder, files, extr_files = self.extractFiles(folder = folder, media_folder = media_folder, files = files, + cleanup = self.conf('cleanup') and not self.downloadIsTorrent(release_download)) + + groups = fireEvent('scanner.scan', folder = folder if folder else base_folder, + files = files, release_download = release_download, return_ignored = False, single = True) or [] + + folder_name = self.conf('folder_name') + file_name = self.conf('file_name') + trailer_name = self.conf('trailer_name') + nfo_name = self.conf('nfo_name') + separator = self.conf('separator') + + # Tag release folder as failed_rename in case no groups were found. This prevents check_snatched from removing the release from the downloader. + if not groups and self.statusInfoComplete(release_download): + self.tagRelease(release_download = release_download, tag = 'failed_rename') + + for group_identifier in groups: + + group = groups[group_identifier] + rename_files = {} + remove_files = [] + remove_releases = [] + + media_title = getTitle(group) + + # Add _UNKNOWN_ if no library item is connected + if not group.get('media') or not media_title: + self.tagRelease(group = group, tag = 'unknown') + continue + # Rename the files using the library data + else: + + # Media not in library, add it first + if not group['media'].get('_id'): + group['media'] = fireEvent('movie.add', params = { + 'identifier': group['identifier'], + 'profile_id': None + }, search_after = False, status = 'done', single = True) + else: + group['media'] = fireEvent('movie.update_info', identifier = group['media']['identifier'], single = True) + + if not group['media'] or not group['media'].get('_id'): + log.error('Could not rename, no library item to work with: %s', group_identifier) + continue + + media = group['media'] + media_title = getTitle(media) + + # Overwrite destination when set in category + destination = to_folder + category_label = '' + + if media.get('category_id'): + try: + category = db.get('id', media['category_id']) + category_label = category['label'] + + if category['destination'] and len(category['destination']) > 0 and category['destination'] != 'None': + destination = category['destination'] + log.debug('Setting category destination for "%s": %s' % (media_title, destination)) + else: + log.debug('No category destination found for "%s"' % media_title) + except: + log.error('Failed getting category label: %s', traceback.format_exc()) + + # Find subtitle for renaming + group['before_rename'] = [] + fireEvent('renamer.before', group) + + # Add extracted files to the before_rename list + if extr_files: + group['before_rename'].extend(extr_files) + + # Remove weird chars from movie name + movie_name = re.sub(r"[\x00\/\\:\*\?\"<>\|]", '', media_title) + + # Put 'The' at the end + name_the = movie_name + if movie_name[:4].lower() == 'the ': + name_the = movie_name[4:] + ', The' + + replacements = { + 'ext': 'mkv', + 'namethe': name_the.strip(), + 'thename': movie_name.strip(), + 'year': media['info']['year'], + 'first': name_the[0].upper(), + 'quality': group['meta_data']['quality']['label'], + 'quality_type': group['meta_data']['quality_type'], + 'video': group['meta_data'].get('video'), + 'audio': group['meta_data'].get('audio'), + 'group': group['meta_data']['group'], + 'source': group['meta_data']['source'], + 'resolution_width': group['meta_data'].get('resolution_width'), + 'resolution_height': group['meta_data'].get('resolution_height'), + 'audio_channels': group['meta_data'].get('audio_channels'), + 'imdb_id': 
group['identifier'], + 'cd': '', + 'cd_nr': '', + 'mpaa': media['info'].get('mpaa', ''), + 'category': category_label, + } + + for file_type in group['files']: + + # Move nfo depending on settings + if file_type is 'nfo' and not self.conf('rename_nfo'): + log.debug('Skipping, renaming of %s disabled', file_type) + for current_file in group['files'][file_type]: + if self.conf('cleanup') and (not self.downloadIsTorrent(release_download) or self.fileIsAdded(current_file, group)): + remove_files.append(current_file) + continue + + # Subtitle extra + if file_type is 'subtitle_extra': + continue + + # Move other files + multiple = len(group['files'][file_type]) > 1 and not group['is_dvd'] + cd = 1 if multiple else 0 + + for current_file in sorted(list(group['files'][file_type])): + current_file = sp(current_file) + + # Original filename + replacements['original'] = os.path.splitext(os.path.basename(current_file))[0] + replacements['original_folder'] = fireEvent('scanner.remove_cptag', group['dirname'], single = True) + + # Extension + replacements['ext'] = getExt(current_file) + + # cd # + replacements['cd'] = ' cd%d' % cd if multiple else '' + replacements['cd_nr'] = cd if multiple else '' + + # Naming + final_folder_name = self.doReplace(folder_name, replacements, folder = True) + final_file_name = self.doReplace(file_name, replacements) + replacements['filename'] = final_file_name[:-(len(getExt(final_file_name)) + 1)] + + # Meta naming + if file_type is 'trailer': + final_file_name = self.doReplace(trailer_name, replacements, remove_multiple = True) + elif file_type is 'nfo': + final_file_name = self.doReplace(nfo_name, replacements, remove_multiple = True) + + # Seperator replace + if separator: + final_file_name = final_file_name.replace(' ', separator) + + # Move DVD files (no structure renaming) + if group['is_dvd'] and file_type is 'movie': + found = False + for top_dir in ['video_ts', 'audio_ts', 'bdmv', 'certificate']: + has_string = current_file.lower().find(os.path.sep + top_dir + os.path.sep) + if has_string >= 0: + structure_dir = current_file[has_string:].lstrip(os.path.sep) + rename_files[current_file] = os.path.join(destination, final_folder_name, structure_dir) + found = True + break + + if not found: + log.error('Could not determine dvd structure for: %s', current_file) + + # Do rename others + else: + if file_type is 'leftover': + if self.conf('move_leftover'): + rename_files[current_file] = os.path.join(destination, final_folder_name, os.path.basename(current_file)) + elif file_type not in ['subtitle']: + rename_files[current_file] = os.path.join(destination, final_folder_name, final_file_name) + + # Check for extra subtitle files + if file_type is 'subtitle': + + remove_multiple = False + if len(group['files']['movie']) == 1: + remove_multiple = True + + sub_langs = group['subtitle_language'].get(current_file, []) + + # rename subtitles with or without language + sub_name = self.doReplace(file_name, replacements, remove_multiple = remove_multiple) + rename_files[current_file] = os.path.join(destination, final_folder_name, sub_name) + + rename_extras = self.getRenameExtras( + extra_type = 'subtitle_extra', + replacements = replacements, + folder_name = folder_name, + file_name = file_name, + destination = destination, + group = group, + current_file = current_file, + remove_multiple = remove_multiple, + ) + + # Don't add language if multiple languages in 1 subtitle file + if len(sub_langs) == 1: + sub_name = sub_name.replace(replacements['ext'], '%s.%s' % (sub_langs[0], 
replacements['ext'])) + rename_files[current_file] = os.path.join(destination, final_folder_name, sub_name) + + rename_files = mergeDicts(rename_files, rename_extras) + + # Filename without cd etc + elif file_type is 'movie': + rename_extras = self.getRenameExtras( + extra_type = 'movie_extra', + replacements = replacements, + folder_name = folder_name, + file_name = file_name, + destination = destination, + group = group, + current_file = current_file + ) + rename_files = mergeDicts(rename_files, rename_extras) + + group['filename'] = self.doReplace(file_name, replacements, remove_multiple = True)[:-(len(getExt(final_file_name)) + 1)] + group['destination_dir'] = os.path.join(destination, final_folder_name) + + if multiple: + cd += 1 + + # Before renaming, remove the lower quality files + remove_leftovers = True + + # Mark movie "done" once it's found the quality with the finish check + try: + if media.get('status') == 'active' and media.get('profile_id'): + profile = db.get('id', media['profile_id']) + if group['meta_data']['quality']['identifier'] in profile.get('qualities', []): + nr = profile['qualities'].index(group['meta_data']['quality']['identifier']) + finish = profile['finish'][nr] + if finish: + mdia = db.get('id', media['_id']) + mdia['status'] = 'done' + mdia['last_edit'] = int(time.time()) + db.update(mdia) + + except Exception as e: + log.error('Failed marking movie finished: %s', (traceback.format_exc())) + + # Go over current movie releases + for release in db.run('release', 'for_media', media['_id']): + + # When a release already exists + if release.get('status') == 'done': + + release_order = quality_order.index(release['quality']) + group_quality_order = quality_order.index(group['meta_data']['quality']['identifier']) + + # This is where CP removes older, lesser quality releases + if release_order > group_quality_order: + log.info('Removing lesser quality %s for %s.', (media_title, release.get('quality'))) + for file_type in release.get('files', {}): + for release_file in release['files'][file_type]: + remove_files.append(release_file) + remove_releases.append(release) + # Same quality, but still downloaded, so maybe repack/proper/unrated/directors cut etc + elif release_order == group_quality_order: + log.info('Same quality release already exists for %s, with quality %s. Assuming repack.', (media_title, release.get('quality'))) + for file_type in release.get('files', {}): + for release_file in release['files'][file_type]: + remove_files.append(release_file) + remove_releases.append(release) + + # Downloaded a lower quality, rename the newly downloaded files/folder to exclude them from scan + else: + log.info('Better quality release already exists for %s, with quality %s', (media_title, release.get('quality'))) + + # Add exists tag to the .ignore file + self.tagRelease(group = group, tag = 'exists') + + # Notify on rename fail + download_message = 'Renaming of %s (%s) cancelled, exists in %s already.' 
% (media_title, group['meta_data']['quality']['label'], release.get('identifier')) + fireEvent('movie.renaming.canceled', message = download_message, data = group) + remove_leftovers = False + + break + + elif release.get('status') in ['snatched', 'seeding']: + if release_download and release_download.get('release_id'): + if release_download['release_id'] == release['_id']: + if release_download['status'] == 'completed': + # Set the release to downloaded + fireEvent('release.update_status', release['_id'], status = 'downloaded', single = True) + elif release_download['status'] == 'seeding': + # Set the release to seeding + fireEvent('release.update_status', release['_id'], status = 'seeding', single = True) + + elif release.get('identifier') == group['meta_data']['quality']['identifier']: + # Set the release to downloaded + fireEvent('release.update_status', release['_id'], status = 'downloaded', single = True) + + # Remove leftover files + if not remove_leftovers: # Don't remove anything + break + + log.debug('Removing leftover files') + for current_file in group['files']['leftover']: + if self.conf('cleanup') and not self.conf('move_leftover') and \ + (not self.downloadIsTorrent(release_download) or self.fileIsAdded(current_file, group)): + remove_files.append(current_file) + + # Remove files + delete_folders = [] + for src in remove_files: + + if rename_files.get(src): + log.debug('Not removing file that will be renamed: %s', src) + continue + + log.info('Removing "%s"', src) + try: + src = sp(src) + if os.path.isfile(src): + os.remove(src) + + parent_dir = os.path.dirname(src) + if delete_folders.count(parent_dir) == 0 and os.path.isdir(parent_dir) and \ + not isSubFolder(destination, parent_dir) and not isSubFolder(media_folder, parent_dir) and \ + not isSubFolder(parent_dir, base_folder): + + delete_folders.append(parent_dir) + + except: + log.error('Failed removing %s: %s', (src, traceback.format_exc())) + self.tagRelease(group = group, tag = 'failed_remove') + + # Delete leftover folder from older releases + for delete_folder in delete_folders: + try: + self.deleteEmptyFolder(delete_folder, show_error = False) + except Exception as e: + log.error('Failed to delete folder: %s %s', (e, traceback.format_exc())) + + # Rename all files marked + group['renamed_files'] = [] + failed_rename = False + for src in rename_files: + if rename_files[src]: + dst = rename_files[src] + log.info('Renaming "%s" to "%s"', (src, dst)) + + # Create dir + self.makeDir(os.path.dirname(dst)) + + try: + self.moveFile(src, dst, forcemove = not self.downloadIsTorrent(release_download) or self.fileIsAdded(src, group)) + group['renamed_files'].append(dst) + except: + log.error('Failed renaming the file "%s" : %s', (os.path.basename(src), traceback.format_exc())) + failed_rename = True + break + + # If renaming failed tag the release folder as failed and continue with next group. Note that all old files have already been deleted. 
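For reference, the release comparison in the loop above works off the index of each quality identifier in the order returned by fireEvent('quality.order', single = True), where a lower index means a better quality. A minimal standalone sketch of that decision, using a hypothetical quality_order list (the identifiers and their order below are assumptions, not taken from this patch):

    # Illustrative only; the real list comes from the 'quality.order' event.
    quality_order = ['bd50', '1080p', '720p', 'brrip', 'dvdrip', 'scr', 'r5', 'tc', 'ts', 'cam']

    def compare_with_existing(existing_quality, new_quality):
        existing_order = quality_order.index(existing_quality)
        new_order = quality_order.index(new_quality)
        if existing_order > new_order:
            return 'replace'  # existing release is lesser quality: remove its files
        elif existing_order == new_order:
            return 'repack'   # same quality: assume repack/proper and replace it as well
        return 'keep'         # better quality already exists: tag the new download with 'exists'

    # compare_with_existing('brrip', '720p') -> 'replace'
    # compare_with_existing('720p', 'brrip') -> 'keep'
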
+ if failed_rename: + self.tagRelease(group = group, tag = 'failed_rename') + continue + # If renaming succeeded, make sure it is not tagged as failed (scanner didn't return a group, but a download_ID was provided in an earlier attempt) + else: + self.untagRelease(group = group, tag = 'failed_rename') + + # Tag folder if it is in the 'from' folder and it will not be removed because it is a torrent + if self.movieInFromFolder(media_folder) and self.downloadIsTorrent(release_download): + self.tagRelease(group = group, tag = 'renamed_already') + + # Remove matching releases + for release in remove_releases: + log.debug('Removing release %s', release.identifier) + try: + db.delete(release) + except: + log.error('Failed removing %s: %s', (release.identifier, traceback.format_exc())) + + if group['dirname'] and group['parentdir'] and not self.downloadIsTorrent(release_download): + if media_folder: + # Delete the movie folder + group_folder = media_folder + else: + # Delete the first empty subfolder in the tree relative to the 'from' folder + group_folder = sp(os.path.join(base_folder, os.path.relpath(group['parentdir'], base_folder).split(os.path.sep)[0])) + + try: + log.info('Deleting folder: %s', group_folder) + self.deleteEmptyFolder(group_folder) + except: + log.error('Failed removing %s: %s', (group_folder, traceback.format_exc())) + + # Notify on download, search for trailers etc + download_message = 'Downloaded %s (%s)' % (media_title, replacements['quality']) + try: + fireEvent('renamer.after', message = download_message, group = group, in_order = True) + except: + log.error('Failed firing (some) of the renamer.after events: %s', traceback.format_exc()) + + # Break if CP wants to shut down + if self.shuttingDown(): + break + + self.renaming_started = False + + def getRenameExtras(self, extra_type = '', replacements = None, folder_name = '', file_name = '', destination = '', group = None, current_file = '', remove_multiple = False): + if not group: group = {} + if not replacements: replacements = {} + + replacements = replacements.copy() + rename_files = {} + + def test(s): + return current_file[:-len(replacements['ext'])] in sp(s) + + for extra in set(filter(test, group['files'][extra_type])): + replacements['ext'] = getExt(extra) + + final_folder_name = self.doReplace(folder_name, replacements, remove_multiple = remove_multiple, folder = True) + final_file_name = self.doReplace(file_name, replacements, remove_multiple = remove_multiple) + rename_files[extra] = os.path.join(destination, final_folder_name, final_file_name) + + return rename_files + + # This adds a file to ignore / tag a release so it is ignored later + def tagRelease(self, tag, group = None, release_download = None): + if not tag: + return + + text = """This file is from CouchPotato +It has marked this release as "%s" +This file hides the release from the renamer +Remove it if you want it to be renamed (again, or at least let it try again) +""" % tag + + tag_files = [] + + # Tag movie files if they are known + if isinstance(group, dict): + tag_files = [sorted(list(group['files']['movie']))[0]] + + elif isinstance(release_download, dict): + # Tag download_files if they are known + if release_download['files']: + tag_files = splitString(release_download['files'], '|') + + # Tag all files in release folder + else: + for root, folders, names in scandir.walk(release_download['folder']): + tag_files.extend([os.path.join(root, name) for name in names]) + + for filename in tag_files: + + # Dont tag .ignore files + if 
os.path.splitext(filename)[1] == '.ignore': + continue + + tag_filename = '%s.%s.ignore' % (os.path.splitext(filename)[0], tag) + if not os.path.isfile(tag_filename): + self.createFile(tag_filename, text) + + def untagRelease(self, group = None, release_download = None, tag = ''): + if not release_download: + return + + tag_files = [] + folder = None + + # Tag movie files if they are known + if isinstance(group, dict): + tag_files = [sorted(list(group['files']['movie']))[0]] + + folder = group['parentdir'] + if not group.get('dirname') or not os.path.isdir(folder): + return False + + elif isinstance(release_download, dict): + # Untag download_files if they are known + if release_download['files']: + tag_files = splitString(release_download['files'], '|') + + # Untag all files in release folder + else: + for root, folders, names in scandir.walk(release_download['folder']): + tag_files.extend([sp(os.path.join(root, name)) for name in names if not os.path.splitext(name)[1] == '.ignore']) + + folder = release_download['folder'] + if not os.path.isdir(folder): + return False + + if not folder: + return False + + # Find all .ignore files in folder + ignore_files = [] + for root, dirnames, filenames in scandir.walk(folder): + ignore_files.extend(fnmatch.filter([sp(os.path.join(root, filename)) for filename in filenames], '*%s.ignore' % tag)) + + # Match all found ignore files with the tag_files and delete if found + for tag_file in tag_files: + ignore_file = fnmatch.filter(ignore_files, fnEscape('%s.%s.ignore' % (os.path.splitext(tag_file)[0], tag if tag else '*'))) + for filename in ignore_file: + try: + os.remove(filename) + except: + log.debug('Unable to remove ignore file: %s. Error: %s.' % (filename, traceback.format_exc())) + + def hastagRelease(self, release_download, tag = ''): + if not release_download: + return False + + folder = release_download['folder'] + if not os.path.isdir(folder): + return False + + tag_files = [] + ignore_files = [] + + # Find tag on download_files if they are known + if release_download['files']: + tag_files = splitString(release_download['files'], '|') + + # Find tag on all files in release folder + else: + for root, folders, names in scandir.walk(release_download['folder']): + tag_files.extend([sp(os.path.join(root, name)) for name in names if not os.path.splitext(name)[1] == '.ignore']) + + # Find all .ignore files in folder + for root, dirnames, filenames in scandir.walk(folder): + ignore_files.extend(fnmatch.filter([sp(os.path.join(root, filename)) for filename in filenames], '*%s.ignore' % tag)) + + # Match all found ignore files with the tag_files and return True found + for tag_file in tag_files: + ignore_file = fnmatch.filter(ignore_files, fnEscape('%s.%s.ignore' % (os.path.splitext(tag_file)[0], tag if tag else '*'))) + if ignore_file: + return True + + return False + + def moveFile(self, old, dest, forcemove = False): + dest = ss(dest) + try: + if forcemove or self.conf('file_action') not in ['copy', 'link']: + try: + shutil.move(old, dest) + except: + if os.path.exists(dest): + log.error('Successfully moved file "%s", but something went wrong: %s', (dest, traceback.format_exc())) + os.unlink(old) + else: + raise + elif self.conf('file_action') == 'copy': + shutil.copy(old, dest) + elif self.conf('file_action') == 'link': + # First try to hardlink + try: + log.debug('Hardlinking file "%s" to "%s"...', (old, dest)) + link(old, dest) + except: + # Try to simlink next + log.debug('Couldn\'t hardlink file "%s" to "%s". Simlinking instead. 
Error: %s.', (old, dest, traceback.format_exc())) + shutil.copy(old, dest) + try: + symlink(dest, old + '.link') + os.unlink(old) + os.rename(old + '.link', old) + except: + log.error('Couldn\'t symlink file "%s" to "%s". Copied instead. Error: %s. ', (old, dest, traceback.format_exc())) + + try: + os.chmod(dest, Env.getPermission('file')) + if os.name == 'nt' and self.conf('ntfs_permission'): + os.popen('icacls "' + dest + '"* /reset /T') + except: + log.error('Failed setting permissions for file: %s, %s', (dest, traceback.format_exc(1))) + except: + log.error('Couldn\'t move file "%s" to "%s": %s', (old, dest, traceback.format_exc())) + raise + + return True + + def doReplace(self, string, replacements, remove_multiple = False, folder = False): + """ + replace confignames with the real thing + """ + + replacements = replacements.copy() + if remove_multiple: + replacements['cd'] = '' + replacements['cd_nr'] = '' + + replaced = toUnicode(string) + for x, r in replacements.items(): + if x in ['thename', 'namethe']: + continue + if r is not None: + replaced = replaced.replace(six.u('<%s>') % toUnicode(x), toUnicode(r)) + else: + #If information is not available, we don't want the tag in the filename + replaced = replaced.replace('<' + x + '>', '') + + replaced = self.replaceDoubles(replaced.lstrip('. ')) + for x, r in replacements.items(): + if x in ['thename', 'namethe']: + replaced = replaced.replace(six.u('<%s>') % toUnicode(x), toUnicode(r)) + replaced = re.sub(r"[\x00:\*\?\"<>\|]", '', replaced) + + sep = self.conf('foldersep') if folder else self.conf('separator') + return replaced.replace(' ', ' ' if not sep else sep) + + def replaceDoubles(self, string): + + replaces = [ + ('\.+', '.'), ('_+', '_'), ('-+', '-'), ('\s+', ' '), + ('(\s\.)+', '.'), ('(-\.)+', '.'), ('(\s-)+', '-'), + ] + + for r in replaces: + reg, replace_with = r + string = re.sub(reg, replace_with, string) + + return string + + def deleteEmptyFolder(self, folder, show_error = True): + folder = sp(folder) + + loge = log.error if show_error else log.debug + for root, dirs, files in scandir.walk(folder): + + for dir_name in dirs: + full_path = os.path.join(root, dir_name) + if len(os.listdir(full_path)) == 0: + try: + os.rmdir(full_path) + except: + loge('Couldn\'t remove empty directory %s: %s', (full_path, traceback.format_exc())) + + try: + os.rmdir(folder) + except: + loge('Couldn\'t remove empty directory %s: %s', (folder, traceback.format_exc())) + + def checkSnatched(self, fire_scan = True): + + if self.checking_snatched: + log.debug('Already checking snatched') + return False + + self.checking_snatched = True + + try: + db = get_db() + + rels = list(db.run('release', 'with_status', ['snatched', 'seeding', 'missing'])) + + if not rels: + #No releases found that need status checking + self.checking_snatched = False + return True + + # Collect all download information with the download IDs from the releases + download_ids = [] + no_status_support = [] + try: + for rel in rels: + if not rel.get('download_info'): continue + + if rel['download_info'].get('id') and rel['download_info'].get('downloader'): + download_ids.append(rel['download_info']) + + ds = rel['download_info'].get('status_support') + if ds is False or ds == 'False': + no_status_support.append(ss(rel['download_info'].get('downloader'))) + except: + log.error('Error getting download IDs from database') + self.checking_snatched = False + return False + + release_downloads = fireEvent('download.status', download_ids, merge = True) if download_ids else [] + 
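The release_download dicts passed around scan() and checkSnatched() are plain dictionaries. Based on the keys this module reads, and on what extendReleaseDownload() adds later, a hypothetical example looks roughly like this (all values are made up, and downloaders may supply only a subset):

    # Hypothetical release_download; 'imdb_id', 'quality', 'protocol' and 'release_id'
    # only get filled in by extendReleaseDownload() when the release is known in the database.
    release_download = {
        'id': 'SABnzbd_nzo_12345',         # download ID reported by the downloader
        'downloader': 'Sabnzbd',           # downloader plugin name
        'name': 'Movie.Name.2011.720p.BluRay.x264-GRP',
        'status': 'completed',             # 'busy', 'seeding', 'failed' or 'completed'
        'folder': '/downloads/complete/Movie.Name.2011.720p.BluRay.x264-GRP',
        'files': '/downloads/complete/Movie.Name.2011.720p.BluRay.x264-GRP/movie.mkv',
        'timeleft': -1,                    # -1 once the download has finished
        'seed_ratio': 1.0,
    }

statusInfoComplete() treats such a dict as usable only when 'id', 'downloader' and 'folder' are all set.
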
+ if len(no_status_support) > 0: + log.debug('Download status functionality is not implemented for one of the active downloaders: %s', no_status_support) + + if not release_downloads: + if fire_scan: + self.scan() + + self.checking_snatched = False + return True + + scan_releases = [] + scan_required = False + + log.debug('Checking status snatched releases...') + + try: + for rel in rels: + movie_dict = db.get('id', rel.get('media_id')) + download_info = rel.get('download_info') + + if not isinstance(download_info, dict): + log.error('Faulty release found without any info, ignoring.') + fireEvent('release.update_status', rel.get('_id'), status = 'ignored', single = True) + continue + + # Check if download ID is available + if not download_info.get('id') or not download_info.get('downloader'): + log.debug('Download status functionality is not implemented for downloader (%s) of release %s.', (download_info.get('downloader', 'unknown'), rel['info']['name'])) + scan_required = True + + # Continue with next release + continue + + # Find release in downloaders + nzbname = self.createNzbName(rel['info'], movie_dict) + + found_release = False + for release_download in release_downloads: + found_release = False + if download_info.get('id'): + if release_download['id'] == download_info['id'] and release_download['downloader'] == download_info['downloader']: + log.debug('Found release by id: %s', release_download['id']) + found_release = True + break + else: + if release_download['name'] == nzbname or rel['info']['name'] in release_download['name'] or getImdb(release_download['name']) == movie_dict['identifier']: + log.debug('Found release by release name or imdb ID: %s', release_download['name']) + found_release = True + break + + if not found_release: + log.info('%s not found in downloaders', nzbname) + + #Check status if already missing and for how long, if > 1 week, set to ignored else to missing + if rel.get('status') == 'missing': + if rel.last_edit < int(time.time()) - 7 * 24 * 60 * 60: + fireEvent('release.update_status', rel.get('_id'), status = 'ignored', single = True) + else: + # Set the release to missing + fireEvent('release.update_status', rel.get('_id'), status = 'missing', single = True) + + # Continue with next release + continue + + # Log that we found the release + timeleft = 'N/A' if release_download['timeleft'] == -1 else release_download['timeleft'] + log.debug('Found %s: %s, time to go: %s', (release_download['name'], release_download['status'].upper(), timeleft)) + + # Check status of release + if release_download['status'] == 'busy': + # Set the release to snatched if it was missing before + fireEvent('release.update_status', rel.get('_id'), status = 'snatched', single = True) + + # Tag folder if it is in the 'from' folder and it will not be processed because it is still downloading + if self.movieInFromFolder(release_download['folder']): + self.tagRelease(release_download = release_download, tag = 'downloading') + + elif release_download['status'] == 'seeding': + #If linking setting is enabled, process release + if self.conf('file_action') != 'move' and not rel.get('status') == 'seeding' and self.statusInfoComplete(release_download): + log.info('Download of %s completed! It is now being processed while leaving the original files alone for seeding. 
Current ratio: %s.', (release_download['name'], release_download['seed_ratio'])) + + # Remove the downloading tag + self.untagRelease(release_download = release_download, tag = 'downloading') + + # Scan and set the torrent to paused if required + release_download.update({'pause': True, 'scan': True, 'process_complete': False}) + scan_releases.append(release_download) + else: + #let it seed + log.debug('%s is seeding with ratio: %s', (release_download['name'], release_download['seed_ratio'])) + + # Set the release to seeding + fireEvent('release.update_status', rel.get('_id'), status = 'seeding', single = True) + + elif release_download['status'] == 'failed': + # Set the release to failed + fireEvent('release.update_status', rel.get('_id'), status = 'failed', single = True) + + fireEvent('download.remove_failed', release_download, single = True) + + if self.conf('next_on_failed'): + fireEvent('movie.searcher.try_next_release', media_id = rel.get('media_id')) + + elif release_download['status'] == 'completed': + log.info('Download of %s completed!', release_download['name']) + + #Make sure the downloader sent over a path to look in + if self.statusInfoComplete(release_download): + + # If the release has been seeding, process now the seeding is done + if rel.get('status') == 'seeding': + if self.conf('file_action') != 'move': + # Set the release to done as the movie has already been renamed + fireEvent('release.update_status', rel.get('_id'), status = 'downloaded', single = True) + + # Allow the downloader to clean-up + release_download.update({'pause': False, 'scan': False, 'process_complete': True}) + scan_releases.append(release_download) + else: + # Scan and Allow the downloader to clean-up + release_download.update({'pause': False, 'scan': True, 'process_complete': True}) + scan_releases.append(release_download) + + else: + # Set the release to snatched if it was missing before + fireEvent('release.update_status', rel.get('_id'), status = 'snatched', single = True) + + # Remove the downloading tag + self.untagRelease(release_download = release_download, tag = 'downloading') + + # Scan and Allow the downloader to clean-up + release_download.update({'pause': False, 'scan': True, 'process_complete': True}) + scan_releases.append(release_download) + else: + scan_required = True + + except: + log.error('Failed checking for release in downloader: %s', traceback.format_exc()) + + # The following can either be done here, or inside the scanner if we pass it scan_items in one go + for release_download in scan_releases: + # Ask the renamer to scan the item + if release_download['scan']: + if release_download['pause'] and self.conf('file_action') == 'link': + fireEvent('download.pause', release_download = release_download, pause = True, single = True) + self.scan(release_download = release_download) + if release_download['pause'] and self.conf('file_action') == 'link': + fireEvent('download.pause', release_download = release_download, pause = False, single = True) + if release_download['process_complete']: + #First make sure the files were succesfully processed + if not self.hastagRelease(release_download = release_download, tag = 'failed_rename'): + # Remove the seeding tag if it exists + self.untagRelease(release_download = release_download, tag = 'renamed_already') + # Ask the downloader to process the item + fireEvent('download.process_complete', release_download = release_download, single = True) + + if fire_scan and (scan_required or len(no_status_support) > 0): + self.scan() + + 
self.checking_snatched = False + return True + except: + log.error('Failed checking snatched: %s', traceback.format_exc()) + + self.checking_snatched = False + return False + + def extendReleaseDownload(self, release_download): + + rls = None + db = get_db() + + if release_download and release_download.get('id'): + try: + rls = db.get('release_download', '%s_%s' % (release_download.get('downloader'), release_download.get('id')), with_doc = True)['doc'] + except: + log.error('Download ID %s from downloader %s not found in releases', (release_download.get('id'), release_download.get('downloader'))) + + if rls: + media = db.get('id', rls['media_id']) + release_download.update({ + 'imdb_id': media['identifier'], + 'quality': rls['quality'], + 'protocol': rls.get('info', {}).get('protocol') or rls.get('info', {}).get('type'), + 'release_id': rls['_id'], + }) + + return release_download + + def downloadIsTorrent(self, release_download): + return release_download and release_download.get('protocol') in ['torrent', 'torrent_magnet'] + + def fileIsAdded(self, src, group): + if not group or not group.get('before_rename'): + return False + return src in group['before_rename'] + + def statusInfoComplete(self, release_download): + return release_download.get('id') and release_download.get('downloader') and release_download.get('folder') + + def movieInFromFolder(self, media_folder): + return media_folder and isSubFolder(media_folder, sp(self.conf('from'))) or not media_folder + + def extractFiles(self, folder = None, media_folder = None, files = None, cleanup = False): + if not files: files = [] + + # RegEx for finding rar files + archive_regex = '(?P^(?P(?:(?!\.part\d+\.rar$).)*)\.(?:(?:part0*1\.)?rar)$)' + restfile_regex = '(^%s\.(?:part(?!0*1\.rar$)\d+\.rar$|[rstuvw]\d+$))' + extr_files = [] + + from_folder = sp(self.conf('from')) + + # Check input variables + if not folder: + folder = from_folder + + check_file_date = True + if media_folder: + check_file_date = False + + if not files: + for root, folders, names in scandir.walk(folder): + files.extend([sp(os.path.join(root, name)) for name in names]) + + # Find all archive files + archives = [re.search(archive_regex, name).groupdict() for name in files if re.search(archive_regex, name)] + + #Extract all found archives + for archive in archives: + # Check if it has already been processed by CPS + if self.hastagRelease(release_download = {'folder': os.path.dirname(archive['file']), 'files': archive['file']}): + continue + + # Find all related archive files + archive['files'] = [name for name in files if re.search(restfile_regex % re.escape(archive['base']), name)] + archive['files'].append(archive['file']) + + # Check if archive is fresh and maybe still copying/moving/downloading, ignore files newer than 1 minute + if check_file_date: + files_too_new, time_string = self.checkFilesChanged(archive['files']) + + if files_too_new: + log.info('Archive seems to be still copying/moving/downloading or just copied/moved/downloaded (created on %s), ignoring for now: %s', (time_string, os.path.basename(archive['file']))) + continue + + log.info('Archive %s found. 
Extracting...', os.path.basename(archive['file'])) + try: + rar_handle = RarFile(archive['file']) + extr_path = os.path.join(from_folder, os.path.relpath(os.path.dirname(archive['file']), folder)) + self.makeDir(extr_path) + for packedinfo in rar_handle.infolist(): + if not packedinfo.isdir and not os.path.isfile(sp(os.path.join(extr_path, os.path.basename(packedinfo.filename)))): + log.debug('Extracting %s...', packedinfo.filename) + rar_handle.extract(condition = [packedinfo.index], path = extr_path, withSubpath = False, overwrite = False) + extr_files.append(sp(os.path.join(extr_path, os.path.basename(packedinfo.filename)))) + del rar_handle + except Exception as e: + log.error('Failed to extract %s: %s %s', (archive['file'], e, traceback.format_exc())) + continue + + # Delete the archive files + for filename in archive['files']: + if cleanup: + try: + os.remove(filename) + except Exception as e: + log.error('Failed to remove %s: %s %s', (filename, e, traceback.format_exc())) + continue + files.remove(filename) + + # Move the rest of the files and folders if any files are extracted to the from folder (only if folder was provided) + if extr_files and folder != from_folder: + for leftoverfile in list(files): + move_to = os.path.join(from_folder, os.path.relpath(leftoverfile, folder)) + + try: + self.makeDir(os.path.dirname(move_to)) + self.moveFile(leftoverfile, move_to, cleanup) + except Exception as e: + log.error('Failed moving left over file %s to %s: %s %s', (leftoverfile, move_to, e, traceback.format_exc())) + # As we probably tried to overwrite the nfo file, check if it exists and then remove the original + if os.path.isfile(move_to): + if cleanup: + log.info('Deleting left over file %s instead...', leftoverfile) + os.unlink(leftoverfile) + else: + continue + + files.remove(leftoverfile) + extr_files.append(move_to) + + if cleanup: + # Remove all left over folders + log.debug('Removing old movie folder %s...', media_folder) + self.deleteEmptyFolder(media_folder) + + media_folder = os.path.join(from_folder, os.path.relpath(media_folder, folder)) + folder = from_folder + + if extr_files: + files.extend(extr_files) + + # Cleanup files and folder if media_folder was not provided + if not media_folder: + files = [] + folder = None + + return folder, media_folder, files, extr_files + + +rename_options = { + 'pre': '<', + 'post': '>', + 'choices': { + 'ext': 'Extention (mkv)', + 'namethe': 'Moviename, The', + 'thename': 'The Moviename', + 'year': 'Year (2011)', + 'first': 'First letter (M)', + 'quality': 'Quality (720p)', + 'quality_type': '(HD) or (SD)', + 'video': 'Video (x264)', + 'audio': 'Audio (DTS)', + 'group': 'Releasegroup name', + 'source': 'Source media (Bluray)', + 'resolution_width': 'resolution width (1280)', + 'resolution_height': 'resolution height (720)', + 'audio_channels': 'audio channels (7.1)', + 'original': 'Original filename', + 'original_folder': 'Original foldername', + 'imdb_id': 'IMDB id (tt0123456)', + 'cd': 'CD number (cd1)', + 'cd_nr': 'Just the cd nr. 
(1)', + 'mpaa': 'MPAA Rating', + 'category': 'Category label', + }, +} + +config = [{ + 'name': 'renamer', + 'order': 40, + 'description': 'Move and rename your downloaded movies to your movie directory.', + 'groups': [ + { + 'tab': 'renamer', + 'name': 'renamer', + 'label': 'Rename downloaded movies', + 'wizard': True, + 'options': [ + { + 'name': 'enabled', + 'default': False, + 'type': 'enabler', + }, + { + 'name': 'from', + 'type': 'directory', + 'description': 'Folder where CP searches for movies.', + }, + { + 'name': 'to', + 'type': 'directory', + 'description': 'Default folder where the movies are moved to.', + }, + { + 'name': 'folder_name', + 'label': 'Folder naming', + 'description': 'Name of the folder. Keep empty for no folder.', + 'default': ' ()', + 'type': 'choice', + 'options': rename_options + }, + { + 'name': 'file_name', + 'label': 'File naming', + 'description': 'Name of the file', + 'default': '.', + 'type': 'choice', + 'options': rename_options + }, + { + 'name': 'unrar', + 'type': 'bool', + 'description': 'Extract rar files if found.', + 'default': False, + }, + { + 'name': 'cleanup', + 'type': 'bool', + 'description': 'Cleanup leftover files after successful rename.', + 'default': False, + }, + { + 'advanced': True, + 'name': 'run_every', + 'label': 'Run every', + 'default': 1, + 'type': 'int', + 'unit': 'min(s)', + 'description': ('Detect movie status every X minutes.', 'Will start the renamer if movie is completed or handle failed download if these options are enabled'), + }, + { + 'advanced': True, + 'name': 'force_every', + 'label': 'Force every', + 'default': 2, + 'type': 'int', + 'unit': 'hour(s)', + 'description': 'Forces the renamer to scan every X hours', + }, + { + 'advanced': True, + 'name': 'next_on_failed', + 'default': True, + 'type': 'bool', + 'description': 'Try the next best release for a movie after a download failed.', + }, + { + 'name': 'move_leftover', + 'type': 'bool', + 'description': 'Move all leftover file after renaming, to the movie folder.', + 'default': False, + 'advanced': True, + }, + { + 'advanced': True, + 'name': 'separator', + 'label': 'File-Separator', + 'description': ('Replace all the spaces with a character.', 'Example: ".", "-" (without quotes). Leave empty to use spaces.'), + }, + { + 'advanced': True, + 'name': 'foldersep', + 'label': 'Folder-Separator', + 'description': ('Replace all the spaces with a character.', 'Example: ".", "-" (without quotes). Leave empty to use spaces.'), + }, + { + 'name': 'file_action', + 'label': 'Torrent File Action', + 'default': 'link', + 'type': 'dropdown', + 'values': [('Link', 'link'), ('Copy', 'copy'), ('Move', 'move')], + 'description': ('Link, Copy or Move after download completed.', + 'Link first tries hard link, then sym link and falls back to Copy. It is perfered to use link when downloading torrents as it will save you space, while still beeing able to seed.'), + 'advanced': True, + }, + { + 'advanced': True, + 'name': 'ntfs_permission', + 'label': 'NTFS Permission', + 'type': 'bool', + 'hidden': os.name != 'nt', + 'description': 'Set permission of moved files to that of destination folder (Windows NTFS only).', + 'default': False, + }, + ], + }, { + 'tab': 'renamer', + 'name': 'meta_renamer', + 'label': 'Advanced renaming', + 'description': 'Meta data file renaming. 
Use <filename> to use the above "File naming" settings, without the file extention.', + 'advanced': True, + 'options': [ + { + 'name': 'rename_nfo', + 'label': 'Rename .NFO', + 'description': 'Rename original .nfo file', + 'type': 'bool', + 'default': True, + }, + { + 'name': 'nfo_name', + 'label': 'NFO naming', + 'default': '.orig.', + 'type': 'choice', + 'options': rename_options + }, + ], + }, + ], +}] diff --git a/couchpotato/core/plugins/renamer/__init__.py b/couchpotato/core/plugins/renamer/__init__.py deleted file mode 100755 index e238f5e..0000000 --- a/couchpotato/core/plugins/renamer/__init__.py +++ /dev/null @@ -1,178 +0,0 @@ -from couchpotato.core.plugins.renamer.main import Renamer -import os - - -def start(): - return Renamer() - -rename_options = { - 'pre': '<', - 'post': '>', - 'choices': { - 'ext': 'Extention (mkv)', - 'namethe': 'Moviename, The', - 'thename': 'The Moviename', - 'year': 'Year (2011)', - 'first': 'First letter (M)', - 'quality': 'Quality (720p)', - 'quality_type': '(HD) or (SD)', - 'video': 'Video (x264)', - 'audio': 'Audio (DTS)', - 'group': 'Releasegroup name', - 'source': 'Source media (Bluray)', - 'resolution_width': 'resolution width (1280)', - 'resolution_height': 'resolution height (720)', - 'audio_channels': 'audio channels (7.1)', - 'original': 'Original filename', - 'original_folder': 'Original foldername', - 'imdb_id': 'IMDB id (tt0123456)', - 'cd': 'CD number (cd1)', - 'cd_nr': 'Just the cd nr. (1)', - 'mpaa': 'MPAA Rating', - 'category': 'Category label', - }, -} - -config = [{ - 'name': 'renamer', - 'order': 40, - 'description': 'Move and rename your downloaded movies to your movie directory.', - 'groups': [ - { - 'tab': 'renamer', - 'name': 'renamer', - 'label': 'Rename downloaded movies', - 'wizard': True, - 'options': [ - { - 'name': 'enabled', - 'default': False, - 'type': 'enabler', - }, - { - 'name': 'from', - 'type': 'directory', - 'description': 'Folder where CP searches for movies.', - }, - { - 'name': 'to', - 'type': 'directory', - 'description': 'Default folder where the movies are moved to.', - }, - { - 'name': 'folder_name', - 'label': 'Folder naming', - 'description': 'Name of the folder. 
Keep empty for no folder.', - 'default': ' ()', - 'type': 'choice', - 'options': rename_options - }, - { - 'name': 'file_name', - 'label': 'File naming', - 'description': 'Name of the file', - 'default': '.', - 'type': 'choice', - 'options': rename_options - }, - { - 'name': 'unrar', - 'type': 'bool', - 'description': 'Extract rar files if found.', - 'default': False, - }, - { - 'name': 'cleanup', - 'type': 'bool', - 'description': 'Cleanup leftover files after successful rename.', - 'default': False, - }, - { - 'advanced': True, - 'name': 'run_every', - 'label': 'Run every', - 'default': 1, - 'type': 'int', - 'unit': 'min(s)', - 'description': ('Detect movie status every X minutes.', 'Will start the renamer if movie is completed or handle failed download if these options are enabled'), - }, - { - 'advanced': True, - 'name': 'force_every', - 'label': 'Force every', - 'default': 2, - 'type': 'int', - 'unit': 'hour(s)', - 'description': 'Forces the renamer to scan every X hours', - }, - { - 'advanced': True, - 'name': 'next_on_failed', - 'default': True, - 'type': 'bool', - 'description': 'Try the next best release for a movie after a download failed.', - }, - { - 'name': 'move_leftover', - 'type': 'bool', - 'description': 'Move all leftover file after renaming, to the movie folder.', - 'default': False, - 'advanced': True, - }, - { - 'advanced': True, - 'name': 'separator', - 'label': 'File-Separator', - 'description': ('Replace all the spaces with a character.', 'Example: ".", "-" (without quotes). Leave empty to use spaces.'), - }, - { - 'advanced': True, - 'name': 'foldersep', - 'label': 'Folder-Separator', - 'description': ('Replace all the spaces with a character.', 'Example: ".", "-" (without quotes). Leave empty to use spaces.'), - }, - { - 'name': 'file_action', - 'label': 'Torrent File Action', - 'default': 'link', - 'type': 'dropdown', - 'values': [('Link', 'link'), ('Copy', 'copy'), ('Move', 'move')], - 'description': ('Link, Copy or Move after download completed.', - 'Link first tries hard link, then sym link and falls back to Copy. It is perfered to use link when downloading torrents as it will save you space, while still beeing able to seed.'), - 'advanced': True, - }, - { - 'advanced': True, - 'name': 'ntfs_permission', - 'label': 'NTFS Permission', - 'type': 'bool', - 'hidden': os.name != 'nt', - 'description': 'Set permission of moved files to that of destination folder (Windows NTFS only).', - 'default': False, - }, - ], - }, { - 'tab': 'renamer', - 'name': 'meta_renamer', - 'label': 'Advanced renaming', - 'description': 'Meta data file renaming. 
Use <filename> to use the above "File naming" settings, without the file extention.', - 'advanced': True, - 'options': [ - { - 'name': 'rename_nfo', - 'label': 'Rename .NFO', - 'description': 'Rename original .nfo file', - 'type': 'bool', - 'default': True, - }, - { - 'name': 'nfo_name', - 'label': 'NFO naming', - 'default': '.orig.', - 'type': 'choice', - 'options': rename_options - }, - ], - }, - ], -}] diff --git a/couchpotato/core/plugins/renamer/main.py b/couchpotato/core/plugins/renamer/main.py deleted file mode 100755 index 8d14550..0000000 --- a/couchpotato/core/plugins/renamer/main.py +++ /dev/null @@ -1,1189 +0,0 @@ -from couchpotato import get_db -from couchpotato.api import addApiView -from couchpotato.core.event import addEvent, fireEvent, fireEventAsync -from couchpotato.core.helpers.encoding import toUnicode, ss, sp -from couchpotato.core.helpers.variable import getExt, mergeDicts, getTitle, \ - getImdb, link, symlink, tryInt, splitString, fnEscape, isSubFolder -from couchpotato.core.logger import CPLog -from couchpotato.core.plugins.base import Plugin -from couchpotato.environment import Env -from scandir import scandir -from unrar2 import RarFile -import errno -import fnmatch -import os -import re -import shutil -import time -import traceback -import six -from six.moves import filter - -log = CPLog(__name__) - - -class Renamer(Plugin): - - renaming_started = False - checking_snatched = False - - def __init__(self): - addApiView('renamer.scan', self.scanView, docs = { - 'desc': 'For the renamer to check for new files to rename in a folder', - 'params': { - 'async': {'desc': 'Optional: Set to 1 if you dont want to fire the renamer.scan asynchronous.'}, - 'media_folder': {'desc': 'Optional: The folder of the media to scan. Keep empty for default renamer folder.'}, - 'files': {'desc': 'Optional: Provide the release files if more releases are in the same media_folder, delimited with a \'|\'. Note that no dedicated release folder is expected for releases with one file.'}, - 'base_folder': {'desc': 'Optional: The folder to find releases in. Leave empty for default folder.'}, - 'downloader': {'desc': 'Optional: The downloader the release has been downloaded with. \'download_id\' is required with this option.'}, - 'download_id': {'desc': 'Optional: The nzb/torrent ID of the release in media_folder. 
\'downloader\' is required with this option.'}, - 'status': {'desc': 'Optional: The status of the release: \'completed\' (default) or \'seeding\''}, - }, - }) - - addEvent('renamer.scan', self.scan) - addEvent('renamer.check_snatched', self.checkSnatched) - - addEvent('app.load', self.scan) - addEvent('app.load', self.setCrons) - - # Enable / disable interval - addEvent('setting.save.renamer.enabled.after', self.setCrons) - addEvent('setting.save.renamer.run_every.after', self.setCrons) - addEvent('setting.save.renamer.force_every.after', self.setCrons) - - def setCrons(self): - - fireEvent('schedule.remove', 'renamer.check_snatched') - if self.isEnabled() and self.conf('run_every') > 0: - fireEvent('schedule.interval', 'renamer.check_snatched', self.checkSnatched, minutes = self.conf('run_every'), single = True) - - fireEvent('schedule.remove', 'renamer.check_snatched_forced') - if self.isEnabled() and self.conf('force_every') > 0: - fireEvent('schedule.interval', 'renamer.check_snatched_forced', self.scan, hours = self.conf('force_every'), single = True) - - return True - - def scanView(self, **kwargs): - - async = tryInt(kwargs.get('async', 0)) - base_folder = kwargs.get('base_folder') - media_folder = sp(kwargs.get('media_folder')) - - # Backwards compatibility, to be removed after a few versions :) - if not media_folder: - media_folder = sp(kwargs.get('movie_folder')) - - downloader = kwargs.get('downloader') - download_id = kwargs.get('download_id') - files = '|'.join([sp(filename) for filename in splitString(kwargs.get('files'), '|')]) - status = kwargs.get('status', 'completed') - - release_download = None - if not base_folder and media_folder: - release_download = {'folder': media_folder} - release_download.update({'id': download_id, 'downloader': downloader, 'status': status, 'files': files} if download_id else {}) - - fire_handle = fireEvent if not async else fireEventAsync - - fire_handle('renamer.scan', base_folder = base_folder, release_download = release_download) - - return { - 'success': True - } - - def scan(self, base_folder = None, release_download = None): - if not release_download: release_download = {} - - if self.isDisabled(): - return - - if self.renaming_started is True: - log.info('Renamer is already running, if you see this often, check the logs above for errors.') - return - - if not base_folder: - base_folder = self.conf('from') - - from_folder = sp(self.conf('from')) - to_folder = sp(self.conf('to')) - - # Get media folder to process - media_folder = release_download.get('folder') - - # Quality order for calculation quality priority - quality_order = fireEvent('quality.order', single = True) - - # Get all folders that should not be processed - no_process = [to_folder] - cat_list = fireEvent('category.all', single = True) or [] - no_process.extend([item['destination'] for item in cat_list]) - try: - if Env.setting('library', section = 'manage').strip(): - no_process.extend([sp(manage_folder) for manage_folder in splitString(Env.setting('library', section = 'manage'), '::')]) - except: - pass - - # Check to see if the no_process folders are inside the "from" folder. 
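The isSubFolder() checks in this function protect the library: if a destination or managed library folder lives inside the "from" folder, or inside the provided media folder, the scan is aborted instead of moving or deleting files. The helper itself is imported from couchpotato.core.helpers.variable; a rough, hypothetical equivalent of the check it performs:

    import os

    def is_sub_folder(sub, parent):
        # Approximation only: True when 'sub' is the same folder as 'parent' or lives inside it.
        sub = os.path.join(os.path.normpath(os.path.abspath(sub)), '')
        parent = os.path.join(os.path.normpath(os.path.abspath(parent)), '')
        return sub.startswith(parent)

    # is_sub_folder('/movies/library', '/downloads/from')         -> False, safe to continue
    # is_sub_folder('/downloads/from/library', '/downloads/from') -> True, abort the scan
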
- if not os.path.isdir(base_folder) or not os.path.isdir(to_folder): - log.error('Both the "To" and "From" folder have to exist.') - return - else: - for item in no_process: - if isSubFolder(item, base_folder): - log.error('To protect your data, the media libraries can\'t be inside of or the same as the "from" folder.') - return - - # Check to see if the no_process folders are inside the provided media_folder - if media_folder and not os.path.isdir(media_folder): - log.debug('The provided media folder %s does not exist. Trying to find it in the \'from\' folder.', media_folder) - - # Update to the from folder - if len(splitString(release_download.get('files'), '|')) == 1: - new_media_folder = from_folder - else: - new_media_folder = os.path.join(from_folder, os.path.basename(media_folder)) - - if not os.path.isdir(new_media_folder): - log.error('The provided media folder %s does not exist and could also not be found in the \'from\' folder.', media_folder) - return - - # Update the files - new_files = [os.path.join(new_media_folder, os.path.relpath(filename, media_folder)) for filename in splitString(release_download.get('files'), '|')] - if new_files and not os.path.isfile(new_files[0]): - log.error('The provided media folder %s does not exist and its files could also not be found in the \'from\' folder.', media_folder) - return - - # Update release_download info to the from folder - log.debug('Release %s found in the \'from\' folder.', media_folder) - release_download['folder'] = new_media_folder - release_download['files'] = '|'.join(new_files) - media_folder = new_media_folder - - if media_folder: - for item in no_process: - if isSubFolder(item, media_folder): - log.error('To protect your data, the media libraries can\'t be inside of or the same as the provided media folder.') - return - - # Make sure a checkSnatched marked all downloads/seeds as such - if not release_download and self.conf('run_every') > 0: - self.checkSnatched(fire_scan = False) - - self.renaming_started = True - - # make sure the media folder name is included in the search - folder = None - files = [] - if media_folder: - log.info('Scanning media folder %s...', media_folder) - folder = os.path.dirname(media_folder) - - if release_download.get('files', ''): - files = splitString(release_download['files'], '|') - - # If there is only one file in the torrent, the downloader did not create a subfolder - if len(files) == 1: - folder = media_folder - else: - # Get all files from the specified folder - try: - for root, folders, names in scandir.walk(media_folder): - files.extend([sp(os.path.join(root, name)) for name in names]) - except: - log.error('Failed getting files from %s: %s', (media_folder, traceback.format_exc())) - - db = get_db() - - # Extend the download info with info stored in the downloaded release - release_download = self.extendReleaseDownload(release_download) - - # Unpack any archives - extr_files = None - if self.conf('unrar'): - folder, media_folder, files, extr_files = self.extractFiles(folder = folder, media_folder = media_folder, files = files, - cleanup = self.conf('cleanup') and not self.downloadIsTorrent(release_download)) - - groups = fireEvent('scanner.scan', folder = folder if folder else base_folder, - files = files, release_download = release_download, return_ignored = False, single = True) or [] - - folder_name = self.conf('folder_name') - file_name = self.conf('file_name') - trailer_name = self.conf('trailer_name') - nfo_name = self.conf('nfo_name') - separator = self.conf('separator') - - 
# Tag release folder as failed_rename in case no groups were found. This prevents check_snatched from removing the release from the downloader. - if not groups and self.statusInfoComplete(release_download): - self.tagRelease(release_download = release_download, tag = 'failed_rename') - - for group_identifier in groups: - - group = groups[group_identifier] - rename_files = {} - remove_files = [] - remove_releases = [] - - media_title = getTitle(group) - - # Add _UNKNOWN_ if no library item is connected - if not group.get('media') or not media_title: - self.tagRelease(group = group, tag = 'unknown') - continue - # Rename the files using the library data - else: - - # Media not in library, add it first - if not group['media'].get('_id'): - group['media'] = fireEvent('movie.add', params = { - 'identifier': group['identifier'], - 'profile_id': None - }, search_after = False, status = 'done', single = True) - else: - group['media'] = fireEvent('movie.update_info', identifier = group['media']['identifier'], single = True) - - if not group['media'] or not group['media'].get('_id'): - log.error('Could not rename, no library item to work with: %s', group_identifier) - continue - - media = group['media'] - media_title = getTitle(media) - - # Overwrite destination when set in category - destination = to_folder - category_label = '' - - if media.get('category_id'): - try: - category = db.get('id', media['category_id']) - category_label = category['label'] - - if category['destination'] and len(category['destination']) > 0 and category['destination'] != 'None': - destination = category['destination'] - log.debug('Setting category destination for "%s": %s' % (media_title, destination)) - else: - log.debug('No category destination found for "%s"' % media_title) - except: - log.error('Failed getting category label: %s', traceback.format_exc()) - - # Find subtitle for renaming - group['before_rename'] = [] - fireEvent('renamer.before', group) - - # Add extracted files to the before_rename list - if extr_files: - group['before_rename'].extend(extr_files) - - # Remove weird chars from movie name - movie_name = re.sub(r"[\x00\/\\:\*\?\"<>\|]", '', media_title) - - # Put 'The' at the end - name_the = movie_name - if movie_name[:4].lower() == 'the ': - name_the = movie_name[4:] + ', The' - - replacements = { - 'ext': 'mkv', - 'namethe': name_the.strip(), - 'thename': movie_name.strip(), - 'year': media['info']['year'], - 'first': name_the[0].upper(), - 'quality': group['meta_data']['quality']['label'], - 'quality_type': group['meta_data']['quality_type'], - 'video': group['meta_data'].get('video'), - 'audio': group['meta_data'].get('audio'), - 'group': group['meta_data']['group'], - 'source': group['meta_data']['source'], - 'resolution_width': group['meta_data'].get('resolution_width'), - 'resolution_height': group['meta_data'].get('resolution_height'), - 'audio_channels': group['meta_data'].get('audio_channels'), - 'imdb_id': group['identifier'], - 'cd': '', - 'cd_nr': '', - 'mpaa': media['info'].get('mpaa', ''), - 'category': category_label, - } - - for file_type in group['files']: - - # Move nfo depending on settings - if file_type is 'nfo' and not self.conf('rename_nfo'): - log.debug('Skipping, renaming of %s disabled', file_type) - for current_file in group['files'][file_type]: - if self.conf('cleanup') and (not self.downloadIsTorrent(release_download) or self.fileIsAdded(current_file, group)): - remove_files.append(current_file) - continue - - # Subtitle extra - if file_type is 'subtitle_extra': - continue 
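The folder and file naming settings are template strings in which <tag> placeholders are filled from the replacements dict built above; doReplace() additionally strips illegal characters and handles the 'thename'/'namethe' tags in a second pass. A stripped-down illustration with assumed example values:

    # Example values only; the real dict is built per release group in scan().
    replacements = {'thename': 'Movie Name', 'namethe': 'Movie Name', 'year': 2011,
                    'quality': '720p', 'ext': 'mkv', 'cd': '', 'cd_nr': ''}

    folder_template = '<namethe> (<year>)'
    file_template = '<thename><cd>.<quality>.<ext>'

    def simple_replace(template, repl):
        # Minimal stand-in for Renamer.doReplace(): substitute known tags, drop None values.
        for key, value in repl.items():
            template = template.replace('<%s>' % key, str(value) if value is not None else '')
        return template

    # simple_replace(folder_template, replacements) -> 'Movie Name (2011)'
    # simple_replace(file_template, replacements)   -> 'Movie Name.720p.mkv'
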
- - # Move other files - multiple = len(group['files'][file_type]) > 1 and not group['is_dvd'] - cd = 1 if multiple else 0 - - for current_file in sorted(list(group['files'][file_type])): - current_file = sp(current_file) - - # Original filename - replacements['original'] = os.path.splitext(os.path.basename(current_file))[0] - replacements['original_folder'] = fireEvent('scanner.remove_cptag', group['dirname'], single = True) - - # Extension - replacements['ext'] = getExt(current_file) - - # cd # - replacements['cd'] = ' cd%d' % cd if multiple else '' - replacements['cd_nr'] = cd if multiple else '' - - # Naming - final_folder_name = self.doReplace(folder_name, replacements, folder = True) - final_file_name = self.doReplace(file_name, replacements) - replacements['filename'] = final_file_name[:-(len(getExt(final_file_name)) + 1)] - - # Meta naming - if file_type is 'trailer': - final_file_name = self.doReplace(trailer_name, replacements, remove_multiple = True) - elif file_type is 'nfo': - final_file_name = self.doReplace(nfo_name, replacements, remove_multiple = True) - - # Seperator replace - if separator: - final_file_name = final_file_name.replace(' ', separator) - - # Move DVD files (no structure renaming) - if group['is_dvd'] and file_type is 'movie': - found = False - for top_dir in ['video_ts', 'audio_ts', 'bdmv', 'certificate']: - has_string = current_file.lower().find(os.path.sep + top_dir + os.path.sep) - if has_string >= 0: - structure_dir = current_file[has_string:].lstrip(os.path.sep) - rename_files[current_file] = os.path.join(destination, final_folder_name, structure_dir) - found = True - break - - if not found: - log.error('Could not determine dvd structure for: %s', current_file) - - # Do rename others - else: - if file_type is 'leftover': - if self.conf('move_leftover'): - rename_files[current_file] = os.path.join(destination, final_folder_name, os.path.basename(current_file)) - elif file_type not in ['subtitle']: - rename_files[current_file] = os.path.join(destination, final_folder_name, final_file_name) - - # Check for extra subtitle files - if file_type is 'subtitle': - - remove_multiple = False - if len(group['files']['movie']) == 1: - remove_multiple = True - - sub_langs = group['subtitle_language'].get(current_file, []) - - # rename subtitles with or without language - sub_name = self.doReplace(file_name, replacements, remove_multiple = remove_multiple) - rename_files[current_file] = os.path.join(destination, final_folder_name, sub_name) - - rename_extras = self.getRenameExtras( - extra_type = 'subtitle_extra', - replacements = replacements, - folder_name = folder_name, - file_name = file_name, - destination = destination, - group = group, - current_file = current_file, - remove_multiple = remove_multiple, - ) - - # Don't add language if multiple languages in 1 subtitle file - if len(sub_langs) == 1: - sub_name = sub_name.replace(replacements['ext'], '%s.%s' % (sub_langs[0], replacements['ext'])) - rename_files[current_file] = os.path.join(destination, final_folder_name, sub_name) - - rename_files = mergeDicts(rename_files, rename_extras) - - # Filename without cd etc - elif file_type is 'movie': - rename_extras = self.getRenameExtras( - extra_type = 'movie_extra', - replacements = replacements, - folder_name = folder_name, - file_name = file_name, - destination = destination, - group = group, - current_file = current_file - ) - rename_files = mergeDicts(rename_files, rename_extras) - - group['filename'] = self.doReplace(file_name, replacements, remove_multiple = 
True)[:-(len(getExt(final_file_name)) + 1)] - group['destination_dir'] = os.path.join(destination, final_folder_name) - - if multiple: - cd += 1 - - # Before renaming, remove the lower quality files - remove_leftovers = True - - # Mark movie "done" once it's found the quality with the finish check - try: - if media.get('status') == 'active' and media.get('profile_id'): - profile = db.get('id', media['profile_id']) - if group['meta_data']['quality']['identifier'] in profile.get('qualities', []): - nr = profile['qualities'].index(group['meta_data']['quality']['identifier']) - finish = profile['finish'][nr] - if finish: - mdia = db.get('id', media['_id']) - mdia['status'] = 'done' - mdia['last_edit'] = int(time.time()) - db.update(mdia) - - except Exception as e: - log.error('Failed marking movie finished: %s', (traceback.format_exc())) - - # Go over current movie releases - for release in db.run('release', 'for_media', media['_id']): - - # When a release already exists - if release.get('status') == 'done': - - release_order = quality_order.index(release['quality']) - group_quality_order = quality_order.index(group['meta_data']['quality']['identifier']) - - # This is where CP removes older, lesser quality releases - if release_order > group_quality_order: - log.info('Removing lesser quality %s for %s.', (media_title, release.get('quality'))) - for file_type in release.get('files', {}): - for release_file in release['files'][file_type]: - remove_files.append(release_file) - remove_releases.append(release) - # Same quality, but still downloaded, so maybe repack/proper/unrated/directors cut etc - elif release_order == group_quality_order: - log.info('Same quality release already exists for %s, with quality %s. Assuming repack.', (media_title, release.get('quality'))) - for file_type in release.get('files', {}): - for release_file in release['files'][file_type]: - remove_files.append(release_file) - remove_releases.append(release) - - # Downloaded a lower quality, rename the newly downloaded files/folder to exclude them from scan - else: - log.info('Better quality release already exists for %s, with quality %s', (media_title, release.get('quality'))) - - # Add exists tag to the .ignore file - self.tagRelease(group = group, tag = 'exists') - - # Notify on rename fail - download_message = 'Renaming of %s (%s) cancelled, exists in %s already.' 
% (media_title, group['meta_data']['quality']['label'], release.get('identifier')) - fireEvent('movie.renaming.canceled', message = download_message, data = group) - remove_leftovers = False - - break - - elif release.get('status') in ['snatched', 'seeding']: - if release_download and release_download.get('release_id'): - if release_download['release_id'] == release['_id']: - if release_download['status'] == 'completed': - # Set the release to downloaded - fireEvent('release.update_status', release['_id'], status = 'downloaded', single = True) - elif release_download['status'] == 'seeding': - # Set the release to seeding - fireEvent('release.update_status', release['_id'], status = 'seeding', single = True) - - elif release.get('identifier') == group['meta_data']['quality']['identifier']: - # Set the release to downloaded - fireEvent('release.update_status', release['_id'], status = 'downloaded', single = True) - - # Remove leftover files - if not remove_leftovers: # Don't remove anything - break - - log.debug('Removing leftover files') - for current_file in group['files']['leftover']: - if self.conf('cleanup') and not self.conf('move_leftover') and \ - (not self.downloadIsTorrent(release_download) or self.fileIsAdded(current_file, group)): - remove_files.append(current_file) - - # Remove files - delete_folders = [] - for src in remove_files: - - if rename_files.get(src): - log.debug('Not removing file that will be renamed: %s', src) - continue - - log.info('Removing "%s"', src) - try: - src = sp(src) - if os.path.isfile(src): - os.remove(src) - - parent_dir = os.path.dirname(src) - if delete_folders.count(parent_dir) == 0 and os.path.isdir(parent_dir) and \ - not isSubFolder(destination, parent_dir) and not isSubFolder(media_folder, parent_dir) and \ - not isSubFolder(parent_dir, base_folder): - - delete_folders.append(parent_dir) - - except: - log.error('Failed removing %s: %s', (src, traceback.format_exc())) - self.tagRelease(group = group, tag = 'failed_remove') - - # Delete leftover folder from older releases - for delete_folder in delete_folders: - try: - self.deleteEmptyFolder(delete_folder, show_error = False) - except Exception as e: - log.error('Failed to delete folder: %s %s', (e, traceback.format_exc())) - - # Rename all files marked - group['renamed_files'] = [] - failed_rename = False - for src in rename_files: - if rename_files[src]: - dst = rename_files[src] - log.info('Renaming "%s" to "%s"', (src, dst)) - - # Create dir - self.makeDir(os.path.dirname(dst)) - - try: - self.moveFile(src, dst, forcemove = not self.downloadIsTorrent(release_download) or self.fileIsAdded(src, group)) - group['renamed_files'].append(dst) - except: - log.error('Failed renaming the file "%s" : %s', (os.path.basename(src), traceback.format_exc())) - failed_rename = True - break - - # If renaming failed tag the release folder as failed and continue with next group. Note that all old files have already been deleted. 
- if failed_rename: - self.tagRelease(group = group, tag = 'failed_rename') - continue - # If renaming succeeded, make sure it is not tagged as failed (scanner didn't return a group, but a download_ID was provided in an earlier attempt) - else: - self.untagRelease(group = group, tag = 'failed_rename') - - # Tag folder if it is in the 'from' folder and it will not be removed because it is a torrent - if self.movieInFromFolder(media_folder) and self.downloadIsTorrent(release_download): - self.tagRelease(group = group, tag = 'renamed_already') - - # Remove matching releases - for release in remove_releases: - log.debug('Removing release %s', release.identifier) - try: - db.delete(release) - except: - log.error('Failed removing %s: %s', (release.identifier, traceback.format_exc())) - - if group['dirname'] and group['parentdir'] and not self.downloadIsTorrent(release_download): - if media_folder: - # Delete the movie folder - group_folder = media_folder - else: - # Delete the first empty subfolder in the tree relative to the 'from' folder - group_folder = sp(os.path.join(base_folder, os.path.relpath(group['parentdir'], base_folder).split(os.path.sep)[0])) - - try: - log.info('Deleting folder: %s', group_folder) - self.deleteEmptyFolder(group_folder) - except: - log.error('Failed removing %s: %s', (group_folder, traceback.format_exc())) - - # Notify on download, search for trailers etc - download_message = 'Downloaded %s (%s)' % (media_title, replacements['quality']) - try: - fireEvent('renamer.after', message = download_message, group = group, in_order = True) - except: - log.error('Failed firing (some) of the renamer.after events: %s', traceback.format_exc()) - - # Break if CP wants to shut down - if self.shuttingDown(): - break - - self.renaming_started = False - - def getRenameExtras(self, extra_type = '', replacements = None, folder_name = '', file_name = '', destination = '', group = None, current_file = '', remove_multiple = False): - if not group: group = {} - if not replacements: replacements = {} - - replacements = replacements.copy() - rename_files = {} - - def test(s): - return current_file[:-len(replacements['ext'])] in sp(s) - - for extra in set(filter(test, group['files'][extra_type])): - replacements['ext'] = getExt(extra) - - final_folder_name = self.doReplace(folder_name, replacements, remove_multiple = remove_multiple, folder = True) - final_file_name = self.doReplace(file_name, replacements, remove_multiple = remove_multiple) - rename_files[extra] = os.path.join(destination, final_folder_name, final_file_name) - - return rename_files - - # This adds a file to ignore / tag a release so it is ignored later - def tagRelease(self, tag, group = None, release_download = None): - if not tag: - return - - text = """This file is from CouchPotato -It has marked this release as "%s" -This file hides the release from the renamer -Remove it if you want it to be renamed (again, or at least let it try again) -""" % tag - - tag_files = [] - - # Tag movie files if they are known - if isinstance(group, dict): - tag_files = [sorted(list(group['files']['movie']))[0]] - - elif isinstance(release_download, dict): - # Tag download_files if they are known - if release_download['files']: - tag_files = splitString(release_download['files'], '|') - - # Tag all files in release folder - else: - for root, folders, names in scandir.walk(release_download['folder']): - tag_files.extend([os.path.join(root, name) for name in names]) - - for filename in tag_files: - - # Dont tag .ignore files - if 
os.path.splitext(filename)[1] == '.ignore': - continue - - tag_filename = '%s.%s.ignore' % (os.path.splitext(filename)[0], tag) - if not os.path.isfile(tag_filename): - self.createFile(tag_filename, text) - - def untagRelease(self, group = None, release_download = None, tag = ''): - if not release_download: - return - - tag_files = [] - folder = None - - # Tag movie files if they are known - if isinstance(group, dict): - tag_files = [sorted(list(group['files']['movie']))[0]] - - folder = group['parentdir'] - if not group.get('dirname') or not os.path.isdir(folder): - return False - - elif isinstance(release_download, dict): - # Untag download_files if they are known - if release_download['files']: - tag_files = splitString(release_download['files'], '|') - - # Untag all files in release folder - else: - for root, folders, names in scandir.walk(release_download['folder']): - tag_files.extend([sp(os.path.join(root, name)) for name in names if not os.path.splitext(name)[1] == '.ignore']) - - folder = release_download['folder'] - if not os.path.isdir(folder): - return False - - if not folder: - return False - - # Find all .ignore files in folder - ignore_files = [] - for root, dirnames, filenames in scandir.walk(folder): - ignore_files.extend(fnmatch.filter([sp(os.path.join(root, filename)) for filename in filenames], '*%s.ignore' % tag)) - - # Match all found ignore files with the tag_files and delete if found - for tag_file in tag_files: - ignore_file = fnmatch.filter(ignore_files, fnEscape('%s.%s.ignore' % (os.path.splitext(tag_file)[0], tag if tag else '*'))) - for filename in ignore_file: - try: - os.remove(filename) - except: - log.debug('Unable to remove ignore file: %s. Error: %s.' % (filename, traceback.format_exc())) - - def hastagRelease(self, release_download, tag = ''): - if not release_download: - return False - - folder = release_download['folder'] - if not os.path.isdir(folder): - return False - - tag_files = [] - ignore_files = [] - - # Find tag on download_files if they are known - if release_download['files']: - tag_files = splitString(release_download['files'], '|') - - # Find tag on all files in release folder - else: - for root, folders, names in scandir.walk(release_download['folder']): - tag_files.extend([sp(os.path.join(root, name)) for name in names if not os.path.splitext(name)[1] == '.ignore']) - - # Find all .ignore files in folder - for root, dirnames, filenames in scandir.walk(folder): - ignore_files.extend(fnmatch.filter([sp(os.path.join(root, filename)) for filename in filenames], '*%s.ignore' % tag)) - - # Match all found ignore files with the tag_files and return True found - for tag_file in tag_files: - ignore_file = fnmatch.filter(ignore_files, fnEscape('%s.%s.ignore' % (os.path.splitext(tag_file)[0], tag if tag else '*'))) - if ignore_file: - return True - - return False - - def moveFile(self, old, dest, forcemove = False): - dest = ss(dest) - try: - if forcemove or self.conf('file_action') not in ['copy', 'link']: - try: - shutil.move(old, dest) - except: - if os.path.exists(dest): - log.error('Successfully moved file "%s", but something went wrong: %s', (dest, traceback.format_exc())) - os.unlink(old) - else: - raise - elif self.conf('file_action') == 'copy': - shutil.copy(old, dest) - elif self.conf('file_action') == 'link': - # First try to hardlink - try: - log.debug('Hardlinking file "%s" to "%s"...', (old, dest)) - link(old, dest) - except: - # Try to simlink next - log.debug('Couldn\'t hardlink file "%s" to "%s". Simlinking instead. 
Error: %s.', (old, dest, traceback.format_exc())) - shutil.copy(old, dest) - try: - symlink(dest, old + '.link') - os.unlink(old) - os.rename(old + '.link', old) - except: - log.error('Couldn\'t symlink file "%s" to "%s". Copied instead. Error: %s. ', (old, dest, traceback.format_exc())) - - try: - os.chmod(dest, Env.getPermission('file')) - if os.name == 'nt' and self.conf('ntfs_permission'): - os.popen('icacls "' + dest + '"* /reset /T') - except: - log.error('Failed setting permissions for file: %s, %s', (dest, traceback.format_exc(1))) - except: - log.error('Couldn\'t move file "%s" to "%s": %s', (old, dest, traceback.format_exc())) - raise - - return True - - def doReplace(self, string, replacements, remove_multiple = False, folder = False): - """ - replace confignames with the real thing - """ - - replacements = replacements.copy() - if remove_multiple: - replacements['cd'] = '' - replacements['cd_nr'] = '' - - replaced = toUnicode(string) - for x, r in replacements.items(): - if x in ['thename', 'namethe']: - continue - if r is not None: - replaced = replaced.replace(six.u('<%s>') % toUnicode(x), toUnicode(r)) - else: - #If information is not available, we don't want the tag in the filename - replaced = replaced.replace('<' + x + '>', '') - - replaced = self.replaceDoubles(replaced.lstrip('. ')) - for x, r in replacements.items(): - if x in ['thename', 'namethe']: - replaced = replaced.replace(six.u('<%s>') % toUnicode(x), toUnicode(r)) - replaced = re.sub(r"[\x00:\*\?\"<>\|]", '', replaced) - - sep = self.conf('foldersep') if folder else self.conf('separator') - return replaced.replace(' ', ' ' if not sep else sep) - - def replaceDoubles(self, string): - - replaces = [ - ('\.+', '.'), ('_+', '_'), ('-+', '-'), ('\s+', ' '), - ('(\s\.)+', '.'), ('(-\.)+', '.'), ('(\s-)+', '-'), - ] - - for r in replaces: - reg, replace_with = r - string = re.sub(reg, replace_with, string) - - return string - - def deleteEmptyFolder(self, folder, show_error = True): - folder = sp(folder) - - loge = log.error if show_error else log.debug - for root, dirs, files in scandir.walk(folder): - - for dir_name in dirs: - full_path = os.path.join(root, dir_name) - if len(os.listdir(full_path)) == 0: - try: - os.rmdir(full_path) - except: - loge('Couldn\'t remove empty directory %s: %s', (full_path, traceback.format_exc())) - - try: - os.rmdir(folder) - except: - loge('Couldn\'t remove empty directory %s: %s', (folder, traceback.format_exc())) - - def checkSnatched(self, fire_scan = True): - - if self.checking_snatched: - log.debug('Already checking snatched') - return False - - self.checking_snatched = True - - try: - db = get_db() - - rels = list(db.run('release', 'with_status', ['snatched', 'seeding', 'missing'])) - - if not rels: - #No releases found that need status checking - self.checking_snatched = False - return True - - # Collect all download information with the download IDs from the releases - download_ids = [] - no_status_support = [] - try: - for rel in rels: - if not rel.get('download_info'): continue - - if rel['download_info'].get('id') and rel['download_info'].get('downloader'): - download_ids.append(rel['download_info']) - - ds = rel['download_info'].get('status_support') - if ds is False or ds == 'False': - no_status_support.append(ss(rel['download_info'].get('downloader'))) - except: - log.error('Error getting download IDs from database') - self.checking_snatched = False - return False - - release_downloads = fireEvent('download.status', download_ids, merge = True) if download_ids else [] - 
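To illustrate what replaceDoubles() above cleans up (a sketch with a made-up filename, reusing the same substitution pairs as the method): once empty tags such as <cd> or <video> have been dropped, doubled dots, dashes, underscores and spaces are collapsed so they do not survive into the final name.

    import re

    def collapse(name):
        # Same pattern/replacement pairs as Renamer.replaceDoubles() above
        for pattern, replace_with in [('\.+', '.'), ('_+', '_'), ('-+', '-'), ('\s+', ' '),
                                      ('(\s\.)+', '.'), ('(-\.)+', '.'), ('(\s-)+', '-')]:
            name = re.sub(pattern, replace_with, name)
        return name

    print(collapse('The Big Movie (2011)  720p .mkv'))  # The Big Movie (2011) 720p.mkv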
- if len(no_status_support) > 0: - log.debug('Download status functionality is not implemented for one of the active downloaders: %s', no_status_support) - - if not release_downloads: - if fire_scan: - self.scan() - - self.checking_snatched = False - return True - - scan_releases = [] - scan_required = False - - log.debug('Checking status snatched releases...') - - try: - for rel in rels: - movie_dict = db.get('id', rel.get('media_id')) - download_info = rel.get('download_info') - - if not isinstance(download_info, dict): - log.error('Faulty release found without any info, ignoring.') - fireEvent('release.update_status', rel.get('_id'), status = 'ignored', single = True) - continue - - # Check if download ID is available - if not download_info.get('id') or not download_info.get('downloader'): - log.debug('Download status functionality is not implemented for downloader (%s) of release %s.', (download_info.get('downloader', 'unknown'), rel['info']['name'])) - scan_required = True - - # Continue with next release - continue - - # Find release in downloaders - nzbname = self.createNzbName(rel['info'], movie_dict) - - found_release = False - for release_download in release_downloads: - found_release = False - if download_info.get('id'): - if release_download['id'] == download_info['id'] and release_download['downloader'] == download_info['downloader']: - log.debug('Found release by id: %s', release_download['id']) - found_release = True - break - else: - if release_download['name'] == nzbname or rel['info']['name'] in release_download['name'] or getImdb(release_download['name']) == movie_dict['identifier']: - log.debug('Found release by release name or imdb ID: %s', release_download['name']) - found_release = True - break - - if not found_release: - log.info('%s not found in downloaders', nzbname) - - #Check status if already missing and for how long, if > 1 week, set to ignored else to missing - if rel.get('status') == 'missing': - if rel.last_edit < int(time.time()) - 7 * 24 * 60 * 60: - fireEvent('release.update_status', rel.get('_id'), status = 'ignored', single = True) - else: - # Set the release to missing - fireEvent('release.update_status', rel.get('_id'), status = 'missing', single = True) - - # Continue with next release - continue - - # Log that we found the release - timeleft = 'N/A' if release_download['timeleft'] == -1 else release_download['timeleft'] - log.debug('Found %s: %s, time to go: %s', (release_download['name'], release_download['status'].upper(), timeleft)) - - # Check status of release - if release_download['status'] == 'busy': - # Set the release to snatched if it was missing before - fireEvent('release.update_status', rel.get('_id'), status = 'snatched', single = True) - - # Tag folder if it is in the 'from' folder and it will not be processed because it is still downloading - if self.movieInFromFolder(release_download['folder']): - self.tagRelease(release_download = release_download, tag = 'downloading') - - elif release_download['status'] == 'seeding': - #If linking setting is enabled, process release - if self.conf('file_action') != 'move' and not rel.get('status') == 'seeding' and self.statusInfoComplete(release_download): - log.info('Download of %s completed! It is now being processed while leaving the original files alone for seeding. 
Current ratio: %s.', (release_download['name'], release_download['seed_ratio'])) - - # Remove the downloading tag - self.untagRelease(release_download = release_download, tag = 'downloading') - - # Scan and set the torrent to paused if required - release_download.update({'pause': True, 'scan': True, 'process_complete': False}) - scan_releases.append(release_download) - else: - #let it seed - log.debug('%s is seeding with ratio: %s', (release_download['name'], release_download['seed_ratio'])) - - # Set the release to seeding - fireEvent('release.update_status', rel.get('_id'), status = 'seeding', single = True) - - elif release_download['status'] == 'failed': - # Set the release to failed - fireEvent('release.update_status', rel.get('_id'), status = 'failed', single = True) - - fireEvent('download.remove_failed', release_download, single = True) - - if self.conf('next_on_failed'): - fireEvent('movie.searcher.try_next_release', media_id = rel.get('media_id')) - - elif release_download['status'] == 'completed': - log.info('Download of %s completed!', release_download['name']) - - #Make sure the downloader sent over a path to look in - if self.statusInfoComplete(release_download): - - # If the release has been seeding, process now the seeding is done - if rel.get('status') == 'seeding': - if self.conf('file_action') != 'move': - # Set the release to done as the movie has already been renamed - fireEvent('release.update_status', rel.get('_id'), status = 'downloaded', single = True) - - # Allow the downloader to clean-up - release_download.update({'pause': False, 'scan': False, 'process_complete': True}) - scan_releases.append(release_download) - else: - # Scan and Allow the downloader to clean-up - release_download.update({'pause': False, 'scan': True, 'process_complete': True}) - scan_releases.append(release_download) - - else: - # Set the release to snatched if it was missing before - fireEvent('release.update_status', rel.get('_id'), status = 'snatched', single = True) - - # Remove the downloading tag - self.untagRelease(release_download = release_download, tag = 'downloading') - - # Scan and Allow the downloader to clean-up - release_download.update({'pause': False, 'scan': True, 'process_complete': True}) - scan_releases.append(release_download) - else: - scan_required = True - - except: - log.error('Failed checking for release in downloader: %s', traceback.format_exc()) - - # The following can either be done here, or inside the scanner if we pass it scan_items in one go - for release_download in scan_releases: - # Ask the renamer to scan the item - if release_download['scan']: - if release_download['pause'] and self.conf('file_action') == 'link': - fireEvent('download.pause', release_download = release_download, pause = True, single = True) - self.scan(release_download = release_download) - if release_download['pause'] and self.conf('file_action') == 'link': - fireEvent('download.pause', release_download = release_download, pause = False, single = True) - if release_download['process_complete']: - #First make sure the files were succesfully processed - if not self.hastagRelease(release_download = release_download, tag = 'failed_rename'): - # Remove the seeding tag if it exists - self.untagRelease(release_download = release_download, tag = 'renamed_already') - # Ask the downloader to process the item - fireEvent('download.process_complete', release_download = release_download, single = True) - - if fire_scan and (scan_required or len(no_status_support) > 0): - self.scan() - - 
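For orientation, a simplified sketch (not code from the patch): the per-status branches in checkSnatched() above roughly map the downloader's reported status onto a release status as below. The real method additionally tags folders, pauses and resumes torrents, removes failed downloads and queues renamer scans; a completed download is first scanned and only marked downloaded once renaming has finished.

    # Hypothetical condensed view of the status transitions handled above
    STATUS_MAP = {
        'busy': 'snatched',         # still downloading
        'seeding': 'seeding',       # keep seeding; optionally process a linked copy
        'failed': 'failed',         # optionally try the next best release
        'completed': 'downloaded',  # reached after the renamer has processed the download
    }

    def next_release_status(download_status, current_status = 'snatched'):
        return STATUS_MAP.get(download_status, current_status)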
self.checking_snatched = False - return True - except: - log.error('Failed checking snatched: %s', traceback.format_exc()) - - self.checking_snatched = False - return False - - def extendReleaseDownload(self, release_download): - - rls = None - db = get_db() - - if release_download and release_download.get('id'): - try: - rls = db.get('release_download', '%s_%s' % (release_download.get('downloader'), release_download.get('id')), with_doc = True)['doc'] - except: - log.error('Download ID %s from downloader %s not found in releases', (release_download.get('id'), release_download.get('downloader'))) - - if rls: - media = db.get('id', rls['media_id']) - release_download.update({ - 'imdb_id': media['identifier'], - 'quality': rls['quality'], - 'protocol': rls.get('info', {}).get('protocol') or rls.get('info', {}).get('type'), - 'release_id': rls['_id'], - }) - - return release_download - - def downloadIsTorrent(self, release_download): - return release_download and release_download.get('protocol') in ['torrent', 'torrent_magnet'] - - def fileIsAdded(self, src, group): - if not group or not group.get('before_rename'): - return False - return src in group['before_rename'] - - def statusInfoComplete(self, release_download): - return release_download.get('id') and release_download.get('downloader') and release_download.get('folder') - - def movieInFromFolder(self, media_folder): - return media_folder and isSubFolder(media_folder, sp(self.conf('from'))) or not media_folder - - def extractFiles(self, folder = None, media_folder = None, files = None, cleanup = False): - if not files: files = [] - - # RegEx for finding rar files - archive_regex = '(?P^(?P(?:(?!\.part\d+\.rar$).)*)\.(?:(?:part0*1\.)?rar)$)' - restfile_regex = '(^%s\.(?:part(?!0*1\.rar$)\d+\.rar$|[rstuvw]\d+$))' - extr_files = [] - - from_folder = sp(self.conf('from')) - - # Check input variables - if not folder: - folder = from_folder - - check_file_date = True - if media_folder: - check_file_date = False - - if not files: - for root, folders, names in scandir.walk(folder): - files.extend([sp(os.path.join(root, name)) for name in names]) - - # Find all archive files - archives = [re.search(archive_regex, name).groupdict() for name in files if re.search(archive_regex, name)] - - #Extract all found archives - for archive in archives: - # Check if it has already been processed by CPS - if self.hastagRelease(release_download = {'folder': os.path.dirname(archive['file']), 'files': archive['file']}): - continue - - # Find all related archive files - archive['files'] = [name for name in files if re.search(restfile_regex % re.escape(archive['base']), name)] - archive['files'].append(archive['file']) - - # Check if archive is fresh and maybe still copying/moving/downloading, ignore files newer than 1 minute - if check_file_date: - files_too_new, time_string = self.checkFilesChanged(archive['files']) - - if files_too_new: - log.info('Archive seems to be still copying/moving/downloading or just copied/moved/downloaded (created on %s), ignoring for now: %s', (time_string, os.path.basename(archive['file']))) - continue - - log.info('Archive %s found. 
Extracting...', os.path.basename(archive['file'])) - try: - rar_handle = RarFile(archive['file']) - extr_path = os.path.join(from_folder, os.path.relpath(os.path.dirname(archive['file']), folder)) - self.makeDir(extr_path) - for packedinfo in rar_handle.infolist(): - if not packedinfo.isdir and not os.path.isfile(sp(os.path.join(extr_path, os.path.basename(packedinfo.filename)))): - log.debug('Extracting %s...', packedinfo.filename) - rar_handle.extract(condition = [packedinfo.index], path = extr_path, withSubpath = False, overwrite = False) - extr_files.append(sp(os.path.join(extr_path, os.path.basename(packedinfo.filename)))) - del rar_handle - except Exception as e: - log.error('Failed to extract %s: %s %s', (archive['file'], e, traceback.format_exc())) - continue - - # Delete the archive files - for filename in archive['files']: - if cleanup: - try: - os.remove(filename) - except Exception as e: - log.error('Failed to remove %s: %s %s', (filename, e, traceback.format_exc())) - continue - files.remove(filename) - - # Move the rest of the files and folders if any files are extracted to the from folder (only if folder was provided) - if extr_files and folder != from_folder: - for leftoverfile in list(files): - move_to = os.path.join(from_folder, os.path.relpath(leftoverfile, folder)) - - try: - self.makeDir(os.path.dirname(move_to)) - self.moveFile(leftoverfile, move_to, cleanup) - except Exception as e: - log.error('Failed moving left over file %s to %s: %s %s', (leftoverfile, move_to, e, traceback.format_exc())) - # As we probably tried to overwrite the nfo file, check if it exists and then remove the original - if os.path.isfile(move_to): - if cleanup: - log.info('Deleting left over file %s instead...', leftoverfile) - os.unlink(leftoverfile) - else: - continue - - files.remove(leftoverfile) - extr_files.append(move_to) - - if cleanup: - # Remove all left over folders - log.debug('Removing old movie folder %s...', media_folder) - self.deleteEmptyFolder(media_folder) - - media_folder = os.path.join(from_folder, os.path.relpath(media_folder, folder)) - folder = from_folder - - if extr_files: - files.extend(extr_files) - - # Cleanup files and folder if media_folder was not provided - if not media_folder: - files = [] - folder = None - - return folder, media_folder, files, extr_files diff --git a/couchpotato/core/plugins/scanner.py b/couchpotato/core/plugins/scanner.py new file mode 100644 index 0000000..3ed5079 --- /dev/null +++ b/couchpotato/core/plugins/scanner.py @@ -0,0 +1,868 @@ +from couchpotato import get_db +from couchpotato.core.event import fireEvent, addEvent +from couchpotato.core.helpers.encoding import toUnicode, simplifyString, sp +from couchpotato.core.helpers.variable import getExt, getImdb, tryInt, \ + splitString +from couchpotato.core.logger import CPLog +from couchpotato.core.plugins.base import Plugin +from enzyme.exceptions import NoParserError, ParseError +from guessit import guess_movie_info +from scandir import scandir +from subliminal.videos import Video +import enzyme +import os +import re +import threading +import time +import traceback +from six.moves import filter, map, zip + +log = CPLog(__name__) + + +class Scanner(Plugin): + + ignored_in_path = [os.path.sep + 'extracted' + os.path.sep, 'extracting', '_unpack', '_failed_', '_unknown_', '_exists_', '_failed_remove_', + '_failed_rename_', '.appledouble', '.appledb', '.appledesktop', os.path.sep + '._', '.ds_store', 'cp.cpnfo', + 'thumbs.db', 'ehthumbs.db', 'desktop.ini'] #unpacking, smb-crap, hidden files + 
+    ignore_names = ['extract', 'extracting', 'extracted', 'movie', 'movies', 'film', 'films', 'download', 'downloads', 'video_ts', 'audio_ts', 'bdmv', 'certificate']
+    extensions = {
+        'movie': ['mkv', 'wmv', 'avi', 'mpg', 'mpeg', 'mp4', 'm2ts', 'iso', 'img', 'mdf', 'ts', 'm4v'],
+        'movie_extra': ['mds'],
+        'dvd': ['vts_*', 'vob'],
+        'nfo': ['nfo', 'txt', 'tag'],
+        'subtitle': ['sub', 'srt', 'ssa', 'ass'],
+        'subtitle_extra': ['idx'],
+        'trailer': ['mov', 'mp4', 'flv']
+    }
+
+    file_types = {
+        'subtitle': ('subtitle', 'subtitle'),
+        'subtitle_extra': ('subtitle', 'subtitle_extra'),
+        'trailer': ('video', 'trailer'),
+        'nfo': ('nfo', 'nfo'),
+        'movie': ('video', 'movie'),
+        'movie_extra': ('movie', 'movie_extra'),
+        'backdrop': ('image', 'backdrop'),
+        'poster': ('image', 'poster'),
+        'thumbnail': ('image', 'thumbnail'),
+        'leftover': ('leftover', 'leftover'),
+    }
+
+    file_sizes = { # in MB
+        'movie': {'min': 300},
+        'trailer': {'min': 2, 'max': 250},
+        'backdrop': {'min': 0, 'max': 5},
+    }
+
+    codecs = {
+        'audio': ['dts', 'ac3', 'ac3d', 'mp3'],
+        'video': ['x264', 'h264', 'divx', 'xvid']
+    }
+
+    audio_codec_map = {
+        0x2000: 'ac3',
+        0x2001: 'dts',
+        0x0055: 'mp3',
+        0x0050: 'mp2',
+        0x0001: 'pcm',
+        0x003: 'pcm',
+        0x77a1: 'tta1',
+        0x5756: 'wav',
+        0x6750: 'vorbis',
+        0xF1AC: 'flac',
+        0x00ff: 'aac',
+    }
+
+    source_media = {
+        'bluray': ['bluray', 'blu-ray', 'brrip', 'br-rip'],
+        'hddvd': ['hddvd', 'hd-dvd'],
+        'dvd': ['dvd'],
+        'hdtv': ['hdtv']
+    }
+
+    clean = '[ _\,\.\(\)\[\]\-]?(extended.cut|directors.cut|french|swedisch|danish|dutch|swesub|spanish|german|ac3|dts|custom|dc|divx|divx5|dsr|dsrip|dutch|dvd|dvdr|dvdrip|dvdscr|dvdscreener|screener|dvdivx|cam|fragment|fs|hdtv|hdrip' \
+            '|hdtvrip|internal|limited|multisubs|ntsc|ogg|ogm|pal|pdtv|proper|repack|rerip|retail|r3|r5|bd5|se|svcd|swedish|german|read.nfo|nfofix|unrated|ws|telesync|ts|telecine|tc|brrip|bdrip|video_ts|audio_ts|480p|480i|576p|576i|720p|720i|1080p|1080i|hrhd|hrhdtv|hddvd|bluray|x264|h264|xvid|xvidvd|xxx|www.www|cd[1-9]|\[.*\])([ _\,\.\(\)\[\]\-]|$)'
+    multipart_regex = [
+        '[ _\.-]+cd[ _\.-]*([0-9a-d]+)', #*cd1
+        '[ _\.-]+dvd[ _\.-]*([0-9a-d]+)', #*dvd1
+        '[ _\.-]+part[ _\.-]*([0-9a-d]+)', #*part1
+        '[ _\.-]+dis[ck][ _\.-]*([0-9a-d]+)', #*disk1
+        'cd[ _\.-]*([0-9a-d]+)$', #cd1.ext
+        'dvd[ _\.-]*([0-9a-d]+)$', #dvd1.ext
+        'part[ _\.-]*([0-9a-d]+)$', #part1.mkv
+        'dis[ck][ _\.-]*([0-9a-d]+)$', #disk1.mkv
+        '()[ _\.-]+([0-9]*[abcd]+)(\.....?)$',
+        '([a-z])([0-9]+)(\.....?)$',
+        '()([ab])(\.....?)$' #*a.mkv
+    ]
+
+    cp_imdb = '(.cp.(?P<id>tt[0-9{7}]+).)'
+
+    def __init__(self):
+
+        addEvent('scanner.create_file_identifier', self.createStringIdentifier)
+        addEvent('scanner.remove_cptag', self.removeCPTag)
+
+        addEvent('scanner.scan', self.scan)
+        addEvent('scanner.name_year', self.getReleaseNameYear)
+        addEvent('scanner.partnumber', self.getPartNumber)
+
+    def scan(self, folder = None, files = None, release_download = None, simple = False, newer_than = 0, return_ignored = True, on_found = None):
+
+        folder = sp(folder)
+
+        if not folder or not os.path.isdir(folder):
+            log.error('Folder doesn\'t exists: %s', folder)
+            return {}
+
+        # Get movie "master" files
+        movie_files = {}
+        leftovers = []
+
+        # Scan all files of the folder if no files are set
+        if not files:
+            check_file_date = True
+            try:
+                files = []
+                for root, dirs, walk_files in scandir.walk(folder):
+                    files.extend([sp(os.path.join(root, filename)) for filename in walk_files])
+
+                    # Break if CP wants to shut down
+                    if self.shuttingDown():
+                        break
+
+            except:
log.error('Failed getting files from %s: %s', (folder, traceback.format_exc())) + + log.debug('Found %s files to scan and group in %s', (len(files), folder)) + else: + check_file_date = False + files = [sp(x) for x in files] + + for file_path in files: + + if not os.path.exists(file_path): + continue + + # Remove ignored files + if self.isSampleFile(file_path): + leftovers.append(file_path) + continue + elif not self.keepFile(file_path): + continue + + is_dvd_file = self.isDVDFile(file_path) + if self.filesizeBetween(file_path, self.file_sizes['movie']) or is_dvd_file: # Minimal 300MB files or is DVD file + + # Normal identifier + identifier = self.createStringIdentifier(file_path, folder, exclude_filename = is_dvd_file) + identifiers = [identifier] + + # Identifier with quality + quality = fireEvent('quality.guess', [file_path], single = True) if not is_dvd_file else {'identifier':'dvdr'} + if quality: + identifier_with_quality = '%s %s' % (identifier, quality.get('identifier', '')) + identifiers = [identifier_with_quality, identifier] + + if not movie_files.get(identifier): + movie_files[identifier] = { + 'unsorted_files': [], + 'identifiers': identifiers, + 'is_dvd': is_dvd_file, + } + + movie_files[identifier]['unsorted_files'].append(file_path) + else: + leftovers.append(file_path) + + # Break if CP wants to shut down + if self.shuttingDown(): + break + + # Cleanup + del files + + # Sort reverse, this prevents "Iron man 2" from getting grouped with "Iron man" as the "Iron Man 2" + # files will be grouped first. + leftovers = set(sorted(leftovers, reverse = True)) + + # Group files minus extension + ignored_identifiers = [] + for identifier, group in movie_files.items(): + if identifier not in group['identifiers'] and len(identifier) > 0: group['identifiers'].append(identifier) + + log.debug('Grouping files: %s', identifier) + + has_ignored = 0 + for file_path in list(group['unsorted_files']): + ext = getExt(file_path) + wo_ext = file_path[:-(len(ext) + 1)] + found_files = set([i for i in leftovers if wo_ext in i]) + group['unsorted_files'].extend(found_files) + leftovers = leftovers - found_files + + has_ignored += 1 if ext == 'ignore' else 0 + + if has_ignored == 0: + for file_path in list(group['unsorted_files']): + ext = getExt(file_path) + has_ignored += 1 if ext == 'ignore' else 0 + + if has_ignored > 0: + ignored_identifiers.append(identifier) + + # Break if CP wants to shut down + if self.shuttingDown(): + break + + + # Create identifiers for all leftover files + path_identifiers = {} + for file_path in leftovers: + identifier = self.createStringIdentifier(file_path, folder) + + if not path_identifiers.get(identifier): + path_identifiers[identifier] = [] + + path_identifiers[identifier].append(file_path) + + + # Group the files based on the identifier + delete_identifiers = [] + for identifier, found_files in path_identifiers.items(): + log.debug('Grouping files on identifier: %s', identifier) + + group = movie_files.get(identifier) + if group: + group['unsorted_files'].extend(found_files) + delete_identifiers.append(identifier) + + # Remove the found files from the leftover stack + leftovers = leftovers - set(found_files) + + # Break if CP wants to shut down + if self.shuttingDown(): + break + + # Cleaning up used + for identifier in delete_identifiers: + if path_identifiers.get(identifier): + del path_identifiers[identifier] + del delete_identifiers + + # Group based on folder + delete_identifiers = [] + for identifier, found_files in path_identifiers.items(): + 
log.debug('Grouping files on foldername: %s', identifier) + + for ff in found_files: + new_identifier = self.createStringIdentifier(os.path.dirname(ff), folder) + + group = movie_files.get(new_identifier) + if group: + group['unsorted_files'].extend([ff]) + delete_identifiers.append(identifier) + + # Remove the found files from the leftover stack + leftovers -= leftovers - set([ff]) + + # Break if CP wants to shut down + if self.shuttingDown(): + break + + # leftovers should be empty + if leftovers: + log.debug('Some files are still left over: %s', leftovers) + + # Cleaning up used + for identifier in delete_identifiers: + if path_identifiers.get(identifier): + del path_identifiers[identifier] + del delete_identifiers + + # Make sure we remove older / still extracting files + valid_files = {} + while True and not self.shuttingDown(): + try: + identifier, group = movie_files.popitem() + except: + break + + # Check if movie is fresh and maybe still unpacking, ignore files newer than 1 minute + if check_file_date: + files_too_new, time_string = self.checkFilesChanged(group['unsorted_files']) + if files_too_new: + log.info('Files seem to be still unpacking or just unpacked (created on %s), ignoring for now: %s', (time_string, identifier)) + + # Delete the unsorted list + del group['unsorted_files'] + + continue + + # Only process movies newer than x + if newer_than and newer_than > 0: + has_new_files = False + for cur_file in group['unsorted_files']: + file_time = self.getFileTimes(cur_file) + if file_time[0] > newer_than or file_time[1] > newer_than: + has_new_files = True + break + + if not has_new_files: + log.debug('None of the files have changed since %s for %s, skipping.', (time.ctime(newer_than), identifier)) + + # Delete the unsorted list + del group['unsorted_files'] + + continue + + valid_files[identifier] = group + + del movie_files + + total_found = len(valid_files) + + # Make sure only one movie was found if a download ID is provided + if release_download and total_found == 0: + log.info('Download ID provided (%s), but no groups found! Make sure the download contains valid media files (fully extracted).', release_download.get('imdb_id')) + elif release_download and total_found > 1: + log.info('Download ID provided (%s), but more than one group found (%s). 
Ignoring Download ID...', (release_download.get('imdb_id'), len(valid_files))) + release_download = None + + # Determine file types + processed_movies = {} + while True and not self.shuttingDown(): + try: + identifier, group = valid_files.popitem() + except: + break + + if return_ignored is False and identifier in ignored_identifiers: + log.debug('Ignore file found, ignoring release: %s', identifier) + continue + + # Group extra (and easy) files first + group['files'] = { + 'movie_extra': self.getMovieExtras(group['unsorted_files']), + 'subtitle': self.getSubtitles(group['unsorted_files']), + 'subtitle_extra': self.getSubtitlesExtras(group['unsorted_files']), + 'nfo': self.getNfo(group['unsorted_files']), + 'trailer': self.getTrailers(group['unsorted_files']), + 'leftover': set(group['unsorted_files']), + } + + # Media files + if group['is_dvd']: + group['files']['movie'] = self.getDVDFiles(group['unsorted_files']) + else: + group['files']['movie'] = self.getMediaFiles(group['unsorted_files']) + + if len(group['files']['movie']) == 0: + log.error('Couldn\'t find any movie files for %s', identifier) + continue + + log.debug('Getting metadata for %s', identifier) + group['meta_data'] = self.getMetaData(group, folder = folder, release_download = release_download) + + # Subtitle meta + group['subtitle_language'] = self.getSubtitleLanguage(group) if not simple else {} + + # Get parent dir from movie files + for movie_file in group['files']['movie']: + group['parentdir'] = os.path.dirname(movie_file) + group['dirname'] = None + + folder_names = group['parentdir'].replace(folder, '').split(os.path.sep) + folder_names.reverse() + + # Try and get a proper dirname, so no "A", "Movie", "Download" etc + for folder_name in folder_names: + if folder_name.lower() not in self.ignore_names and len(folder_name) > 2: + group['dirname'] = folder_name + break + + break + + # Leftover "sorted" files + for file_type in group['files']: + if not file_type is 'leftover': + group['files']['leftover'] -= set(group['files'][file_type]) + group['files'][file_type] = list(group['files'][file_type]) + group['files']['leftover'] = list(group['files']['leftover']) + + # Delete the unsorted list + del group['unsorted_files'] + + # Determine movie + group['media'] = self.determineMedia(group, release_download = release_download) + if not group['media']: + log.error('Unable to determine media: %s', group['identifiers']) + else: + group['identifier'] = group['media'].get('identifier') or group['media']['info'].get('imdb') + + processed_movies[identifier] = group + + # Notify parent & progress on something found + if on_found: + on_found(group, total_found, total_found - len(processed_movies)) + + # Wait for all the async events calm down a bit + while threading.activeCount() > 100 and not self.shuttingDown(): + log.debug('Too many threads active, waiting a few seconds') + time.sleep(10) + + if len(processed_movies) > 0: + log.info('Found %s movies in the folder %s', (len(processed_movies), folder)) + else: + log.debug('Found no movies in the folder %s', folder) + + return processed_movies + + def getMetaData(self, group, folder = '', release_download = None): + + data = {} + files = list(group['files']['movie']) + + for cur_file in files: + if not self.filesizeBetween(cur_file, self.file_sizes['movie']): continue # Ignore smaller files + + meta = self.getMeta(cur_file) + + try: + data['video'] = meta.get('video', self.getCodec(cur_file, self.codecs['video'])) + data['audio'] = meta.get('audio', self.getCodec(cur_file, 
self.codecs['audio'])) + data['resolution_width'] = meta.get('resolution_width', 720) + data['resolution_height'] = meta.get('resolution_height', 480) + data['audio_channels'] = meta.get('audio_channels', 2.0) + data['aspect'] = round(float(meta.get('resolution_width', 720)) / meta.get('resolution_height', 480), 2) + except: + log.debug('Error parsing metadata: %s %s', (cur_file, traceback.format_exc())) + pass + + if data.get('audio'): break + + # Use the quality guess first, if that failes use the quality we wanted to download + data['quality'] = None + if release_download and release_download.get('quality'): + data['quality'] = fireEvent('quality.single', release_download.get('quality'), single = True) + + if not data['quality']: + data['quality'] = fireEvent('quality.guess', files = files, extra = data, single = True) + + if not data['quality']: + data['quality'] = fireEvent('quality.single', 'dvdr' if group['is_dvd'] else 'dvdrip', single = True) + + data['quality_type'] = 'HD' if data.get('resolution_width', 0) >= 1280 or data['quality'].get('hd') else 'SD' + + filename = re.sub('(.cp\(tt[0-9{7}]+\))', '', files[0]) + data['group'] = self.getGroup(filename[len(folder):]) + data['source'] = self.getSourceMedia(filename) + + return data + + def getMeta(self, filename): + + try: + p = enzyme.parse(filename) + + # Video codec + vc = ('h264' if p.video[0].codec == 'AVC1' else p.video[0].codec).lower() + + # Audio codec + ac = p.audio[0].codec + try: ac = self.audio_codec_map.get(p.audio[0].codec) + except: pass + + return { + 'video': vc, + 'audio': ac, + 'resolution_width': tryInt(p.video[0].width), + 'resolution_height': tryInt(p.video[0].height), + 'audio_channels': p.audio[0].channels, + } + except ParseError: + log.debug('Failed to parse meta for %s', filename) + except NoParserError: + log.debug('No parser found for %s', filename) + except: + log.debug('Failed parsing %s', filename) + + return {} + + def getSubtitleLanguage(self, group): + detected_languages = {} + + # Subliminal scanner + paths = None + try: + paths = group['files']['movie'] + scan_result = [] + for p in paths: + if not group['is_dvd']: + video = Video.from_path(toUnicode(p)) + video_result = [(video, video.scan())] + scan_result.extend(video_result) + + for video, detected_subtitles in scan_result: + for s in detected_subtitles: + if s.language and s.path not in paths: + detected_languages[s.path] = [s.language] + except: + log.debug('Failed parsing subtitle languages for %s: %s', (paths, traceback.format_exc())) + + # IDX + for extra in group['files']['subtitle_extra']: + try: + if os.path.isfile(extra): + output = open(extra, 'r') + txt = output.read() + output.close() + + idx_langs = re.findall('\nid: (\w+)', txt) + + sub_file = '%s.sub' % os.path.splitext(extra)[0] + if len(idx_langs) > 0 and os.path.isfile(sub_file): + detected_languages[sub_file] = idx_langs + except: + log.error('Failed parsing subtitle idx for %s: %s', (extra, traceback.format_exc())) + + return detected_languages + + def determineMedia(self, group, release_download = None): + + # Get imdb id from downloader + imdb_id = release_download and release_download.get('imdb_id') + if imdb_id: + log.debug('Found movie via imdb id from it\'s download id: %s', release_download.get('imdb_id')) + + files = group['files'] + + # Check for CP(imdb_id) string in the file paths + if not imdb_id: + for cur_file in files['movie']: + imdb_id = self.getCPImdb(cur_file) + if imdb_id: + log.debug('Found movie via CP tag: %s', cur_file) + break + + # Check and 
see if nfo contains the imdb-id + nfo_file = None + if not imdb_id: + try: + for nf in files['nfo']: + imdb_id = getImdb(nf, check_inside = True) + if imdb_id: + log.debug('Found movie via nfo file: %s', nf) + nfo_file = nf + break + except: + pass + + # Check and see if filenames contains the imdb-id + if not imdb_id: + try: + for filetype in files: + for filetype_file in files[filetype]: + imdb_id = getImdb(filetype_file) + if imdb_id: + log.debug('Found movie via imdb in filename: %s', nfo_file) + break + except: + pass + + # Search based on identifiers + if not imdb_id: + for identifier in group['identifiers']: + + if len(identifier) > 2: + try: filename = list(group['files'].get('movie'))[0] + except: filename = None + + name_year = self.getReleaseNameYear(identifier, file_name = filename if not group['is_dvd'] else None) + if name_year.get('name') and name_year.get('year'): + movie = fireEvent('movie.search', q = '%(name)s %(year)s' % name_year, merge = True, limit = 1) + + if len(movie) > 0: + imdb_id = movie[0].get('imdb') + log.debug('Found movie via search: %s', cur_file) + if imdb_id: break + else: + log.debug('Identifier to short to use for search: %s', identifier) + + if imdb_id: + try: + db = get_db() + return db.get('media', imdb_id, with_doc = True)['doc'] + except: + log.debug('Movie "%s" not in library, just getting info', imdb_id) + return { + 'identifier': imdb_id, + 'info': fireEvent('movie.info', identifier = imdb_id, merge = True, extended = False) + } + + log.error('No imdb_id found for %s. Add a NFO file with IMDB id or add the year to the filename.', group['identifiers']) + return {} + + def getCPImdb(self, string): + + try: + m = re.search(self.cp_imdb, string.lower()) + id = m.group('id') + if id: return id + except AttributeError: + pass + + return False + + def removeCPTag(self, name): + try: + return re.sub(self.cp_imdb, '', name) + except: + pass + return name + + def getSamples(self, files): + return set(filter(lambda s: self.isSampleFile(s), files)) + + def getMediaFiles(self, files): + + def test(s): + return self.filesizeBetween(s, self.file_sizes['movie']) and getExt(s.lower()) in self.extensions['movie'] and not self.isSampleFile(s) + + return set(filter(test, files)) + + def getMovieExtras(self, files): + return set(filter(lambda s: getExt(s.lower()) in self.extensions['movie_extra'], files)) + + def getDVDFiles(self, files): + def test(s): + return self.isDVDFile(s) + + return set(filter(test, files)) + + def getSubtitles(self, files): + return set(filter(lambda s: getExt(s.lower()) in self.extensions['subtitle'], files)) + + def getSubtitlesExtras(self, files): + return set(filter(lambda s: getExt(s.lower()) in self.extensions['subtitle_extra'], files)) + + def getNfo(self, files): + return set(filter(lambda s: getExt(s.lower()) in self.extensions['nfo'], files)) + + def getTrailers(self, files): + + def test(s): + return re.search('(^|[\W_])trailer\d*[\W_]', s.lower()) and self.filesizeBetween(s, self.file_sizes['trailer']) + + return set(filter(test, files)) + + def getImages(self, files): + + def test(s): + return getExt(s.lower()) in ['jpg', 'jpeg', 'png', 'gif', 'bmp', 'tbn'] + files = set(filter(test, files)) + + images = { + 'backdrop': set(filter(lambda s: re.search('(^|[\W_])fanart|backdrop\d*[\W_]', s.lower()) and self.filesizeBetween(s, self.file_sizes['backdrop']), files)) + } + + # Rest + images['rest'] = files - images['backdrop'] + + return images + + + def isDVDFile(self, file_name): + + if 
list(set(file_name.lower().split(os.path.sep)) & set(['video_ts', 'audio_ts'])):
+            return True
+
+        for needle in ['vts_', 'video_ts', 'audio_ts', 'bdmv', 'certificate']:
+            if needle in file_name.lower():
+                return True
+
+        return False
+
+    def keepFile(self, filename):
+
+        # ignoredpaths
+        for i in self.ignored_in_path:
+            if i in filename.lower():
+                log.debug('Ignored "%s" contains "%s".', (filename, i))
+                return False
+
+        # All is OK
+        return True
+
+    def isSampleFile(self, filename):
+        is_sample = re.search('(^|[\W_])sample\d*[\W_]', filename.lower())
+        if is_sample: log.debug('Is sample file: %s', filename)
+        return is_sample
+
+    def filesizeBetween(self, file, file_size = None):
+        if not file_size: file_size = []
+
+        try:
+            return (file_size.get('min', 0) * 1048576) < os.path.getsize(file) < (file_size.get('max', 100000) * 1048576)
+        except:
+            log.error('Couldn\'t get filesize of %s.', file)
+
+        return False
+
+    def createStringIdentifier(self, file_path, folder = '', exclude_filename = False):
+
+        year = self.findYear(file_path)
+
+        identifier = file_path.replace(folder, '').lstrip(os.path.sep) # root folder
+        identifier = os.path.splitext(identifier)[0] # ext
+
+        try:
+            path_split = splitString(identifier, os.path.sep)
+            identifier = path_split[-2] if len(path_split) > 1 and len(path_split[-2]) > len(path_split[-1]) else path_split[-1] # Only get filename
+        except: pass
+
+        if exclude_filename:
+            identifier = identifier[:len(identifier) - len(os.path.split(identifier)[-1])]
+
+        # multipart
+        identifier = self.removeMultipart(identifier)
+
+        # remove cptag
+        identifier = self.removeCPTag(identifier)
+
+        # groups, release tags, scenename cleaner, regex isn't correct
+        identifier = re.sub(self.clean, '::', simplifyString(identifier)).strip(':')
+
+        # Year
+        if year and identifier[:4] != year:
+            split_by = ':::' if ':::' in identifier else year
+            identifier = '%s %s' % (identifier.split(split_by)[0].strip(), year)
+        else:
+            identifier = identifier.split('::')[0]
+
+        # Remove duplicates
+        out = []
+        for word in identifier.split():
+            if not word in out:
+                out.append(word)
+
+        identifier = ' '.join(out)
+
+        return simplifyString(identifier)
+
+
+    def removeMultipart(self, name):
+        for regex in self.multipart_regex:
+            try:
+                found = re.sub(regex, '', name)
+                if found != name:
+                    name = found
+            except:
+                pass
+        return name
+
+    def getPartNumber(self, name):
+        for regex in self.multipart_regex:
+            try:
+                found = re.search(regex, name)
+                if found:
+                    return found.group(1)
+                return 1
+            except:
+                pass
+        return 1
+
+    def getCodec(self, filename, codecs):
+        codecs = map(re.escape, codecs)
+        try:
+            codec = re.search('[^A-Z0-9](?P<codec>' + '|'.join(codecs) + ')[^A-Z0-9]', filename, re.I)
+            return (codec and codec.group('codec')) or ''
+        except:
+            return ''
+
+    def getGroup(self, file):
+        try:
+            match = re.findall('\-([A-Z0-9]+)[\.\/]', file, re.I)
+            return match[-1] or ''
+        except:
+            return ''
+
+    def getSourceMedia(self, file):
+        for media in self.source_media:
+            for alias in self.source_media[media]:
+                if alias in file.lower():
+                    return media
+
+        return None
+
+    def findYear(self, text):
+
+        # Search year inside () or [] first
+        matches = re.findall('(\(|\[)(?P<year>19[0-9]{2}|20[0-9]{2})(\]|\))', text)
+        if matches:
+            return matches[-1][1]
+
+        # Search normal
+        matches = re.findall('(?P<year>19[0-9]{2}|20[0-9]{2})', text)
+        if matches:
+            return matches[-1]
+
+        return ''
+
+    def getReleaseNameYear(self, release_name, file_name = None):
+
+        release_name = release_name.strip(' .-_')
+
+        # Use guessit first
+        guess = {}
if file_name: + try: + guessit = guess_movie_info(toUnicode(file_name)) + if guessit.get('title') and guessit.get('year'): + guess = { + 'name': guessit.get('title'), + 'year': guessit.get('year'), + } + except: + log.debug('Could not detect via guessit "%s": %s', (file_name, traceback.format_exc())) + + # Backup to simple + cleaned = ' '.join(re.split('\W+', simplifyString(release_name))) + cleaned = re.sub(self.clean, ' ', cleaned) + + for year_str in [file_name, release_name, cleaned]: + if not year_str: continue + year = self.findYear(year_str) + if year: + break + + cp_guess = {} + + if year: # Split name on year + try: + movie_name = cleaned.rsplit(year, 1).pop(0).strip() + if movie_name: + cp_guess = { + 'name': movie_name, + 'year': int(year), + } + except: + pass + + if not cp_guess: # Split name on multiple spaces + try: + movie_name = cleaned.split(' ').pop(0).strip() + cp_guess = { + 'name': movie_name, + 'year': int(year) if movie_name[:4] != year else 0, + } + except: + pass + + if cp_guess.get('year') == guess.get('year') and len(cp_guess.get('name', '')) > len(guess.get('name', '')): + return cp_guess + elif guess == {}: + return cp_guess + + return guess diff --git a/couchpotato/core/plugins/scanner/__init__.py b/couchpotato/core/plugins/scanner/__init__.py deleted file mode 100644 index 66c6b39..0000000 --- a/couchpotato/core/plugins/scanner/__init__.py +++ /dev/null @@ -1,7 +0,0 @@ -from .main import Scanner - - -def start(): - return Scanner() - -config = [] diff --git a/couchpotato/core/plugins/scanner/main.py b/couchpotato/core/plugins/scanner/main.py deleted file mode 100644 index 3ed5079..0000000 --- a/couchpotato/core/plugins/scanner/main.py +++ /dev/null @@ -1,868 +0,0 @@ -from couchpotato import get_db -from couchpotato.core.event import fireEvent, addEvent -from couchpotato.core.helpers.encoding import toUnicode, simplifyString, sp -from couchpotato.core.helpers.variable import getExt, getImdb, tryInt, \ - splitString -from couchpotato.core.logger import CPLog -from couchpotato.core.plugins.base import Plugin -from enzyme.exceptions import NoParserError, ParseError -from guessit import guess_movie_info -from scandir import scandir -from subliminal.videos import Video -import enzyme -import os -import re -import threading -import time -import traceback -from six.moves import filter, map, zip - -log = CPLog(__name__) - - -class Scanner(Plugin): - - ignored_in_path = [os.path.sep + 'extracted' + os.path.sep, 'extracting', '_unpack', '_failed_', '_unknown_', '_exists_', '_failed_remove_', - '_failed_rename_', '.appledouble', '.appledb', '.appledesktop', os.path.sep + '._', '.ds_store', 'cp.cpnfo', - 'thumbs.db', 'ehthumbs.db', 'desktop.ini'] #unpacking, smb-crap, hidden files - ignore_names = ['extract', 'extracting', 'extracted', 'movie', 'movies', 'film', 'films', 'download', 'downloads', 'video_ts', 'audio_ts', 'bdmv', 'certificate'] - extensions = { - 'movie': ['mkv', 'wmv', 'avi', 'mpg', 'mpeg', 'mp4', 'm2ts', 'iso', 'img', 'mdf', 'ts', 'm4v'], - 'movie_extra': ['mds'], - 'dvd': ['vts_*', 'vob'], - 'nfo': ['nfo', 'txt', 'tag'], - 'subtitle': ['sub', 'srt', 'ssa', 'ass'], - 'subtitle_extra': ['idx'], - 'trailer': ['mov', 'mp4', 'flv'] - } - - file_types = { - 'subtitle': ('subtitle', 'subtitle'), - 'subtitle_extra': ('subtitle', 'subtitle_extra'), - 'trailer': ('video', 'trailer'), - 'nfo': ('nfo', 'nfo'), - 'movie': ('video', 'movie'), - 'movie_extra': ('movie', 'movie_extra'), - 'backdrop': ('image', 'backdrop'), - 'poster': ('image', 'poster'), - 'thumbnail': 
('image', 'thumbnail'), - 'leftover': ('leftover', 'leftover'), - } - - file_sizes = { # in MB - 'movie': {'min': 300}, - 'trailer': {'min': 2, 'max': 250}, - 'backdrop': {'min': 0, 'max': 5}, - } - - codecs = { - 'audio': ['dts', 'ac3', 'ac3d', 'mp3'], - 'video': ['x264', 'h264', 'divx', 'xvid'] - } - - audio_codec_map = { - 0x2000: 'ac3', - 0x2001: 'dts', - 0x0055: 'mp3', - 0x0050: 'mp2', - 0x0001: 'pcm', - 0x003: 'pcm', - 0x77a1: 'tta1', - 0x5756: 'wav', - 0x6750: 'vorbis', - 0xF1AC: 'flac', - 0x00ff: 'aac', - } - - source_media = { - 'bluray': ['bluray', 'blu-ray', 'brrip', 'br-rip'], - 'hddvd': ['hddvd', 'hd-dvd'], - 'dvd': ['dvd'], - 'hdtv': ['hdtv'] - } - - clean = '[ _\,\.\(\)\[\]\-]?(extended.cut|directors.cut|french|swedisch|danish|dutch|swesub|spanish|german|ac3|dts|custom|dc|divx|divx5|dsr|dsrip|dutch|dvd|dvdr|dvdrip|dvdscr|dvdscreener|screener|dvdivx|cam|fragment|fs|hdtv|hdrip' \ - '|hdtvrip|internal|limited|multisubs|ntsc|ogg|ogm|pal|pdtv|proper|repack|rerip|retail|r3|r5|bd5|se|svcd|swedish|german|read.nfo|nfofix|unrated|ws|telesync|ts|telecine|tc|brrip|bdrip|video_ts|audio_ts|480p|480i|576p|576i|720p|720i|1080p|1080i|hrhd|hrhdtv|hddvd|bluray|x264|h264|xvid|xvidvd|xxx|www.www|cd[1-9]|\[.*\])([ _\,\.\(\)\[\]\-]|$)' - multipart_regex = [ - '[ _\.-]+cd[ _\.-]*([0-9a-d]+)', #*cd1 - '[ _\.-]+dvd[ _\.-]*([0-9a-d]+)', #*dvd1 - '[ _\.-]+part[ _\.-]*([0-9a-d]+)', #*part1 - '[ _\.-]+dis[ck][ _\.-]*([0-9a-d]+)', #*disk1 - 'cd[ _\.-]*([0-9a-d]+)$', #cd1.ext - 'dvd[ _\.-]*([0-9a-d]+)$', #dvd1.ext - 'part[ _\.-]*([0-9a-d]+)$', #part1.mkv - 'dis[ck][ _\.-]*([0-9a-d]+)$', #disk1.mkv - '()[ _\.-]+([0-9]*[abcd]+)(\.....?)$', - '([a-z])([0-9]+)(\.....?)$', - '()([ab])(\.....?)$' #*a.mkv - ] - - cp_imdb = '(.cp.(?Ptt[0-9{7}]+).)' - - def __init__(self): - - addEvent('scanner.create_file_identifier', self.createStringIdentifier) - addEvent('scanner.remove_cptag', self.removeCPTag) - - addEvent('scanner.scan', self.scan) - addEvent('scanner.name_year', self.getReleaseNameYear) - addEvent('scanner.partnumber', self.getPartNumber) - - def scan(self, folder = None, files = None, release_download = None, simple = False, newer_than = 0, return_ignored = True, on_found = None): - - folder = sp(folder) - - if not folder or not os.path.isdir(folder): - log.error('Folder doesn\'t exists: %s', folder) - return {} - - # Get movie "master" files - movie_files = {} - leftovers = [] - - # Scan all files of the folder if no files are set - if not files: - check_file_date = True - try: - files = [] - for root, dirs, walk_files in scandir.walk(folder): - files.extend([sp(os.path.join(root, filename)) for filename in walk_files]) - - # Break if CP wants to shut down - if self.shuttingDown(): - break - - except: - log.error('Failed getting files from %s: %s', (folder, traceback.format_exc())) - - log.debug('Found %s files to scan and group in %s', (len(files), folder)) - else: - check_file_date = False - files = [sp(x) for x in files] - - for file_path in files: - - if not os.path.exists(file_path): - continue - - # Remove ignored files - if self.isSampleFile(file_path): - leftovers.append(file_path) - continue - elif not self.keepFile(file_path): - continue - - is_dvd_file = self.isDVDFile(file_path) - if self.filesizeBetween(file_path, self.file_sizes['movie']) or is_dvd_file: # Minimal 300MB files or is DVD file - - # Normal identifier - identifier = self.createStringIdentifier(file_path, folder, exclude_filename = is_dvd_file) - identifiers = [identifier] - - # Identifier with quality - quality = 
fireEvent('quality.guess', [file_path], single = True) if not is_dvd_file else {'identifier':'dvdr'} - if quality: - identifier_with_quality = '%s %s' % (identifier, quality.get('identifier', '')) - identifiers = [identifier_with_quality, identifier] - - if not movie_files.get(identifier): - movie_files[identifier] = { - 'unsorted_files': [], - 'identifiers': identifiers, - 'is_dvd': is_dvd_file, - } - - movie_files[identifier]['unsorted_files'].append(file_path) - else: - leftovers.append(file_path) - - # Break if CP wants to shut down - if self.shuttingDown(): - break - - # Cleanup - del files - - # Sort reverse, this prevents "Iron man 2" from getting grouped with "Iron man" as the "Iron Man 2" - # files will be grouped first. - leftovers = set(sorted(leftovers, reverse = True)) - - # Group files minus extension - ignored_identifiers = [] - for identifier, group in movie_files.items(): - if identifier not in group['identifiers'] and len(identifier) > 0: group['identifiers'].append(identifier) - - log.debug('Grouping files: %s', identifier) - - has_ignored = 0 - for file_path in list(group['unsorted_files']): - ext = getExt(file_path) - wo_ext = file_path[:-(len(ext) + 1)] - found_files = set([i for i in leftovers if wo_ext in i]) - group['unsorted_files'].extend(found_files) - leftovers = leftovers - found_files - - has_ignored += 1 if ext == 'ignore' else 0 - - if has_ignored == 0: - for file_path in list(group['unsorted_files']): - ext = getExt(file_path) - has_ignored += 1 if ext == 'ignore' else 0 - - if has_ignored > 0: - ignored_identifiers.append(identifier) - - # Break if CP wants to shut down - if self.shuttingDown(): - break - - - # Create identifiers for all leftover files - path_identifiers = {} - for file_path in leftovers: - identifier = self.createStringIdentifier(file_path, folder) - - if not path_identifiers.get(identifier): - path_identifiers[identifier] = [] - - path_identifiers[identifier].append(file_path) - - - # Group the files based on the identifier - delete_identifiers = [] - for identifier, found_files in path_identifiers.items(): - log.debug('Grouping files on identifier: %s', identifier) - - group = movie_files.get(identifier) - if group: - group['unsorted_files'].extend(found_files) - delete_identifiers.append(identifier) - - # Remove the found files from the leftover stack - leftovers = leftovers - set(found_files) - - # Break if CP wants to shut down - if self.shuttingDown(): - break - - # Cleaning up used - for identifier in delete_identifiers: - if path_identifiers.get(identifier): - del path_identifiers[identifier] - del delete_identifiers - - # Group based on folder - delete_identifiers = [] - for identifier, found_files in path_identifiers.items(): - log.debug('Grouping files on foldername: %s', identifier) - - for ff in found_files: - new_identifier = self.createStringIdentifier(os.path.dirname(ff), folder) - - group = movie_files.get(new_identifier) - if group: - group['unsorted_files'].extend([ff]) - delete_identifiers.append(identifier) - - # Remove the found files from the leftover stack - leftovers -= leftovers - set([ff]) - - # Break if CP wants to shut down - if self.shuttingDown(): - break - - # leftovers should be empty - if leftovers: - log.debug('Some files are still left over: %s', leftovers) - - # Cleaning up used - for identifier in delete_identifiers: - if path_identifiers.get(identifier): - del path_identifiers[identifier] - del delete_identifiers - - # Make sure we remove older / still extracting files - valid_files = {} - while 
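# Illustrative sketch (not part of the patch): the scan() code above collects multi-file
# releases under one string identifier. A minimal standalone version of that grouping step,
# using a much simpler identifier function than createStringIdentifier (an assumption for
# the example):
import os
import re

def string_identifier(file_path):
    # Lowercase the file name, drop the extension and collapse separators.
    name = os.path.splitext(os.path.basename(file_path))[0].lower()
    return re.sub(r'[\W_]+', ' ', name).strip()

def group_files(paths):
    groups = {}
    for path in paths:
        groups.setdefault(string_identifier(path), []).append(path)
    return groups

# group_files(['/x/Movie.2011.mkv', '/x/Movie.2011.srt'])
# -> {'movie 2011': ['/x/Movie.2011.mkv', '/x/Movie.2011.srt']}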
True and not self.shuttingDown(): - try: - identifier, group = movie_files.popitem() - except: - break - - # Check if movie is fresh and maybe still unpacking, ignore files newer than 1 minute - if check_file_date: - files_too_new, time_string = self.checkFilesChanged(group['unsorted_files']) - if files_too_new: - log.info('Files seem to be still unpacking or just unpacked (created on %s), ignoring for now: %s', (time_string, identifier)) - - # Delete the unsorted list - del group['unsorted_files'] - - continue - - # Only process movies newer than x - if newer_than and newer_than > 0: - has_new_files = False - for cur_file in group['unsorted_files']: - file_time = self.getFileTimes(cur_file) - if file_time[0] > newer_than or file_time[1] > newer_than: - has_new_files = True - break - - if not has_new_files: - log.debug('None of the files have changed since %s for %s, skipping.', (time.ctime(newer_than), identifier)) - - # Delete the unsorted list - del group['unsorted_files'] - - continue - - valid_files[identifier] = group - - del movie_files - - total_found = len(valid_files) - - # Make sure only one movie was found if a download ID is provided - if release_download and total_found == 0: - log.info('Download ID provided (%s), but no groups found! Make sure the download contains valid media files (fully extracted).', release_download.get('imdb_id')) - elif release_download and total_found > 1: - log.info('Download ID provided (%s), but more than one group found (%s). Ignoring Download ID...', (release_download.get('imdb_id'), len(valid_files))) - release_download = None - - # Determine file types - processed_movies = {} - while True and not self.shuttingDown(): - try: - identifier, group = valid_files.popitem() - except: - break - - if return_ignored is False and identifier in ignored_identifiers: - log.debug('Ignore file found, ignoring release: %s', identifier) - continue - - # Group extra (and easy) files first - group['files'] = { - 'movie_extra': self.getMovieExtras(group['unsorted_files']), - 'subtitle': self.getSubtitles(group['unsorted_files']), - 'subtitle_extra': self.getSubtitlesExtras(group['unsorted_files']), - 'nfo': self.getNfo(group['unsorted_files']), - 'trailer': self.getTrailers(group['unsorted_files']), - 'leftover': set(group['unsorted_files']), - } - - # Media files - if group['is_dvd']: - group['files']['movie'] = self.getDVDFiles(group['unsorted_files']) - else: - group['files']['movie'] = self.getMediaFiles(group['unsorted_files']) - - if len(group['files']['movie']) == 0: - log.error('Couldn\'t find any movie files for %s', identifier) - continue - - log.debug('Getting metadata for %s', identifier) - group['meta_data'] = self.getMetaData(group, folder = folder, release_download = release_download) - - # Subtitle meta - group['subtitle_language'] = self.getSubtitleLanguage(group) if not simple else {} - - # Get parent dir from movie files - for movie_file in group['files']['movie']: - group['parentdir'] = os.path.dirname(movie_file) - group['dirname'] = None - - folder_names = group['parentdir'].replace(folder, '').split(os.path.sep) - folder_names.reverse() - - # Try and get a proper dirname, so no "A", "Movie", "Download" etc - for folder_name in folder_names: - if folder_name.lower() not in self.ignore_names and len(folder_name) > 2: - group['dirname'] = folder_name - break - - break - - # Leftover "sorted" files - for file_type in group['files']: - if not file_type is 'leftover': - group['files']['leftover'] -= set(group['files'][file_type]) - 
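# Illustrative sketch (not part of the patch): the check above skips groups whose files look
# like they are still being unpacked. The core of that guard is a timestamp comparison, which
# in isolation could be written as (the one-minute window and helper name are assumptions):
import os
import time

def recently_changed(paths, window = 60):
    # True if any file was created or modified within the last `window` seconds.
    now = time.time()
    for path in paths:
        if now - os.path.getmtime(path) < window or now - os.path.getctime(path) < window:
            return True
    return False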
group['files'][file_type] = list(group['files'][file_type]) - group['files']['leftover'] = list(group['files']['leftover']) - - # Delete the unsorted list - del group['unsorted_files'] - - # Determine movie - group['media'] = self.determineMedia(group, release_download = release_download) - if not group['media']: - log.error('Unable to determine media: %s', group['identifiers']) - else: - group['identifier'] = group['media'].get('identifier') or group['media']['info'].get('imdb') - - processed_movies[identifier] = group - - # Notify parent & progress on something found - if on_found: - on_found(group, total_found, total_found - len(processed_movies)) - - # Wait for all the async events calm down a bit - while threading.activeCount() > 100 and not self.shuttingDown(): - log.debug('Too many threads active, waiting a few seconds') - time.sleep(10) - - if len(processed_movies) > 0: - log.info('Found %s movies in the folder %s', (len(processed_movies), folder)) - else: - log.debug('Found no movies in the folder %s', folder) - - return processed_movies - - def getMetaData(self, group, folder = '', release_download = None): - - data = {} - files = list(group['files']['movie']) - - for cur_file in files: - if not self.filesizeBetween(cur_file, self.file_sizes['movie']): continue # Ignore smaller files - - meta = self.getMeta(cur_file) - - try: - data['video'] = meta.get('video', self.getCodec(cur_file, self.codecs['video'])) - data['audio'] = meta.get('audio', self.getCodec(cur_file, self.codecs['audio'])) - data['resolution_width'] = meta.get('resolution_width', 720) - data['resolution_height'] = meta.get('resolution_height', 480) - data['audio_channels'] = meta.get('audio_channels', 2.0) - data['aspect'] = round(float(meta.get('resolution_width', 720)) / meta.get('resolution_height', 480), 2) - except: - log.debug('Error parsing metadata: %s %s', (cur_file, traceback.format_exc())) - pass - - if data.get('audio'): break - - # Use the quality guess first, if that failes use the quality we wanted to download - data['quality'] = None - if release_download and release_download.get('quality'): - data['quality'] = fireEvent('quality.single', release_download.get('quality'), single = True) - - if not data['quality']: - data['quality'] = fireEvent('quality.guess', files = files, extra = data, single = True) - - if not data['quality']: - data['quality'] = fireEvent('quality.single', 'dvdr' if group['is_dvd'] else 'dvdrip', single = True) - - data['quality_type'] = 'HD' if data.get('resolution_width', 0) >= 1280 or data['quality'].get('hd') else 'SD' - - filename = re.sub('(.cp\(tt[0-9{7}]+\))', '', files[0]) - data['group'] = self.getGroup(filename[len(folder):]) - data['source'] = self.getSourceMedia(filename) - - return data - - def getMeta(self, filename): - - try: - p = enzyme.parse(filename) - - # Video codec - vc = ('h264' if p.video[0].codec == 'AVC1' else p.video[0].codec).lower() - - # Audio codec - ac = p.audio[0].codec - try: ac = self.audio_codec_map.get(p.audio[0].codec) - except: pass - - return { - 'video': vc, - 'audio': ac, - 'resolution_width': tryInt(p.video[0].width), - 'resolution_height': tryInt(p.video[0].height), - 'audio_channels': p.audio[0].channels, - } - except ParseError: - log.debug('Failed to parse meta for %s', filename) - except NoParserError: - log.debug('No parser found for %s', filename) - except: - log.debug('Failed parsing %s', filename) - - return {} - - def getSubtitleLanguage(self, group): - detected_languages = {} - - # Subliminal scanner - paths = None - try: - 
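# Illustrative sketch (not part of the patch): getMetaData() above falls back to 720x480
# defaults and derives an aspect ratio plus an HD/SD flag from the detected resolution.
# The same calculation in isolation:
def quality_summary(width = 720, height = 480, is_hd_quality = False):
    aspect = round(float(width) / height, 2)
    quality_type = 'HD' if width >= 1280 or is_hd_quality else 'SD'
    return {'aspect': aspect, 'quality_type': quality_type}

# quality_summary(1920, 1080) -> {'aspect': 1.78, 'quality_type': 'HD'}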
paths = group['files']['movie'] - scan_result = [] - for p in paths: - if not group['is_dvd']: - video = Video.from_path(toUnicode(p)) - video_result = [(video, video.scan())] - scan_result.extend(video_result) - - for video, detected_subtitles in scan_result: - for s in detected_subtitles: - if s.language and s.path not in paths: - detected_languages[s.path] = [s.language] - except: - log.debug('Failed parsing subtitle languages for %s: %s', (paths, traceback.format_exc())) - - # IDX - for extra in group['files']['subtitle_extra']: - try: - if os.path.isfile(extra): - output = open(extra, 'r') - txt = output.read() - output.close() - - idx_langs = re.findall('\nid: (\w+)', txt) - - sub_file = '%s.sub' % os.path.splitext(extra)[0] - if len(idx_langs) > 0 and os.path.isfile(sub_file): - detected_languages[sub_file] = idx_langs - except: - log.error('Failed parsing subtitle idx for %s: %s', (extra, traceback.format_exc())) - - return detected_languages - - def determineMedia(self, group, release_download = None): - - # Get imdb id from downloader - imdb_id = release_download and release_download.get('imdb_id') - if imdb_id: - log.debug('Found movie via imdb id from it\'s download id: %s', release_download.get('imdb_id')) - - files = group['files'] - - # Check for CP(imdb_id) string in the file paths - if not imdb_id: - for cur_file in files['movie']: - imdb_id = self.getCPImdb(cur_file) - if imdb_id: - log.debug('Found movie via CP tag: %s', cur_file) - break - - # Check and see if nfo contains the imdb-id - nfo_file = None - if not imdb_id: - try: - for nf in files['nfo']: - imdb_id = getImdb(nf, check_inside = True) - if imdb_id: - log.debug('Found movie via nfo file: %s', nf) - nfo_file = nf - break - except: - pass - - # Check and see if filenames contains the imdb-id - if not imdb_id: - try: - for filetype in files: - for filetype_file in files[filetype]: - imdb_id = getImdb(filetype_file) - if imdb_id: - log.debug('Found movie via imdb in filename: %s', nfo_file) - break - except: - pass - - # Search based on identifiers - if not imdb_id: - for identifier in group['identifiers']: - - if len(identifier) > 2: - try: filename = list(group['files'].get('movie'))[0] - except: filename = None - - name_year = self.getReleaseNameYear(identifier, file_name = filename if not group['is_dvd'] else None) - if name_year.get('name') and name_year.get('year'): - movie = fireEvent('movie.search', q = '%(name)s %(year)s' % name_year, merge = True, limit = 1) - - if len(movie) > 0: - imdb_id = movie[0].get('imdb') - log.debug('Found movie via search: %s', cur_file) - if imdb_id: break - else: - log.debug('Identifier to short to use for search: %s', identifier) - - if imdb_id: - try: - db = get_db() - return db.get('media', imdb_id, with_doc = True)['doc'] - except: - log.debug('Movie "%s" not in library, just getting info', imdb_id) - return { - 'identifier': imdb_id, - 'info': fireEvent('movie.info', identifier = imdb_id, merge = True, extended = False) - } - - log.error('No imdb_id found for %s. 
Add a NFO file with IMDB id or add the year to the filename.', group['identifiers']) - return {} - - def getCPImdb(self, string): - - try: - m = re.search(self.cp_imdb, string.lower()) - id = m.group('id') - if id: return id - except AttributeError: - pass - - return False - - def removeCPTag(self, name): - try: - return re.sub(self.cp_imdb, '', name) - except: - pass - return name - - def getSamples(self, files): - return set(filter(lambda s: self.isSampleFile(s), files)) - - def getMediaFiles(self, files): - - def test(s): - return self.filesizeBetween(s, self.file_sizes['movie']) and getExt(s.lower()) in self.extensions['movie'] and not self.isSampleFile(s) - - return set(filter(test, files)) - - def getMovieExtras(self, files): - return set(filter(lambda s: getExt(s.lower()) in self.extensions['movie_extra'], files)) - - def getDVDFiles(self, files): - def test(s): - return self.isDVDFile(s) - - return set(filter(test, files)) - - def getSubtitles(self, files): - return set(filter(lambda s: getExt(s.lower()) in self.extensions['subtitle'], files)) - - def getSubtitlesExtras(self, files): - return set(filter(lambda s: getExt(s.lower()) in self.extensions['subtitle_extra'], files)) - - def getNfo(self, files): - return set(filter(lambda s: getExt(s.lower()) in self.extensions['nfo'], files)) - - def getTrailers(self, files): - - def test(s): - return re.search('(^|[\W_])trailer\d*[\W_]', s.lower()) and self.filesizeBetween(s, self.file_sizes['trailer']) - - return set(filter(test, files)) - - def getImages(self, files): - - def test(s): - return getExt(s.lower()) in ['jpg', 'jpeg', 'png', 'gif', 'bmp', 'tbn'] - files = set(filter(test, files)) - - images = { - 'backdrop': set(filter(lambda s: re.search('(^|[\W_])fanart|backdrop\d*[\W_]', s.lower()) and self.filesizeBetween(s, self.file_sizes['backdrop']), files)) - } - - # Rest - images['rest'] = files - images['backdrop'] - - return images - - - def isDVDFile(self, file_name): - - if list(set(file_name.lower().split(os.path.sep)) & set(['video_ts', 'audio_ts'])): - return True - - for needle in ['vts_', 'video_ts', 'audio_ts', 'bdmv', 'certificate']: - if needle in file_name.lower(): - return True - - return False - - def keepFile(self, filename): - - # ignoredpaths - for i in self.ignored_in_path: - if i in filename.lower(): - log.debug('Ignored "%s" contains "%s".', (filename, i)) - return False - - # All is OK - return True - - def isSampleFile(self, filename): - is_sample = re.search('(^|[\W_])sample\d*[\W_]', filename.lower()) - if is_sample: log.debug('Is sample file: %s', filename) - return is_sample - - def filesizeBetween(self, file, file_size = None): - if not file_size: file_size = [] - - try: - return (file_size.get('min', 0) * 1048576) < os.path.getsize(file) < (file_size.get('max', 100000) * 1048576) - except: - log.error('Couldn\'t get filesize of %s.', file) - - return False - - def createStringIdentifier(self, file_path, folder = '', exclude_filename = False): - - year = self.findYear(file_path) - - identifier = file_path.replace(folder, '').lstrip(os.path.sep) # root folder - identifier = os.path.splitext(identifier)[0] # ext - - try: - path_split = splitString(identifier, os.path.sep) - identifier = path_split[-2] if len(path_split) > 1 and len(path_split[-2]) > len(path_split[-1]) else path_split[-1] # Only get filename - except: pass - - if exclude_filename: - identifier = identifier[:len(identifier) - len(os.path.split(identifier)[-1])] - - # multipart - identifier = self.removeMultipart(identifier) - - # remove 
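# Illustrative sketch (not part of the patch): several lookups above (the cp(...) tag, nfo
# contents, plain file names) reduce to spotting an IMDB identifier, which is "tt" followed
# by digits. A standalone helper for just that step:
import re

def find_imdb_id(text):
    match = re.search(r'tt\d{7,8}', text.lower())
    return match.group(0) if match else None

# find_imdb_id('My.Movie.2011.cp(tt0123456).mkv') -> 'tt0123456'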
cptag - identifier = self.removeCPTag(identifier) - - # groups, release tags, scenename cleaner, regex isn't correct - identifier = re.sub(self.clean, '::', simplifyString(identifier)).strip(':') - - # Year - if year and identifier[:4] != year: - split_by = ':::' if ':::' in identifier else year - identifier = '%s %s' % (identifier.split(split_by)[0].strip(), year) - else: - identifier = identifier.split('::')[0] - - # Remove duplicates - out = [] - for word in identifier.split(): - if not word in out: - out.append(word) - - identifier = ' '.join(out) - - return simplifyString(identifier) - - - def removeMultipart(self, name): - for regex in self.multipart_regex: - try: - found = re.sub(regex, '', name) - if found != name: - name = found - except: - pass - return name - - def getPartNumber(self, name): - for regex in self.multipart_regex: - try: - found = re.search(regex, name) - if found: - return found.group(1) - return 1 - except: - pass - return 1 - - def getCodec(self, filename, codecs): - codecs = map(re.escape, codecs) - try: - codec = re.search('[^A-Z0-9](?P' + '|'.join(codecs) + ')[^A-Z0-9]', filename, re.I) - return (codec and codec.group('codec')) or '' - except: - return '' - - def getGroup(self, file): - try: - match = re.findall('\-([A-Z0-9]+)[\.\/]', file, re.I) - return match[-1] or '' - except: - return '' - - def getSourceMedia(self, file): - for media in self.source_media: - for alias in self.source_media[media]: - if alias in file.lower(): - return media - - return None - - def findYear(self, text): - - # Search year inside () or [] first - matches = re.findall('(\(|\[)(?P19[0-9]{2}|20[0-9]{2})(\]|\))', text) - if matches: - return matches[-1][1] - - # Search normal - matches = re.findall('(?P19[0-9]{2}|20[0-9]{2})', text) - if matches: - return matches[-1] - - return '' - - def getReleaseNameYear(self, release_name, file_name = None): - - release_name = release_name.strip(' .-_') - - # Use guessit first - guess = {} - if file_name: - try: - guessit = guess_movie_info(toUnicode(file_name)) - if guessit.get('title') and guessit.get('year'): - guess = { - 'name': guessit.get('title'), - 'year': guessit.get('year'), - } - except: - log.debug('Could not detect via guessit "%s": %s', (file_name, traceback.format_exc())) - - # Backup to simple - cleaned = ' '.join(re.split('\W+', simplifyString(release_name))) - cleaned = re.sub(self.clean, ' ', cleaned) - - for year_str in [file_name, release_name, cleaned]: - if not year_str: continue - year = self.findYear(year_str) - if year: - break - - cp_guess = {} - - if year: # Split name on year - try: - movie_name = cleaned.rsplit(year, 1).pop(0).strip() - if movie_name: - cp_guess = { - 'name': movie_name, - 'year': int(year), - } - except: - pass - - if not cp_guess: # Split name on multiple spaces - try: - movie_name = cleaned.split(' ').pop(0).strip() - cp_guess = { - 'name': movie_name, - 'year': int(year) if movie_name[:4] != year else 0, - } - except: - pass - - if cp_guess.get('year') == guess.get('year') and len(cp_guess.get('name', '')) > len(guess.get('name', '')): - return cp_guess - elif guess == {}: - return cp_guess - - return guess diff --git a/couchpotato/core/plugins/score/__init__.py b/couchpotato/core/plugins/score/__init__.py index a960081..65cadd9 100644 --- a/couchpotato/core/plugins/score/__init__.py +++ b/couchpotato/core/plugins/score/__init__.py @@ -1,7 +1,5 @@ from .main import Score -def start(): +def autoload(): return Score() - -config = [] diff --git a/couchpotato/core/plugins/subtitle.py 
b/couchpotato/core/plugins/subtitle.py new file mode 100644 index 0000000..06a77f9 --- /dev/null +++ b/couchpotato/core/plugins/subtitle.py @@ -0,0 +1,76 @@ +from couchpotato.core.event import addEvent +from couchpotato.core.helpers.encoding import toUnicode, sp +from couchpotato.core.helpers.variable import splitString +from couchpotato.core.logger import CPLog +from couchpotato.core.plugins.base import Plugin +from couchpotato.environment import Env +import subliminal +import traceback + +log = CPLog(__name__) + +autoload = 'Subtitle' + + +class Subtitle(Plugin): + + services = ['opensubtitles', 'thesubdb', 'subswiki', 'podnapisi'] + + def __init__(self): + addEvent('renamer.before', self.searchSingle) + + def searchSingle(self, group): + if self.isDisabled(): return + + try: + available_languages = sum(group['subtitle_language'].values(), []) + downloaded = [] + files = [toUnicode(x) for x in group['files']['movie']] + log.debug('Searching for subtitles for: %s', files) + + for lang in self.getLanguages(): + if lang not in available_languages: + download = subliminal.download_subtitles(files, multi = True, force = False, languages = [lang], services = self.services, cache_dir = Env.get('cache_dir')) + for subtitle in download: + downloaded.extend(download[subtitle]) + + for d_sub in downloaded: + log.info('Found subtitle (%s): %s', (d_sub.language.alpha2, files)) + group['files']['subtitle'].append(sp(d_sub.path)) + group['before_rename'].append(sp(d_sub.path)) + group['subtitle_language'][sp(d_sub.path)] = [d_sub.language.alpha2] + + return True + + except: + log.error('Failed searching for subtitle: %s', (traceback.format_exc())) + + return False + + def getLanguages(self): + return splitString(self.conf('languages')) + + +config = [{ + 'name': 'subtitle', + 'groups': [ + { + 'tab': 'renamer', + 'name': 'subtitle', + 'label': 'Download subtitles', + 'description': 'after rename', + 'options': [ + { + 'name': 'enabled', + 'label': 'Search and download subtitles', + 'default': False, + 'type': 'enabler', + }, + { + 'name': 'languages', + 'description': ('Comma separated, 2 letter country code.', 'Example: en, nl. See the codes at on Wikipedia'), + }, + ], + }, + ], +}] diff --git a/couchpotato/core/plugins/subtitle/__init__.py b/couchpotato/core/plugins/subtitle/__init__.py deleted file mode 100644 index 59847ae..0000000 --- a/couchpotato/core/plugins/subtitle/__init__.py +++ /dev/null @@ -1,35 +0,0 @@ -from .main import Subtitle - - -def start(): - return Subtitle() - -config = [{ - 'name': 'subtitle', - 'groups': [ - { - 'tab': 'renamer', - 'name': 'subtitle', - 'label': 'Download subtitles', - 'description': 'after rename', - 'options': [ - { - 'name': 'enabled', - 'label': 'Search and download subtitles', - 'default': False, - 'type': 'enabler', - }, - { - 'name': 'languages', - 'description': ('Comma separated, 2 letter country code.', 'Example: en, nl. 
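# Illustrative sketch (not part of the patch): the 'languages' option above is a comma
# separated list of 2-letter codes that getLanguages() feeds to subliminal via splitString().
# An assumed standalone equivalent of that parsing step (trim whitespace, drop empties):
def parse_languages(value):
    return [code.strip() for code in (value or '').split(',') if code.strip()]

# parse_languages('en, nl') -> ['en', 'nl']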
See the codes at on Wikipedia'), - }, -# { -# 'name': 'automatic', -# 'default': True, -# 'type': 'bool', -# 'description': 'Automaticly search & download for movies in library', -# }, - ], - }, - ], -}] diff --git a/couchpotato/core/plugins/subtitle/main.py b/couchpotato/core/plugins/subtitle/main.py deleted file mode 100644 index c00c757..0000000 --- a/couchpotato/core/plugins/subtitle/main.py +++ /dev/null @@ -1,49 +0,0 @@ -from couchpotato.core.event import addEvent -from couchpotato.core.helpers.encoding import toUnicode, sp -from couchpotato.core.helpers.variable import splitString -from couchpotato.core.logger import CPLog -from couchpotato.core.plugins.base import Plugin -from couchpotato.environment import Env -import subliminal -import traceback - -log = CPLog(__name__) - - -class Subtitle(Plugin): - - services = ['opensubtitles', 'thesubdb', 'subswiki', 'podnapisi'] - - def __init__(self): - addEvent('renamer.before', self.searchSingle) - - def searchSingle(self, group): - if self.isDisabled(): return - - try: - available_languages = sum(group['subtitle_language'].values(), []) - downloaded = [] - files = [toUnicode(x) for x in group['files']['movie']] - log.debug('Searching for subtitles for: %s', files) - - for lang in self.getLanguages(): - if lang not in available_languages: - download = subliminal.download_subtitles(files, multi = True, force = False, languages = [lang], services = self.services, cache_dir = Env.get('cache_dir')) - for subtitle in download: - downloaded.extend(download[subtitle]) - - for d_sub in downloaded: - log.info('Found subtitle (%s): %s', (d_sub.language.alpha2, files)) - group['files']['subtitle'].append(sp(d_sub.path)) - group['before_rename'].append(sp(d_sub.path)) - group['subtitle_language'][sp(d_sub.path)] = [d_sub.language.alpha2] - - return True - - except: - log.error('Failed searching for subtitle: %s', (traceback.format_exc())) - - return False - - def getLanguages(self): - return splitString(self.conf('languages')) diff --git a/couchpotato/core/plugins/trailer.py b/couchpotato/core/plugins/trailer.py new file mode 100644 index 0000000..b202e93 --- /dev/null +++ b/couchpotato/core/plugins/trailer.py @@ -0,0 +1,78 @@ +from couchpotato.core.event import addEvent, fireEvent +from couchpotato.core.helpers.variable import getExt, getTitle +from couchpotato.core.logger import CPLog +from couchpotato.core.plugins.base import Plugin +import os + +log = CPLog(__name__) + +autoload = 'Trailer' + + +class Trailer(Plugin): + + def __init__(self): + addEvent('renamer.after', self.searchSingle) + + def searchSingle(self, message = None, group = None): + if not group: group = {} + if self.isDisabled() or len(group['files']['trailer']) > 0: return + + trailers = fireEvent('trailer.search', group = group, merge = True) + if not trailers or trailers == []: + log.info('No trailers found for: %s', getTitle(group)) + return False + + for trailer in trailers.get(self.conf('quality'), []): + + ext = getExt(trailer) + filename = self.conf('name').replace('', group['filename']) + ('.%s' % ('mp4' if len(ext) > 5 else ext)) + destination = os.path.join(group['destination_dir'], filename) + if not os.path.isfile(destination): + trailer_file = fireEvent('file.download', url = trailer, dest = destination, urlopen_kwargs = {'headers': {'User-Agent': 'Quicktime'}}, single = True) + if os.path.getsize(trailer_file) < (1024 * 1024): # Don't trust small trailers (1MB), try next one + os.unlink(trailer_file) + continue + else: + log.debug('Trailer already exists: %s', 
destination) + + group['renamed_files'].append(destination) + + # Download first and break + break + + return True + + +config = [{ + 'name': 'trailer', + 'groups': [ + { + 'tab': 'renamer', + 'name': 'trailer', + 'label': 'Download trailer', + 'description': 'after rename', + 'options': [ + { + 'name': 'enabled', + 'label': 'Search and download trailers', + 'default': False, + 'type': 'enabler', + }, + { + 'name': 'quality', + 'default': '720p', + 'type': 'dropdown', + 'values': [('1080p', '1080p'), ('720p', '720p'), ('480P', '480p')], + }, + { + 'name': 'name', + 'label': 'Naming', + 'default': '-trailer', + 'advanced': True, + 'description': 'Use <filename> to use above settings.' + }, + ], + }, + ], +}] diff --git a/couchpotato/core/plugins/trailer/__init__.py b/couchpotato/core/plugins/trailer/__init__.py deleted file mode 100644 index e7a6d26..0000000 --- a/couchpotato/core/plugins/trailer/__init__.py +++ /dev/null @@ -1,38 +0,0 @@ -from .main import Trailer - - -def start(): - return Trailer() - -config = [{ - 'name': 'trailer', - 'groups': [ - { - 'tab': 'renamer', - 'name': 'trailer', - 'label': 'Download trailer', - 'description': 'after rename', - 'options': [ - { - 'name': 'enabled', - 'label': 'Search and download trailers', - 'default': False, - 'type': 'enabler', - }, - { - 'name': 'quality', - 'default': '720p', - 'type': 'dropdown', - 'values': [('1080p', '1080p'), ('720p', '720p'), ('480P', '480p')], - }, - { - 'name': 'name', - 'label': 'Naming', - 'default': '-trailer', - 'advanced': True, - 'description': 'Use <filename> to use above settings.' - }, - ], - }, - ], -}] diff --git a/couchpotato/core/plugins/trailer/main.py b/couchpotato/core/plugins/trailer/main.py deleted file mode 100644 index 3608406..0000000 --- a/couchpotato/core/plugins/trailer/main.py +++ /dev/null @@ -1,42 +0,0 @@ -from couchpotato.core.event import addEvent, fireEvent -from couchpotato.core.helpers.variable import getExt, getTitle -from couchpotato.core.logger import CPLog -from couchpotato.core.plugins.base import Plugin -import os - -log = CPLog(__name__) - - -class Trailer(Plugin): - - def __init__(self): - addEvent('renamer.after', self.searchSingle) - - def searchSingle(self, message = None, group = None): - if not group: group = {} - if self.isDisabled() or len(group['files']['trailer']) > 0: return - - trailers = fireEvent('trailer.search', group = group, merge = True) - if not trailers or trailers == []: - log.info('No trailers found for: %s', getTitle(group)) - return False - - for trailer in trailers.get(self.conf('quality'), []): - - ext = getExt(trailer) - filename = self.conf('name').replace('', group['filename']) + ('.%s' % ('mp4' if len(ext) > 5 else ext)) - destination = os.path.join(group['destination_dir'], filename) - if not os.path.isfile(destination): - trailer_file = fireEvent('file.download', url = trailer, dest = destination, urlopen_kwargs = {'headers': {'User-Agent': 'Quicktime'}}, single = True) - if os.path.getsize(trailer_file) < (1024 * 1024): # Don't trust small trailers (1MB), try next one - os.unlink(trailer_file) - continue - else: - log.debug('Trailer already exists: %s', destination) - - group['renamed_files'].append(destination) - - # Download first and break - break - - return True diff --git a/couchpotato/core/plugins/userscript/__init__.py b/couchpotato/core/plugins/userscript/__init__.py index 184f5d7..9d70859 100644 --- a/couchpotato/core/plugins/userscript/__init__.py +++ b/couchpotato/core/plugins/userscript/__init__.py @@ -1,7 +1,5 @@ from .main 
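# Illustrative sketch (not part of the patch): the trailer naming option above documents a
# <filename> placeholder, and the download code falls back to 'mp4' when the URL extension
# looks too long to be real. Building the destination path in isolation (helper name and the
# example template are assumptions):
import os

def trailer_destination(destination_dir, movie_filename, name_template, url_ext):
    ext = url_ext if len(url_ext) <= 5 else 'mp4'
    filename = name_template.replace('<filename>', movie_filename) + '.' + ext
    return os.path.join(destination_dir, filename)

# trailer_destination('/movies/Movie (2011)', 'Movie (2011)', '<filename>-trailer', 'mp4')
# -> '/movies/Movie (2011)/Movie (2011)-trailer.mp4'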
import Userscript -def start(): +def autoload(): return Userscript() - -config = [] diff --git a/couchpotato/core/plugins/wizard/__init__.py b/couchpotato/core/plugins/wizard/__init__.py index eda6f25..7a272b4 100644 --- a/couchpotato/core/plugins/wizard/__init__.py +++ b/couchpotato/core/plugins/wizard/__init__.py @@ -1,7 +1,7 @@ from .main import Wizard -def start(): +def autoload(): return Wizard() config = [{
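# Illustrative sketch (not part of the patch): the __init__.py hunks above rename start() to
# autoload(), while the new single-file plugins expose a module-level autoload name and a
# config list instead. A hypothetical loader that accepts both shapes (this is an assumption
# about how the plugin loader consumes these modules, not actual CouchPotato code):
def load_plugin(module):
    autoload = getattr(module, 'autoload', None)
    if callable(autoload):            # package style: def autoload(): return Plugin()
        return autoload()
    if isinstance(autoload, str):     # single-file style: autoload = 'ClassName'
        return getattr(module, autoload)()
    return None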