Browse Source

Scanner, Downloaders, Providers

pull/1/merge
Ruud 14 years ago
parent
commit
09fed4ca63
  1. 2
      couchpotato/__init__.py
  2. 13
      couchpotato/core/downloaders/base.py
  3. 20
      couchpotato/core/downloaders/blackhole/main.py
  4. 24
      couchpotato/core/event.py
  5. 6
      couchpotato/core/helpers/variable.py
  6. 7
      couchpotato/core/loader.py
  7. 6
      couchpotato/core/plugins/base.py
  8. 15
      couchpotato/core/plugins/file/main.py
  9. 8
      couchpotato/core/plugins/file/static/file.js
  10. 39
      couchpotato/core/plugins/library/main.py
  11. 16
      couchpotato/core/plugins/movie/main.py
  12. 4
      couchpotato/core/plugins/profile/main.py
  13. 3
      couchpotato/core/plugins/quality/main.py
  14. 4
      couchpotato/core/plugins/renamer/main.py
  15. 6
      couchpotato/core/plugins/scanner/__init__.py
  16. 477
      couchpotato/core/plugins/scanner/main.py
  17. 43
      couchpotato/core/plugins/searcher/main.py
  18. 9
      couchpotato/core/plugins/status/main.py
  19. 3
      couchpotato/core/providers/base.py
  20. 23
      couchpotato/core/providers/movie/themoviedb/main.py
  21. 138
      couchpotato/core/providers/nzb/newzbin/main.py
  22. 100
      couchpotato/core/providers/nzb/nzbs/main.py
  23. 24
      couchpotato/core/settings/model.py
  24. 1
      couchpotato/environment.py
  25. 2
      libs/axl/axel.py
  26. 9
      libs/getmeta.py

2
couchpotato/__init__.py

@ -24,7 +24,7 @@ def get_session(engine = None):
return scoped_session(sessionmaker(bind = engine)) return scoped_session(sessionmaker(bind = engine))
def get_engine(): def get_engine():
return create_engine(Env.get('db_path'), echo = False) return create_engine(Env.get('db_path')+'?check_same_thread=False', echo = False)
def addView(route, func, static = False): def addView(route, func, static = False):
web.add_url_rule(route + ('' if static else '/'), endpoint = route if route else 'index', view_func = func) web.add_url_rule(route + ('' if static else '/'), endpoint = route if route else 'index', view_func = func)

13
couchpotato/core/downloaders/base.py

@ -1,9 +1,14 @@
from couchpotato.core.event import addEvent from couchpotato.core.event import addEvent
from couchpotato.core.logger import CPLog
from couchpotato.core.plugins.base import Plugin from couchpotato.core.plugins.base import Plugin
log = CPLog(__name__)
class Downloader(Plugin): class Downloader(Plugin):
type = []
def __init__(self): def __init__(self):
addEvent('download', self.download) addEvent('download', self.download)
@ -15,3 +20,11 @@ class Downloader(Plugin):
def isEnabled(self): def isEnabled(self):
return self.conf('enabled', True) return self.conf('enabled', True)
def isCorrectType(self, type):
is_correct = type in self.type
if not is_correct:
log.debug("Downloader doesn't support this type")
return bool

20
couchpotato/core/downloaders/blackhole/main.py

@ -1,8 +1,10 @@
from __future__ import with_statement from __future__ import with_statement
from couchpotato.core.downloaders.base import Downloader
from couchpotato.core.helpers.encoding import toSafeString from couchpotato.core.helpers.encoding import toSafeString
from couchpotato.core.logger import CPLog from couchpotato.core.logger import CPLog
from couchpotato.core.downloaders.base import Downloader from inspect import isfunction
import os import os
import traceback
import urllib import urllib
log = CPLog(__name__) log = CPLog(__name__)
@ -21,16 +23,28 @@ class Blackhole(Downloader):
if not directory or not os.path.isdir(directory): if not directory or not os.path.isdir(directory):
log.error('No directory set for blackhole %s download.' % data.get('type')) log.error('No directory set for blackhole %s download.' % data.get('type'))
else: else:
fullPath = os.path.join(directory, toSafeString(data.get('name')) + '.' + data) fullPath = os.path.join(directory, toSafeString(data.get('name')) + '.' + data.get('type'))
try:
if not os.path.isfile(fullPath): if not os.path.isfile(fullPath):
log.info('Downloading %s to %s.' % (data.get('type'), fullPath)) log.info('Downloading %s to %s.' % (data.get('type'), fullPath))
if isfunction(data.get('download')):
file = data.get('download')()
if not file:
log.debug('Failed download file: %s' % data.get('name'))
return False
else:
file = urllib.urlopen(data.get('url')).read() file = urllib.urlopen(data.get('url')).read()
with open(fullPath, 'wb') as f: with open(fullPath, 'wb') as f:
f.write(file) f.write(file)
return True return True
else: else:
log.error('File %s already exists.' % fullPath) log.info('File %s already exists.' % fullPath)
return True
except:
log.error('Failed to download to blackhole %s' % traceback.format_exc())
pass
return False return False

24
couchpotato/core/event.py

@ -43,24 +43,30 @@ def fireEvent(name, *args, **kwargs):
result = e(*args, **kwargs) result = e(*args, **kwargs)
if single and not merge: if single and not merge:
results = None
if result[0][0] == True and result[0][1]:
results = result[0][1] results = result[0][1]
elif result[0][1]:
errorHandler(result[0][1])
else: else:
results = [] results = []
for r in result: for r in result:
if r[0] == True: if r[0] == True and r[1]:
results.append(r[1]) results.append(r[1])
else: elif r[1]:
errorHandler(r[1]) errorHandler(r[1])
# Merge dict # Merge
if merge and type(results[0]) == dict: if merge and len(results) > 0:
# Dict
if type(results[0]) == dict:
merged = {} merged = {}
for result in results: for result in results:
merged = mergeDicts(merged, result) merged = mergeDicts(merged, result)
results = merged results = merged
# Merg lists # Lists
elif merge and type(results[0]) == list: elif type(results[0]) == list:
merged = [] merged = []
for result in results: for result in results:
merged += result merged += result
@ -68,10 +74,10 @@ def fireEvent(name, *args, **kwargs):
results = merged results = merged
return results return results
except KeyError: except KeyError, e:
pass pass
except Exception, e: except Exception:
log.error('%s: %s' % (name, e)) log.error('%s: %s' % (name, traceback.format_exc()))
def fireEventAsync(name, *args, **kwargs): def fireEventAsync(name, *args, **kwargs):
#log.debug('Async "%s": %s, %s' % (name, args, kwargs)) #log.debug('Async "%s": %s, %s' % (name, args, kwargs))

6
couchpotato/core/helpers/variable.py

@ -22,6 +22,12 @@ def mergeDicts(a, b):
current_dst[key] = current_src[key] current_dst[key] = current_src[key]
return dst return dst
def flattenList(l):
    """Recursively flatten nested lists into one flat list.

    Non-list input is returned unchanged, matching the original contract.
    Fixed: the previous `sum(map(flattenList, l))` raised TypeError for
    lists of lists (no start value) and arithmetically summed flat lists
    of numbers instead of returning them.
    """
    if isinstance(l, list):
        flat = []
        for item in l:
            flattened = flattenList(item)
            if isinstance(flattened, list):
                flat.extend(flattened)
            else:
                flat.append(flattened)
        return flat
    else:
        return l
def md5(text): def md5(text):
return hashlib.md5(text).hexdigest() return hashlib.md5(text).hexdigest()

7
couchpotato/core/loader.py

@ -2,6 +2,7 @@ from couchpotato.core.event import fireEvent
from couchpotato.core.logger import CPLog from couchpotato.core.logger import CPLog
import glob import glob
import os import os
import traceback
log = CPLog(__name__) log = CPLog(__name__)
@ -49,7 +50,7 @@ class Loader:
self.loadPlugins(m, plugin.get('name')) self.loadPlugins(m, plugin.get('name'))
except Exception, e: except Exception, e:
log.error('Can\'t import %s: %s' % (module_name, e)) log.error('Can\'t import %s: %s' % (module_name, traceback.format_exc()))
if did_save: if did_save:
fireEvent('settings.save') fireEvent('settings.save')
@ -73,7 +74,7 @@ class Loader:
fireEvent('settings.register', section_name = section['name'], options = options, save = save) fireEvent('settings.register', section_name = section['name'], options = options, save = save)
return True return True
except Exception, e: except Exception, e:
log.debug("Failed loading settings for '%s': %s" % (name, e)) log.debug("Failed loading settings for '%s': %s" % (name, traceback.format_exc()))
return False return False
def loadPlugins(self, module, name): def loadPlugins(self, module, name):
@ -81,7 +82,7 @@ class Loader:
module.start() module.start()
return True return True
except Exception, e: except Exception, e:
log.error("Failed loading plugin '%s': %s" % (name, e)) log.error("Failed loading plugin '%s': %s" % (name, traceback.format_exc()))
return False return False
def addModule(self, priority, type, module, name): def addModule(self, priority, type, module, name):

6
couchpotato/core/plugins/base.py

@ -1,4 +1,4 @@
from couchpotato import addView from couchpotato import addView, get_session
from couchpotato.environment import Env from couchpotato.environment import Env
from flask.helpers import send_from_directory from flask.helpers import send_from_directory
import os.path import os.path
@ -7,8 +7,8 @@ import re
class Plugin(): class Plugin():
def conf(self, attr): def conf(self, attr, default = None):
return Env.setting(attr, self.getName().lower()) return Env.setting(attr, self.getName().lower(), default = default)
def getName(self): def getName(self):
return self.__class__.__name__ return self.__class__.__name__

15
couchpotato/core/plugins/file/main.py

@ -1,6 +1,7 @@
from couchpotato import get_session from couchpotato import get_session
from couchpotato.api import addApiView from couchpotato.api import addApiView
from couchpotato.core.event import addEvent, fireEvent from couchpotato.core.event import addEvent, fireEvent
from couchpotato.core.helpers.encoding import toUnicode
from couchpotato.core.helpers.variable import md5, getExt from couchpotato.core.helpers.variable import md5, getExt
from couchpotato.core.logger import CPLog from couchpotato.core.logger import CPLog
from couchpotato.core.plugins.base import Plugin from couchpotato.core.plugins.base import Plugin
@ -55,23 +56,24 @@ class FileManager(Plugin):
return False return False
def add(self, path = '', part = 1, type = (), properties = {}): def add(self, path = '', part = 1, type = (), available = 1, properties = {}):
db = get_session() db = get_session()
f = db.query(File).filter_by(path = path).first() f = db.query(File).filter_by(path = toUnicode(path)).first()
if not f: if not f:
f = File() f = File()
db.add(f) db.add(f)
f.path = path f.path = path
f.part = part f.part = part
f.available = available
f.type_id = self.getType(type).id f.type_id = self.getType(type).id
db.commit() db.commit()
db.expunge(f) file_dict = f.to_dict()
return f
return file_dict
def getType(self, type): def getType(self, type):
@ -100,7 +102,6 @@ class FileManager(Plugin):
types = [] types = []
for type in results: for type in results:
temp = type.to_dict() types.append(type.to_dict())
types.append(temp)
return types return types

8
couchpotato/core/plugins/file/static/file.js

@ -36,12 +36,8 @@ var FileSelect = new Class({
return file.type_id == File.Type.get(type).id; return file.type_id == File.Type.get(type).id;
}); });
if(single){ if(single)
results = new File(results.pop()); return new File(results.pop());
}
else {
}
return results; return results;

39
couchpotato/core/plugins/library/main.py

@ -2,7 +2,8 @@ from couchpotato import get_session
from couchpotato.core.event import addEvent, fireEventAsync, fireEvent from couchpotato.core.event import addEvent, fireEventAsync, fireEvent
from couchpotato.core.logger import CPLog from couchpotato.core.logger import CPLog
from couchpotato.core.plugins.base import Plugin from couchpotato.core.plugins.base import Plugin
from couchpotato.core.settings.model import Library, LibraryTitle from couchpotato.core.settings.model import Library, LibraryTitle, File
import traceback
log = CPLog(__name__) log = CPLog(__name__)
@ -12,17 +13,19 @@ class LibraryPlugin(Plugin):
addEvent('library.add', self.add) addEvent('library.add', self.add)
addEvent('library.update', self.update) addEvent('library.update', self.update)
def add(self, attrs = {}): def add(self, attrs = {}, update_after = True):
db = get_session() db = get_session()
l = db.query(Library).filter_by(identifier = attrs.get('identifier')).first() l = db.query(Library).filter_by(identifier = attrs.get('identifier')).first()
if not l: if not l:
status = fireEvent('status.get', 'needs_update', single = True)
l = Library( l = Library(
year = attrs.get('year'), year = attrs.get('year'),
identifier = attrs.get('identifier'), identifier = attrs.get('identifier'),
plot = attrs.get('plot'), plot = attrs.get('plot'),
tagline = attrs.get('tagline') tagline = attrs.get('tagline'),
status_id = status.get('id')
) )
title = LibraryTitle( title = LibraryTitle(
@ -35,26 +38,36 @@ class LibraryPlugin(Plugin):
db.commit() db.commit()
# Update library info # Update library info
fireEventAsync('library.update', library = l, default_title = attrs.get('title', '')) if update_after:
fireEventAsync('library.update', identifier = l.identifier, default_title = attrs.get('title', ''))
#db.remove() library_dict = l.to_dict()
return l
def update(self, library, default_title = ''): return library_dict
def update(self, identifier, default_title = '', force = False):
db = get_session() db = get_session()
library = db.query(Library).filter_by(identifier = library.identifier).first() library = db.query(Library).filter_by(identifier = identifier).first()
done_status = fireEvent('status.get', 'done', single = True)
if library.status_id == done_status.get('id') and not force:
return
info = fireEvent('provider.movie.info', merge = True, identifier = library.identifier) info = fireEvent('provider.movie.info', merge = True, identifier = identifier)
if not info or len(info) == 0:
log.error('Could not update, no movie info to work with: %s' % identifier)
return
# Main info # Main info
library.plot = info.get('plot', '') library.plot = info.get('plot', '')
library.tagline = info.get('tagline', '') library.tagline = info.get('tagline', '')
library.year = info.get('year', 0) library.year = info.get('year', 0)
library.status_id = done_status.get('id')
# Titles # Titles
[db.delete(title) for title in library.titles] [db.delete(title) for title in library.titles]
titles = info.get('titles') titles = info.get('titles', [])
log.debug('Adding titles: %s' % titles) log.debug('Adding titles: %s' % titles)
for title in titles: for title in titles:
@ -67,15 +80,17 @@ class LibraryPlugin(Plugin):
db.commit() db.commit()
# Files # Files
images = info.get('images') images = info.get('images', [])
for type in images: for type in images:
for image in images[type]: for image in images[type]:
file_path = fireEvent('file.download', url = image, single = True) file_path = fireEvent('file.download', url = image, single = True)
file = fireEvent('file.add', path = file_path, type = ('image', type[:-1]), single = True) file = fireEvent('file.add', path = file_path, type = ('image', type[:-1]), single = True)
try: try:
file = db.query(File).filter_by(id = file.get('id')).one()
library.files.append(file) library.files.append(file)
db.commit() db.commit()
except: except:
log.debug('File already attached to library') pass
#log.debug('Failed to attach to library: %s' % traceback.format_exc())
fireEvent('library.update.after') fireEvent('library.update.after')

16
couchpotato/core/plugins/movie/main.py

@ -101,21 +101,23 @@ class MoviePlugin(Plugin):
m = db.query(Movie).filter_by(library_id = library.id).first() m = db.query(Movie).filter_by(library_id = library.id).first()
if not m: if not m:
m = Movie( m = Movie(
library_id = library.id, library_id = library.get('id'),
profile_id = params.get('profile_id') profile_id = params.get('profile_id')
) )
db.add(m) db.add(m)
m.status_id = status.id m.status_id = status.get('id')
db.commit() db.commit()
return jsonified({ movie_dict = m.to_dict(deep = {
'success': True,
'added': True,
'movie': m.to_dict(deep = {
'releases': {'status': {}, 'quality': {}}, 'releases': {'status': {}, 'quality': {}},
'library': {'titles': {}} 'library': {'titles': {}}
}) })
return jsonified({
'success': True,
'added': True,
'movie': movie_dict,
}) })
def edit(self): def edit(self):
@ -129,7 +131,7 @@ class MoviePlugin(Plugin):
status = fireEvent('status.add', 'deleted', single = True) status = fireEvent('status.add', 'deleted', single = True)
movie = db.query(Movie).filter_by(id = params.get('id')).first() movie = db.query(Movie).filter_by(id = params.get('id')).first()
movie.status_id = status.id movie.status_id = status.get('id')
db.commit() db.commit()
return jsonified({ return jsonified({

4
couchpotato/core/plugins/profile/main.py

@ -60,9 +60,11 @@ class ProfilePlugin(Plugin):
db.commit() db.commit()
profile_dict = p.to_dict(deep = {'types': {}})
return jsonified({ return jsonified({
'success': True, 'success': True,
'profile': p.to_dict(deep = {'types': {}}) 'profile': profile_dict
}) })
def delete(self): def delete(self):

3
couchpotato/core/plugins/quality/main.py

@ -52,8 +52,9 @@ class QualityPlugin(Plugin):
db = get_session() db = get_session()
quality = db.query(Quality).filter_by(identifier = identifier).first() quality = db.query(Quality).filter_by(identifier = identifier).first()
quality_dict = dict(self.getQuality(quality.identifier), **quality.to_dict())
return dict(self.getQuality(quality.identifier), **quality.to_dict()) return quality_dict
def getQuality(self, identifier): def getQuality(self, identifier):

4
couchpotato/core/plugins/renamer/main.py

@ -13,7 +13,7 @@ class Renamer(Plugin):
addEvent('renamer.scan', self.scan) addEvent('renamer.scan', self.scan)
addEvent('app.load', self.scan) addEvent('app.load', self.scan)
fireEvent('schedule.interval', 'renamer.scan', self.scan, minutes = self.conf('run_every')) #fireEvent('schedule.interval', 'renamer.scan', self.scan, minutes = self.conf('run_every'))
def scan(self): def scan(self):
print 'scan' pass

6
couchpotato/core/plugins/scanner/__init__.py

@ -0,0 +1,6 @@
from .main import Scanner
def start():
    # Plugin entry point used by the loader: build the Scanner instance.
    return Scanner()
config = []

477
couchpotato/core/plugins/scanner/main.py

@ -0,0 +1,477 @@
from couchpotato import get_session
from couchpotato.core.event import fireEvent, addEvent
from couchpotato.core.helpers.encoding import toUnicode
from couchpotato.core.helpers.variable import getExt
from couchpotato.core.logger import CPLog
from couchpotato.core.plugins.base import Plugin
from couchpotato.core.settings.model import File, Library, Release, Movie
from couchpotato.environment import Env
from flask.helpers import json
from themoviedb.tmdb import opensubtitleHashFile
import os
import re
import subprocess
import traceback
log = CPLog(__name__)
class Scanner(Plugin):
    # Plugin that walks a folder, groups media files into movie "groups",
    # matches them to a library entry and stores releases in the database.

    # Sizes in bytes; files below these are not treated as media / trailers.
    minimal_filesize = {
        'media': 314572800, # 300MB
        'trailer': 1048576, # 1MB
    }

    # Path fragments that mark a file as ignorable.
    ignored_in_path = ['_unpack', '_failed_', '_unknown_', '_exists_', '.appledouble', '.appledb', '.appledesktop', os.path.sep + '._', '.ds_store', 'cp.cpnfo'] #unpacking, smb-crap, hidden files

    # Generic folder/file names that are useless as movie identifiers.
    ignore_names = ['extract', 'extracting', 'extracted', 'movie', 'movies', 'film', 'films']

    # File extensions per detected category.
    extensions = {
        'movie': ['mkv', 'wmv', 'avi', 'mpg', 'mpeg', 'mp4', 'm2ts', 'iso', 'img'],
        'dvd': ['vts_*', 'vob'],
        'nfo': ['nfo', 'txt', 'tag'],
        'subtitle': ['sub', 'srt', 'ssa', 'ass'],
        'subtitle_extra': ['idx'],
        'trailer': ['mov', 'mp4', 'flv']
    }

    # Scanner category -> (type, subtype) tuple passed to the 'file.add' event.
    file_types = {
        'subtitle': ('subtitle', 'subtitle'),
        'trailer': ('video', 'trailer'),
        'nfo': ('nfo', 'nfo'),
        'movie': ('video', 'movie'),
        'backdrop': ('image', 'backdrop'),
    }

    # Known codec tags seen in release names.
    codecs = {
        'audio': ['dts', 'ac3', 'ac3d', 'mp3'],
        'video': ['x264', 'divx', 'xvid']
    }

    # Source-media tags seen in release names.
    source_media = {
        'bluray': ['bluray', 'blu-ray', 'brrip', 'br-rip'],
        'hddvd': ['hddvd', 'hd-dvd'],
        'dvd': ['dvd'],
        'hdtv': ['hdtv']
    }

    # Regex that strips common release-name noise (codecs, sources, resolutions, cd1..9, [...]).
    clean = '(?i)[^\s](ac3|dts|custom|dc|divx|divx5|dsr|dsrip|dutch|dvd|dvdrip|dvdscr|dvdscreener|screener|dvdivx|cam|fragment|fs|hdtv|hdrip|hdtvrip|internal|limited|multisubs|ntsc|ogg|ogm|pal|pdtv|proper|repack|rerip|retail|r3|r5|bd5|se|svcd|swedish|german|read.nfo|nfofix|unrated|ws|telesync|ts|telecine|tc|brrip|bdrip|480p|480i|576p|576i|720p|720i|1080p|1080i|hrhd|hrhdtv|hddvd|bluray|x264|h264|xvid|xvidvd|xxx|www.www|cd[1-9]|\[.*\])[^\s]*'

    # Patterns used to detect and strip multipart markers (cd1, part2, disk1, ...).
    multipart_regex = [
        '[ _\.-]+cd[ _\.-]*([0-9a-d]+)', #*cd1
        '[ _\.-]+dvd[ _\.-]*([0-9a-d]+)', #*dvd1
        '[ _\.-]+part[ _\.-]*([0-9a-d]+)', #*part1.mkv
        '[ _\.-]+dis[ck][ _\.-]*([0-9a-d]+)', #*disk1.mkv
        '()[ _\.-]+([0-9]*[abcd]+)(\.....?)$',
        '([a-z])([0-9]+)(\.....?)$',
        '()([ab])(\.....?)$' #*a.mkv
    ]
def __init__(self):
    # Kick off a full library scan once the application has finished loading.
    addEvent('app.load', self.scan)
def scan(self, folder = '/Volumes/Media/Test/'):
    # NOTE(review): the default folder looks like a developer test path —
    # confirm callers always pass an explicit folder before release.
    """
    Get all files
        For each file larger then 350MB
            create movie "group", this is where all movie files will be grouped
            group multipart together
            check if its DVD (VIDEO_TS)

    # This should work for non-folder based structure
    for each moviegroup
        for each file smaller then 350MB, allfiles.filter(moviename*)
            # Assuming the beginning of the filename is the same for this structure
            Movie is masterfile, moviename-cd1.ext -> moviename
            Find other files connected to moviename, moviename*.nfo, moviename*.sub, moviename*trailer.ext

            Remove found file from allfiles

    # This should work for folder based structure
    for each leftover file
        Loop over leftover files, use dirname as moviename

    For each found movie
        determine filetype

        Check if it's already in the db
        Add it to database
    """

    # Get movie "master" files
    movie_files = {}
    leftovers = []
    for root, dirs, files in os.walk(folder):
        for filename in files:
            file_path = os.path.join(root, filename)

            # Remove ignored files
            if not self.keepFile(file_path):
                continue

            is_dvd_file = self.isDVDFile(file_path)
            if os.path.getsize(file_path) > self.minimal_filesize['media'] or is_dvd_file: # Minimal 300MB files or is DVD file

                # DVD files are grouped by directory, normal files by cleaned name
                identifier = self.createFileIdentifier(file_path, folder, exclude_filename = is_dvd_file)

                if not movie_files.get(identifier):
                    movie_files[identifier] = {
                        'unsorted_files': [],
                        'identifiers': [],
                        'is_dvd': is_dvd_file,
                    }

                movie_files[identifier]['unsorted_files'].append(file_path)
            else:
                # Small files (nfo, subs, images...) are matched to a group later
                leftovers.append(file_path)

    # Sort reverse, this prevents "Iron man 2" from getting grouped with "Iron man" as the "Iron Man 2"
    # files will be grouped first.
    leftovers = set(sorted(leftovers, reverse = True))

    # Progressively looser ways to derive extra identifiers for a group
    id_handles = [
        None, # Attach files to group by identifier
        lambda x: os.path.split(x)[-1], # Attach files via filename of master_file name only
        os.path.dirname, # Attach files via master_file dirname
    ]

    # Create identifier based on handle
    for handler in id_handles:
        for identifier, group in movie_files.iteritems():
            # NOTE(review): the loop variable `identifier` (a dict key) is
            # rebound here; the original key is no longer reachable below.
            identifier = handler(identifier) if handler else identifier
            if identifier not in group['identifiers'] and len(identifier) > 0: group['identifiers'].append(identifier)

            # Group the files based on the identifier
            found_files = self.getGroupFiles(identifier, folder, leftovers)
            group['unsorted_files'].extend(found_files)

            # Remove the found files from the leftover stack
            leftovers = leftovers - found_files

    # Open up the db
    db = get_session()

    # Mark all files as "offline" before a adding them to the database (again)
    files_in_path = db.query(File).filter(File.path.like(toUnicode(folder) + u'%%'))
    files_in_path.update({'available': 0}, synchronize_session = False)
    db.commit()

    # Determine file types
    update_after = []
    for identifier, group in movie_files.iteritems():

        # Group extra (and easy) files first
        images = self.getImages(group['unsorted_files'])
        group['files'] = {
            'subtitle': self.getSubtitles(group['unsorted_files']),
            'nfo': self.getNfo(group['unsorted_files']),
            'trailer': self.getTrailers(group['unsorted_files']),
            'backdrop': images['backdrop'],
            'leftover': set(group['unsorted_files']),
        }

        # Media files
        if group['is_dvd']:
            group['files']['movie'] = self.getDVDFiles(group['unsorted_files'])
        else:
            group['files']['movie'] = self.getMediaFiles(group['unsorted_files'])

        group['meta_data'] = self.getMetaData(group['files']['movie'])

        # Leftover "sorted" files
        for type in group['files']:
            group['files']['leftover'] -= set(group['files'][type])

        # Delete the unsorted list
        del group['unsorted_files']

        # Determine movie
        group['library'] = self.determineMovie(group)

        # Save to DB
        if group['library']:
            #library = db.query(Library).filter_by(id = library.get('id')).one()

            # Add release
            release = self.addRelease(group)
            # NOTE(review): this bare return exits scan() after the first
            # matched group — the library-update, cleanup and db.remove()
            # code below is never reached. Looks like leftover debugging;
            # confirm intent before removing.
            return

            # Add identifier for library update
            update_after.append(group['library'].get('identifier'))

    for identifier in update_after:
        fireEvent('library.update', identifier = identifier)

    # If cleanup option is enabled, remove offline files from database
    if self.conf('cleanup_offline'):
        files_in_path = db.query(File).filter(File.path.like(folder + '%%')).filter_by(available = 0)
        [db.delete(x) for x in files_in_path]
        db.commit()

    db.remove()
def addRelease(self, group):
    # Persist one scanned movie group as a Release row (creating the Movie
    # row when needed) and attach all of the group's files to it.
    db = get_session()

    # Release identifier combines the movie identifier with audio/quality tags
    identifier = '%s.%s.%s' % (group['library']['identifier'], group['meta_data']['audio'], group['meta_data']['quality'])

    # Add movie
    done_status = fireEvent('status.get', 'done', single = True)
    movie = db.query(Movie).filter_by(library_id = group['library'].get('id')).first()
    if not movie:
        movie = Movie(
            library_id = group['library'].get('id'),
            profile_id = 0,
            status_id = done_status.get('id')
        )
        db.add(movie)
        db.commit()

    # Add release
    quality = fireEvent('quality.single', group['meta_data']['quality'], single = True)
    release = db.query(Release).filter_by(identifier = identifier).first()
    if not release:
        release = Release(
            identifier = identifier,
            movie = movie,
            quality_id = quality.get('id'),
            status_id = done_status.get('id')
        )
        db.add(release)
        db.commit()

    # Add each file type
    for type in group['files']:
        for file in group['files'][type]:
            # NOTE(review): `type is 'movie'` is an identity comparison that
            # only works through CPython string interning — should be `==`.
            added_file = self.saveFile(file, type = type, include_media_info = type is 'movie')
            try:
                added_file = db.query(File).filter_by(id = added_file.get('id')).one()
                release.files.append(added_file)
                db.commit()
            except Exception, e:
                # Duplicate attach raises; treated as non-fatal
                log.debug('Failed to attach "%s" to release: %s' % (file, e))

    db.remove()
def getMetaData(self, files):
    """Return media info (audio/quality/resolution) for the movie files.

    Currently a hard-coded placeholder; real extraction should call
    self.getMeta() per file. The previous per-file loop sat below an
    unconditional return (dead code) and has been removed.
    """
    # TODO: derive these values via self.getMeta(file) for each file in `files`
    return {
        'audio': 'AC3',
        'quality': '720p',
        'quality_type': 'HD',
        'resolution_width': 1280,
        'resolution_height': 720
    }
def getMeta(self, filename):
    """Run libs/getmeta.py in a subprocess and return its JSON metadata.

    Returns the parsed dict, or None when the output can't be parsed
    (logged as an error).
    """
    lib_dir = os.path.join(Env.get('app_dir'), 'libs')
    script = os.path.join(lib_dir, 'getmeta.py')

    # Run in a separate process so a crashing parser can't take us down
    p = subprocess.Popen(["python", script, filename], stdout = subprocess.PIPE, stderr = subprocess.PIPE, cwd = lib_dir)
    z = p.communicate()[0]

    try:
        meta = json.loads(z)
        log.info('Retrieved metainfo: %s' % meta)
        return meta
    except Exception as e:
        # Fixed: the error was print'ed to stdout instead of logged
        log.error('Couldn\'t get metadata from file: %s' % e)
def determineMovie(self, group):
    # Figure out which movie a file group belongs to by trying, in order:
    # an imdb id in the nfo files, an existing db entry for the media files,
    # then a provider search on the group identifiers. Returns the library
    # dict from 'library.add' on success, False otherwise.
    imdb_id = None

    files = group['files']

    # Check and see if nfo contains the imdb-id
    try:
        for nfo_file in files['nfo']:
            imdb_id = self.getImdb(nfo_file)
            if imdb_id: break
    except:
        # NOTE(review): broad except silently hides real errors here — confirm
        pass

    # Check if path is already in db
    db = get_session()
    for file in files['movie']:
        f = db.query(File).filter_by(path = toUnicode(file)).first()
        try:
            # File rows already linked to a library carry the identifier
            imdb_id = f.library[0].identifier
            break
        except:
            pass
    db.remove()

    # Search based on identifiers
    if not imdb_id:
        for identifier in group['identifiers']:
            if len(identifier) > 2:
                movie = fireEvent('provider.movie.search', q = identifier, merge = True, limit = 1)

                if len(movie) > 0:
                    imdb_id = movie[0]['imdb']
                    if imdb_id: break
            else:
                # NOTE(review): "to short" typo in this runtime string
                log.debug('Identifier to short to use for search: %s' % identifier)

    if imdb_id:
        #movie = fireEvent('provider.movie.info', identifier = imdb_id, merge = True)
        #if movie and movie.get('imdb'):
        # update_after = False: scan() triggers the library update itself
        return fireEvent('library.add', attrs = {
            'identifier': imdb_id
        }, update_after = False, single = True)

    log.error('No imdb_id found for %s.' % group['identifiers'])
    return False
def saveFile(self, file, type = 'unknown', include_media_info = False):
    # Store a single file via the 'file.add' event and return its dict.
    properties = {}

    # Get media info for files
    # NOTE(review): this branch is currently a no-op placeholder — it
    # re-assigns the same empty dict; media info extraction is still TODO.
    if include_media_info:
        properties = {}

    # Check database and update/insert if necessary
    return fireEvent('file.add', path = file, part = self.getPartNumber(file), type = self.file_types[type], properties = properties, single = True)
def getImdb(self, txt):
    """Extract an IMDB id ('tt' + 7 digits) from a string or a text file.

    `txt` may be a path to a readable file (its contents are scanned) or
    raw text. Returns the id string, or False when none is found.
    """
    if os.path.isfile(txt):
        # Read file contents; `with` guarantees the handle is closed
        with open(txt, 'r') as output:
            txt = output.read()

    try:
        # Fixed regex: the original 'tt[0-9{7}]+' was a character class that
        # also matched literal '{' and '}'; an IMDB id is 'tt' + 7 digits.
        m = re.search('(?P<id>tt[0-9]{7})', txt)
        id = m.group('id')
        if id: return id
    except AttributeError:
        # No match: m is None
        pass

    return False
def getMediaFiles(self, files):
    """Filter *files* down to movie media files (size window + movie extension)."""

    def is_media(path):
        # Size check first (same order as before: it may log on stat failure)
        return self.filesizeBetween(path, 300, 100000) and getExt(path.lower()) in self.extensions['movie']

    return set([path for path in files if is_media(path)])
def getDVDFiles(self, files):
def test(s):
return self.isDVDFile(s)
return set(filter(test, files))
def getSubtitles(self, files):
    """Filter *files* down to subtitle files, by extension."""
    subtitle_exts = self.extensions['subtitle']
    return set([path for path in files if getExt(path.lower()) in subtitle_exts])
def getNfo(self, files):
    """Filter *files* down to nfo-style info files, by extension."""
    nfo_exts = self.extensions['nfo']
    return set([path for path in files if getExt(path.lower()) in nfo_exts])
def getTrailers(self, files):
def test(s):
return re.search('(^|[\W_])trailer\d*[\W_]', s.lower()) and self.filesizeBetween(s, 2, 250)
return set(filter(test, files))
def getImages(self, files):
    """Split the image files out of *files*, grouped into 'backdrop' and 'rest'."""

    image_exts = ['jpg', 'jpeg', 'png', 'gif', 'bmp', 'tbn']
    image_files = set([path for path in files if getExt(path.lower()) in image_exts])

    images = {}

    # Fanart / backdrops: matching name and at most 5MB
    images['backdrop'] = set([path for path in image_files if re.search('(^|[\W_])fanart|backdrop\d*[\W_]', path.lower()) and self.filesizeBetween(path, 0, 5)])

    # Everything else
    images['rest'] = image_files - images['backdrop']

    return images
def isDVDFile(self, file):
    """Return True when *file* looks like part of a DVD disc structure."""

    # A VIDEO_TS / AUDIO_TS directory anywhere in the path
    path_parts = set(file.lower().split(os.path.sep))
    if path_parts & set(['video_ts', 'audio_ts']):
        return True

    # DVD-specific name fragments anywhere in the path
    for marker in ['vts_', 'video_ts', 'audio_ts']:
        if marker in file.lower():
            return True

    return False
def keepFile(self, file):
    # Return False for files the scanner should ignore entirely.

    # ignoredpaths
    for i in self.ignored_in_path:
        if i in file.lower():
            log.debug('Ignored "%s" contains "%s".' % (file, i))
            return False

    # Sample file
    if re.search('(^|[\W_])sample\d*[\W_]', file.lower()):
        log.debug('Is sample file "%s".' % file)
        return False

    # Minimal size
    # NOTE(review): minimal_filesize['media'] is in bytes but
    # filesizeBetween() expects megabytes, so min lands far above max and
    # this condition can never be True. The logic also looks inverted
    # (it would reject files *inside* the allowed range). Confirm intent
    # before fixing — small nfo/sub files must still pass this method.
    if self.filesizeBetween(file, self.minimal_filesize['media']):
        log.debug('File to small: %s' % file)
        return False

    # All is OK
    return True
def filesizeBetween(self, file, min = 0, max = 100000):
    """Return True when the size of *file* lies between min and max megabytes.

    Returns False (and logs an error) when the file can't be stat'ed.
    """
    try:
        # 1048576 = bytes per megabyte
        return (min * 1048576) < os.path.getsize(file) < (max * 1048576)
    except OSError:
        # Narrowed from a bare except: only "can't stat the file" is
        # expected here; programming errors should surface, not be hidden.
        log.error('Couldn\'t get filesize of %s.' % file)

    return False
def getGroupFiles(self, identifier, folder, file_pile):
return set(filter(lambda s:identifier in self.createFileIdentifier(s, folder), file_pile))
def createFileIdentifier(self, file_path, folder, exclude_filename = False):
identifier = file_path.replace(folder, '') # root folder
identifier = os.path.splitext(identifier)[0] # ext
if exclude_filename:
identifier = identifier[:len(identifier) - len(os.path.split(identifier)[-1])]
identifier = self.removeMultipart(identifier) # multipart
return identifier
def removeMultipart(self, name):
for regex in self.multipart_regex:
try:
found = re.sub(regex, '', name)
if found != name:
return found
except:
pass
return name
def getPartNumber(self, name):
for regex in self.multipart_regex:
try:
found = re.search(regex, name)
if found:
return found.group(1)
return 1
except:
pass
return name

43
couchpotato/core/plugins/searcher/main.py

@ -19,6 +19,7 @@ class Searcher(Plugin):
# Schedule cronjob # Schedule cronjob
fireEvent('schedule.cron', 'searcher.all', self.all, day = self.conf('cron_day'), hour = self.conf('cron_hour'), minute = self.conf('cron_minute')) fireEvent('schedule.cron', 'searcher.all', self.all, day = self.conf('cron_day'), hour = self.conf('cron_hour'), minute = self.conf('cron_minute'))
addEvent('app.load', self.all)
def all(self): def all(self):
@ -28,22 +29,49 @@ class Searcher(Plugin):
Movie.status.has(identifier = 'active') Movie.status.has(identifier = 'active')
).all() ).all()
snatched_status = fireEvent('status.get', 'snatched', single = True)
for movie in movies: for movie in movies:
self.single(movie.to_dict(deep = { success = self.single(movie.to_dict(deep = {
'profile': {'types': {'quality': {}}}, 'profile': {'types': {'quality': {}}},
'releases': {'status': {}, 'quality': {}}, 'releases': {'status': {}, 'quality': {}},
'library': {'titles': {}, 'files':{}}, 'library': {'titles': {}, 'files':{}},
'files': {} 'files': {}
})) }))
# Mark as snatched on success
if success:
movie.status_id = snatched_status.get('id')
db.commit()
def single(self, movie): def single(self, movie):
successful = False
for type in movie['profile']['types']: for type in movie['profile']['types']:
has_better_quality = False
# See if beter quality is available
for release in movie['releases']:
if release['quality']['order'] <= type['quality']['order']:
has_better_quality = True
# Don't search for quality lower then already available.
if not has_better_quality:
log.info('Search for %s in %s' % (movie['library']['titles'][0]['title'], type['quality']['label']))
results = fireEvent('provider.yarr.search', movie, type['quality'], merge = True) results = fireEvent('provider.yarr.search', movie, type['quality'], merge = True)
sorted_results = sorted(results, key = lambda k: k['score'], reverse = True) sorted_results = sorted(results, key = lambda k: k['score'], reverse = True)
for nzb in sorted_results: for nzb in sorted_results:
print nzb['name'] successful = fireEvent('download', data = nzb, single = True)
if successful:
log.info('Downloading of %s successful.' % nzb.get('name'))
return True
return False
def correctMovie(self, nzb = {}, movie = {}, quality = {}, **kwargs): def correctMovie(self, nzb = {}, movie = {}, quality = {}, **kwargs):
@ -53,7 +81,7 @@ class Searcher(Plugin):
retention = Env.setting('retention', section = 'nzb') retention = Env.setting('retention', section = 'nzb')
if retention < nzb.get('age', 0): if retention < nzb.get('age', 0):
log.info('Wrong: Outside retention, age = %s, needs = %s: %s' % (nzb['age'], retention, nzb['name'])) log.info('Wrong: Outside retention, age is %s, needs %s or lower: %s' % (nzb['age'], retention, nzb['name']))
return False return False
nzb_words = re.split('\W+', simplifyString(nzb['name'])) nzb_words = re.split('\W+', simplifyString(nzb['name']))
@ -66,7 +94,7 @@ class Searcher(Plugin):
ignored_words = self.conf('ignored_words').split(',') ignored_words = self.conf('ignored_words').split(',')
blacklisted = list(set(nzb_words) & set(ignored_words)) blacklisted = list(set(nzb_words) & set(ignored_words))
if self.conf('ignored_words') and blacklisted: if self.conf('ignored_words') and blacklisted:
log.info("NZB '%s' contains the following blacklisted words: %s" % (nzb['name'], ", ".join(blacklisted))) log.info("Wrong: '%s' blacklisted words: %s" % (nzb['name'], ", ".join(blacklisted)))
return False return False
#qualities = fireEvent('quality.all', single = True) #qualities = fireEvent('quality.all', single = True)
@ -154,3 +182,10 @@ class Searcher(Plugin):
return True return True
return False return False
def correctName(self, check_name, movie_name):
check_words = re.split('\W+', simplifyString(check_name))
movie_words = re.split('\W+', simplifyString(movie_name))
return len(list(set(check_words) & set(movie_words))) == len(movie_words)

9
couchpotato/core/plugins/status/main.py

@ -11,15 +11,18 @@ log = CPLog(__name__)
class StatusPlugin(Plugin): class StatusPlugin(Plugin):
statuses = { statuses = {
'needs_update': 'Needs update',
'active': 'Active', 'active': 'Active',
'done': 'Done', 'done': 'Done',
'downloaded': 'Downloaded', 'downloaded': 'Downloaded',
'wanted': 'Wanted', 'wanted': 'Wanted',
'snatched': 'Snatched',
'deleted': 'Deleted', 'deleted': 'Deleted',
} }
def __init__(self): def __init__(self):
addEvent('status.add', self.add) addEvent('status.add', self.add)
addEvent('status.get', self.add) # Alias for .add
addEvent('status.all', self.all) addEvent('status.all', self.all)
addEvent('app.load', self.fill) addEvent('app.load', self.fill)
@ -52,8 +55,9 @@ class StatusPlugin(Plugin):
db.add(s) db.add(s)
db.commit() db.commit()
#db.remove() status_dict = s.to_dict()
return s
return status_dict
def fill(self): def fill(self):
@ -71,3 +75,4 @@ class StatusPlugin(Plugin):
s.label = label s.label = label
db.commit() db.commit()

3
couchpotato/core/providers/base.py

@ -53,6 +53,7 @@ class Provider(Plugin):
self.wait() self.wait()
try: try:
log.info('Opening url: %s' % url)
if username and password: if username and password:
passman = urllib2.HTTPPasswordMgrWithDefaultRealm() passman = urllib2.HTTPPasswordMgrWithDefaultRealm()
passman.add_password(None, url, username, password) passman.add_password(None, url, username, password)
@ -120,7 +121,7 @@ class YarrProvider(Provider):
return False return False
def found(self, new): def found(self, new):
log.info('Found, score(%(score)s): %(name)s' % new) log.info('Found: score(%(score)s): %(name)s' % new)
class NZBProvider(YarrProvider): class NZBProvider(YarrProvider):

23
couchpotato/core/providers/movie/themoviedb/main.py

@ -27,8 +27,13 @@ class TheMovieDb(MovieProvider):
if self.isDisabled(): if self.isDisabled():
return False return False
search_string = simplifyString(q)
cache_key = 'tmdb.cache.%s.%s' % (search_string, limit)
results = self.getCache(cache_key)
if not results:
log.debug('TheMovieDB - Searching for movie: %s' % q) log.debug('TheMovieDB - Searching for movie: %s' % q)
raw = tmdb.search(simplifyString(q)) raw = tmdb.search(search_string)
results = [] results = []
if raw: if raw:
@ -43,6 +48,7 @@ class TheMovieDb(MovieProvider):
break break
log.info('TheMovieDB - Found: %s' % [result['titles'][0] + ' (' + str(result['year']) + ')' for result in results]) log.info('TheMovieDB - Found: %s' % [result['titles'][0] + ' (' + str(result['year']) + ')' for result in results])
self.setCache(cache_key, results)
return results return results
except SyntaxError, e: except SyntaxError, e:
log.error('Failed to parse XML response: %s' % e) log.error('Failed to parse XML response: %s' % e)
@ -51,12 +57,23 @@ class TheMovieDb(MovieProvider):
return results return results
def getInfo(self, identifier = None): def getInfo(self, identifier = None):
cache_key = 'tmdb.cache.%s' % identifier
result = self.getCache(cache_key)
if not result:
result = {} result = {}
movie = None
movie = tmdb.imdbLookup(id = identifier)[0] try:
log.debug('Getting info: %s' % cache_key)
movie = tmdb.imdbLookup(id = identifier)
except:
pass
if movie: if movie:
result = self.parseMovie(movie) result = self.parseMovie(movie[0])
self.setCache(cache_key, result)
return result return result

138
couchpotato/core/providers/nzb/newzbin/main.py

@ -1,18 +1,25 @@
from couchpotato.core.event import addEvent
from couchpotato.core.event import addEvent, fireEvent
from couchpotato.core.helpers.rss import RSS
from couchpotato.core.logger import CPLog from couchpotato.core.logger import CPLog
from couchpotato.core.providers.base import NZBProvider from couchpotato.core.providers.base import NZBProvider
from couchpotato.environment import Env
from dateutil.parser import parse from dateutil.parser import parse
from urllib import urlencode from urllib import urlencode
from urllib2 import URLError from urllib2 import URLError
import httplib
import time import time
import traceback
import urllib
import xml.etree.ElementTree as XMLTree
log = CPLog(__name__) log = CPLog(__name__)
class Newzbin(NZBProvider): class Newzbin(NZBProvider, RSS):
searchUrl = 'https://www.newzbin.com/search/' searchUrl = 'https://www.newzbin.com/search/'
formatIds = { format_ids = {
2: ['scr'], 2: ['scr'],
1: ['cam'], 1: ['cam'],
4: ['tc'], 4: ['tc'],
@ -37,40 +44,32 @@ class Newzbin(NZBProvider):
if self.isDisabled() or not self.isAvailable(self.searchUrl): if self.isDisabled() or not self.isAvailable(self.searchUrl):
return results return results
formatId = self.getFormatId(type) format_id = self.getFormatId(type)
catId = self.getCatId(type) cat_id = self.getCatId(type)
arguments = urlencode({ arguments = urlencode({
'searchaction': 'Search', 'searchaction': 'Search',
'u_url_posts_only': '0', 'u_url_posts_only': '0',
'u_show_passworded': '0', 'u_show_passworded': '0',
'q_url': 'imdb.com/title/' + movie.imdb, 'q_url': 'imdb.com/title/' + movie['library']['identifier'],
'sort': 'ps_totalsize', 'sort': 'ps_totalsize',
'order': 'asc', 'order': 'asc',
'u_post_results_amt': '100', 'u_post_results_amt': '100',
'feed': 'rss', 'feed': 'rss',
'category': '6', 'category': '6',
'ps_rb_video_format': str(catId), 'ps_rb_video_format': str(cat_id),
'ps_rb_source': str(formatId), 'ps_rb_source': str(format_id),
}) })
url = "%s?%s" % (self.searchUrl, arguments) url = "%s?%s" % (self.searchUrl, arguments)
cacheId = str('%s %s %s' % (movie.imdb, str(formatId), str(catId))) cache_key = str('newzbin.%s.%s.%s' % (movie['library']['identifier'], str(format_id), str(cat_id)))
singleCat = True single_cat = True
try: try:
cached = False data = self.getCache(cache_key)
if(self.cache.get(cacheId)): if not data:
data = True
cached = True
log.info('Getting RSS from cache: %s.' % cacheId)
else:
log.info('Searching: %s' % url)
data = self.urlopen(url, username = self.conf('username'), password = self.conf('password')) data = self.urlopen(url, username = self.conf('username'), password = self.conf('password'))
self.cache[cacheId] = { self.setCache(cache_key, data)
'time': time.time()
}
except (IOError, URLError): except (IOError, URLError):
log.error('Failed to open %s.' % url) log.error('Failed to open %s.' % url)
return results return results
@ -78,46 +77,47 @@ class Newzbin(NZBProvider):
if data: if data:
try: try:
try: try:
if cached: data = XMLTree.fromstring(data)
xml = self.cache[cacheId]['xml'] nzbs = self.getElements(data, 'channel/item')
else: except Exception, e:
xml = self.getItems(data) log.debug('%s, %s' % (self.getName(), e))
self.cache[cacheId]['xml'] = xml
except:
log.debug('No valid xml or to many requests.. You never know with %s.' % self.name)
return results return results
for item in xml: for nzb in nzbs:
title = self.gettextelement(item, "title") title = self.getTextElement(nzb, "title")
if 'error' in title.lower(): continue if 'error' in title.lower(): continue
REPORT_NS = 'http://www.newzbin.com/DTD/2007/feeds/report/'; REPORT_NS = 'http://www.newzbin.com/DTD/2007/feeds/report/';
# Add attributes to name # Add attributes to name
for attr in item.find('{%s}attributes' % REPORT_NS): for attr in nzb.find('{%s}attributes' % REPORT_NS):
title += ' ' + attr.text title += ' ' + attr.text
id = int(self.gettextelement(item, '{%s}id' % REPORT_NS)) id = int(self.getTextElement(nzb, '{%s}id' % REPORT_NS))
size = str(int(self.gettextelement(item, '{%s}size' % REPORT_NS)) / 1024 / 1024) + ' mb' size = str(int(self.getTextElement(nzb, '{%s}size' % REPORT_NS)) / 1024 / 1024) + ' mb'
date = str(self.gettextelement(item, '{%s}postdate' % REPORT_NS)) date = str(self.getTextElement(nzb, '{%s}postdate' % REPORT_NS))
new = self.feedItem() new = {
new.id = id 'id': id,
new.type = 'nzb' 'type': 'nzb',
new.name = title 'name': title,
new.date = int(time.mktime(parse(date).timetuple())) 'age': self.calculateAge(int(time.mktime(parse(date).timetuple()))),
new.size = self.parseSize(size) 'size': self.parseSize(size),
new.url = str(self.gettextelement(item, '{%s}nzb' % REPORT_NS)) 'url': str(self.getTextElement(nzb, '{%s}nzb' % REPORT_NS)),
new.detailUrl = str(self.gettextelement(item, 'link')) 'download': lambda: self.download(id),
new.content = self.gettextelement(item, "description") 'detail_url': str(self.getTextElement(nzb, 'link')),
new.score = self.calcScore(new, movie) 'description': self.getTextElement(nzb, "description"),
new.addbyid = True 'check_nzb': False,
new.checkNZB = False }
new['score'] = fireEvent('score.calculate', new, movie, single = True)
if new.date > time.time() - (int(self.config.get('NZB', 'retention')) * 24 * 60 * 60) and self.isCorrectMovie(new, movie, type, imdbResults = True, singleCategory = singleCat):
is_correct_movie = fireEvent('searcher.correct_movie',
nzb = new, movie = movie, quality = quality,
imdb_results = True, single_category = single_cat, single = True)
if is_correct_movie:
results.append(new) results.append(new)
log.info('Found: %s' % new.name) self.found(new)
return results return results
except SyntaxError: except SyntaxError:
@ -125,13 +125,47 @@ class Newzbin(NZBProvider):
return results return results
def download(self, nzb_id):
try:
conn = httplib.HTTPSConnection('www.newzbin.com')
postdata = { 'username': self.conf('username'), 'password': self.conf('password'), 'reportid': nzb_id }
postdata = urllib.urlencode(postdata)
headers = {
'User-agent': 'CouchPotato+/%s' % Env.get('version'),
'Content-type': 'application/x-www-form-urlencoded',
}
fetchurl = '/api/dnzb/'
conn.request('POST', fetchurl, postdata, headers)
response = conn.getresponse()
# Save debug info if we have to
data = response.read()
except:
log.error('Problem with Newzbin server: %s' % traceback.format_exc())
return False
# Is a valid response
return_code = response.getheader('X-DNZB-RCode')
return_text = response.getheader('X-DNZB-RText')
if return_code is not '200':
log.error('Error getting nzb from Newzbin: %s, %s' % (return_code, return_text))
return False
return data
def getFormatId(self, format): def getFormatId(self, format):
for id, quality in self.formatIds.iteritems(): for id, quality in self.format_ids.iteritems():
for q in quality: for q in quality:
if q == format: if q == format:
return id return id
return self.catBackupId return self.cat_backup_id
def isEnabled(self): def isEnabled(self):
return NZBProvider.isEnabled(self) and self.conf('enabled') and self.conf('username') and self.conf('password') return NZBProvider.isEnabled(self) and self.conf('enabled') and self.conf('username') and self.conf('password')

100
couchpotato/core/providers/nzb/nzbs/main.py

@ -1,15 +1,18 @@
from couchpotato.core.event import addEvent from couchpotato.core.event import addEvent, fireEvent
from couchpotato.core.helpers.encoding import simplifyString
from couchpotato.core.helpers.rss import RSS
from couchpotato.core.logger import CPLog from couchpotato.core.logger import CPLog
from couchpotato.core.providers.base import NZBProvider from couchpotato.core.providers.base import NZBProvider
from dateutil.parser import parse from dateutil.parser import parse
from urllib import urlencode from urllib import urlencode
from urllib2 import URLError from urllib2 import URLError
import time import time
import xml.etree.ElementTree as XMLTree
log = CPLog(__name__) log = CPLog(__name__)
class Nzbs(NZBProvider): class Nzbs(NZBProvider, RSS):
urls = { urls = {
'download': 'http://nzbs.org/index.php?action=getnzb&nzbid=%s%s', 'download': 'http://nzbs.org/index.php?action=getnzb&nzbid=%s%s',
@ -34,88 +37,71 @@ class Nzbs(NZBProvider):
def search(self, movie, quality): def search(self, movie, quality):
results = [] results = []
if self.isDisabled() or not self.isAvailable(self.apiUrl + '?test' + self.getApiExt()): if self.isDisabled() or not self.isAvailable(self.urls['api'] + '?test' + self.getApiExt()):
return results return results
catId = self.getCatId(type) cat_id = self.getCatId(quality.get('identifier'))
arguments = urlencode({ arguments = urlencode({
'action':'search', 'action':'search',
'q': self.toSearchString(movie.name), 'q': simplifyString(movie['library']['titles'][0]['title']),
'catid': catId, 'catid': cat_id[0],
'i': self.conf('id'), 'i': self.conf('id'),
'h': self.conf('key'), 'h': self.conf('api_key'),
'age': self.config.get('NZB', 'retention')
}) })
url = "%s?%s" % (self.apiUrl, arguments) url = "%s?%s" % (self.urls['api'], arguments)
cacheId = str(movie.imdb) + '-' + str(catId)
singleCat = (len(self.catIds.get(catId)) == 1 and catId != self.catBackupId) cache_key = '%s-%s' % (movie['library'].get('identifier'), str(cat_id))
try: try:
cached = False data = self.getCache(cache_key)
if(self.cache.get(cacheId)): if not data:
data = True
cached = True
log.info('Getting RSS from cache: %s.' % cacheId)
else:
log.info('Searching: %s' % url)
data = self.urlopen(url) data = self.urlopen(url)
self.cache[cacheId] = { self.setCache(cache_key, data)
'time': time.time()
}
except (IOError, URLError): except (IOError, URLError):
log.error('Failed to open %s.' % url) log.error('Failed to open %s.' % url)
return results return results
if data: if data:
log.debug('Parsing NZBs.org RSS.')
try: try:
try: try:
if cached: data = XMLTree.fromstring(data)
xml = self.cache[cacheId]['xml'] nzbs = self.getElements(data, 'channel/item')
else: except Exception, e:
xml = self.getItems(data) log.debug('%s, %s' % (self.getName(), e))
self.cache[cacheId]['xml'] = xml
except:
retry = False
if retry == False:
log.error('No valid xml, to many requests? Try again in 15sec.')
time.sleep(15)
return self.find(movie, quality, type, retry = True)
else:
log.error('Failed again.. disable %s for 15min.' % self.name)
self.available = False
return results return results
for nzb in xml: for nzb in nzbs:
id = int(self.gettextelement(nzb, "link").partition('nzbid=')[2]) new = {
'id': int(self.getTextElement(nzb, "link").partition('nzbid=')[2]),
size = self.gettextelement(nzb, "description").split('</a><br />')[1].split('">')[1] 'type': 'nzb',
'name': self.getTextElement(nzb, "title"),
'age': self.calculateAge(int(time.mktime(parse(self.getTextElement(nzb, "pubDate")).timetuple()))),
'size': self.parseSize(self.getTextElement(nzb, "description").split('</a><br />')[1].split('">')[1]),
'url': self.urls['download'] % (id, self.getApiExt()),
'detail_url': self.urls['detail'] % id,
'description': self.getTextElement(nzb, "description"),
'check_nzb': True,
}
new['score'] = fireEvent('score.calculate', new, movie, single = True)
new = self.feedItem() is_correct_movie = fireEvent('searcher.correct_movie',
new.id = id nzb = new, movie = movie, quality = quality,
new.type = 'nzb' imdb_results = False, single_category = False, single = True)
new.name = self.gettextelement(nzb, "title")
new.date = int(time.mktime(parse(self.gettextelement(nzb, "pubDate")).timetuple()))
new.size = self.parseSize(size)
new.url = self.downloadLink(id)
new.detailUrl = self.detailLink(id)
new.content = self.gettextelement(nzb, "description")
new.score = self.calcScore(new, movie)
if self.isCorrectMovie(new, movie, type, singleCategory = singleCat): if is_correct_movie:
results.append(new) results.append(new)
log.info('Found: %s' % new.name) self.found(new)
return results return results
except SyntaxError: except SyntaxError:
log.error('Failed to parse XML response from NZBs.org') log.error('Failed to parse XML response from NZBMatrix.com')
return False
return results return results
def isEnabled(self): def isEnabled(self):
return NZBProvider.isEnabled(self) and self.conf('enabled') and self.conf('id') and self.conf('key') return NZBProvider.isEnabled(self) and self.conf('enabled') and self.conf('id') and self.conf('api_key')
def getApiExt(self): def getApiExt(self):
return '&i=%s&h=%s' % (self.conf('id'), self.conf('key')) return '&i=%s&h=%s' % (self.conf('id'), self.conf('api_key'))

24
couchpotato/core/settings/model.py

@ -43,7 +43,7 @@ class Library(Entity):
status = ManyToOne('Status') status = ManyToOne('Status')
movie = OneToMany('Movie') movie = OneToMany('Movie')
titles = OneToMany('LibraryTitle') titles = OneToMany('LibraryTitle', order_by = '-default')
files = ManyToMany('File') files = ManyToMany('File')
@ -70,11 +70,23 @@ class Release(Entity):
"""Logically groups all files that belong to a certain release, such as """Logically groups all files that belong to a certain release, such as
parts of a movie, subtitles.""" parts of a movie, subtitles."""
identifier = Field(String(100))
movie = ManyToOne('Movie') movie = ManyToOne('Movie')
status = ManyToOne('Status') status = ManyToOne('Status')
quality = ManyToOne('Quality') quality = ManyToOne('Quality')
files = ManyToMany('File') files = ManyToMany('File')
history = OneToMany('History') history = OneToMany('History')
info = OneToMany('ReleaseInfo')
class ReleaseInfo(Entity):
"""Properties that can be bound to a file for off-line usage"""
identifier = Field(String(50))
value = Field(Unicode(255), nullable = False)
release = ManyToOne('Release')
class Status(Entity): class Status(Entity):
@ -132,6 +144,7 @@ class File(Entity):
path = Field(Unicode(255), nullable = False, unique = True) path = Field(Unicode(255), nullable = False, unique = True)
part = Field(Integer, default = 1) part = Field(Integer, default = 1)
available = Field(Boolean)
type = ManyToOne('FileType') type = ManyToOne('FileType')
properties = OneToMany('FileProperty') properties = OneToMany('FileProperty')
@ -178,8 +191,15 @@ class RenameHistory(Entity):
file = ManyToOne('File') file = ManyToOne('File')
class Folder(Entity):
"""Renamer destination folders."""
path = Field(Unicode(255))
label = Field(Unicode(255))
def setup(): def setup():
""" Setup the database and create the tables that don't exists yet """ """Setup the database and create the tables that don't exists yet"""
from elixir import setup_all, create_all from elixir import setup_all, create_all
from couchpotato import get_engine from couchpotato import get_engine

1
couchpotato/environment.py

@ -12,6 +12,7 @@ class Env:
_args = None _args = None
_quiet = False _quiet = False
_deamonize = False _deamonize = False
_version = 0.5
''' Data paths and directories ''' ''' Data paths and directories '''
_app_dir = "" _app_dir = ""

2
libs/axl/axel.py

@ -179,7 +179,7 @@ class Event(object):
if not self.asynchronous: if not self.asynchronous:
self.result.append(tuple(r)) self.result.append(tuple(r))
except Exception, e: except Exception:
if not self.asynchronous: if not self.asynchronous:
self.result.append((False, self._error(sys.exc_info()), self.result.append((False, self._error(sys.exc_info()),
handler)) handler))

9
libs/getmeta.py

@ -1,11 +1,10 @@
from hachoir_parser import createParser from flask.helpers import json
from hachoir_metadata import extractMetadata
from hachoir_core.cmd_line import unicodeFilename from hachoir_core.cmd_line import unicodeFilename
from hachoir_metadata import extractMetadata
from hachoir_parser import createParser
import datetime import datetime
import json
import sys
import re import re
import sys
def getMetadata(filename): def getMetadata(filename):

Loading…
Cancel
Save