Browse Source

Merge branch 'tv' of https://github.com/RuudBurger/CouchPotatoServer into tv_xem

pull/2290/head
Jason Mehring 12 years ago
parent
commit
132f4882e5
  1. 2
      couchpotato/__init__.py
  2. 6
      couchpotato/api.py
  3. 2
      couchpotato/core/_base/_core/main.py
  4. 49
      couchpotato/core/downloaders/utorrent/main.py
  5. 10
      couchpotato/core/helpers/encoding.py
  6. 28
      couchpotato/core/helpers/variable.py
  7. 118
      couchpotato/core/media/_base/searcher/main.py
  8. 8
      couchpotato/core/media/movie/_base/main.py
  9. 45
      couchpotato/core/media/movie/_base/static/movie.actions.js
  10. 18
      couchpotato/core/media/movie/_base/static/movie.js
  11. 5
      couchpotato/core/media/movie/_base/static/search.css
  12. 121
      couchpotato/core/media/movie/searcher/main.py
  13. 32
      couchpotato/core/media/show/_base/main.py
  14. 263
      couchpotato/core/media/show/searcher/main.py
  15. 7
      couchpotato/core/notifications/core/main.py
  16. 2
      couchpotato/core/notifications/core/static/notification.js
  17. 4
      couchpotato/core/plugins/base.py
  18. 11
      couchpotato/core/plugins/category/main.py
  19. 11
      couchpotato/core/plugins/dashboard/main.py
  20. 10
      couchpotato/core/plugins/file/main.py
  21. 10
      couchpotato/core/plugins/quality/main.py
  22. 4
      couchpotato/core/plugins/release/main.py
  23. 8
      couchpotato/core/plugins/renamer/__init__.py
  24. 14
      couchpotato/core/plugins/renamer/main.py
  25. 27
      couchpotato/core/plugins/scanner/main.py
  26. 4
      couchpotato/core/plugins/suggestion/main.py
  27. 93
      couchpotato/core/plugins/suggestion/static/suggest.js
  28. 2
      couchpotato/core/plugins/userscript/static/userscript.js
  29. 12
      couchpotato/core/plugins/wizard/static/wizard.js
  30. 44
      couchpotato/core/providers/base.py
  31. 9
      couchpotato/core/providers/info/themoviedb/main.py
  32. 6
      couchpotato/core/providers/nzb/binsearch/main.py
  33. 40
      couchpotato/core/providers/nzb/ftdworld/__init__.py
  34. 83
      couchpotato/core/providers/nzb/ftdworld/main.py
  35. 1
      couchpotato/core/providers/nzb/newznab/__init__.py
  36. 75
      couchpotato/core/providers/torrent/iptorrents/main.py
  37. 2
      couchpotato/core/providers/torrent/sceneaccess/main.py
  38. 12
      couchpotato/core/providers/torrent/torrentshack/main.py
  39. 11
      couchpotato/core/providers/trailer/hdtrailers/main.py
  40. 24
      couchpotato/runner.py
  41. 2
      init/ubuntu
  42. 161
      libs/caper/__init__.py
  43. 74
      libs/caper/constraint.py
  44. 147
      libs/caper/group.py
  45. 64
      libs/caper/helpers.py
  46. 193
      libs/caper/matcher.py
  47. 75
      libs/caper/objects.py
  48. 0
      libs/caper/parsers/__init__.py
  49. 88
      libs/caper/parsers/anime.py
  50. 136
      libs/caper/parsers/base.py
  51. 148
      libs/caper/parsers/scene.py
  52. 172
      libs/caper/result.py
  53. 72
      libs/caper/step.py
  54. 35
      libs/guessit/__init__.py
  55. 6
      libs/guessit/fileutils.py
  56. 2
      libs/guessit/guess.py
  57. 2
      libs/guessit/language.py
  58. 6
      libs/guessit/matcher.py
  59. 2
      libs/guessit/matchtree.py
  60. 50
      libs/guessit/patterns.py
  61. 18
      libs/guessit/transfo/guess_episodes_rexps.py
  62. 71
      libs/guessit/transfo/guess_idnumber.py
  63. 21
      libs/guessit/transfo/guess_release_group.py
  64. 16
      libs/guessit/transfo/guess_year.py
  65. 201
      libs/logr/__init__.py
  66. 2
      libs/synchronousdeluge/transfer.py
  67. 27
      libs/unrar2/PKG-INFO
  68. 191
      libs/unrar2/UnRAR2.html
  69. 18
      libs/unrar2/UnRARDLL/license.txt
  70. 140
      libs/unrar2/UnRARDLL/unrar.h
  71. BIN
      libs/unrar2/UnRARDLL/unrar.lib
  72. 606
      libs/unrar2/UnRARDLL/unrardll.txt
  73. 80
      libs/unrar2/UnRARDLL/whatsnew.txt
  74. 1
      libs/unrar2/UnRARDLL/x64/readme.txt
  75. BIN
      libs/unrar2/UnRARDLL/x64/unrar64.lib
  76. 21
      libs/unrar2/license.txt
  77. 66
      libs/unrar2/unix.py
  78. BIN
      libs/unrar2/unrar
  79. 0
      libs/unrar2/unrar.dll
  80. 0
      libs/unrar2/unrar64.dll
  81. 27
      libs/unrar2/windows.py

2
couchpotato/__init__.py

@ -22,7 +22,7 @@ class BaseHandler(RequestHandler):
username = Env.setting('username')
password = Env.setting('password')
if username or password:
if username and password:
return self.get_secure_cookie('user')
else: # Login when no username or password are set
return True

6
couchpotato/api.py

@ -44,13 +44,15 @@ class NonBlockHandler(RequestHandler):
def onNewMessage(self, response):
if self.request.connection.stream.closed():
self.on_connection_close()
return
try:
self.finish(response)
except:
log.error('Failed doing nonblock request: %s', (traceback.format_exc()))
self.finish({'success': False, 'error': 'Failed returning results'})
log.debug('Failed doing nonblock request, probably already closed: %s', (traceback.format_exc()))
try: self.finish({'success': False, 'error': 'Failed returning results'})
except: pass
def on_connection_close(self):

2
couchpotato/core/_base/_core/main.py

@ -56,7 +56,7 @@ class Core(Plugin):
self.signalHandler()
def md5Password(self, value):
return md5(value.encode(Env.get('encoding'))) if value else ''
return md5(value) if value else ''
def checkApikey(self, value):
return value if value and len(value) > 3 else uuid4().hex

49
couchpotato/core/downloaders/utorrent/main.py

@ -77,7 +77,7 @@ class uTorrent(Downloader):
else:
info = bdecode(filedata)["info"]
torrent_hash = sha1(benc(info)).hexdigest().upper()
torrent_filename = self.createFileName(data, filedata, movie)
torrent_filename = self.createFileName(data, filedata, movie)
if data.get('seed_ratio'):
torrent_params['seed_override'] = 1
@ -93,7 +93,7 @@ class uTorrent(Downloader):
# Send request to uTorrent
if data.get('protocol') == 'torrent_magnet':
self.utorrent_api.add_torrent_uri(data.get('url'))
self.utorrent_api.add_torrent_uri(torrent_filename, data.get('url'))
else:
self.utorrent_api.add_torrent_file(torrent_filename, filedata)
@ -102,6 +102,39 @@ class uTorrent(Downloader):
if self.conf('paused', default = 0):
self.utorrent_api.pause_torrent(torrent_hash)
count = 0
while True:
count += 1
# Check if torrent is saved in subfolder of torrent name
data = self.utorrent_api.get_files(torrent_hash)
torrent_files = json.loads(data)
if torrent_files.get('error'):
log.error('Error getting data from uTorrent: %s', torrent_files.get('error'))
return False
if (torrent_files.get('files') and len(torrent_files['files'][1]) > 0) or count > 60:
break
time.sleep(1)
# Torrent has only one file, so uTorrent wont create a folder for it
if len(torrent_files['files'][1]) == 1:
# Remove torrent and try again
self.utorrent_api.remove_torrent(torrent_hash, remove_data = True)
# Send request to uTorrent
if data.get('protocol') == 'torrent_magnet':
self.utorrent_api.add_torrent_uri(torrent_filename, data.get('url'), add_folder = True)
else:
self.utorrent_api.add_torrent_file(torrent_filename, filedata, add_folder = True)
# Change settings of added torrent
self.utorrent_api.set_torrent(torrent_hash, torrent_params)
if self.conf('paused', default = 0):
self.utorrent_api.pause_torrent(torrent_hash)
return self.downloadReturnId(torrent_hash)
def getAllDownloadStatus(self):
@ -224,12 +257,16 @@ class uTorrentAPI(object):
token = re.findall("<div.*?>(.*?)</", request.read())[0]
return token
def add_torrent_uri(self, torrent):
def add_torrent_uri(self, filename, torrent, add_folder = False):
action = "action=add-url&s=%s" % urllib.quote(torrent)
if add_folder:
action += "&path=%s" % urllib.quote(filename)
return self._request(action)
def add_torrent_file(self, filename, filedata):
def add_torrent_file(self, filename, filedata, add_folder = False):
action = "action=add-file"
if add_folder:
action += "&path=%s" % urllib.quote(filename)
return self._request(action, {"torrent_file": (ss(filename), filedata)})
def set_torrent(self, hash, params):
@ -291,3 +328,7 @@ class uTorrentAPI(object):
action = 'action=setsetting' + ''.join(['&s=%s&v=%s' % (key, value) for (key, value) in settings_dict.items()])
return self._request(action)
def get_files(self, hash):
action = "action=getfiles&hash=%s" % hash
return self._request(action)

10
couchpotato/core/helpers/encoding.py

@ -38,8 +38,14 @@ def toUnicode(original, *args):
return toUnicode(ascii_text)
def ss(original, *args):
from couchpotato.environment import Env
return toUnicode(original, *args).encode(Env.get('encoding'))
u_original = toUnicode(original, *args)
try:
from couchpotato.environment import Env
return u_original.encode(Env.get('encoding'))
except Exception, e:
log.debug('Failed ss encoding char, force UTF8: %s', e)
return u_original.encode('UTF-8')
def ek(original, *args):
if isinstance(original, (str, unicode)):

28
couchpotato/core/helpers/variable.py

@ -1,4 +1,5 @@
from couchpotato.core.helpers.encoding import simplifyString, toSafeString
import collections
from couchpotato.core.helpers.encoding import simplifyString, toSafeString, ss
from couchpotato.core.logger import CPLog
import hashlib
import os.path
@ -101,7 +102,7 @@ def flattenList(l):
return l
def md5(text):
return hashlib.md5(text).hexdigest()
return hashlib.md5(ss(text)).hexdigest()
def sha1(text):
return hashlib.sha1(text).hexdigest()
@ -123,7 +124,12 @@ def cleanHost(host):
return host
def getImdb(txt, check_inside = True, multiple = False):
def getImdb(txt, check_inside = False, multiple = False):
if not check_inside:
txt = simplifyString(txt)
else:
txt = ss(txt)
if check_inside and os.path.isfile(txt):
output = open(txt, 'r')
@ -140,9 +146,9 @@ def getImdb(txt, check_inside = True, multiple = False):
return False
def tryInt(s):
def tryInt(s, default=0):
try: return int(s)
except: return 0
except: return default
def tryFloat(s):
try:
@ -158,6 +164,11 @@ def natsortKey(s):
def natcmp(a, b):
return cmp(natsortKey(a), natsortKey(b))
def toIterable(value):
if isinstance(value, collections.Iterable):
return value
return [value]
def getTitle(library_dict):
try:
try:
@ -168,8 +179,11 @@ def getTitle(library_dict):
if title.default:
return title.title
except:
log.error('Could not get title for %s', library_dict.identifier)
return None
try:
return library_dict['info']['titles'][0]
except:
log.error('Could not get title for %s', library_dict.identifier)
return None
log.error('Could not get title for %s', library_dict['identifier'])
return None

118
couchpotato/core/media/_base/searcher/main.py

@ -2,11 +2,12 @@ from couchpotato import get_session
from couchpotato.api import addApiView
from couchpotato.core.event import addEvent, fireEvent
from couchpotato.core.helpers.encoding import simplifyString, toUnicode
from couchpotato.core.helpers.variable import md5, getTitle
from couchpotato.core.helpers.variable import md5, getTitle, splitString
from couchpotato.core.logger import CPLog
from couchpotato.core.media._base.searcher.base import SearcherBase
from couchpotato.core.settings.model import Media, Release, ReleaseInfo
from couchpotato.environment import Env
from sqlalchemy.exc import InterfaceError
from inspect import ismethod, isfunction
import datetime
import re
@ -23,7 +24,10 @@ class Searcher(SearcherBase):
addEvent('searcher.contains_other_quality', self.containsOtherQuality)
addEvent('searcher.correct_year', self.correctYear)
addEvent('searcher.correct_name', self.correctName)
addEvent('searcher.correct_words', self.correctWords)
addEvent('searcher.download', self.download)
addEvent('searcher.search', self.search)
addEvent('searcher.create_releases', self.createReleases)
addApiView('searcher.full_search', self.searchAllView, docs = {
'desc': 'Starts a full search for all media',
@ -130,6 +134,74 @@ class Searcher(SearcherBase):
return False
def search(self, protocols, media, quality):
results = []
search_type = None
if media['type'] == 'movie':
search_type = 'movie'
elif media['type'] in ['show', 'season', 'episode']:
search_type = 'show'
for search_protocol in protocols:
protocol_results = fireEvent('provider.search.%s.%s' % (search_protocol, search_type), media, quality, merge = True)
if protocol_results:
results += protocol_results
sorted_results = sorted(results, key = lambda k: k['score'], reverse = True)
download_preference = self.conf('preferred_method', section = 'searcher')
if download_preference != 'both':
sorted_results = sorted(sorted_results, key = lambda k: k['protocol'][:3], reverse = (download_preference == 'torrent'))
return sorted_results
def createReleases(self, search_results, media, quality_type):
available_status, ignored_status, failed_status = fireEvent('status.get', ['available', 'ignored', 'failed'], single = True)
db = get_session()
found_releases = []
for rel in search_results:
nzb_identifier = md5(rel['url'])
found_releases.append(nzb_identifier)
rls = db.query(Release).filter_by(identifier = nzb_identifier).first()
if not rls:
rls = Release(
identifier = nzb_identifier,
media_id = media.get('id'),
quality_id = quality_type.get('quality_id'),
status_id = available_status.get('id')
)
db.add(rls)
else:
[db.delete(old_info) for old_info in rls.info]
rls.last_edit = int(time.time())
db.commit()
for info in rel:
try:
if not isinstance(rel[info], (str, unicode, int, long, float)):
continue
rls_info = ReleaseInfo(
identifier = info,
value = toUnicode(rel[info])
)
rls.info.append(rls_info)
except InterfaceError:
log.debug('Couldn\'t add %s to ReleaseInfo: %s', (info, traceback.format_exc()))
db.commit()
rel['status_id'] = rls.status_id
return found_releases
def getSearchProtocols(self):
download_protocols = fireEvent('download.enabled_protocols', merge = True)
@ -234,5 +306,49 @@ class Searcher(SearcherBase):
return False
def correctWords(self, rel_name, media):
media_title = fireEvent('searcher.get_search_title', media, single = True)
media_words = re.split('\W+', simplifyString(media_title))
rel_name = simplifyString(rel_name)
rel_words = re.split('\W+', rel_name)
# Make sure it has required words
required_words = splitString(self.conf('required_words', section = 'searcher').lower())
try: required_words = list(set(required_words + splitString(media['category']['required'].lower())))
except: pass
req_match = 0
for req_set in required_words:
req = splitString(req_set, '&')
req_match += len(list(set(rel_words) & set(req))) == len(req)
if len(required_words) > 0 and req_match == 0:
log.info2('Wrong: Required word missing: %s', rel_name)
return False
# Ignore releases
ignored_words = splitString(self.conf('ignored_words', section = 'searcher').lower())
try: ignored_words = list(set(ignored_words + splitString(media['category']['ignored'].lower())))
except: pass
ignored_match = 0
for ignored_set in ignored_words:
ignored = splitString(ignored_set, '&')
ignored_match += len(list(set(rel_words) & set(ignored))) == len(ignored)
if len(ignored_words) > 0 and ignored_match:
log.info2("Wrong: '%s' contains 'ignored words'", rel_name)
return False
# Ignore porn stuff
pron_tags = ['xxx', 'sex', 'anal', 'tits', 'fuck', 'porn', 'orgy', 'milf', 'boobs', 'erotica', 'erotic', 'cock', 'dick']
pron_words = list(set(rel_words) & set(pron_tags) - set(media_words))
if pron_words:
log.info('Wrong: %s, probably pr0n', rel_name)
return False
return True
class SearchSetupError(Exception):
pass

8
couchpotato/core/media/movie/_base/main.py

@ -232,14 +232,14 @@ class MovieBase(MovieTypeBase):
# List release statuses
releases = db.query(Release) \
.filter(Release.movie_id.in_(movie_ids)) \
.filter(Release.media_id.in_(movie_ids)) \
.all()
release_statuses = dict((m, set()) for m in movie_ids)
releases_count = dict((m, 0) for m in movie_ids)
for release in releases:
release_statuses[release.movie_id].add('%d,%d' % (release.status_id, release.quality_id))
releases_count[release.movie_id] += 1
release_statuses[release.media_id].add('%d,%d' % (release.status_id, release.quality_id))
releases_count[release.media_id] += 1
# Get main movie data
q2 = db.query(Media) \
@ -469,7 +469,7 @@ class MovieBase(MovieTypeBase):
fireEvent('release.delete', release.id, single = True)
m.profile_id = params.get('profile_id', default_profile.get('id'))
m.category_id = tryInt(cat_id) if cat_id is not None and tryInt(cat_id) > 0 else None
m.category_id = tryInt(cat_id) if cat_id is not None and tryInt(cat_id) > 0 else (m.category_id or None)
else:
log.debug('Movie already exists, not updating: %s', params)
added = False

45
couchpotato/core/media/movie/_base/static/movie.actions.js

@ -18,11 +18,13 @@ var MovieAction = new Class({
create: function(){},
disable: function(){
this.el.addClass('disable')
if(this.el)
this.el.addClass('disable')
},
enable: function(){
this.el.removeClass('disable')
if(this.el)
this.el.removeClass('disable')
},
getTitle: function(){
@ -252,35 +254,38 @@ MA.Release = new Class({
});
if(self.last_release)
self.release_container.getElement('#release_'+self.last_release.id).addClass('last_release');
self.release_container.getElements('#release_'+self.last_release.id).addClass('last_release');
if(self.next_release)
self.release_container.getElement('#release_'+self.next_release.id).addClass('next_release');
self.release_container.getElements('#release_'+self.next_release.id).addClass('next_release');
if(self.next_release || (self.last_release && ['ignored', 'failed'].indexOf(self.last_release.status.identifier) === false)){
self.trynext_container = new Element('div.buttons.try_container').inject(self.release_container, 'top');
var nr = self.next_release,
lr = self.last_release;
self.trynext_container.adopt(
new Element('span.or', {
'text': 'This movie is snatched, if anything went wrong, download'
}),
self.last_release ? new Element('a.button.orange', {
lr ? new Element('a.button.orange', {
'text': 'the same release again',
'events': {
'click': function(){
self.download(self.last_release);
self.download(lr);
}
}
}) : null,
self.next_release && self.last_release ? new Element('span.or', {
nr && lr ? new Element('span.or', {
'text': ','
}) : null,
self.next_release ? [new Element('a.button.green', {
'text': self.last_release ? 'another release' : 'the best release',
nr ? [new Element('a.button.green', {
'text': lr ? 'another release' : 'the best release',
'events': {
'click': function(){
self.download(self.next_release);
self.download(nr);
}
}
}),
@ -362,19 +367,25 @@ MA.Release = new Class({
var release_el = self.release_container.getElement('#release_'+release.id),
icon = release_el.getElement('.download.icon2');
icon.addClass('icon spinner').removeClass('download');
if(icon)
icon.addClass('icon spinner').removeClass('download');
Api.request('release.download', {
'data': {
'id': release.id
},
'onComplete': function(json){
icon.removeClass('icon spinner');
if(icon)
icon.removeClass('icon spinner');
if(json.success)
icon.addClass('completed');
if(json.success){
if(icon)
icon.addClass('completed');
release_el.getElement('.release_status').set('text', 'snatched');
}
else
icon.addClass('attention').set('title', 'Something went wrong when downloading, please check logs.');
if(icon)
icon.addClass('attention').set('title', 'Something went wrong when downloading, please check logs.');
}
});
},
@ -388,11 +399,11 @@ MA.Release = new Class({
},
'onComplete': function(){
var el = release.el;
if(el.hasClass('failed') || el.hasClass('ignored')){
if(el && (el.hasClass('failed') || el.hasClass('ignored'))){
el.removeClass('failed').removeClass('ignored');
el.getElement('.release_status').set('text', 'available');
}
else {
else if(el) {
el.addClass('ignored');
el.getElement('.release_status').set('text', 'ignored');
}

18
couchpotato/core/media/movie/_base/static/movie.js

@ -181,18 +181,18 @@ var Movie = new Class({
// Add releases
if(self.data.releases)
self.data.releases.each(function(release){
var q = self.quality.getElement('.q_id'+ release.quality_id),
status = Status.get(release.status_id);
if(!q && (status.identifier == 'snatched' || status.identifier == 'done'))
var q = self.addQuality(release.quality_id)
if (status && q && !q.hasClass(status.identifier)){
q.addClass(status.identifier);
q.set('title', (q.get('title') ? q.get('title') : '') + ' status: '+ status.label)
}
});
Object.each(self.options.actions, function(action, key){
@ -256,7 +256,8 @@ var Movie = new Class({
self.el.removeEvents('outerClick')
setTimeout(function(){
self.el.getElements('> :not(.data):not(.poster):not(.movie_container)').hide();
if(self.el)
self.el.getElements('> :not(.data):not(.poster):not(.movie_container)').hide();
}, 600);
self.data_container.removeClass('hide_right');
@ -266,9 +267,10 @@ var Movie = new Class({
changeView: function(new_view){
var self = this;
self.el
.removeClass(self.view+'_view')
.addClass(new_view+'_view')
if(self.el)
self.el
.removeClass(self.view+'_view')
.addClass(new_view+'_view')
self.view = new_view;
},

5
couchpotato/core/media/movie/_base/static/search.css

@ -59,6 +59,11 @@
.search_form.shown .input input {
opacity: 1;
}
.search_form input::-ms-clear {
width : 0;
height: 0;
}
@media all and (max-width: 480px) {
.search_form .input input {

121
couchpotato/core/media/movie/searcher/main.py

@ -1,7 +1,7 @@
from couchpotato import get_session
from couchpotato.api import addApiView
from couchpotato.core.event import addEvent, fireEvent, fireEventAsync
from couchpotato.core.helpers.encoding import simplifyString, toUnicode
from couchpotato.core.helpers.encoding import simplifyString, toUnicode, ss
from couchpotato.core.helpers.variable import md5, getTitle, splitString, \
possibleTitles, getImdb
from couchpotato.core.logger import CPLog
@ -29,9 +29,10 @@ class MovieSearcher(SearcherBase, MovieTypeBase):
addEvent('movie.searcher.all', self.searchAll)
addEvent('movie.searcher.all_view', self.searchAllView)
addEvent('movie.searcher.single', self.single)
addEvent('movie.searcher.correct_movie', self.correctMovie)
addEvent('movie.searcher.try_next_release', self.tryNextRelease)
addEvent('movie.searcher.could_be_released', self.couldBeReleased)
addEvent('searcher.correct_release', self.correctRelease)
addEvent('searcher.get_search_title', self.getSearchTitle)
addApiView('movie.searcher.try_next', self.tryNextReleaseView, docs = {
'desc': 'Marks the snatched results as ignored and try the next best release',
@ -167,64 +168,18 @@ class MovieSearcher(SearcherBase, MovieTypeBase):
log.info('Search for %s in %s', (default_title, quality_type['quality']['label']))
quality = fireEvent('quality.single', identifier = quality_type['quality']['identifier'], single = True)
results = []
for search_protocol in search_protocols:
protocol_results = fireEvent('provider.search.%s.movie' % search_protocol, movie, quality, merge = True)
if protocol_results:
results += protocol_results
sorted_results = sorted(results, key = lambda k: k['score'], reverse = True)
if len(sorted_results) == 0:
results = fireEvent('searcher.search', search_protocols, movie, quality, single = True)
if len(results) == 0:
log.debug('Nothing found for %s in %s', (default_title, quality_type['quality']['label']))
download_preference = self.conf('preferred_method', section = 'searcher')
if download_preference != 'both':
sorted_results = sorted(sorted_results, key = lambda k: k['protocol'][:3], reverse = (download_preference == 'torrent'))
# Check if movie isn't deleted while searching
if not db.query(Media).filter_by(id = movie.get('id')).first():
break
# Add them to this movie releases list
for nzb in sorted_results:
nzb_identifier = md5(nzb['url'])
found_releases.append(nzb_identifier)
rls = db.query(Release).filter_by(identifier = nzb_identifier).first()
if not rls:
rls = Release(
identifier = nzb_identifier,
movie_id = movie.get('id'),
quality_id = quality_type.get('quality_id'),
status_id = available_status.get('id')
)
db.add(rls)
else:
[db.delete(old_info) for old_info in rls.info]
rls.last_edit = int(time.time())
db.commit()
for info in nzb:
try:
if not isinstance(nzb[info], (str, unicode, int, long, float)):
continue
found_releases += fireEvent('searcher.create_releases', results, movie, quality_type, single = True)
rls_info = ReleaseInfo(
identifier = info,
value = toUnicode(nzb[info])
)
rls.info.append(rls_info)
except InterfaceError:
log.debug('Couldn\'t add %s to ReleaseInfo: %s', (info, traceback.format_exc()))
db.commit()
nzb['status_id'] = rls.status_id
for nzb in sorted_results:
for nzb in results:
if not quality_type.get('finish', False) and quality_type.get('wait_for', 0) > 0 and nzb.get('age') <= quality_type.get('wait_for', 0):
log.info('Ignored, waiting %s days: %s', (quality_type.get('wait_for'), nzb['name']))
continue
@ -265,7 +220,11 @@ class MovieSearcher(SearcherBase, MovieTypeBase):
return ret
def correctMovie(self, nzb = None, movie = None, quality = None, **kwargs):
def correctRelease(self, nzb = None, media = None, quality = None, **kwargs):
if media.get('type') != 'movie': return
media_title = fireEvent('searcher.get_search_title', media, single = True)
imdb_results = kwargs.get('imdb_results', False)
retention = Env.setting('retention', section = 'nzb')
@ -274,50 +233,14 @@ class MovieSearcher(SearcherBase, MovieTypeBase):
log.info2('Wrong: Outside retention, age is %s, needs %s or lower: %s', (nzb['age'], retention, nzb['name']))
return False
movie_name = getTitle(movie['library'])
movie_words = re.split('\W+', simplifyString(movie_name))
nzb_name = simplifyString(nzb['name'])
nzb_words = re.split('\W+', nzb_name)
# Make sure it has required words
required_words = splitString(self.conf('required_words', section = 'searcher').lower())
try: required_words = list(set(required_words + splitString(movie['category']['required'].lower())))
except: pass
req_match = 0
for req_set in required_words:
req = splitString(req_set, '&')
req_match += len(list(set(nzb_words) & set(req))) == len(req)
if len(required_words) > 0 and req_match == 0:
log.info2('Wrong: Required word missing: %s', nzb['name'])
return False
# Ignore releases
ignored_words = splitString(self.conf('ignored_words', section = 'searcher').lower())
try: ignored_words = list(set(ignored_words + splitString(movie['category']['ignored'].lower())))
except: pass
ignored_match = 0
for ignored_set in ignored_words:
ignored = splitString(ignored_set, '&')
ignored_match += len(list(set(nzb_words) & set(ignored))) == len(ignored)
if len(ignored_words) > 0 and ignored_match:
log.info2("Wrong: '%s' contains 'ignored words'", (nzb['name']))
return False
# Ignore porn stuff
pron_tags = ['xxx', 'sex', 'anal', 'tits', 'fuck', 'porn', 'orgy', 'milf', 'boobs', 'erotica', 'erotic']
pron_words = list(set(nzb_words) & set(pron_tags) - set(movie_words))
if pron_words:
log.info('Wrong: %s, probably pr0n', (nzb['name']))
# Check for required and ignored words
if not fireEvent('searcher.correct_words', nzb['name'], media, single = True):
return False
preferred_quality = fireEvent('quality.single', identifier = quality['identifier'], single = True)
# Contains lower quality string
if fireEvent('searcher.contains_other_quality', nzb, movie_year = movie['library']['year'], preferred_quality = preferred_quality, single = True):
if fireEvent('searcher.contains_other_quality', nzb, movie_year = media['library']['year'], preferred_quality = preferred_quality, single = True):
log.info2('Wrong: %s, looking for %s', (nzb['name'], quality['label']))
return False
@ -347,23 +270,23 @@ class MovieSearcher(SearcherBase, MovieTypeBase):
return True
# Check if nzb contains imdb link
if getImdb(nzb.get('description', '')) == movie['library']['identifier']:
if getImdb(nzb.get('description', '')) == media['library']['identifier']:
return True
for raw_title in movie['library']['titles']:
for raw_title in media['library']['titles']:
for movie_title in possibleTitles(raw_title['title']):
movie_words = re.split('\W+', simplifyString(movie_title))
if fireEvent('searcher.correct_name', nzb['name'], movie_title, single = True):
# if no IMDB link, at least check year range 1
if len(movie_words) > 2 and fireEvent('searcher.correct_year', nzb['name'], movie['library']['year'], 1, single = True):
if len(movie_words) > 2 and fireEvent('searcher.correct_year', nzb['name'], media['library']['year'], 1, single = True):
return True
# if no IMDB link, at least check year
if len(movie_words) <= 2 and fireEvent('searcher.correct_year', nzb['name'], movie['library']['year'], 0, single = True):
if len(movie_words) <= 2 and fireEvent('searcher.correct_year', nzb['name'], media['library']['year'], 0, single = True):
return True
log.info("Wrong: %s, undetermined naming. Looking for '%s (%s)'", (nzb['name'], movie_name, movie['library']['year']))
log.info("Wrong: %s, undetermined naming. Looking for '%s (%s)'", (nzb['name'], media_title, media['library']['year']))
return False
def couldBeReleased(self, is_pre_release, dates, year = None):
@ -434,5 +357,9 @@ class MovieSearcher(SearcherBase, MovieTypeBase):
log.error('Failed searching for next release: %s', traceback.format_exc())
return False
def getSearchTitle(self, media):
if media['type'] == 'movie':
return getTitle(media['library'])
class SearchSetupError(Exception):
pass

32
couchpotato/core/media/show/_base/main.py

@ -42,6 +42,12 @@ class ShowBase(MediaBase):
'shows': array, shows found,
}"""}
})
addApiView('show.refresh', self.refresh, docs = {
'desc': 'Refresh a show, season or episode by id',
'params': {
'id': {'desc': 'Show, Season or Episode ID(s) you want to refresh.', 'type': 'int (comma separated)'},
}
})
addApiView('show.add', self.addView, docs = {
'desc': 'Add new movie to the wanted list',
'params': {
@ -53,6 +59,26 @@ class ShowBase(MediaBase):
addEvent('show.add', self.add)
def refresh(self, id = '', **kwargs):
db = get_session()
for x in splitString(id):
media = db.query(Media).filter_by(id = x).first()
if media:
# Get current selected title
default_title = ''
for title in media.library.titles:
if title.default: default_title = title.title
fireEvent('notify.frontend', type = '%s.busy.%s' % (media.type, x), data = True)
fireEventAsync('library.update.%s' % media.type, identifier = media.library.identifier, default_title = default_title, force = True, on_complete = self.createOnComplete(x))
db.expire_all()
return {
'success': True,
}
def search(self, q = '', **kwargs):
cache_key = u'%s/%s' % (__name__, simplifyString(q))
shows = Env.get('cache').get(cache_key)
@ -273,12 +299,12 @@ class ShowBase(MediaBase):
db.expire_all()
return show_dict
def createOnComplete(self, show_id):
def createOnComplete(self, id):
def onComplete():
db = get_session()
show = db.query(Media).filter_by(id = show_id).first()
fireEventAsync('show.searcher.single', show.to_dict(self.default_dict), on_complete = self.createNotifyFront(show_id))
media = db.query(Media).filter_by(id = id).first()
fireEventAsync('show.searcher.single', media.to_dict(self.default_dict), on_complete = self.createNotifyFront(id))
db.expire_all()
return onComplete

263
couchpotato/core/media/show/searcher/main.py

@ -1,5 +1,14 @@
import pprint
import re
from couchpotato import get_session, Env
from couchpotato.core.event import addEvent, fireEvent
from couchpotato.core.helpers.encoding import simplifyString
from couchpotato.core.helpers.variable import getTitle, tryInt, possibleTitles
from couchpotato.core.logger import CPLog
from couchpotato.core.media._base.searcher.main import SearchSetupError
from couchpotato.core.plugins.base import Plugin
from couchpotato.core.settings.model import Media, Library
from caper import Caper
log = CPLog(__name__)
@ -8,5 +17,257 @@ class ShowSearcher(Plugin):
in_progress = False
# TODO come back to this later, think this could be handled better
quality_map = {
'webdl_1080p': {'resolution': ['1080p'], 'source': ['webdl']},
'webdl_720p': {'resolution': ['720p'], 'source': ['webdl']},
'hdtv_720p': {'resolution': ['720p'], 'source': ['hdtv']},
'hdtv_sd': {'resolution': ['480p', None], 'source': ['hdtv']},
}
def __init__(self):
pass
super(ShowSearcher, self).__init__()
addEvent('show.searcher.single', self.single)
addEvent('searcher.correct_release', self.correctRelease)
addEvent('searcher.get_search_title', self.getSearchTitle)
self.caper = Caper()
def _lookupMedia(self, media):
db = get_session()
media_library = db.query(Library).filter_by(id = media['library_id']).first()
show = None
season = None
episode = None
if media['type'] == 'episode':
show = media_library.parent.parent
season = media_library.parent
episode = media_library
if media['type'] == 'season':
show = media_library.parent
season = media_library
if media['type'] == 'show':
show = media_library
return show, season, episode
def single(self, media, search_protocols = None):
pprint.pprint(media)
if media['type'] == 'show':
# TODO handle show searches (scan all seasons)
return
# Find out search type
try:
if not search_protocols:
search_protocols = fireEvent('searcher.protocols', single = True)
except SearchSetupError:
return
done_status = fireEvent('status.get', 'done', single = True)
if not media['profile'] or media['status_id'] == done_status.get('id'):
log.debug('Episode doesn\'t have a profile or already done, assuming in manage tab.')
return
db = get_session()
pre_releases = fireEvent('quality.pre_releases', single = True)
available_status, ignored_status, failed_status = fireEvent('status.get', ['available', 'ignored', 'failed'], single = True)
found_releases = []
too_early_to_search = []
default_title = self.getSearchTitle(media['library'])
if not default_title:
log.error('No proper info found for episode, removing it from library to cause it from having more issues.')
#fireEvent('episode.delete', episode['id'], single = True)
return
show, season, episode = self._lookupMedia(media)
if show is None or season is None:
log.error('Unable to find show or season library in database, missing required data for searching')
return
fireEvent('notify.frontend', type = 'show.searcher.started.%s' % media['id'], data = True, message = 'Searching for "%s"' % default_title)
ret = False
for quality_type in media['profile']['types']:
# TODO check air date?
#if not self.conf('always_search') and not self.couldBeReleased(quality_type['quality']['identifier'] in pre_releases, release_dates, movie['library']['year']):
# too_early_to_search.append(quality_type['quality']['identifier'])
# continue
has_better_quality = 0
# See if better quality is available
for release in media['releases']:
if release['quality']['order'] <= quality_type['quality']['order'] and release['status_id'] not in [available_status.get('id'), ignored_status.get('id'), failed_status.get('id')]:
has_better_quality += 1
# Don't search for quality lower then already available.
if has_better_quality is 0:
log.info('Search for %s S%02d%s in %s', (getTitle(show), season.season_number, "E%02d" % episode.episode_number if episode else "", quality_type['quality']['label']))
quality = fireEvent('quality.single', identifier = quality_type['quality']['identifier'], single = True)
results = fireEvent('searcher.search', search_protocols, media, quality, single = True)
if len(results) == 0:
log.debug('Nothing found for %s in %s', (default_title, quality_type['quality']['label']))
# Check if movie isn't deleted while searching
if not db.query(Media).filter_by(id = media.get('id')).first():
break
# Add them to this movie releases list
found_releases += fireEvent('searcher.create_releases', results, media, quality_type, single = True)
log.info('%d results found' % len(results))
def correctRelease(self, release = None, media = None, quality = None, **kwargs):
if media.get('type') not in ['season', 'episode']: return
retention = Env.setting('retention', section = 'nzb')
if release.get('seeders') is None and 0 < retention < release.get('age', 0):
log.info2('Wrong: Outside retention, age is %s, needs %s or lower: %s', (release['age'], retention, release['name']))
return False
# Check for required and ignored words
if not fireEvent('searcher.correct_words', release['name'], media, single = True):
return False
show, season, episode = self._lookupMedia(media)
if show is None or season is None:
log.error('Unable to find show or season library in database, missing required data for searching')
return
release_info = self.caper.parse(release['name'])
if len(release_info.chains) < 1:
log.info2('Wrong: %s, unable to parse release name (no chains)', release['name'])
return False
# TODO look at all chains
chain = release_info.chains[0]
if not self.correctQuality(chain, quality['identifier']):
log.info('Wrong: %s, quality does not match', release['name'])
return False
if not self.correctIdentifier(chain, media):
log.info('Wrong: %s, identifier does not match', release['name'])
return False
if 'show_name' not in chain.info or not len(chain.info['show_name']):
log.info('Wrong: %s, missing show name in parsed result', release['name'])
return False
chain_words = [x.lower() for x in chain.info['show_name']]
chain_title = ' '.join(chain_words)
library_title = None
# Check show titles match
for raw_title in show.titles:
for valid_words in [x.split(' ') for x in possibleTitles(raw_title.title)]:
if not library_title:
library_title = ' '.join(valid_words)
if valid_words == chain_words:
return True
log.info("Wrong: title '%s', undetermined show naming. Looking for '%s (%s)'", (chain_title, library_title, media['library']['year']))
return False
def correctQuality(self, chain, quality_identifier):
if quality_identifier not in self.quality_map:
log.info2('Wrong: unknown preferred quality %s for TV searching', quality_identifier)
return False
if 'video' not in chain.info:
log.info2('Wrong: no video tags found')
return False
video_tags = self.quality_map[quality_identifier]
if not self.chainMatches(chain, 'video', video_tags):
log.info2('Wrong: %s tags not in chain', video_tags)
return False
return True
def correctIdentifier(self, chain, media):
required_id = self.getIdentifier(media['library'], 'season_number', 'episode_number')
if 'identifier' not in chain.info:
return False
# TODO could be handled better?
if len(chain.info['identifier']) != 1:
return False
identifier = chain.info['identifier'][0]
# TODO air by date episodes
release_id = self.getIdentifier(identifier, 'season', 'episode')
if required_id != release_id:
log.info2('Wrong: required identifier %s does not match release identifier %s', (str(required_id), str(release_id)))
return False
return True
def getIdentifier(self, d, episode_key, season_key):
return (
tryInt(d.get(season_key), None) if season_key in d else None,
tryInt(d.get(episode_key), None) if episode_key in d else None
)
def chainMatches(self, chain, group, tags):
found_tags = []
for match in chain.info[group]:
for ck, cv in match.items():
if ck in tags and self.cleanMatchValue(cv) in tags[ck]:
found_tags.append(ck)
if set(tags.keys()) == set(found_tags):
return True
return set([key for key, value in tags.items() if value]) == set(found_tags)
def cleanMatchValue(self, value):
value = value.lower()
value = value.strip()
for ch in [' ', '-', '.']:
value = value.replace(ch, '')
return value
def getSearchTitle(self, media):
show, season, episode = self._lookupMedia(media)
if show is None:
return None
name = ''
if season is not None:
name = ' S%02d' % season.season_number
if episode is not None:
name += 'E%02d' % episode.episode_number
show_title = getTitle(show)
if not show_title:
return None
return show_title + name

7
couchpotato/core/notifications/core/main.py

@ -198,13 +198,16 @@ class CoreNotifier(Notification):
def removeListener(self, callback):
self.m_lock.acquire()
new_listeners = []
for list_tuple in self.listeners:
try:
listener, last_id = list_tuple
if listener == callback:
self.listeners.remove(list_tuple)
if listener != callback:
new_listeners.append(list_tuple)
except:
log.debug('Failed removing listener: %s', traceback.format_exc())
self.listeners = new_listeners
self.m_lock.release()
def cleanMessages(self):

2
couchpotato/core/notifications/core/static/notification.js

@ -157,7 +157,7 @@ var NotificationBase = new Class({
}
// Restart poll
self.startPoll()
self.startPoll.delay(1500, self);
},
showMessage: function(message, sticky, data){

4
couchpotato/core/plugins/base.py

@ -259,7 +259,7 @@ class Plugin(object):
def getCache(self, cache_key, url = None, **kwargs):
cache_key_md5 = md5(ss(cache_key))
cache_key_md5 = md5(cache_key)
cache = Env.get('cache').get(cache_key_md5)
if cache:
if not Env.get('dev'): log.debug('Getting cache %s', cache_key)
@ -284,7 +284,7 @@ class Plugin(object):
return ''
def setCache(self, cache_key, value, timeout = 300):
cache_key_md5 = md5(ss(cache_key))
cache_key_md5 = md5(cache_key)
log.debug('Setting cache %s', cache_key)
Env.get('cache').set(cache_key_md5, value, timeout)
return value

11
couchpotato/core/plugins/category/main.py

@ -54,12 +54,11 @@ class CategoryPlugin(Plugin):
db.add(c)
c.order = kwargs.get('order', c.order if c.order else 0)
c.label = toUnicode(kwargs.get('label'))
c.path = toUnicode(kwargs.get('path'))
c.ignored = toUnicode(kwargs.get('ignored'))
c.preferred = toUnicode(kwargs.get('preferred'))
c.required = toUnicode(kwargs.get('required'))
c.destination = toUnicode(kwargs.get('destination'))
c.label = toUnicode(kwargs.get('label', ''))
c.ignored = toUnicode(kwargs.get('ignored', ''))
c.preferred = toUnicode(kwargs.get('preferred', ''))
c.required = toUnicode(kwargs.get('required', ''))
c.destination = toUnicode(kwargs.get('destination', ''))
db.commit()

11
couchpotato/core/plugins/dashboard/main.py

@ -4,9 +4,10 @@ from couchpotato.core.event import fireEvent
from couchpotato.core.helpers.variable import splitString, tryInt
from couchpotato.core.logger import CPLog
from couchpotato.core.plugins.base import Plugin
from couchpotato.core.settings.model import Media, Library, LibraryTitle
from couchpotato.core.settings.model import Media, Library, LibraryTitle, \
Release
from sqlalchemy.orm import joinedload_all
from sqlalchemy.sql.expression import asc
from sqlalchemy.sql.expression import asc, or_
import random as rndm
import time
@ -48,12 +49,14 @@ class Dashboard(Plugin):
limit = tryInt(splt[0])
# Get all active movies
active_status = fireEvent('status.get', ['active'], single = True)
active_status, ignored_status = fireEvent('status.get', ['active', 'ignored'], single = True)
q = db.query(Media) \
.join(Library) \
.outerjoin(Media.releases) \
.filter(Media.status_id == active_status.get('id')) \
.with_entities(Media.id, Media.profile_id, Library.info, Library.year) \
.group_by(Media.id)
.group_by(Media.id) \
.filter(or_(Release.id == None, Release.status_id == ignored_status.get('id')))
if not random:
q = q.join(LibraryTitle) \

10
couchpotato/core/plugins/file/main.py

@ -71,11 +71,11 @@ class FileManager(Plugin):
db = get_session()
for root, dirs, walk_files in os.walk(Env.get('cache_dir')):
for filename in walk_files:
if root == python_cache or 'minified' in root or 'version' in filename or 'temp_updater' in root: continue
file_path = os.path.join(root, filename)
f = db.query(File).filter(File.path == toUnicode(file_path)).first()
if not f:
os.remove(file_path)
if os.path.splitext(filename)[1] in ['.png', '.jpg', '.jpeg']:
file_path = os.path.join(root, filename)
f = db.query(File).filter(File.path == toUnicode(file_path)).first()
if not f:
os.remove(file_path)
except:
log.error('Failed removing unused file: %s', traceback.format_exc())

10
couchpotato/core/plugins/quality/main.py

@ -26,7 +26,15 @@ class QualityPlugin(Plugin):
{'identifier': 'r5', 'size': (600, 1000), 'label': 'R5', 'alternative': ['r6'], 'allow': ['dvdr'], 'ext':['avi', 'mpg', 'mpeg']},
{'identifier': 'tc', 'size': (600, 1000), 'label': 'TeleCine', 'alternative': ['telecine'], 'allow': [], 'ext':['avi', 'mpg', 'mpeg']},
{'identifier': 'ts', 'size': (600, 1000), 'label': 'TeleSync', 'alternative': ['telesync', 'hdts'], 'allow': [], 'ext':['avi', 'mpg', 'mpeg']},
{'identifier': 'cam', 'size': (600, 1000), 'label': 'Cam', 'alternative': ['camrip', 'hdcam'], 'allow': [], 'ext':['avi', 'mpg', 'mpeg']}
{'identifier': 'cam', 'size': (600, 1000), 'label': 'Cam', 'alternative': ['camrip', 'hdcam'], 'allow': [], 'ext':['avi', 'mpg', 'mpeg']},
# TODO come back to this later, think this could be handled better
# WEB-DL
{'identifier': 'webdl_1080p', 'hd': True, 'size': (800, 5000), 'label': 'WEB-DL - 1080p', 'width': 1920, 'height': 1080, 'alternative': [], 'allow': [], 'ext':['mkv']},
{'identifier': 'webdl_720p', 'hd': True, 'size': (800, 5000), 'label': 'WEB-DL - 720p', 'width': 1280, 'height': 720, 'alternative': [], 'allow': [], 'ext':['mkv']},
# HDTV
{'identifier': 'hdtv_720p', 'hd': True, 'size': (800, 5000), 'label': 'HDTV - 720p', 'width': 1280, 'height': 720, 'alternative': [], 'allow': [], 'ext':['mkv']},
{'identifier': 'hdtv_sd', 'hd': False, 'size': (100, 1000), 'label': 'HDTV - SD', 'width': 720, 'alternative': [], 'allow': [], 'ext':['mkv', 'mp4', 'avi']},
]
pre_releases = ['cam', 'ts', 'tc', 'r5', 'scr']

4
couchpotato/core/plugins/release/main.py

@ -190,7 +190,7 @@ class Release(Plugin):
if item.get('protocol') != 'torrent_magnet':
item['download'] = provider.loginDownload if provider.urls.get('login') else provider.download
success = fireEvent('searcher.download', data = item, movie = rel.movie.to_dict({
success = fireEvent('searcher.download', data = item, movie = rel.media.to_dict({
'profile': {'types': {'quality': {}}},
'releases': {'status': {}, 'quality': {}},
'library': {'titles': {}, 'files':{}},
@ -224,7 +224,7 @@ class Release(Plugin):
releases_raw = db.query(Relea) \
.options(joinedload_all('info')) \
.options(joinedload_all('files')) \
.filter(Relea.movie_id == id) \
.filter(Relea.media_id == id) \
.all()
releases = [r.to_dict({'info':{}, 'files':{}}) for r in releases_raw]

8
couchpotato/core/plugins/renamer/__init__.py

@ -120,7 +120,13 @@ config = [{
{
'advanced': True,
'name': 'separator',
'label': 'Separator',
'label': 'File-Separator',
'description': 'Replace all the spaces with a character. Example: ".", "-" (without quotes). Leave empty to use spaces.',
},
{
'advanced': True,
'name': 'foldersep',
'label': 'Folder-Separator',
'description': 'Replace all the spaces with a character. Example: ".", "-" (without quotes). Leave empty to use spaces.',
},
{

14
couchpotato/core/plugins/renamer/main.py

@ -174,7 +174,7 @@ class Renamer(Plugin):
# Overwrite destination when set in category
destination = self.conf('to')
for movie in library_ent.media:
if movie.category and movie.category.destination and len(movie.category.destination) > 0:
if movie.category and movie.category.destination and len(movie.category.destination) > 0 and movie.category.destination != 'None':
destination = movie.category.destination
log.debug('Setting category destination for "%s": %s' % (movie_title, destination))
else:
@ -252,7 +252,7 @@ class Renamer(Plugin):
replacements['cd_nr'] = cd if multiple else ''
# Naming
final_folder_name = self.doReplace(folder_name, replacements)
final_folder_name = self.doReplace(folder_name, replacements, folder = True)
final_file_name = self.doReplace(file_name, replacements)
replacements['filename'] = final_file_name[:-(len(getExt(final_file_name)) + 1)]
@ -508,7 +508,7 @@ class Renamer(Plugin):
for extra in set(filter(test, group['files'][extra_type])):
replacements['ext'] = getExt(extra)
final_folder_name = self.doReplace(folder_name, replacements, remove_multiple = remove_multiple)
final_folder_name = self.doReplace(folder_name, replacements, remove_multiple = remove_multiple, folder = True)
final_file_name = self.doReplace(file_name, replacements, remove_multiple = remove_multiple)
rename_files[extra] = os.path.join(destination, final_folder_name, final_file_name)
@ -603,7 +603,7 @@ Remove it if you want it to be renamed (again, or at least let it try again)
return True
def doReplace(self, string, replacements, remove_multiple = False):
def doReplace(self, string, replacements, remove_multiple = False, folder = False):
"""
replace confignames with the real thing
"""
@ -623,7 +623,7 @@ Remove it if you want it to be renamed (again, or at least let it try again)
replaced = re.sub(r"[\x00:\*\?\"<>\|]", '', replaced)
sep = self.conf('separator')
sep = self.conf('foldersep') if folder else self.conf('separator')
return self.replaceDoubles(replaced.lstrip('. ')).replace(' ', ' ' if not sep else sep)
def replaceDoubles(self, string):
@ -678,7 +678,7 @@ Remove it if you want it to be renamed (again, or at least let it try again)
for rel in rels:
rel_dict = rel.to_dict({'info': {}})
movie_dict = fireEvent('movie.get', rel.movie_id, single = True)
movie_dict = fireEvent('movie.get', rel.media_id, single = True)
# check status
nzbname = self.createNzbName(rel_dict['info'], movie_dict)
@ -734,7 +734,7 @@ Remove it if you want it to be renamed (again, or at least let it try again)
db.commit()
if self.conf('next_on_failed'):
fireEvent('movie.searcher.try_next_release', movie_id = rel.movie_id)
fireEvent('movie.searcher.try_next_release', media_id = rel.media_id)
elif item['status'] == 'completed':
log.info('Download of %s completed!', item['name'])
if self.statusInfoComplete(item):

27
couchpotato/core/plugins/scanner/main.py

@ -565,7 +565,7 @@ class Scanner(Plugin):
if not imdb_id:
try:
for nf in files['nfo']:
imdb_id = getImdb(nf)
imdb_id = getImdb(nf, check_inside = True)
if imdb_id:
log.debug('Found movie via nfo file: %s', nf)
nfo_file = nf
@ -578,7 +578,7 @@ class Scanner(Plugin):
try:
for filetype in files:
for filetype_file in files[filetype]:
imdb_id = getImdb(filetype_file, check_inside = False)
imdb_id = getImdb(filetype_file)
if imdb_id:
log.debug('Found movie via imdb in filename: %s', nfo_file)
break
@ -819,6 +819,13 @@ class Scanner(Plugin):
return None
def findYear(self, text):
# Search year inside () or [] first
matches = re.search('(\(|\[)(?P<year>19[0-9]{2}|20[0-9]{2})(\]|\))', text)
if matches:
return matches.group('year')
# Search normal
matches = re.search('(?P<year>19[0-9]{2}|20[0-9]{2})', text)
if matches:
return matches.group('year')
@ -831,11 +838,11 @@ class Scanner(Plugin):
guess = {}
if file_name:
try:
guess = guess_movie_info(toUnicode(file_name))
if guess.get('title') and guess.get('year'):
guessit = guess_movie_info(toUnicode(file_name))
if guessit.get('title') and guessit.get('year'):
guess = {
'name': guess.get('title'),
'year': guess.get('year'),
'name': guessit.get('title'),
'year': guessit.get('year'),
}
except:
log.debug('Could not detect via guessit "%s": %s', (file_name, traceback.format_exc()))
@ -843,7 +850,13 @@ class Scanner(Plugin):
# Backup to simple
cleaned = ' '.join(re.split('\W+', simplifyString(release_name)))
cleaned = re.sub(self.clean, ' ', cleaned)
year = self.findYear(cleaned)
for year_str in [file_name, cleaned]:
if not year_str: continue
year = self.findYear(year_str)
if year:
break
cp_guess = {}
if year: # Split name on year

4
couchpotato/core/plugins/suggestion/main.py

@ -73,7 +73,7 @@ class Suggestion(Plugin):
def updateSuggestionCache(self, ignore_imdb = None, limit = 6, ignored = None, seen = None):
# Combine with previous suggestion_cache
cached_suggestion = self.getCache('suggestion_cached')
cached_suggestion = self.getCache('suggestion_cached') or []
new_suggestions = []
ignored = [] if not ignored else ignored
seen = [] if not seen else seen
@ -102,6 +102,6 @@ class Suggestion(Plugin):
if suggestions:
new_suggestions.extend(suggestions)
self.setCache('suggestion_cached', new_suggestions, timeout = 6048000)
self.setCache('suggestion_cached', new_suggestions, timeout = 3024000)
return new_suggestions

93
couchpotato/core/plugins/suggestion/static/suggest.js

@ -58,54 +58,59 @@ var SuggestList = new Class({
var self = this;
if(!json) return;
if(!json || json.count == 0){
self.el.hide();
}
else {
Object.each(json.suggestions, function(movie){
var m = new Block.Search.Item(movie, {
'onAdded': function(){
self.afterAdded(m, movie)
}
});
m.data_container.grab(
new Element('div.actions').adopt(
new Element('a.add.icon2', {
'title': 'Add movie with your default quality',
'data-add': movie.imdb,
'events': {
'click': m.showOptions.bind(m)
}
}),
$(new MA.IMDB(m)),
$(new MA.Trailer(m, {
'height': 150
})),
new Element('a.delete.icon2', {
'title': 'Don\'t suggest this movie again',
'data-ignore': movie.imdb
}),
new Element('a.eye-open.icon2', {
'title': 'Seen it, like it, don\'t add',
'data-seen': movie.imdb
})
)
);
m.data_container.removeEvents('click');
// Add rating
m.info_container.adopt(
m.rating = m.info.rating && m.info.rating.imdb.length == 2 && parseFloat(m.info.rating.imdb[0]) > 0 ? new Element('span.rating', {
'text': parseFloat(m.info.rating.imdb[0]),
'title': parseInt(m.info.rating.imdb[1]) + ' votes'
}) : null,
m.genre = m.info.genres && m.info.genres.length > 0 ? new Element('span.genres', {
'text': m.info.genres.slice(0, 3).join(', ')
}) : null
)
Object.each(json.suggestions, function(movie){
$(m).inject(self.el);
var m = new Block.Search.Item(movie, {
'onAdded': function(){
self.afterAdded(m, movie)
}
});
m.data_container.grab(
new Element('div.actions').adopt(
new Element('a.add.icon2', {
'title': 'Add movie with your default quality',
'data-add': movie.imdb,
'events': {
'click': m.showOptions.bind(m)
}
}),
$(new MA.IMDB(m)),
$(new MA.Trailer(m, {
'height': 150
})),
new Element('a.delete.icon2', {
'title': 'Don\'t suggest this movie again',
'data-ignore': movie.imdb
}),
new Element('a.eye-open.icon2', {
'title': 'Seen it, like it, don\'t add',
'data-seen': movie.imdb
})
)
);
m.data_container.removeEvents('click');
// Add rating
m.info_container.adopt(
m.rating = m.info.rating && m.info.rating.imdb.length == 2 && parseFloat(m.info.rating.imdb[0]) > 0 ? new Element('span.rating', {
'text': parseFloat(m.info.rating.imdb[0]),
'title': parseInt(m.info.rating.imdb[1]) + ' votes'
}) : null,
m.genre = m.info.genres && m.info.genres.length > 0 ? new Element('span.genres', {
'text': m.info.genres.slice(0, 3).join(', ')
}) : null
)
$(m).inject(self.el);
});
}
self.fireEvent('loaded');

2
couchpotato/core/plugins/userscript/static/userscript.js

@ -96,7 +96,7 @@ var UserscriptSettingTab = new Class({
})
)
).setStyles({
'background-image': "url('"+Api.createUrl('static/userscript/userscript.png')+"')"
'background-image': "url('"+App.createUrl('static/plugin/userscript/userscript.png')+"')"
});
});

12
couchpotato/core/plugins/wizard/static/wizard.js

@ -24,9 +24,10 @@ Page.Wizard = new Class({
'title': 'What download apps are you using?',
'description': 'CP needs an external download app to work with. Choose one below. For more downloaders check settings after you have filled in the wizard. If your download app isn\'t in the list, use the default Blackhole.'
},
'providers': {
'searcher': {
'label': 'Providers',
'title': 'Are you registered at any of these sites?',
'description': 'CP uses these sites to search for movies. A few free are enabled by default, but it\'s always better to have a few more. Check settings for the full list of available providers.'
'description': 'CP uses these sites to search for movies. A few free are enabled by default, but it\'s always better to have more.'
},
'renamer': {
'title': 'Move & rename the movies after downloading?',
@ -38,7 +39,7 @@ Page.Wizard = new Class({
'<br />Once installed, just click the bookmarklet on a movie page and watch the magic happen ;)',
'content': function(){
return App.createUserscriptButtons().setStyles({
'background-image': "url('"+Api.createUrl('static/userscript/userscript.png')+"')"
'background-image': "url('"+App.createUrl('static/plugin/userscript/userscript.png')+"')"
})
}
},
@ -76,7 +77,7 @@ Page.Wizard = new Class({
)
}
},
groups: ['welcome', 'general', 'downloaders', 'searcher', 'providers', 'renamer', 'automation', 'finish'],
groups: ['welcome', 'general', 'downloaders', 'searcher', 'renamer', 'automation', 'finish'],
open: function(action, params){
var self = this;
@ -195,8 +196,7 @@ Page.Wizard = new Class({
self.el.getElement('.advanced_toggle').destroy();
// Hide retention
self.el.getElement('.tab_searcher').hide();
self.el.getElement('.t_searcher').hide();
self.el.getElement('.section_nzb').hide();
// Add pointer
new Element('.tab_wrapper').wraps(tabs);

44
couchpotato/core/providers/base.py

@ -1,6 +1,6 @@
from couchpotato.core.event import addEvent, fireEvent
from couchpotato.core.helpers.variable import tryFloat, mergeDicts, md5, \
possibleTitles, getTitle
possibleTitles, toIterable
from couchpotato.core.logger import CPLog
from couchpotato.core.plugins.base import Plugin
from couchpotato.environment import Env
@ -15,7 +15,6 @@ import xml.etree.ElementTree as XMLTree
log = CPLog(__name__)
class MultiProvider(Plugin):
def __init__(self):
@ -102,6 +101,7 @@ class YarrProvider(Provider):
type = 'movie'
cat_ids = {}
cat_ids_structure = None
cat_backup_id = None
sizeGb = ['gb', 'gib']
@ -183,7 +183,7 @@ class YarrProvider(Provider):
return 'try_next'
def search(self, movie, quality):
def search(self, media, quality):
if self.isDisabled():
return []
@ -195,15 +195,15 @@ class YarrProvider(Provider):
# Create result container
imdb_results = hasattr(self, '_search')
results = ResultList(self, movie, quality, imdb_results = imdb_results)
results = ResultList(self, media, quality, imdb_results = imdb_results)
# Do search based on imdb id
if imdb_results:
self._search(movie, quality, results)
self._search(media, quality, results)
# Search possible titles
else:
for title in possibleTitles(getTitle(movie['library'])):
self._searchOnTitle(title, movie, quality, results)
for title in possibleTitles(fireEvent('searcher.get_search_title', media, single = True)):
self._searchOnTitle(title, media, quality, results)
return results
@ -244,9 +244,32 @@ class YarrProvider(Provider):
return 0
def getCatId(self, identifier):
def _discoverCatIdStructure(self):
# Discover cat_ids structure (single or groups)
for group_name, group_cat_ids in self.cat_ids:
if len(group_cat_ids) > 0:
if type(group_cat_ids[0]) is tuple:
self.cat_ids_structure = 'group'
if type(group_cat_ids[0]) is str:
self.cat_ids_structure = 'single'
def getCatId(self, identifier, group = None):
cat_ids = self.cat_ids
if not self.cat_ids_structure:
self._discoverCatIdStructure()
# If cat_ids is in a 'groups' structure, locate the media group
if self.cat_ids_structure == 'group':
if not group:
raise ValueError("group is required on group cat_ids structure")
for group_type, group_cat_ids in cat_ids:
if group in toIterable(group_type):
cat_ids = group_cat_ids
for cats in self.cat_ids:
for cats in cat_ids:
ids, qualities = cats
if identifier in qualities:
return ids
@ -279,8 +302,7 @@ class ResultList(list):
new_result = self.fillResult(result)
is_correct_movie = fireEvent('movie.searcher.correct_movie',
nzb = new_result, movie = self.movie, quality = self.quality,
is_correct_movie = fireEvent('searcher.correct_release', new_result, self.movie, self.quality,
imdb_results = self.kwargs.get('imdb_results', False), single = True)
if is_correct_movie and new_result['id'] not in self.result_ids:

9
couchpotato/core/providers/info/themoviedb/main.py

@ -1,6 +1,5 @@
from couchpotato.core.event import addEvent
from couchpotato.core.helpers.encoding import simplifyString, toUnicode, ss
from couchpotato.core.helpers.variable import md5
from couchpotato.core.helpers.encoding import simplifyString, toUnicode
from couchpotato.core.logger import CPLog
from couchpotato.core.providers.info.base import MovieProvider
import tmdb3
@ -129,11 +128,9 @@ class TheMovieDb(MovieProvider):
movie_data['titles'].append(movie.originaltitle)
for alt in movie.alternate_titles:
alt_name = alt.title
if alt_name and not alt_name in movie_data['titles'] and alt_name.lower() != 'none' and alt_name is not None:
if alt_name and alt_name not in movie_data['titles'] and alt_name.lower() != 'none' and alt_name is not None:
movie_data['titles'].append(alt_name)
movie_data['titles'] = list(set(movie_data['titles']))
# Cache movie parsed
self.setCache(cache_key, movie_data)
@ -143,7 +140,7 @@ class TheMovieDb(MovieProvider):
image_url = ''
try:
image_url = getattr(movie, type).geturl(size='original')
image_url = getattr(movie, type).geturl(size = 'original')
except:
log.debug('Failed getting %s.%s for "%s"', (type, size, movie.title))

6
couchpotato/core/providers/nzb/binsearch/main.py

@ -56,6 +56,10 @@ class BinSearch(NZBProvider):
info = row.find('span', attrs = {'class':'d'})
size_match = re.search('size:.(?P<size>[0-9\.]+.[GMB]+)', info.text)
age = 0
try: age = re.search('(?P<size>\d+d)', row.find_all('td')[-1:][0].text).group('size')[:-1]
except: pass
def extra_check(item):
parts = re.search('available:.(?P<parts>\d+)./.(?P<total>\d+)', info.text)
total = tryInt(parts.group('total'))
@ -74,7 +78,7 @@ class BinSearch(NZBProvider):
results.append({
'id': nzb_id,
'name': title.text,
'age': tryInt(re.search('(?P<size>\d+d)', row.find_all('td')[-1:][0].text).group('size')[:-1]),
'age': tryInt(age),
'size': self.parseSize(size_match.group('size')),
'url': self.urls['download'] % nzb_id,
'detail_url': self.urls['detail'] % info.find('a')['href'],

40
couchpotato/core/providers/nzb/ftdworld/__init__.py

@ -1,40 +0,0 @@
from .main import FTDWorld
def start():
return FTDWorld()
config = [{
'name': 'ftdworld',
'groups': [
{
'tab': 'searcher',
'list': 'nzb_providers',
'name': 'FTDWorld',
'description': 'Free provider, less accurate. See <a href="http://ftdworld.net">FTDWorld</a>',
'wizard': True,
'options': [
{
'name': 'enabled',
'type': 'enabler',
},
{
'name': 'username',
'default': '',
},
{
'name': 'password',
'default': '',
'type': 'password',
},
{
'name': 'extra_score',
'advanced': True,
'label': 'Extra Score',
'type': 'int',
'default': 0,
'description': 'Starting score for each release found via this provider.',
}
],
},
],
}]

83
couchpotato/core/providers/nzb/ftdworld/main.py

@ -1,83 +0,0 @@
from couchpotato.core.helpers.encoding import toUnicode, tryUrlencode
from couchpotato.core.helpers.variable import tryInt
from couchpotato.core.logger import CPLog
from couchpotato.core.providers.nzb.base import NZBProvider
from couchpotato.environment import Env
import json
import traceback
log = CPLog(__name__)
class FTDWorld(NZBProvider):
urls = {
'search': 'http://ftdworld.net/api/index.php?%s',
'detail': 'http://ftdworld.net/spotinfo.php?id=%s',
'download': 'http://ftdworld.net/cgi-bin/nzbdown.pl?fileID=%s',
'login': 'http://ftdworld.net/api/login.php',
'login_check': 'http://ftdworld.net/api/login.php',
}
http_time_between_calls = 3 #seconds
cat_ids = [
([4, 11], ['dvdr']),
([1], ['cam', 'ts', 'dvdrip', 'tc', 'r5', 'scr', 'brrip']),
([7, 10, 13, 14], ['bd50', '720p', '1080p']),
]
cat_backup_id = 1
def _searchOnTitle(self, title, movie, quality, results):
q = '"%s" %s' % (title, movie['library']['year'])
params = tryUrlencode({
'ctitle': q,
'customQuery': 'usr',
'cage': Env.setting('retention', 'nzb'),
'csizemin': quality.get('size_min'),
'csizemax': quality.get('size_max'),
'ccategory': 14,
'ctype': ','.join([str(x) for x in self.getCatId(quality['identifier'])]),
})
data = self.getJsonData(self.urls['search'] % params, opener = self.login_opener)
if data:
try:
if data.get('numRes') == 0:
return
for item in data.get('data'):
nzb_id = tryInt(item.get('id'))
results.append({
'id': nzb_id,
'name': toUnicode(item.get('Title')),
'age': self.calculateAge(tryInt(item.get('Created'))),
'size': item.get('Size', 0),
'url': self.urls['download'] % nzb_id,
'detail_url': self.urls['detail'] % nzb_id,
'score': (tryInt(item.get('webPlus', 0)) - tryInt(item.get('webMin', 0))) * 3,
})
except:
log.error('Failed to parse HTML response from FTDWorld: %s', traceback.format_exc())
def getLoginParams(self):
return tryUrlencode({
'userlogin': self.conf('username'),
'passlogin': self.conf('password'),
'submit': 'Log In',
})
def loginSuccess(self, output):
try:
return json.loads(output).get('goodToGo', False)
except:
return False
loginCheckSuccess = loginSuccess

1
couchpotato/core/providers/nzb/newznab/__init__.py

@ -20,6 +20,7 @@ config = [{
{
'name': 'enabled',
'type': 'enabler',
'default': True,
},
{
'name': 'use',

75
couchpotato/core/providers/torrent/iptorrents/main.py

@ -2,42 +2,56 @@ from bs4 import BeautifulSoup
from couchpotato.core.helpers.encoding import tryUrlencode
from couchpotato.core.helpers.variable import tryInt
from couchpotato.core.logger import CPLog
from couchpotato.core.providers.base import MultiProvider
from couchpotato.core.providers.info.base import MovieProvider, ShowProvider
from couchpotato.core.providers.torrent.base import TorrentProvider
import traceback
log = CPLog(__name__)
class IPTorrents(TorrentProvider):
class IPTorrents(MultiProvider):
def getTypes(self):
return [Movie, Show]
class Base(TorrentProvider):
urls = {
'test' : 'http://www.iptorrents.com/',
'base_url' : 'http://www.iptorrents.com',
'login' : 'http://www.iptorrents.com/torrents/',
'login_check': 'http://www.iptorrents.com/inbox.php',
'search' : 'http://www.iptorrents.com/torrents/?l%d=1%s&q=%s&qf=ti&p=%d',
'search' : 'http://www.iptorrents.com/torrents/?l%d=1%%s&q=%s&qf=ti&p=%%d',
}
cat_ids = [
([48], ['720p', '1080p', 'bd50']),
([72], ['cam', 'ts', 'tc', 'r5', 'scr']),
([7], ['dvdrip', 'brrip']),
([6], ['dvdr']),
]
http_time_between_calls = 1 #seconds
cat_backup_id = None
def _searchOnTitle(self, title, movie, quality, results):
def _buildUrl(self, query, quality_identifier, cat_ids_group = None):
    """Build the IPTorrents search URL for `query`.

    Returns None when no category id maps to `quality_identifier`.
    The returned URL intentionally still contains %s/%d placeholders
    (freeleech flag and page number) that the caller fills in later.
    """
    cat_id = self.getCatId(quality_identifier, cat_ids_group)[0]
    if not cat_id:
        log.warning('Unable to find category for quality %s', quality_identifier)
        return
    # Escape literal percents from urlencoding so the caller's later
    # %-formatting pass (freeleech, page) leaves them intact.
    return self.urls['search'] % (cat_id, tryUrlencode(query).replace('%', '%%'))
def _searchOnTitle(self, title, media, quality, results):
freeleech = '' if not self.conf('freeleech') else '&free=on'
base_url = self.buildUrl(title, media, quality)
if not base_url: return
pages = 1
current_page = 1
while current_page <= pages and not self.shuttingDown():
url = self.urls['search'] % (self.getCatId(quality['identifier'])[0], freeleech, tryUrlencode('%s %s' % (title.replace(':', ''), movie['library']['year'])), current_page)
data = self.getHTMLData(url, opener = self.login_opener)
data = self.getHTMLData(
base_url % (freeleech, current_page),
opener = self.login_opener
)
if data:
html = BeautifulSoup(data)
@ -101,3 +115,38 @@ class IPTorrents(TorrentProvider):
def loginCheckSuccess(self, output):
return '/logout.php' in output.lower()
class Movie(MovieProvider, Base):
    """IPTorrents movie search: maps CP quality identifiers to site categories."""

    # ([site category ids], [quality identifiers they cover])
    cat_ids = [
        ([48], ['720p', '1080p', 'bd50']),
        ([72], ['cam', 'ts', 'tc', 'r5', 'scr']),
        ([7], ['dvdrip', 'brrip']),
        ([6], ['dvdr']),
    ]

    def buildUrl(self, title, media, quality):
        """Return the partially-formatted search URL for a movie release."""
        # Colons break the site search; the year narrows the result set.
        query = '%s %s' % (title.replace(':', ''), media['library']['year'])
        return self._buildUrl(query, quality['identifier'])
class Show(ShowProvider, Base):
    """IPTorrents show search: categories are grouped per media type."""

    # Categories differ between full-season packs and single episodes.
    # NOTE(review): [78] and [4, 79] both map to 'hdtv_sd' -- presumably
    # getCatId picks the first match; confirm the duplication is intended.
    cat_ids = [
        ('season', [
            ([65], ['hdtv_sd', 'hdtv_720p', 'webdl_720p', 'webdl_1080p']),
        ]),
        ('episode', [
            ([5], ['hdtv_720p', 'webdl_720p', 'webdl_1080p']),
            ([78], ['hdtv_sd']),
            ([4, 79], ['hdtv_sd'])
        ])
    ]

    def buildUrl(self, title, media, quality):
        """Return the search URL for a season/episode; None for other types."""
        if media['type'] not in ['season', 'episode']:
            return
        return self._buildUrl(title.replace(':', ''), quality['identifier'], media['type'])

2
couchpotato/core/providers/torrent/sceneaccess/main.py

@ -102,7 +102,7 @@ class Base(TorrentProvider):
loginCheckSuccess = loginSuccess
class Movie(Base, MovieProvider):
class Movie(MovieProvider, Base):
cat_ids = [
([22], ['720p', '1080p']),

12
couchpotato/core/providers/torrent/torrentshack/main.py

@ -11,12 +11,12 @@ log = CPLog(__name__)
class TorrentShack(TorrentProvider):
urls = {
'test' : 'http://www.torrentshack.net/',
'login' : 'http://www.torrentshack.net/login.php',
'login_check': 'http://www.torrentshack.net/inbox.php',
'detail' : 'http://www.torrentshack.net/torrent/%s',
'search' : 'http://www.torrentshack.net/torrents.php?searchstr=%s&filter_cat[%d]=1',
'download' : 'http://www.torrentshack.net/%s',
'test' : 'https://torrentshack.net/',
'login' : 'https://torrentshack.net/login.php',
'login_check': 'https://torrentshack.net/inbox.php',
'detail' : 'https://torrentshack.net/torrent/%s',
'search' : 'https://torrentshack.net/torrents.php?searchstr=%s&filter_cat[%d]=1',
'download' : 'https://torrentshack.net/%s',
}
cat_ids = [

11
couchpotato/core/providers/trailer/hdtrailers/main.py

@ -90,21 +90,18 @@ class HDTrailers(TrailerProvider):
html = BeautifulSoup(data, parse_only = tables)
result_table = html.find('table', attrs = {'class':'bottomTable'})
for tr in result_table.find_all('tr'):
trtext = str(tr).lower()
if 'clips' in trtext:
break
if 'trailer' in trtext and not 'clip' in trtext and provider in trtext:
nr = 0
if 'trailer' in trtext and not 'clip' in trtext and provider in trtext and not '3d' in trtext:
if 'trailer' not in tr.find('span', 'standardTrailerName').text.lower():
continue
resolutions = tr.find_all('td', attrs = {'class':'bottomTableResolution'})
for res in resolutions:
results[str(res.a.contents[0])].insert(0, res.a['href'])
nr += 1
return results
if res.a:
results[str(res.a.contents[0])].insert(0, res.a['href'])
except AttributeError:
log.debug('No trailers found in provider %s.', provider)

24
couchpotato/runner.py

@ -85,18 +85,7 @@ def runCouchPotato(options, base_path, args, data_dir = None, log_dir = None, En
# Backup before start and cleanup old databases
new_backup = toUnicode(os.path.join(data_dir, 'db_backup', str(int(time.time()))))
# Create path and copy
if not os.path.isdir(new_backup): os.makedirs(new_backup)
src_files = [options.config_file, db_path, db_path + '-shm', db_path + '-wal']
for src_file in src_files:
if os.path.isfile(src_file):
dst_file = toUnicode(os.path.join(new_backup, os.path.basename(src_file)))
shutil.copyfile(src_file, dst_file)
# Try and copy stats separately
try: shutil.copystat(src_file, dst_file)
except: pass
# Remove older backups, keep backups 3 days or at least 3
backups = []
@ -105,6 +94,19 @@ def runCouchPotato(options, base_path, args, data_dir = None, log_dir = None, En
if os.path.isdir(backup):
backups.append(backup)
latest_backup = tryInt(os.path.basename(sorted(backups)[-1])) if len(backups) > 0 else 0
if latest_backup < time.time() - 3600:
# Create path and copy
src_files = [options.config_file, db_path, db_path + '-shm', db_path + '-wal']
for src_file in src_files:
if os.path.isfile(src_file):
dst_file = toUnicode(os.path.join(new_backup, os.path.basename(src_file)))
shutil.copyfile(src_file, dst_file)
# Try and copy stats separately
try: shutil.copystat(src_file, dst_file)
except: pass
total_backups = len(backups)
for backup in backups:
if total_backups > 3:

2
init/ubuntu

@ -52,7 +52,7 @@ APP_PATH=${CP_HOME-/opt/couchpotato/}
DATA_DIR=${CP_DATA-/var/couchpotato}
# Path to store PID file
PID_FILE=${CP_PID_FILE-/var/run/couchpotato.pid}
PID_FILE=${CP_PIDFILE-/var/run/couchpotato.pid}
# path to python bin
DAEMON=${PYTHON_BIN-/usr/bin/python}

161
libs/caper/__init__.py

@ -0,0 +1,161 @@
# Copyright 2013 Dean Gardiner <gardiner91@gmail.com>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from logr import Logr
from caper.matcher import FragmentMatcher
from caper.objects import CaperFragment, CaperClosure
from caper.parsers.anime import AnimeParser
from caper.parsers.scene import SceneParser
__version_info__ = ('0', '2', '0')
__version_branch__ = 'master'

# e.g. "0.2.0-master"
__version__ = "%s%s" % (
    '.'.join(__version_info__),
    '-' + __version_branch__ if __version_branch__ else ''
)

# Characters that open/close a bracketed closure.
CL_START_CHARS = ['(', '[']
CL_END_CHARS = [')', ']']
STRIP_START_CHARS = ''.join(CL_START_CHARS)
STRIP_END_CHARS = ''.join(CL_END_CHARS)
# Padding characters stripped from closure boundaries.
STRIP_CHARS = ''.join(['_', ' ', '.'])

# Characters that split a closure into fragments.
FRAGMENT_SEPARATORS = ['.', '-', '_', ' ']

# Closure-splitting state machine states.
CL_START = 0
CL_END = 1
class Caper(object):
    """Entry point for release-name parsing.

    A name is split into "closures" (bracketed or plain sections), each
    closure is split into "fragments" (separator-delimited tokens), and the
    linked structure is handed to a scene or anime parser.
    """

    def __init__(self):
        # Available parser implementations, selected by name in parse().
        self.parsers = {
            'scene': SceneParser(),
            'anime': AnimeParser()
        }

    def _closure_split(self, name):
        """Split `name` into bracket-delimited closures.

        :type name: str
        :rtype: list of CaperClosure
        """
        closures = []

        def end_closure(closures, buf):
            # Drop separator padding; skip buffers that were only padding.
            buf = buf.strip(STRIP_CHARS)
            if len(buf) < 1:
                return
            cur = CaperClosure(buf)
            # Link into the doubly-linked closure chain.
            cur.left = closures[len(closures) - 1] if len(closures) > 0 else None
            if cur.left:
                cur.left.right = cur
            closures.append(cur)

        # Two-state machine: CL_START = scanning plain text,
        # CL_END = inside a bracketed section.
        state = CL_START
        buf = ""
        for x, ch in enumerate(name):
            if state == CL_START and ch in CL_START_CHARS:
                # An opening bracket terminates the preceding plain closure.
                end_closure(closures, buf)
                state = CL_END
                buf = ""
            buf += ch
            if state == CL_END and ch in CL_END_CHARS:
                # A closing bracket terminates the bracketed closure.
                end_closure(closures, buf)
                state = CL_START
                buf = ""
        # Flush whatever remains after the final character.
        end_closure(closures, buf)
        return closures

    def _clean_closure(self, closure):
        """Strip the surrounding bracket characters from a closure string.

        :type closure: str
        :rtype: str
        """
        return closure.lstrip(STRIP_START_CHARS).rstrip(STRIP_END_CHARS)

    def _fragment_split(self, closures):
        """Split each closure's text into separator-delimited fragments.

        :type closures: list of CaperClosure
        :rtype: list of CaperClosure
        """
        cur_position = 0
        cur = CaperFragment()

        def end_fragment(fragments, cur, cur_position):
            cur.position = cur_position
            # Link into the doubly-linked fragment chain.
            cur.left = fragments[len(fragments) - 1] if len(fragments) > 0 else None
            if cur.left:
                cur.left_sep = cur.left.right_sep
                cur.left.right = cur
            # NOTE(review): `ch` comes from the enclosing loop; for the last
            # fragment of a closure this is the final literal character, not
            # a separator -- confirm this is intended.
            cur.right_sep = ch
            fragments.append(cur)

        for closure in closures:
            closure.fragments = []
            for x, ch in enumerate(self._clean_closure(closure.value)):
                if ch in FRAGMENT_SEPARATORS:
                    end_fragment(closure.fragments, cur, cur_position)
                    # Reset
                    cur = CaperFragment()
                    cur_position += 1
                else:
                    cur.value += ch
            # Finish parsing the last fragment
            if cur.value != "":
                end_fragment(closure.fragments, cur, cur_position)
            # Reset
            cur_position = 0
            cur = CaperFragment()
        return closures

    def parse(self, name, parser='scene'):
        """Parse release name `name` with the named parser.

        :param parser: 'scene' or 'anime'
        :raises ValueError: if `parser` is not a known parser name
        """
        closures = self._closure_split(name)
        closures = self._fragment_split(closures)

        # Print closures
        for closure in closures:
            Logr.debug("closure [%s]", closure.value)

        if parser not in self.parsers:
            raise ValueError("Unknown parser")

        # TODO autodetect the parser type
        return self.parsers[parser].run(closures)

74
libs/caper/constraint.py

@ -0,0 +1,74 @@
# Copyright 2013 Dean Gardiner <gardiner91@gmail.com>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
class CaptureConstraint(object):
    """A weighted predicate over a fragment, built from ``name__op=value``
    keyword arguments (e.g. ``value__eq='720p'``, ``fragment__re='video'``)."""

    def __init__(self, capture_group, comparisons=None, **kwargs):
        """Capture constraint object

        :type capture_group: CaptureGroup
        :param comparisons: pre-built (name, method, argument) triples
        :param kwargs: ``<attr>__<op>=<arg>`` pairs; keys without exactly one
            ``__`` or with an unknown op are silently ignored
        """
        self.capture_group = capture_group

        self.comparisons = comparisons if comparisons else []

        for key, value in kwargs.items():
            key = key.split('__')
            if len(key) != 2:
                continue
            name, method = key

            method = '_compare_' + method
            if not hasattr(self, method):
                continue

            self.comparisons.append((name, getattr(self, method), value))

    def _compare_eq(self, fragment, name, expected):
        """Full-weight equality test on fragment.<name>; None when missing."""
        if not hasattr(fragment, name):
            return None

        return 1.0, getattr(fragment, name) == expected

    def _compare_re(self, fragment, name, arg):
        """Regex comparison: whole-fragment matcher group when name is
        'fragment', a compiled pattern applied to fragment.<name>, or a
        string resolved through the matcher's value groups."""
        if name == 'fragment':
            group, minimum_weight = arg if type(arg) is tuple and len(arg) > 1 else (arg, 0)

            weight, match, num_fragments = self.capture_group.parser.matcher.fragment_match(fragment, group)
            return weight, weight > minimum_weight
        elif type(arg).__name__ == 'SRE_Pattern':
            return 1.0, arg.match(getattr(fragment, name)) is not None
        elif hasattr(fragment, name):
            match = self.capture_group.parser.matcher.value_match(getattr(fragment, name), arg, single=True)
            return 1.0, match is not None

        # Only reachable when `fragment` lacks the attribute `name`.
        if not hasattr(fragment, name):
            raise ValueError("Unable to find fragment with name '%s'" % name)
        else:
            # NOTE: dead code -- the hasattr() branch above returns first.
            raise ValueError("Unexpected argument type")

    def execute(self, fragment):
        """Run every comparison against `fragment`.

        :return: (average comparison weight, all comparisons succeeded);
                 (0.0, False) when no comparisons are defined.
        """
        if not self.comparisons:
            # Bug fix: previously divided by zero on an empty constraint.
            return 0.0, False

        results = []
        total_weight = 0

        for name, method, argument in self.comparisons:
            weight, success = method(fragment, name, argument)
            total_weight += weight
            results.append(success)

        return total_weight / float(len(results)), all(results)

    def __repr__(self):
        return "CaptureConstraint(comparisons=%s)" % repr(self.comparisons)

147
libs/caper/group.py

@ -0,0 +1,147 @@
# Copyright 2013 Dean Gardiner <gardiner91@gmail.com>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from logr import Logr
from caper.helpers import clean_dict
from caper.result import CaperFragmentNode
from caper.step import CaptureStep
from caper.constraint import CaptureConstraint
class CaptureGroup(object):
    """One capture pass over the result tree: an ordered list of capture
    steps plus the constraints that stop the capture."""

    def __init__(self, parser, result):
        """Capture group object

        :type parser: caper.parsers.base.Parser
        :type result: caper.result.CaperResult
        """
        self.parser = parser
        self.result = result

        #: @type: list of CaptureStep
        self.steps = []
        #: @type: list of CaptureConstraint
        self.constraints = []

    def capture_fragment(self, tag, regex=None, func=None, single=True):
        """Queue a step capturing from the fragment stream; chainable."""
        Logr.debug('capture_fragment("%s", "%s", %s, %s)', tag, regex, func, single)
        self.steps.append(CaptureStep(
            self, tag,
            'fragment',
            regex=regex,
            func=func,
            single=single
        ))
        return self

    def capture_closure(self, tag, regex=None, func=None, single=True):
        """Queue a step capturing from the closure stream; chainable."""
        Logr.debug('capture_closure("%s", "%s", %s, %s)', tag, regex, func, single)
        self.steps.append(CaptureStep(
            self, tag,
            'closure',
            regex=regex,
            func=func,
            single=single
        ))
        return self

    def until(self, **kwargs):
        """Add a stop constraint (see CaptureConstraint kwargs); chainable."""
        self.constraints.append(CaptureConstraint(self, **kwargs))
        return self

    def parse_subject(self, parent_head, subject):
        """Parse one subject under `parent_head`, returning the new head
        nodes produced (may branch into multiple interpretations)."""
        parent_node = parent_head[0] if type(parent_head) is list else parent_head

        # TODO - if subject is a closure?

        nodes = []

        # Check constraints
        for constraint in self.constraints:
            weight, success = constraint.execute(subject)
            if success:
                Logr.debug('capturing broke on "%s" at %s', subject.value, constraint)
                parent_node.finished_groups.append(self)
                nodes.append(parent_head)
                if weight == 1.0:
                    # Perfect constraint match: stop here, no branching.
                    return nodes
                else:
                    # Partial match: keep this head finished but also fall
                    # through below to try capturing anyway.
                    Logr.debug('Branching result')

        # Try match subject against the steps available
        tag, success, weight, match, num_fragments = (None, None, None, None, None)
        for step in self.steps:
            tag = step.tag
            success, weight, match, num_fragments = step.execute(subject)
            if success:
                match = clean_dict(match) if type(match) is dict else match
                Logr.debug('Found match with weight %s, match: %s, num_fragments: %s' % (weight, match, num_fragments))
                break

        Logr.debug('created fragment node with subject.value: "%s"' % subject.value)

        result = [CaperFragmentNode(parent_node.closure, subject.take_right(num_fragments), parent_head, tag, weight, match)]

        if match and weight < 1.0:
            # Uncertain match: also produce an unmatched node so later
            # groups can try a different interpretation.
            if num_fragments == 1:
                result.append(CaperFragmentNode(parent_node.closure, [subject], parent_head, None, None, None))
            else:
                nodes.append(CaperFragmentNode(parent_node.closure, [subject], parent_head, None, None, None))

        nodes.append(result[0] if len(result) == 1 else result)

        return nodes

    def execute(self):
        """Run this capture group until every head has finished it."""
        heads_finished = None

        while heads_finished is None or not (len(heads_finished) == len(self.result.heads) and all(heads_finished)):
            heads_finished = []

            heads = self.result.heads
            self.result.heads = []

            for head in heads:
                node = head[0] if type(head) is list else head

                Logr.debug("head node: %s" % node)

                if self in node.finished_groups:
                    Logr.debug("head finished for group")
                    self.result.heads.append(head)
                    heads_finished.append(True)
                    continue

                next_subject = node.next()

                if next_subject:
                    for node_result in self.parse_subject(head, next_subject):
                        self.result.heads.append(node_result)

                heads_finished.append(self in node.finished_groups or next_subject is None)

            # No new heads produced this generation: keep the previous ones.
            if len(self.result.heads) == 0:
                self.result.heads = heads

            Logr.debug("heads_finished: %s, self.result.heads: %s", heads_finished, self.result.heads)

        Logr.debug("group finished")

64
libs/caper/helpers.py

@ -0,0 +1,64 @@
# Copyright 2013 Dean Gardiner <gardiner91@gmail.com>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import sys
PY2 = sys.version_info[0] == 2
PY3 = sys.version_info[0] == 3
def is_list_type(obj, element_type):
    """Return True when `obj` is a list whose first element's type is
    exactly `element_type` (subclasses do not count).

    :raises ValueError: for an empty list, since no element can be sampled
    """
    if type(obj) is not list:
        return False
    try:
        first = obj[0]
    except IndexError:
        raise ValueError("Unable to determine list element type from empty list")
    return type(first) is element_type
def clean_dict(target, remove=None):
    """Recursively remove entries whose value equals `remove` (in place).

    Nested dicts are cleaned recursively but never removed themselves,
    even when they end up empty.

    :type target: dict
    :return: the same dict, for chaining
    :raises ValueError: when `target` is not a dict
    """
    if type(target) is not dict:
        raise ValueError("Target is required to be a dict")

    for key in list(target.keys()):
        value = target[key]
        if type(value) is dict:
            clean_dict(value, remove)
        elif value == remove:
            del target[key]

    return target
def xrange_six(start, stop=None, step=None):
    """Version-agnostic lazy range: `range` on Python 3, `xrange` on Python 2.

    Mirrors the builtin signature: ``xrange_six(stop)`` or
    ``xrange_six(start, stop[, step])``.
    """
    # Conditional expression is lazy, so `xrange` is never looked up on py3.
    rng = range if sys.version_info[0] >= 3 else xrange
    if stop is None:
        return rng(start)
    if step is None:
        # Bug fix: the original ignored `stop` unless `step` was also given,
        # so xrange_six(2, 10) wrongly returned range(2).
        return rng(start, stop)
    return rng(start, stop, step)

193
libs/caper/matcher.py

@ -0,0 +1,193 @@
# Copyright 2013 Dean Gardiner <gardiner91@gmail.com>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pprint
import re
from logr import Logr
from caper.helpers import is_list_type, clean_dict
class FragmentMatcher(object):
    """Compiles weighted regex pattern groups and matches them against
    plain values, fragment chains, or a parser's fragment stream."""

    def __init__(self, pattern_groups):
        """Compile `pattern_groups` into ``self.regex``.

        :param pattern_groups: list of (group_name, patterns). `patterns` is
            either a flat list (given weight 1.0) or a list of
            (weight, patterns) pairs. Each pattern is a regex string, a tuple
            of regex strings (one per consecutive fragment), or a
            (template, options-list) pair expanded to ``template % 'a|b|c'``.
        """
        self.regex = {}

        for group_name, patterns in pattern_groups:
            if group_name not in self.regex:
                self.regex[group_name] = []

            # Normalize a flat pattern list into a single full-weight group.
            if type(patterns[0]) is str or type(patterns[0][0]) not in [int, float]:
                patterns = [(1.0, patterns)]

            # (renamed from the original's shadowing `patterns` loop var)
            for weight, weight_group in patterns:
                weight_patterns = []

                for pattern in weight_group:
                    # Transform into multi-fragment patterns
                    if type(pattern) is str:
                        pattern = (pattern,)

                    # A (template, [options]) pair is one fragment pattern,
                    # not two: wrap it so the expansion below applies.
                    if type(pattern) is tuple and len(pattern) == 2:
                        if type(pattern[0]) is str and is_list_type(pattern[1], str):
                            pattern = (pattern,)

                    result = []
                    for value in pattern:
                        if type(value) is tuple:
                            if len(value) == 2:
                                # Construct OR-list pattern
                                value = value[0] % '|'.join(value[1])
                            elif len(value) == 1:
                                value = value[0]
                        result.append(re.compile(value, re.IGNORECASE))

                    weight_patterns.append(tuple(result))

                self.regex[group_name].append((weight, weight_patterns))
        # Bug fix: removed stray `pprint.pprint(self.regex)` debug dump that
        # printed the whole compiled table to stdout on every construction.

    def find_group(self, name):
        """Return (group_name, weight_groups) for `name`, or None."""
        for group_name, weight_groups in self.regex.items():
            if group_name and group_name == name:
                return group_name, weight_groups
        return None

    def parser_match(self, parser, group_name, single=True):
        """Match patterns against `parser`'s fragment stream, consuming
        fragments (commit on success, rewind on failure).

        :type parser: caper.parsers.base.Parser
        :return: {group: merged groupdicts} or None
        """
        result = None

        for group, weight_groups in self.regex.items():
            if group_name and group != group_name:
                continue

            # TODO handle multiple weights
            weight, patterns = weight_groups[0]

            for pattern in patterns:
                fragments = []
                pattern_matched = True
                pattern_result = {}

                for fragment_pattern in pattern:
                    if not parser.fragment_available():
                        pattern_matched = False
                        break

                    fragment = parser.next_fragment()
                    fragments.append(fragment)

                    Logr.debug('[r"%s"].match("%s")', fragment_pattern.pattern, fragment.value)
                    match = fragment_pattern.match(fragment.value)
                    if match:
                        Logr.debug('Pattern "%s" matched', fragment_pattern.pattern)
                    else:
                        pattern_matched = False
                        break

                    pattern_result.update(clean_dict(match.groupdict()))

                if pattern_matched:
                    if result is None:
                        result = {}
                    if group not in result:
                        result[group] = {}

                    Logr.debug('Matched on <%s>', ' '.join([f.value for f in fragments]))

                    result[group].update(pattern_result)

                    parser.commit()
                    if single:
                        return result
                else:
                    parser.rewind()

        return result

    def value_match(self, value, group_name=None, single=True):
        """Match a plain string against the first weight group of each
        pattern group (only each pattern's first fragment-regex is used).

        :return: {group: groupdict} accumulated matches, or None
        """
        result = None

        for group, weight_groups in self.regex.items():
            if group_name and group != group_name:
                continue

            # TODO handle multiple weights
            weight, patterns = weight_groups[0]

            for pattern in patterns:
                match = pattern[0].match(value)
                if not match:
                    continue

                if result is None:
                    result = {}
                if group not in result:
                    result[group] = {}

                result[group].update(match.groupdict())

                if single:
                    return result

        return result

    def fragment_match(self, fragment, group_name=None):
        """Follow a fragment chain to try find a match

        :type fragment: caper.objects.CaperFragment
        :type group_name: str or None

        :return: The weight of the match found between 0.0 and 1.0,
                 where 1.0 means perfect match and 0.0 means no match,
                 plus the merged groupdict and the fragment count used
        :rtype: (float, dict, int)
        """
        # NOTE(review): find_group() returns None for an unknown/None group,
        # which raises TypeError on unpacking here -- confirm callers only
        # pass known group names.
        group_name, weight_groups = self.find_group(group_name)

        for weight, patterns in weight_groups:
            for pattern in patterns:
                cur_fragment = fragment
                success = True
                result = {}

                # Ignore empty patterns
                if len(pattern) < 1:
                    break

                for fragment_pattern in pattern:
                    if not cur_fragment:
                        success = False
                        break

                    match = fragment_pattern.match(cur_fragment.value)
                    if match:
                        result.update(match.groupdict())
                    else:
                        success = False
                        break

                    cur_fragment = cur_fragment.right if cur_fragment else None

                if success:
                    Logr.debug("Found match with weight %s" % weight)
                    return float(weight), result, len(pattern)

        return 0.0, None, 1

75
libs/caper/objects.py

@ -0,0 +1,75 @@
# Copyright 2013 Dean Gardiner <gardiner91@gmail.com>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from caper.helpers import xrange_six
class CaperClosure(object):
    """A bracketed or plain section of a release name, linked to its
    neighbouring closures."""

    def __init__(self, value):
        # Raw text of this closure (brackets included for bracketed ones).
        self.value = value

        # Neighbouring closures in the chain; wired up during splitting.
        self.left = None
        self.right = None

        # Fragments contained in this closure; filled by _fragment_split().
        self.fragments = []
class CaperFragment(object):
    """A single separator-delimited token of a closure, doubly linked to
    its neighbours along with the separators between them."""

    def __init__(self):
        # Token text, built up character by character during splitting.
        self.value = ""

        # Neighbouring fragments and the separators adjoining them.
        self.left = None
        self.left_sep = None
        self.right = None
        self.right_sep = None

        # Index of this fragment within its closure.
        self.position = None

    def take(self, direction, count, include_self=True):
        """Collect up to `count` fragments walking `direction`-wards,
        padding with None once the chain runs out."""
        if direction not in ['left', 'right']:
            raise ValueError('Un-Expected value for "direction", expected "left" or "right".')

        collected = []
        remaining = count

        if include_self:
            collected.append(self)
            remaining -= 1

        node = self
        for _ in xrange_six(remaining):
            node = getattr(node, direction) if node else None
            collected.append(node)

        return collected

    def take_left(self, count, include_self=True):
        """Collect `count` fragments walking left."""
        return self.take('left', count, include_self)

    def take_right(self, count, include_self=True):
        """Collect `count` fragments walking right."""
        return self.take('right', count, include_self)

0
libs/caper/parsers/__init__.py

88
libs/caper/parsers/anime.py

@ -0,0 +1,88 @@
# Copyright 2013 Dean Gardiner <gardiner91@gmail.com>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import re
from caper.parsers.base import Parser
REGEX_GROUP = re.compile(r'(\(|\[)(?P<group>.*?)(\)|\])', re.IGNORECASE)
PATTERN_GROUPS = [
('identifier', [
r'S(?P<season>\d+)E(?P<episode>\d+)',
r'(S(?P<season>\d+))|(E(?P<episode>\d+))',
r'Ep(?P<episode>\d+)',
r'$(?P<absolute>\d+)^',
(r'Episode', r'(?P<episode>\d+)'),
]),
('video', [
(r'(?P<h264_profile>%s)', [
'Hi10P'
]),
(r'.(?P<resolution>%s)', [
'720p',
'1080p',
'960x720',
'1920x1080'
]),
(r'(?P<source>%s)', [
'BD'
]),
]),
('audio', [
(r'(?P<codec>%s)', [
'FLAC'
]),
])
]
class AnimeParser(Parser):
    """Parser tuned for anime release names (bracketed release groups,
    absolute episode numbers)."""

    def __init__(self):
        super(AnimeParser, self).__init__(PATTERN_GROUPS)

    def capture_group(self, fragment):
        """Extract a bracketed release-group name from `fragment`, or None."""
        match = REGEX_GROUP.match(fragment.value)
        if not match:
            return None
        return match.group('group')

    def run(self, closures):
        """
        :type closures: list of CaperClosure
        """
        self.setup(closures)

        # Release group is usually the first bracketed closure.
        # NOTE(review): CaptureGroup.execute() takes no `once` argument in
        # this version -- confirm this call doesn't raise TypeError.
        self.capture_closure('group', func=self.capture_group)\
            .execute(once=True)

        # Show name: everything before the identifier/video metadata.
        self.capture_fragment('show_name', single=False)\
            .until(value__re='identifier')\
            .until(value__re='video')\
            .execute()

        # Remaining metadata fragments.
        self.capture_fragment('identifier', regex='identifier') \
            .capture_fragment('video', regex='video', single=False) \
            .capture_fragment('audio', regex='audio', single=False) \
            .execute()

        self.result.build()
        return self.result

136
libs/caper/parsers/base.py

@ -0,0 +1,136 @@
# Copyright 2013 Dean Gardiner <gardiner91@gmail.com>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from logr import Logr
from caper import FragmentMatcher
from caper.group import CaptureGroup
from caper.result import CaperResult, CaperClosureNode
class Parser(object):
    """Base parser: walks closures/fragments through a FragmentMatcher and
    collects capture results into a CaperResult."""

    def __init__(self, pattern_groups):
        self.matcher = FragmentMatcher(pattern_groups)

        self.closures = None
        #: :type: caper.result.CaperResult
        self.result = None

        self._match_cache = None
        self._fragment_pos = None
        self._closure_pos = None
        self._history = None

        self.reset()

    def reset(self):
        """Clear all parsing state so the parser can be reused."""
        self.closures = None
        self.result = CaperResult()

        self._match_cache = {}
        # Positions start at -1 so the first next_*() call yields index 0.
        self._fragment_pos = -1
        self._closure_pos = -1
        # Step log used by rewind() to undo uncommitted cursor moves.
        self._history = []

    def setup(self, closures):
        """Install `closures` and seed the result with the first closure.

        :type closures: list of CaperClosure
        """
        self.reset()
        self.closures = closures
        self.result.heads = [CaperClosureNode(closures[0])]

    def run(self, closures):
        """Parse `closures`; implemented by subclasses.

        :type closures: list of CaperClosure
        """
        raise NotImplementedError()

    #
    # Closure Methods
    #

    def next_closure(self):
        """Advance to the next closure, resetting the fragment cursor."""
        self._closure_pos += 1
        closure = self.closures[self._closure_pos]

        # Record how far the fragment cursor must move back on rewind.
        self._history.append(('fragment', -1 - self._fragment_pos))
        self._fragment_pos = -1

        if self._closure_pos != 0:
            self._history.append(('closure', 1))

        Logr.debug('(next_closure) closure.value: "%s"', closure.value)

        return closure

    def closure_available(self):
        """Return True when another closure remains after the current one."""
        return self._closure_pos + 1 < len(self.closures)

    #
    # Fragment Methods
    #

    def next_fragment(self):
        """Advance to and return the next fragment of the current closure."""
        closure = self.closures[self._closure_pos]

        self._fragment_pos += 1
        fragment = closure.fragments[self._fragment_pos]

        self._history.append(('fragment', 1))

        Logr.debug('(next_fragment) closure.value "%s" - fragment.value: "%s"', closure.value, fragment.value)

        return fragment

    def fragment_available(self):
        """Return True when the current closure has another fragment."""
        # NOTE(review): this gates on closure_available() (a *next* closure
        # existing), so fragments of the last closure appear unavailable --
        # confirm the interplay with next_closure() is intended.
        if not self.closure_available():
            return False
        return self._fragment_pos + 1 < len(self.closures[self._closure_pos].fragments)

    def rewind(self):
        """Undo every uncommitted cursor move, then clear the history."""
        for source, delta in reversed(self._history):
            Logr.debug('(rewind) Rewinding step: %s', (source, delta))
            if source == 'fragment':
                self._fragment_pos -= delta
            elif source == 'closure':
                self._closure_pos -= delta
            else:
                raise NotImplementedError()

        self.commit()

    def commit(self):
        """Accept the cursor moves made since the last commit/rewind."""
        Logr.debug('(commit)')
        self._history = []

    #
    # Capture Methods
    #

    def capture_fragment(self, tag, regex=None, func=None, single=True):
        """Start a new CaptureGroup seeded with a fragment capture step."""
        return CaptureGroup(self, self.result).capture_fragment(
            tag,
            regex=regex,
            func=func,
            single=single
        )

    def capture_closure(self, tag, regex=None, func=None, single=True):
        """Start a new CaptureGroup seeded with a closure capture step."""
        return CaptureGroup(self, self.result).capture_closure(
            tag,
            regex=regex,
            func=func,
            single=single
        )

148
libs/caper/parsers/scene.py

@ -0,0 +1,148 @@
# Copyright 2013 Dean Gardiner <gardiner91@gmail.com>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from logr import Logr
from caper.parsers.base import Parser
from caper.result import CaperFragmentNode
PATTERN_GROUPS = [
('identifier', [
(1.0, [
# S01E01-E02
('^S(?P<season>\d+)E(?P<episode_from>\d+)$', '^E(?P<episode_to>\d+)$'),
# S02E13
r'^S(?P<season>\d+)E(?P<episode>\d+)$',
# S01 E13
(r'^(S(?P<season>\d+))$', r'^(E(?P<episode>\d+))$'),
# S02
# E13
r'^((S(?P<season>\d+))|(E(?P<episode>\d+)))$',
# 3x19
r'^(?P<season>\d+)x(?P<episode>\d+)$',
# 2013.09.15
(r'^(?P<year>\d{4})$', r'^(?P<month>\d{2})$', r'^(?P<day>\d{2})$'),
# 09.15.2013
(r'^(?P<month>\d{2})$', r'^(?P<day>\d{2})$', r'^(?P<year>\d{4})$'),
# TODO - US/UK Date Format Conflict? will only support US format for now..
# 15.09.2013
#(r'^(?P<day>\d{2})$', r'^(?P<month>\d{2})$', r'^(?P<year>\d{4})$'),
# 130915
r'^(?P<year_short>\d{2})(?P<month>\d{2})(?P<day>\d{2})$',
# Season 3 Episode 14
(r'^Se(ason)?$', r'^(?P<season>\d+)$', r'^Ep(isode)?$', r'^(?P<episode>\d+)$'),
# Season 3
(r'^Se(ason)?$', r'^(?P<season>\d+)$'),
# Episode 14
(r'^Ep(isode)?$', r'^(?P<episode>\d+)$'),
# Part.3
# Part.1.and.Part.3
('^Part$', '(?P<part>\d+)'),
]),
(0.8, [
# 100 - 1899, 2100 - 9999 (skips 1900 to 2099 - so we don't get years my mistake)
# TODO - Update this pattern on 31 Dec 2099
r'^(?P<season>([1-9])|(1[0-8])|(2[1-9])|([3-9][0-9]))(?P<episode>\d{2})$'
]),
(0.5, [
# 100 - 9999
r'^(?P<season>([1-9])|([1-9][0-9]))(?P<episode>\d{2})$'
])
]),
('video', [
r'(?P<aspect>FS|WS)',
(r'(?P<resolution>%s)', [
'480p',
'720p',
'1080p'
]),
(r'(?P<source>%s)', [
'HDTV',
'PDTV',
'DSR',
'DVDRiP'
]),
(r'(?P<codec>%s)', [
'x264',
'XViD'
]),
(r'(?P<language>%s)', [
'GERMAN',
'DUTCH',
'FRENCH',
'SWEDiSH',
'DANiSH',
'iTALiAN'
]),
])
]
class SceneParser(Parser):
    """Parser for standard scene-style release names."""

    def __init__(self):
        super(SceneParser, self).__init__(PATTERN_GROUPS)

    def capture_group(self, fragment):
        """Treat a final '-'-separated fragment as the release group."""
        if fragment.left_sep == '-' and not fragment.right:
            return fragment.value
        return None

    def run(self, closures):
        """
        :type closures: list of CaperClosure
        """
        self.setup(closures)

        # Show name runs until the first identifier or video fragment.
        self.capture_fragment('show_name', single=False)\
            .until(fragment__re='identifier')\
            .until(fragment__re='video')\
            .execute()

        # Identifier(s) and video info run until the trailing group tag.
        self.capture_fragment('identifier', regex='identifier', single=False)\
            .capture_fragment('video', regex='video', single=False)\
            .until(left_sep__eq='-', right__eq=None)\
            .execute()

        self.capture_fragment('group', func=self.capture_group)\
            .execute()

        # Debug output only; does not affect the result.
        self.print_tree(self.result.heads)

        self.result.build()
        return self.result

    def print_tree(self, heads):
        """Recursively debug-dump the result tree to the log."""
        for head in heads:
            head = head if type(head) is list else [head]

            if type(head[0]) is CaperFragmentNode:
                for fragment in head[0].fragments:
                    Logr.debug(fragment.value)
            else:
                Logr.debug(head[0].closure.value)

            for node in head:
                Logr.debug('\t' + str(node).ljust(55) + '\t' + str(node.weight) + '\t' + str(node.match))

            # Walk up through parents so the whole chain is printed.
            if len(head) > 0 and head[0].parent:
                self.print_tree([head[0].parent])

172
libs/caper/result.py

@ -0,0 +1,172 @@
# Copyright 2013 Dean Gardiner <gardiner91@gmail.com>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import copy
from logr import Logr
GROUP_MATCHES = ['identifier']
class CaperNode(object):
    """Base node in the capture-result tree.

    Stores the matched closure plus bookkeeping used while building chains.
    """

    def __init__(self, closure, parent=None, tag=None, weight=None, match=None):
        """
        :param closure: matched closure (caper.objects.CaperClosure)
        :param parent: parent node in the tree (CaperNode or None)
        :param tag: capture tag this node was matched under (str)
        :param weight: match weight (float)
        :param match: captured values (dict)
        """
        self.closure = closure
        self.parent = parent
        self.tag = tag
        self.weight = weight
        self.match = match
        # Capture groups that completed at this node (list of CaptureGroup).
        self.finished_groups = []

    def next(self):
        """Advance to the following fragment/closure; subclasses must override."""
        raise NotImplementedError()
class CaperClosureNode(CaperNode):
    """Tree node wrapping a whole closure (rather than specific fragments)."""

    def __init__(self, closure, parent=None, tag=None, weight=None, match=None):
        """
        :type closure: caper.objects.CaperClosure or list of caper.objects.CaperClosure
        """
        super(CaperClosureNode, self).__init__(closure, parent, tag, weight, match)

    def next(self):
        """Return the closure's first fragment, or None when it has none."""
        if not self.closure:
            return None

        fragments = self.closure.fragments
        if fragments:
            return fragments[0]

        return None
class CaperFragmentNode(CaperNode):
    """Tree node covering one or more fragments of a closure."""

    def __init__(self, closure, fragments, parent=None, tag=None, weight=None, match=None):
        """
        :type closure: caper.objects.CaperClosure
        :type fragments: list of caper.objects.CaperFragment
        """
        super(CaperFragmentNode, self).__init__(closure, parent, tag, weight, match)

        # Fragment(s) this node covers.
        self.fragments = fragments

    def next(self):
        """Return the fragment after the last covered one, else the next closure."""
        last = self.fragments[-1] if len(self.fragments) > 0 else None
        if last and last.right:
            return last.right

        if self.closure.right:
            return self.closure.right

        return None
class CaperResult(object):
    """Collects capture-tree heads and builds weighted result chains."""

    def __init__(self):
        #: :type: list of CaperNode
        self.heads = []

        #: :type: list of CaperResultChain
        self.chains = []

    def build(self):
        """Combine every head into chains, weight them and sort best-first."""
        max_matched = 0

        for head in self.heads:
            for chain in self.combine_chain(head):
                if chain.num_matched > max_matched:
                    max_matched = chain.num_matched

                self.chains.append(chain)

        # Guard against division by zero when chains exist but none of them
        # matched any fragment (previously raised ZeroDivisionError here).
        normalizer = float(max_matched) if max_matched else 1.0

        for chain in self.chains:
            chain.weights.append(chain.num_matched / normalizer)
            chain.finish()

        self.chains.sort(key=lambda chain: chain.weight, reverse=True)

        for chain in self.chains:
            Logr.debug("chain weight: %.02f", chain.weight)
            Logr.debug("\tInfo: %s", chain.info)
            Logr.debug("\tWeights: %s", chain.weights)
            Logr.debug("\tNumber of Fragments Matched: %s", chain.num_matched)

    def combine_chain(self, subject, chain=None):
        """Recursively walk from *subject* up to the root, producing chains.

        :param subject: CaperNode or list of CaperNode
        :param chain: chain being extended (siblings fork copies of it)
        :rtype: list of CaperResultChain
        """
        nodes = subject if type(subject) is list else [subject]

        if chain is None:
            chain = CaperResultChain()

        result = []

        for x, node in enumerate(nodes):
            # The last sibling keeps the original chain; earlier ones fork a copy.
            node_chain = chain if x == len(nodes) - 1 else chain.copy()

            if not node.parent:
                result.append(node_chain)
                continue

            # Skip over closure nodes
            if type(node) is CaperClosureNode:
                result.extend(self.combine_chain(node.parent, node_chain))

            # Parse fragment matches
            if type(node) is CaperFragmentNode:
                node_chain.update(node)
                result.extend(self.combine_chain(node.parent, node_chain))

        return result
class CaperResultChain(object):
    """One weighted chain of matches from a head node up to the root."""

    def __init__(self):
        # Final averaged weight; set by finish().
        self.weight = None
        # Captured info, keyed by tag.
        self.info = {}
        # Total number of fragments matched along the chain.
        self.num_matched = 0
        # Individual match weights; averaged by finish().
        self.weights = []

    def update(self, subject):
        """Fold a matched node into this chain (unmatched nodes are ignored)."""
        if subject.weight is None:
            return

        if subject.fragments is not None:
            self.num_matched += len(subject.fragments)

        self.weights.append(subject.weight)

        if subject.match:
            # Prepend so matches end up in root-to-head order.
            self.info.setdefault(subject.tag, []).insert(0, subject.match)

    def finish(self):
        """Finalize the chain weight as the mean of the collected weights."""
        self.weight = sum(self.weights) / len(self.weights)

    def copy(self):
        """Return an independent copy (deep for info, shallow for weights)."""
        clone = CaperResultChain()
        clone.weight = self.weight
        clone.info = copy.deepcopy(self.info)
        clone.num_matched = self.num_matched
        clone.weights = list(self.weights)
        return clone

72
libs/caper/step.py

@ -0,0 +1,72 @@
# Copyright 2013 Dean Gardiner <gardiner91@gmail.com>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from logr import Logr
class CaptureStep(object):
    """A single step of a CaptureGroup: match by regex, by callable, or raw."""

    REPR_KEYS = ['regex', 'func', 'single']

    def __init__(self, capture_group, tag, source, regex=None, func=None, single=None):
        #: @type: CaptureGroup
        self.capture_group = capture_group
        #: @type: str
        self.tag = tag
        #: @type: str — 'fragment' or 'closure'
        self.source = source
        #: @type: str
        self.regex = regex
        #: @type: function
        self.func = func
        #: @type: bool
        self.single = single

    def _get_next_subject(self, parser):
        """Pull the next fragment/closure from *parser*, or None when exhausted."""
        if self.source == 'fragment':
            return parser.next_fragment() if parser.fragment_available() else None
        if self.source == 'closure':
            return parser.next_closure() if parser.closure_available() else None
        raise NotImplementedError()

    def execute(self, fragment):
        """Try to match *fragment* against this step.

        :returns: tuple (matched, weight, match, num_fragments)
        """
        if self.regex:
            weight, match, num_fragments = self.capture_group.parser.matcher.fragment_match(fragment, self.regex)
            Logr.debug('(execute) [regex] tag: "%s"', self.tag)
            if match:
                return True, weight, match, num_fragments
        elif self.func:
            match = self.func(fragment)
            Logr.debug('(execute) [func] %s += "%s"', self.tag, match)
            if match:
                return True, 1.0, match, 1
        else:
            # No matcher configured: consume the fragment verbatim.
            Logr.debug('(execute) [raw] %s += "%s"', self.tag, fragment.value)
            return True, 1.0, fragment.value, 1

        return False, None, None, 1

    def __repr__(self):
        details = ['%s=%r' % (key, getattr(self, key))
                   for key in self.REPR_KEYS
                   if hasattr(self, key) and getattr(self, key)]
        suffix = ', ' + ', '.join(details) if details else ''
        return "CaptureStep('%s'%s)" % (self.tag, suffix)

35
libs/guessit/__init__.py

@ -20,7 +20,7 @@
from __future__ import unicode_literals
__version__ = '0.6-dev'
__version__ = '0.7-dev'
__all__ = ['Guess', 'Language',
'guess_file_info', 'guess_video_info',
'guess_movie_info', 'guess_episode_info']
@ -91,7 +91,28 @@ log.addHandler(h)
def _guess_filename(filename, filetype):
def find_nodes(tree, props):
"""Yields all nodes containing any of the given props."""
if isinstance(props, base_text_type):
props = [props]
for node in tree.nodes():
if any(prop in node.guess for prop in props):
yield node
def warning(title):
log.warning('%s, guesses: %s - %s' % (title, m.nice_string(), m2.nice_string()))
return m
mtree = IterativeMatcher(filename, filetype=filetype)
# if there are multiple possible years found, we assume the first one is
# part of the title, reparse the tree taking this into account
years = set(n.value for n in find_nodes(mtree.match_tree, 'year'))
if len(years) >= 2:
mtree = IterativeMatcher(filename, filetype=filetype,
opts=['skip_first_year'])
m = mtree.matched()
if 'language' not in m and 'subtitleLanguage' not in m:
@ -102,20 +123,10 @@ def _guess_filename(filename, filetype):
opts=['nolanguage', 'nocountry'])
m2 = mtree2.matched()
def find_nodes(tree, props):
"""Yields all nodes containing any of the given props."""
if isinstance(props, base_text_type):
props = [props]
for node in tree.nodes():
if any(prop in node.guess for prop in props):
yield node
def warning(title):
log.warning('%s, guesses: %s - %s' % (title, m.nice_string(), m2.nice_string()))
if m.get('title') is None:
return m
if m.get('title') != m2.get('title'):
title = next(find_nodes(mtree.match_tree, 'title'))
title2 = next(find_nodes(mtree2.match_tree, 'title'))

6
libs/guessit/fileutils.py

@ -77,12 +77,12 @@ def file_in_same_dir(ref_file, desired_file):
def load_file_in_same_dir(ref_file, filename):
"""Load a given file. Works even when the file is contained inside a zip."""
path = split_path(ref_file)[:-1] + [str(filename)]
path = split_path(ref_file)[:-1] + [filename]
for i, p in enumerate(path):
if p[-4:] == '.zip':
if p.endswith('.zip'):
zfilename = os.path.join(*path[:i + 1])
zfile = zipfile.ZipFile(zfilename)
return zfile.read('/'.join(path[i + 1:]))
return u(io.open(os.path.join(*path), encoding = 'utf-8').read())
return u(io.open(os.path.join(*path), encoding='utf-8').read())

2
libs/guessit/guess.py

@ -295,7 +295,7 @@ def merge_all(guesses, append=None):
# then merge the remaining ones
dups = set(result) & set(g)
if dups:
log.warning('duplicate properties %s in merged result...' % dups)
log.warning('duplicate properties %s in merged result...' % [ (result[p], g[p]) for p in dups] )
result.update_highest_confidence(g)

2
libs/guessit/language.py

@ -326,7 +326,7 @@ def search_language(string, lang_filter=None):
'la', 'el', 'del', 'por', 'mar',
# other
'ind', 'arw', 'ts', 'ii', 'bin', 'chan', 'ss', 'san', 'oss', 'iii',
'vi', 'ben', 'da'
'vi', 'ben', 'da', 'lt'
])
sep = r'[](){} \._-+'

6
libs/guessit/matcher.py

@ -128,12 +128,14 @@ class IterativeMatcher(object):
apply_transfo(name)
# more guessers for both movies and episodes
for name in ['guess_bonus_features', 'guess_year']:
apply_transfo(name)
apply_transfo('guess_bonus_features')
apply_transfo('guess_year', skip_first_year=('skip_first_year' in opts))
if 'nocountry' not in opts:
apply_transfo('guess_country')
apply_transfo('guess_idnumber')
# split into '-' separated subgroups (with required separator chars
# around the dash)

2
libs/guessit/matchtree.py

@ -275,7 +275,7 @@ class MatchTree(BaseMatchTree):
for string_part in ('title', 'series', 'container', 'format',
'releaseGroup', 'website', 'audioCodec',
'videoCodec', 'screenSize', 'episodeFormat',
'audioChannels'):
'audioChannels', 'idNumber'):
merge_similar_guesses(parts, string_part, choose_string)
# 2- merge the rest, potentially discarding information not properly

50
libs/guessit/patterns.py

@ -43,13 +43,13 @@ episode_rexps = [ # ... Season 2 ...
(r'saison (?P<season>[0-9]+)', 1.0, (0, 0)),
# ... s02e13 ...
(r'[Ss](?P<season>[0-9]{1,2}).?(?P<episodeNumber>(?:[Ee-][0-9]{1,2})+)[^0-9]', 1.0, (0, -1)),
(r'[Ss](?P<season>[0-9]{1,3})[^0-9]?(?P<episodeNumber>(?:-?[eE-][0-9]{1,3})+)[^0-9]', 1.0, (0, -1)),
# ... s03-x02 ...
(r'[Ss](?P<season>[0-9]{1,2}).?(?P<bonusNumber>(?:[Xx][0-9]{1,2})+)[^0-9]', 1.0, (0, -1)),
# ... s03-x02 ... # FIXME: redundant? remove it?
#(r'[Ss](?P<season>[0-9]{1,3})[^0-9]?(?P<bonusNumber>(?:-?[xX-][0-9]{1,3})+)[^0-9]', 1.0, (0, -1)),
# ... 2x13 ...
(r'[^0-9](?P<season>[0-9]{1,2}).?(?P<episodeNumber>(?:[xX][0-9]{1,2})+)[^0-9]', 0.8, (1, -1)),
(r'[^0-9](?P<season>[0-9]{1,2})[^0-9]?(?P<episodeNumber>(?:-?[xX][0-9]{1,3})+)[^0-9]', 1.0, (1, -1)),
# ... s02 ...
#(sep + r's(?P<season>[0-9]{1,2})' + sep, 0.6, (1, -1)),
@ -122,20 +122,25 @@ prop_multi = { 'format': { 'DVD': [ 'DVD', 'DVD-Rip', 'VIDEO-TS', 'DVDivX' ],
'VHS': [ 'VHS' ],
'WEB-DL': [ 'WEB-DL' ] },
'screenSize': { '480p': [ '480p?' ],
'720p': [ '720p?' ],
'1080p': [ '1080p?' ] },
'screenSize': { '480p': [ '480[pi]?' ],
'720p': [ '720[pi]?' ],
'1080p': [ '1080[pi]?' ] },
'videoCodec': { 'XviD': [ 'Xvid' ],
'DivX': [ 'DVDivX', 'DivX' ],
'h264': [ '[hx]-264' ],
'Rv10': [ 'Rv10' ] },
'Rv10': [ 'Rv10' ],
'Mpeg2': [ 'Mpeg2' ] },
# has nothing to do here (or on filenames for that matter), but some
# releases use it and it helps to identify release groups, so we adapt
'videoApi': { 'DXVA': [ 'DXVA' ] },
'audioCodec': { 'AC3': [ 'AC3' ],
'DTS': [ 'DTS' ],
'AAC': [ 'He-AAC', 'AAC-He', 'AAC' ] },
'audioChannels': { '5.1': [ r'5\.1', 'DD5\.1', '5ch' ] },
'audioChannels': { '5.1': [ r'5\.1', 'DD5[\._ ]1', '5ch' ] },
'episodeFormat': { 'Minisode': [ 'Minisodes?' ] }
@ -143,14 +148,21 @@ prop_multi = { 'format': { 'DVD': [ 'DVD', 'DVD-Rip', 'VIDEO-TS', 'DVDivX' ],
# prop_single dict of { property_name: [ canonical_form ] }
prop_single = { 'releaseGroup': [ 'ESiR', 'WAF', 'SEPTiC', r'\[XCT\]', 'iNT', 'PUKKA',
'CHD', 'ViTE', 'TLF', 'DEiTY', 'FLAiTE',
'MDX', 'GM4F', 'DVL', 'SVD', 'iLUMiNADOS', 'FiNaLe',
'UnSeeN', 'aXXo', 'KLAXXON', 'NoTV', 'ZeaL', 'LOL',
'SiNNERS', 'DiRTY', 'REWARD', 'ECI', 'KiNGS', 'CLUE',
'CtrlHD', 'POD', 'WiKi', 'DIMENSION', 'IMMERSE', 'FQM',
'2HD', 'REPTiLE', 'CTU', 'HALCYON', 'EbP', 'SiTV',
'SAiNTS', 'HDBRiSe', 'AlFleNi-TeaM', 'EVOLVE', '0TV',
'TLA', 'NTB', 'ASAP', 'MOMENTUM', 'FoV', 'D-Z0N3' ],
'CHD', 'ViTE', 'TLF', 'FLAiTE',
'MDX', 'GM4F', 'DVL', 'SVD', 'iLUMiNADOS',
'aXXo', 'KLAXXON', 'NoTV', 'ZeaL', 'LOL',
'CtrlHD', 'POD', 'WiKi','IMMERSE', 'FQM',
'2HD', 'CTU', 'HALCYON', 'EbP', 'SiTV',
'HDBRiSe', 'AlFleNi-TeaM', 'EVOLVE', '0TV',
'TLA', 'NTB', 'ASAP', 'MOMENTUM', 'FoV', 'D-Z0N3',
'TrollHD', 'ECI'
],
# potentially confusing release group names (they are words)
'weakReleaseGroup': [ 'DEiTY', 'FiNaLe', 'UnSeeN', 'KiNGS', 'CLUE', 'DIMENSION',
'SAiNTS', 'ARROW', 'EuReKA', 'SiNNERS', 'DiRTY', 'REWARD',
'REPTiLE',
],
'other': [ 'PROPER', 'REPACK', 'LIMITED', 'DualAudio', 'Audiofixed', 'R5',
'complete', 'classic', # not so sure about these ones, could appear in a title
@ -179,6 +191,10 @@ properties_rexps.update(dict((type, dict((canonical_form, [ _to_rexp(canonical_f
def find_properties(string):
result = []
for property_name, props in properties_rexps.items():
# FIXME: this should be done in a more flexible way...
if property_name in ['weakReleaseGroup']:
continue
for canonical_form, rexps in props.items():
for value_rexp in rexps:
match = value_rexp.search(string)

18
libs/guessit/transfo/guess_episodes_rexps.py

@ -28,7 +28,13 @@ import logging
log = logging.getLogger(__name__)
def number_list(s):
return list(re.sub('[^0-9]+', ' ', s).split())
l = [ int(n) for n in re.sub('[^0-9]+', ' ', s).split() ]
if len(l) == 2:
# it is an episode interval, return all numbers in between
return range(l[0], l[1]+1)
return l
def guess_episodes_rexps(string):
for rexp, confidence, span_adjust in episode_rexps:
@ -38,23 +44,23 @@ def guess_episodes_rexps(string):
span = (match.start() + span_adjust[0],
match.end() + span_adjust[1])
# episodes which have a season > 25 are most likely errors
# episodes which have a season > 30 are most likely errors
# (Simpsons is at 24!)
if int(guess.get('season', 0)) > 25:
if int(guess.get('season', 0)) > 30:
continue
# decide whether we have only a single episode number or an
# episode list
if guess.get('episodeNumber'):
eplist = number_list(guess['episodeNumber'])
guess.set('episodeNumber', int(eplist[0]), confidence=confidence)
guess.set('episodeNumber', eplist[0], confidence=confidence)
if len(eplist) > 1:
guess.set('episodeList', list(map(int, eplist)), confidence=confidence)
guess.set('episodeList', eplist, confidence=confidence)
if guess.get('bonusNumber'):
eplist = number_list(guess['bonusNumber'])
guess.set('bonusNumber', int(eplist[0]), confidence=confidence)
guess.set('bonusNumber', eplist[0], confidence=confidence)
return guess, span

71
libs/guessit/transfo/guess_idnumber.py

@ -0,0 +1,71 @@
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# GuessIt - A library for guessing information from filenames
# Copyright (c) 2013 Nicolas Wack <wackou@gmail.com>
#
# GuessIt is free software; you can redistribute it and/or modify it under
# the terms of the Lesser GNU General Public License as published by
# the Free Software Foundation; either version 3 of the License, or
# (at your option) any later version.
#
# GuessIt is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# Lesser GNU General Public License for more details.
#
# You should have received a copy of the Lesser GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
from __future__ import unicode_literals
from guessit.transfo import SingleNodeGuesser
from guessit.patterns import find_properties
import re
import logging
log = logging.getLogger(__name__)
def guess_properties(string):
    """Guess the first known property found in *string*.

    :returns: ({property_name: canonical_value}, (start, end)) or (None, None)
    """
    try:
        prop, value, pos, end = find_properties(string)[0]
    except IndexError:
        # No known property present.
        return None, None
    return {prop: value}, (pos, end)
# Candidate id: a run of at least 10 alphanumeric/dash characters.  # 1.0, (0, 0))
_idnum = re.compile(r'(?P<idNumber>[a-zA-Z0-9-]{10,})')


def guess_idnumber(string):
    """Detect a hash-like id number inside *string*.

    :returns: ({'idNumber': value}, span) when the candidate alternates often
              between character types, otherwise (None, None)
    """
    match = _idnum.search(string)
    if match is None:
        return None, None

    result = match.groupdict()
    candidate = result['idNumber']

    DIGIT, LETTER, OTHER = 0, 1, 2

    transitions = 0
    previous = LETTER
    # The regex restricts candidate to [a-zA-Z0-9-], so isdigit()/isalpha()
    # classify exactly like explicit ASCII character sets would.
    for char in candidate:
        if char.isdigit():
            kind = DIGIT
        elif char.isalpha():
            kind = LETTER
        else:
            kind = OTHER

        if kind != previous:
            transitions += 1
        previous = kind

    switch_ratio = float(transitions) / len(candidate)

    # only return the result as probable if we alternate often between
    # char type (more likely for hash values than for common words)
    if switch_ratio > 0.4:
        return result, match.span()

    return None, None
def process(mtree):
    """Attach id-number guesses to *mtree* nodes (confidence weight 0.4)."""
    SingleNodeGuesser(guess_idnumber, 0.4, log).process(mtree)

21
libs/guessit/transfo/guess_release_group.py

@ -31,16 +31,22 @@ def get_patterns(property_name):
CODECS = get_patterns('videoCodec')
FORMATS = get_patterns('format')
VAPIS = get_patterns('videoApi')
GROUP_NAMES = [ r'(?P<videoCodec>' + codec + r')-?(?P<releaseGroup>.*?)[ \.]'
# RG names following a codec or format, with a potential space or dash inside the name
GROUP_NAMES = [ r'(?P<videoCodec>' + codec + r')[ \.-](?P<releaseGroup>.+?([- \.].*?)??)[ \.]'
for codec in CODECS ]
GROUP_NAMES += [ r'(?P<format>' + fmt + r')-?(?P<releaseGroup>.*?)[ \.]'
GROUP_NAMES += [ r'(?P<format>' + fmt + r')[ \.-](?P<releaseGroup>.+?([- \.].*?)??)[ \.]'
for fmt in FORMATS ]
GROUP_NAMES += [ r'(?P<videoApi>' + api + r')[ \.-](?P<releaseGroup>.+?([- \.].*?)??)[ \.]'
for api in VAPIS ]
GROUP_NAMES2 = [ r'\.(?P<videoCodec>' + codec + r')-(?P<releaseGroup>.*?)(-(.*?))?[ \.]'
for codec in CODECS ]
GROUP_NAMES2 += [ r'\.(?P<format>' + fmt + r')-(?P<releaseGroup>.*?)(-(.*?))?[ \.]'
GROUP_NAMES2 += [ r'\.(?P<format>' + fmt + r')-(?P<releaseGroup>.*?)(-(.*?))?[ \.]'
for fmt in FORMATS ]
GROUP_NAMES2 += [ r'\.(?P<videoApi>' + vapi + r')-(?P<releaseGroup>.*?)(-(.*?))?[ \.]'
for vapi in VAPIS ]
GROUP_NAMES = [ re.compile(r, re.IGNORECASE) for r in GROUP_NAMES ]
GROUP_NAMES2 = [ re.compile(r, re.IGNORECASE) for r in GROUP_NAMES2 ]
@ -54,12 +60,17 @@ def guess_release_group(string):
# first try to see whether we have both a known codec and a known release group
for rexp in GROUP_NAMES:
match = rexp.search(string)
if match:
while match:
metadata = match.groupdict()
release_group = compute_canonical_form('releaseGroup', metadata['releaseGroup'])
# make sure this is an actual release group we caught
release_group = (compute_canonical_form('releaseGroup', metadata['releaseGroup']) or
compute_canonical_form('weakReleaseGroup', metadata['releaseGroup']))
if release_group:
return adjust_metadata(metadata), (match.start(1), match.end(2))
# we didn't find anything conclusive, keep searching
match = rexp.search(string, match.span()[0]+1)
# pick anything as releaseGroup as long as we have a codec in front
# this doesn't include a potential dash ('-') ending the release group
# eg: [...].X264-HiS@SiLUHD-English.[...]

16
libs/guessit/transfo/guess_year.py

@ -33,6 +33,18 @@ def guess_year(string):
else:
return None, None
def guess_year_skip_first(string):
    """Like guess_year(), but ignore the first year found in *string*.

    Used when the first year is assumed to be part of the title.

    :returns: (year, (start, end)) with the span relative to *string*,
              or (None, None)
    """
    first_year, first_span = search_year(string)
    if not first_year:
        return None, None

    offset = first_span[1]
    year, span = guess_year(string[offset:])
    if not year:
        return None, None

    # Re-base the span onto the original string.
    return year, (span[0] + offset, span[1] + offset)
def process(mtree):
SingleNodeGuesser(guess_year, 1.0, log).process(mtree)
def process(mtree, skip_first_year=False):
if skip_first_year:
SingleNodeGuesser(guess_year_skip_first, 1.0, log).process(mtree)
else:
SingleNodeGuesser(guess_year, 1.0, log).process(mtree)

201
libs/logr/__init__.py

@ -0,0 +1,201 @@
# logr - Simple python logging wrapper
# Packed by Dean Gardiner <gardiner91@gmail.com>
#
# File part of:
# rdio-sock - Rdio WebSocket Library
# Copyright (C) 2013 fzza- <fzzzzzzzza@gmail.com>
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import inspect
import logging
import os
import sys
IGNORE = ()
PY3 = sys.version_info[0] == 3
class Logr(object):
    """Static logging facade: loggers are created lazily and named after the
    calling module/class, which is discovered via stack inspection."""

    # Cache of created loggers, keyed by derived logger name.
    loggers = {}
    # Shared handler; configured lazily by configure_check().
    handler = None

    @staticmethod
    def configure(level=logging.WARNING, handler=None, formatter=None):
        """Configure Logr
        @param handler: Logger message handler
        @type handler: logging.Handler or None
        @param formatter: Logger message Formatter
        @type formatter: logging.Formatter or None
        """
        if formatter is None:
            formatter = LogrFormatter()

        if handler is None:
            handler = logging.StreamHandler()

        handler.setFormatter(formatter)
        handler.setLevel(level)
        Logr.handler = handler

    @staticmethod
    def configure_check():
        # Ensure a handler exists before any logger is created.
        if Logr.handler is None:
            Logr.configure()

    @staticmethod
    def _get_name_from_path(filename):
        # Derive a logger name from a file path; filename may be None
        # (e.g. no __file__ in the frame globals), hence the TypeError guard.
        try:
            return os.path.splitext(os.path.basename(filename))[0]
        except TypeError:
            return "<unknown>"

    @staticmethod
    def get_logger_name():
        """Walk the call stack outward and derive a logger name from the first
        frame whose name is not in IGNORE; returns "" when none is found."""
        stack = inspect.stack()

        for x in xrange_six(len(stack)):
            frame = stack[x][0]
            name = None

            # Try find name of function defined inside a class
            if len(frame.f_code.co_varnames) > 0:
                self_argument = frame.f_code.co_varnames[0]

                if self_argument == 'self' and self_argument in frame.f_locals:
                    instance = frame.f_locals[self_argument]

                    class_ = instance.__class__
                    class_name = class_.__name__
                    module_name = class_.__module__

                    # Qualify with the module unless running as a script.
                    if module_name != '__main__':
                        name = module_name + '.' + class_name
                    else:
                        name = class_name

            # Try find name of function defined outside of a class
            if name is None:
                if frame.f_code.co_name in frame.f_globals:
                    name = frame.f_globals.get('__name__')
                    if name == '__main__':
                        name = Logr._get_name_from_path(frame.f_globals.get('__file__'))
                        name = name  # NOTE(review): no-op assignment, kept as-is
                elif frame.f_code.co_name == '<module>':
                    name = Logr._get_name_from_path(frame.f_globals.get('__file__'))

            if name is not None and name not in IGNORE:
                return name

        return ""

    @staticmethod
    def get_logger():
        """Get or create logger (if it does not exist)
        @rtype: RootLogger
        """
        name = Logr.get_logger_name()
        if name not in Logr.loggers:
            Logr.configure_check()
            # logging.Logger is instantiated directly (not via getLogger), so
            # these loggers live outside the global logging hierarchy.
            Logr.loggers[name] = logging.Logger(name)
            Logr.loggers[name].addHandler(Logr.handler)
        return Logr.loggers[name]

    # Convenience wrappers mirroring the stdlib logging API.

    @staticmethod
    def debug(msg, *args, **kwargs):
        Logr.get_logger().debug(msg, *args, **kwargs)

    @staticmethod
    def info(msg, *args, **kwargs):
        Logr.get_logger().info(msg, *args, **kwargs)

    @staticmethod
    def warning(msg, *args, **kwargs):
        Logr.get_logger().warning(msg, *args, **kwargs)
    warn = warning

    @staticmethod
    def error(msg, *args, **kwargs):
        Logr.get_logger().error(msg, *args, **kwargs)

    @staticmethod
    def exception(msg, *args, **kwargs):
        Logr.get_logger().exception(msg, *args, **kwargs)

    @staticmethod
    def critical(msg, *args, **kwargs):
        Logr.get_logger().critical(msg, *args, **kwargs)
    fatal = critical

    @staticmethod
    def log(level, msg, *args, **kwargs):
        Logr.get_logger().log(level, msg, *args, **kwargs)
class LogrFormatter(logging.Formatter):
    """Fixed-width formatter: "<asctime> <name> <level> <message>" with the
    name right-justified to 32 columns and the level clipped to 5."""

    LENGTH_NAME = 32
    LENGTH_LEVEL_NAME = 5

    def __init__(self, fmt=None, datefmt=None):
        # logging.Formatter is an old-style class before Python 2.7,
        # so super() only works on 2.7+/3.x.
        if sys.version_info[:2] > (2, 6):
            super(LogrFormatter, self).__init__(fmt, datefmt)
        else:
            logging.Formatter.__init__(self, fmt, datefmt)

    def usesTime(self):
        # Always stamp records with asctime.
        return True

    def format(self, record):
        record.message = record.getMessage()
        if self.usesTime():
            record.asctime = self.formatTime(record, self.datefmt)

        name_column = record.name[-self.LENGTH_NAME:].rjust(self.LENGTH_NAME, ' ')
        level_column = record.levelname[:self.LENGTH_LEVEL_NAME].ljust(self.LENGTH_LEVEL_NAME, ' ')
        s = ' '.join((record.asctime, name_column, level_column, record.message))

        if record.exc_info and not record.exc_text:
            record.exc_text = self.formatException(record.exc_info)

        if record.exc_text:
            if not s.endswith("\n"):
                s += "\n"
            try:
                s += record.exc_text
            except UnicodeError:
                # Python 2: exc_text may be bytes in the filesystem encoding.
                s = s + record.exc_text.decode(sys.getfilesystemencoding(),
                                               'replace')
        return s
def xrange_six(start, stop=None, step=None):
    """Version-agnostic ``xrange``: ``range`` on Python 3, ``xrange`` on 2.

    Fixes the two-argument form: ``xrange_six(2, 5)`` previously fell into
    the single-argument branch (``step`` was None) and returned ``range(2)``,
    silently ignoring ``stop``.
    """
    if stop is None:
        args = (start,)
    elif step is None:
        args = (start, stop)
    else:
        args = (start, stop, step)

    # Equivalent to the module-level PY3 flag (sys.version_info[0] == 3).
    if sys.version_info[0] == 3:
        return range(*args)
    return xrange(*args)

2
libs/synchronousdeluge/transfer.py

@ -19,7 +19,7 @@ class DelugeTransfer(object):
self.disconnect()
self.sock = socket.create_connection(hostport)
self.conn = ssl.wrap_socket(self.sock)
self.conn = ssl.wrap_socket(self.sock, None, None, False, ssl.CERT_NONE, ssl.PROTOCOL_SSLv3)
self.connected = True
def disconnect(self):

27
libs/unrar2/PKG-INFO

@ -1,27 +0,0 @@
Metadata-Version: 1.0
Name: pyUnRAR2
Version: 0.99.2
Summary: Improved Python wrapper around the free UnRAR.dll
Home-page: http://code.google.com/py-unrar2
Author: Konstantin Yegupov
Author-email: yk4ever@gmail.com
License: MIT
Description: pyUnRAR2 is a ctypes based wrapper around the free UnRAR.dll.
It is a modified version of Jimmy Retzlaff's pyUnRAR - simpler,
more stable and foolproof.
Notice that it has INCOMPATIBLE interface.
It enables reading and unpacking of archives created with the
RAR/WinRAR archivers. There is a low-level interface which is very
similar to the C interface provided by UnRAR. There is also a
higher level interface which makes some common operations easier.
Platform: Windows
Classifier: Development Status :: 4 - Beta
Classifier: Environment :: Win32 (MS Windows)
Classifier: License :: OSI Approved :: MIT License
Classifier: Natural Language :: English
Classifier: Operating System :: Microsoft :: Windows
Classifier: Programming Language :: Python
Classifier: Topic :: Software Development :: Libraries :: Python Modules
Classifier: Topic :: System :: Archiving :: Compression

191
libs/unrar2/UnRAR2.html

@ -1,191 +0,0 @@
<!DOCTYPE html PUBLIC "-//W3C//DTD HTML 4.0 Transitional//EN">
<html><head><title>Python: package UnRAR2</title>
</head><body bgcolor="#f0f0f8">
<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="heading">
<tr bgcolor="#7799ee">
<td valign=bottom>&nbsp;<br>
<font color="#ffffff" face="helvetica, arial">&nbsp;<br><big><big><strong>UnRAR2</strong></big></big> (version 0.99.1)</font></td
><td align=right valign=bottom
><font color="#ffffff" face="helvetica, arial"><a href=".">index</a><br><a href="file:///C|/python26/lib/site-packages/unrar2/__init__.py">c:\python26\lib\site-packages\unrar2\__init__.py</a></font></td></tr></table>
<p><tt>pyUnRAR2&nbsp;is&nbsp;a&nbsp;ctypes&nbsp;based&nbsp;wrapper&nbsp;around&nbsp;the&nbsp;free&nbsp;UnRAR.dll.&nbsp;<br>
&nbsp;<br>
It&nbsp;is&nbsp;a&nbsp;modified&nbsp;version&nbsp;of&nbsp;Jimmy&nbsp;Retzlaff's&nbsp;pyUnRAR&nbsp;-&nbsp;more&nbsp;simple,<br>
stable&nbsp;and&nbsp;foolproof.<br>
Notice&nbsp;that&nbsp;it&nbsp;has&nbsp;INCOMPATIBLE&nbsp;interface.<br>
&nbsp;<br>
It&nbsp;enables&nbsp;reading&nbsp;and&nbsp;unpacking&nbsp;of&nbsp;archives&nbsp;created&nbsp;with&nbsp;the<br>
RAR/WinRAR&nbsp;archivers.&nbsp;There&nbsp;is&nbsp;a&nbsp;low-level&nbsp;interface&nbsp;which&nbsp;is&nbsp;very<br>
similar&nbsp;to&nbsp;the&nbsp;C&nbsp;interface&nbsp;provided&nbsp;by&nbsp;UnRAR.&nbsp;There&nbsp;is&nbsp;also&nbsp;a<br>
higher&nbsp;level&nbsp;interface&nbsp;which&nbsp;makes&nbsp;some&nbsp;common&nbsp;operations&nbsp;easier.</tt></p>
<p>
<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="section">
<tr bgcolor="#aa55cc">
<td colspan=3 valign=bottom>&nbsp;<br>
<font color="#ffffff" face="helvetica, arial"><big><strong>Package Contents</strong></big></font></td></tr>
<tr><td bgcolor="#aa55cc"><tt>&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;</tt></td><td>&nbsp;</td>
<td width="100%"><table width="100%" summary="list"><tr><td width="25%" valign=top><a href="UnRAR2.rar_exceptions.html">rar_exceptions</a><br>
<a href="UnRAR2.setup.html">setup</a><br>
</td><td width="25%" valign=top><a href="UnRAR2.test_UnRAR2.html">test_UnRAR2</a><br>
<a href="UnRAR2.unix.html">unix</a><br>
</td><td width="25%" valign=top><a href="UnRAR2.windows.html">windows</a><br>
</td><td width="25%" valign=top></td></tr></table></td></tr></table><p>
<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="section">
<tr bgcolor="#ee77aa">
<td colspan=3 valign=bottom>&nbsp;<br>
<font color="#ffffff" face="helvetica, arial"><big><strong>Classes</strong></big></font></td></tr>
<tr><td bgcolor="#ee77aa"><tt>&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;</tt></td><td>&nbsp;</td>
<td width="100%"><dl>
<dt><font face="helvetica, arial"><a href="UnRAR2.windows.html#RarFileImplementation">UnRAR2.windows.RarFileImplementation</a>(<a href="__builtin__.html#object">__builtin__.object</a>)
</font></dt><dd>
<dl>
<dt><font face="helvetica, arial"><a href="UnRAR2.html#RarFile">RarFile</a>
</font></dt></dl>
</dd>
<dt><font face="helvetica, arial"><a href="__builtin__.html#object">__builtin__.object</a>
</font></dt><dd>
<dl>
<dt><font face="helvetica, arial"><a href="UnRAR2.html#RarInfo">RarInfo</a>
</font></dt></dl>
</dd>
</dl>
<p>
<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="section">
<tr bgcolor="#ffc8d8">
<td colspan=3 valign=bottom>&nbsp;<br>
<font color="#000000" face="helvetica, arial"><a name="RarFile">class <strong>RarFile</strong></a>(<a href="UnRAR2.windows.html#RarFileImplementation">UnRAR2.windows.RarFileImplementation</a>)</font></td></tr>
<tr><td bgcolor="#ffc8d8"><tt>&nbsp;&nbsp;&nbsp;</tt></td><td>&nbsp;</td>
<td width="100%"><dl><dt>Method resolution order:</dt>
<dd><a href="UnRAR2.html#RarFile">RarFile</a></dd>
<dd><a href="UnRAR2.windows.html#RarFileImplementation">UnRAR2.windows.RarFileImplementation</a></dd>
<dd><a href="__builtin__.html#object">__builtin__.object</a></dd>
</dl>
<hr>
Methods defined here:<br>
<dl><dt><a name="RarFile-__del__"><strong>__del__</strong></a>(self)</dt></dl>
<dl><dt><a name="RarFile-__init__"><strong>__init__</strong></a>(self, archiveName, password<font color="#909090">=None</font>)</dt><dd><tt>Instantiate&nbsp;the&nbsp;archive.<br>
&nbsp;<br>
archiveName&nbsp;is&nbsp;the&nbsp;name&nbsp;of&nbsp;the&nbsp;RAR&nbsp;file.<br>
password&nbsp;is&nbsp;used&nbsp;to&nbsp;decrypt&nbsp;the&nbsp;files&nbsp;in&nbsp;the&nbsp;archive.<br>
&nbsp;<br>
Properties:<br>
&nbsp;&nbsp;&nbsp;&nbsp;comment&nbsp;-&nbsp;comment&nbsp;associated&nbsp;with&nbsp;the&nbsp;archive<br>
&nbsp;<br>
&gt;&gt;&gt;&nbsp;print&nbsp;<a href="#RarFile">RarFile</a>('test.rar').comment<br>
This&nbsp;is&nbsp;a&nbsp;test.</tt></dd></dl>
<dl><dt><a name="RarFile-extract"><strong>extract</strong></a>(self, condition<font color="#909090">='*'</font>, path<font color="#909090">='.'</font>, withSubpath<font color="#909090">=True</font>, overwrite<font color="#909090">=True</font>)</dt><dd><tt>Extract&nbsp;specific&nbsp;files&nbsp;from&nbsp;archive&nbsp;to&nbsp;disk.<br>
&nbsp;<br>
If&nbsp;"condition"&nbsp;is&nbsp;a&nbsp;list&nbsp;of&nbsp;numbers,&nbsp;then&nbsp;extract&nbsp;files&nbsp;which&nbsp;have&nbsp;those&nbsp;positions&nbsp;in&nbsp;infolist.<br>
If&nbsp;"condition"&nbsp;is&nbsp;a&nbsp;string,&nbsp;then&nbsp;it&nbsp;is&nbsp;treated&nbsp;as&nbsp;a&nbsp;wildcard&nbsp;for&nbsp;names&nbsp;of&nbsp;files&nbsp;to&nbsp;extract.<br>
If&nbsp;"condition"&nbsp;is&nbsp;a&nbsp;function,&nbsp;it&nbsp;is&nbsp;treated&nbsp;as&nbsp;a&nbsp;callback&nbsp;function,&nbsp;which&nbsp;accepts&nbsp;a&nbsp;<a href="#RarInfo">RarInfo</a>&nbsp;<a href="__builtin__.html#object">object</a><br>
&nbsp;&nbsp;&nbsp;&nbsp;and&nbsp;returns&nbsp;either&nbsp;boolean&nbsp;True&nbsp;(extract)&nbsp;or&nbsp;boolean&nbsp;False&nbsp;(skip).<br>
DEPRECATED:&nbsp;If&nbsp;"condition"&nbsp;callback&nbsp;returns&nbsp;string&nbsp;(only&nbsp;supported&nbsp;for&nbsp;Windows)&nbsp;-&nbsp;<br>
&nbsp;&nbsp;&nbsp;&nbsp;that&nbsp;string&nbsp;will&nbsp;be&nbsp;used&nbsp;as&nbsp;a&nbsp;new&nbsp;name&nbsp;to&nbsp;save&nbsp;the&nbsp;file&nbsp;under.<br>
If&nbsp;"condition"&nbsp;is&nbsp;omitted,&nbsp;all&nbsp;files&nbsp;are&nbsp;extracted.<br>
&nbsp;<br>
"path"&nbsp;is&nbsp;a&nbsp;directory&nbsp;to&nbsp;extract&nbsp;to<br>
"withSubpath"&nbsp;flag&nbsp;denotes&nbsp;whether&nbsp;files&nbsp;are&nbsp;extracted&nbsp;with&nbsp;their&nbsp;full&nbsp;path&nbsp;in&nbsp;the&nbsp;archive.<br>
"overwrite"&nbsp;flag&nbsp;denotes&nbsp;whether&nbsp;extracted&nbsp;files&nbsp;will&nbsp;overwrite&nbsp;old&nbsp;ones.&nbsp;Defaults&nbsp;to&nbsp;true.<br>
&nbsp;<br>
Returns&nbsp;list&nbsp;of&nbsp;RarInfos&nbsp;for&nbsp;extracted&nbsp;files.</tt></dd></dl>
<dl><dt><a name="RarFile-infoiter"><strong>infoiter</strong></a>(self)</dt><dd><tt>Iterate&nbsp;over&nbsp;all&nbsp;the&nbsp;files&nbsp;in&nbsp;the&nbsp;archive,&nbsp;generating&nbsp;RarInfos.<br>
&nbsp;<br>
&gt;&gt;&gt;&nbsp;import&nbsp;os<br>
&gt;&gt;&gt;&nbsp;for&nbsp;fileInArchive&nbsp;in&nbsp;<a href="#RarFile">RarFile</a>('test.rar').<a href="#RarFile-infoiter">infoiter</a>():<br>
...&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;print&nbsp;os.path.split(fileInArchive.filename)[-1],<br>
...&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;print&nbsp;fileInArchive.isdir,<br>
...&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;print&nbsp;fileInArchive.size,<br>
...&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;print&nbsp;fileInArchive.comment,<br>
...&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;print&nbsp;tuple(fileInArchive.datetime)[0:5],<br>
...&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;print&nbsp;time.strftime('%a,&nbsp;%d&nbsp;%b&nbsp;%Y&nbsp;%H:%M',&nbsp;fileInArchive.datetime)<br>
test&nbsp;True&nbsp;0&nbsp;None&nbsp;(2003,&nbsp;6,&nbsp;30,&nbsp;1,&nbsp;59)&nbsp;Mon,&nbsp;30&nbsp;Jun&nbsp;2003&nbsp;01:59<br>
test.txt&nbsp;False&nbsp;20&nbsp;None&nbsp;(2003,&nbsp;6,&nbsp;30,&nbsp;2,&nbsp;1)&nbsp;Mon,&nbsp;30&nbsp;Jun&nbsp;2003&nbsp;02:01<br>
this.py&nbsp;False&nbsp;1030&nbsp;None&nbsp;(2002,&nbsp;2,&nbsp;8,&nbsp;16,&nbsp;47)&nbsp;Fri,&nbsp;08&nbsp;Feb&nbsp;2002&nbsp;16:47</tt></dd></dl>
<dl><dt><a name="RarFile-infolist"><strong>infolist</strong></a>(self)</dt><dd><tt>Return&nbsp;a&nbsp;list&nbsp;of&nbsp;RarInfos,&nbsp;describing&nbsp;the&nbsp;contents&nbsp;of&nbsp;the&nbsp;archive.</tt></dd></dl>
<dl><dt><a name="RarFile-read_files"><strong>read_files</strong></a>(self, condition<font color="#909090">='*'</font>)</dt><dd><tt>Read&nbsp;specific&nbsp;files&nbsp;from&nbsp;archive&nbsp;into&nbsp;memory.<br>
If&nbsp;"condition"&nbsp;is&nbsp;a&nbsp;list&nbsp;of&nbsp;numbers,&nbsp;then&nbsp;return&nbsp;files&nbsp;which&nbsp;have&nbsp;those&nbsp;positions&nbsp;in&nbsp;infolist.<br>
If&nbsp;"condition"&nbsp;is&nbsp;a&nbsp;string,&nbsp;then&nbsp;it&nbsp;is&nbsp;treated&nbsp;as&nbsp;a&nbsp;wildcard&nbsp;for&nbsp;names&nbsp;of&nbsp;files&nbsp;to&nbsp;extract.<br>
If&nbsp;"condition"&nbsp;is&nbsp;a&nbsp;function,&nbsp;it&nbsp;is&nbsp;treated&nbsp;as&nbsp;a&nbsp;callback&nbsp;function,&nbsp;which&nbsp;accepts&nbsp;a&nbsp;<a href="#RarInfo">RarInfo</a>&nbsp;<a href="__builtin__.html#object">object</a>&nbsp;<br>
&nbsp;&nbsp;&nbsp;&nbsp;and&nbsp;returns&nbsp;boolean&nbsp;True&nbsp;(extract)&nbsp;or&nbsp;False&nbsp;(skip).<br>
If&nbsp;"condition"&nbsp;is&nbsp;omitted,&nbsp;all&nbsp;files&nbsp;are&nbsp;returned.<br>
&nbsp;<br>
Returns&nbsp;list&nbsp;of&nbsp;tuples&nbsp;(<a href="#RarInfo">RarInfo</a>&nbsp;info,&nbsp;str&nbsp;contents)</tt></dd></dl>
<hr>
Methods inherited from <a href="UnRAR2.windows.html#RarFileImplementation">UnRAR2.windows.RarFileImplementation</a>:<br>
<dl><dt><a name="RarFile-destruct"><strong>destruct</strong></a>(self)</dt></dl>
<dl><dt><a name="RarFile-init"><strong>init</strong></a>(self, password<font color="#909090">=None</font>)</dt></dl>
<dl><dt><a name="RarFile-make_sure_ready"><strong>make_sure_ready</strong></a>(self)</dt></dl>
<hr>
Data descriptors inherited from <a href="UnRAR2.windows.html#RarFileImplementation">UnRAR2.windows.RarFileImplementation</a>:<br>
<dl><dt><strong>__dict__</strong></dt>
<dd><tt>dictionary&nbsp;for&nbsp;instance&nbsp;variables&nbsp;(if&nbsp;defined)</tt></dd>
</dl>
<dl><dt><strong>__weakref__</strong></dt>
<dd><tt>list&nbsp;of&nbsp;weak&nbsp;references&nbsp;to&nbsp;the&nbsp;object&nbsp;(if&nbsp;defined)</tt></dd>
</dl>
</td></tr></table> <p>
<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="section">
<tr bgcolor="#ffc8d8">
<td colspan=3 valign=bottom>&nbsp;<br>
<font color="#000000" face="helvetica, arial"><a name="RarInfo">class <strong>RarInfo</strong></a>(<a href="__builtin__.html#object">__builtin__.object</a>)</font></td></tr>
<tr bgcolor="#ffc8d8"><td rowspan=2><tt>&nbsp;&nbsp;&nbsp;</tt></td>
<td colspan=2><tt>Represents&nbsp;a&nbsp;file&nbsp;header&nbsp;in&nbsp;an&nbsp;archive.&nbsp;Don't&nbsp;instantiate&nbsp;directly.<br>
Use&nbsp;only&nbsp;to&nbsp;obtain&nbsp;information&nbsp;about&nbsp;file.<br>
YOU&nbsp;CANNOT&nbsp;EXTRACT&nbsp;FILE&nbsp;CONTENTS&nbsp;USING&nbsp;THIS&nbsp;OBJECT.<br>
USE&nbsp;METHODS&nbsp;OF&nbsp;<a href="#RarFile">RarFile</a>&nbsp;CLASS&nbsp;INSTEAD.<br>
&nbsp;<br>
Properties:<br>
&nbsp;&nbsp;&nbsp;&nbsp;index&nbsp;-&nbsp;index&nbsp;of&nbsp;file&nbsp;within&nbsp;the&nbsp;archive<br>
&nbsp;&nbsp;&nbsp;&nbsp;filename&nbsp;-&nbsp;name&nbsp;of&nbsp;the&nbsp;file&nbsp;in&nbsp;the&nbsp;archive&nbsp;including&nbsp;path&nbsp;(if&nbsp;any)<br>
&nbsp;&nbsp;&nbsp;&nbsp;datetime&nbsp;-&nbsp;file&nbsp;date/time&nbsp;as&nbsp;a&nbsp;struct_time&nbsp;suitable&nbsp;for&nbsp;time.strftime<br>
&nbsp;&nbsp;&nbsp;&nbsp;isdir&nbsp;-&nbsp;True&nbsp;if&nbsp;the&nbsp;file&nbsp;is&nbsp;a&nbsp;directory<br>
&nbsp;&nbsp;&nbsp;&nbsp;size&nbsp;-&nbsp;size&nbsp;in&nbsp;bytes&nbsp;of&nbsp;the&nbsp;uncompressed&nbsp;file<br>
&nbsp;&nbsp;&nbsp;&nbsp;comment&nbsp;-&nbsp;comment&nbsp;associated&nbsp;with&nbsp;the&nbsp;file<br>
&nbsp;&nbsp;&nbsp;&nbsp;<br>
Note&nbsp;-&nbsp;this&nbsp;is&nbsp;not&nbsp;currently&nbsp;intended&nbsp;to&nbsp;be&nbsp;a&nbsp;Python&nbsp;file-like&nbsp;<a href="__builtin__.html#object">object</a>.<br>&nbsp;</tt></td></tr>
<tr><td>&nbsp;</td>
<td width="100%">Methods defined here:<br>
<dl><dt><a name="RarInfo-__init__"><strong>__init__</strong></a>(self, rarfile, data)</dt></dl>
<dl><dt><a name="RarInfo-__str__"><strong>__str__</strong></a>(self)</dt></dl>
<hr>
Data descriptors defined here:<br>
<dl><dt><strong>__dict__</strong></dt>
<dd><tt>dictionary&nbsp;for&nbsp;instance&nbsp;variables&nbsp;(if&nbsp;defined)</tt></dd>
</dl>
<dl><dt><strong>__weakref__</strong></dt>
<dd><tt>list&nbsp;of&nbsp;weak&nbsp;references&nbsp;to&nbsp;the&nbsp;object&nbsp;(if&nbsp;defined)</tt></dd>
</dl>
</td></tr></table></td></tr></table><p>
<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="section">
<tr bgcolor="#eeaa77">
<td colspan=3 valign=bottom>&nbsp;<br>
<font color="#ffffff" face="helvetica, arial"><big><strong>Functions</strong></big></font></td></tr>
<tr><td bgcolor="#eeaa77"><tt>&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;</tt></td><td>&nbsp;</td>
<td width="100%"><dl><dt><a name="-condition2checker"><strong>condition2checker</strong></a>(condition)</dt><dd><tt>Converts&nbsp;different&nbsp;condition&nbsp;types&nbsp;to&nbsp;callback</tt></dd></dl>
</td></tr></table><p>
<table width="100%" cellspacing=0 cellpadding=2 border=0 summary="section">
<tr bgcolor="#55aa55">
<td colspan=3 valign=bottom>&nbsp;<br>
<font color="#ffffff" face="helvetica, arial"><big><strong>Data</strong></big></font></td></tr>
<tr><td bgcolor="#55aa55"><tt>&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;</tt></td><td>&nbsp;</td>
<td width="100%"><strong>__version__</strong> = '0.99.1'<br>
<strong>in_windows</strong> = True</td></tr></table>
</body></html>

18
libs/unrar2/UnRARDLL/license.txt

@ -1,18 +0,0 @@
The unrar.dll library is freeware. This means:
1. All copyrights to RAR and the unrar.dll are exclusively
owned by the author - Alexander Roshal.
2. The unrar.dll library may be used in any software to handle RAR
archives without limitations free of charge.
3. THE RAR ARCHIVER AND THE UNRAR.DLL LIBRARY ARE DISTRIBUTED "AS IS".
NO WARRANTY OF ANY KIND IS EXPRESSED OR IMPLIED. YOU USE AT
YOUR OWN RISK. THE AUTHOR WILL NOT BE LIABLE FOR DATA LOSS,
DAMAGES, LOSS OF PROFITS OR ANY OTHER KIND OF LOSS WHILE USING
OR MISUSING THIS SOFTWARE.
Thank you for your interest in RAR and unrar.dll.
Alexander L. Roshal

140
libs/unrar2/UnRARDLL/unrar.h

@ -1,140 +0,0 @@
/* Public C interface for UnRAR.dll: error codes, open modes, archive and
 * file-header structures, and the exported extraction entry points.
 * Full semantics of each function are described in unrardll.txt. */
#ifndef _UNRAR_DLL_
#define _UNRAR_DLL_
/* Error/status codes returned by the RAR* functions below. */
#define ERAR_END_ARCHIVE 10
#define ERAR_NO_MEMORY 11
#define ERAR_BAD_DATA 12
#define ERAR_BAD_ARCHIVE 13
#define ERAR_UNKNOWN_FORMAT 14
#define ERAR_EOPEN 15
#define ERAR_ECREATE 16
#define ERAR_ECLOSE 17
#define ERAR_EREAD 18
#define ERAR_EWRITE 19
#define ERAR_SMALL_BUF 20
#define ERAR_UNKNOWN 21
#define ERAR_MISSING_PASSWORD 22
/* Open modes for RAROpenArchive[Ex] (list headers only, extract,
 * or list including split-file continuation headers). */
#define RAR_OM_LIST 0
#define RAR_OM_EXTRACT 1
#define RAR_OM_LIST_INCSPLIT 2
/* Per-file operations for RARProcessFile[W]. */
#define RAR_SKIP 0
#define RAR_TEST 1
#define RAR_EXTRACT 2
/* Modes passed with the UCM_CHANGEVOLUME callback event. */
#define RAR_VOL_ASK 0
#define RAR_VOL_NOTIFY 1
#define RAR_DLL_VERSION 4
/* On non-Windows builds, map the Win32 type and calling-convention
 * macros used below to plain C equivalents. */
#ifdef _UNIX
#define CALLBACK
#define PASCAL
#define LONG long
#define HANDLE void *
#define LPARAM long
#define UINT unsigned int
#endif
/* File header filled in by RARReadHeader (ANSI names, 32-bit sizes). */
struct RARHeaderData
{
char ArcName[260];
char FileName[260];
unsigned int Flags;
unsigned int PackSize;
unsigned int UnpSize;
unsigned int HostOS;
unsigned int FileCRC;
unsigned int FileTime;
unsigned int UnpVer;
unsigned int Method;
unsigned int FileAttr;
char *CmtBuf;
unsigned int CmtBufSize;
unsigned int CmtSize;
unsigned int CmtState;
};
/* Extended header for RARReadHeaderEx: adds Unicode names and the
 * high 32 bits of packed/unpacked sizes. */
struct RARHeaderDataEx
{
char ArcName[1024];
wchar_t ArcNameW[1024];
char FileName[1024];
wchar_t FileNameW[1024];
unsigned int Flags;
unsigned int PackSize;
unsigned int PackSizeHigh;
unsigned int UnpSize;
unsigned int UnpSizeHigh;
unsigned int HostOS;
unsigned int FileCRC;
unsigned int FileTime;
unsigned int UnpVer;
unsigned int Method;
unsigned int FileAttr;
char *CmtBuf;
unsigned int CmtBufSize;
unsigned int CmtSize;
unsigned int CmtState;
unsigned int Reserved[1024];
};
/* Input/output block for RAROpenArchive. */
struct RAROpenArchiveData
{
char *ArcName;
unsigned int OpenMode;
unsigned int OpenResult;
char *CmtBuf;
unsigned int CmtBufSize;
unsigned int CmtSize;
unsigned int CmtState;
};
/* Extended open block for RAROpenArchiveEx: adds a Unicode archive
 * name and an output Flags field with archive attributes. */
struct RAROpenArchiveDataEx
{
char *ArcName;
wchar_t *ArcNameW;
unsigned int OpenMode;
unsigned int OpenResult;
char *CmtBuf;
unsigned int CmtBufSize;
unsigned int CmtSize;
unsigned int CmtState;
unsigned int Flags;
unsigned int Reserved[32];
};
/* Event codes delivered to the UNRARCALLBACK function. */
enum UNRARCALLBACK_MESSAGES {
UCM_CHANGEVOLUME,UCM_PROCESSDATA,UCM_NEEDPASSWORD
};
typedef int (CALLBACK *UNRARCALLBACK)(UINT msg,LPARAM UserData,LPARAM P1,LPARAM P2);
/* Legacy per-event callback types; superseded by RARSetCallback. */
typedef int (PASCAL *CHANGEVOLPROC)(char *ArcName,int Mode);
typedef int (PASCAL *PROCESSDATAPROC)(unsigned char *Addr,int Size);
#ifdef __cplusplus
extern "C" {
#endif
/* Exported DLL entry points. */
HANDLE PASCAL RAROpenArchive(struct RAROpenArchiveData *ArchiveData);
HANDLE PASCAL RAROpenArchiveEx(struct RAROpenArchiveDataEx *ArchiveData);
int PASCAL RARCloseArchive(HANDLE hArcData);
int PASCAL RARReadHeader(HANDLE hArcData,struct RARHeaderData *HeaderData);
int PASCAL RARReadHeaderEx(HANDLE hArcData,struct RARHeaderDataEx *HeaderData);
int PASCAL RARProcessFile(HANDLE hArcData,int Operation,char *DestPath,char *DestName);
int PASCAL RARProcessFileW(HANDLE hArcData,int Operation,wchar_t *DestPath,wchar_t *DestName);
void PASCAL RARSetCallback(HANDLE hArcData,UNRARCALLBACK Callback,LPARAM UserData);
void PASCAL RARSetChangeVolProc(HANDLE hArcData,CHANGEVOLPROC ChangeVolProc);
void PASCAL RARSetProcessDataProc(HANDLE hArcData,PROCESSDATAPROC ProcessDataProc);
void PASCAL RARSetPassword(HANDLE hArcData,char *Password);
int PASCAL RARGetDllVersion();
#ifdef __cplusplus
}
#endif
#endif

BIN
libs/unrar2/UnRARDLL/unrar.lib

Binary file not shown.

606
libs/unrar2/UnRARDLL/unrardll.txt

@ -1,606 +0,0 @@
UnRAR.dll Manual
~~~~~~~~~~~~~~~~
UnRAR.dll is a 32-bit Windows dynamic-link library which provides
file extraction from RAR archives.
Exported functions
====================================================================
HANDLE PASCAL RAROpenArchive(struct RAROpenArchiveData *ArchiveData)
====================================================================
Description
~~~~~~~~~~~
Open RAR archive and allocate memory structures
Parameters
~~~~~~~~~~
ArchiveData Points to RAROpenArchiveData structure
struct RAROpenArchiveData
{
char *ArcName;
UINT OpenMode;
UINT OpenResult;
char *CmtBuf;
UINT CmtBufSize;
UINT CmtSize;
UINT CmtState;
};
Structure fields:
ArcName
Input parameter which should point to zero terminated string
containing the archive name.
OpenMode
Input parameter.
Possible values
RAR_OM_LIST
Open archive for reading file headers only.
RAR_OM_EXTRACT
Open archive for testing and extracting files.
RAR_OM_LIST_INCSPLIT
Open archive for reading file headers only. If you open an archive
in such mode, RARReadHeader[Ex] will return all file headers,
including those with "file continued from previous volume" flag.
In case of RAR_OM_LIST such headers are automatically skipped.
So if you process RAR volumes in RAR_OM_LIST_INCSPLIT mode, you will
get several file header records for same file if file is split between
volumes. For such files only the last file header record will contain
the correct file CRC and if you wish to get the correct packed size,
you need to sum up packed sizes of all parts.
OpenResult
Output parameter.
Possible values
0 Success
ERAR_NO_MEMORY Not enough memory to initialize data structures
ERAR_BAD_DATA Archive header broken
ERAR_BAD_ARCHIVE File is not valid RAR archive
ERAR_UNKNOWN_FORMAT Unknown encryption used for archive headers
ERAR_EOPEN File open error
CmtBuf
Input parameter which should point to the buffer for archive
comments. Maximum comment size is limited to 64Kb. Comment text is
zero terminated. If the comment text is larger than the buffer
size, the comment text will be truncated. If CmtBuf is set to
NULL, comments will not be read.
CmtBufSize
Input parameter which should contain size of buffer for archive
comments.
CmtSize
Output parameter containing size of comments actually read into the
buffer, cannot exceed CmtBufSize.
CmtState
Output parameter.
Possible values
0 comments not present
1 Comments read completely
ERAR_NO_MEMORY Not enough memory to extract comments
ERAR_BAD_DATA Broken comment
ERAR_UNKNOWN_FORMAT Unknown comment format
ERAR_SMALL_BUF Buffer too small, comments not completely read
Return values
~~~~~~~~~~~~~
Archive handle or NULL in case of error
========================================================================
HANDLE PASCAL RAROpenArchiveEx(struct RAROpenArchiveDataEx *ArchiveData)
========================================================================
Description
~~~~~~~~~~~
Similar to RAROpenArchive, but uses RAROpenArchiveDataEx structure
allowing to specify Unicode archive name and returning information
about archive flags.
Parameters
~~~~~~~~~~
ArchiveData Points to RAROpenArchiveDataEx structure
struct RAROpenArchiveDataEx
{
char *ArcName;
wchar_t *ArcNameW;
unsigned int OpenMode;
unsigned int OpenResult;
char *CmtBuf;
unsigned int CmtBufSize;
unsigned int CmtSize;
unsigned int CmtState;
unsigned int Flags;
unsigned int Reserved[32];
};
Structure fields:
ArcNameW
Input parameter which should point to zero terminated Unicode string
containing the archive name or NULL if Unicode name is not specified.
Flags
Output parameter. Combination of bit flags.
Possible values
0x0001 - Volume attribute (archive volume)
0x0002 - Archive comment present
0x0004 - Archive lock attribute
0x0008 - Solid attribute (solid archive)
0x0010 - New volume naming scheme ('volname.partN.rar')
0x0020 - Authenticity information present
0x0040 - Recovery record present
0x0080 - Block headers are encrypted
0x0100 - First volume (set only by RAR 3.0 and later)
Reserved[32]
Reserved for future use. Must be zero.
Information on other structure fields and function return values
is available above, in RAROpenArchive function description.
====================================================================
int PASCAL RARCloseArchive(HANDLE hArcData)
====================================================================
Description
~~~~~~~~~~~
Close RAR archive and release allocated memory. It must be called when
archive processing is finished, even if the archive processing was stopped
due to an error.
Parameters
~~~~~~~~~~
hArcData
This parameter should contain the archive handle obtained from the
RAROpenArchive function call.
Return values
~~~~~~~~~~~~~
0 Success
ERAR_ECLOSE Archive close error
====================================================================
int PASCAL RARReadHeader(HANDLE hArcData,
struct RARHeaderData *HeaderData)
====================================================================
Description
~~~~~~~~~~~
Read header of file in archive.
Parameters
~~~~~~~~~~
hArcData
This parameter should contain the archive handle obtained from the
RAROpenArchive function call.
HeaderData
It should point to RARHeaderData structure:
struct RARHeaderData
{
char ArcName[260];
char FileName[260];
UINT Flags;
UINT PackSize;
UINT UnpSize;
UINT HostOS;
UINT FileCRC;
UINT FileTime;
UINT UnpVer;
UINT Method;
UINT FileAttr;
char *CmtBuf;
UINT CmtBufSize;
UINT CmtSize;
UINT CmtState;
};
Structure fields:
ArcName
Output parameter which contains a zero terminated string of the
current archive name. May be used to determine the current volume
name.
FileName
Output parameter which contains a zero terminated string of the
file name in OEM (DOS) encoding.
Flags
Output parameter which contains file flags:
0x01 - file continued from previous volume
0x02 - file continued on next volume
0x04 - file encrypted with password
0x08 - file comment present
0x10 - compression of previous files is used (solid flag)
bits 7 6 5
0 0 0 - dictionary size 64 Kb
0 0 1 - dictionary size 128 Kb
0 1 0 - dictionary size 256 Kb
0 1 1 - dictionary size 512 Kb
1 0 0 - dictionary size 1024 Kb
1 0 1 - dictionary size 2048 KB
1 1 0 - dictionary size 4096 KB
1 1 1 - file is directory
Other bits are reserved.
PackSize
Output parameter means packed file size or size of the
file part if file was split between volumes.
UnpSize
Output parameter - unpacked file size.
HostOS
Output parameter - operating system used for archiving:
0 - MS DOS;
1 - OS/2.
2 - Win32
3 - Unix
FileCRC
Output parameter which contains unpacked file CRC. In case of file parts
split between volumes only the last part contains the correct CRC
and it is accessible only in RAR_OM_LIST_INCSPLIT listing mode.
FileTime
Output parameter - contains date and time in standard MS DOS format.
UnpVer
Output parameter - RAR version needed to extract file.
It is encoded as 10 * Major version + minor version.
Method
Output parameter - packing method.
FileAttr
Output parameter - file attributes.
CmtBuf
File comments support is not implemented in the new DLL version yet.
Now CmtState is always 0.
/*
* Input parameter which should point to the buffer for file
* comments. Maximum comment size is limited to 64Kb. Comment text is
* a zero terminated string in OEM encoding. If the comment text is
* larger than the buffer size, the comment text will be truncated.
* If CmtBuf is set to NULL, comments will not be read.
*/
CmtBufSize
Input parameter which should contain size of buffer for archive
comments.
CmtSize
Output parameter containing size of comments actually read into the
buffer, should not exceed CmtBufSize.
CmtState
Output parameter.
Possible values
0 Absent comments
1 Comments read completely
ERAR_NO_MEMORY Not enough memory to extract comments
ERAR_BAD_DATA Broken comment
ERAR_UNKNOWN_FORMAT Unknown comment format
ERAR_SMALL_BUF Buffer too small, comments not completely read
Return values
~~~~~~~~~~~~~
0 Success
ERAR_END_ARCHIVE End of archive
ERAR_BAD_DATA File header broken
====================================================================
int PASCAL RARReadHeaderEx(HANDLE hArcData,
struct RARHeaderDataEx *HeaderData)
====================================================================
Description
~~~~~~~~~~~
Similar to RARReadHeader, but uses RARHeaderDataEx structure,
containing information about Unicode file names and 64 bit file sizes.
struct RARHeaderDataEx
{
char ArcName[1024];
wchar_t ArcNameW[1024];
char FileName[1024];
wchar_t FileNameW[1024];
unsigned int Flags;
unsigned int PackSize;
unsigned int PackSizeHigh;
unsigned int UnpSize;
unsigned int UnpSizeHigh;
unsigned int HostOS;
unsigned int FileCRC;
unsigned int FileTime;
unsigned int UnpVer;
unsigned int Method;
unsigned int FileAttr;
char *CmtBuf;
unsigned int CmtBufSize;
unsigned int CmtSize;
unsigned int CmtState;
unsigned int Reserved[1024];
};
====================================================================
int PASCAL RARProcessFile(HANDLE hArcData,
int Operation,
char *DestPath,
char *DestName)
====================================================================
Description
~~~~~~~~~~~
Performs action and moves the current position in the archive to
the next file. Extract or test the current file from the archive
opened in RAR_OM_EXTRACT mode. If the mode RAR_OM_LIST is set,
then a call to this function will simply skip the archive position
to the next file.
Parameters
~~~~~~~~~~
hArcData
This parameter should contain the archive handle obtained from the
RAROpenArchive function call.
Operation
File operation.
Possible values
RAR_SKIP Move to the next file in the archive. If the
archive is solid and RAR_OM_EXTRACT mode was set
when the archive was opened, the current file will
be processed - the operation will be performed
slower than a simple seek.
RAR_TEST Test the current file and move to the next file in
the archive. If the archive was opened with
RAR_OM_LIST mode, the operation is equal to
RAR_SKIP.
RAR_EXTRACT Extract the current file and move to the next file.
If the archive was opened with RAR_OM_LIST mode,
the operation is equal to RAR_SKIP.
DestPath
This parameter should point to a zero terminated string containing the
destination directory to which to extract files to. If DestPath is equal
to NULL, it means extract to the current directory. This parameter has
meaning only if DestName is NULL.
DestName
This parameter should point to a string containing the full path and name
to assign to extracted file or it can be NULL to use the default name.
If DestName is defined (not NULL), it overrides both the original file
name saved in the archive and path specified in DestPath setting.
Both DestPath and DestName must be in OEM encoding. If necessary,
use CharToOem to convert text to OEM before passing to this function.
Return values
~~~~~~~~~~~~~
0 Success
ERAR_BAD_DATA File CRC error
ERAR_BAD_ARCHIVE Volume is not valid RAR archive
ERAR_UNKNOWN_FORMAT Unknown archive format
ERAR_EOPEN Volume open error
ERAR_ECREATE File create error
ERAR_ECLOSE File close error
ERAR_EREAD Read error
ERAR_EWRITE Write error
Note: if you wish to cancel extraction, return -1 when processing
UCM_PROCESSDATA callback message.
====================================================================
int PASCAL RARProcessFileW(HANDLE hArcData,
int Operation,
wchar_t *DestPath,
wchar_t *DestName)
====================================================================
Description
~~~~~~~~~~~
Unicode version of RARProcessFile. It uses Unicode DestPath
and DestName parameters, other parameters and return values
are the same as in RARProcessFile.
====================================================================
void PASCAL RARSetCallback(HANDLE hArcData,
int PASCAL (*CallbackProc)(UINT msg,LPARAM UserData,LPARAM P1,LPARAM P2),
LPARAM UserData);
====================================================================
Description
~~~~~~~~~~~
Set a user-defined callback function to process Unrar events.
Parameters
~~~~~~~~~~
hArcData
This parameter should contain the archive handle obtained from the
RAROpenArchive function call.
CallbackProc
It should point to a user-defined callback function.
The function will be passed four parameters:
msg Type of event. Described below.
UserData User defined value passed to RARSetCallback.
P1 and P2 Event dependent parameters. Described below.
Possible events
UCM_CHANGEVOLUME Process volume change.
P1 Points to the zero terminated name
of the next volume.
P2 The function call mode:
RAR_VOL_ASK Required volume is absent. The function should
prompt user and return a positive value
to retry or return -1 value to terminate
operation. The function may also specify a new
volume name, placing it to the address specified
by P1 parameter.
RAR_VOL_NOTIFY Required volume is successfully opened.
This is a notification call and volume name
modification is not allowed. The function should
return a positive value to continue or -1
to terminate operation.
UCM_PROCESSDATA Process unpacked data. It may be used to read
a file while it is being extracted or tested
without actual extracting file to disk.
Return a positive value to continue process
or -1 to cancel the archive operation
P1 Address pointing to the unpacked data.
Function may refer to the data but must not
change it.
P2 Size of the unpacked data. It is guaranteed
only that the size will not exceed the maximum
dictionary size (4 Mb in RAR 3.0).
UCM_NEEDPASSWORD DLL needs a password to process archive.
This message must be processed if you wish
to be able to handle archives with encrypted
file names. It can be also used as replacement
of RARSetPassword function even for usual
encrypted files with non-encrypted names.
P1 Address pointing to the buffer for a password.
You need to copy a password here.
P2 Size of the password buffer.
UserData
User data passed to callback function.
Other functions of UnRAR.dll should not be called from the callback
function.
Return values
~~~~~~~~~~~~~
None
====================================================================
void PASCAL RARSetChangeVolProc(HANDLE hArcData,
int PASCAL (*ChangeVolProc)(char *ArcName,int Mode));
====================================================================
Obsoleted, use RARSetCallback instead.
====================================================================
void PASCAL RARSetProcessDataProc(HANDLE hArcData,
int PASCAL (*ProcessDataProc)(unsigned char *Addr,int Size))
====================================================================
Obsoleted, use RARSetCallback instead.
====================================================================
void PASCAL RARSetPassword(HANDLE hArcData,
char *Password);
====================================================================
Description
~~~~~~~~~~~
Set a password to decrypt files.
Parameters
~~~~~~~~~~
hArcData
This parameter should contain the archive handle obtained from the
RAROpenArchive function call.
Password
It should point to a string containing a zero terminated password.
Return values
~~~~~~~~~~~~~
None
====================================================================
void PASCAL RARGetDllVersion();
====================================================================
Description
~~~~~~~~~~~
Returns API version.
Parameters
~~~~~~~~~~
None.
Return values
~~~~~~~~~~~~~
Returns an integer value denoting UnRAR.dll API version, which is also
defined in unrar.h as RAR_DLL_VERSION. API version number is incremented
only in case of noticeable changes in UnRAR.dll API. Do not confuse it
with version of UnRAR.dll stored in DLL resources, which is incremented
with every DLL rebuild.
If RARGetDllVersion() returns a value lower than that of the UnRAR.dll
version your application was designed for, it may indicate that the DLL is
too old and it will fail to provide all necessary functions to your application.
This function is absent in old versions of UnRAR.dll, so it is safer
to use LoadLibrary and GetProcAddress to access this function.

80
libs/unrar2/UnRARDLL/whatsnew.txt

@ -1,80 +0,0 @@
List of unrar.dll API changes. We do not include performance and reliability
improvements into this list, but this library and RAR/UnRAR tools share
the same source code. So the latest version of unrar.dll usually contains
same decompression algorithm changes as the latest UnRAR version.
============================================================================
-- 18 January 2008
all LONG parameters of CallbackProc function were changed
to LPARAM type for 64 bit mode compatibility.
-- 12 December 2007
Added new RAR_OM_LIST_INCSPLIT open mode for function RAROpenArchive.
-- 14 August 2007
Added NoCrypt\unrar_nocrypt.dll without decryption code for those
applications where presence of encryption or decryption code is not
allowed because of legal restrictions.
-- 14 December 2006
Added ERAR_MISSING_PASSWORD error type. This error is returned
if empty password is specified for encrypted file.
-- 12 June 2003
Added RARProcessFileW function, Unicode version of RARProcessFile
-- 9 August 2002
Added RAROpenArchiveEx function allowing you to specify a Unicode archive
name and get archive flags.
-- 24 January 2002
Added RARReadHeaderEx function allowing you to read Unicode file names
and 64 bit file sizes.
-- 23 January 2002
Added ERAR_UNKNOWN error type (it is used for all errors which
do not have special ERAR code yet) and UCM_NEEDPASSWORD callback
message.
Unrar.dll automatically opens all next volumes not only when extracting,
but also in RAR_OM_LIST mode.
-- 27 November 2001
RARSetChangeVolProc and RARSetProcessDataProc are replaced by
the single callback function installed with RARSetCallback.
Unlike old style callbacks, the new function accepts the user defined
parameter. Unrar.dll still supports RARSetChangeVolProc and
RARSetProcessDataProc for compatibility purposes, but if you write
a new application, better use RARSetCallback.
File comments support is not implemented in the new DLL version yet.
Now CmtState is always 0.
-- 13 August 2001
Added RARGetDllVersion function, so you may distinguish old unrar.dll,
which used C style callback functions and the new one with PASCAL callbacks.
-- 10 May 2001
Callback functions in RARSetChangeVolProc and RARSetProcessDataProc
use PASCAL style call convention now.

1
libs/unrar2/UnRARDLL/x64/readme.txt

@ -1 +0,0 @@
This is x64 version of unrar.dll.

BIN
libs/unrar2/UnRARDLL/x64/unrar64.lib

Binary file not shown.

21
libs/unrar2/license.txt

@ -1,21 +0,0 @@
Copyright (c) 2003-2005 Jimmy Retzlaff, 2008 Konstantin Yegupov
Permission is hereby granted, free of charge, to any person obtaining
a copy of this software and associated documentation files (the
"Software"), to deal in the Software without restriction, including
without limitation the rights to use, copy, modify, merge, publish,
distribute, sublicense, and/or sell copies of the Software, and to
permit persons to whom the Software is furnished to do so, subject to
the following conditions:
The above copyright notice and this permission notice shall be
included in all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.

66
libs/unrar2/unix.py

@ -38,38 +38,38 @@ def call_unrar(params):
"Calls rar/unrar command line executable, returns stdout pipe"
global rar_executable_cached
if rar_executable_cached is None:
for command in ('unrar', 'rar'):
for command in ('unrar', 'rar', os.path.join(os.path.dirname(__file__), 'unrar')):
try:
subprocess.Popen([command], stdout=subprocess.PIPE)
subprocess.Popen([command], stdout = subprocess.PIPE)
rar_executable_cached = command
break
except OSError:
pass
if rar_executable_cached is None:
raise UnpackerNotInstalled("No suitable RAR unpacker installed")
assert type(params) == list, "params must be list"
args = [rar_executable_cached] + params
try:
gc.disable() # See http://bugs.python.org/issue1336
return subprocess.Popen(args, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
return subprocess.Popen(args, stdout = subprocess.PIPE, stderr = subprocess.PIPE)
finally:
gc.enable()
class RarFileImplementation(object):
def init(self, password=None):
def init(self, password = None):
self.password = password
stdoutdata, stderrdata = self.call('v', []).communicate()
for line in stderrdata.splitlines():
if line.strip().startswith("Cannot open"):
raise FileOpenError
if line.find("CRC failed")>=0:
raise IncorrectRARPassword
if line.find("CRC failed") >= 0:
raise IncorrectRARPassword
accum = []
source = iter(stdoutdata.splitlines())
line = ''
@ -85,39 +85,39 @@ class RarFileImplementation(object):
self.comment = '\n'.join(accum[:-1])
else:
self.comment = None
def escaped_password(self):
return '-' if self.password == None else self.password
def call(self, cmd, options=[], files=[]):
options2 = options + ['p'+self.escaped_password()]
soptions = ['-'+x for x in options2]
return call_unrar([cmd]+soptions+['--',self.archiveName]+files)
def call(self, cmd, options = [], files = []):
options2 = options + ['p' + self.escaped_password()]
soptions = ['-' + x for x in options2]
return call_unrar([cmd] + soptions + ['--', self.archiveName] + files)
def infoiter(self):
stdoutdata, stderrdata = self.call('v', ['c-']).communicate()
for line in stderrdata.splitlines():
if line.strip().startswith("Cannot open"):
raise FileOpenError
accum = []
source = iter(stdoutdata.splitlines())
line = ''
while not line.startswith('--------------'):
if line.strip().endswith('is not RAR archive'):
raise InvalidRARArchive
if line.find("CRC failed")>=0:
raise IncorrectRARPassword
if line.find("CRC failed") >= 0:
raise IncorrectRARPassword
line = source.next()
line = source.next()
i = 0
re_spaces = re.compile(r"\s+")
while not line.startswith('--------------'):
accum.append(line)
if len(accum)==2:
if len(accum) == 2:
data = {}
data['index'] = i
data['filename'] = accum[0].strip()
@ -125,7 +125,7 @@ class RarFileImplementation(object):
data['size'] = int(info[0])
attr = info[5]
data['isdir'] = 'd' in attr.lower()
data['datetime'] = time.strptime(info[3]+" "+info[4], '%d-%m-%y %H:%M')
data['datetime'] = time.strptime(info[3] + " " + info[4], '%d-%m-%y %H:%M')
data['comment'] = None
yield data
accum = []
@ -136,12 +136,12 @@ class RarFileImplementation(object):
res = []
for info in self.infoiter():
checkres = checker(info)
if checkres==True and not info.isdir:
if checkres == True and not info.isdir:
pipe = self.call('p', ['inul'], [info.filename]).stdout
res.append((info, pipe.read()))
return res
return res
def extract(self, checker, path, withSubpath, overwrite):
res = []
command = 'x'
@ -159,17 +159,17 @@ class RarFileImplementation(object):
checkres = checker(info)
if type(checkres) in [str, unicode]:
raise NotImplementedError("Condition callbacks returning strings are deprecated and only supported in Windows")
if checkres==True and not info.isdir:
if checkres == True and not info.isdir:
names.append(info.filename)
res.append(info)
names.append(path)
proc = self.call(command, options, names)
stdoutdata, stderrdata = proc.communicate()
if stderrdata.find("CRC failed")>=0:
raise IncorrectRARPassword
return res
if stderrdata.find("CRC failed") >= 0:
raise IncorrectRARPassword
return res
def destruct(self):
pass

BIN
libs/unrar2/unrar

Binary file not shown.

0
libs/unrar2/UnRARDLL/unrar.dll → libs/unrar2/unrar.dll

0
libs/unrar2/UnRARDLL/x64/unrar64.dll → libs/unrar2/unrar64.dll

27
libs/unrar2/windows.py

@ -23,10 +23,10 @@
# Low level interface - see UnRARDLL\UNRARDLL.TXT
from __future__ import generators
import ctypes, ctypes.wintypes
import os, os.path, sys
import Queue
from couchpotato.environment import Env
from shutil import copyfile
import ctypes.wintypes
import os.path
import time
from rar_exceptions import *
@ -64,13 +64,18 @@ UCM_NEEDPASSWORD = 2
architecture_bits = ctypes.sizeof(ctypes.c_voidp)*8
dll_name = "unrar.dll"
if architecture_bits == 64:
dll_name = "x64\\unrar64.dll"
try:
unrar = ctypes.WinDLL(os.path.join(os.path.split(__file__)[0], 'UnRARDLL', dll_name))
except WindowsError:
unrar = ctypes.WinDLL(dll_name)
dll_name = "unrar64.dll"
# Copy dll first
dll_file = os.path.join(os.path.dirname(__file__), dll_name)
dll_copy = os.path.join(Env.get('cache_dir'), 'copied.dll')
if os.path.isfile(dll_copy):
os.remove(dll_copy)
copyfile(dll_file, dll_copy)
unrar = ctypes.WinDLL(dll_copy)
class RAROpenArchiveDataEx(ctypes.Structure):

Loading…
Cancel
Save