
Merge branch 'refs/heads/develop' into tv

pull/4940/head
Ruud · 11 years ago
parent commit 30c56f29d0
  1. CouchPotato.py (4)
  2. README.md (19)
  3. contributing.md (8)
  4. couchpotato/__init__.py (2)
  5. couchpotato/api.py (67)
  6. couchpotato/core/_base/_core.py (16)
  7. couchpotato/core/_base/downloader/main.py (3)
  8. couchpotato/core/_base/downloader/static/downloaders.js (5)
  9. couchpotato/core/_base/updater/main.py (18)
  10. couchpotato/core/database.py (30)
  11. couchpotato/core/downloaders/rtorrent_.py (13)
  12. couchpotato/core/downloaders/synology.py (1)
  13. couchpotato/core/downloaders/transmission.py (4)
  14. couchpotato/core/downloaders/utorrent.py (3)
  15. couchpotato/core/helpers/encoding.py (2)
  16. couchpotato/core/helpers/variable.py (6)
  17. couchpotato/core/logger.py (6)
  18. couchpotato/core/media/__init__.py (5)
  19. couchpotato/core/media/_base/media/index.py (10)
  20. couchpotato/core/media/_base/media/main.py (46)
  21. couchpotato/core/media/_base/providers/base.py (17)
  22. couchpotato/core/media/_base/providers/nzb/binsearch.py (8)
  23. couchpotato/core/media/_base/providers/nzb/newznab.py (53)
  24. couchpotato/core/media/_base/providers/torrent/base.py (3)
  25. couchpotato/core/media/_base/providers/torrent/bithdtv.py (2)
  26. couchpotato/core/media/_base/providers/torrent/bitsoup.py (11)
  27. couchpotato/core/media/_base/providers/torrent/ilovetorrents.py (21)
  28. couchpotato/core/media/_base/providers/torrent/kickasstorrents.py (22)
  29. couchpotato/core/media/_base/providers/torrent/sceneaccess.py (4)
  30. couchpotato/core/media/_base/providers/torrent/thepiratebay.py (21)
  31. couchpotato/core/media/_base/providers/torrent/torrentday.py (4)
  32. couchpotato/core/media/_base/providers/torrent/torrentleech.py (4)
  33. couchpotato/core/media/_base/providers/torrent/yify.py (24)
  34. couchpotato/core/media/_base/search/main.py (15)
  35. couchpotato/core/media/_base/search/static/search.css (1)
  36. couchpotato/core/media/_base/search/static/search.js (3)
  37. couchpotato/core/media/_base/searcher/main.py (29)
  38. couchpotato/core/media/movie/_base/main.py (4)
  39. couchpotato/core/media/movie/_base/static/list.js (3)
  40. couchpotato/core/media/movie/_base/static/manage.js (1)
  41. couchpotato/core/media/movie/_base/static/movie.actions.js (8)
  42. couchpotato/core/media/movie/_base/static/movie.css (6)
  43. couchpotato/core/media/movie/_base/static/movie.js (17)
  44. couchpotato/core/media/movie/_base/static/wanted.js (1)
  45. couchpotato/core/media/movie/charts/__init__.py (14)
  46. couchpotato/core/media/movie/charts/main.py (5)
  47. couchpotato/core/media/movie/charts/static/charts.css (67)
  48. couchpotato/core/media/movie/charts/static/charts.js (8)
  49. couchpotato/core/media/movie/providers/automation/bluray.py (11)
  50. couchpotato/core/media/movie/providers/automation/imdb.py (191)
  51. couchpotato/core/media/movie/providers/automation/popularmovies.py (47)
  52. couchpotato/core/media/movie/providers/info/_modifier.py (9)
  53. couchpotato/core/media/movie/providers/info/couchpotatoapi.py (2)
  54. couchpotato/core/media/movie/providers/info/fanarttv.py (130)
  55. couchpotato/core/media/movie/providers/info/themoviedb.py (33)
  56. couchpotato/core/media/movie/providers/metadata/base.py (153)
  57. couchpotato/core/media/movie/providers/metadata/mediabrowser.py (36)
  58. couchpotato/core/media/movie/providers/metadata/ps3.py (33)
  59. couchpotato/core/media/movie/providers/metadata/wmc.py (2)
  60. couchpotato/core/media/movie/providers/metadata/xbmc.py (141)
  61. couchpotato/core/media/movie/providers/nzb/newznab.py (9)
  62. couchpotato/core/media/movie/providers/torrent/bithdtv.py (8)
  63. couchpotato/core/media/movie/providers/torrent/bitsoup.py (8)
  64. couchpotato/core/media/movie/providers/torrent/iptorrents.py (2)
  65. couchpotato/core/media/movie/providers/torrent/publichd.py (2)
  66. couchpotato/core/media/movie/providers/torrent/sceneaccess.py (7)
  67. couchpotato/core/media/movie/providers/torrent/thepiratebay.py (2)
  68. couchpotato/core/media/movie/providers/torrent/torrentday.py (4)
  69. couchpotato/core/media/movie/providers/torrent/torrentleech.py (5)
  70. couchpotato/core/media/movie/providers/torrent/torrentshack.py (4)
  71. couchpotato/core/media/movie/providers/trailer/base.py (3)
  72. couchpotato/core/media/movie/providers/trailer/hdtrailers.py (2)
  73. couchpotato/core/media/movie/providers/userscript/filmstarts.py (30)
  74. couchpotato/core/media/movie/searcher.py (122)
  75. couchpotato/core/media/movie/suggestion/main.py (1)
  76. couchpotato/core/notifications/base.py (1)
  77. couchpotato/core/notifications/core/main.py (7)
  78. couchpotato/core/notifications/nmj.py (3)
  79. couchpotato/core/notifications/synoindex.py (4)
  80. couchpotato/core/notifications/xbmc.py (23)
  81. couchpotato/core/notifications/xmpp_.py (0)
  82. couchpotato/core/plugins/base.py (41)
  83. couchpotato/core/plugins/dashboard.py (15)
  84. couchpotato/core/plugins/file.py (7)
  85. couchpotato/core/plugins/log/main.py (62)
  86. couchpotato/core/plugins/log/static/log.css (146)
  87. couchpotato/core/plugins/log/static/log.js (275)
  88. couchpotato/core/plugins/manage.py (63)
  89. couchpotato/core/plugins/profile/main.py (15)
  90. couchpotato/core/plugins/profile/static/profile.css (6)
  91. couchpotato/core/plugins/profile/static/profile.js (11)
  92. couchpotato/core/plugins/quality/main.py (191)
  93. couchpotato/core/plugins/quality/static/quality.js (33)
  94. couchpotato/core/plugins/release/main.py (135)
  95. couchpotato/core/plugins/renamer.py (102)
  96. couchpotato/core/plugins/scanner.py (167)
  97. couchpotato/core/plugins/subtitle.py (10)
  98. couchpotato/core/plugins/userscript/static/userscript.js (1)
  99. couchpotato/core/plugins/wizard/static/wizard.js (5)
  100. couchpotato/core/settings.py (5)

4
CouchPotato.py

@ -29,7 +29,7 @@ class Loader(object):
# Get options via arg
from couchpotato.runner import getOptions
self.options = getOptions(base_path, sys.argv[1:])
self.options = getOptions(sys.argv[1:])
# Load settings
settings = Env.get('settings')
@ -50,7 +50,7 @@ class Loader(object):
# Create logging dir
self.log_dir = os.path.join(self.data_dir, 'logs');
if not os.path.isdir(self.log_dir):
os.mkdir(self.log_dir)
os.makedirs(self.log_dir)
# Logging
from couchpotato.core.logger import CPLog
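The hunk above swaps os.mkdir for os.makedirs when creating the log directory. A minimal sketch of the difference, using a throwaway temporary path rather than CouchPotato's real data directory:

```python
import os
import tempfile

# Hypothetical nested path standing in for data_dir/logs.
base = tempfile.mkdtemp()
log_dir = os.path.join(base, 'data', 'logs')

# os.mkdir(log_dir) would raise OSError here because the intermediate 'data'
# directory does not exist yet; os.makedirs creates every missing level at once.
if not os.path.isdir(log_dir):
    os.makedirs(log_dir)

print(os.path.isdir(log_dir))  # True
```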

19
README.md

@ -17,9 +17,9 @@ Windows, see [the CP forum](http://couchpota.to/forum/showthread.php?tid=14) for
* Open up `Git Bash` (or CMD) and go to the folder you want to install CP. Something like Program Files.
* Run `git clone https://github.com/RuudBurger/CouchPotatoServer.git`.
* You can now start CP via `CouchPotatoServer\CouchPotato.py` to start
* Your browser should open up, but if it doesn't go to: `http://localhost:5050/`
* Your browser should open up, but if it doesn't go to `http://localhost:5050/`
OSx:
OS X:
* If you're on Leopard (10.5) install Python 2.6+: [Python 2.6.5](http://www.python.org/download/releases/2.6.5/)
* Install [GIT](http://git-scm.com/)
@ -27,19 +27,20 @@ OSx:
* Go to your App folder `cd /Applications`
* Run `git clone https://github.com/RuudBurger/CouchPotatoServer.git`
* Then do `python CouchPotatoServer/CouchPotato.py`
* Your browser should open up, but if it doesn't go to: `http://localhost:5050/`
* Your browser should open up, but if it doesn't go to `http://localhost:5050/`
Linux (ubuntu / debian):
Linux (Ubuntu / Debian):
* Install [GIT](http://git-scm.com/) with `apt-get install git-core`
* 'cd' to the folder of your choosing.
* Run `git clone https://github.com/RuudBurger/CouchPotatoServer.git`
* Then do `python CouchPotatoServer/CouchPotato.py` to start
* To run on boot copy the init script. `sudo cp CouchPotatoServer/init/ubuntu /etc/init.d/couchpotato`
* Change the paths inside the init script. `sudo nano /etc/init.d/couchpotato`
* Make it executable. `sudo chmod +x /etc/init.d/couchpotato`
* Add it to defaults. `sudo update-rc.d couchpotato defaults`
* Open your browser and go to: `http://localhost:5050/`
* To run on boot copy the init script `sudo cp CouchPotatoServer/init/ubuntu /etc/init.d/couchpotato`
* Copy the default paths file `sudo cp CouchPotatoServer/init/ubuntu.default /etc/default/couchpotato`
* Change the paths inside the default file `sudo nano /etc/default/couchpotato`
* Make it executable `sudo chmod +x /etc/init.d/couchpotato`
* Add it to defaults `sudo update-rc.d couchpotato defaults`
* Open your browser and go to `http://localhost:5050/`
FreeBSD :

8
contributing.md

@ -22,11 +22,11 @@ Before you submit an issue, please go through the following checklist:
* What providers are you using? (While your logs include these, scanning through hundreds of lines of logs isn't our hobby)
* Post the logs from the *config* directory, please do not copy paste the UI. Use pastebin to store these logs!
* Give a short step by step of how to reproduce the error.
* What hardware / OS are you using and what are its limitations? For example: NAS can be slow and maybe have a different version of python installed then when you use CP on OSX or Windows.
* What hardware / OS are you using and what are its limitations? For example: NAS can be slow and maybe have a different version of python installed than when you use CP on OS X or Windows.
* Your issue might be marked with the "can't reproduce" tag. Don't ask why your issue was closed if it says so in the tag.
* If you're running on a NAS (QNAP, Austor etc..) with pre-made packages, make sure these are set up to use our source repository (RuudBurger/CouchPotatoServer) and nothing else!!
The more relevant information you can provide, the more likely it is the issue will be resolved rather than closed.
* If you're running on a NAS (QNAP, Austor, Synology etc.) with pre-made packages, make sure these are set up to use our source repository (RuudBurger/CouchPotatoServer) and nothing else!
The more relevant information you provide, the more likely that your issue will be resolved.
## Pull Requests
Pull requests are intended for contributing code or documentation to the project. Before you submit a pull request, consider the following:

2
couchpotato/__init__.py

@ -45,7 +45,7 @@ class WebHandler(BaseHandler):
self.write({'success': False, 'error': 'Failed returning results'})
def addView(route, func, static = False):
def addView(route, func):
views[route] = func

67
couchpotato/api.py

@ -7,9 +7,7 @@ import urllib
from couchpotato.core.helpers.request import getParams
from couchpotato.core.logger import CPLog
from tornado.gen import coroutine
from tornado.web import RequestHandler, asynchronous
import tornado
log = CPLog(__name__)
@ -28,10 +26,18 @@ def run_async(func):
def async_func(*args, **kwargs):
func_hl = Thread(target = func, args = args, kwargs = kwargs)
func_hl.start()
return func_hl
return async_func
@run_async
def run_handler(route, kwargs, callback = None):
try:
res = api[route](**kwargs)
callback(res, route)
except:
log.error('Failed doing api request "%s": %s', (route, traceback.format_exc()))
callback({'success': False, 'error': 'Failed returning results'}, route)
# NonBlock API handler
class NonBlockHandler(RequestHandler):
@ -78,7 +84,7 @@ def addNonBlockApiView(route, func_tuple, docs = None, **kwargs):
# Blocking API handler
class ApiHandler(RequestHandler):
@coroutine
@asynchronous
def get(self, route, *args, **kwargs):
route = route.strip('/')
if not api.get(route):
@ -102,36 +108,43 @@ class ApiHandler(RequestHandler):
except: pass
# Add async callback handler
@run_async
def run_handler(callback):
try:
res = api[route](**kwargs)
callback(res)
except:
log.error('Failed doing api request "%s": %s', (route, traceback.format_exc()))
callback({'success': False, 'error': 'Failed returning results'})
result = yield tornado.gen.Task(run_handler)
# Check JSONP callback
jsonp_callback = self.get_argument('callback_func', default = None)
if jsonp_callback:
self.write(str(jsonp_callback) + '(' + json.dumps(result) + ')')
self.set_header("Content-Type", "text/javascript")
elif isinstance(result, tuple) and result[0] == 'redirect':
self.redirect(result[1])
else:
self.write(result)
run_handler(route, kwargs, callback = self.taskFinished)
except:
log.error('Failed doing api request "%s": %s', (route, traceback.format_exc()))
self.write({'success': False, 'error': 'Failed returning results'})
try:
self.write({'success': False, 'error': 'Failed returning results'})
self.finish()
except:
log.error('Failed write error "%s": %s', (route, traceback.format_exc()))
api_locks[route].release()
api_locks[route].release()
post = get
def taskFinished(self, result, route):
if not self.request.connection.stream.closed():
try:
# Check JSONP callback
jsonp_callback = self.get_argument('callback_func', default = None)
if jsonp_callback:
self.write(str(jsonp_callback) + '(' + json.dumps(result) + ')')
self.set_header("Content-Type", "text/javascript")
self.finish()
elif isinstance(result, tuple) and result[0] == 'redirect':
self.redirect(result[1])
else:
self.write(result)
self.finish()
except:
log.debug('Failed doing request, probably already closed: %s', (traceback.format_exc()))
try: self.finish({'success': False, 'error': 'Failed returning results'})
except: pass
api_locks[route].release()
def addApiView(route, func, static = False, docs = None, **kwargs):
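The blocking ApiHandler now hands the API call to a worker thread via the module-level run_async/run_handler pair and writes the response later in taskFinished. A self-contained sketch of that dispatch pattern, with the api registry and the Tornado response replaced by stand-ins:

```python
import traceback
from threading import Thread

def run_async(func):
    # Run the wrapped function on a background thread; the caller gets the Thread back.
    def async_func(*args, **kwargs):
        func_hl = Thread(target = func, args = args, kwargs = kwargs)
        func_hl.start()
        return func_hl
    return async_func

@run_async
def run_handler(route, kwargs, callback = None):
    # Stand-in for api[route](**kwargs); the real handler registry lives in couchpotato.api.
    try:
        result = {'success': True, 'route': route, 'echo': kwargs}
        callback(result, route)
    except Exception:
        callback({'success': False, 'error': traceback.format_exc()}, route)

def task_finished(result, route):
    # In ApiHandler.taskFinished this writes the JSON/JSONP response on the still-open
    # connection and releases api_locks[route]; here it just prints.
    print(route, result)

run_handler('media.list', {'status': 'active'}, callback = task_finished).join()
```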

16
couchpotato/core/_base/_core.py

@ -8,7 +8,7 @@ import webbrowser
from couchpotato.api import addApiView
from couchpotato.core.event import fireEvent, addEvent
from couchpotato.core.helpers.variable import cleanHost, md5
from couchpotato.core.helpers.variable import cleanHost, md5, isSubFolder
from couchpotato.core.logger import CPLog
from couchpotato.core.plugins.base import Plugin
from couchpotato.environment import Env
@ -71,13 +71,14 @@ class Core(Plugin):
return value if value and len(value) > 3 else uuid4().hex
def checkDataDir(self):
if Env.get('app_dir') in Env.get('data_dir'):
if isSubFolder(Env.get('data_dir'), Env.get('app_dir')):
log.error('You should NOT use your CouchPotato directory to save your settings in. Files will get overwritten or be deleted.')
return True
def cleanUpFolders(self):
self.deleteEmptyFolder(Env.get('app_dir'), show_error = False)
only_clean = ['couchpotato', 'libs', 'init']
self.deleteEmptyFolder(Env.get('app_dir'), show_error = False, only_clean = only_clean)
def available(self, **kwargs):
return {
@ -90,7 +91,11 @@ class Core(Plugin):
def shutdown():
self.initShutdown()
IOLoop.current().add_callback(shutdown)
if IOLoop.current()._closing:
shutdown()
else:
IOLoop.current().add_callback(shutdown)
return 'shutdown'
@ -139,7 +144,8 @@ class Core(Plugin):
log.debug('Safe to shutdown/restart')
try:
IOLoop.current().stop()
if not IOLoop.current()._closing:
IOLoop.current().stop()
except RuntimeError:
pass
except:
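checkDataDir now uses isSubFolder instead of a plain substring test, which avoids false positives for sibling directories that merely share a prefix. An illustrative re-implementation of such a check (the commit imports the real helper from couchpotato.core.helpers.variable, which may differ in detail):

```python
import os

def is_sub_folder(sub_folder, base_folder):
    # Compare normalised absolute paths with a trailing separator so that
    # '/opt/cp-data' is not treated as living inside '/opt/cp'.
    base = os.path.join(os.path.realpath(base_folder), '')
    sub = os.path.join(os.path.realpath(sub_folder), '')
    return sub.startswith(base)

print('/opt/cp' in '/opt/cp-data')               # True  (old test: false positive)
print(is_sub_folder('/opt/cp-data', '/opt/cp'))  # False (sibling directory)
print(is_sub_folder('/opt/cp/data', '/opt/cp'))  # True  (really a sub folder)
```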

3
couchpotato/core/_base/downloader/main.py

@ -72,6 +72,9 @@ class DownloaderBase(Provider):
return
return self.download(data = data, media = media, filedata = filedata)
def download(self, *args, **kwargs):
return False
def _getAllDownloadStatus(self, download_ids):
if self.isDisabled(manual = True, data = {}):
return

5
couchpotato/core/_base/downloader/static/downloaders.js

@ -40,15 +40,16 @@ var DownloadersBase = new Class({
button.set('text', button_name);
var message;
if(json.success){
var message = new Element('span.success', {
message = new Element('span.success', {
'text': 'Connection successful'
}).inject(button, 'after')
}
else {
var msg_text = 'Connection failed. Check logs for details.';
if(json.hasOwnProperty('msg')) msg_text = json.msg;
var message = new Element('span.failed', {
message = new Element('span.failed', {
'text': msg_text
}).inject(button, 'after')
}

18
couchpotato/core/_base/updater/main.py

@ -10,13 +10,12 @@ from threading import RLock
from couchpotato.api import addApiView
from couchpotato.core.event import addEvent, fireEvent, fireEventAsync
from couchpotato.core.helpers.encoding import ss
from couchpotato.core.helpers.encoding import sp
from couchpotato.core.logger import CPLog
from couchpotato.core.plugins.base import Plugin
from couchpotato.environment import Env
from dateutil.parser import parse
from git.repository import LocalRepository
from scandir import scandir
import version
from six.moves import filter
@ -143,7 +142,9 @@ class Updater(Plugin):
}
def doShutdown(self):
self.updater.deletePyc(show_logs = False)
if not Env.get('dev'):
self.updater.deletePyc(show_logs = False)
return super(Updater, self).doShutdown()
@ -182,7 +183,7 @@ class BaseUpdater(Plugin):
def deletePyc(self, only_excess = True, show_logs = True):
for root, dirs, files in scandir.walk(ss(Env.get('app_dir'))):
for root, dirs, files in os.walk(Env.get('app_dir')):
pyc_files = filter(lambda filename: filename.endswith('.pyc'), files)
py_files = set(filter(lambda filename: filename.endswith('.py'), files))
@ -322,17 +323,18 @@ class SourceUpdater(BaseUpdater):
return False
def replaceWith(self, path):
app_dir = ss(Env.get('app_dir'))
data_dir = ss(Env.get('data_dir'))
path = sp(path)
app_dir = Env.get('app_dir')
data_dir = Env.get('data_dir')
# Get list of files we want to overwrite
self.deletePyc()
existing_files = []
for root, subfiles, filenames in scandir.walk(app_dir):
for root, subfiles, filenames in os.walk(app_dir):
for filename in filenames:
existing_files.append(os.path.join(root, filename))
for root, subfiles, filenames in scandir.walk(path):
for root, subfiles, filenames in os.walk(path):
for filename in filenames:
fromfile = os.path.join(root, filename)
tofile = os.path.join(app_dir, fromfile.replace(path + os.path.sep, ''))
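replaceWith now walks both trees with plain os.walk and works on sp()-normalised paths. A minimal sketch of the copy-over step it performs, here using os.path.relpath instead of the string replace in the hunk; the real method also records the existing files first so leftovers can be cleaned up afterwards:

```python
import os
import shutil

def overlay_tree(src_dir, dest_dir):
    # Copy every file under src_dir to the same relative location under dest_dir,
    # creating intermediate directories as needed.
    for root, _dirs, filenames in os.walk(src_dir):
        for filename in filenames:
            fromfile = os.path.join(root, filename)
            tofile = os.path.join(dest_dir, os.path.relpath(fromfile, src_dir))
            if not os.path.isdir(os.path.dirname(tofile)):
                os.makedirs(os.path.dirname(tofile))
            shutil.copyfile(fromfile, tofile)
```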

30
couchpotato/core/database.py

@ -28,6 +28,7 @@ class Database(object):
addEvent('database.setup_index', self.setupIndex)
addEvent('app.migrate', self.migrate)
addEvent('app.after_shutdown', self.close)
def getDB(self):
@ -37,6 +38,9 @@ class Database(object):
return self.db
def close(self, **kwargs):
self.getDB().close()
def setupIndex(self, index_name, klass):
self.indexes.append(index_name)
@ -285,13 +289,16 @@ class Database(object):
for profile_type in types:
p_type = types[profile_type]
if types[profile_type]['profile_id'] == p['id']:
new_profile['finish'].append(p_type['finish'])
new_profile['wait_for'].append(p_type['wait_for'])
new_profile['qualities'].append(migrate_data['quality'][p_type['quality_id']]['identifier'])
new_profile.update(db.insert(new_profile))
profile_link[x] = new_profile.get('_id')
if p_type['quality_id']:
new_profile['finish'].append(p_type['finish'])
new_profile['wait_for'].append(p_type['wait_for'])
new_profile['qualities'].append(migrate_data['quality'][p_type['quality_id']]['identifier'])
if len(new_profile['qualities']) > 0:
new_profile.update(db.insert(new_profile))
profile_link[x] = new_profile.get('_id')
else:
log.error('Corrupt profile list for "%s", using default.', p.get('label'))
# Qualities
log.info('Importing quality sizes')
@ -365,10 +372,10 @@ class Database(object):
m = medias[x]
status = statuses.get(m['status_id']).get('identifier')
l = libraries[m['library_id']]
l = libraries.get(m['library_id'])
# Only migrate wanted movies, Skip if no identifier present
if not getImdb(l.get('identifier')): continue
if not l or not getImdb(l.get('identifier')): continue
profile_id = profile_link.get(m['profile_id'])
category_id = category_link.get(m['category_id'])
@ -412,7 +419,10 @@ class Database(object):
empty_info = True
rel['info'] = {}
quality = quality_link[rel.get('quality_id')]
quality = quality_link.get(rel.get('quality_id'))
if not quality:
continue
release_status = statuses.get(rel.get('status_id')).get('identifier')
if rel['info'].get('download_id'):

13
couchpotato/core/downloaders/rtorrent_.py

@ -12,7 +12,6 @@ from couchpotato.core.helpers.variable import cleanHost, splitString
from couchpotato.core.logger import CPLog
from bencode import bencode, bdecode
from rtorrent import RTorrent
from scandir import scandir
log = CPLog(__name__)
@ -154,19 +153,13 @@ class rTorrent(DownloaderBase):
return False
def getTorrentStatus(self, torrent):
if torrent.hashing or torrent.hash_checking or torrent.message:
return 'busy'
if not torrent.complete:
return 'busy'
if not torrent.open:
return 'completed'
if torrent.state and torrent.active:
if torrent.open:
return 'seeding'
return 'busy'
return 'completed'
def getAllDownloadStatus(self, ids):
log.debug('Checking rTorrent download status.')
@ -244,7 +237,7 @@ class rTorrent(DownloaderBase):
if torrent.is_multi_file() and torrent.directory.endswith(torrent.name):
# Remove empty directories bottom up
try:
for path, _, _ in scandir.walk(torrent.directory, topdown = False):
for path, _, _ in os.walk(sp(torrent.directory), topdown = False):
os.rmdir(path)
except OSError:
log.info('Directory "%s" contains extra files, unable to remove', torrent.directory)
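Reading the reworked getTorrentStatus, the decision appears to reduce to three cases: incomplete downloads stay busy, finished torrents that rTorrent still has open are seeding, and closed ones are completed. A standalone sketch of that order (the Torrent tuple is a hypothetical stand-in exposing only the fields the check reads):

```python
from collections import namedtuple

Torrent = namedtuple('Torrent', ['complete', 'open'])

def get_torrent_status(torrent):
    if not torrent.complete:
        return 'busy'       # still downloading
    if torrent.open:
        return 'seeding'    # finished but still active in rTorrent
    return 'completed'      # finished and closed

print(get_torrent_status(Torrent(complete = False, open = True)))   # busy
print(get_torrent_status(Torrent(complete = True, open = True)))    # seeding
print(get_torrent_status(Torrent(complete = True, open = False)))   # completed
```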

1
couchpotato/core/downloaders/synology.py

@ -90,6 +90,7 @@ class SynologyRPC(object):
self.download_url = 'http://%s:%s/webapi/DownloadStation/task.cgi' % (host, port)
self.auth_url = 'http://%s:%s/webapi/auth.cgi' % (host, port)
self.sid = None
self.username = username
self.password = password
self.destination = destination

4
couchpotato/core/downloaders/transmission.py

@ -174,8 +174,8 @@ class TransmissionRPC(object):
self.session = {}
if username and password:
password_manager = urllib2.HTTPPasswordMgrWithDefaultRealm()
password_manager.add_password(realm = None, uri = self.url, user = username, passwd = password)
opener = urllib2.build_opener(urllib2.HTTPBasicAuthHandler(password_manager), urllib2.HTTPDigestAuthHandler(password_manager))
password_manager.add_password(realm = 'Transmission', uri = self.url, user = username, passwd = password)
opener = urllib2.build_opener(urllib2.HTTPBasicAuthHandler(password_manager))
opener.addheaders = [('User-agent', 'couchpotato-transmission-client/1.0')]
urllib2.install_opener(opener)
elif username or password:

3
couchpotato/core/downloaders/utorrent.py

@ -168,7 +168,7 @@ class uTorrent(DownloaderBase):
status = 'busy'
if (torrent[1] & self.status_flags['STARTED'] or torrent[1] & self.status_flags['QUEUED']) and torrent[4] == 1000:
status = 'seeding'
elif (torrent[1] & self.status_flags['ERROR']):
elif torrent[1] & self.status_flags['ERROR']:
status = 'failed'
elif torrent[4] == 1000:
status = 'completed'
@ -229,7 +229,6 @@ class uTorrentAPI(object):
password_manager = urllib2.HTTPPasswordMgrWithDefaultRealm()
password_manager.add_password(realm = None, uri = self.url, user = username, passwd = password)
self.opener.add_handler(urllib2.HTTPBasicAuthHandler(password_manager))
self.opener.add_handler(urllib2.HTTPDigestAuthHandler(password_manager))
elif username or password:
log.debug('User or password missing, not using authentication.')
self.token = self.get_token()

2
couchpotato/core/helpers/encoding.py

@ -78,7 +78,7 @@ def sp(path, *args):
# Replace *NIX ambiguous '//' at the beginning of a path with '/' (crashes guessit)
path = re.sub('^//', '/', path)
return toUnicode(path)
return path
def ek(original, *args):

6
couchpotato/core/helpers/variable.py

@ -307,3 +307,9 @@ def scanForPassword(name):
if m:
return m.group(1).strip('. '), m.group(2).strip()
under_pat = re.compile(r'_([a-z])')
def underscoreToCamel(name):
return under_pat.sub(lambda x: x.group(1).upper(), name)
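The new underscoreToCamel helper is self-contained, so a quick usage check shows exactly what it does:

```python
import re

under_pat = re.compile(r'_([a-z])')

def underscoreToCamel(name):
    # Replace each '_x' with the uppercase letter: 'wait_for' becomes 'waitFor'.
    return under_pat.sub(lambda x: x.group(1).upper(), name)

print(underscoreToCamel('profile_id'))   # profileId
print(underscoreToCamel('hide_wanted'))  # hideWanted
print(underscoreToCamel('plain'))        # plain (no underscores, unchanged)
```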

6
couchpotato/core/logger.py

@ -25,6 +25,12 @@ class CPLog(object):
self.Env = Env
self.is_develop = Env.get('dev')
from couchpotato.core.event import addEvent
addEvent('app.after_shutdown', self.close)
def close(self, *args, **kwargs):
logging.shutdown()
def info(self, msg, replace_tuple = ()):
self.logger.info(self.addContext(msg, replace_tuple))

5
couchpotato/core/media/__init__.py

@ -1,7 +1,7 @@
import os
import traceback
from couchpotato import get_db, CPLog
from couchpotato import CPLog
from couchpotato.core.event import addEvent, fireEvent, fireEventAsync
from couchpotato.core.helpers.encoding import toUnicode
from couchpotato.core.plugins.base import Plugin
@ -25,11 +25,10 @@ class MediaBase(Plugin):
def onComplete():
try:
db = get_db()
media = fireEvent('media.get', media_id, single = True)
event_name = '%s.searcher.single' % media.get('type')
fireEventAsync(event_name, media, on_complete = self.createNotifyFront(media_id))
fireEventAsync(event_name, media, on_complete = self.createNotifyFront(media_id), manual = True)
except:
log.error('Failed creating onComplete: %s', traceback.format_exc())

10
couchpotato/core/media/_base/media/index.py

@ -99,7 +99,7 @@ from couchpotato.core.helpers.encoding import simplifyString"""
class TitleIndex(TreeBasedIndex):
_version = 2
_version = 4
custom_header = """from CodernityDB.tree_index import TreeBasedIndex
from string import ascii_letters
@ -123,16 +123,16 @@ from couchpotato.core.helpers.encoding import toUnicode, simplifyString"""
nr_prefix = '' if title and len(title) > 0 and title[0] in ascii_letters else '#'
title = simplifyString(title)
for prefix in ['the ']:
for prefix in ['the ', 'an ', 'a ']:
if prefix == title[:len(prefix)]:
title = title[len(prefix):]
break
return str(nr_prefix + title).ljust(32, '_')[:32]
return str(nr_prefix + title).ljust(32, ' ')[:32]
class StartsWithIndex(TreeBasedIndex):
_version = 2
_version = 3
custom_header = """from CodernityDB.tree_index import TreeBasedIndex
from string import ascii_letters
@ -153,7 +153,7 @@ from couchpotato.core.helpers.encoding import toUnicode, simplifyString"""
title = toUnicode(title)
title = simplifyString(title)
for prefix in ['the ']:
for prefix in ['the ', 'an ', 'a ']:
if prefix == title[:len(prefix)]:
title = title[len(prefix):]
break
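The title index now strips 'an ' and 'a ' as well as 'the ', and pads keys with spaces instead of underscores. A rough sketch of the key function; the real index runs CouchPotato's simplifyString over the title first, which a plain lower()/strip() only approximates:

```python
from string import ascii_letters

def title_index_key(title):
    title = title.lower().strip()
    # Titles that do not start with a letter are grouped under a '#' bucket.
    nr_prefix = '' if title and title[0] in ascii_letters else '#'
    for prefix in ['the ', 'an ', 'a ']:
        if title.startswith(prefix):
            title = title[len(prefix):]
            break
    # Space padding (instead of '_') keeps short titles sorting ahead of longer
    # titles that share the same prefix.
    return str(nr_prefix + title).ljust(32, ' ')[:32]

print(repr(title_index_key('The Matrix')))      # 'matrix' padded to 32 chars
print(repr(title_index_key('A Few Good Men')))  # 'few good men' padded to 32 chars
print(repr(title_index_key('300')))             # '#300' in the non-letter bucket
```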

46
couchpotato/core/media/_base/media/main.py

@ -1,6 +1,7 @@
import traceback
from string import ascii_lowercase
from CodernityDB.database import RecordNotFound
from couchpotato import tryInt, get_db
from couchpotato.api import addApiView
from couchpotato.core.event import fireEvent, fireEventAsync, addEvent
@ -107,8 +108,7 @@ class MediaPlugin(MediaBase):
def handler():
fireEvent(event, media_id = media_id, on_complete = self.createOnComplete(media_id))
if handler:
return handler
return handler
except:
log.error('Refresh handler for non existing media: %s', traceback.format_exc())
@ -120,25 +120,30 @@ class MediaPlugin(MediaBase):
def get(self, media_id):
db = get_db()
try:
db = get_db()
imdb_id = getImdb(str(media_id))
imdb_id = getImdb(str(media_id))
media = None
if imdb_id:
media = db.get('media', 'imdb-%s' % imdb_id, with_doc = True)['doc']
else:
media = db.get('id', media_id)
if imdb_id:
media = db.get('media', 'imdb-%s' % imdb_id, with_doc = True)['doc']
else:
media = db.get('id', media_id)
if media:
if media:
# Attach category
try: media['category'] = db.get('id', media.get('category_id'))
except: pass
# Attach category
try: media['category'] = db.get('id', media.get('category_id'))
except: pass
media['releases'] = fireEvent('release.for_media', media['_id'], single = True)
media['releases'] = fireEvent('release.for_media', media['_id'], single = True)
return media
return media
except RecordNotFound:
log.error('Media with id "%s" not found', media_id)
except:
raise
def getView(self, id = None, **kwargs):
@ -361,13 +366,18 @@ class MediaPlugin(MediaBase):
media = db.get('id', media_id)
if media:
deleted = False
media_releases = fireEvent('release.for_media', media['_id'], single = True)
if delete_from == 'all':
# Delete connected releases
for release in media_releases:
db.delete(release)
db.delete(media)
deleted = True
else:
media_releases = fireEvent('release.for_media', media['_id'], single = True)
total_releases = len(media_releases)
total_deleted = 0
new_media_status = None
@ -383,7 +393,7 @@ class MediaPlugin(MediaBase):
db.delete(release)
total_deleted += 1
if (total_releases == total_deleted and media['status'] != 'active') or (delete_from == 'wanted' and media['status'] == 'active'):
if (total_releases == total_deleted and media['status'] != 'active') or (delete_from == 'wanted' and media['status'] == 'active') or (not new_media_status and delete_from == 'late'):
db.delete(media)
deleted = True
elif new_media_status:

17
couchpotato/core/media/_base/providers/base.py

@ -88,10 +88,14 @@ class Provider(Plugin):
if data and len(data) > 0:
try:
data = XMLTree.fromstring(ss(data))
data = XMLTree.fromstring(data)
return self.getElements(data, item_path)
except:
log.error('Failed to parsing %s: %s', (self.getName(), traceback.format_exc()))
try:
data = XMLTree.fromstring(ss(data))
return self.getElements(data, item_path)
except:
log.error('Failed to parsing %s: %s', (self.getName(), traceback.format_exc()))
return []
@ -125,6 +129,9 @@ class YarrProvider(Provider):
else:
return []
def buildUrl(self, *args, **kwargs):
pass
def login(self):
# Check if we are still logged in every hour
@ -177,7 +184,7 @@ class YarrProvider(Provider):
try:
return self.urlopen(url, headers = {'User-Agent': Env.getIdentifier()}, show_error = False)
except:
log.error('Failed getting nzb from %s: %s', (self.getName(), traceback.format_exc()))
log.error('Failed getting release from %s: %s', (self.getName(), traceback.format_exc()))
return 'try_next'
@ -200,7 +207,7 @@ class YarrProvider(Provider):
self._search(media, quality, results)
# Search possible titles
else:
media_title = fireEvent('library.query', media, single = True)
media_title = fireEvent('library.query', media, include_year = False, single = True)
for title in possibleTitles(media_title):
self._searchOnTitle(title, media, quality, results)
@ -298,7 +305,7 @@ class ResultList(list):
old_score = new_result['score']
new_result['score'] = int(old_score * is_correct_weight)
log.info('Found correct release with weight %.02f, old_score(%d) now scaled to score(%d)', (
log.info2('Found correct release with weight %.02f, old_score(%d) now scaled to score(%d)', (
is_correct_weight,
old_score,
new_result['score']

8
couchpotato/core/media/_base/providers/nzb/binsearch.py

@ -2,7 +2,7 @@ import re
import traceback
from bs4 import BeautifulSoup
from couchpotato.core.helpers.variable import tryInt
from couchpotato.core.helpers.variable import tryInt, simplifyString
from couchpotato.core.logger import CPLog
from couchpotato.core.media._base.providers.nzb.base import NZBProvider
@ -50,8 +50,8 @@ class Base(NZBProvider):
def extra_check(item):
parts = re.search('available:.(?P<parts>\d+)./.(?P<total>\d+)', info.text)
total = tryInt(parts.group('total'))
parts = tryInt(parts.group('parts'))
total = float(tryInt(parts.group('total')))
parts = float(tryInt(parts.group('parts')))
if (total / parts) < 1 and ((total / parts) < 0.95 or ((total / parts) >= 0.95 and not ('par2' in info.text.lower() or 'pa3' in info.text.lower()))):
log.info2('Wrong: \'%s\', not complete: %s out of %s', (item['name'], parts, total))
@ -65,7 +65,7 @@ class Base(NZBProvider):
results.append({
'id': nzb_id,
'name': title.text,
'name': simplifyString(title.text),
'age': tryInt(age),
'size': self.parseSize(size_match.group('size')),
'url': self.urls['download'] % nzb_id,
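The binsearch change wraps both part counts in float() before dividing. CouchPotato targets Python 2, where dividing two ints floors the result, so completeness thresholds such as 0.95 could never be hit; a minimal illustration of the difference (the counts are made up):

```python
def ratio(total, parts):
    # float() on either operand forces true division, even on Python 2.
    return float(total) / float(parts)

print(ratio(47, 50))  # 0.94 - can be compared against the 0.95 threshold
print(47 // 50)       # 0    - what plain division of two ints gives on Python 2
```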

53
couchpotato/core/media/_base/providers/nzb/newznab.py

@ -1,8 +1,7 @@
from urllib2 import HTTPError
from urlparse import urlparse
import time
import traceback
import urllib2
import re
from couchpotato.core.helpers.encoding import tryUrlencode, toUnicode
from couchpotato.core.helpers.rss import RSS
@ -12,6 +11,7 @@ from couchpotato.core.media._base.providers.base import ResultList
from couchpotato.core.media._base.providers.nzb.base import NZBProvider
from couchpotato.environment import Env
from dateutil.parser import parse
from requests import HTTPError
log = CPLog(__name__)
@ -20,10 +20,11 @@ log = CPLog(__name__)
class Base(NZBProvider, RSS):
urls = {
'detail': 'details&id=%s',
'detail': 'details/%s',
'download': 't=get&id=%s'
}
passwords_regex = 'password|wachtwoord'
limits_reached = {}
http_time_between_calls = 1 # Seconds
@ -43,10 +44,8 @@ class Base(NZBProvider, RSS):
def _searchOnHost(self, host, media, quality, results):
query = self.buildUrl(media, host['api_key'])
query = self.buildUrl(media, host)
url = '%s&%s' % (self.getUrl(host['host']), query)
nzbs = self.getRSSData(url, cache_timeout = 1800, headers = {'User-Agent': Env.getIdentifier()})
for nzb in nzbs:
@ -79,6 +78,23 @@ class Base(NZBProvider, RSS):
if spotter:
name_extra = spotter
description = ''
if "@spot.net" in nzb_id:
try:
# Get details for extended description to retrieve passwords
query = self.buildDetailsUrl(nzb_id, host['api_key'])
url = '%s&%s' % (self.getUrl(host['host']), query)
nzb_details = self.getRSSData(url, cache_timeout = 1800, headers = {'User-Agent': Env.getIdentifier()})[0]
description = self.getTextElement(nzb_details, 'description')
# Extract a password from the description
password = re.search('(?:' + self.passwords_regex + ')(?: *)(?:\:|\=)(?: *)(.*?)\<br\>|\n|$', description, flags = re.I).group(1)
if password:
name += ' {{%s}}' % password.strip()
except:
log.debug('Error getting details of "%s": %s', (name, traceback.format_exc()))
results.append({
'id': nzb_id,
'provider_extra': urlparse(host['host']).hostname or host['host'],
@ -87,8 +103,9 @@ class Base(NZBProvider, RSS):
'age': self.calculateAge(int(time.mktime(parse(date).timetuple()))),
'size': int(self.getElement(nzb, 'enclosure').attrib['length']) / 1024 / 1024,
'url': ((self.getUrl(host['host']) + self.urls['download']) % tryUrlencode(nzb_id)) + self.getApiExt(host),
'detail_url': '%sdetails/%s' % (cleanHost(host['host']), tryUrlencode(nzb_id)),
'detail_url': (cleanHost(host['host']) + self.urls['detail']) % tryUrlencode(nzb_id),
'content': self.getTextElement(nzb, 'description'),
'description': description,
'score': host['extra_score'],
})
@ -166,16 +183,7 @@ class Base(NZBProvider, RSS):
return 'try_next'
try:
# Get final redirected url
log.debug('Checking %s for redirects.', url)
req = urllib2.Request(url)
req.add_header('User-Agent', self.user_agent)
res = urllib2.urlopen(req)
finalurl = res.geturl()
if finalurl != url:
log.debug('Redirect url used: %s', finalurl)
data = self.urlopen(finalurl, show_error = False)
data = self.urlopen(url, show_error = False)
self.limits_reached[host] = False
return data
except HTTPError as e:
@ -191,6 +199,15 @@ class Base(NZBProvider, RSS):
return 'try_next'
def buildDetailsUrl(self, nzb_id, api_key):
query = tryUrlencode({
't': 'details',
'id': nzb_id,
'apikey': api_key,
})
return query
config = [{
'name': 'newznab',
@ -217,7 +234,7 @@ config = [{
},
{
'name': 'host',
'default': 'api.nzb.su,dognzb.cr,nzbs.org,https://index.nzbgeek.info, https://smackdownonyou.com, https://www.nzbfinder.ws',
'default': 'api.nzb.su,api.dognzb.cr,nzbs.org,https://index.nzbgeek.info, https://smackdownonyou.com, https://www.nzbfinder.ws',
'description': 'The hostname of your newznab provider',
},
{
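For spotnet releases the provider now fetches the extended description and tries to lift a password out of it, appending it to the release name in '{{...}}' form. An illustrative, simplified version of that lookup; the exact pattern in the hunk differs and the sample description is invented:

```python
import re

passwords_regex = 'password|wachtwoord'

def extract_password(description):
    # Look for "password: ..." / "wachtwoord: ..." and capture up to a <br> or newline.
    match = re.search(
        r'(?:' + passwords_regex + r')\s*[:=]\s*(.+?)(?:<br>|\n|$)',
        description, flags = re.I)
    return match.group(1).strip() if match else None

name = 'Some.Release.2014.1080p'
description = 'Release notes<br>Wachtwoord: s3cret<br>Groeten'
password = extract_password(description)
if password:
    name += ' {{%s}}' % password

print(name)  # Some.Release.2014.1080p {{s3cret}}
```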

3
couchpotato/core/media/_base/providers/torrent/base.py

@ -44,7 +44,8 @@ class TorrentProvider(YarrProvider):
prop_name = 'proxy.%s' % proxy
last_check = float(Env.prop(prop_name, default = 0))
if last_check > time.time() - 1209600:
if last_check > time.time() - 86400:
continue
data = ''
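The proxy check above is throttled per proxy: the time of the last check is stored as a property and the proxy is skipped if it was validated within the window, which this commit shortens from 1209600 seconds (14 days) to 86400 (one day). A sketch of that throttle, with a plain dict standing in for Env.prop:

```python
import time

_props = {}                 # stand-in for Env.prop() storage
RECHECK_INTERVAL = 86400    # one day, down from 1209600 (14 days)

def needs_proxy_check(proxy):
    prop_name = 'proxy.%s' % proxy
    last_check = float(_props.get(prop_name, 0))
    if last_check > time.time() - RECHECK_INTERVAL:
        return False        # checked recently enough, skip it
    _props[prop_name] = time.time()
    return True

print(needs_proxy_check('https://tpb.ipredator.se'))  # True on the first call
print(needs_proxy_check('https://tpb.ipredator.se'))  # False until a day passes
```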

2
couchpotato/core/media/_base/providers/torrent/bithdtv.py

@ -25,7 +25,7 @@ class Base(TorrentProvider):
def _search(self, media, quality, results):
query = self.buildUrl(media)
query = self.buildUrl(media, quality)
url = "%s&%s" % (self.urls['search'], query)

11
couchpotato/core/media/_base/providers/torrent/bitsoup.py

@ -1,7 +1,6 @@
import traceback
from bs4 import BeautifulSoup
from couchpotato.core.helpers.encoding import simplifyString, tryUrlencode
from couchpotato.core.helpers.variable import tryInt
from couchpotato.core.logger import CPLog
from couchpotato.core.media._base.providers.torrent.base import TorrentProvider
@ -16,7 +15,7 @@ class Base(TorrentProvider):
'test': 'https://www.bitsoup.me/',
'login': 'https://www.bitsoup.me/takelogin.php',
'login_check': 'https://www.bitsoup.me/my.php',
'search': 'https://www.bitsoup.me/browse.php?',
'search': 'https://www.bitsoup.me/browse.php?%s',
'baseurl': 'https://www.bitsoup.me/%s',
}
@ -24,13 +23,7 @@ class Base(TorrentProvider):
def _searchOnTitle(self, title, movie, quality, results):
q = '"%s" %s' % (simplifyString(title), movie['info']['year'])
arguments = tryUrlencode({
'search': q,
})
url = "%s&%s" % (self.urls['search'], arguments)
url = self.urls['search'] % self.buildUrl(movie, quality)
url = self.urls['search'] % self.buildUrl(title, movie, quality)
data = self.getHTMLData(url)
if data:

21
couchpotato/core/media/_base/providers/torrent/ilovetorrents.py

@ -3,7 +3,7 @@ import traceback
from bs4 import BeautifulSoup
from couchpotato.core.helpers.encoding import toUnicode, tryUrlencode
from couchpotato.core.helpers.variable import tryInt
from couchpotato.core.helpers.variable import tryInt, splitString
from couchpotato.core.logger import CPLog
from couchpotato.core.media._base.providers.torrent.base import TorrentProvider
@ -15,7 +15,7 @@ class Base(TorrentProvider):
urls = {
'download': 'https://www.ilovetorrents.me/%s',
'detail': 'https//www.ilovetorrents.me/%s',
'detail': 'https://www.ilovetorrents.me/%s',
'search': 'https://www.ilovetorrents.me/browse.php?search=%s&page=%s&cat=%s',
'test': 'https://www.ilovetorrents.me/',
'login': 'https://www.ilovetorrents.me/takelogin.php',
@ -47,17 +47,24 @@ class Base(TorrentProvider):
data = self.getHTMLData(search_url)
if data:
try:
soup = BeautifulSoup(data)
results_table = soup.find('table', attrs = {'class': 'koptekst'})
results_table = None
data_split = splitString(data, '<table')
soup = None
for x in data_split:
soup = BeautifulSoup(x)
results_table = soup.find('table', attrs = {'class': 'koptekst'})
if results_table:
break
if not results_table:
return
try:
pagelinks = soup.findAll(href = re.compile('page'))
pageNumbers = [int(re.search('page=(?P<pageNumber>.+'')', i['href']).group('pageNumber')) for i in pagelinks]
total_pages = max(pageNumbers)
page_numbers = [int(re.search('page=(?P<page_number>.+'')', i['href']).group('page_number')) for i in pagelinks]
total_pages = max(page_numbers)
except:
pass

22
couchpotato/core/media/_base/providers/torrent/kickasstorrents.py

@ -32,8 +32,12 @@ class Base(TorrentMagnetProvider):
proxy_list = [
'https://kickass.to',
'http://kickass.pw',
'http://kickassto.come.in',
'http://katproxy.ws',
'http://www.kickassunblock.info',
'http://www.kickassproxy.info',
'http://katph.eu',
'http://kickassto.come.in',
]
def _search(self, media, quality, results):
@ -65,12 +69,13 @@ class Base(TorrentMagnetProvider):
if column_name:
if column_name == 'name':
link = td.find('div', {'class': 'torrentname'}).find_all('a')[1]
new['id'] = temp.get('id')[-8:]
link = td.find('div', {'class': 'torrentname'}).find_all('a')[2]
new['id'] = temp.get('id')[-7:]
new['name'] = link.text
new['url'] = td.find('a', 'imagnet')['href']
new['detail_url'] = self.urls['detail'] % (self.getDomain(), link['href'][1:])
new['score'] = 20 if td.find('a', 'iverif') else 0
new['verified'] = True if td.find('a', 'iverify') else False
new['score'] = 100 if new['verified'] else 0
elif column_name is 'size':
new['size'] = self.parseSize(td.text)
elif column_name is 'age':
@ -82,6 +87,10 @@ class Base(TorrentMagnetProvider):
nr += 1
# Only store verified torrents
if self.conf('only_verified') and not new['verified']:
continue
results.append(new)
except:
log.error('Failed parsing KickAssTorrents: %s', traceback.format_exc())
@ -152,6 +161,13 @@ config = [{
'description': 'Will not be (re)moved until this seed time (in hours) is met.',
},
{
'name': 'only_verified',
'advanced': True,
'type': 'bool',
'default': False,
'description': 'Only search for verified releases.'
},
{
'name': 'extra_score',
'advanced': True,
'label': 'Extra Score',

4
couchpotato/core/media/_base/providers/torrent/sceneaccess.py

@ -24,9 +24,9 @@ class Base(TorrentProvider):
http_time_between_calls = 1 # Seconds
def _search(self, media, quality, results):
def _searchOnTitle(self, title, media, quality, results):
url = self.buildUrl(media, quality)
url = self.buildUrl(title, media, quality)
data = self.getHTMLData(url)
if data:

21
couchpotato/core/media/_base/providers/torrent/thepiratebay.py

@ -24,15 +24,18 @@ class Base(TorrentMagnetProvider):
http_time_between_calls = 0
proxy_list = [
'https://tpb.ipredator.se',
'https://nobay.net',
'https://thebay.al',
'https://thepiratebay.se',
'http://pirateproxy.ca',
'http://tpb.al',
'http://thepiratebay.cd',
'http://thebootlegbay.com',
'http://www.tpb.gr',
'http://bayproxy.me',
'http://proxybay.eu',
'http://tpbproxy.co.uk',
'http://pirateproxy.in',
'http://www.getpirate.com',
'http://piratebay.io',
'http://bayproxy.li',
'http://proxybay.pw',
]
def _search(self, media, quality, results):
@ -65,7 +68,7 @@ class Base(TorrentMagnetProvider):
pass
entries = results_table.find_all('tr')
for result in entries[2:]:
for result in entries[1:]:
link = result.find(href = re.compile('torrent\/\d+\/'))
download = result.find(href = re.compile('magnet:'))
@ -109,7 +112,11 @@ class Base(TorrentMagnetProvider):
full_description = self.getCache('tpb.%s' % item['id'], item['detail_url'], cache_timeout = 25920000)
html = BeautifulSoup(full_description)
nfo_pre = html.find('div', attrs = {'class': 'nfo'})
description = toUnicode(nfo_pre.text) if nfo_pre else ''
description = ''
try:
description = toUnicode(nfo_pre.text)
except:
pass
item['description'] = description
return item

4
couchpotato/core/media/_base/providers/torrent/torrentday.py

@ -18,9 +18,9 @@ class Base(TorrentProvider):
http_time_between_calls = 1 # Seconds
def _search(self, media, quality, results):
def _searchOnTitle(self, title, media, quality, results):
query = self.buildUrl(media)
query = '"%s" %s' % (title, media['info']['year'])
data = {
'/browse.php?': None,

4
couchpotato/core/media/_base/providers/torrent/torrentleech.py

@ -24,9 +24,9 @@ class Base(TorrentProvider):
http_time_between_calls = 1 # Seconds
cat_backup_id = None
def _search(self, media, quality, results):
def _searchOnTitle(self, title, media, quality, results):
url = self.urls['search'] % self.buildUrl(media, quality)
url = self.urls['search'] % self.buildUrl(title, media, quality)
data = self.getHTMLData(url)

24
couchpotato/core/media/_base/providers/torrent/yify.py

@ -2,13 +2,13 @@ import traceback
from couchpotato.core.helpers.variable import tryInt, getIdentifier
from couchpotato.core.logger import CPLog
from couchpotato.core.media._base.providers.torrent.base import TorrentProvider
from couchpotato.core.media._base.providers.torrent.base import TorrentMagnetProvider
log = CPLog(__name__)
class Base(TorrentProvider):
class Base(TorrentMagnetProvider):
urls = {
'test': '%s/api',
@ -35,7 +35,11 @@ class Base(TorrentProvider):
def _search(self, movie, quality, results):
search_url = self.urls['search'] % (self.getDomain(), getIdentifier(movie), quality['identifier'])
domain = self.getDomain()
if not domain:
return
search_url = self.urls['search'] % (domain, getIdentifier(movie), quality['identifier'])
data = self.getJsonData(search_url)
@ -43,21 +47,19 @@ class Base(TorrentProvider):
try:
for result in data.get('MovieList'):
try:
title = result['TorrentUrl'].split('/')[-1][:-8].replace('_', '.').strip('._')
title = title.replace('.-.', '-')
title = title.replace('..', '.')
except:
continue
if result['Quality'] and result['Quality'] not in result['MovieTitle']:
title = result['MovieTitle'] + ' BrRip ' + result['Quality']
else:
title = result['MovieTitle'] + ' BrRip'
results.append({
'id': result['MovieID'],
'name': title,
'url': result['TorrentMagnetUrl'],
'detail_url': self.urls['detail'] % (self.getDomain(), result['MovieID']),
'detail_url': self.urls['detail'] % (domain, result['MovieID']),
'size': self.parseSize(result['Size']),
'seeders': tryInt(result['TorrentSeeds']),
'leechers': tryInt(result['TorrentPeers'])
'leechers': tryInt(result['TorrentPeers']),
})
except:

15
couchpotato/core/media/_base/search/main.py

@ -1,6 +1,6 @@
from couchpotato.api import addApiView
from couchpotato.core.event import fireEvent, addEvent
from couchpotato.core.helpers.variable import mergeDicts
from couchpotato.core.helpers.variable import mergeDicts, getImdb
from couchpotato.core.logger import CPLog
from couchpotato.core.plugins.base import Plugin
@ -35,12 +35,21 @@ class Search(Plugin):
elif isinstance(types, (list, tuple, set)):
types = list(types)
imdb_identifier = getImdb(q)
if not types:
result = fireEvent('info.search', q = q, merge = True)
if imdb_identifier:
result = fireEvent('movie.info', identifier = imdb_identifier, merge = True)
result = {result['type']: [result]}
else:
result = fireEvent('info.search', q = q, merge = True)
else:
result = {}
for media_type in types:
result[media_type] = fireEvent('%s.search' % media_type)
if imdb_identifier:
result[media_type] = fireEvent('%s.info' % media_type, identifier = imdb_identifier)
else:
result[media_type] = fireEvent('%s.search' % media_type, q = q)
return mergeDicts({
'success': True,
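The search endpoint now short-circuits when the query itself contains an IMDB identifier: instead of a text search it asks the info provider for that id directly. A sketch of the branching, with a small regex standing in for CouchPotato's getImdb helper and dicts standing in for the fireEvent calls:

```python
import re

def get_imdb(query):
    # Rough stand-in for couchpotato.core.helpers.variable.getImdb.
    match = re.search(r'(tt\d{7,8})', query)
    return match.group(1) if match else None

def search(q):
    imdb_identifier = get_imdb(q)
    if imdb_identifier:
        # fireEvent('movie.info', identifier = imdb_identifier, ...) in the plugin
        return {'event': 'movie.info', 'identifier': imdb_identifier}
    # fireEvent('info.search', q = q, ...) otherwise
    return {'event': 'info.search', 'q': q}

print(search('http://www.imdb.com/title/tt0133093/'))  # direct info lookup
print(search('the matrix'))                            # normal text search
```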

1
couchpotato/core/media/_base/search/static/search.css

@ -7,7 +7,6 @@
text-align: right;
height: 100%;
transition: all .4s cubic-bezier(0.9,0,0.1,1);
position: absolute;
z-index: 20;
border: 0 solid transparent;
border-bottom-width: 4px;

3
couchpotato/core/media/_base/search/static/search.js

@ -13,6 +13,9 @@ Block.Search = new Class({
self.input = new Element('input', {
'placeholder': 'Search & add a new media',
'events': {
'input': self.keyup.bind(self),
'paste': self.keyup.bind(self),
'change': self.keyup.bind(self),
'keyup': self.keyup.bind(self),
'focus': function(){
if(focus_timer) clearTimeout(focus_timer);

29
couchpotato/core/media/_base/searcher/main.py

@ -87,31 +87,23 @@ class Searcher(SearcherBase):
def containsOtherQuality(self, nzb, movie_year = None, preferred_quality = None):
if not preferred_quality: preferred_quality = {}
name = nzb['name']
size = nzb.get('size', 0)
nzb_words = re.split('\W+', simplifyString(name))
qualities = fireEvent('quality.all', single = True)
found = {}
for quality in qualities:
# Main in words
if quality['identifier'] in nzb_words:
found[quality['identifier']] = True
# Alt in words
if list(set(nzb_words) & set(quality['alternative'])):
found[quality['identifier']] = True
# Try guessing via quality tags
guess = fireEvent('quality.guess', [nzb.get('name')], single = True)
guess = fireEvent('quality.guess', files = [nzb.get('name')], size = nzb.get('size', None), single = True)
if guess:
found[guess['identifier']] = True
# Hack for older movies that don't contain quality tag
name = nzb['name']
size = nzb.get('size', 0)
year_name = fireEvent('scanner.name_year', name, single = True)
if len(found) == 0 and movie_year < datetime.datetime.now().year - 3 and not year_name.get('year', None):
if size > 3000: # Assume dvdr
if size > 20000: # Assume bd50
log.info('Quality was missing in name, assuming it\'s a BR-Disk based on the size: %s', size)
found['bd50'] = True
elif size > 3000: # Assume dvdr
log.info('Quality was missing in name, assuming it\'s a DVD-R based on the size: %s', size)
found['dvdr'] = True
else: # Assume dvdrip
@ -123,7 +115,10 @@ class Searcher(SearcherBase):
if found.get(allowed):
del found[allowed]
return not (found.get(preferred_quality['identifier']) and len(found) == 1)
if found.get(preferred_quality['identifier']) and len(found) == 1:
return False
return found
def correct3D(self, nzb, preferred_quality = None):
if not preferred_quality: preferred_quality = {}
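containsOtherQuality now delegates tag detection to quality.guess (passing the size along) and only falls back to a pure size heuristic for older movies whose release name carries no year or quality tag: very large releases are assumed to be BD50 discs, mid-sized ones DVD-R, the rest DVD rips. A sketch of just that size fallback, with the thresholds taken from the hunk (sizes in MB):

```python
def guess_quality_from_size(size_mb):
    if size_mb > 20000:
        return 'bd50'    # assume a full Blu-ray disc
    if size_mb > 3000:
        return 'dvdr'    # assume a DVD-R image
    return 'dvdrip'      # everything smaller is treated as a rip

print(guess_quality_from_size(35000))  # bd50
print(guess_quality_from_size(4500))   # dvdr
print(guess_quality_from_size(700))    # dvdrip
```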

4
couchpotato/core/media/movie/_base/main.py

@ -105,7 +105,7 @@ class MovieBase(MovieTypeBase):
'imdb': params.get('identifier')
},
'status': status if status else 'active',
'profile_id': params.get('profile_id', default_profile.get('_id')),
'profile_id': params.get('profile_id') or default_profile.get('_id'),
'category_id': cat_id if cat_id is not None and len(cat_id) > 0 and cat_id != '-1' else None,
}
@ -139,7 +139,7 @@ class MovieBase(MovieTypeBase):
# Clean snatched history
for release in fireEvent('release.for_media', m['_id'], single = True):
if release.get('status') in ['downloaded', 'snatched', 'done']:
if release.get('status') in ['downloaded', 'snatched', 'seeding', 'done']:
if params.get('ignore_previous', False):
release['status'] = 'ignored'
db.update(release)

3
couchpotato/core/media/movie/_base/static/list.js

@ -373,6 +373,7 @@ var MovieList = new Class({
(e).preventDefault();
this.set('text', 'Deleting..');
Api.request('media.delete', {
'method': 'post',
'data': {
'id': ids.join(','),
'delete_from': self.options.identifier
@ -413,6 +414,7 @@ var MovieList = new Class({
var ids = self.getSelectedMovies();
Api.request('movie.edit', {
'method': 'post',
'data': {
'id': ids.join(','),
'profile_id': self.mass_edit_quality.get('value')
@ -426,6 +428,7 @@ var MovieList = new Class({
var ids = self.getSelectedMovies();
Api.request('media.refresh', {
'method': 'post',
'data': {
'id': ids.join(',')
}

1
couchpotato/core/media/movie/_base/static/2_manage.js → couchpotato/core/media/movie/_base/static/manage.js

@ -2,6 +2,7 @@ Page.Manage = new Class({
Extends: PageBase,
order: 20,
name: 'manage',
title: 'Do stuff to your existing movies!',

8
couchpotato/core/media/movie/_base/static/movie.actions.js

@ -78,7 +78,7 @@ MA.IMDB = new Class({
create: function(){
var self = this;
self.id = self.movie.get('imdb') || self.movie.get('identifier');
self.id = self.movie.getIdentifier ? self.movie.getIdentifier() : self.get('imdb');
self.el = new Element('a.imdb', {
'title': 'Go to the IMDB page of ' + self.getTitle(),
@ -252,7 +252,7 @@ MA.Release = new Class({
self.trynext_container.adopt(
new Element('span.or', {
'text': 'This movie is snatched, if anything went wrong, download'
'text': 'If anything went wrong, download'
}),
lr ? new Element('a.button.orange', {
'text': 'the same release again',
@ -684,7 +684,7 @@ MA.Readd = new Class({
var movie_done = self.movie.data.status == 'done';
if(self.movie.data.releases && !movie_done)
var snatched = self.movie.data.releases.filter(function(release){
return release.status && (release.status == 'snatched' || release.status == 'downloaded' || release.status == 'done');
return release.status && (release.status == 'snatched' || release.status == 'seeding' || release.status == 'downloaded' || release.status == 'done');
}).length;
if(movie_done || snatched && snatched > 0)
@ -703,7 +703,7 @@ MA.Readd = new Class({
Api.request('movie.add', {
'data': {
'identifier': self.movie.get('identifier'),
'identifier': self.movie.getIdentifier(),
'ignore_previous': 1
}
});

6
couchpotato/core/media/movie/_base/static/movie.css

@ -123,6 +123,7 @@
.movies.thumbs_list .movie {
width: 16.66667%;
height: auto;
min-height: 200px;
display: inline-block;
margin: 0;
padding: 0;
@ -133,6 +134,7 @@
@media all and (max-width: 800px) {
.movies.thumbs_list .movie {
width: 25%;
min-height: 100px;
}
}
@ -355,12 +357,8 @@
top: 30px;
clear: both;
bottom: 30px;
overflow: hidden;
position: absolute;
}
.movies .data:hover .description {
overflow: auto;
}
.movies.list_list .movie:not(.details_view) .info .description,
.movies.mass_edit_list .info .description,
.movies.thumbs_list .info .description {

17
couchpotato/core/media/movie/_base/static/movie.js

@ -158,7 +158,7 @@ var Movie = new Class({
'text': self.data.info.year || 'n/a'
})
),
self.description = new Element('div.description', {
self.description = new Element('div.description.tiny_scroll', {
'text': self.data.info.plot
}),
self.quality = new Element('div.quality', {
@ -250,6 +250,10 @@ var Movie = new Class({
getUnprefixedTitle: function(t){
if(t.substr(0, 4).toLowerCase() == 'the ')
t = t.substr(4) + ', The';
else if(t.substr(0, 3).toLowerCase() == 'an ')
t = t.substr(3) + ', An';
else if(t.substr(0, 2).toLowerCase() == 'a ')
t = t.substr(2) + ', A';
return t;
},
@ -296,6 +300,17 @@ var Movie = new Class({
self.el.removeClass(self.view+'_view')
},
getIdentifier: function(){
var self = this;
try {
return self.get('identifiers').imdb;
}
catch (e){ }
return self.get('imdb');
},
get: function(attr){
return this.data[attr] || this.data.info[attr]
},

1
couchpotato/core/media/movie/_base/static/1_wanted.js → couchpotato/core/media/movie/_base/static/wanted.js

@ -2,6 +2,7 @@ Page.Wanted = new Class({
Extends: PageBase,
order: 10,
name: 'wanted',
title: 'Gimmy gimmy gimmy!',
folder_browser: null,

14
couchpotato/core/media/movie/charts/__init__.py

@ -28,6 +28,20 @@ config = [{
'advanced': True,
'description': '(hours)',
},
{
'name': 'hide_wanted',
'default': False,
'type': 'bool',
'advanced': True,
'description': 'Hide the chart movies that are already in your wanted list.',
},
{
'name': 'hide_library',
'default': False,
'type': 'bool',
'advanced': True,
'description': 'Hide the chart movies that are already in your library.',
},
],
},
],

5
couchpotato/core/media/movie/charts/main.py

@ -36,7 +36,6 @@ class Charts(Plugin):
'charts': charts
}
def updateViewCache(self):
if self.update_in_progress:
@ -46,9 +45,13 @@ class Charts(Plugin):
if catched_charts:
return catched_charts
charts = []
try:
self.update_in_progress = True
charts = fireEvent('automation.get_chart_list', merge = True)
for chart in charts:
chart['hide_wanted'] = self.conf('hide_wanted')
chart['hide_library'] = self.conf('hide_library')
self.setCache('charts_cached', charts, timeout = 7200 * tryInt(self.conf('update_interval', default = 12)))
except:
log.error('Failed refreshing charts')

67
couchpotato/core/media/movie/charts/static/charts.css

@ -3,15 +3,21 @@
margin-bottom: 30px;
}
.charts > h2 {
height: 40px;
}
.charts .chart {
display: inline-block;
width: 50%;
vertical-align: top;
}
.charts > h2 {
height: 40px;
}
.charts .chart {
display: inline-block;
width: 50%;
vertical-align: top;
max-height: 510px;
scrollbar-base-color: #4e5969;
}
.charts .chart .media_result.hidden {
display: none;
}
.charts .refresh {
clear:both;
@ -25,30 +31,30 @@
text-align:center;
}
.charts .refresh a {
text-align: center;
padding: 0;
display: none;
width: 30px;
height: 30px;
position: absolute;
right: 10px;
top: -40px;
opacity: .7;
}
.charts .refresh a {
text-align: center;
padding: 0;
display: none;
width: 30px;
height: 30px;
position: absolute;
right: 10px;
top: -40px;
opacity: .7;
}
.charts .refresh a:hover {
opacity: 1;
}
.charts .refresh a:hover {
opacity: 1;
}
.charts p.no_charts_enabled {
padding: 0.7em 1em;
display: none;
}
.charts p.no_charts_enabled {
padding: 0.7em 1em;
display: none;
}
.charts .chart h3 a {
color: #fff;
}
.charts .chart h3 a {
color: #fff;
}
.charts .chart .media_result {
@ -137,7 +143,6 @@
padding: 0 3px 10px 0;
}
.charts .chart .media_result .data:before {
bottom: 0;
content: '';
display: block;
height: 10px;

8
couchpotato/core/media/movie/charts/static/charts.js

@ -22,9 +22,11 @@ var Charts = new Class({
'events': {
'click': function(e) {
e.preventDefault();
self.el.getChildren('div.chart').destroy();
self.el.getElements('.chart').destroy();
self.el_refreshing_text.show();
self.el_refresh_link.hide();
self.api_request = Api.request('charts.view', {
'data': { 'force_update': 1 },
'onComplete': self.fill.bind(self)
@ -72,7 +74,7 @@ var Charts = new Class({
Object.each(json.charts, function(chart){
var c = new Element('div.chart').grab(
var c = new Element('div.chart.tiny_scroll').grab(
new Element('h3').grab( new Element('a', {
'text': chart.name,
'href': chart.url
@ -89,7 +91,7 @@ var Charts = new Class({
}
});
var in_database_class = movie.in_wanted ? 'chart_in_wanted' : (movie.in_library ? 'chart_in_library' : ''),
var in_database_class = (chart.hide_wanted && movie.in_wanted) ? 'hidden' : (movie.in_wanted ? 'chart_in_wanted' : ((chart.hide_library && movie.in_library) ? 'hidden': (movie.in_library ? 'chart_in_library' : ''))),
in_database_title = movie.in_wanted ? 'Movie in wanted list' : (movie.in_library ? 'Movie in library' : '');
m.el

11
couchpotato/core/media/movie/providers/automation/bluray.py

@ -1,4 +1,5 @@
from bs4 import BeautifulSoup
from couchpotato import fireEvent
from couchpotato.core.helpers.rss import RSS
from couchpotato.core.helpers.variable import tryInt
from couchpotato.core.logger import CPLog
@ -82,6 +83,7 @@ class Bluray(Automation, RSS):
def getChartList(self):
# Nearly identical to 'getIMDBids', but we don't care about minimalMovie and return all movie data (not just id)
movie_list = {'name': 'Blu-ray.com - New Releases', 'url': self.display_url, 'order': self.chart_order, 'list': []}
movie_ids = []
max_items = int(self.conf('max_items', section='charts', default=5))
rss_movies = self.getRSSData(self.rss_url)
@ -95,6 +97,15 @@ class Bluray(Automation, RSS):
movie = self.search(name, year)
if movie:
if movie.get('imdb') in movie_ids:
continue
is_movie = fireEvent('movie.is_movie', identifier = movie.get('imdb'), single = True)
if not is_movie:
continue
movie_ids.append(movie.get('imdb'))
movie_list['list'].append( movie )
if len(movie_list['list']) >= max_items:
break
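The reworked getChartList above deduplicates on the IMDB id, drops anything the 'movie.is_movie' check rejects, and stops once max_items titles are collected. A minimal standalone sketch of that filtering pattern (the is_movie check is passed in as a plain callable here; in CouchPotato it is an event call):

def build_chart(search_results, is_movie, max_items = 5):
    # Mirrors the getChartList filtering: dedupe on imdb id,
    # skip non-movies, stop once max_items titles are collected.
    seen_ids = []
    chart_list = []
    for movie in search_results:
        imdb_id = movie.get('imdb')
        if imdb_id in seen_ids:
            continue
        if not is_movie(imdb_id):
            continue
        seen_ids.append(imdb_id)
        chart_list.append(movie)
        if len(chart_list) >= max_items:
            break
    return chart_list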

191
couchpotato/core/media/movie/providers/automation/imdb.py

@ -3,6 +3,7 @@ import re
from bs4 import BeautifulSoup
from couchpotato import fireEvent
from couchpotato.core.helpers.encoding import ss
from couchpotato.core.helpers.rss import RSS
from couchpotato.core.helpers.variable import getImdb, splitString, tryInt
from couchpotato.core.logger import CPLog
@ -28,6 +29,39 @@ class IMDBBase(Automation, RSS):
def getInfo(self, imdb_id):
return fireEvent('movie.info', identifier = imdb_id, extended = False, merge = True)
def getFromURL(self, url):
log.debug('Getting IMDBs from: %s', url)
html = self.getHTMLData(url)
try:
split = splitString(html, split_on = "<div class=\"list compact\">")[1]
html = splitString(split, split_on = "<div class=\"pages\">")[0]
except:
try:
split = splitString(html, split_on = "<div id=\"main\">")
if len(split) < 2:
log.error('Failed parsing IMDB page "%s", unexpected html.', url)
return []
html = BeautifulSoup(split[1])
for x in ['list compact', 'lister', 'list detail sub-list']:
html2 = html.find('div', attrs = {
'class': x
})
if html2:
html = html2.contents
html = ''.join([str(x) for x in html])
break
except:
log.error('Failed parsing IMDB page "%s": %s', (url, traceback.format_exc()))
html = ss(html)
imdbs = getImdb(html, multiple = True) if html else []
return imdbs
class IMDBWatchlist(IMDBBase):
@ -65,16 +99,7 @@ class IMDBWatchlist(IMDBBase):
try:
w_url = '%s&start=%s' % (watchlist_url, start)
log.debug('Started IMDB watchlists: %s', w_url)
html = self.getHTMLData(w_url)
try:
split = splitString(html, split_on="<div class=\"list compact\">")[1]
html = splitString(split, split_on="<div class=\"pages\">")[0]
except:
pass
imdbs = getImdb(html, multiple = True) if html else []
imdbs = self.getFromURL(w_url)
for imdb in imdbs:
if imdb not in movies:
@ -85,13 +110,14 @@ class IMDBWatchlist(IMDBBase):
log.debug('Found %s movies on %s', (len(imdbs), w_url))
if len(imdbs) < 250:
if len(imdbs) < 225:
break
start += 250
start = len(movies)
except:
log.error('Failed loading IMDB watchlist: %s %s', (watchlist_url, traceback.format_exc()))
break
return movies
@ -100,95 +126,88 @@ class IMDBAutomation(IMDBBase):
enabled_option = 'automation_providers_enabled'
chart_urls = {
'theater': 'http://www.imdb.com/movies-in-theaters/',
'top250': 'http://www.imdb.com/chart/top',
'boxoffice': 'http://www.imdb.com/chart/',
}
chart_names = {
'theater': 'IMDB - Movies in Theaters',
'top250': 'IMDB - Top 250 Movies',
'boxoffice': 'IMDB - Box Office',
}
chart_order = {
'theater': 2,
'top250': 4,
'boxoffice': 3,
charts = {
'theater': {
'order': 1,
'name': 'IMDB - Movies in Theaters',
'url': 'http://www.imdb.com/movies-in-theaters/',
},
'boxoffice': {
'order': 2,
'name': 'IMDB - Box Office',
'url': 'http://www.imdb.com/boxoffice/',
},
'rentals': {
'order': 3,
'name': 'IMDB - Top DVD rentals',
'url': 'http://www.imdb.com/boxoffice/rentals',
'type': 'json',
},
'top250': {
'order': 4,
'name': 'IMDB - Top 250 Movies',
'url': 'http://www.imdb.com/chart/top',
},
}
first_table = ['boxoffice']
def getIMDBids(self):
movies = []
for url in self.chart_urls:
if self.conf('automation_charts_%s' % url):
data = self.getHTMLData(self.chart_urls[url])
if data:
html = BeautifulSoup(data)
try:
result_div = html.find('div', attrs = {'id': 'main'})
for name in self.charts:
chart = self.charts[name]
url = chart.get('url')
try:
if url in self.first_table:
table = result_div.find('table')
result_div = table if table else result_div
except:
pass
if self.conf('automation_charts_%s' % name):
imdb_ids = self.getFromURL(url)
imdb_ids = getImdb(str(result_div), multiple = True)
for imdb_id in imdb_ids:
info = self.getInfo(imdb_id)
if info and self.isMinimalMovie(info):
movies.append(imdb_id)
try:
for imdb_id in imdb_ids:
info = self.getInfo(imdb_id)
if info and self.isMinimalMovie(info):
movies.append(imdb_id)
if self.shuttingDown():
break
if self.shuttingDown():
break
except:
log.error('Failed loading IMDB chart results from %s: %s', (url, traceback.format_exc()))
except:
log.error('Failed loading IMDB chart results from %s: %s', (url, traceback.format_exc()))
return movies
def getChartList(self):
# Nearly identical to 'getIMDBids', but we don't care about minimalMovie and return all movie data (not just id)
movie_lists = []
max_items = int(self.conf('max_items', section='charts', default=5))
max_items = int(self.conf('max_items', section = 'charts', default=5))
for url in self.chart_urls:
if self.conf('chart_display_%s' % url):
movie_list = {'name': self.chart_names[url], 'url': self.chart_urls[url], 'order': self.chart_order[url], 'list': []}
data = self.getHTMLData(self.chart_urls[url])
if data:
html = BeautifulSoup(data)
for name in self.charts:
chart = self.charts[name].copy()
url = chart.get('url')
try:
result_div = html.find('div', attrs = {'id': 'main'})
if self.conf('chart_display_%s' % name):
try:
if url in self.first_table:
table = result_div.find('table')
result_div = table if table else result_div
except:
pass
chart['list'] = []
imdb_ids = getImdb(str(result_div), multiple = True)
imdb_ids = self.getFromURL(url)
for imdb_id in imdb_ids[0:max_items]:
info = self.getInfo(imdb_id)
movie_list['list'].append(info)
try:
for imdb_id in imdb_ids[0:max_items]:
if self.shuttingDown():
break
except:
log.error('Failed loading IMDB chart results from %s: %s', (url, traceback.format_exc()))
is_movie = fireEvent('movie.is_movie', identifier = imdb_id, single = True)
if not is_movie:
continue
info = self.getInfo(imdb_id)
chart['list'].append(info)
if self.shuttingDown():
break
except:
log.error('Failed loading IMDB chart results from %s: %s', (url, traceback.format_exc()))
if movie_list['list']:
movie_lists.append(movie_list)
if chart['list']:
movie_lists.append(chart)
return movie_lists
@ -241,11 +260,18 @@ config = [{
'default': True,
},
{
'name': 'automation_charts_rentals',
'type': 'bool',
'label': 'DVD Rentals',
'description': 'Top DVD <a href="http://www.imdb.com/boxoffice/rentals/">rentals</a> chart',
'default': True,
},
{
'name': 'automation_charts_top250',
'type': 'bool',
'label': 'TOP 250',
'description': 'IMDB <a href="http://www.imdb.com/chart/top/">TOP 250</a> chart',
'default': True,
'default': False,
},
{
'name': 'automation_charts_boxoffice',
@ -283,6 +309,13 @@ config = [{
'default': False,
},
{
'name': 'chart_display_rentals',
'type': 'bool',
'label': 'DVD Rentals',
'description': 'Top DVD <a href="http://www.imdb.com/boxoffice/rentals/">rentals</a> chart',
'default': True,
},
{
'name': 'chart_display_boxoffice',
'type': 'bool',
'label': 'Box office TOP 10',
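The separate chart_urls/chart_names/chart_order dicts are consolidated into a single charts dict, and both getIMDBids and getChartList now iterate it and delegate page parsing to getFromURL. A rough sketch of that iteration, with the config lookup and the parser stubbed out as plain callables (an assumption for illustration, not the real plugin API):

charts = {
    'theater': {'order': 1, 'name': 'IMDB - Movies in Theaters', 'url': 'http://www.imdb.com/movies-in-theaters/'},
    'boxoffice': {'order': 2, 'name': 'IMDB - Box Office', 'url': 'http://www.imdb.com/boxoffice/'},
}

def collect_ids(conf, get_from_url):
    # conf and get_from_url stand in for self.conf / self.getFromURL
    ids = []
    for name, chart in charts.items():
        if not conf('automation_charts_%s' % name):
            continue
        ids.extend(get_from_url(chart['url']))
    return ids

print(collect_ids(lambda key: True, lambda url: ['tt0000001']))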

47
couchpotato/core/media/movie/providers/automation/popularmovies.py

@ -0,0 +1,47 @@
from couchpotato import fireEvent
from couchpotato.core.logger import CPLog
from couchpotato.core.media.movie.providers.automation.base import Automation
log = CPLog(__name__)
autoload = 'PopularMovies'
class PopularMovies(Automation):
interval = 1800
url = 'https://s3.amazonaws.com/popular-movies/movies.json'
def getIMDBids(self):
movies = []
retrieved_movies = self.getJsonData(self.url)
for movie in retrieved_movies.get('movies'):
imdb_id = movie.get('imdb_id')
info = fireEvent('movie.info', identifier = imdb_id, extended = False, merge = True)
if self.isMinimalMovie(info):
movies.append(imdb_id)
return movies
config = [{
'name': 'popularmovies',
'groups': [
{
'tab': 'automation',
'list': 'automation_providers',
'name': 'popularmovies_automation',
'label': 'Popular Movies',
'description': 'Imports the <a href="http://movies.stevenlu.com/">top titles of movies that have been in theaters</a>. Script provided by <a href="https://github.com/sjlu/popular-movies">Steven Lu</a>',
'options': [
{
'name': 'automation_enabled',
'default': False,
'type': 'enabler',
},
],
},
],
}]
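The new provider expects the movies.json feed to be an object with a 'movies' list whose entries carry an 'imdb_id'; getIMDBids walks that list and keeps the ids whose info passes isMinimalMovie. A small illustration of consuming such a payload (the payload below is a made-up example, not real feed data):

payload = {
    'movies': [
        {'imdb_id': 'tt0000001', 'title': 'Example One'},
        {'imdb_id': 'tt0000002', 'title': 'Example Two'},
    ]
}

def imdb_ids(feed, passes_minimal):
    # passes_minimal stands in for the isMinimalMovie check on movie.info
    return [m.get('imdb_id') for m in feed.get('movies', []) if passes_minimal(m.get('imdb_id'))]

print(imdb_ids(payload, lambda _id: True))  # ['tt0000001', 'tt0000002']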

9
couchpotato/core/media/movie/providers/info/_modifier.py

@ -26,7 +26,14 @@ class MovieResultModifier(Plugin):
'backdrop': [],
'poster_original': [],
'backdrop_original': [],
'actors': {}
'actors': {},
'landscape': [],
'logo': [],
'clear_art': [],
'disc_art': [],
'banner': [],
'extra_thumbs': [],
'extra_fanart': []
},
'runtime': 0,
'plot': '',

2
couchpotato/core/media/movie/providers/info/couchpotatoapi.py

@ -29,7 +29,7 @@ class CouchPotatoApi(MovieProvider):
api_version = 1
def __init__(self):
addEvent('movie.info', self.getInfo, priority = 1)
addEvent('movie.info', self.getInfo, priority = 2)
addEvent('movie.info.release_date', self.getReleaseDate)
addEvent('info.search', self.search, priority = 1)

130
couchpotato/core/media/movie/providers/info/fanarttv.py

@ -0,0 +1,130 @@
import traceback
from couchpotato import tryInt
from couchpotato.core.event import addEvent
from couchpotato.core.logger import CPLog
from couchpotato.core.media.movie.providers.base import MovieProvider
log = CPLog(__name__)
autoload = 'FanartTV'
class FanartTV(MovieProvider):
urls = {
'api': 'http://api.fanart.tv/webservice/movie/b28b14e9be662e027cfbc7c3dd600405/%s/JSON/all/1/2'
}
MAX_EXTRAFANART = 20
http_time_between_calls = 0
def __init__(self):
addEvent('movie.info', self.getArt, priority = 1)
def getArt(self, identifier = None, **kwargs):
log.debug("Getting Extra Artwork from Fanart.tv...")
if not identifier:
return {}
images = {}
try:
url = self.urls['api'] % identifier
fanart_data = self.getJsonData(url)
if fanart_data:
name, resource = fanart_data.items()[0]
log.debug('Found images for %s', name)
images = self._parseMovie(resource)
except:
log.error('Failed getting extra art for %s: %s',
(identifier, traceback.format_exc()))
return {}
return {
'images': images
}
def _parseMovie(self, movie):
images = {
'landscape': self._getMultImages(movie.get('moviethumb', []), 1),
'logo': [],
'disc_art': self._getMultImages(self._trimDiscs(movie.get('moviedisc', [])), 1),
'clear_art': self._getMultImages(movie.get('hdmovieart', []), 1),
'banner': self._getMultImages(movie.get('moviebanner', []), 1),
'extra_fanart': [],
}
if len(images['clear_art']) == 0:
images['clear_art'] = self._getMultImages(movie.get('movieart', []), 1)
images['logo'] = self._getMultImages(movie.get('hdmovielogo', []), 1)
if len(images['logo']) == 0:
images['logo'] = self._getMultImages(movie.get('movielogo', []), 1)
fanarts = self._getMultImages(movie.get('moviebackground', []), self.MAX_EXTRAFANART + 1)
if fanarts:
images['backdrop_original'] = [fanarts[0]]
images['extra_fanart'] = fanarts[1:]
return images
def _trimDiscs(self, disc_images):
"""
Return a subset of discImages. Only bluray disc images will be returned.
"""
trimmed = []
for disc in disc_images:
if disc.get('disc_type') == 'bluray':
trimmed.append(disc)
if len(trimmed) == 0:
return disc_images
return trimmed
def _getImage(self, images):
image_url = None
highscore = -1
for image in images:
if tryInt(image.get('likes')) > highscore:
highscore = tryInt(image.get('likes'))
image_url = image.get('url')
return image_url
def _getMultImages(self, images, n):
"""
Chooses the best n images and returns them as a list.
If n<0, all images will be returned.
"""
image_urls = []
pool = []
for image in images:
if image.get('lang') == 'en':
pool.append(image)
orig_pool_size = len(pool)
while len(pool) > 0 and (n < 0 or orig_pool_size - len(pool) < n):
best = None
highscore = -1
for image in pool:
if tryInt(image.get('likes')) > highscore:
highscore = tryInt(image.get('likes'))
best = image
image_urls.append(best.get('url'))
pool.remove(best)
return image_urls
def isDisabled(self):
if self.conf('api_key') == '':
log.error('No API key provided.')
return True
return False
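_getMultImages above repeatedly pulls the most-liked English image out of the pool until n images are collected (or the pool empties; n < 0 returns everything). A standalone version of that selection, assuming each image dict carries 'lang', 'likes' and 'url' as in the fanart.tv payload:

def pick_images(images, n):
    # Keep English images, then repeatedly take the one with the most likes.
    pool = [img for img in images if img.get('lang') == 'en']
    picked = []
    while pool and (n < 0 or len(picked) < n):
        best = max(pool, key = lambda img: int(img.get('likes', 0)))
        picked.append(best.get('url'))
        pool.remove(best)
    return picked

# Example: the second image wins on likes and comes first; the non-English one is dropped.
print(pick_images([
    {'lang': 'en', 'likes': '2', 'url': 'a.jpg'},
    {'lang': 'en', 'likes': '7', 'url': 'b.jpg'},
    {'lang': 'de', 'likes': '9', 'url': 'c.jpg'},
], 2))  # ['b.jpg', 'a.jpg']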

33
couchpotato/core/media/movie/providers/info/themoviedb.py

@ -13,9 +13,10 @@ autoload = 'TheMovieDb'
class TheMovieDb(MovieProvider):
MAX_EXTRATHUMBS = 4
def __init__(self):
addEvent('movie.info', self.getInfo, priority = 2)
addEvent('movie.info', self.getInfo, priority = 3)
addEvent('movie.info_by_tmdb', self.getInfo)
# Configure TMDB settings
@ -97,16 +98,18 @@ class TheMovieDb(MovieProvider):
if not movie_data:
# Images
poster = self.getImage(movie, type = 'poster', size = 'poster')
poster = self.getImage(movie, type = 'poster', size = 'w154')
poster_original = self.getImage(movie, type = 'poster', size = 'original')
backdrop_original = self.getImage(movie, type = 'backdrop', size = 'original')
extra_thumbs = self.getMultImages(movie, type = 'backdrops', size = 'original', n = self.MAX_EXTRATHUMBS, skipfirst = True)
images = {
'poster': [poster] if poster else [],
#'backdrop': [backdrop] if backdrop else [],
'poster_original': [poster_original] if poster_original else [],
'backdrop_original': [backdrop_original] if backdrop_original else [],
'actors': {}
'actors': {},
'extra_thumbs': extra_thumbs
}
# Genres
@ -172,6 +175,30 @@ class TheMovieDb(MovieProvider):
return image_url
def getMultImages(self, movie, type = 'backdrops', size = 'original', n = -1, skipfirst = False):
"""
If n < 0, return all images. Otherwise return n images.
If n > len(getattr(movie, type)), then return all images.
If skipfirst is True, then it will skip getattr(movie, type)[0]. This
is because backdrops[0] is typically the main backdrop.
"""
image_urls = []
try:
images = getattr(movie, type)
if n < 0 or n > len(images):
num_images = len(images)
else:
num_images = n
for i in range(int(skipfirst), num_images + int(skipfirst)):
image_urls.append(images[i].geturl(size = size))
except:
log.debug('Failed getting %i %s.%s for "%s"', (n, type, size, ss(str(movie))))
return image_urls
def isDisabled(self):
if self.conf('api_key') == '':
log.error('No API key provided.')
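getMultImages is used above to pull MAX_EXTRATHUMBS backdrops for 'extra_thumbs', skipping index 0 because that first backdrop is already stored as backdrop_original. A simplified sketch of the slicing behaviour, with a plain list standing in for the TMDB image objects (edge cases are not reproduced exactly):

def mult_images(images, n = -1, skipfirst = False):
    # Same windowing as getMultImages: optionally skip the first image,
    # then take at most n of the rest (n < 0 means all).
    start = 1 if skipfirst else 0
    end = len(images) if (n < 0 or n > len(images)) else start + n
    return images[start:end]

backdrops = ['bd0.jpg', 'bd1.jpg', 'bd2.jpg', 'bd3.jpg', 'bd4.jpg', 'bd5.jpg']
print(mult_images(backdrops, n = 4, skipfirst = True))  # ['bd1.jpg', 'bd2.jpg', 'bd3.jpg', 'bd4.jpg']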

153
couchpotato/core/media/movie/providers/metadata/base.py

@ -4,7 +4,7 @@ import traceback
from couchpotato.core.event import addEvent, fireEvent
from couchpotato.core.helpers.encoding import sp
from couchpotato.core.helpers.variable import getIdentifier
from couchpotato.core.helpers.variable import getIdentifier, underscoreToCamel
from couchpotato.core.logger import CPLog
from couchpotato.core.media._base.providers.metadata.base import MetaDataBase
from couchpotato.environment import Env
@ -38,75 +38,150 @@ class MovieMetaData(MetaDataBase):
movie_info = group['media'].get('info')
for file_type in ['nfo', 'thumbnail', 'fanart']:
for file_type in ['nfo']:
try:
# Get file path
name = getattr(self, 'get' + file_type.capitalize() + 'Name')(meta_name, root)
if name and (self.conf('meta_' + file_type) or self.conf('meta_' + file_type) is None):
# Get file content
content = getattr(self, 'get' + file_type.capitalize())(movie_info = movie_info, data = group)
if content:
log.debug('Creating %s file: %s', (file_type, name))
if os.path.isfile(content):
content = sp(content)
name = sp(name)
shutil.copy2(content, name)
shutil.copyfile(content, name)
# Try and copy stats separately
try: shutil.copystat(content, name)
except: pass
else:
self.createFile(name, content)
group['renamed_files'].append(name)
try:
os.chmod(sp(name), Env.getPermission('file'))
except:
log.debug('Failed setting permissions for %s: %s', (name, traceback.format_exc()))
self._createType(meta_name, root, movie_info, group, file_type, 0)
except:
log.error('Unable to create %s file: %s', ('nfo', traceback.format_exc()))
for file_type in ['thumbnail', 'fanart', 'banner', 'disc_art', 'logo', 'clear_art', 'landscape', 'extra_thumbs', 'extra_fanart']:
try:
if file_type == 'thumbnail':
num_images = len(movie_info['images']['poster_original'])
elif file_type == 'fanart':
num_images = len(movie_info['images']['backdrop_original'])
else:
num_images = len(movie_info['images'][file_type])
for i in range(num_images):
self._createType(meta_name, root, movie_info, group, file_type, i)
except:
log.error('Unable to create %s file: %s', (file_type, traceback.format_exc()))
def _createType(self, meta_name, root, movie_info, group, file_type, i): # Get file path
camelcase_method = underscoreToCamel(file_type.capitalize())
name = getattr(self, 'get' + camelcase_method + 'Name')(meta_name, root, i)
if name and (self.conf('meta_' + file_type) or self.conf('meta_' + file_type) is None):
# Get file content
content = getattr(self, 'get' + camelcase_method)(movie_info = movie_info, data = group, i = i)
if content:
log.debug('Creating %s file: %s', (file_type, name))
if os.path.isfile(content):
content = sp(content)
name = sp(name)
if not os.path.exists(os.path.dirname(name)):
os.makedirs(os.path.dirname(name))
shutil.copy2(content, name)
shutil.copyfile(content, name)
# Try and copy stats separately
try: shutil.copystat(content, name)
except: pass
else:
self.createFile(name, content)
group['renamed_files'].append(name)
try:
os.chmod(sp(name), Env.getPermission('file'))
except:
log.debug('Failed setting permissions for %s: %s', (name, traceback.format_exc()))
def getRootName(self, data = None):
if not data: data = {}
return os.path.join(data['destination_dir'], data['filename'])
def getFanartName(self, name, root):
def getFanartName(self, name, root, i):
return
def getThumbnailName(self, name, root, i):
return
def getBannerName(self, name, root, i):
return
def getClearArtName(self, name, root, i):
return
def getLogoName(self, name, root, i):
return
def getDiscArtName(self, name, root, i):
return
def getLandscapeName(self, name, root, i):
return
def getExtraThumbsName(self, name, root, i):
return
def getThumbnailName(self, name, root):
def getExtraFanartName(self, name, root, i):
return
def getNfoName(self, name, root):
def getNfoName(self, name, root, i):
return
def getNfo(self, movie_info = None, data = None):
def getNfo(self, movie_info = None, data = None, i = 0):
if not data: data = {}
if not movie_info: movie_info = {}
def getThumbnail(self, movie_info = None, data = None, wanted_file_type = 'poster_original'):
def getThumbnail(self, movie_info = None, data = None, wanted_file_type = 'poster_original', i = 0):
if not data: data = {}
if not movie_info: movie_info = {}
# See if it is in current files
files = data['media'].get('files')
if files.get('image_' + wanted_file_type):
if os.path.isfile(files['image_' + wanted_file_type][0]):
return files['image_' + wanted_file_type][0]
if os.path.isfile(files['image_' + wanted_file_type][i]):
return files['image_' + wanted_file_type][i]
# Download using existing info
try:
images = movie_info['images'][wanted_file_type]
file_path = fireEvent('file.download', url = images[0], single = True)
file_path = fireEvent('file.download', url = images[i], single = True)
return file_path
except:
pass
def getFanart(self, movie_info = None, data = None):
def getFanart(self, movie_info = None, data = None, i = 0):
if not data: data = {}
if not movie_info: movie_info = {}
return self.getThumbnail(movie_info = movie_info, data = data, wanted_file_type = 'backdrop_original', i = i)
def getBanner(self, movie_info = None, data = None, i = 0):
if not data: data = {}
if not movie_info: movie_info = {}
return self.getThumbnail(movie_info = movie_info, data = data, wanted_file_type = 'banner', i = i)
def getClearArt(self, movie_info = None, data = None, i = 0):
if not data: data = {}
if not movie_info: movie_info = {}
return self.getThumbnail(movie_info = movie_info, data = data, wanted_file_type = 'clear_art', i = i)
def getLogo(self, movie_info = None, data = None, i = 0):
if not data: data = {}
if not movie_info: movie_info = {}
return self.getThumbnail(movie_info = movie_info, data = data, wanted_file_type = 'logo', i = i)
def getDiscArt(self, movie_info = None, data = None, i = 0):
if not data: data = {}
if not movie_info: movie_info = {}
return self.getThumbnail(movie_info = movie_info, data = data, wanted_file_type = 'disc_art', i = i)
def getLandscape(self, movie_info = None, data = None, i = 0):
if not data: data = {}
if not movie_info: movie_info = {}
return self.getThumbnail(movie_info = movie_info, data= data, wanted_file_type = 'landscape', i = i)
def getExtraThumbs(self, movie_info = None, data = None, i = 0):
if not data: data = {}
if not movie_info: movie_info = {}
return self.getThumbnail(movie_info = movie_info, data = data, wanted_file_type = 'extra_thumbs', i = i)
def getExtraFanart(self, movie_info = None, data = None, i = 0):
if not data: data = {}
if not movie_info: movie_info = {}
return self.getThumbnail(movie_info = movie_info, data = data, wanted_file_type = 'backdrop_original')
return self.getThumbnail(movie_info = movie_info, data = data, wanted_file_type = 'extra_fanart', i = i)
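_createType builds the getter names from the file type string, so 'clear_art' is looked up as getClearArtName / getClearArt via underscoreToCamel. A rough illustration of that dispatch, assuming underscoreToCamel turns 'Clear_art' into 'ClearArt' (the helper itself is not shown in this diff):

def underscore_to_camel(text):
    # Assumed behaviour of the underscoreToCamel helper used by _createType.
    parts = text.split('_')
    return parts[0] + ''.join(p.capitalize() for p in parts[1:])

def getter_names(file_type):
    camel = underscore_to_camel(file_type.capitalize())
    return 'get' + camel + 'Name', 'get' + camel

print(getter_names('clear_art'))     # ('getClearArtName', 'getClearArt')
print(getter_names('extra_fanart'))  # ('getExtraFanartName', 'getExtraFanart')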

36
couchpotato/core/media/movie/providers/metadata/mediabrowser.py

@ -0,0 +1,36 @@
import os
from couchpotato.core.media.movie.providers.metadata.base import MovieMetaData
autoload = 'MediaBrowser'
class MediaBrowser(MovieMetaData):
def getThumbnailName(self, name, root, i):
return os.path.join(root, 'folder.jpg')
def getFanartName(self, name, root, i):
return os.path.join(root, 'backdrop.jpg')
config = [{
'name': 'mediabrowser',
'groups': [
{
'tab': 'renamer',
'subtab': 'metadata',
'name': 'mediabrowser_metadata',
'label': 'MediaBrowser',
'description': 'Generate folder.jpg and backdrop.jpg',
'options': [
{
'name': 'meta_enabled',
'default': False,
'type': 'enabler',
},
],
},
],
}]

33
couchpotato/core/media/movie/providers/metadata/ps3.py

@ -0,0 +1,33 @@
import os
from couchpotato.core.media.movie.providers.metadata.base import MovieMetaData
autoload = 'SonyPS3'
class SonyPS3(MovieMetaData):
def getThumbnailName(self, name, root, i):
return os.path.join(root, 'cover.jpg')
config = [{
'name': 'sonyps3',
'groups': [
{
'tab': 'renamer',
'subtab': 'metadata',
'name': 'sonyps3_metadata',
'label': 'Sony PS3',
'description': 'Generate cover.jpg',
'options': [
{
'name': 'meta_enabled',
'default': False,
'type': 'enabler',
},
],
},
],
}]

2
couchpotato/core/media/movie/providers/metadata/wmc.py

@ -8,7 +8,7 @@ autoload = 'WindowsMediaCenter'
class WindowsMediaCenter(MovieMetaData):
def getThumbnailName(self, name, root):
def getThumbnailName(self, name, root, i):
return os.path.join(root, 'folder.jpg')

141
couchpotato/core/media/movie/providers/metadata/xbmc.py

@ -17,19 +17,43 @@ autoload = 'XBMC'
class XBMC(MovieMetaData):
def getFanartName(self, name, root):
def getFanartName(self, name, root, i):
return self.createMetaName(self.conf('meta_fanart_name'), name, root)
def getThumbnailName(self, name, root):
def getThumbnailName(self, name, root, i):
return self.createMetaName(self.conf('meta_thumbnail_name'), name, root)
def getNfoName(self, name, root):
def getNfoName(self, name, root, i):
return self.createMetaName(self.conf('meta_nfo_name'), name, root)
def getBannerName(self, name, root, i):
return self.createMetaName(self.conf('meta_banner_name'), name, root)
def getClearArtName(self, name, root, i):
return self.createMetaName(self.conf('meta_clear_art_name'), name, root)
def getLogoName(self, name, root, i):
return self.createMetaName(self.conf('meta_logo_name'), name, root)
def getDiscArtName(self, name, root, i):
return self.createMetaName(self.conf('meta_disc_art_name'), name, root)
def getLandscapeName(self, name, root, i):
return self.createMetaName(self.conf('meta_landscape_name'), name, root)
def getExtraThumbsName(self, name, root, i):
return self.createMetaNameMult(self.conf('meta_extra_thumbs_name'), name, root, i)
def getExtraFanartName(self, name, root, i):
return self.createMetaNameMult(self.conf('meta_extra_fanart_name'), name, root, i)
def createMetaName(self, basename, name, root):
return os.path.join(root, basename.replace('%s', name))
def getNfo(self, movie_info = None, data = None):
def createMetaNameMult(self, basename, name, root, i):
return os.path.join(root, basename.replace('%s', name).replace('<i>', str(i + 1)))
def getNfo(self, movie_info=None, data=None, i=0):
if not data: data = {}
if not movie_info: movie_info = {}
@ -129,10 +153,25 @@ class XBMC(MovieMetaData):
for image_url in movie_info['images']['poster_original']:
image = SubElement(nfoxml, 'thumb')
image.text = toUnicode(image_url)
fanart = SubElement(nfoxml, 'fanart')
for image_url in movie_info['images']['backdrop_original']:
image = SubElement(fanart, 'thumb')
image.text = toUnicode(image_url)
image_types = [
('fanart', 'backdrop_original'),
('banner', 'banner'),
('discart', 'disc_art'),
('logo', 'logo'),
('clearart', 'clear_art'),
('landscape', 'landscape'),
('extrathumb', 'extra_thumbs'),
('extrafanart', 'extra_fanart'),
]
for image_type in image_types:
sub, type = image_type
sub_element = SubElement(nfoxml, sub)
for image_url in movie_info['images'][type]:
image = SubElement(sub_element, 'thumb')
image.text = toUnicode(image_url)
# Add trailer if found
trailer_found = False
@ -239,6 +278,92 @@ config = [{
'default': '%s.tbn',
'advanced': True,
},
{
'name': 'meta_banner',
'label': 'Banner',
'default': False,
'type': 'bool'
},
{
'name': 'meta_banner_name',
'label': 'Banner filename',
'default': 'banner.jpg',
'advanced': True,
},
{
'name': 'meta_clear_art',
'label': 'ClearArt',
'default': False,
'type': 'bool'
},
{
'name': 'meta_clear_art_name',
'label': 'ClearArt filename',
'default': 'clearart.png',
'advanced': True,
},
{
'name': 'meta_disc_art',
'label': 'DiscArt',
'default': False,
'type': 'bool'
},
{
'name': 'meta_disc_art_name',
'label': 'DiscArt filename',
'default': 'disc.png',
'advanced': True,
},
{
'name': 'meta_landscape',
'label': 'Landscape',
'default': False,
'type': 'bool'
},
{
'name': 'meta_landscape_name',
'label': 'Landscape filename',
'default': 'landscape.jpg',
'advanced': True,
},
{
'name': 'meta_logo',
'label': 'ClearLogo',
'default': False,
'type': 'bool'
},
{
'name': 'meta_logo_name',
'label': 'ClearLogo filename',
'default': 'logo.png',
'advanced': True,
},
{
'name': 'meta_extra_thumbs',
'label': 'Extrathumbs',
'default': False,
'type': 'bool'
},
{
'name': 'meta_extra_thumbs_name',
'label': 'Extrathumbs filename',
'description': '&lt;i&gt; is the image number, and must be included to have multiple images',
'default': 'extrathumbs/thumb<i>.jpg',
'advanced': True
},
{
'name': 'meta_extra_fanart',
'label': 'Extrafanart',
'default': False,
'type': 'bool'
},
{
'name': 'meta_extra_fanart_name',
'label': 'Extrafanart filename',
'default': 'extrafanart/extrafanart<i>.jpg',
'description': '&lt;i&gt; is the image number, and must be included to have multiple images',
'advanced': True
}
],
},
],
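For the extrathumbs/extrafanart filename settings, the &lt;i&gt; token is the image index; createMetaNameMult substitutes the 1-based number per image, so the defaults above yield extrathumbs/thumb1.jpg, thumb2.jpg and so on. A tiny sketch of that substitution (the movie name and path are illustrative only):

import os

def meta_name_mult(basename, name, root, i):
    # Mirrors createMetaNameMult: '%s' is the movie name, '<i>' the 1-based image index.
    return os.path.join(root, basename.replace('%s', name).replace('<i>', str(i + 1)))

print(meta_name_mult('extrathumbs/thumb<i>.jpg', 'Example Movie', '/movies/Example Movie', 0))
# /movies/Example Movie/extrathumbs/thumb1.jpg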

9
couchpotato/core/media/movie/providers/nzb/newznab.py

@ -11,11 +11,16 @@ autoload = 'Newznab'
class Newznab(MovieProvider, Base):
def buildUrl(self, media, api_key):
def buildUrl(self, media, host):
query = tryUrlencode({
't': 'movie',
'imdbid': getIdentifier(media).replace('tt', ''),
'apikey': api_key,
'apikey': host['api_key'],
'extended': 1
})
if len(host.get('custom_tag', '')) > 0:
query = '%s&%s' % (query, host.get('custom_tag'))
return query
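buildUrl now receives the whole host dict, so the per-host api_key and an optional custom_tag can be appended to the query. A rough example of the resulting query string (host values are made up, and URL-encoding of values is left out here; the real code uses tryUrlencode):

def build_query(imdb_id, host):
    # Same shape as buildUrl: the api key comes from the host dict and an
    # optional custom_tag is appended raw to the encoded query.
    params = [
        ('t', 'movie'),
        ('imdbid', imdb_id.replace('tt', '')),
        ('apikey', host['api_key']),
        ('extended', 1),
    ]
    query = '&'.join('%s=%s' % (k, v) for k, v in params)
    if len(host.get('custom_tag', '')) > 0:
        query = '%s&%s' % (query, host.get('custom_tag'))
    return query

print(build_query('tt1234567', {'api_key': 'abc123', 'custom_tag': 'cat=2040'}))
# t=movie&imdbid=1234567&apikey=abc123&extended=1&cat=2040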

8
couchpotato/core/media/movie/providers/torrent/bithdtv.py

@ -10,10 +10,14 @@ autoload = 'BiTHDTV'
class BiTHDTV(MovieProvider, Base):
cat_ids = [
([2], ['bd50']),
]
cat_backup_id = 7 # Movies
def buildUrl(self, media):
def buildUrl(self, media, quality):
query = tryUrlencode({
'search': fireEvent('library.query', media, single = True),
'cat': 7 # Movie cat
'cat': self.getCatId(quality)[0]
})
return query

8
couchpotato/core/media/movie/providers/torrent/bitsoup.py

@ -1,6 +1,5 @@
from couchpotato.core.helpers.encoding import tryUrlencode
from couchpotato.core.logger import CPLog
from couchpotato.core.event import fireEvent
from couchpotato.core.media._base.providers.torrent.bitsoup import Base
from couchpotato.core.media.movie.providers.base import MovieProvider
@ -18,12 +17,9 @@ class Bitsoup(MovieProvider, Base):
]
cat_backup_id = 0
def buildUrl(self, media, quality):
def buildUrl(self, title, media, quality):
query = tryUrlencode({
'search': '"%s" %s' % (
fireEvent('library.query', media, include_year = False, single = True),
media['info']['year']
),
'search': '"%s" %s' % (title, media['info']['year']),
'cat': self.getCatId(quality)[0],
})
return query

2
couchpotato/core/media/movie/providers/torrent/iptorrents.py

@ -18,6 +18,6 @@ class IPTorrents(MovieProvider, Base):
]
def buildUrl(self, title, media, quality):
query = '%s %s' % (title.replace(':', ''), media['info']['year'])
query = '"%s" %s' % (title.replace(':', ''), media['info']['year'])
return self._buildUrl(query, quality)

2
couchpotato/core/media/movie/providers/torrent/publichd.py

@ -11,4 +11,4 @@ autoload = 'PublicHD'
class PublicHD(MovieProvider, Base):
def buildUrl(self, media):
return fireEvent('library.query', media, single = True)
return fireEvent('library.query', media, single = True).replace(':', '')

7
couchpotato/core/media/movie/providers/torrent/sceneaccess.py

@ -1,5 +1,4 @@
from couchpotato.core.helpers.encoding import tryUrlencode
from couchpotato.core.event import fireEvent
from couchpotato.core.logger import CPLog
from couchpotato.core.media._base.providers.torrent.sceneaccess import Base
from couchpotato.core.media.movie.providers.base import MovieProvider
@ -17,13 +16,13 @@ class SceneAccess(MovieProvider, Base):
([8], ['dvdr']),
]
def buildUrl(self, media, quality):
def buildUrl(self, title, media, quality):
cat_id = self.getCatId(quality)[0]
url = self.urls['search'] % (cat_id, cat_id)
arguments = tryUrlencode({
'search': fireEvent('library.query', media, single = True),
'method': 3,
'search': '%s %s' % (title, media['info']['year']),
'method': 2,
})
query = "%s&%s" % (url, arguments)

2
couchpotato/core/media/movie/providers/torrent/thepiratebay.py

@ -13,7 +13,7 @@ class ThePirateBay(MovieProvider, Base):
cat_ids = [
([209], ['3d']),
([207], ['720p', '1080p']),
([207], ['720p', '1080p', 'bd50']),
([201], ['cam', 'ts', 'dvdrip', 'tc', 'r5', 'scr']),
([201, 207], ['brrip']),
([202], ['dvdr'])

4
couchpotato/core/media/movie/providers/torrent/torrentday.py

@ -1,5 +1,4 @@
from couchpotato.core.logger import CPLog
from couchpotato.core.event import fireEvent
from couchpotato.core.media._base.providers.torrent.torrentday import Base
from couchpotato.core.media.movie.providers.base import MovieProvider
@ -16,6 +15,3 @@ class TorrentDay(MovieProvider, Base):
([3], ['dvdr']),
([5], ['bd50']),
]
def buildUrl(self, media):
return fireEvent('library.query', media, single = True)

5
couchpotato/core/media/movie/providers/torrent/torrentleech.py

@ -1,4 +1,3 @@
from couchpotato import fireEvent
from couchpotato.core.helpers.encoding import tryUrlencode
from couchpotato.core.logger import CPLog
from couchpotato.core.media._base.providers.torrent.torrentleech import Base
@ -21,8 +20,8 @@ class TorrentLeech(MovieProvider, Base):
([12], ['dvdr']),
]
def buildUrl(self, media, quality):
def buildUrl(self, title, media, quality):
return (
tryUrlencode(fireEvent('library.query', media, single = True)),
tryUrlencode(title.replace(':', '')),
self.getCatId(quality)[0]
)

4
couchpotato/core/media/movie/providers/torrent/torrentshack.py

@ -31,6 +31,6 @@ class TorrentShack(MovieProvider, Base):
def buildUrl(self, media, quality):
query = (tryUrlencode(fireEvent('library.query', media, single = True)),
self.getCatId(quality)[0],
self.getSceneOnly())
self.getSceneOnly(),
self.getCatId(quality)[0])
return query

3
couchpotato/core/media/movie/providers/trailer/base.py

@ -11,3 +11,6 @@ class TrailerProvider(Provider):
def __init__(self):
addEvent('trailer.search', self.search)
def search(self, *args, **kwargs):
pass

2
couchpotato/core/media/movie/providers/trailer/hdtrailers.py

@ -1,5 +1,4 @@
from string import digits, ascii_letters
from urllib2 import HTTPError
import re
from bs4 import SoupStrainer, BeautifulSoup
@ -7,6 +6,7 @@ from couchpotato.core.helpers.encoding import tryUrlencode
from couchpotato.core.helpers.variable import mergeDicts, getTitle
from couchpotato.core.logger import CPLog
from couchpotato.core.media.movie.providers.trailer.base import TrailerProvider
from requests import HTTPError
log = CPLog(__name__)

30
couchpotato/core/media/movie/providers/userscript/filmstarts.py

@ -0,0 +1,30 @@
from bs4 import BeautifulSoup
from couchpotato.core.media._base.providers.userscript.base import UserscriptBase
autoload = 'Filmstarts'
class Filmstarts(UserscriptBase):
includes = ['*://www.filmstarts.de/kritiken/*']
def getMovie(self, url):
try:
data = self.getUrl(url)
except:
return
html = BeautifulSoup(data)
table = html.find("table", attrs={"class": "table table-standard thead-standard table-striped_2 fs11"})
if table.find(text='Originaltitel'):
# Get original film title from the table specified above
name = table.find("div", text="Originaltitel").parent.parent.parent.td.text
else:
# If none is available get the title from the meta data
name = html.find("meta", {"property":"og:title"})['content']
# Year of production is not available in the meta data, so get it from the table
year = table.find("tr", text="Produktionsjahr").parent.parent.parent.td.text
return self.search(name, year)

122
couchpotato/core/media/movie/searcher.py

@ -58,13 +58,13 @@ class MovieSearcher(SearcherBase, MovieTypeBase):
def searchAllView(self, **kwargs):
fireEventAsync('movie.searcher.all')
fireEventAsync('movie.searcher.all', manual = True)
return {
'success': not self.in_progress
}
def searchAll(self):
def searchAll(self, manual = False):
if self.in_progress:
log.info('Search already in progress')
@ -91,7 +91,7 @@ class MovieSearcher(SearcherBase, MovieTypeBase):
media = fireEvent('media.get', media_id, single = True)
try:
self.single(media, search_protocols)
self.single(media, search_protocols, manual = manual)
except IndexError:
log.error('Forcing library update for %s, if you see this often, please report: %s', (getIdentifier(media), traceback.format_exc()))
fireEvent('movie.update_info', media_id)
@ -109,7 +109,7 @@ class MovieSearcher(SearcherBase, MovieTypeBase):
self.in_progress = False
def single(self, movie, search_protocols = None, manual = False):
def single(self, movie, search_protocols = None, manual = False, force_download = False):
# Find out search type
try:
@ -126,7 +126,11 @@ class MovieSearcher(SearcherBase, MovieTypeBase):
release_dates = fireEvent('movie.update_release_dates', movie['_id'], merge = True)
found_releases = []
previous_releases = movie.get('releases', [])
too_early_to_search = []
outside_eta_results = 0
alway_search = self.conf('always_search')
ignore_eta = manual
default_title = getTitle(movie)
if not default_title:
@ -136,68 +140,96 @@ class MovieSearcher(SearcherBase, MovieTypeBase):
fireEvent('notify.frontend', type = 'movie.searcher.started', data = {'_id': movie['_id']}, message = 'Searching for "%s"' % default_title)
# Ignore eta once every 7 days
if not alway_search:
prop_name = 'last_ignored_eta.%s' % movie['_id']
last_ignored_eta = float(Env.prop(prop_name, default = 0))
if last_ignored_eta > time.time() - 604800:
ignore_eta = True
Env.prop(prop_name, value = time.time())
db = get_db()
profile = db.get('id', movie['profile_id'])
quality_order = fireEvent('quality.order', single = True)
ret = False
index = 0
for q_identifier in profile.get('qualities'):
quality_custom = {
'index': index,
'quality': q_identifier,
'finish': profile['finish'][index],
'wait_for': profile['wait_for'][index],
'wait_for': tryInt(profile['wait_for'][index]),
'3d': profile['3d'][index] if profile.get('3d') else False
}
index += 1
if not self.conf('always_search') and not self.couldBeReleased(q_identifier in pre_releases, release_dates, movie['info']['year']):
could_not_be_released = not self.couldBeReleased(q_identifier in pre_releases, release_dates, movie['info']['year'])
if not alway_search and could_not_be_released:
too_early_to_search.append(q_identifier)
continue
# Skip release, if ETA isn't ignored
if not ignore_eta:
continue
has_better_quality = 0
# See if better quality is available
for release in movie.get('releases', []):
if quality_order.index(release['quality']) <= quality_order.index(q_identifier) and release['status'] not in ['available', 'ignored', 'failed']:
has_better_quality += 1
if release['status'] not in ['available', 'ignored', 'failed']:
is_higher = fireEvent('quality.ishigher', \
{'identifier': q_identifier, 'is_3d': quality_custom.get('3d', 0)}, \
{'identifier': release['quality'], 'is_3d': release.get('is_3d', 0)}, \
profile, single = True)
if is_higher != 'higher':
has_better_quality += 1
# Don't search for quality lower than already available.
if has_better_quality is 0:
if has_better_quality > 0:
log.info('Better quality (%s) already available or snatched for %s', (q_identifier, default_title))
fireEvent('media.restatus', movie['_id'])
break
quality = fireEvent('quality.single', identifier = q_identifier, single = True)
log.info('Search for %s in %s', (default_title, quality['label']))
quality = fireEvent('quality.single', identifier = q_identifier, single = True)
log.info('Search for %s in %s%s', (default_title, quality['label'], ' ignoring ETA' if alway_search or ignore_eta else ''))
# Extend quality with profile customs
quality['custom'] = quality_custom
# Extend quality with profile customs
quality['custom'] = quality_custom
results = fireEvent('searcher.search', search_protocols, movie, quality, single = True) or []
if len(results) == 0:
log.debug('Nothing found for %s in %s', (default_title, quality['label']))
results = fireEvent('searcher.search', search_protocols, movie, quality, single = True) or []
results_count = len(results)
if results_count == 0:
log.debug('Nothing found for %s in %s', (default_title, quality['label']))
# Check if movie isn't deleted while searching
if not fireEvent('media.get', movie.get('_id'), single = True):
break
# Keep track of releases found outside ETA window
outside_eta_results += results_count if could_not_be_released else 0
# Add them to this movie releases list
found_releases += fireEvent('release.create_from_search', results, movie, quality, single = True)
# Check if movie isn't deleted while searching
if not fireEvent('media.get', movie.get('_id'), single = True):
break
# Try find a valid result and download it
if fireEvent('release.try_download_result', results, movie, quality_custom, manual, single = True):
ret = True
# Add them to this movie releases list
found_releases += fireEvent('release.create_from_search', results, movie, quality, single = True)
# Remove releases that aren't found anymore
for release in movie.get('releases', []):
if release.get('status') == 'available' and release.get('identifier') not in found_releases:
fireEvent('release.delete', release.get('_id'), single = True)
# Don't trigger download, but notify user of available releases
if could_not_be_released:
if results_count > 0:
log.debug('Found %s releases for "%s", but ETA isn\'t correct yet.', (results_count, default_title))
else:
log.info('Better quality (%s) already available or snatched for %s', (q_identifier, default_title))
fireEvent('media.restatus', movie['_id'])
break
# Try find a valid result and download it
if (force_download or not could_not_be_released) and fireEvent('release.try_download_result', results, movie, quality_custom, single = True):
ret = True
# Remove releases that aren't found anymore
temp_previous_releases = []
for release in previous_releases:
if release.get('status') == 'available' and release.get('identifier') not in found_releases:
fireEvent('release.delete', release.get('_id'), single = True)
else:
temp_previous_releases.append(release)
previous_releases = temp_previous_releases
del temp_previous_releases
# Break if CP wants to shut down
if self.shuttingDown() or ret:
@ -206,6 +238,13 @@ class MovieSearcher(SearcherBase, MovieTypeBase):
if len(too_early_to_search) > 0:
log.info2('Too early to search for %s, %s', (too_early_to_search, default_title))
if outside_eta_results > 0:
message = 'Found %s releases for "%s" before ETA. Select and download via the dashboard.' % (outside_eta_results, default_title)
log.info(message)
if not manual:
fireEvent('media.available', message = message, data = {})
fireEvent('notify.frontend', type = 'movie.searcher.ended', data = {'_id': movie['_id']})
return ret
@ -230,8 +269,9 @@ class MovieSearcher(SearcherBase, MovieTypeBase):
preferred_quality = quality if quality else fireEvent('quality.single', identifier = quality['identifier'], single = True)
# Contains lower quality string
if fireEvent('searcher.contains_other_quality', nzb, movie_year = media['info']['year'], preferred_quality = preferred_quality, single = True):
log.info2('Wrong: %s, looking for %s', (nzb['name'], quality['label']))
contains_other = fireEvent('searcher.contains_other_quality', nzb, movie_year = media['info']['year'], preferred_quality = preferred_quality, single = True)
if contains_other != False:
log.info2('Wrong: %s, looking for %s, found %s', (nzb['name'], quality['label'], [x for x in contains_other] if contains_other else 'no quality'))
return False
# Contains lower quality string
@ -288,7 +328,7 @@ class MovieSearcher(SearcherBase, MovieTypeBase):
now_year = date.today().year
now_month = date.today().month
if (year is None or year < now_year - 1) and (not dates or (dates.get('theater', 0) == 0 and dates.get('dvd', 0) == 0)):
if (year is None or year < now_year - 1 or (year <= now_year - 1 and now_month > 4)) and (not dates or (dates.get('theater', 0) == 0 and dates.get('dvd', 0) == 0)):
return True
else:
@ -325,13 +365,13 @@ class MovieSearcher(SearcherBase, MovieTypeBase):
def tryNextReleaseView(self, media_id = None, **kwargs):
trynext = self.tryNextRelease(media_id, manual = True)
trynext = self.tryNextRelease(media_id, manual = True, force_download = True)
return {
'success': trynext
}
def tryNextRelease(self, media_id, manual = False):
def tryNextRelease(self, media_id, manual = False, force_download = False):
try:
db = get_db()
@ -343,7 +383,7 @@ class MovieSearcher(SearcherBase, MovieTypeBase):
movie_dict = fireEvent('media.get', media_id, single = True)
log.info('Trying next release for: %s', getTitle(movie_dict))
self.single(movie_dict, manual = manual)
self.single(movie_dict, manual = manual, force_download = force_download)
return True

1
couchpotato/core/media/movie/suggestion/main.py

@ -84,7 +84,6 @@ class Suggestion(Plugin):
# Get new results and add them
if len(new_suggestions) - 1 < limit:
db = get_db()
active_movies = fireEvent('media.with_status', ['active', 'done'], single = True)
movies = [getIdentifier(x) for x in active_movies]
movies.extend(seen)

1
couchpotato/core/notifications/base.py

@ -15,6 +15,7 @@ class Notification(Provider):
test_message = 'ZOMG Lazors Pewpewpew!'
listen_to = [
'media.available',
'renamer.after', 'movie.snatched',
'updater.available', 'updater.updated',
'core.message.important',

7
couchpotato/core/notifications/core/main.py

@ -28,6 +28,7 @@ class CoreNotifier(Notification):
m_lock = None
listen_to = [
'media.available',
'renamer.after', 'movie.snatched',
'updater.available', 'updater.updated',
'core.message', 'core.message.important',
@ -258,14 +259,14 @@ class CoreNotifier(Notification):
messages = []
# Get unread
# Get last message
if init:
db = get_db()
notifications = db.all('notification_unread', with_doc = True)
notifications = db.all('notification', with_doc = True)
for n in notifications:
if n['doc'].get('time') > (time.time() - 259200):
if n['doc'].get('time') > (time.time() - 604800):
messages.append(n['doc'])
return {

3
couchpotato/core/notifications/nmj.py

@ -22,10 +22,11 @@ class NMJ(Notification):
# noinspection PyMissingConstructor
def __init__(self):
addEvent('renamer.after', self.addToLibrary)
addApiView(self.testNotifyName(), self.test)
addApiView('notify.nmj.auto_config', self.autoConfig)
addEvent('renamer.after', self.addToLibrary)
def autoConfig(self, host = 'localhost', **kwargs):
mount = ''

4
couchpotato/core/notifications/synoindex.py

@ -1,6 +1,7 @@
import os
import subprocess
from couchpotato.api import addApiView
from couchpotato.core.event import addEvent
from couchpotato.core.logger import CPLog
from couchpotato.core.notifications.base import Notification
@ -16,7 +17,8 @@ class Synoindex(Notification):
index_path = '/usr/syno/bin/synoindex'
def __init__(self):
super(Synoindex, self).__init__()
addApiView(self.testNotifyName(), self.test)
addEvent('renamer.after', self.addToLibrary)
def addToLibrary(self, message = None, group = None):

23
couchpotato/core/notifications/xbmc.py

@ -36,7 +36,7 @@ class XBMC(Notification):
if self.use_json_notifications.get(host):
calls = [
('GUI.ShowNotification', {'title': self.default_title, 'message': message, 'image': self.getNotificationImage('small')}),
('GUI.ShowNotification', None, {'title': self.default_title, 'message': message, 'image': self.getNotificationImage('small')}),
]
if data and data.get('destination_dir') and (not self.conf('only_first') or hosts.index(host) == 0):
@ -44,7 +44,7 @@ class XBMC(Notification):
if not self.conf('force_full_scan') and (self.conf('remote_dir_scan') or socket.getfqdn('localhost') == socket.getfqdn(host.split(':')[0])):
param = {'directory': data['destination_dir']}
calls.append(('VideoLibrary.Scan', param))
calls.append(('VideoLibrary.Scan', None, param))
max_successful += len(calls)
response = self.request(host, calls)
@ -52,7 +52,7 @@ class XBMC(Notification):
response = self.notifyXBMCnoJSON(host, {'title': self.default_title, 'message': message})
if data and data.get('destination_dir') and (not self.conf('only_first') or hosts.index(host) == 0):
response += self.request(host, [('VideoLibrary.Scan', {})])
response += self.request(host, [('VideoLibrary.Scan', None, {})])
max_successful += 1
max_successful += 1
@ -75,7 +75,7 @@ class XBMC(Notification):
# XBMC JSON-RPC version request
response = self.request(host, [
('JSONRPC.Version', {})
('JSONRPC.Version', None, {})
])
for result in response:
if result.get('result') and type(result['result']['version']).__name__ == 'int':
@ -112,7 +112,7 @@ class XBMC(Notification):
self.use_json_notifications[host] = True
# send the text message
resp = self.request(host, [('GUI.ShowNotification', {'title':self.default_title, 'message':message, 'image': self.getNotificationImage('small')})])
resp = self.request(host, [('GUI.ShowNotification', None, {'title':self.default_title, 'message':message, 'image': self.getNotificationImage('small')})])
for r in resp:
if r.get('result') and r['result'] == 'OK':
log.debug('Message delivered successfully!')
@ -184,12 +184,13 @@ class XBMC(Notification):
data = []
for req in do_requests:
method, kwargs = req
method, id, kwargs = req
data.append({
'method': method,
'params': kwargs,
'jsonrpc': '2.0',
'id': method,
'id': id if id else method,
})
data = json.dumps(data)
@ -223,7 +224,7 @@ config = [{
'list': 'notification_providers',
'name': 'xbmc',
'label': 'XBMC',
'description': 'v11 (Eden) and v12 (Frodo)',
'description': 'v11 (Eden), v12 (Frodo), v13 (Gotham)',
'options': [
{
'name': 'enabled',
@ -256,7 +257,7 @@ config = [{
'default': 0,
'type': 'bool',
'advanced': True,
'description': 'Only scan new movie folder at remote XBMC servers. Works if movie location is the same.',
'description': ('Only scan new movie folder at remote XBMC servers.', 'Useful if the XBMC path is different from the path CPS uses.'),
},
{
'name': 'force_full_scan',
@ -264,11 +265,11 @@ config = [{
'default': 0,
'type': 'bool',
'advanced': True,
'description': 'Do a full scan instead of only the new movie. Useful if the XBMC path is different from the path CPS uses.',
'description': ('Do a full scan instead of only the new movie.', 'Useful if the XBMC path is different from the path CPS uses.'),
},
{
'name': 'on_snatch',
'default': 0,
'default': False,
'type': 'bool',
'advanced': True,
'description': 'Also send message when movie is snatched.',
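The request() helper now takes each call as a three-element tuple (method, id, kwargs); when id is None the method name is reused as the JSON-RPC id, which is why every call site above gained an extra None argument. A small sketch of how such tuples become the JSON-RPC batch body:

import json

def build_batch(do_requests):
    # Each request is (method, id, kwargs); fall back to the method name as id.
    data = []
    for method, req_id, kwargs in do_requests:
        data.append({
            'method': method,
            'params': kwargs,
            'jsonrpc': '2.0',
            'id': req_id if req_id else method,
        })
    return json.dumps(data)

print(build_batch([
    ('GUI.ShowNotification', None, {'title': 'CouchPotato', 'message': 'Downloaded'}),
    ('VideoLibrary.Scan', None, {}),
]))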

0
couchpotato/core/notifications/xmpp.py → couchpotato/core/notifications/xmpp_.py

41
couchpotato/core/plugins/base.py

@ -1,3 +1,4 @@
from urllib import quote
from urlparse import urlparse
import glob
import inspect
@ -5,7 +6,6 @@ import os.path
import re
import time
import traceback
import urllib2
from couchpotato.core.event import fireEvent, addEvent
from couchpotato.core.helpers.encoding import ss, toSafeString, \
@ -16,7 +16,6 @@ from couchpotato.environment import Env
import requests
from requests.packages.urllib3 import Timeout
from requests.packages.urllib3.exceptions import MaxRetryError
from scandir import scandir
from tornado import template
from tornado.web import StaticFileHandler
@ -41,7 +40,6 @@ class Plugin(object):
http_time_between_calls = 0
http_failed_request = {}
http_failed_disabled = {}
http_opener = requests.Session()
def __new__(cls, *args, **kwargs):
new_plugin = super(Plugin, cls).__new__(cls)
@ -113,7 +111,7 @@ class Plugin(object):
fireEvent('register_%s' % ('script' if ext in 'js' else 'style'), path + os.path.basename(f), f)
def createFile(self, path, content, binary = False):
path = ss(path)
path = sp(path)
self.makeDir(os.path.dirname(path))
@ -131,7 +129,7 @@ class Plugin(object):
os.remove(path)
def makeDir(self, path):
path = ss(path)
path = sp(path)
try:
if not os.path.isdir(path):
os.makedirs(path, Env.getPermission('folder'))
@ -141,19 +139,25 @@ class Plugin(object):
return False
def deleteEmptyFolder(self, folder, show_error = True):
def deleteEmptyFolder(self, folder, show_error = True, only_clean = None):
folder = sp(folder)
for root, dirs, files in scandir.walk(folder):
for item in os.listdir(folder):
full_folder = os.path.join(folder, item)
for dir_name in dirs:
full_path = os.path.join(root, dir_name)
if len(os.listdir(full_path)) == 0:
try:
os.rmdir(full_path)
except:
if show_error:
log.error('Couldn\'t remove empty directory %s: %s', (full_path, traceback.format_exc()))
if not only_clean or (item in only_clean and os.path.isdir(full_folder)):
for root, dirs, files in os.walk(full_folder):
for dir_name in dirs:
full_path = os.path.join(root, dir_name)
if len(os.listdir(full_path)) == 0:
try:
os.rmdir(full_path)
except:
if show_error:
log.error('Couldn\'t remove empty directory %s: %s', (full_path, traceback.format_exc()))
try:
os.rmdir(folder)
@ -163,7 +167,7 @@ class Plugin(object):
# http request
def urlopen(self, url, timeout = 30, data = None, headers = None, files = None, show_error = True):
url = urllib2.quote(ss(url), safe = "%/:=&?~#+!$,;'@()*[]")
url = quote(ss(url), safe = "%/:=&?~#+!$,;'@()*[]")
if not headers: headers = {}
if not data: data = {}
@ -179,7 +183,7 @@ class Plugin(object):
headers['Connection'] = headers.get('Connection', 'keep-alive')
headers['Cache-Control'] = headers.get('Cache-Control', 'max-age=0')
r = self.http_opener
r = Env.get('http_opener')
# Don't try for failed requests
if self.http_failed_disabled.get(host, 0) > 0:
@ -201,11 +205,12 @@ class Plugin(object):
'data': data if len(data) > 0 else None,
'timeout': timeout,
'files': files,
'verify': False, #verify_ssl, Disabled for now as too many certificates are wrongly implemented..
}
method = 'post' if len(data) > 0 or files else 'get'
log.info('Opening url: %s %s, data: %s', (method, url, [x for x in data.keys()] if isinstance(data, dict) else 'with data'))
response = r.request(method, url, verify = False, **kwargs)
response = r.request(method, url, **kwargs)
if response.status_code == requests.codes.ok:
data = response.content
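deleteEmptyFolder gained an only_clean filter: only the listed top-level sub-folders are walked for empty directories, instead of walking everything under the root. A stripped-down sketch of that walk (the example paths and folder names are illustrative only, and the bottom-up walk is a simplification of the original loop):

import os

def delete_empty_folders(folder, only_clean = None):
    # Walk either everything or just the sub-folders named in only_clean,
    # removing directories that turn out to be empty.
    for item in os.listdir(folder):
        full_folder = os.path.join(folder, item)
        if only_clean and item not in only_clean:
            continue
        if not os.path.isdir(full_folder):
            continue
        for root, dirs, files in os.walk(full_folder, topdown = False):
            for dir_name in dirs:
                full_path = os.path.join(root, dir_name)
                if not os.listdir(full_path):
                    os.rmdir(full_path)

# e.g. delete_empty_folders('/downloads/complete', only_clean = ('extrathumbs', 'extrafanart'))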

15
couchpotato/core/plugins/dashboard.py

@ -79,7 +79,20 @@ class Dashboard(Plugin):
# Don't list older movies
if ((not late and (media['info']['year'] >= now_year - 1) and (not eta.get('dvd') and not eta.get('theater') or eta.get('dvd') and eta.get('dvd') > (now - 2419200))) or
(late and (media['info']['year'] < now_year - 1 or (eta.get('dvd', 0) > 0 or eta.get('theater')) and eta.get('dvd') < (now - 2419200)))):
medias.append(media)
add = True
# Check if it doesn't have any releases
if late:
media['releases'] = fireEvent('release.for_media', media['_id'], single = True)
for release in media.get('releases'):
if release.get('status') in ['snatched', 'available', 'seeding', 'downloaded']:
add = False
break
if add:
medias.append(media)
if len(medias) >= limit:
break

7
couchpotato/core/plugins/file.py

@ -9,7 +9,6 @@ from couchpotato.core.helpers.variable import md5, getExt
from couchpotato.core.logger import CPLog
from couchpotato.core.plugins.base import Plugin
from couchpotato.environment import Env
from scandir import scandir
from tornado.web import StaticFileHandler
@ -49,9 +48,9 @@ class FileManager(Plugin):
for x in file_dict.keys():
files.extend(file_dict[x])
for f in scandir.scandir(cache_dir):
if os.path.splitext(f.name)[1] in ['.png', '.jpg', '.jpeg']:
file_path = os.path.join(cache_dir, f.name)
for f in os.listdir(cache_dir):
if os.path.splitext(f)[1] in ['.png', '.jpg', '.jpeg']:
file_path = os.path.join(cache_dir, f)
if toUnicode(file_path) not in files:
os.remove(file_path)
except:

62
couchpotato/core/plugins/log/main.py

@ -1,9 +1,10 @@
import os
import re
import traceback
from couchpotato.api import addApiView
from couchpotato.core.helpers.encoding import toUnicode
from couchpotato.core.helpers.variable import tryInt
from couchpotato.core.helpers.variable import tryInt, splitString
from couchpotato.core.logger import CPLog
from couchpotato.core.plugins.base import Plugin
from couchpotato.environment import Env
@ -22,7 +23,11 @@ class Logging(Plugin):
},
'return': {'type': 'object', 'example': """{
'success': True,
'log': string, //Log file
'log': [{
'time': '03-12 09:12:59',
'type': 'INFO',
'message': 'Log message'
}, ..], //Log file
'total': int, //Total log files available
}"""}
})
@ -34,7 +39,11 @@ class Logging(Plugin):
},
'return': {'type': 'object', 'example': """{
'success': True,
'log': string, //Log file
'log': [{
'time': '03-12 09:12:59',
'type': 'INFO',
'message': 'Log message'
}, ..]
}"""}
})
addApiView('logging.clear', self.clear, docs = {
@ -71,16 +80,18 @@ class Logging(Plugin):
if current_path:
f = open(current_path, 'r')
log_content = f.read()
logs = self.toList(log_content)
return {
'success': True,
'log': toUnicode(log_content),
'log': logs,
'total': total,
}
def partial(self, type = 'all', lines = 30, **kwargs):
def partial(self, type = 'all', lines = 30, offset = 0, **kwargs):
total_lines = tryInt(lines)
offset = tryInt(offset)
log_lines = []
@ -93,28 +104,57 @@ class Logging(Plugin):
break
f = open(path, 'r')
reversed_lines = toUnicode(f.read()).split('[0m\n')
reversed_lines.reverse()
log_content = toUnicode(f.read())
raw_lines = self.toList(log_content)
raw_lines.reverse()
brk = False
for line in reversed_lines:
for line in raw_lines:
if type == 'all' or '%s ' % type.upper() in line:
if type == 'all' or line.get('type') == type.upper():
log_lines.append(line)
if len(log_lines) >= total_lines:
if len(log_lines) >= (total_lines + offset):
brk = True
break
if brk:
break
log_lines = log_lines[offset:]
log_lines.reverse()
return {
'success': True,
'log': '[0m\n'.join(log_lines),
'log': log_lines,
}
def toList(self, log_content = ''):
logs_raw = toUnicode(log_content).split('[0m\n')
logs = []
for log_line in logs_raw:
split = splitString(log_line, '\x1b')
if split:
try:
date, time, log_type = splitString(split[0], ' ')
timestamp = '%s %s' % (date, time)
except:
timestamp = 'UNKNOWN'
log_type = 'UNKNOWN'
message = ''.join(split[1]) if len(split) > 1 else split[0]
message = re.sub('\[\d+m\[', '[', message)
logs.append({
'time': timestamp,
'type': log_type,
'message': message
})
return logs
def clear(self, **kwargs):
for x in range(0, 50):

146
couchpotato/core/plugins/log/static/log.css

@ -16,10 +16,14 @@
display: inline-block;
padding: 5px 10px;
margin: 0;
}
.page.log .nav li.select,
.page.log .nav li.clear {
cursor: pointer;
}
.page.log .nav li:hover:not(.active) {
.page.log .nav li:hover:not(.active):not(.filter) {
background: rgba(255, 255, 255, 0.1);
}
@ -39,6 +43,19 @@
}
}
.page.log .nav li.hint {
text-align: center;
width: 400px;
left: 50%;
margin-left: -200px;
font-style: italic;
font-size: 11px;
position: absolute;
right: 20px;
opacity: .5;
bottom: 5px;
}
.page.log .loading {
text-align: center;
font-size: 20px;
@ -50,28 +67,133 @@
overflow: hidden;
line-height: 150%;
font-size: 11px;
font-family: Lucida Console, Monaco, Nimbus Mono L, monospace, serif;
color: #FFF;
}
.page.log .container .error {
color: #FFA4A4;
white-space: pre-wrap;
.page.log .container select {
vertical-align: top;
}
.page.log .container .debug { color: lightgrey; }
.page.log .container .time {
clear: both;
color: lightgrey;
padding: 3px 0;
font-size: 10px;
border-top: 1px solid rgba(255, 255, 255, 0.2);
border-top: 1px solid rgba(255, 255, 255, 0.1);
position: relative;
overflow: hidden;
padding: 0 3px;
font-family: Lucida Console, Monaco, Nimbus Mono L, monospace, serif;
}
.page.log .container .time.highlight {
background: rgba(255, 255, 255, 0.1);
}
.page.log .container .time span {
padding: 5px 0 3px;
display: inline-block;
vertical-align: middle;
}
.page.log .container .time:last-child { display: none; }
.page.log[data-filter=INFO] .error,
.page.log[data-filter=INFO] .debug,
.page.log[data-filter=ERROR] .debug,
.page.log[data-filter=ERROR] .info,
.page.log[data-filter=DEBUG] .info,
.page.log[data-filter=DEBUG] .error {
display: none;
}
.page.log .container .time span {
float: right;
width: 86%;
.page.log .container .type {
margin-left: 10px;
}
.page.log .container .message {
float: right;
width: 86%;
white-space: pre-wrap;
}
.page.log .container .error { color: #FFA4A4; }
.page.log .container .debug span { opacity: .6; }
.do_report {
position: absolute;
padding: 10px;
}
.page.log .report {
position: fixed;
width: 100%;
height: 100%;
background: rgba(0,0,0,.7);
left: 0;
top: 0;
z-index: 99999;
font-size: 14px;
}
.page.log .report .button {
display: inline-block;
margin: 10px 0;
padding: 10px;
}
.page.log .report .bug {
width: 800px;
height: 80%;
position: absolute;
left: 50%;
top: 50%;
margin: 0 0 0 -400px;
transform: translate(0, -50%);
}
.page.log .report .bug textarea {
display: block;
width: 100%;
background: #FFF;
padding: 20px;
overflow: auto;
color: #666;
height: 70%;
font-size: 12px;
}
.page.log .container .time ::-webkit-selection {
background-color: #000;
color: #FFF;
}
.page.log .container .time ::-moz-selection {
background-color: #000;
color: #FFF;
}
.page.log .container .time ::-ms-selection {
background-color: #000;
color: #FFF;
}
.page.log .container .time.highlight ::selection {
background-color: transparent;
color: inherit;
}
.page.log .container .time.highlight ::-webkit-selection {
background-color: transparent;
color: inherit;
}
.page.log .container .time.highlight ::-moz-selection {
background-color: transparent;
color: inherit;
}
.page.log .container .time.highlight ::-ms-selection {
background-color: transparent;
color: inherit;
}
.page.log .container .time.highlight ::selection {
background-color: transparent;
color: inherit;
}

275
couchpotato/core/plugins/log/static/log.js

@ -2,80 +2,295 @@ Page.Log = new Class({
Extends: PageBase,
order: 60,
name: 'log',
title: 'Show recent logs.',
has_tab: false,
indexAction: function(){
log_items: [],
report_text: '\
### Steps to reproduce:\n\
1. ..\n\
2. ..\n\
\n\
### Information:\n\
Movie(s) I have this with: ...\n\
Quality of the movie being searched: ...\n\
Providers I use: ...\n\
Version of CouchPotato: {version}\n\
Running on: ...\n\
\n\
### Logs:\n\
```\n{issue}```',
indexAction: function () {
var self = this;
self.getLogs(0);
},
getLogs: function(nr){
getLogs: function (nr) {
var self = this;
if(self.log) self.log.destroy();
if (self.log) self.log.destroy();
self.log = new Element('div.container.loading', {
'text': 'loading...'
'text': 'loading...',
'events': {
'mouseup:relay(.time)': function(e){
self.showSelectionButton.delay(100, self, e);
}
}
}).inject(self.el);
Api.request('logging.get', {
'data': {
'nr': nr
},
'onComplete': function(json){
self.log.set('html', self.addColors(json.log));
'onComplete': function (json) {
self.log.set('text', '');
self.log_items = self.createLogElements(json.log);
self.log.adopt(self.log_items);
self.log.removeClass('loading');
new Fx.Scroll(window, {'duration': 0}).toBottom();
var nav = new Element('ul.nav', {
'events': {
'click:relay(li.select)': function (e, el) {
self.getLogs(parseInt(el.get('text')) - 1);
}
}
});
var nav = new Element('ul.nav').inject(self.log, 'top');
for (var i = 0; i <= json.total; i++) {
new Element('li', {
'text': i+1,
'class': nr == i ? 'active': '',
// Type selection
new Element('li.filter').grab(
new Element('select', {
'events': {
'click': function(e){
self.getLogs(e.target.get('text')-1);
'change': function () {
var type_filter = this.getSelected()[0].get('value');
self.el.set('data-filter', type_filter);
self.scrollToBottom();
}
}
}).adopt(
new Element('option', {'value': 'ALL', 'text': 'Show all logs'}),
new Element('option', {'value': 'INFO', 'text': 'Show only INFO'}),
new Element('option', {'value': 'DEBUG', 'text': 'Show only DEBUG'}),
new Element('option', {'value': 'ERROR', 'text': 'Show only ERROR'})
)
).inject(nav);
// Selections
for (var i = 0; i <= json.total; i++) {
new Element('li', {
'text': i + 1,
'class': 'select ' + (nr == i ? 'active' : '')
}).inject(nav);
}
new Element('li', {
// Clear button
new Element('li.clear', {
'text': 'clear',
'events': {
'click': function(){
'click': function () {
Api.request('logging.clear', {
'onComplete': function(){
'onComplete': function () {
self.getLogs(0);
}
});
}
}
}).inject(nav)
}).inject(nav);
// Hint
new Element('li.hint', {
'text': 'Select multiple lines & report an issue'
}).inject(nav);
// Add to page
nav.inject(self.log, 'top');
self.scrollToBottom();
}
});
},
addColors: function(text){
createLogElements: function (logs) {
var elements = [];
logs.each(function (log) {
elements.include(new Element('div', {
'class': 'time ' + log.type.toLowerCase()
}).adopt(
new Element('span', {
'text': log.time
}),
new Element('span.type', {
'text': log.type
}),
new Element('span.message', {
'text': log.message
})
))
});
return elements;
},
scrollToBottom: function () {
new Fx.Scroll(window, {'duration': 0}).toBottom();
},
showSelectionButton: function(e){
var self = this,
selection = self.getSelected(),
start_node = selection.anchorNode,
parent_start = start_node.parentNode.getParent('.time'),
end_node = selection.focusNode.parentNode.getParent('.time'),
text = '';
var remove_button = function(){
self.log.getElements('.highlight').removeClass('highlight');
if(self.do_report)
self.do_report.destroy();
document.body.removeEvent('click', remove_button);
};
remove_button();
if(parent_start)
start_node = parent_start;
var index = {
'start': self.log_items.indexOf(start_node),
'end': self.log_items.indexOf(end_node)
};
if(index.start > index.end){
index = {
'start': index.end,
'end': index.start
};
}
var nodes = self.log_items.slice(index.start, index.end + 1);
nodes.each(function(node, nr){
node.addClass('highlight');
node.getElements('span').each(function(span){
text += self.spaceFill(span.get('text') + ' ', 6);
});
text += '\n';
});
self.do_report = new Element('a.do_report.button', {
'text': 'Report issue',
'styles': {
'top': e.page.y,
'left': e.page.x
},
'events': {
'click': function(e){
(e).stop();
self.showReport(text);
}
}
}).inject(document.body);
setTimeout(function(){
document.body.addEvent('click', remove_button);
}, 0);
},
showReport: function(text){
var self = this,
version = Updater.getInfo(),
body = self.report_text
.replace('{issue}', text)
.replace('{version}', version ? version.version.repr : '...'),
textarea;
var overlay = new Element('div.report', {
'method': 'post',
'events': {
'click': function(e){
overlay.destroy();
}
}
}).grab(
new Element('div.bug', {
'events': {
'click': function(e){
(e).stopPropagation();
}
}
}).adopt(
new Element('h1', {
'text': 'Report a bug'
}),
new Element('span').adopt(
new Element('span', {
'text': 'Read '
}),
new Element('a.button', {
'target': '_blank',
'text': 'the contributing guide',
'href': 'https://github.com/RuudBurger/CouchPotatoServer/blob/develop/contributing.md'
}),
new Element('span', {
'text': ' before posting, then copy the text below'
})
),
textarea = new Element('textarea', {
'text': body,
'events': {
'click': function(){
this.select();
}
}
}),
new Element('a.button', {
'target': '_blank',
'text': 'Create a new issue on GitHub with the text above',
'href': 'https://github.com/RuudBurger/CouchPotatoServer/issues/new',
'events': {
'click': function(e){
(e).stop();
var body = textarea.get('value'),
bdy = '?body=' + (body.length < 2000 ? encodeURIComponent(body) : 'Paste the text here'),
win = window.open(e.target.get('href') + bdy, '_blank');
win.focus();
}
}
})
)
);
overlay.inject(self.log);
},
getSelected: function(){
if (window.getSelection)
return window.getSelection();
else if (document.getSelection)
return document.getSelection();
else {
var selection = document.selection && document.selection.createRange();
if (selection.text)
return selection.text;
}
return false;
text = text
.replace(/&/g, '&amp;')
.replace(/</g, '&lt;')
.replace(/>/g, '&gt;')
.replace(/"/g, '&quot;')
.replace(/\u001b\[31m/gi, '</span><span class="error">')
.replace(/\u001b\[36m/gi, '</span><span class="debug">')
.replace(/\u001b\[33m/gi, '</span><span class="debug">')
.replace(/\u001b\[0m\n/gi, '</div><div class="time">')
.replace(/\u001b\[0m/gi, '</span><span>');
},
return '<div class="time">' + text + '</div>';
spaceFill: function( number, width ){
if ( number.toString().length >= width )
return number;
return ( new Array( width ).join( ' ' ) + number.toString() ).substr( -width );
}
});

63
couchpotato/core/plugins/manage.py

@ -4,7 +4,6 @@ import sys
import time
import traceback
from couchpotato import get_db
from couchpotato.api import addApiView
from couchpotato.core.event import fireEvent, addEvent, fireEventAsync
from couchpotato.core.helpers.encoding import sp
@ -33,7 +32,7 @@ class Manage(Plugin):
# Add files after renaming
def after_rename(message = None, group = None):
if not group: group = {}
return self.scanFilesToLibrary(folder = group['destination_dir'], files = group['renamed_files'])
return self.scanFilesToLibrary(folder = group['destination_dir'], files = group['renamed_files'], release_download = group['release_download'])
addEvent('renamer.after', after_rename, priority = 110)
addApiView('manage.update', self.updateLibraryView, docs = {
@ -53,6 +52,20 @@ class Manage(Plugin):
if not Env.get('dev') and self.conf('startup_scan'):
addEvent('app.load', self.updateLibraryQuick)
addEvent('app.load', self.setCrons)
# Enable / disable interval
addEvent('setting.save.manage.library_refresh_interval.after', self.setCrons)
def setCrons(self):
fireEvent('schedule.remove', 'manage.update_library')
refresh = tryInt(self.conf('library_refresh_interval'))
if refresh > 0:
fireEvent('schedule.interval', 'manage.update_library', self.updateLibrary, hours = refresh, single = True)
return True
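The new setCrons() wires the library_refresh_interval setting to the scheduler: on every save it drops the existing job and, if the interval is non-zero, registers a fresh one. A toy model of that enable/disable pattern (the Scheduler class here is a stand-in, not CouchPotato's event system):

class Scheduler(object):
    def __init__(self):
        self.jobs = {}
    def remove(self, identifier):
        self.jobs.pop(identifier, None)
    def interval(self, identifier, handler, hours = 0):
        self.jobs[identifier] = (handler, hours)

scheduler = Scheduler()

def set_crons(refresh_hours):
    scheduler.remove('manage.update_library')
    if refresh_hours > 0:  # 0 means the full refresh stays disabled
        scheduler.interval('manage.update_library', lambda: 'updateLibrary', hours = refresh_hours)

set_crons(12)  # schedule a full refresh every 12 hours
set_crons(0)   # setting saved as 0: the job is removed again
print(scheduler.jobs)  # {}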
def getProgress(self, **kwargs):
return {
'progress': self.in_progress
@ -71,7 +84,8 @@ class Manage(Plugin):
return self.updateLibrary(full = False)
def updateLibrary(self, full = True):
last_update = float(Env.prop('manage.last_update', default = 0))
last_update_key = 'manage.last_update%s' % ('_full' if full else '')
last_update = float(Env.prop(last_update_key, default = 0))
if self.in_progress:
log.info('Already updating library: %s', self.in_progress)
@ -120,7 +134,7 @@ class Manage(Plugin):
if self.conf('cleanup') and full and not self.shuttingDown():
# Get movies with done status
total_movies, done_movies = fireEvent('media.list', types = 'movie', status = 'done', single = True)
total_movies, done_movies = fireEvent('media.list', types = 'movie', status = 'done', release_status = 'done', status_or = True, single = True)
for done_movie in done_movies:
if getIdentifier(done_movie) not in added_identifiers:
@ -131,12 +145,16 @@ class Manage(Plugin):
for release in releases:
if release.get('files'):
brk = False
for file_type in release.get('files', {}):
for release_file in release['files'][file_type]:
# Remove release not available anymore
if not os.path.isfile(sp(release_file)):
fireEvent('release.clean', release['_id'])
brk = True
break
if brk:
break
# Check if there are duplicate releases (different quality); use the last one, delete the rest
if len(releases) > 1:
@ -147,16 +165,22 @@ class Manage(Plugin):
already_used = used_files.get(release_file)
if already_used:
# delete current one
if already_used.get('last_edit', 0) < release.get('last_edit', 0):
fireEvent('release.delete', release['_id'], single = True) # delete current one
fireEvent('release.delete', release['_id'], single = True)
# delete previous one
else:
fireEvent('release.delete', already_used['_id'], single = True) # delete previous one
fireEvent('release.delete', already_used['_id'], single = True)
break
else:
used_files[release_file] = release
del used_files
Env.prop('manage.last_update', time.time())
# Break if CP wants to shut down
if self.shuttingDown():
break
Env.prop(last_update_key, time.time())
except:
log.error('Failed updating library: %s', (traceback.format_exc()))
@ -186,14 +210,14 @@ class Manage(Plugin):
'to_go': total_found,
})
self.updateProgress(folder, to_go)
if group['media'] and group['identifier']:
added_identifiers.append(group['identifier'])
# Add it to release and update the info
fireEvent('release.add', group = group, update_info = False)
fireEvent('movie.update_info', identifier = group['identifier'], on_complete = self.createAfterUpdate(folder, group['identifier']))
else:
self.updateProgress(folder)
return addToLibrary
@ -204,7 +228,6 @@ class Manage(Plugin):
if not self.in_progress or self.shuttingDown():
return
self.updateProgress(folder)
total = self.in_progress[folder]['total']
movie_dict = fireEvent('media.get', identifier, single = True)
@ -212,10 +235,11 @@ class Manage(Plugin):
return afterUpdate
def updateProgress(self, folder):
def updateProgress(self, folder, to_go):
pr = self.in_progress[folder]
pr['to_go'] -= 1
if to_go < pr['to_go']:
pr['to_go'] = to_go
avg = (time.time() - pr['started']) / (pr['total'] - pr['to_go'])
pr['eta'] = tryInt(avg * pr['to_go'])
@ -230,7 +254,7 @@ class Manage(Plugin):
return []
def scanFilesToLibrary(self, folder = None, files = None):
def scanFilesToLibrary(self, folder = None, files = None, release_download = None):
folder = os.path.normpath(folder)
@ -239,7 +263,10 @@ class Manage(Plugin):
if groups:
for group in groups.values():
if group.get('media'):
fireEvent('release.add', group = group)
if release_download and release_download.get('release_id'):
fireEvent('release.add', group = group, update_id = release_download.get('release_id'))
else:
fireEvent('release.add', group = group)
def getDiskSpace(self):
@ -302,6 +329,14 @@ config = [{
'advanced': True,
'description': 'Do a quick scan on startup. On slow systems it is better to disable this.',
},
{
'label': 'Full library refresh',
'name': 'library_refresh_interval',
'type': 'int',
'default': 0,
'advanced': True,
'description': 'Do a full scan every X hours. (0 is disabled)',
},
],
},
],

15
couchpotato/core/plugins/profile/main.py

@ -34,13 +34,22 @@ class ProfilePlugin(Plugin):
})
addEvent('app.initialize', self.fill, priority = 90)
addEvent('app.load', self.forceDefaults)
addEvent('app.load', self.forceDefaults, priority = 110)
def forceDefaults(self):
db = get_db()
# Fill qualities and profiles if they are empty somehow..
if db.count(db.all, 'profile') == 0:
if db.count(db.all, 'quality') == 0:
fireEvent('quality.fill', single = True)
self.fill()
# Get all active movies without profile
try:
db = get_db()
medias = fireEvent('media.with_status', 'active', single = True)
profile_ids = [x.get('_id') for x in self.all()]
@ -87,7 +96,7 @@ class ProfilePlugin(Plugin):
order = 0
for type in kwargs.get('types', []):
profile['qualities'].append(type.get('quality'))
profile['wait_for'].append(tryInt(type.get('wait_for')))
profile['wait_for'].append(tryInt(kwargs.get('wait_for', 0)))
profile['finish'].append((tryInt(type.get('finish')) == 1) if order > 0 else True)
profile['3d'].append(tryInt(type.get('3d')))
order += 1
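After this change a saved profile carries a single wait_for value (taken once from the form) repeated per quality slot, instead of one value per quality row. A small illustration of the resulting document shape (values are made up):

kwargs = {'wait_for': 3, 'types': [
    {'quality': '720p', 'finish': 1, '3d': 0},
    {'quality': '1080p', 'finish': 0, '3d': 0},
]}

profile = {'qualities': [], 'wait_for': [], 'finish': [], '3d': []}
for order, quality_type in enumerate(kwargs['types']):
    profile['qualities'].append(quality_type['quality'])
    profile['wait_for'].append(int(kwargs.get('wait_for', 0)))
    profile['finish'].append(bool(quality_type['finish']) if order > 0 else True)
    profile['3d'].append(int(quality_type['3d']))

print(profile['wait_for'])  # [3, 3] -> one shared value, repeated per quality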

6
couchpotato/core/plugins/profile/static/profile.css

@ -159,9 +159,6 @@
}
#profile_ordering li {
cursor: -webkit-grab;
cursor: -moz-grab;
cursor: grab;
border-bottom: 1px solid rgba(255,255,255,0.2);
padding: 0 5px;
}
@ -183,6 +180,9 @@
background: url('../../images/handle.png') center;
width: 20px;
float: right;
cursor: -webkit-grab;
cursor: -moz-grab;
cursor: grab;
}
#profile_ordering .formHint {

11
couchpotato/core/plugins/profile/static/profile.js

@ -41,7 +41,7 @@ var Profile = new Class({
new Element('span', {'text':'Wait'}),
new Element('input.inlay.xsmall', {
'type':'text',
'value': data.types && data.types.length > 0 ? data.types[0].wait_for : 0
'value': data.wait_for && data.wait_for.length > 0 ? data.wait_for[0] : 0
}),
new Element('span', {'text':'day(s) for a better quality.'})
),
@ -63,8 +63,7 @@ var Profile = new Class({
data.types.include({
'quality': quality,
'finish': data.finish[nr] || false,
'3d': data['3d'] ? data['3d'][nr] || false : false,
'wait_for': data.wait_for[nr] || 0
'3d': data['3d'] ? data['3d'][nr] || false : false
})
});
}
@ -126,8 +125,7 @@ var Profile = new Class({
data.types.include({
'quality': type.getElement('select').get('value'),
'finish': +type.getElement('input.finish[type=checkbox]').checked,
'3d': +type.getElement('input.3d[type=checkbox]').checked,
'wait_for': 0
'3d': +type.getElement('input.3d[type=checkbox]').checked
});
});
@ -340,8 +338,7 @@ Profile.Type = new Class({
return {
'quality': self.qualities.get('value'),
'finish': +self.finish.checked,
'3d': +self['3d'].checked,
'wait_for': 0
'3d': +self['3d'].checked
}
},

191
couchpotato/core/plugins/quality/main.py

@ -1,5 +1,6 @@
import traceback
import re
from CodernityDB.database import RecordNotFound
from couchpotato import get_db
from couchpotato.api import addApiView
@ -21,11 +22,11 @@ class QualityPlugin(Plugin):
}
qualities = [
{'identifier': 'bd50', 'hd': True, 'allow_3d': True, 'size': (15000, 60000), 'label': 'BR-Disk', 'alternative': ['bd25'], 'allow': ['1080p'], 'ext':[], 'tags': ['bdmv', 'certificate', ('complete', 'bluray')]},
{'identifier': 'bd50', 'hd': True, 'allow_3d': True, 'size': (20000, 60000), 'label': 'BR-Disk', 'alternative': ['bd25'], 'allow': ['1080p'], 'ext':['iso', 'img'], 'tags': ['bdmv', 'certificate', ('complete', 'bluray'), 'avc', 'mvc']},
{'identifier': '1080p', 'hd': True, 'allow_3d': True, 'size': (4000, 20000), 'label': '1080p', 'width': 1920, 'height': 1080, 'alternative': [], 'allow': [], 'ext':['mkv', 'm2ts'], 'tags': ['m2ts', 'x264', 'h264']},
{'identifier': '720p', 'hd': True, 'allow_3d': True, 'size': (3000, 10000), 'label': '720p', 'width': 1280, 'height': 720, 'alternative': [], 'allow': [], 'ext':['mkv', 'ts'], 'tags': ['x264', 'h264']},
{'identifier': 'brrip', 'hd': True, 'size': (700, 7000), 'label': 'BR-Rip', 'alternative': ['bdrip'], 'allow': ['720p', '1080p'], 'ext':[], 'tags': ['hdtv', 'hdrip', 'webdl', ('web', 'dl')]},
{'identifier': 'dvdr', 'size': (3000, 10000), 'label': 'DVD-R', 'alternative': ['br2dvd'], 'allow': [], 'ext':['iso', 'img', 'vob'], 'tags': ['pal', 'ntsc', 'video_ts', 'audio_ts', ('dvd', 'r')]},
{'identifier': 'brrip', 'hd': True, 'allow_3d': True, 'size': (700, 7000), 'label': 'BR-Rip', 'alternative': ['bdrip'], 'allow': ['720p', '1080p'], 'ext':[], 'tags': ['hdtv', 'hdrip', 'webdl', ('web', 'dl')]},
{'identifier': 'dvdr', 'size': (3000, 10000), 'label': 'DVD-R', 'alternative': ['br2dvd'], 'allow': [], 'ext':['iso', 'img', 'vob'], 'tags': ['pal', 'ntsc', 'video_ts', 'audio_ts', ('dvd', 'r'), 'dvd9']},
{'identifier': 'dvdrip', 'size': (600, 2400), 'label': 'DVD-Rip', 'width': 720, 'alternative': [], 'allow': [], 'ext':[], 'tags': [('dvd', 'rip'), ('dvd', 'xvid'), ('dvd', 'divx')]},
{'identifier': 'scr', 'size': (600, 1600), 'label': 'Screener', 'alternative': ['screener', 'dvdscr', 'ppvrip', 'dvdscreener', 'hdscr'], 'allow': ['dvdr', 'dvdrip', '720p', '1080p'], 'ext':[], 'tags': ['webrip', ('web', 'rip')]},
{'identifier': 'r5', 'size': (600, 1000), 'label': 'R5', 'alternative': ['r6'], 'allow': ['dvdr'], 'ext':[]},
@ -35,9 +36,9 @@ class QualityPlugin(Plugin):
]
pre_releases = ['cam', 'ts', 'tc', 'r5', 'scr']
threed_tags = {
'hsbs': [('half', 'sbs')],
'fsbs': [('full', 'sbs')],
'3d': [],
'sbs': [('half', 'sbs'), 'hsbs', ('full', 'sbs'), 'fsbs'],
'ou': [('half', 'ou'), 'hou', ('full', 'ou'), 'fou'],
'3d': ['2d3d', '3d2d', '3d'],
}
cached_qualities = None
@ -49,6 +50,9 @@ class QualityPlugin(Plugin):
addEvent('quality.guess', self.guess)
addEvent('quality.pre_releases', self.preReleases)
addEvent('quality.order', self.getOrder)
addEvent('quality.ishigher', self.isHigher)
addEvent('quality.isfinish', self.isFinish)
addEvent('quality.fill', self.fill)
addApiView('quality.size.save', self.saveSize)
addApiView('quality.list', self.allView, docs = {
@ -150,24 +154,31 @@ class QualityPlugin(Plugin):
order = 0
for q in self.qualities:
db.insert({
'_t': 'quality',
'order': order,
'identifier': q.get('identifier'),
'size_min': tryInt(q.get('size')[0]),
'size_max': tryInt(q.get('size')[1]),
})
log.info('Creating profile: %s', q.get('label'))
db.insert({
'_t': 'profile',
'order': order + 20, # Make sure it goes behind other profiles
'core': True,
'qualities': [q.get('identifier')],
'label': toUnicode(q.get('label')),
'finish': [True],
'wait_for': [0],
})
existing = None
try:
existing = db.get('quality', q.get('identifier'))
except RecordNotFound:
pass
if not existing:
db.insert({
'_t': 'quality',
'order': order,
'identifier': q.get('identifier'),
'size_min': tryInt(q.get('size')[0]),
'size_max': tryInt(q.get('size')[1]),
})
log.info('Creating profile: %s', q.get('label'))
db.insert({
'_t': 'profile',
'order': order + 20, # Make sure it goes behind other profiles
'core': True,
'qualities': [q.get('identifier')],
'label': toUnicode(q.get('label')),
'finish': [True],
'wait_for': [0],
})
order += 1
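With the RecordNotFound guard, fill() only inserts qualities (and their single-quality default profiles) that are not in the database yet, so it can be called again without creating duplicates. A toy model of that idempotency check (a plain dict stands in for the CodernityDB index):

class RecordNotFound(Exception):
    pass

db = {}

def get_quality(identifier):
    try:
        return db[identifier]
    except KeyError:
        raise RecordNotFound(identifier)

def fill(qualities):
    created = 0
    for order, q in enumerate(qualities):
        try:
            get_quality(q['identifier'])  # already there: leave it untouched
            continue
        except RecordNotFound:
            pass
        db[q['identifier']] = {'_t': 'quality', 'order': order, 'identifier': q['identifier']}
        created += 1
    return created

qualities = [{'identifier': '720p'}, {'identifier': '1080p'}]
print(fill(qualities))  # 2 on the first run
print(fill(qualities))  # 0 on a rerun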
@ -177,7 +188,7 @@ class QualityPlugin(Plugin):
return False
def guess(self, files, extra = None):
def guess(self, files, extra = None, size = None):
if not extra: extra = {}
# Create hash for cache
@ -205,15 +216,27 @@ class QualityPlugin(Plugin):
self.calcScore(score, quality, contains_score, threedscore)
# Try again with loose testing
size_scores = []
for quality in qualities:
# Evaluate score based on size
size_score = self.guessSizeScore(quality, size = size)
loose_score = self.guessLooseScore(quality, extra = extra)
self.calcScore(score, quality, loose_score)
# Return nothing if all scores are 0
if size_score > 0:
size_scores.append(quality)
self.calcScore(score, quality, size_score + loose_score, penalty = False)
# Add additional size score if only 1 size validated
if len(size_scores) == 1:
self.calcScore(score, size_scores[0], 10, penalty = False)
del size_scores
# Return nothing if all scores are <= 0
has_non_zero = 0
for s in score:
if score[s] > 0:
if score[s]['score'] > 0:
has_non_zero += 1
if not has_non_zero:
@ -276,14 +299,14 @@ class QualityPlugin(Plugin):
tags = self.threed_tags.get(key, [])
for tag in tags:
if (isinstance(tag, tuple) and '.'.join(tag) in '.'.join(words)) or (isinstance(tag, (str, unicode)) and ss(tag.lower()) in cur_file.lower()):
if isinstance(tag, tuple):
if len(set(words) & set(tag)) == len(tag):
log.debug('Found %s in %s', (tag, cur_file))
return 1, key
elif tag in words:
log.debug('Found %s in %s', (tag, cur_file))
return 1, key
if list(set([key]) & set(words)):
log.debug('Found %s in %s', (tag, cur_file))
return 1, key
return 0, None
def guessLooseScore(self, quality, extra = None):
@ -308,7 +331,22 @@ class QualityPlugin(Plugin):
return score
def calcScore(self, score, quality, add_score, threedscore = (0, None)):
def guessSizeScore(self, quality, size = None):
score = 0
if size:
if tryInt(quality['size_min']) <= tryInt(size) <= tryInt(quality['size_max']):
log.debug('Found %s via release size: %s MB < %s MB < %s MB', (quality['identifier'], quality['size_min'], size, quality['size_max']))
score += 5
else:
score -= 5
return score
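guessSizeScore() rewards a quality whose configured size range contains the release size (in MB) and penalises the rest; when only one quality matches on size, guess() adds a further +10 bonus. A toy version of the per-quality part:

def size_score(quality, size = None):
    # +5 when the size (MB) falls inside the quality's expected range, -5 otherwise
    if not size:
        return 0
    return 5 if quality['size_min'] <= size <= quality['size_max'] else -5

brrip = {'identifier': 'brrip', 'size_min': 700, 'size_max': 7000}
print(size_score(brrip, 1300))   # 5  -> plausible BR-Rip size
print(size_score(brrip, 48000))  # -5 -> far too big, more likely a BR-Disk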
def calcScore(self, score, quality, add_score, threedscore = (0, None), penalty = True):
score[quality['identifier']]['score'] += add_score
@ -325,32 +363,85 @@ class QualityPlugin(Plugin):
for q in self.qualities:
self.cached_order[q.get('identifier')] = self.qualities.index(q)
if add_score != 0:
if penalty and add_score != 0:
for allow in quality.get('allow', []):
score[allow]['score'] -= 40 if self.cached_order[allow] < self.cached_order[quality['identifier']] else 5
# Give a penalty to all lower qualities
for q in self.qualities[self.order.index(quality.get('identifier'))+1:]:
if score.get(q.get('identifier')):
score[q.get('identifier')]['score'] -= 1
def isFinish(self, quality, profile):
if not isinstance(profile, dict) or not profile.get('qualities'):
return False
try:
quality_order = [i for i, identifier in enumerate(profile['qualities']) if identifier == quality['identifier'] and bool(profile['3d'][i] if profile.get('3d') else 0) == bool(quality.get('is_3d', 0))][0]
return profile['finish'][quality_order]
except:
return False
def isHigher(self, quality, compare_with, profile = None):
if not isinstance(profile, dict) or not profile.get('qualities'):
profile = {'qualities': self.order}
# Try to find the quality in the profile; if not found, a quality we do not want is lower than anything else
try:
quality_order = [i for i, identifier in enumerate(profile['qualities']) if identifier == quality['identifier'] and bool(profile['3d'][i] if profile.get('3d') else 0) == bool(quality.get('is_3d', 0))][0]
except:
log.debug('Quality %s not found in profile identifiers %s', (quality['identifier'] + (' 3D' if quality.get('is_3d', 0) else ''), \
[identifier + ('3D' if (profile['3d'][i] if profile.get('3d') else 0) else '') for i, identifier in enumerate(profile['qualities'])]))
return 'lower'
# Try to find the compare quality in the profile; if not found, anything is higher than an unwanted quality
try:
compare_order = [i for i, identifier in enumerate(profile['qualities']) if identifier == compare_with['identifier'] and bool(profile['3d'][i] if profile.get('3d') else 0) == bool(compare_with.get('is_3d', 0))][0]
except:
log.debug('Compare quality %s not found in profile identifiers %s', (compare_with['identifier'] + (' 3D' if compare_with.get('is_3d', 0) else ''), \
[identifier + (' 3D' if (profile['3d'][i] if profile.get('3d') else 0) else '') for i, identifier in enumerate(profile['qualities'])]))
return 'higher'
# Note to self: a lower number means higher quality
if quality_order > compare_order:
return 'lower'
elif quality_order == compare_order:
return 'equal'
else:
return 'higher'
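isHigher() compares two qualities by their position in the profile's wanted list (best first): a wanted quality beats one that is not in the profile, and otherwise the lower index wins. A simplified sketch that drops the 3D matching the real method also performs:

def is_higher(quality, compare_with, wanted):
    # wanted: the profile's quality identifiers, best quality first
    try:
        quality_order = wanted.index(quality)
    except ValueError:
        return 'lower'   # a quality we do not want loses against anything wanted
    try:
        compare_order = wanted.index(compare_with)
    except ValueError:
        return 'higher'  # anything wanted beats an unwanted quality
    if quality_order > compare_order:
        return 'lower'
    if quality_order == compare_order:
        return 'equal'
    return 'higher'

wanted = ['1080p', '720p', 'brrip']
print(is_higher('720p', 'brrip', wanted))   # higher
print(is_higher('dvdrip', '720p', wanted))  # lower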
def doTest(self):
tests = {
'Movie Name (1999)-DVD-Rip.avi': 'dvdrip',
'Movie Name 1999 720p Bluray.mkv': '720p',
'Movie Name 1999 BR-Rip 720p.avi': 'brrip',
'Movie Name 1999 720p Web Rip.avi': 'scr',
'Movie Name 1999 Web DL.avi': 'brrip',
'Movie.Name.1999.1080p.WEBRip.H264-Group': 'scr',
'Movie.Name.1999.DVDRip-Group': 'dvdrip',
'Movie.Name.1999.DVD-Rip-Group': 'dvdrip',
'Movie.Name.1999.DVD-R-Group': 'dvdr',
'Movie.Name.Camelie.1999.720p.BluRay.x264-Group': '720p',
'Movie.Name.2008.German.DL.AC3.1080p.BluRay.x264-Group': '1080p',
'Movie.Name.2004.GERMAN.AC3D.DL.1080p.BluRay.x264-Group': '1080p',
'Movie Name (1999)-DVD-Rip.avi': {'size': 700, 'quality': 'dvdrip'},
'Movie Name 1999 720p Bluray.mkv': {'size': 4200, 'quality': '720p'},
'Movie Name 1999 BR-Rip 720p.avi': {'size': 1000, 'quality': 'brrip'},
'Movie Name 1999 720p Web Rip.avi': {'size': 1200, 'quality': 'scr'},
'Movie Name 1999 Web DL.avi': {'size': 800, 'quality': 'brrip'},
'Movie.Name.1999.1080p.WEBRip.H264-Group': {'size': 1500, 'quality': 'scr'},
'Movie.Name.1999.DVDRip-Group': {'size': 750, 'quality': 'dvdrip'},
'Movie.Name.1999.DVD-Rip-Group': {'size': 700, 'quality': 'dvdrip'},
'Movie.Name.1999.DVD-R-Group': {'size': 4500, 'quality': 'dvdr'},
'Movie.Name.Camelie.1999.720p.BluRay.x264-Group': {'size': 5500, 'quality': '720p'},
'Movie.Name.2008.German.DL.AC3.1080p.BluRay.x264-Group': {'size': 8500, 'extra': {'resolution_width': 1920, 'resolution_height': 1080} , 'quality': '1080p'},
'Movie.Name.2004.GERMAN.AC3D.DL.1080p.BluRay.x264-Group': {'size': 8000, 'quality': '1080p'},
'Movie.Name.2013.BR-Disk-Group.iso': {'size': 48000, 'quality': 'bd50'},
'Movie.Name.2013.2D+3D.BR-Disk-Group.iso': {'size': 52000, 'quality': 'bd50', 'is_3d': True},
'Movie.Rising.Name.Girl.2011.NTSC.DVD9-GroupDVD': {'size': 7200, 'quality': 'dvdr'},
'Movie Name (2013) 2D + 3D': {'size': 49000, 'quality': 'bd50', 'is_3d': True},
'Movie Monuments 2013 BrRip 1080p': {'size': 1800, 'quality': 'brrip'},
'Movie Monuments 2013 BrRip 720p': {'size': 1300, 'quality': 'brrip'},
'The.Movie.2014.3D.1080p.BluRay.AVC.DTS-HD.MA.5.1-GroupName': {'size': 30000, 'quality': 'bd50', 'is_3d': True},
'/home/namehou/Movie Monuments (2013)/Movie Monuments.mkv': {'size': 4500, 'quality': '1080p', 'is_3d': False},
'/home/namehou/Movie Monuments (2013)/Movie Monuments Full-OU.mkv': {'size': 4500, 'quality': '1080p', 'is_3d': True}
}
correct = 0
for name in tests:
success = self.guess([name]).get('identifier') == tests[name]
test_quality = self.guess(files = [name], extra = tests[name].get('extra', None), size = tests[name].get('size', None)) or {}
success = test_quality.get('identifier') == tests[name]['quality'] and test_quality.get('is_3d') == tests[name].get('is_3d', False)
if not success:
log.error('%s failed check, thinks it\'s %s', (name, self.guess([name]).get('identifier')))
log.error('%s failed check, thinks it\'s %s', (name, test_quality.get('identifier')))
correct += success

33
couchpotato/core/plugins/quality/static/quality.js

@ -29,9 +29,14 @@ var QualityBase = new Class({
},
getQuality: function(identifier){
return this.qualities.filter(function(q){
return q.identifier == identifier;
}).pick();
try {
return this.qualities.filter(function(q){
return q.identifier == identifier;
}).pick();
}
catch(e){}
return {}
},
addSettings: function(){
@ -104,7 +109,7 @@ var QualityBase = new Class({
var profile_list;
self.settings.createGroup({
'label': 'Profile Defaults',
'description': '(Needs refresh \'' +(App.isMac() ? 'CMD+R' : 'F5')+ '\' after editing)'
'description': '(Needs refresh \'' +(App.isMac() ? 'CMD+R' : 'F5')+ '\' after editing)'
}).adopt(
new Element('.ctrlHolder#profile_ordering').adopt(
new Element('label[text=Order]'),
@ -135,20 +140,28 @@ var QualityBase = new Class({
});
// Sortable
var sorted_changed = false;
self.profile_sortable = new Sortables(profile_list, {
'revert': true,
'handle': '',
'handle': '.handle',
'opacity': 0.5,
'onComplete': self.saveProfileOrdering.bind(self)
'onSort': function(){
sorted_changed = true;
},
'onComplete': function(){
if(sorted_changed){
self.saveProfileOrdering();
sorted_changed = false;
}
}
});
},
saveProfileOrdering: function(){
var self = this;
var ids = [];
var hidden = [];
var self = this,
ids = [],
hidden = [];
self.profile_sortable.list.getElements('li').each(function(el, nr){
ids.include(el.get('data-id'));

135
couchpotato/core/plugins/release/main.py

@ -3,10 +3,11 @@ import os
import time
import traceback
from CodernityDB.database import RecordDeleted
from couchpotato import md5, get_db
from couchpotato.api import addApiView
from couchpotato.core.event import fireEvent, addEvent
from couchpotato.core.helpers.encoding import ss, toUnicode
from couchpotato.core.helpers.encoding import toUnicode, sp
from couchpotato.core.helpers.variable import getTitle
from couchpotato.core.logger import CPLog
from couchpotato.core.plugins.base import Plugin
@ -57,8 +58,8 @@ class Release(Plugin):
addEvent('release.for_media', self.forMedia)
# Clean releases that didn't have activity in the last week
addEvent('app.load', self.cleanDone)
fireEvent('schedule.interval', 'movie.clean_releases', self.cleanDone, hours = 4)
addEvent('app.load', self.cleanDone, priority = 1000)
fireEvent('schedule.interval', 'movie.clean_releases', self.cleanDone, hours = 12)
def cleanDone(self):
log.debug('Removing releases from dashboard')
@ -68,6 +69,24 @@ class Release(Plugin):
db = get_db()
# Get (and remove) parentless releases
releases = db.all('release', with_doc = True)
media_exist = []
for release in releases:
if release.get('key') in media_exist:
continue
try:
db.get('id', release.get('key'))
media_exist.append(release.get('key'))
except RecordDeleted:
db.delete(release['doc'])
log.debug('Deleted orphaned release: %s', release['doc'])
except:
log.debug('Failed cleaning up orphaned releases: %s', traceback.format_exc())
del media_exist
# get movies last_edit more than a week ago
medias = fireEvent('media.with_status', 'done', single = True)
@ -85,7 +104,7 @@ class Release(Plugin):
elif rel['status'] in ['snatched', 'downloaded']:
self.updateStatus(rel['_id'], status = 'ignore')
def add(self, group, update_info = True):
def add(self, group, update_info = True, update_id = None):
try:
db = get_db()
@ -101,29 +120,46 @@ class Release(Plugin):
'profile_id': None,
}, search_after = False, update_after = update_info, notify_after = False, status = 'done', single = True)
# Add Release
release = {
'_t': 'release',
'media_id': media['_id'],
'identifier': release_identifier,
'quality': group['meta_data']['quality'].get('identifier'),
'last_edit': int(time.time()),
'status': 'done'
}
try:
r = db.get('release_identifier', release_identifier, with_doc = True)['doc']
r['media_id'] = media['_id']
except:
r = db.insert(release)
release = None
if update_id:
try:
release = db.get('id', update_id)
release.update({
'identifier': release_identifier,
'last_edit': int(time.time()),
'status': 'done',
})
except:
log.error('Failed updating existing release: %s', traceback.format_exc())
else:
# Update with ref and _id
release.update({
'_id': r['_id'],
'_rev': r['_rev'],
})
# Add Release
if not release:
release = {
'_t': 'release',
'media_id': media['_id'],
'identifier': release_identifier,
'quality': group['meta_data']['quality'].get('identifier'),
'is_3d': group['meta_data']['quality'].get('is_3d', 0),
'last_edit': int(time.time()),
'status': 'done'
}
try:
r = db.get('release_identifier', release_identifier, with_doc = True)['doc']
r['media_id'] = media['_id']
except:
log.error('Failed updating release by identifier: %s', traceback.format_exc())
r = db.insert(release)
# Update with ref and _id
release.update({
'_id': r['_id'],
'_rev': r['_rev'],
})
# Empty out empty file groups
release['files'] = dict((k, v) for k, v in group['files'].items() if v)
release['files'] = dict((k, [toUnicode(x) for x in v]) for k, v in group['files'].items() if v)
db.update(release)
fireEvent('media.restatus', media['_id'])
@ -147,6 +183,9 @@ class Release(Plugin):
rel = db.get('id', release_id)
db.delete(rel)
return True
except RecordDeleted:
log.error('Already deleted: %s', release_id)
return True
except:
log.error('Failed: %s', traceback.format_exc())
@ -157,15 +196,20 @@ class Release(Plugin):
try:
db = get_db()
rel = db.get('id', release_id)
raw_files = rel.get('files')
if len(rel.get('files')) == 0:
if len(raw_files) == 0:
self.delete(rel['_id'])
else:
files = []
for release_file in rel.get('files'):
if os.path.isfile(ss(release_file['path'])):
files.append(release_file)
files = {}
for file_type in raw_files:
for release_file in raw_files.get(file_type, []):
if os.path.isfile(sp(release_file)):
if file_type not in files:
files[file_type] = []
files[file_type].append(release_file)
rel['files'] = files
db.update(rel)
@ -313,12 +357,14 @@ class Release(Plugin):
return True
def tryDownloadResult(self, results, media, quality_custom, manual = False):
def tryDownloadResult(self, results, media, quality_custom):
wait_for = False
let_through = False
filtered_results = []
# If a single release comes through the "wait for", let through all
for rel in results:
if not quality_custom.get('finish', False) and quality_custom.get('wait_for', 0) > 0 and rel.get('age') <= quality_custom.get('wait_for', 0):
log.info('Ignored, waiting %s days: %s', (quality_custom.get('wait_for'), rel['name']))
continue
if rel['status'] in ['ignored', 'failed']:
log.info('Ignored: %s', rel['name'])
@ -328,13 +374,30 @@ class Release(Plugin):
log.info('Ignored, score too low: %s', rel['name'])
continue
downloaded = fireEvent('release.download', data = rel, media = media, manual = manual, single = True)
rel['wait_for'] = False
if quality_custom.get('index') != 0 and quality_custom.get('wait_for', 0) > 0 and rel.get('age') <= quality_custom.get('wait_for', 0):
rel['wait_for'] = True
else:
let_through = True
filtered_results.append(rel)
# Loop through filtered results
for rel in filtered_results:
# Only wait if no release is old enough yet
if rel.get('wait_for') and not let_through:
log.info('Ignored, waiting %s days: %s', (quality_custom.get('wait_for') - rel.get('age'), rel['name']))
wait_for = True
continue
downloaded = fireEvent('release.download', data = rel, media = media, single = True)
if downloaded is True:
return True
elif downloaded != 'try_next':
break
return False
return wait_for
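tryDownloadResult() now tags every result that is still inside the profile's wait_for window and only skips those when no result in the batch is old enough; as soon as one release is past the window, the whole batch is let through. A simplified model of that gate (status/score filtering and the actual download call are left out):

def apply_wait_for(results, wait_for_days, is_top_quality):
    let_through = False
    for rel in results:
        rel['wait'] = (not is_top_quality) and wait_for_days > 0 and rel['age'] <= wait_for_days
        if not rel['wait']:
            let_through = True  # one old-enough release unlocks the rest
    return [rel for rel in results if let_through or not rel['wait']]

results = [{'name': 'young.release', 'age': 1}, {'name': 'old.release', 'age': 9}]
print([r['name'] for r in apply_wait_for(results, wait_for_days = 7, is_top_quality = False)])
# ['young.release', 'old.release'] -> the 9 day old release lets both through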
def createFromSearch(self, search_results, media, quality):
@ -406,7 +469,7 @@ class Release(Plugin):
rel = db.get('id', release_id)
if rel and rel.get('status') != status:
release_name = rel.get('name')
release_name = rel['info'].get('name')
if rel.get('files'):
for file_type in rel.get('files', {}):
if file_type == 'movie':

102
couchpotato/core/plugins/renamer.py

@ -14,7 +14,6 @@ from couchpotato.core.helpers.variable import getExt, mergeDicts, getTitle, \
from couchpotato.core.logger import CPLog
from couchpotato.core.plugins.base import Plugin
from couchpotato.environment import Env
from scandir import scandir
from unrar2 import RarFile
import six
from six.moves import filter
@ -112,16 +111,13 @@ class Renamer(Plugin):
return
if not base_folder:
base_folder = self.conf('from')
base_folder = sp(self.conf('from'))
from_folder = sp(self.conf('from'))
to_folder = sp(self.conf('to'))
# Get media folder to process
media_folder = release_download.get('folder')
# Quality order for calculating quality priority
quality_order = fireEvent('quality.order', single = True)
media_folder = sp(release_download.get('folder'))
# Get all folders that should not be processed
no_process = [to_folder]
@ -149,9 +145,9 @@ class Renamer(Plugin):
# Update to the from folder
if len(release_download.get('files', [])) == 1:
new_media_folder = from_folder
new_media_folder = sp(from_folder)
else:
new_media_folder = os.path.join(from_folder, os.path.basename(media_folder))
new_media_folder = sp(os.path.join(from_folder, os.path.basename(media_folder)))
if not os.path.isdir(new_media_folder):
log.error('The provided media folder %s does not exist and could also not be found in the \'from\' folder.', media_folder)
@ -198,7 +194,7 @@ class Renamer(Plugin):
else:
# Get all files from the specified folder
try:
for root, folders, names in scandir.walk(media_folder):
for root, folders, names in os.walk(media_folder):
files.extend([sp(os.path.join(root, name)) for name in names])
except:
log.error('Failed getting files from %s: %s', (media_folder, traceback.format_exc()))
@ -231,6 +227,7 @@ class Renamer(Plugin):
for group_identifier in groups:
group = groups[group_identifier]
group['release_download'] = None
rename_files = {}
remove_files = []
remove_releases = []
@ -290,8 +287,10 @@ class Renamer(Plugin):
# Put 'The' at the end
name_the = movie_name
if movie_name[:4].lower() == 'the ':
name_the = movie_name[4:] + ', The'
for prefix in ['the ', 'an ', 'a ']:
if prefix == movie_name[:len(prefix)].lower():
name_the = movie_name[len(prefix):] + ', ' + prefix.strip().capitalize()
break
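The name_the value used by the renamer now moves any leading article to the end of the title, not just 'The'. A tiny reproduction of that loop:

def move_article_to_end(movie_name):
    # 'The Movie' -> 'Movie, The'; 'An'/'A' are handled the same way now
    for prefix in ['the ', 'an ', 'a ']:
        if movie_name[:len(prefix)].lower() == prefix:
            return movie_name[len(prefix):] + ', ' + prefix.strip().capitalize()
    return movie_name

print(move_article_to_end('The Shawshank Redemption'))  # Shawshank Redemption, The
print(move_article_to_end('An American Tail'))          # American Tail, An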
replacements = {
'ext': 'mkv',
@ -312,9 +311,15 @@ class Renamer(Plugin):
'cd': '',
'cd_nr': '',
'mpaa': media['info'].get('mpaa', ''),
'mpaa_only': media['info'].get('mpaa', ''),
'category': category_label,
'3d': '3D' if group['meta_data']['quality'].get('is_3d', 0) else '',
'3d_type': group['meta_data'].get('3d_type'),
}
if replacements['mpaa_only'] not in ('G', 'PG', 'PG-13', 'R', 'NC-17'):
replacements['mpaa_only'] = 'Not Rated'
for file_type in group['files']:
# Move nfo depending on settings
@ -410,8 +415,12 @@ class Renamer(Plugin):
# Don't add language if multiple languages in 1 subtitle file
if len(sub_langs) == 1:
sub_name = sub_name.replace(replacements['ext'], '%s.%s' % (sub_langs[0], replacements['ext']))
rename_files[current_file] = os.path.join(destination, final_folder_name, sub_name)
sub_suffix = '%s.%s' % (sub_langs[0], replacements['ext'])
# Don't add language to the subtitle file if it's already there
if not sub_name.endswith(sub_suffix):
sub_name = sub_name.replace(replacements['ext'], sub_suffix)
rename_files[current_file] = os.path.join(destination, final_folder_name, sub_name)
rename_files = mergeDicts(rename_files, rename_extras)
@ -438,19 +447,17 @@ class Renamer(Plugin):
remove_leftovers = True
# Mark movie "done" once it's found the quality with the finish check
profile = None
try:
if media.get('status') == 'active' and media.get('profile_id'):
profile = db.get('id', media['profile_id'])
if group['meta_data']['quality']['identifier'] in profile.get('qualities', []):
nr = profile['qualities'].index(group['meta_data']['quality']['identifier'])
finish = profile['finish'][nr]
if finish:
mdia = db.get('id', media['_id'])
mdia['status'] = 'done'
mdia['last_edit'] = int(time.time())
db.update(mdia)
if fireEvent('quality.isfinish', group['meta_data']['quality'], profile, single = True):
mdia = db.get('id', media['_id'])
mdia['status'] = 'done'
mdia['last_edit'] = int(time.time())
db.update(mdia)
except Exception as e:
except:
log.error('Failed marking movie finished: %s', (traceback.format_exc()))
# Go over current movie releases
@ -459,18 +466,19 @@ class Renamer(Plugin):
# When a release already exists
if release.get('status') == 'done':
release_order = quality_order.index(release['quality'])
group_quality_order = quality_order.index(group['meta_data']['quality']['identifier'])
# This is where CP removes older, lesser quality releases or releases that are not wanted anymore
is_higher = fireEvent('quality.ishigher', \
group['meta_data']['quality'], {'identifier': release['quality'], 'is_3d': release.get('is_3d', 0)}, profile, single = True)
# This is where CP removes older, lesser quality releases
if release_order > group_quality_order:
log.info('Removing lesser quality %s for %s.', (media_title, release.get('quality')))
if is_higher == 'higher':
log.info('Removing lesser or not wanted quality %s for %s.', (media_title, release.get('quality')))
for file_type in release.get('files', {}):
for release_file in release['files'][file_type]:
remove_files.append(release_file)
remove_releases.append(release)
# Same quality, but still downloaded, so maybe a repack/proper/unrated/director's cut etc.
elif release_order == group_quality_order:
elif is_higher == 'equal':
log.info('Same quality release already exists for %s, with quality %s. Assuming repack.', (media_title, release.get('quality')))
for file_type in release.get('files', {}):
for release_file in release['files'][file_type]:
@ -497,13 +505,15 @@ class Renamer(Plugin):
if release_download['status'] == 'completed':
# Set the release to downloaded
fireEvent('release.update_status', release['_id'], status = 'downloaded', single = True)
group['release_download'] = release_download
elif release_download['status'] == 'seeding':
# Set the release to seeding
fireEvent('release.update_status', release['_id'], status = 'seeding', single = True)
elif release.get('identifier') == group['meta_data']['quality']['identifier']:
# Set the release to downloaded
fireEvent('release.update_status', release['_id'], status = 'downloaded', single = True)
# Set the release to downloaded
fireEvent('release.update_status', release['_id'], status = 'downloaded', single = True)
group['release_download'] = release_download
# Remove leftover files
if not remove_leftovers: # Don't remove anything
@ -601,7 +611,7 @@ class Renamer(Plugin):
log.error('Failed removing %s: %s', (group_folder, traceback.format_exc()))
# Notify on download, search for trailers etc
download_message = 'Downloaded %s (%s)' % (media_title, replacements['quality'])
download_message = 'Downloaded %s (%s%s)' % (media_title, replacements['quality'], (' ' + replacements['3d']) if replacements['3d'] else '')
try:
fireEvent('renamer.after', message = download_message, group = group, in_order = True)
except:
@ -652,11 +662,11 @@ Remove it if you want it to be renamed (again, or at least let it try again)
elif isinstance(release_download, dict):
# Tag download_files if they are known
if release_download.get('files', []):
tag_files = release_download.get('files', [])
tag_files = [filename for filename in release_download.get('files', []) if os.path.exists(filename)]
# Tag all files in release folder
elif release_download['folder']:
for root, folders, names in scandir.walk(release_download['folder']):
for root, folders, names in os.walk(sp(release_download['folder'])):
tag_files.extend([os.path.join(root, name) for name in names])
for filename in tag_files:
@ -680,13 +690,13 @@ Remove it if you want it to be renamed (again, or at least let it try again)
if isinstance(group, dict):
tag_files = [sorted(list(group['files']['movie']))[0]]
folder = group['parentdir']
folder = sp(group['parentdir'])
if not group.get('dirname') or not os.path.isdir(folder):
return False
elif isinstance(release_download, dict):
folder = release_download['folder']
folder = sp(release_download['folder'])
if not os.path.isdir(folder):
return False
@ -696,7 +706,7 @@ Remove it if you want it to be renamed (again, or at least let it try again)
# Untag all files in release folder
else:
for root, folders, names in scandir.walk(folder):
for root, folders, names in os.walk(folder):
tag_files.extend([sp(os.path.join(root, name)) for name in names if not os.path.splitext(name)[1] == '.ignore'])
if not folder:
@ -704,7 +714,7 @@ Remove it if you want it to be renamed (again, or at least let it try again)
# Find all .ignore files in folder
ignore_files = []
for root, dirnames, filenames in scandir.walk(folder):
for root, dirnames, filenames in os.walk(folder):
ignore_files.extend(fnmatch.filter([sp(os.path.join(root, filename)) for filename in filenames], '*%s.ignore' % tag))
# Match all found ignore files with the tag_files and delete if found
@ -720,7 +730,7 @@ Remove it if you want it to be renamed (again, or at least let it try again)
if not release_download:
return False
folder = release_download['folder']
folder = sp(release_download['folder'])
if not os.path.isdir(folder):
return False
@ -733,11 +743,11 @@ Remove it if you want it to be renamed (again, or at least let it try again)
# Find tag on all files in release folder
else:
for root, folders, names in scandir.walk(folder):
for root, folders, names in os.walk(folder):
tag_files.extend([sp(os.path.join(root, name)) for name in names if not os.path.splitext(name)[1] == '.ignore'])
# Find all .ignore files in folder
for root, dirnames, filenames in scandir.walk(folder):
for root, dirnames, filenames in os.walk(folder):
ignore_files.extend(fnmatch.filter([sp(os.path.join(root, filename)) for filename in filenames], '*%s.ignore' % tag))
# Match all found ignore files with the tag_files and return True found
@ -749,7 +759,7 @@ Remove it if you want it to be renamed (again, or at least let it try again)
return False
def moveFile(self, old, dest, forcemove = False):
dest = ss(dest)
dest = sp(dest)
try:
if forcemove or self.conf('file_action') not in ['copy', 'link']:
try:
@ -822,7 +832,7 @@ Remove it if you want it to be renamed (again, or at least let it try again)
def replaceDoubles(self, string):
replaces = [
('\.+', '.'), ('_+', '_'), ('-+', '-'), ('\s+', ' '),
('\.+', '.'), ('_+', '_'), ('-+', '-'), ('\s+', ' '), (' \\\\', '\\\\'), (' /', '/'),
('(\s\.)+', '.'), ('(-\.)+', '.'), ('(\s-)+', '-'),
]
@ -1054,6 +1064,7 @@ Remove it if you want it to be renamed (again, or at least let it try again)
release_download.update({
'imdb_id': getIdentifier(media),
'quality': rls['quality'],
'is_3d': rls['is_3d'],
'protocol': rls.get('info', {}).get('protocol') or rls.get('info', {}).get('type'),
'release_id': rls['_id'],
})
@ -1093,7 +1104,7 @@ Remove it if you want it to be renamed (again, or at least let it try again)
check_file_date = False
if not files:
for root, folders, names in scandir.walk(folder):
for root, folders, names in os.walk(folder):
files.extend([sp(os.path.join(root, name)) for name in names])
# Find all archive files
@ -1193,6 +1204,8 @@ rename_options = {
'first': 'First letter (M)',
'quality': 'Quality (720p)',
'quality_type': '(HD) or (SD)',
'3d': '3D',
'3d_type': '3D Type (Full SBS)',
'video': 'Video (x264)',
'audio': 'Audio (DTS)',
'group': 'Releasegroup name',
@ -1205,7 +1218,8 @@ rename_options = {
'imdb_id': 'IMDB id (tt0123456)',
'cd': 'CD number (cd1)',
'cd_nr': 'Just the cd nr. (1)',
'mpaa': 'MPAA Rating',
'mpaa': 'MPAA or other certification',
'mpaa_only': 'MPAA only certification (G|PG|PG-13|R|NC-17|Not Rated)',
'category': 'Category label',
},
}

167
couchpotato/core/plugins/scanner.py

@ -6,14 +6,13 @@ import traceback
from couchpotato import get_db
from couchpotato.core.event import fireEvent, addEvent
from couchpotato.core.helpers.encoding import toUnicode, simplifyString, sp
from couchpotato.core.helpers.encoding import toUnicode, simplifyString, sp, ss
from couchpotato.core.helpers.variable import getExt, getImdb, tryInt, \
splitString, getIdentifier
from couchpotato.core.logger import CPLog
from couchpotato.core.plugins.base import Plugin
from enzyme.exceptions import NoParserError, ParseError
from guessit import guess_movie_info
from scandir import scandir
from subliminal.videos import Video
import enzyme
from six.moves import filter, map, zip
@ -40,6 +39,17 @@ class Scanner(Plugin):
'trailer': ['mov', 'mp4', 'flv']
}
threed_types = {
'Half SBS': [('half', 'sbs'), ('h', 'sbs'), 'hsbs'],
'Full SBS': [('full', 'sbs'), ('f', 'sbs'), 'fsbs'],
'SBS': ['sbs'],
'Half OU': [('half', 'ou'), ('h', 'ou'), 'hou'],
'Full OU': [('full', 'ou'), ('h', 'ou'), 'fou'],
'OU': ['ou'],
'Frame Packed': ['mvc', ('complete', 'bluray')],
'3D': ['3d']
}
file_types = {
'subtitle': ('subtitle', 'subtitle'),
'subtitle_extra': ('subtitle', 'subtitle_extra'),
@ -60,33 +70,43 @@ class Scanner(Plugin):
}
codecs = {
'audio': ['dts', 'ac3', 'ac3d', 'mp3'],
'video': ['x264', 'h264', 'divx', 'xvid']
'audio': ['DTS', 'AC3', 'AC3D', 'MP3'],
'video': ['x264', 'H264', 'DivX', 'Xvid']
}
resolutions = {
'1080p': {'resolution_width': 1920, 'resolution_height': 1080, 'aspect': 1.78},
'1080i': {'resolution_width': 1920, 'resolution_height': 1080, 'aspect': 1.78},
'720p': {'resolution_width': 1280, 'resolution_height': 720, 'aspect': 1.78},
'720i': {'resolution_width': 1280, 'resolution_height': 720, 'aspect': 1.78},
'480p': {'resolution_width': 640, 'resolution_height': 480, 'aspect': 1.33},
'480i': {'resolution_width': 640, 'resolution_height': 480, 'aspect': 1.33},
'default': {'resolution_width': 0, 'resolution_height': 0, 'aspect': 1},
}
audio_codec_map = {
0x2000: 'ac3',
0x2001: 'dts',
0x0055: 'mp3',
0x0050: 'mp2',
0x0001: 'pcm',
0x003: 'pcm',
0x77a1: 'tta1',
0x5756: 'wav',
0x6750: 'vorbis',
0xF1AC: 'flac',
0x00ff: 'aac',
0x2000: 'AC3',
0x2001: 'DTS',
0x0055: 'MP3',
0x0050: 'MP2',
0x0001: 'PCM',
0x003: 'WAV',
0x77a1: 'TTA1',
0x5756: 'WAV',
0x6750: 'Vorbis',
0xF1AC: 'FLAC',
0x00ff: 'AAC',
}
source_media = {
'bluray': ['bluray', 'blu-ray', 'brrip', 'br-rip'],
'hddvd': ['hddvd', 'hd-dvd'],
'dvd': ['dvd'],
'hdtv': ['hdtv']
'Blu-ray': ['bluray', 'blu-ray', 'brrip', 'br-rip'],
'HD DVD': ['hddvd', 'hd-dvd'],
'DVD': ['dvd'],
'HDTV': ['hdtv']
}
clean = '[ _\,\.\(\)\[\]\-]?(3d|hsbs|sbs|extended.cut|directors.cut|french|swedisch|danish|dutch|swesub|spanish|german|ac3|dts|custom|dc|divx|divx5|dsr|dsrip|dutch|dvd|dvdr|dvdrip|dvdscr|dvdscreener|screener|dvdivx|cam|fragment|fs|hdtv|hdrip' \
'|hdtvrip|internal|limited|multisubs|ntsc|ogg|ogm|pal|pdtv|proper|repack|rerip|retail|r3|r5|bd5|se|svcd|swedish|german|read.nfo|nfofix|unrated|ws|telesync|ts|telecine|tc|brrip|bdrip|video_ts|audio_ts|480p|480i|576p|576i|720p|720i|1080p|1080i|hrhd|hrhdtv|hddvd|bluray|x264|h264|xvid|xvidvd|xxx|www.www|cd[1-9]|\[.*\])([ _\,\.\(\)\[\]\-]|$)'
clean = '([ _\,\.\(\)\[\]\-]|^)(3d|hsbs|sbs|ou|extended.cut|directors.cut|french|fr|swedisch|sw|danish|dutch|nl|swesub|subs|spanish|german|ac3|dts|custom|dc|divx|divx5|dsr|dsrip|dutch|dvd|dvdr|dvdrip|dvdscr|dvdscreener|screener|dvdivx|cam|fragment|fs|hdtv|hdrip' \
'|hdtvrip|webdl|web.dl|webrip|web.rip|internal|limited|multisubs|ntsc|ogg|ogm|pal|pdtv|proper|repack|rerip|retail|r3|r5|bd5|se|svcd|swedish|german|read.nfo|nfofix|unrated|ws|telesync|ts|telecine|tc|brrip|bdrip|video_ts|audio_ts|480p|480i|576p|576i|720p|720i|1080p|1080i|hrhd|hrhdtv|hddvd|bluray|x264|h264|xvid|xvidvd|xxx|www.www|hc|\[.*\])(?=[ _\,\.\(\)\[\]\-]|$)'
multipart_regex = [
'[ _\.-]+cd[ _\.-]*([0-9a-d]+)', #*cd1
'[ _\.-]+dvd[ _\.-]*([0-9a-d]+)', #*dvd1
@ -129,8 +149,8 @@ class Scanner(Plugin):
check_file_date = True
try:
files = []
for root, dirs, walk_files in scandir.walk(folder, followlinks=True):
files.extend([sp(os.path.join(root, filename)) for filename in walk_files])
for root, dirs, walk_files in os.walk(folder, followlinks=True):
files.extend([sp(os.path.join(sp(root), ss(filename))) for filename in walk_files])
# Break if CP wants to shut down
if self.shuttingDown():
@ -164,7 +184,7 @@ class Scanner(Plugin):
identifiers = [identifier]
# Identifier with quality
quality = fireEvent('quality.guess', [file_path], single = True) if not is_dvd_file else {'identifier':'dvdr'}
quality = fireEvent('quality.guess', files = [file_path], size = self.getFileSize(file_path), single = True) if not is_dvd_file else {'identifier':'dvdr'}
if quality:
identifier_with_quality = '%s %s' % (identifier, quality.get('identifier', ''))
identifiers = [identifier_with_quality, identifier]
@ -344,6 +364,7 @@ class Scanner(Plugin):
if return_ignored is False and identifier in ignored_identifiers:
log.debug('Ignore file found, ignoring release: %s', identifier)
total_found -= 1
continue
# Group extra (and easy) files first
@ -364,6 +385,7 @@ class Scanner(Plugin):
if len(group['files']['movie']) == 0:
log.error('Couldn\'t find any movie files for %s', identifier)
total_found -= 1
continue
log.debug('Getting metadata for %s', identifier)
@ -409,7 +431,7 @@ class Scanner(Plugin):
# Notify parent & progress on something found
if on_found:
on_found(group, total_found, total_found - len(processed_movies))
on_found(group, total_found, len(valid_files))
# Wait for all the async events calm down a bit
while threading.activeCount() > 100 and not self.shuttingDown():
@ -431,28 +453,39 @@ class Scanner(Plugin):
for cur_file in files:
if not self.filesizeBetween(cur_file, self.file_sizes['movie']): continue # Ignore smaller files
meta = self.getMeta(cur_file)
try:
data['video'] = meta.get('video', self.getCodec(cur_file, self.codecs['video']))
data['audio'] = meta.get('audio', self.getCodec(cur_file, self.codecs['audio']))
data['resolution_width'] = meta.get('resolution_width', 720)
data['resolution_height'] = meta.get('resolution_height', 480)
data['audio_channels'] = meta.get('audio_channels', 2.0)
data['aspect'] = round(float(meta.get('resolution_width', 720)) / meta.get('resolution_height', 480), 2)
except:
log.debug('Error parsing metadata: %s %s', (cur_file, traceback.format_exc()))
pass
if data.get('audio'): break
if not data.get('audio'): # Only get metadata from first media file
meta = self.getMeta(cur_file)
try:
data['video'] = meta.get('video', self.getCodec(cur_file, self.codecs['video']))
data['audio'] = meta.get('audio', self.getCodec(cur_file, self.codecs['audio']))
data['audio_channels'] = meta.get('audio_channels', 2.0)
if meta.get('resolution_width'):
data['resolution_width'] = meta.get('resolution_width')
data['resolution_height'] = meta.get('resolution_height')
data['aspect'] = round(float(meta.get('resolution_width')) / meta.get('resolution_height', 1), 2)
else:
data.update(self.getResolution(cur_file))
except:
log.debug('Error parsing metadata: %s %s', (cur_file, traceback.format_exc()))
pass
data['size'] = data.get('size', 0) + self.getFileSize(cur_file)
# Use the quality guess first; if that fails, use the quality we wanted to download
data['quality'] = None
quality = fireEvent('quality.guess', size = data.get('size'), files = files, extra = data, single = True)
# Use the quality that we snatched but check if it matches our guess
if release_download and release_download.get('quality'):
data['quality'] = fireEvent('quality.single', release_download.get('quality'), single = True)
data['quality']['is_3d'] = release_download.get('is_3d', 0)
if data['quality']['identifier'] != quality['identifier']:
log.info('Different quality snatched than detected for %s: %s vs. %s. Assuming snatched quality is correct.', (files[0], data['quality']['identifier'], quality['identifier']))
if data['quality']['is_3d'] != quality['is_3d']:
log.info('Different 3d snatched than detected for %s: %s vs. %s. Assuming snatched 3d is correct.', (files[0], data['quality']['is_3d'], quality['is_3d']))
if not data['quality']:
data['quality'] = fireEvent('quality.guess', files = files, extra = data, single = True)
data['quality'] = quality
if not data['quality']:
data['quality'] = fireEvent('quality.single', 'dvdr' if group['is_dvd'] else 'dvdrip', single = True)
@ -462,16 +495,32 @@ class Scanner(Plugin):
filename = re.sub('(.cp\(tt[0-9{7}]+\))', '', files[0])
data['group'] = self.getGroup(filename[len(folder):])
data['source'] = self.getSourceMedia(filename)
if data['quality'].get('is_3d', 0):
data['3d_type'] = self.get3dType(filename)
return data
def get3dType(self, filename):
filename = ss(filename)
words = re.split('\W+', filename.lower())
for key in self.threed_types:
tags = self.threed_types.get(key, [])
for tag in tags:
if (isinstance(tag, tuple) and '.'.join(tag) in '.'.join(words)) or (isinstance(tag, (str, unicode)) and ss(tag.lower()) in words):
log.debug('Found %s in %s', (tag, filename))
return key
return ''
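The matching in get3dType relies on a threed_types table defined elsewhere in the scanner; a hypothetical shape of that table, only to illustrate how tuple tags and plain string tags are treated differently:

    # Assumed structure: plain strings must match a whole word in the filename,
    # tuples must appear as a dotted run of consecutive words.
    threed_types = {
        'Side By Side': ['sbs', ('half', 'sbs')],
        'Over Under': ['ou', ('half', 'ou')],
    }

    # With this table, get3dType('Movie.2013.1080p.Half.SBS.x264.mkv') returns
    # 'Side By Side': the filename splits into ['movie', '2013', ...] and
    # 'half.sbs' is a substring of the dot-joined word list.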
def getMeta(self, filename):
try:
p = enzyme.parse(filename)
# Video codec
vc = ('h264' if p.video[0].codec == 'AVC1' else p.video[0].codec).lower()
vc = ('H264' if p.video[0].codec == 'AVC1' else p.video[0].codec)
# Audio codec
ac = p.audio[0].codec
@ -708,19 +757,26 @@ class Scanner(Plugin):
if not file_size: file_size = {}
try:
return (file_size.get('min', 0) * 1048576) < os.path.getsize(file) < (file_size.get('max', 100000) * 1048576)
return file_size.get('min', 0) < self.getFileSize(file) < file_size.get('max', 100000)
except:
log.error('Couldn\'t get filesize of %s.', file)
return False
def createStringIdentifier(self, file_path, folder = '', exclude_filename = False):
def getFileSize(self, file):
try:
return os.path.getsize(file) / 1024 / 1024
except:
return None
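Note that getFileSize returns megabytes, so the rewritten filesizeBetween now compares MB against MB. A small usage sketch, assuming self.file_sizes is configured in megabytes:

    # Assumption: limits are expressed in MB, matching getFileSize's return value.
    file_sizes = {'movie': {'min': 300, 'max': 100000}}

    # Before: os.path.getsize(file) in bytes was compared against min/max * 1048576.
    # Now: getFileSize(file) already yields MB, so min/max are used directly.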
year = self.findYear(file_path)
def createStringIdentifier(self, file_path, folder = '', exclude_filename = False):
identifier = file_path.replace(folder, '').lstrip(os.path.sep) # root folder
identifier = os.path.splitext(identifier)[0] # ext
# Make sure the identifier is lower case, as all regexes use lower case tags
identifier = identifier.lower()
try:
path_split = splitString(identifier, os.path.sep)
identifier = path_split[-2] if len(path_split) > 1 and len(path_split[-2]) > len(path_split[-1]) else path_split[-1] # Only get filename
@ -735,8 +791,13 @@ class Scanner(Plugin):
# remove cptag
identifier = self.removeCPTag(identifier)
# groups, release tags, scenename cleaner, regex isn't correct
identifier = re.sub(self.clean, '::', simplifyString(identifier)).strip(':')
# simplify the string
identifier = simplifyString(identifier)
year = self.findYear(file_path)
# groups, release tags, scenename cleaner
identifier = re.sub(self.clean, '::', identifier).strip(':')
# Year
if year and identifier[:4] != year:
@ -785,6 +846,16 @@ class Scanner(Plugin):
except:
return ''
def getResolution(self, filename):
try:
for key in self.resolutions:
if key in filename.lower() and key != 'default':
return self.resolutions[key]
except:
pass
return self.resolutions['default']
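getResolution falls back to a self.resolutions table when enzyme reports no dimensions; a hypothetical shape of that table (the real one is defined higher up in the scanner and may include more keys):

    # Assumption: keys are filename tokens, 'default' is the catch-all entry,
    # and the values are merged straight into the file's metadata dict.
    resolutions = {
        '1080p': {'resolution_width': 1920, 'resolution_height': 1080},
        '720p': {'resolution_width': 1280, 'resolution_height': 720},
        'default': {'resolution_width': 720, 'resolution_height': 480},
    }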
def getGroup(self, file):
try:
match = re.findall('\-([A-Z0-9]+)[\.\/]', file, re.I)

10
couchpotato/core/plugins/subtitle.py

@ -32,7 +32,7 @@ class Subtitle(Plugin):
for lang in self.getLanguages():
if lang not in available_languages:
download = subliminal.download_subtitles(files, multi = True, force = False, languages = [lang], services = self.services, cache_dir = Env.get('cache_dir'))
download = subliminal.download_subtitles(files, multi = True, force = self.conf('force'), languages = [lang], services = self.services, cache_dir = Env.get('cache_dir'))
for subtitle in download:
downloaded.extend(download[subtitle])
@ -72,6 +72,14 @@ config = [{
'name': 'languages',
'description': ('Comma separated, 2 letter country code.', 'Example: en, nl. See the codes at <a href="http://en.wikipedia.org/wiki/List_of_ISO_639-1_codes">on Wikipedia</a>'),
},
{
'advanced': True,
'name': 'force',
'label': 'Force',
'description': ('Force download all languages (including embedded).', 'This will also <strong>overwrite</strong> all existing subtitles.'),
'default': False,
'type': 'bool',
},
],
},
],

1
couchpotato/core/plugins/userscript/static/userscript.js

@ -2,6 +2,7 @@ Page.Userscript = new Class({
Extends: PageBase,
order: 80,
name: 'userscript',
has_tab: false,

5
couchpotato/core/plugins/wizard/static/wizard.js

@ -2,6 +2,7 @@ Page.Wizard = new Class({
Extends: Page.Settings,
order: 70,
name: 'wizard',
has_tab: false,
wizard_only: true,
@ -89,7 +90,7 @@ Page.Wizard = new Class({
self.parent(action, params);
self.addEvent('create', function(){
self.order();
self.orderGroups();
});
self.initialized = true;
@ -105,7 +106,7 @@ Page.Wizard = new Class({
}).delay(1)
},
order: function(){
orderGroups: function(){
var self = this;
var form = self.el.getElement('.uniForm');

5
couchpotato/core/settings.py

@ -1,5 +1,4 @@
from __future__ import with_statement
import traceback
import ConfigParser
from hashlib import md5
@ -49,7 +48,7 @@ class Settings(object):
'desc': 'Save setting to config file (settings.conf)',
'params': {
'section': {'desc': 'The section name in settings.conf'},
'option': {'desc': 'The option name'},
'name': {'desc': 'The option name'},
'value': {'desc': 'The value you want to save'},
}
})
@ -234,7 +233,7 @@ class Settings(object):
propert = db.get('property', identifier, with_doc = True)
prop = propert['doc']['value']
except:
self.log.debug('Property "%s" doesn\'t exist: %s', (identifier, traceback.format_exc(0)))
pass # self.log.debug('Property "%s" doesn\'t exist: %s', (identifier, traceback.format_exc(0)))
return prop

Some files were not shown because too many files changed in this diff
