Browse Source

Merge branch 'refs/heads/develop' into tv

Conflicts:
	couchpotato/core/helpers/variable.py
	couchpotato/core/media/_base/searcher/main.py
	couchpotato/core/media/movie/searcher/main.py
	couchpotato/core/plugins/quality/main.py
	couchpotato/core/plugins/release/main.py
	couchpotato/core/plugins/renamer/main.py
pull/2523/head
Ruud 12 years ago
parent
commit
3e035f84b1
  1. 1
      couchpotato/api.py
  2. 2
      couchpotato/core/_base/clientscript/main.py
  3. 2
      couchpotato/core/_base/scheduler/main.py
  4. 16
      couchpotato/core/_base/updater/main.py
  5. 2
      couchpotato/core/_base/updater/static/updater.js
  6. 35
      couchpotato/core/downloaders/base.py
  7. 2
      couchpotato/core/downloaders/blackhole/__init__.py
  8. 104
      couchpotato/core/downloaders/deluge/main.py
  9. 64
      couchpotato/core/downloaders/nzbget/main.py
  10. 34
      couchpotato/core/downloaders/nzbvortex/main.py
  11. 12
      couchpotato/core/downloaders/rtorrent/__init__.py
  12. 74
      couchpotato/core/downloaders/rtorrent/main.py
  13. 53
      couchpotato/core/downloaders/sabnzbd/main.py
  14. 7
      couchpotato/core/downloaders/synology/main.py
  15. 60
      couchpotato/core/downloaders/transmission/main.py
  16. 5
      couchpotato/core/downloaders/utorrent/__init__.py
  17. 152
      couchpotato/core/downloaders/utorrent/main.py
  18. 14
      couchpotato/core/helpers/encoding.py
  19. 12
      couchpotato/core/helpers/variable.py
  20. 2
      couchpotato/core/media/__init__.py
  21. 2
      couchpotato/core/media/_base/media/main.py
  22. 2
      couchpotato/core/media/_base/searcher/__init__.py
  23. 2
      couchpotato/core/media/_base/searcher/main.py
  24. 51
      couchpotato/core/media/movie/_base/main.py
  25. 7
      couchpotato/core/media/movie/_base/static/list.js
  26. 22
      couchpotato/core/media/movie/_base/static/movie.actions.js
  27. 2
      couchpotato/core/media/movie/_base/static/movie.css
  28. 83
      couchpotato/core/media/movie/_base/static/movie.js
  29. 2
      couchpotato/core/media/movie/_base/static/search.js
  30. 2
      couchpotato/core/media/movie/library/movie/main.py
  31. 12
      couchpotato/core/media/movie/searcher/main.py
  32. 47
      couchpotato/core/media/movie/suggestion/static/suggest.css
  33. 12
      couchpotato/core/media/movie/suggestion/static/suggest.js
  34. 2
      couchpotato/core/notifications/base.py
  35. 11
      couchpotato/core/notifications/core/main.py
  36. 10
      couchpotato/core/notifications/core/static/notification.js
  37. 11
      couchpotato/core/notifications/email/__init__.py
  38. 13
      couchpotato/core/notifications/email/main.py
  39. 39
      couchpotato/core/notifications/notifo/main.py
  40. 85
      couchpotato/core/notifications/plex/client.py
  41. 205
      couchpotato/core/notifications/plex/main.py
  42. 114
      couchpotato/core/notifications/plex/server.py
  43. 17
      couchpotato/core/notifications/pushbullet/__init__.py
  44. 86
      couchpotato/core/notifications/pushbullet/main.py
  45. 52
      couchpotato/core/notifications/xmpp/__init__.py
  46. 43
      couchpotato/core/notifications/xmpp/main.py
  47. 6
      couchpotato/core/plugins/base.py
  48. 1
      couchpotato/core/plugins/manage/main.py
  49. 167
      couchpotato/core/plugins/quality/main.py
  50. 174
      couchpotato/core/plugins/release/main.py
  51. 1
      couchpotato/core/plugins/renamer/__init__.py
  52. 460
      couchpotato/core/plugins/renamer/main.py
  53. 118
      couchpotato/core/plugins/scanner/main.py
  54. 7
      couchpotato/core/plugins/score/main.py
  55. 40
      couchpotato/core/plugins/score/scores.py
  56. 8
      couchpotato/core/plugins/subtitle/main.py
  57. 12
      couchpotato/core/plugins/userscript/static/userscript.css
  58. 2
      couchpotato/core/plugins/userscript/static/userscript.js
  59. 3
      couchpotato/core/providers/automation/flixster/main.py
  60. 9
      couchpotato/core/providers/automation/imdb/__init__.py
  61. 11
      couchpotato/core/providers/automation/imdb/main.py
  62. 3
      couchpotato/core/providers/automation/itunes/main.py
  63. 6
      couchpotato/core/providers/base.py
  64. 16
      couchpotato/core/providers/info/_modifier/main.py
  65. 13
      couchpotato/core/providers/info/couchpotatoapi/main.py
  66. 1
      couchpotato/core/providers/info/omdbapi/main.py
  67. 1
      couchpotato/core/providers/info/themoviedb/main.py
  68. 7
      couchpotato/core/providers/metadata/xbmc/main.py
  69. 2
      couchpotato/core/providers/nzb/binsearch/main.py
  70. 4
      couchpotato/core/providers/nzb/newznab/main.py
  71. 8
      couchpotato/core/providers/nzb/omgwtfnzbs/main.py
  72. 43
      couchpotato/core/providers/torrent/base.py
  73. 12
      couchpotato/core/providers/torrent/bithdtv/__init__.py
  74. 88
      couchpotato/core/providers/torrent/bithdtv/main.py
  75. 6
      couchpotato/core/providers/torrent/kickasstorrents/__init__.py
  76. 27
      couchpotato/core/providers/torrent/kickasstorrents/main.py
  77. 79
      couchpotato/core/providers/torrent/scenehd/main.py
  78. 41
      couchpotato/core/providers/torrent/thepiratebay/main.py
  79. 2
      couchpotato/core/providers/torrent/yify/main.py
  80. 6
      couchpotato/core/providers/userscript/flickchart/__init__.py
  81. 30
      couchpotato/core/providers/userscript/flickchart/main.py
  82. 76
      couchpotato/static/scripts/couchpotato.js
  83. 56
      couchpotato/static/scripts/library/Array.stableSort.js
  84. 955
      couchpotato/static/scripts/library/async.js
  85. 2
      couchpotato/static/scripts/page/about.js
  86. 15
      couchpotato/static/scripts/page/home.js
  87. 19
      couchpotato/static/scripts/page/manage.js
  88. 14
      couchpotato/static/scripts/page/settings.js
  89. 2
      couchpotato/templates/index.html
  90. 2
      init/fedora
  91. 2
      init/ubuntu
  92. 4
      libs/apscheduler/__init__.py
  93. 31
      libs/apscheduler/job.py
  94. 2
      libs/apscheduler/jobstores/ram_store.py
  95. 91
      libs/apscheduler/jobstores/redis_store.py
  96. 5
      libs/apscheduler/jobstores/shelve_store.py
  97. 18
      libs/apscheduler/jobstores/sqlalchemy_store.py
  98. 70
      libs/apscheduler/scheduler.py
  99. 16
      libs/apscheduler/triggers/cron/__init__.py
  100. 18
      libs/apscheduler/triggers/cron/expressions.py

1
couchpotato/api.py

@ -110,6 +110,7 @@ class ApiHandler(RequestHandler):
if jsonp_callback: if jsonp_callback:
self.write(str(jsonp_callback) + '(' + json.dumps(result) + ')') self.write(str(jsonp_callback) + '(' + json.dumps(result) + ')')
self.set_header("Content-Type", "text/javascript")
elif isinstance(result, tuple) and result[0] == 'redirect': elif isinstance(result, tuple) and result[0] == 'redirect':
self.redirect(result[1]) self.redirect(result[1])
else: else:

2
couchpotato/core/_base/clientscript/main.py

@ -34,6 +34,8 @@ class ClientScript(Plugin):
'scripts/library/question.js', 'scripts/library/question.js',
'scripts/library/scrollspy.js', 'scripts/library/scrollspy.js',
'scripts/library/spin.js', 'scripts/library/spin.js',
'scripts/library/Array.stableSort.js',
'scripts/library/async.js',
'scripts/couchpotato.js', 'scripts/couchpotato.js',
'scripts/api.js', 'scripts/api.js',
'scripts/library/history.js', 'scripts/library/history.js',

2
couchpotato/core/_base/scheduler/main.py

@ -31,8 +31,8 @@ class Scheduler(Plugin):
pass pass
def doShutdown(self): def doShutdown(self):
super(Scheduler, self).doShutdown()
self.stop() self.stop()
return super(Scheduler, self).doShutdown()
def stop(self): def stop(self):
if self.started: if self.started:

16
couchpotato/core/_base/updater/main.py

@ -298,6 +298,7 @@ class SourceUpdater(BaseUpdater):
def replaceWith(self, path): def replaceWith(self, path):
app_dir = ss(Env.get('app_dir')) app_dir = ss(Env.get('app_dir'))
data_dir = ss(Env.get('data_dir'))
# Get list of files we want to overwrite # Get list of files we want to overwrite
self.deletePyc() self.deletePyc()
@ -329,12 +330,15 @@ class SourceUpdater(BaseUpdater):
log.error('Failed overwriting file "%s": %s', (tofile, traceback.format_exc())) log.error('Failed overwriting file "%s": %s', (tofile, traceback.format_exc()))
return False return False
if Env.get('app_dir') not in Env.get('data_dir'): for still_exists in existing_files:
for still_exists in existing_files:
try: if data_dir in still_exists:
os.remove(still_exists) continue
except:
log.error('Failed removing non-used file: %s', traceback.format_exc()) try:
os.remove(still_exists)
except:
log.error('Failed removing non-used file: %s', traceback.format_exc())
return True return True

2
couchpotato/core/_base/updater/static/updater.js

@ -24,7 +24,7 @@ var UpdaterBase = new Class({
self.doUpdate(); self.doUpdate();
else { else {
App.unBlockPage(); App.unBlockPage();
App.fireEvent('message', 'No updates available'); App.on('message', 'No updates available');
} }
} }
}) })

35
couchpotato/core/downloaders/base.py

@ -66,36 +66,36 @@ class Downloader(Provider):
def getAllDownloadStatus(self): def getAllDownloadStatus(self):
return return
def _removeFailed(self, item): def _removeFailed(self, release_download):
if self.isDisabled(manual = True, data = {}): if self.isDisabled(manual = True, data = {}):
return return
if item and item.get('downloader') == self.getName(): if release_download and release_download.get('downloader') == self.getName():
if self.conf('delete_failed'): if self.conf('delete_failed'):
return self.removeFailed(item) return self.removeFailed(release_download)
return False return False
return return
def removeFailed(self, item): def removeFailed(self, release_download):
return return
def _processComplete(self, item): def _processComplete(self, release_download):
if self.isDisabled(manual = True, data = {}): if self.isDisabled(manual = True, data = {}):
return return
if item and item.get('downloader') == self.getName(): if release_download and release_download.get('downloader') == self.getName():
if self.conf('remove_complete', default = False): if self.conf('remove_complete', default = False):
return self.processComplete(item = item, delete_files = self.conf('delete_files', default = False)) return self.processComplete(release_download = release_download, delete_files = self.conf('delete_files', default = False))
return False return False
return return
def processComplete(self, item, delete_files): def processComplete(self, release_download, delete_files):
return return
def isCorrectProtocol(self, item_protocol): def isCorrectProtocol(self, protocol):
is_correct = item_protocol in self.protocol is_correct = protocol in self.protocol
if not is_correct: if not is_correct:
log.debug("Downloader doesn't support this protocol") log.debug("Downloader doesn't support this protocol")
@ -151,20 +151,20 @@ class Downloader(Provider):
(d_manual and manual or d_manual is False) and \ (d_manual and manual or d_manual is False) and \
(not data or self.isCorrectProtocol(data.get('protocol'))) (not data or self.isCorrectProtocol(data.get('protocol')))
def _pause(self, item, pause = True): def _pause(self, release_download, pause = True):
if self.isDisabled(manual = True, data = {}): if self.isDisabled(manual = True, data = {}):
return return
if item and item.get('downloader') == self.getName(): if release_download and release_download.get('downloader') == self.getName():
self.pause(item, pause) self.pause(release_download, pause)
return True return True
return False return False
def pause(self, item, pause): def pause(self, release_download, pause):
return return
class StatusList(list): class ReleaseDownloadList(list):
provider = None provider = None
@ -173,7 +173,7 @@ class StatusList(list):
self.provider = provider self.provider = provider
self.kwargs = kwargs self.kwargs = kwargs
super(StatusList, self).__init__() super(ReleaseDownloadList, self).__init__()
def extend(self, results): def extend(self, results):
for r in results: for r in results:
@ -181,7 +181,7 @@ class StatusList(list):
def append(self, result): def append(self, result):
new_result = self.fillResult(result) new_result = self.fillResult(result)
super(StatusList, self).append(new_result) super(ReleaseDownloadList, self).append(new_result)
def fillResult(self, result): def fillResult(self, result):
@ -190,6 +190,7 @@ class StatusList(list):
'status': 'busy', 'status': 'busy',
'downloader': self.provider.getName(), 'downloader': self.provider.getName(),
'folder': '', 'folder': '',
'files': '',
} }
return mergeDicts(defaults, result) return mergeDicts(defaults, result)

2
couchpotato/core/downloaders/blackhole/__init__.py

@ -13,7 +13,7 @@ config = [{
'list': 'download_providers', 'list': 'download_providers',
'name': 'blackhole', 'name': 'blackhole',
'label': 'Black hole', 'label': 'Black hole',
'description': 'Download the NZB/Torrent to a specific folder.', 'description': 'Download the NZB/Torrent to a specific folder. <em>Note: Seeding and copying/linking features do <strong>not</strong> work with Black hole</em>.',
'wizard': True, 'wizard': True,
'options': [ 'options': [
{ {

104
couchpotato/core/downloaders/deluge/main.py

@ -1,12 +1,14 @@
from base64 import b64encode from base64 import b64encode, b16encode, b32decode
from couchpotato.core.downloaders.base import Downloader, StatusList from bencode import bencode as benc, bdecode
from couchpotato.core.helpers.encoding import isInt, ss from couchpotato.core.downloaders.base import Downloader, ReleaseDownloadList
from couchpotato.core.helpers.encoding import isInt, sp
from couchpotato.core.helpers.variable import tryFloat from couchpotato.core.helpers.variable import tryFloat
from couchpotato.core.logger import CPLog from couchpotato.core.logger import CPLog
from couchpotato.environment import Env
from datetime import timedelta from datetime import timedelta
from hashlib import sha1
from synchronousdeluge import DelugeClient from synchronousdeluge import DelugeClient
import os.path import os.path
import re
import traceback import traceback
log = CPLog(__name__) log = CPLog(__name__)
@ -72,7 +74,7 @@ class Deluge(Downloader):
remote_torrent = self.drpc.add_torrent_magnet(data.get('url'), options) remote_torrent = self.drpc.add_torrent_magnet(data.get('url'), options)
else: else:
filename = self.createFileName(data, filedata, movie) filename = self.createFileName(data, filedata, movie)
remote_torrent = self.drpc.add_torrent_file(filename, b64encode(filedata), options) remote_torrent = self.drpc.add_torrent_file(filename, filedata, options)
if not remote_torrent: if not remote_torrent:
log.error('Failed sending torrent to Deluge') log.error('Failed sending torrent to Deluge')
@ -85,14 +87,10 @@ class Deluge(Downloader):
log.debug('Checking Deluge download status.') log.debug('Checking Deluge download status.')
if not os.path.isdir(Env.setting('from', 'renamer')):
log.error('Renamer "from" folder doesn\'t to exist.')
return
if not self.connect(): if not self.connect():
return False return False
statuses = StatusList(self) release_downloads = ReleaseDownloadList(self)
queue = self.drpc.get_alltorrents() queue = self.drpc.get_alltorrents()
@ -101,50 +99,55 @@ class Deluge(Downloader):
return False return False
for torrent_id in queue: for torrent_id in queue:
item = queue[torrent_id] torrent = queue[torrent_id]
log.debug('name=%s / id=%s / save_path=%s / move_completed_path=%s / hash=%s / progress=%s / state=%s / eta=%s / ratio=%s / stop_ratio=%s / is_seed=%s / is_finished=%s / paused=%s', (item['name'], item['hash'], item['save_path'], item['move_completed_path'], item['hash'], item['progress'], item['state'], item['eta'], item['ratio'], item['stop_ratio'], item['is_seed'], item['is_finished'], item['paused'])) log.debug('name=%s / id=%s / save_path=%s / move_completed_path=%s / hash=%s / progress=%s / state=%s / eta=%s / ratio=%s / stop_ratio=%s / is_seed=%s / is_finished=%s / paused=%s', (torrent['name'], torrent['hash'], torrent['save_path'], torrent['move_completed_path'], torrent['hash'], torrent['progress'], torrent['state'], torrent['eta'], torrent['ratio'], torrent['stop_ratio'], torrent['is_seed'], torrent['is_finished'], torrent['paused']))
# Deluge has no easy way to work out if a torrent is stalled or failing. # Deluge has no easy way to work out if a torrent is stalled or failing.
#status = 'failed' #status = 'failed'
status = 'busy' status = 'busy'
if item['is_seed'] and tryFloat(item['ratio']) < tryFloat(item['stop_ratio']): if torrent['is_seed'] and tryFloat(torrent['ratio']) < tryFloat(torrent['stop_ratio']):
# We have item['seeding_time'] to work out what the seeding time is, but we do not # We have torrent['seeding_time'] to work out what the seeding time is, but we do not
# have access to the downloader seed_time, as with deluge we have no way to pass it # have access to the downloader seed_time, as with deluge we have no way to pass it
# when the torrent is added. So Deluge will only look at the ratio. # when the torrent is added. So Deluge will only look at the ratio.
# See above comment in download(). # See above comment in download().
status = 'seeding' status = 'seeding'
elif item['is_seed'] and item['is_finished'] and item['paused'] and item['state'] == 'Paused': elif torrent['is_seed'] and torrent['is_finished'] and torrent['paused'] and torrent['state'] == 'Paused':
status = 'completed' status = 'completed'
download_dir = item['save_path'] download_dir = sp(torrent['save_path'])
if item['move_on_completed']: if torrent['move_on_completed']:
download_dir = item['move_completed_path'] download_dir = torrent['move_completed_path']
torrent_files = []
for file_item in torrent['files']:
torrent_files.append(sp(os.path.join(download_dir, file_item['path'])))
statuses.append({ release_downloads.append({
'id': item['hash'], 'id': torrent['hash'],
'name': item['name'], 'name': torrent['name'],
'status': status, 'status': status,
'original_status': item['state'], 'original_status': torrent['state'],
'seed_ratio': item['ratio'], 'seed_ratio': torrent['ratio'],
'timeleft': str(timedelta(seconds = item['eta'])), 'timeleft': str(timedelta(seconds = torrent['eta'])),
'folder': ss(os.path.join(download_dir, item['name'])), 'folder': sp(download_dir if len(torrent_files) == 1 else os.path.join(download_dir, torrent['name'])),
'files': '|'.join(torrent_files),
}) })
return statuses return release_downloads
def pause(self, item, pause = True): def pause(self, release_download, pause = True):
if pause: if pause:
return self.drpc.pause_torrent([item['id']]) return self.drpc.pause_torrent([release_download['id']])
else: else:
return self.drpc.resume_torrent([item['id']]) return self.drpc.resume_torrent([release_download['id']])
def removeFailed(self, item): def removeFailed(self, release_download):
log.info('%s failed downloading, deleting...', item['name']) log.info('%s failed downloading, deleting...', release_download['name'])
return self.drpc.remove_torrent(item['id'], True) return self.drpc.remove_torrent(release_download['id'], True)
def processComplete(self, item, delete_files = False): def processComplete(self, release_download, delete_files = False):
log.debug('Requesting Deluge to remove the torrent %s%s.', (item['name'], ' and cleanup the downloaded files' if delete_files else '')) log.debug('Requesting Deluge to remove the torrent %s%s.', (release_download['name'], ' and cleanup the downloaded files' if delete_files else ''))
return self.drpc.remove_torrent(item['id'], remove_local_data = delete_files) return self.drpc.remove_torrent(release_download['id'], remove_local_data = delete_files)
class DelugeRPC(object): class DelugeRPC(object):
@ -171,7 +174,10 @@ class DelugeRPC(object):
try: try:
self.connect() self.connect()
torrent_id = self.client.core.add_torrent_magnet(torrent, options).get() torrent_id = self.client.core.add_torrent_magnet(torrent, options).get()
if options['label']: if not torrent_id:
torrent_id = self._check_torrent(True, torrent)
if torrent_id and options['label']:
self.client.label.set_torrent(torrent_id, options['label']).get() self.client.label.set_torrent(torrent_id, options['label']).get()
except Exception, err: except Exception, err:
log.error('Failed to add torrent magnet %s: %s %s', (torrent, err, traceback.format_exc())) log.error('Failed to add torrent magnet %s: %s %s', (torrent, err, traceback.format_exc()))
@ -185,8 +191,11 @@ class DelugeRPC(object):
torrent_id = False torrent_id = False
try: try:
self.connect() self.connect()
torrent_id = self.client.core.add_torrent_file(filename, torrent, options).get() torrent_id = self.client.core.add_torrent_file(filename, b64encode(torrent), options).get()
if options['label']: if not torrent_id:
torrent_id = self._check_torrent(False, torrent)
if torrent_id and options['label']:
self.client.label.set_torrent(torrent_id, options['label']).get() self.client.label.set_torrent(torrent_id, options['label']).get()
except Exception, err: except Exception, err:
log.error('Failed to add torrent file %s: %s %s', (filename, err, traceback.format_exc())) log.error('Failed to add torrent file %s: %s %s', (filename, err, traceback.format_exc()))
@ -242,3 +251,22 @@ class DelugeRPC(object):
def disconnect(self): def disconnect(self):
self.client.disconnect() self.client.disconnect()
def _check_torrent(self, magnet, torrent):
# Torrent not added, check if it already existed.
if magnet:
torrent_hash = re.findall('urn:btih:([\w]{32,40})', torrent)[0]
else:
info = bdecode(torrent)["info"]
torrent_hash = sha1(benc(info)).hexdigest()
# Convert base 32 to hex
if len(torrent_hash) == 32:
torrent_hash = b16encode(b32decode(torrent_hash))
torrent_hash = torrent_hash.lower()
torrent_check = self.client.core.get_torrent_status(torrent_hash, {}).get()
if torrent_check['hash']:
return torrent_hash
return False

64
couchpotato/core/downloaders/nzbget/main.py

@ -1,6 +1,6 @@
from base64 import standard_b64encode from base64 import standard_b64encode
from couchpotato.core.downloaders.base import Downloader, StatusList from couchpotato.core.downloaders.base import Downloader, ReleaseDownloadList
from couchpotato.core.helpers.encoding import ss from couchpotato.core.helpers.encoding import ss, sp
from couchpotato.core.helpers.variable import tryInt, md5 from couchpotato.core.helpers.variable import tryInt, md5
from couchpotato.core.logger import CPLog from couchpotato.core.logger import CPLog
from datetime import timedelta from datetime import timedelta
@ -99,60 +99,60 @@ class NZBGet(Downloader):
log.error('Failed getting data: %s', traceback.format_exc(1)) log.error('Failed getting data: %s', traceback.format_exc(1))
return False return False
statuses = StatusList(self) release_downloads = ReleaseDownloadList(self)
for item in groups: for nzb in groups:
log.debug('Found %s in NZBGet download queue', item['NZBFilename']) log.debug('Found %s in NZBGet download queue', nzb['NZBFilename'])
try: try:
nzb_id = [param['Value'] for param in item['Parameters'] if param['Name'] == 'couchpotato'][0] nzb_id = [param['Value'] for param in nzb['Parameters'] if param['Name'] == 'couchpotato'][0]
except: except:
nzb_id = item['NZBID'] nzb_id = nzb['NZBID']
timeleft = -1 timeleft = -1
try: try:
if item['ActiveDownloads'] > 0 and item['DownloadRate'] > 0 and not (status['DownloadPaused'] or status['Download2Paused']): if nzb['ActiveDownloads'] > 0 and nzb['DownloadRate'] > 0 and not (status['DownloadPaused'] or status['Download2Paused']):
timeleft = str(timedelta(seconds = item['RemainingSizeMB'] / status['DownloadRate'] * 2 ^ 20)) timeleft = str(timedelta(seconds = nzb['RemainingSizeMB'] / status['DownloadRate'] * 2 ^ 20))
except: except:
pass pass
statuses.append({ release_downloads.append({
'id': nzb_id, 'id': nzb_id,
'name': item['NZBFilename'], 'name': nzb['NZBFilename'],
'original_status': 'DOWNLOADING' if item['ActiveDownloads'] > 0 else 'QUEUED', 'original_status': 'DOWNLOADING' if nzb['ActiveDownloads'] > 0 else 'QUEUED',
# Seems to have no native API function for time left. This will return the time left after NZBGet started downloading this item # Seems to have no native API function for time left. This will return the time left after NZBGet started downloading this item
'timeleft': timeleft, 'timeleft': timeleft,
}) })
for item in queue: # 'Parameters' is not passed in rpc.postqueue for nzb in queue: # 'Parameters' is not passed in rpc.postqueue
log.debug('Found %s in NZBGet postprocessing queue', item['NZBFilename']) log.debug('Found %s in NZBGet postprocessing queue', nzb['NZBFilename'])
statuses.append({ release_downloads.append({
'id': item['NZBID'], 'id': nzb['NZBID'],
'name': item['NZBFilename'], 'name': nzb['NZBFilename'],
'original_status': item['Stage'], 'original_status': nzb['Stage'],
'timeleft': str(timedelta(seconds = 0)) if not status['PostPaused'] else -1, 'timeleft': str(timedelta(seconds = 0)) if not status['PostPaused'] else -1,
}) })
for item in history: for nzb in history:
log.debug('Found %s in NZBGet history. ParStatus: %s, ScriptStatus: %s, Log: %s', (item['NZBFilename'] , item['ParStatus'], item['ScriptStatus'] , item['Log'])) log.debug('Found %s in NZBGet history. ParStatus: %s, ScriptStatus: %s, Log: %s', (nzb['NZBFilename'] , nzb['ParStatus'], nzb['ScriptStatus'] , nzb['Log']))
try: try:
nzb_id = [param['Value'] for param in item['Parameters'] if param['Name'] == 'couchpotato'][0] nzb_id = [param['Value'] for param in nzb['Parameters'] if param['Name'] == 'couchpotato'][0]
except: except:
nzb_id = item['NZBID'] nzb_id = nzb['NZBID']
statuses.append({ release_downloads.append({
'id': nzb_id, 'id': nzb_id,
'name': item['NZBFilename'], 'name': nzb['NZBFilename'],
'status': 'completed' if item['ParStatus'] in ['SUCCESS','NONE'] and item['ScriptStatus'] in ['SUCCESS','NONE'] else 'failed', 'status': 'completed' if nzb['ParStatus'] in ['SUCCESS', 'NONE'] and nzb['ScriptStatus'] in ['SUCCESS', 'NONE'] else 'failed',
'original_status': item['ParStatus'] + ', ' + item['ScriptStatus'], 'original_status': nzb['ParStatus'] + ', ' + nzb['ScriptStatus'],
'timeleft': str(timedelta(seconds = 0)), 'timeleft': str(timedelta(seconds = 0)),
'folder': ss(item['DestDir']) 'folder': sp(nzb['DestDir'])
}) })
return statuses return release_downloads
def removeFailed(self, item): def removeFailed(self, release_download):
log.info('%s failed downloading, deleting...', item['name']) log.info('%s failed downloading, deleting...', release_download['name'])
url = self.url % {'host': self.conf('host'), 'username': self.conf('username'), 'password': self.conf('password')} url = self.url % {'host': self.conf('host'), 'username': self.conf('username'), 'password': self.conf('password')}
@ -179,9 +179,9 @@ class NZBGet(Downloader):
for hist in history: for hist in history:
for param in hist['Parameters']: for param in hist['Parameters']:
if param['Name'] == 'couchpotato' and param['Value'] == item['id']: if param['Name'] == 'couchpotato' and param['Value'] == release_download['id']:
nzb_id = hist['ID'] nzb_id = hist['ID']
path = hist['DestDir'] path = hist['DestDir']
if nzb_id and path and rpc.editqueue('HistoryDelete', 0, "", [tryInt(nzb_id)]): if nzb_id and path and rpc.editqueue('HistoryDelete', 0, "", [tryInt(nzb_id)]):
shutil.rmtree(path, True) shutil.rmtree(path, True)

34
couchpotato/core/downloaders/nzbvortex/main.py

@ -1,6 +1,6 @@
from base64 import b64encode from base64 import b64encode
from couchpotato.core.downloaders.base import Downloader, StatusList from couchpotato.core.downloaders.base import Downloader, ReleaseDownloadList
from couchpotato.core.helpers.encoding import tryUrlencode, ss from couchpotato.core.helpers.encoding import tryUrlencode, sp
from couchpotato.core.helpers.variable import cleanHost from couchpotato.core.helpers.variable import cleanHost
from couchpotato.core.logger import CPLog from couchpotato.core.logger import CPLog
from urllib2 import URLError from urllib2 import URLError
@ -30,10 +30,10 @@ class NZBVortex(Downloader):
# Send the nzb # Send the nzb
try: try:
nzb_filename = self.createFileName(data, filedata, movie) nzb_filename = self.createFileName(data, filedata, movie)
self.call('nzb/add', params = {'file': (ss(nzb_filename), filedata)}, multipart = True) self.call('nzb/add', params = {'file': (nzb_filename, filedata)}, multipart = True)
raw_statuses = self.call('nzb') raw_statuses = self.call('nzb')
nzb_id = [item['id'] for item in raw_statuses.get('nzbs', []) if item['name'] == nzb_filename][0] nzb_id = [nzb['id'] for nzb in raw_statuses.get('nzbs', []) if nzb['name'] == nzb_filename][0]
return self.downloadReturnId(nzb_id) return self.downloadReturnId(nzb_id)
except: except:
log.error('Something went wrong sending the NZB file: %s', traceback.format_exc()) log.error('Something went wrong sending the NZB file: %s', traceback.format_exc())
@ -43,33 +43,33 @@ class NZBVortex(Downloader):
raw_statuses = self.call('nzb') raw_statuses = self.call('nzb')
statuses = StatusList(self) release_downloads = ReleaseDownloadList(self)
for item in raw_statuses.get('nzbs', []): for nzb in raw_statuses.get('nzbs', []):
# Check status # Check status
status = 'busy' status = 'busy'
if item['state'] == 20: if nzb['state'] == 20:
status = 'completed' status = 'completed'
elif item['state'] in [21, 22, 24]: elif nzb['state'] in [21, 22, 24]:
status = 'failed' status = 'failed'
statuses.append({ release_downloads.append({
'id': item['id'], 'id': nzb['id'],
'name': item['uiTitle'], 'name': nzb['uiTitle'],
'status': status, 'status': status,
'original_status': item['state'], 'original_status': nzb['state'],
'timeleft':-1, 'timeleft':-1,
'folder': ss(item['destinationPath']), 'folder': sp(nzb['destinationPath']),
}) })
return statuses return release_downloads
def removeFailed(self, item): def removeFailed(self, release_download):
log.info('%s failed downloading, deleting...', item['name']) log.info('%s failed downloading, deleting...', release_download['name'])
try: try:
self.call('nzb/%s/cancel' % item['id']) self.call('nzb/%s/cancel' % release_download['id'])
except: except:
log.error('Failed deleting: %s', traceback.format_exc(0)) log.error('Failed deleting: %s', traceback.format_exc(0))
return False return False

12
couchpotato/core/downloaders/rtorrent/__init__.py

@ -23,6 +23,8 @@ config = [{
{ {
'name': 'url', 'name': 'url',
'default': 'http://localhost:80/RPC2', 'default': 'http://localhost:80/RPC2',
'description': 'XML-RPC Endpoint URI. Usually <strong>scgi://localhost:5000</strong> '
'or <strong>http://localhost:80/RPC2</strong>'
}, },
{ {
'name': 'username', 'name': 'username',
@ -38,7 +40,7 @@ config = [{
{ {
'name': 'directory', 'name': 'directory',
'type': 'directory', 'type': 'directory',
'description': 'Directory where rtorrent should download the files too.', 'description': 'Download to this directory. Keep empty for default rTorrent download directory.',
}, },
{ {
'name': 'remove_complete', 'name': 'remove_complete',
@ -49,14 +51,6 @@ config = [{
'description': 'Remove the torrent after it finishes seeding.', 'description': 'Remove the torrent after it finishes seeding.',
}, },
{ {
'name': 'append_label',
'label': 'Append Label',
'default': False,
'advanced': True,
'type': 'bool',
'description': 'Append label to download location. Requires you to set the download location above.',
},
{
'name': 'delete_files', 'name': 'delete_files',
'label': 'Remove files', 'label': 'Remove files',
'default': True, 'default': True,

74
couchpotato/core/downloaders/rtorrent/main.py

@ -1,13 +1,13 @@
from base64 import b16encode, b32decode from base64 import b16encode, b32decode
from bencode import bencode, bdecode from bencode import bencode, bdecode
from couchpotato.core.downloaders.base import Downloader, StatusList from couchpotato.core.downloaders.base import Downloader, ReleaseDownloadList
from couchpotato.core.helpers.encoding import ss from couchpotato.core.helpers.encoding import sp
from couchpotato.core.logger import CPLog from couchpotato.core.logger import CPLog
from datetime import timedelta from datetime import timedelta
from hashlib import sha1 from hashlib import sha1
from rtorrent import RTorrent from rtorrent import RTorrent
from rtorrent.err import MethodError from rtorrent.err import MethodError
import shutil, os import os
log = CPLog(__name__) log = CPLog(__name__)
@ -71,7 +71,7 @@ class rTorrent(Downloader):
group.set_command() group.set_command()
group.disable() group.disable()
except MethodError, err: except MethodError, err:
log.error('Unable to set group options: %s', err.message) log.error('Unable to set group options: %s', err.msg)
return False return False
return True return True
@ -125,9 +125,7 @@ class rTorrent(Downloader):
if self.conf('label'): if self.conf('label'):
torrent.set_custom(1, self.conf('label')) torrent.set_custom(1, self.conf('label'))
if self.conf('directory') and self.conf('append_label'): if self.conf('directory'):
torrent.set_directory(os.path.join(self.conf('directory'), self.conf('label')))
elif self.conf('directory'):
torrent.set_directory(self.conf('directory')) torrent.set_directory(self.conf('directory'))
# Set Ratio Group # Set Ratio Group
@ -151,37 +149,42 @@ class rTorrent(Downloader):
try: try:
torrents = self.rt.get_torrents() torrents = self.rt.get_torrents()
statuses = StatusList(self) release_downloads = ReleaseDownloadList(self)
for torrent in torrents:
torrent_files = []
for file_item in torrent.get_files():
torrent_files.append(sp(os.path.join(torrent.directory, file_item.path)))
for item in torrents:
status = 'busy' status = 'busy'
if item.complete: if torrent.complete:
if item.active: if torrent.active:
status = 'seeding' status = 'seeding'
else: else:
status = 'completed' status = 'completed'
statuses.append({ release_downloads.append({
'id': item.info_hash, 'id': torrent.info_hash,
'name': item.name, 'name': torrent.name,
'status': status, 'status': status,
'seed_ratio': item.ratio, 'seed_ratio': torrent.ratio,
'original_status': item.state, 'original_status': torrent.state,
'timeleft': str(timedelta(seconds = float(item.left_bytes) / item.down_rate)) if item.down_rate > 0 else -1, 'timeleft': str(timedelta(seconds = float(torrent.left_bytes) / torrent.down_rate)) if torrent.down_rate > 0 else -1,
'folder': ss(item.directory) 'folder': sp(torrent.directory),
'files': '|'.join(torrent_files)
}) })
return statuses return release_downloads
except Exception, err: except Exception, err:
log.error('Failed to get status from rTorrent: %s', err) log.error('Failed to get status from rTorrent: %s', err)
return False return False
def pause(self, download_info, pause = True): def pause(self, release_download, pause = True):
if not self.connect(): if not self.connect():
return False return False
torrent = self.rt.find_torrent(download_info['id']) torrent = self.rt.find_torrent(release_download['id'])
if torrent is None: if torrent is None:
return False return False
@ -189,23 +192,34 @@ class rTorrent(Downloader):
return torrent.pause() return torrent.pause()
return torrent.resume() return torrent.resume()
def removeFailed(self, item): def removeFailed(self, release_download):
log.info('%s failed downloading, deleting...', item['name']) log.info('%s failed downloading, deleting...', release_download['name'])
return self.processComplete(item, delete_files = True) return self.processComplete(release_download, delete_files = True)
def processComplete(self, item, delete_files): def processComplete(self, release_download, delete_files):
log.debug('Requesting rTorrent to remove the torrent %s%s.', log.debug('Requesting rTorrent to remove the torrent %s%s.',
(item['name'], ' and cleanup the downloaded files' if delete_files else '')) (release_download['name'], ' and cleanup the downloaded files' if delete_files else ''))
if not self.connect(): if not self.connect():
return False return False
torrent = self.rt.find_torrent(item['id']) torrent = self.rt.find_torrent(release_download['id'])
if torrent is None: if torrent is None:
return False return False
torrent.erase() # just removes the torrent, doesn't delete data
if delete_files: if delete_files:
shutil.rmtree(item['folder'], True) for file_item in torrent.get_files(): # will only delete files, not dir/sub-dir
os.unlink(os.path.join(torrent.directory, file_item.path))
if torrent.is_multi_file() and torrent.directory.endswith(torrent.name):
# Remove empty directories bottom up
try:
for path, _, _ in os.walk(torrent.directory, topdown = False):
os.rmdir(path)
except OSError:
log.info('Directory "%s" contains extra files, unable to remove', torrent.directory)
torrent.erase() # just removes the torrent, doesn't delete data
return True return True

53
couchpotato/core/downloaders/sabnzbd/main.py

@ -1,11 +1,12 @@
from couchpotato.core.downloaders.base import Downloader, StatusList from couchpotato.core.downloaders.base import Downloader, ReleaseDownloadList
from couchpotato.core.helpers.encoding import tryUrlencode, ss from couchpotato.core.helpers.encoding import tryUrlencode, ss, sp
from couchpotato.core.helpers.variable import cleanHost, mergeDicts from couchpotato.core.helpers.variable import cleanHost, mergeDicts
from couchpotato.core.logger import CPLog from couchpotato.core.logger import CPLog
from couchpotato.environment import Env from couchpotato.environment import Env
from datetime import timedelta from datetime import timedelta
from urllib2 import URLError from urllib2 import URLError
import json import json
import os
import traceback import traceback
log = CPLog(__name__) log = CPLog(__name__)
@ -86,58 +87,58 @@ class Sabnzbd(Downloader):
log.error('Failed getting history json: %s', traceback.format_exc(1)) log.error('Failed getting history json: %s', traceback.format_exc(1))
return False return False
statuses = StatusList(self) release_downloads = ReleaseDownloadList(self)
# Get busy releases # Get busy releases
for item in queue.get('slots', []): for nzb in queue.get('slots', []):
status = 'busy' status = 'busy'
if 'ENCRYPTED / ' in item['filename']: if 'ENCRYPTED / ' in nzb['filename']:
status = 'failed' status = 'failed'
statuses.append({ release_downloads.append({
'id': item['nzo_id'], 'id': nzb['nzo_id'],
'name': item['filename'], 'name': nzb['filename'],
'status': status, 'status': status,
'original_status': item['status'], 'original_status': nzb['status'],
'timeleft': item['timeleft'] if not queue['paused'] else -1, 'timeleft': nzb['timeleft'] if not queue['paused'] else -1,
}) })
# Get old releases # Get old releases
for item in history.get('slots', []): for nzb in history.get('slots', []):
status = 'busy' status = 'busy'
if item['status'] == 'Failed' or (item['status'] == 'Completed' and item['fail_message'].strip()): if nzb['status'] == 'Failed' or (nzb['status'] == 'Completed' and nzb['fail_message'].strip()):
status = 'failed' status = 'failed'
elif item['status'] == 'Completed': elif nzb['status'] == 'Completed':
status = 'completed' status = 'completed'
statuses.append({ release_downloads.append({
'id': item['nzo_id'], 'id': nzb['nzo_id'],
'name': item['name'], 'name': nzb['name'],
'status': status, 'status': status,
'original_status': item['status'], 'original_status': nzb['status'],
'timeleft': str(timedelta(seconds = 0)), 'timeleft': str(timedelta(seconds = 0)),
'folder': ss(item['storage']), 'folder': sp(os.path.dirname(nzb['storage']) if os.path.isfile(nzb['storage']) else nzb['storage']),
}) })
return statuses return release_downloads
def removeFailed(self, item): def removeFailed(self, release_download):
log.info('%s failed downloading, deleting...', item['name']) log.info('%s failed downloading, deleting...', release_download['name'])
try: try:
self.call({ self.call({
'mode': 'queue', 'mode': 'queue',
'name': 'delete', 'name': 'delete',
'del_files': '1', 'del_files': '1',
'value': item['id'] 'value': release_download['id']
}, use_json = False) }, use_json = False)
self.call({ self.call({
'mode': 'history', 'mode': 'history',
'name': 'delete', 'name': 'delete',
'del_files': '1', 'del_files': '1',
'value': item['id'] 'value': release_download['id']
}, use_json = False) }, use_json = False)
except: except:
log.error('Failed deleting: %s', traceback.format_exc(0)) log.error('Failed deleting: %s', traceback.format_exc(0))
@ -145,15 +146,15 @@ class Sabnzbd(Downloader):
return True return True
def processComplete(self, item, delete_files = False): def processComplete(self, release_download, delete_files = False):
log.debug('Requesting SabNZBd to remove the NZB %s.', item['name']) log.debug('Requesting SabNZBd to remove the NZB %s.', release_download['name'])
try: try:
self.call({ self.call({
'mode': 'history', 'mode': 'history',
'name': 'delete', 'name': 'delete',
'del_files': '0', 'del_files': '0',
'value': item['id'] 'value': release_download['id']
}, use_json = False) }, use_json = False)
except: except:
log.error('Failed removing: %s', traceback.format_exc(0)) log.error('Failed removing: %s', traceback.format_exc(0))

7
couchpotato/core/downloaders/synology/main.py

@ -3,6 +3,7 @@ from couchpotato.core.helpers.encoding import isInt
from couchpotato.core.logger import CPLog from couchpotato.core.logger import CPLog
import json import json
import requests import requests
import traceback
log = CPLog(__name__) log = CPLog(__name__)
@ -34,12 +35,12 @@ class Synology(Downloader):
elif data['protocol'] in ['nzb', 'torrent']: elif data['protocol'] in ['nzb', 'torrent']:
log.info('Adding %s' % data['protocol']) log.info('Adding %s' % data['protocol'])
if not filedata: if not filedata:
log.error('No %s data found' % data['protocol']) log.error('No %s data found', data['protocol'])
else: else:
filename = data['name'] + '.' + data['protocol'] filename = data['name'] + '.' + data['protocol']
response = srpc.create_task(filename = filename, filedata = filedata) response = srpc.create_task(filename = filename, filedata = filedata)
except Exception, err: except:
log.error('Exception while adding torrent: %s', err) log.error('Exception while adding torrent: %s', traceback.format_exc())
finally: finally:
return response return response

60
couchpotato/core/downloaders/transmission/main.py

@ -1,9 +1,8 @@
from base64 import b64encode from base64 import b64encode
from couchpotato.core.downloaders.base import Downloader, StatusList from couchpotato.core.downloaders.base import Downloader, ReleaseDownloadList
from couchpotato.core.helpers.encoding import isInt, ss from couchpotato.core.helpers.encoding import isInt, sp
from couchpotato.core.helpers.variable import tryInt, tryFloat from couchpotato.core.helpers.variable import tryInt, tryFloat
from couchpotato.core.logger import CPLog from couchpotato.core.logger import CPLog
from couchpotato.environment import Env
from datetime import timedelta from datetime import timedelta
import httplib import httplib
import json import json
@ -89,10 +88,10 @@ class Transmission(Downloader):
if not self.connect(): if not self.connect():
return False return False
statuses = StatusList(self) release_downloads = ReleaseDownloadList(self)
return_params = { return_params = {
'fields': ['id', 'name', 'hashString', 'percentDone', 'status', 'eta', 'isStalled', 'isFinished', 'downloadDir', 'uploadRatio', 'secondsSeeding', 'seedIdleLimit'] 'fields': ['id', 'name', 'hashString', 'percentDone', 'status', 'eta', 'isStalled', 'isFinished', 'downloadDir', 'uploadRatio', 'secondsSeeding', 'seedIdleLimit', 'files']
} }
queue = self.trpc.get_alltorrents(return_params) queue = self.trpc.get_alltorrents(return_params)
@ -100,47 +99,48 @@ class Transmission(Downloader):
log.debug('Nothing in queue or error') log.debug('Nothing in queue or error')
return False return False
for item in queue['torrents']: for torrent in queue['torrents']:
log.debug('name=%s / id=%s / downloadDir=%s / hashString=%s / percentDone=%s / status=%s / eta=%s / uploadRatio=%s / isFinished=%s', log.debug('name=%s / id=%s / downloadDir=%s / hashString=%s / percentDone=%s / status=%s / eta=%s / uploadRatio=%s / isFinished=%s',
(item['name'], item['id'], item['downloadDir'], item['hashString'], item['percentDone'], item['status'], item['eta'], item['uploadRatio'], item['isFinished'])) (torrent['name'], torrent['id'], torrent['downloadDir'], torrent['hashString'], torrent['percentDone'], torrent['status'], torrent['eta'], torrent['uploadRatio'], torrent['isFinished']))
if not os.path.isdir(Env.setting('from', 'renamer')): torrent_files = []
log.error('Renamer "from" folder doesn\'t to exist.') for file_item in torrent['files']:
return torrent_files.append(sp(os.path.join(torrent['downloadDir'], file_item['name'])))
status = 'busy' status = 'busy'
if item['isStalled'] and self.conf('stalled_as_failed'): if torrent.get('isStalled') and self.conf('stalled_as_failed'):
status = 'failed' status = 'failed'
elif item['status'] == 0 and item['percentDone'] == 1: elif torrent['status'] == 0 and torrent['percentDone'] == 1:
status = 'completed' status = 'completed'
elif item['status'] in [5, 6]: elif torrent['status'] in [5, 6]:
status = 'seeding' status = 'seeding'
statuses.append({ release_downloads.append({
'id': item['hashString'], 'id': torrent['hashString'],
'name': item['name'], 'name': torrent['name'],
'status': status, 'status': status,
'original_status': item['status'], 'original_status': torrent['status'],
'seed_ratio': item['uploadRatio'], 'seed_ratio': torrent['uploadRatio'],
'timeleft': str(timedelta(seconds = item['eta'])), 'timeleft': str(timedelta(seconds = torrent['eta'])),
'folder': ss(os.path.join(item['downloadDir'], item['name'])), 'folder': sp(torrent['downloadDir'] if len(torrent_files) == 1 else os.path.join(torrent['downloadDir'], torrent['name'])),
'files': '|'.join(torrent_files)
}) })
return statuses return release_downloads
def pause(self, item, pause = True): def pause(self, release_download, pause = True):
if pause: if pause:
return self.trpc.stop_torrent(item['id']) return self.trpc.stop_torrent(release_download['id'])
else: else:
return self.trpc.start_torrent(item['id']) return self.trpc.start_torrent(release_download['id'])
def removeFailed(self, item): def removeFailed(self, release_download):
log.info('%s failed downloading, deleting...', item['name']) log.info('%s failed downloading, deleting...', release_download['name'])
return self.trpc.remove_torrent(item['id'], True) return self.trpc.remove_torrent(release_download['id'], True)
def processComplete(self, item, delete_files = False): def processComplete(self, release_download, delete_files = False):
log.debug('Requesting Transmission to remove the torrent %s%s.', (item['name'], ' and cleanup the downloaded files' if delete_files else '')) log.debug('Requesting Transmission to remove the torrent %s%s.', (release_download['name'], ' and cleanup the downloaded files' if delete_files else ''))
return self.trpc.remove_torrent(item['id'], delete_files) return self.trpc.remove_torrent(release_download['id'], delete_files)
class TransmissionRPC(object): class TransmissionRPC(object):

5
couchpotato/core/downloaders/utorrent/__init__.py

@ -37,6 +37,11 @@ config = [{
'description': 'Label to add torrent as.', 'description': 'Label to add torrent as.',
}, },
{ {
'name': 'directory',
'type': 'directory',
'description': 'Download to this directory. Keep empty for default uTorrent download directory.',
},
{
'name': 'remove_complete', 'name': 'remove_complete',
'label': 'Remove torrent', 'label': 'Remove torrent',
'default': True, 'default': True,

152
couchpotato/core/downloaders/utorrent/main.py

@ -1,7 +1,7 @@
from base64 import b16encode, b32decode from base64 import b16encode, b32decode
from bencode import bencode as benc, bdecode from bencode import bencode as benc, bdecode
from couchpotato.core.downloaders.base import Downloader, StatusList from couchpotato.core.downloaders.base import Downloader, ReleaseDownloadList
from couchpotato.core.helpers.encoding import isInt, ss from couchpotato.core.helpers.encoding import isInt, ss, sp
from couchpotato.core.helpers.variable import tryInt, tryFloat from couchpotato.core.helpers.variable import tryInt, tryFloat
from couchpotato.core.logger import CPLog from couchpotato.core.logger import CPLog
from datetime import timedelta from datetime import timedelta
@ -77,6 +77,7 @@ class uTorrent(Downloader):
else: else:
info = bdecode(filedata)["info"] info = bdecode(filedata)["info"]
torrent_hash = sha1(benc(info)).hexdigest().upper() torrent_hash = sha1(benc(info)).hexdigest().upper()
torrent_filename = self.createFileName(data, filedata, movie) torrent_filename = self.createFileName(data, filedata, movie)
if data.get('seed_ratio'): if data.get('seed_ratio'):
@ -91,50 +92,23 @@ class uTorrent(Downloader):
if len(torrent_hash) == 32: if len(torrent_hash) == 32:
torrent_hash = b16encode(b32decode(torrent_hash)) torrent_hash = b16encode(b32decode(torrent_hash))
# Set download directory
if self.conf('directory'):
directory = self.conf('directory')
else:
directory = False
# Send request to uTorrent # Send request to uTorrent
if data.get('protocol') == 'torrent_magnet': if data.get('protocol') == 'torrent_magnet':
self.utorrent_api.add_torrent_uri(torrent_filename, data.get('url')) self.utorrent_api.add_torrent_uri(torrent_filename, data.get('url'), directory)
else: else:
self.utorrent_api.add_torrent_file(torrent_filename, filedata) self.utorrent_api.add_torrent_file(torrent_filename, filedata, directory)
# Change settings of added torrent # Change settings of added torrent
self.utorrent_api.set_torrent(torrent_hash, torrent_params) self.utorrent_api.set_torrent(torrent_hash, torrent_params)
if self.conf('paused', default = 0): if self.conf('paused', default = 0):
self.utorrent_api.pause_torrent(torrent_hash) self.utorrent_api.pause_torrent(torrent_hash)
count = 0
while True:
count += 1
# Check if torrent is saved in subfolder of torrent name
getfiles_data = self.utorrent_api.get_files(torrent_hash)
torrent_files = json.loads(getfiles_data)
if torrent_files.get('error'):
log.error('Error getting data from uTorrent: %s', torrent_files.get('error'))
return False
if (torrent_files.get('files') and len(torrent_files['files'][1]) > 0) or count > 60:
break
time.sleep(1)
# Torrent has only one file, so uTorrent wont create a folder for it
if len(torrent_files['files'][1]) == 1:
# Remove torrent and try again
self.utorrent_api.remove_torrent(torrent_hash, remove_data = True)
# Send request to uTorrent
if data.get('protocol') == 'torrent_magnet':
self.utorrent_api.add_torrent_uri(torrent_filename, data.get('url'), add_folder = True)
else:
self.utorrent_api.add_torrent_file(torrent_filename, filedata, add_folder = True)
# Change settings of added torrent
self.utorrent_api.set_torrent(torrent_hash, torrent_params)
if self.conf('paused', default = 0):
self.utorrent_api.pause_torrent(torrent_hash)
return self.downloadReturnId(torrent_hash) return self.downloadReturnId(torrent_hash)
def getAllDownloadStatus(self): def getAllDownloadStatus(self):
@ -144,7 +118,7 @@ class uTorrent(Downloader):
if not self.connect(): if not self.connect():
return False return False
statuses = StatusList(self) release_downloads = ReleaseDownloadList(self)
data = self.utorrent_api.get_status() data = self.utorrent_api.get_status()
if not data: if not data:
@ -161,52 +135,74 @@ class uTorrent(Downloader):
return False return False
# Get torrents # Get torrents
for item in queue['torrents']: for torrent in queue['torrents']:
#Get files of the torrent
torrent_files = []
try:
torrent_files = json.loads(self.utorrent_api.get_files(torrent[0]))
torrent_files = [sp(os.path.join(torrent[26], torrent_file[0])) for torrent_file in torrent_files['files'][1]]
except:
log.debug('Failed getting files from torrent: %s', torrent[2])
status_flags = {
"STARTED" : 1,
"CHECKING" : 2,
"CHECK-START" : 4,
"CHECKED" : 8,
"ERROR" : 16,
"PAUSED" : 32,
"QUEUED" : 64,
"LOADED" : 128
}
# item[21] = Paused | Downloading | Seeding | Finished
status = 'busy' status = 'busy'
if 'Finished' in item[21]: if (torrent[1] & status_flags["STARTED"] or torrent[1] & status_flags["QUEUED"]) and torrent[4] == 1000:
status = 'completed'
self.removeReadOnly(item[26])
elif 'Seeding' in item[21]:
status = 'seeding' status = 'seeding'
self.removeReadOnly(item[26]) elif (torrent[1] & status_flags["ERROR"]):
status = 'failed'
statuses.append({ elif torrent[4] == 1000:
'id': item[0], status = 'completed'
'name': item[2],
'status': status, if not status == 'busy':
'seed_ratio': float(item[7]) / 1000, self.removeReadOnly(torrent_files)
'original_status': item[1],
'timeleft': str(timedelta(seconds = item[10])), release_downloads.append({
'folder': ss(item[26]), 'id': torrent[0],
'name': torrent[2],
'status': status,
'seed_ratio': float(torrent[7]) / 1000,
'original_status': torrent[1],
'timeleft': str(timedelta(seconds = torrent[10])),
'folder': sp(torrent[26]),
'files': '|'.join(torrent_files)
}) })
return statuses return release_downloads
def pause(self, item, pause = True): def pause(self, release_download, pause = True):
if not self.connect(): if not self.connect():
return False return False
return self.utorrent_api.pause_torrent(item['id'], pause) return self.utorrent_api.pause_torrent(release_download['id'], pause)
def removeFailed(self, item): def removeFailed(self, release_download):
log.info('%s failed downloading, deleting...', item['name']) log.info('%s failed downloading, deleting...', release_download['name'])
if not self.connect(): if not self.connect():
return False return False
return self.utorrent_api.remove_torrent(item['id'], remove_data = True) return self.utorrent_api.remove_torrent(release_download['id'], remove_data = True)
def processComplete(self, item, delete_files = False): def processComplete(self, release_download, delete_files = False):
log.debug('Requesting uTorrent to remove the torrent %s%s.', (item['name'], ' and cleanup the downloaded files' if delete_files else '')) log.debug('Requesting uTorrent to remove the torrent %s%s.', (release_download['name'], ' and cleanup the downloaded files' if delete_files else ''))
if not self.connect(): if not self.connect():
return False return False
return self.utorrent_api.remove_torrent(item['id'], remove_data = delete_files) return self.utorrent_api.remove_torrent(release_download['id'], remove_data = delete_files)
def removeReadOnly(self, folder): def removeReadOnly(self, files):
#Removes all read-only flags in a folder #Removes all read-on ly flags in a for all files
if folder and os.path.isdir(folder): for filepath in files:
for root, folders, filenames in os.walk(folder): if os.path.isfile(filepath):
for filename in filenames: #Windows only needs S_IWRITE, but we bitwise-or with current perms to preserve other permission bits on Linux
os.chmod(os.path.join(root, filename), stat.S_IWRITE) os.chmod(filepath, stat.S_IWRITE | os.stat(filepath).st_mode)
class uTorrentAPI(object): class uTorrentAPI(object):
@ -260,13 +256,13 @@ class uTorrentAPI(object):
def add_torrent_uri(self, filename, torrent, add_folder = False): def add_torrent_uri(self, filename, torrent, add_folder = False):
action = "action=add-url&s=%s" % urllib.quote(torrent) action = "action=add-url&s=%s" % urllib.quote(torrent)
if add_folder: if add_folder:
action += "&path=%s" % urllib.quote(filename) action += "&path=%s" % urllib.quote(add_folder)
return self._request(action) return self._request(action)
def add_torrent_file(self, filename, filedata, add_folder = False): def add_torrent_file(self, filename, filedata, add_folder = False):
action = "action=add-file" action = "action=add-file"
if add_folder: if add_folder:
action += "&path=%s" % urllib.quote(filename) action += "&path=%s" % urllib.quote(add_folder)
return self._request(action, {"torrent_file": (ss(filename), filedata)}) return self._request(action, {"torrent_file": (ss(filename), filedata)})
def set_torrent(self, hash, params): def set_torrent(self, hash, params):
@ -304,13 +300,13 @@ class uTorrentAPI(object):
utorrent_settings = json.loads(self._request(action)) utorrent_settings = json.loads(self._request(action))
# Create settings dict # Create settings dict
for item in utorrent_settings['settings']: for setting in utorrent_settings['settings']:
if item[1] == 0: # int if setting[1] == 0: # int
settings_dict[item[0]] = int(item[2] if not item[2].strip() == '' else '0') settings_dict[setting[0]] = int(setting[2] if not setting[2].strip() == '' else '0')
elif item[1] == 1: # bool elif setting[1] == 1: # bool
settings_dict[item[0]] = True if item[2] == 'true' else False settings_dict[setting[0]] = True if setting[2] == 'true' else False
elif item[1] == 2: # string elif setting[1] == 2: # string
settings_dict[item[0]] = item[2] settings_dict[setting[0]] = setting[2]
#log.debug('uTorrent settings: %s', settings_dict) #log.debug('uTorrent settings: %s', settings_dict)

14
couchpotato/core/helpers/encoding.py

@ -1,6 +1,7 @@
from couchpotato.core.logger import CPLog from couchpotato.core.logger import CPLog
from string import ascii_letters, digits from string import ascii_letters, digits
from urllib import quote_plus from urllib import quote_plus
import os
import re import re
import traceback import traceback
import unicodedata import unicodedata
@ -47,6 +48,19 @@ def ss(original, *args):
log.debug('Failed ss encoding char, force UTF8: %s', e) log.debug('Failed ss encoding char, force UTF8: %s', e)
return u_original.encode('UTF-8') return u_original.encode('UTF-8')
def sp(path, *args):
# Standardise encoding, normalise case, path and strip trailing '/' or '\'
if not path or len(path) == 0:
return path
path = os.path.normcase(os.path.normpath(ss(path, *args)))
if path != os.path.sep:
path = path.rstrip(os.path.sep)
return path
def ek(original, *args): def ek(original, *args):
if isinstance(original, (str, unicode)): if isinstance(original, (str, unicode)):
try: try:

12
couchpotato/core/helpers/variable.py

@ -1,6 +1,6 @@
import collections
from couchpotato.core.helpers.encoding import simplifyString, toSafeString, ss from couchpotato.core.helpers.encoding import simplifyString, toSafeString, ss
from couchpotato.core.logger import CPLog from couchpotato.core.logger import CPLog
import collections
import hashlib import hashlib
import os.path import os.path
import platform import platform
@ -137,16 +137,18 @@ def getImdb(txt, check_inside = False, multiple = False):
output.close() output.close()
try: try:
ids = re.findall('(tt\d{7})', txt) ids = re.findall('(tt\d{4,7})', txt)
if multiple: if multiple:
return list(set(ids)) if len(ids) > 0 else [] return list(set(['tt%07d' % tryInt(x[2:]) for x in ids])) if len(ids) > 0 else []
return ids[0]
return 'tt%07d' % tryInt(ids[0][2:])
except IndexError: except IndexError:
pass pass
return False return False
def tryInt(s, default=0): def tryInt(s, default = 0):
try: return int(s) try: return int(s)
except: return default except: return default

2
couchpotato/core/media/__init__.py

@ -38,7 +38,7 @@ class MediaBase(Plugin):
def notifyFront(): def notifyFront():
db = get_session() db = get_session()
media = db.query(Media).filter_by(id = media_id).first() media = db.query(Media).filter_by(id = media_id).first()
fireEvent('notify.frontend', type = '%s.update.%s' % (media.type, media.id), data = media.to_dict(self.default_dict)) fireEvent('notify.frontend', type = '%s.update' % media.type, data = media.to_dict(self.default_dict))
db.expire_all() db.expire_all()
return notifyFront return notifyFront

2
couchpotato/core/media/_base/media/main.py

@ -34,7 +34,7 @@ class MediaPlugin(MediaBase):
for title in media.library.titles: for title in media.library.titles:
if title.default: default_title = title.title if title.default: default_title = title.title
fireEvent('notify.frontend', type = '%s.busy.%s' % (media.type, x), data = True) fireEvent('notify.frontend', type = '%s.busy' % media.type, data = {'id': x})
fireEventAsync('library.update.%s' % media.type, identifier = media.library.identifier, default_title = default_title, force = True, on_complete = self.createOnComplete(x)) fireEventAsync('library.update.%s' % media.type, identifier = media.library.identifier, default_title = default_title, force = True, on_complete = self.createOnComplete(x))
db.expire_all() db.expire_all()

2
couchpotato/core/media/_base/searcher/__init__.py

@ -47,7 +47,7 @@ config = [{
{ {
'name': 'ignored_words', 'name': 'ignored_words',
'label': 'Ignored', 'label': 'Ignored',
'default': 'german, dutch, french, truefrench, danish, swedish, spanish, italian, korean, dubbed, swesub, korsub, dksubs', 'default': 'german, dutch, french, truefrench, danish, swedish, spanish, italian, korean, dubbed, swesub, korsub, dksubs, vain',
'description': 'Ignores releases that match any of these sets. (Works like explained above)' 'description': 'Ignores releases that match any of these sets. (Works like explained above)'
}, },
], ],

2
couchpotato/core/media/_base/searcher/main.py

@ -260,7 +260,7 @@ class Searcher(SearcherBase):
except: pass except: pass
# Match longest name between [] # Match longest name between []
try: check_names.append(max(check_name.split('['), key = len)) try: check_names.append(max(re.findall(r'[^[]*\[([^]]*)\]', check_name), key = len).strip())
except: pass except: pass
for check_name in list(set(check_names)): for check_name in list(set(check_names)):

51
couchpotato/core/media/movie/_base/main.py

@ -1,14 +1,13 @@
from couchpotato import get_session from couchpotato import get_session
from couchpotato.api import addApiView from couchpotato.api import addApiView
from couchpotato.core.event import fireEvent, fireEventAsync, addEvent from couchpotato.core.event import fireEvent, fireEventAsync, addEvent
from couchpotato.core.helpers.encoding import toUnicode, simplifyString from couchpotato.core.helpers.encoding import toUnicode
from couchpotato.core.helpers.variable import getImdb, splitString, tryInt, \ from couchpotato.core.helpers.variable import getImdb, splitString, tryInt, \
mergeDicts mergeDicts
from couchpotato.core.logger import CPLog from couchpotato.core.logger import CPLog
from couchpotato.core.media.movie import MovieTypeBase from couchpotato.core.media.movie import MovieTypeBase
from couchpotato.core.settings.model import Library, LibraryTitle, Media, \ from couchpotato.core.settings.model import Library, LibraryTitle, Media, \
Release Release
from couchpotato.environment import Env
from sqlalchemy.orm import joinedload_all from sqlalchemy.orm import joinedload_all
from sqlalchemy.sql.expression import or_, asc, not_, desc from sqlalchemy.sql.expression import or_, asc, not_, desc
from string import ascii_lowercase from string import ascii_lowercase
@ -54,6 +53,7 @@ class MovieBase(MovieTypeBase):
'params': { 'params': {
'identifier': {'desc': 'IMDB id of the movie your want to add.'}, 'identifier': {'desc': 'IMDB id of the movie your want to add.'},
'profile_id': {'desc': 'ID of quality profile you want the add the movie in. If empty will use the default profile.'}, 'profile_id': {'desc': 'ID of quality profile you want the add the movie in. If empty will use the default profile.'},
'category_id': {'desc': 'ID of category you want the add the movie in. If empty will use no category.'},
'title': {'desc': 'Movie title to use for searches. Has to be one of the titles returned by movie.search.'}, 'title': {'desc': 'Movie title to use for searches. Has to be one of the titles returned by movie.search.'},
} }
}) })
@ -79,34 +79,6 @@ class MovieBase(MovieTypeBase):
addEvent('movie.list', self.list) addEvent('movie.list', self.list)
addEvent('movie.restatus', self.restatus) addEvent('movie.restatus', self.restatus)
# Clean releases that didn't have activity in the last week
addEvent('app.load', self.cleanReleases)
fireEvent('schedule.interval', 'movie.clean_releases', self.cleanReleases, hours = 4)
def cleanReleases(self):
log.debug('Removing releases from dashboard')
now = time.time()
week = 262080
done_status, available_status, snatched_status = \
fireEvent('status.get', ['done', 'available', 'snatched'], single = True)
db = get_session()
# get movies last_edit more than a week ago
movies = db.query(Media) \
.filter(Media.status_id == done_status.get('id'), Media.last_edit < (now - week)) \
.all()
for movie in movies:
for rel in movie.releases:
if rel.status_id in [available_status.get('id'), snatched_status.get('id')]:
fireEvent('release.delete', id = rel.id, single = True)
db.expire_all()
def getView(self, id = None, **kwargs): def getView(self, id = None, **kwargs):
movie = self.get(id) if id else None movie = self.get(id) if id else None
@ -343,25 +315,6 @@ class MovieBase(MovieTypeBase):
'chars': chars, 'chars': chars,
} }
def search(self, q = '', **kwargs):
cache_key = u'%s/%s' % (__name__, simplifyString(q))
movies = Env.get('cache').get(cache_key)
if not movies:
if getImdb(q):
movies = [fireEvent('movie.info', identifier = q, merge = True)]
else:
movies = fireEvent('movie.search', q = q, merge = True)
Env.get('cache').set(cache_key, movies)
return {
'success': True,
'empty': len(movies) == 0 if movies else 0,
'movies': movies,
}
def add(self, params = None, force_readd = True, search_after = True, update_library = False, status_id = None): def add(self, params = None, force_readd = True, search_after = True, update_library = False, status_id = None):
if not params: params = {} if not params: params = {}

7
couchpotato/core/media/movie/_base/static/list.js

@ -52,8 +52,8 @@ var MovieList = new Class({
self.getMovies(); self.getMovies();
App.addEvent('movie.added', self.movieAdded.bind(self)) App.on('movie.added', self.movieAdded.bind(self))
App.addEvent('movie.deleted', self.movieDeleted.bind(self)) App.on('movie.deleted', self.movieDeleted.bind(self))
}, },
movieDeleted: function(notification){ movieDeleted: function(notification){
@ -65,6 +65,7 @@ var MovieList = new Class({
movie.destroy(); movie.destroy();
delete self.movies_added[notification.data.id]; delete self.movies_added[notification.data.id];
self.setCounter(self.counter_count-1); self.setCounter(self.counter_count-1);
self.total_movies--;
} }
}) })
} }
@ -75,6 +76,7 @@ var MovieList = new Class({
movieAdded: function(notification){ movieAdded: function(notification){
var self = this; var self = this;
self.fireEvent('movieAdded', notification);
if(self.options.add_new && !self.movies_added[notification.data.id] && notification.data.status.identifier == self.options.status){ if(self.options.add_new && !self.movies_added[notification.data.id] && notification.data.status.identifier == self.options.status){
window.scroll(0,0); window.scroll(0,0);
self.createMovie(notification.data, 'top'); self.createMovie(notification.data, 'top');
@ -390,6 +392,7 @@ var MovieList = new Class({
self.movies.erase(movie); self.movies.erase(movie);
movie.destroy(); movie.destroy();
self.setCounter(self.counter_count-1); self.setCounter(self.counter_count-1);
self.total_movies--;
}); });
self.calculateSelected(); self.calculateSelected();

22
couchpotato/core/media/movie/_base/static/movie.actions.js

@ -126,7 +126,9 @@ MA.Release = new Class({
else else
self.showHelper(); self.showHelper();
App.addEvent('movie.searcher.ended.'+self.movie.data.id, function(notification){ App.on('movie.searcher.ended', function(notification){
if(self.movie.data.id != notification.data.id) return;
self.releases = null; self.releases = null;
if(self.options_container){ if(self.options_container){
self.options_container.destroy(); self.options_container.destroy();
@ -250,12 +252,14 @@ MA.Release = new Class({
else if(!self.next_release && status.identifier == 'available'){ else if(!self.next_release && status.identifier == 'available'){
self.next_release = release; self.next_release = release;
} }
var update_handle = function(notification) { var update_handle = function(notification) {
var q = self.movie.quality.getElement('.q_id' + release.quality_id), if(notification.data.id != release.id) return;
var q = self.movie.quality.getElement('.q_id' + release.quality_id),
status = Status.get(release.status_id), status = Status.get(release.status_id),
new_status = Status.get(notification.data); new_status = Status.get(notification.data.status_id);
release.status_id = new_status.id release.status_id = new_status.id
release.el.set('class', 'item ' + new_status.identifier); release.el.set('class', 'item ' + new_status.identifier);
@ -272,7 +276,7 @@ MA.Release = new Class({
} }
} }
App.addEvent('release.update_status.' + release.id, update_handle); App.on('release.update_status', update_handle);
}); });
@ -285,7 +289,7 @@ MA.Release = new Class({
if(self.next_release || (self.last_release && ['ignored', 'failed'].indexOf(self.last_release.status.identifier) === false)){ if(self.next_release || (self.last_release && ['ignored', 'failed'].indexOf(self.last_release.status.identifier) === false)){
self.trynext_container = new Element('div.buttons.try_container').inject(self.release_container, 'top'); self.trynext_container = new Element('div.buttons.try_container').inject(self.release_container, 'top');
var nr = self.next_release, var nr = self.next_release,
lr = self.last_release; lr = self.last_release;
@ -381,7 +385,7 @@ MA.Release = new Class({
}, },
get: function(release, type){ get: function(release, type){
return release.info[type] || 'n/a' return release.info[type] !== undefined ? release.info[type] : 'n/a'
}, },
download: function(release){ download: function(release){
@ -393,7 +397,7 @@ MA.Release = new Class({
if(icon) if(icon)
icon.addClass('icon spinner').removeClass('download'); icon.addClass('icon spinner').removeClass('download');
Api.request('release.download', { Api.request('release.manual_download', {
'data': { 'data': {
'id': release.id 'id': release.id
}, },

2
couchpotato/core/media/movie/_base/static/movie.css

@ -1036,7 +1036,7 @@
text-overflow: ellipsis; text-overflow: ellipsis;
overflow: hidden; overflow: hidden;
width: 85%; width: 85%;
direction: rtl; direction: ltr;
vertical-align: middle; vertical-align: middle;
} }

83
couchpotato/core/media/movie/_base/static/movie.js

@ -23,23 +23,49 @@ var Movie = new Class({
addEvents: function(){ addEvents: function(){
var self = this; var self = this;
App.addEvent('movie.update.'+self.data.id, function(notification){ self.global_events = {}
// Do refresh with new data
self.global_events['movie.update'] = function(notification){
if(self.data.id != notification.data.id) return;
self.busy(false); self.busy(false);
self.removeView(); self.removeView();
self.update.delay(2000, self, notification); self.update.delay(2000, self, notification);
}); }
App.on('movie.update', self.global_events['movie.update']);
// Add spinner on load / search
['movie.busy', 'movie.searcher.started'].each(function(listener){ ['movie.busy', 'movie.searcher.started'].each(function(listener){
App.addEvent(listener+'.'+self.data.id, function(notification){ self.global_events[listener] = function(notification){
if(notification.data) if(notification.data && self.data.id == notification.data.id)
self.busy(true) self.busy(true)
}); }
App.on(listener, self.global_events[listener]);
}) })
App.addEvent('movie.searcher.ended.'+self.data.id, function(notification){ // Remove spinner
if(notification.data) self.global_events['movie.searcher.ended'] = function(notification){
if(notification.data && self.data.id == notification.data.id)
self.busy(false) self.busy(false)
}); }
App.on('movie.searcher.ended', self.global_events['movie.searcher.ended']);
// Reload when releases have updated
self.global_events['release.update_status'] = function(notification){
var data = notification.data
if(data && self.data.id == data.movie_id){
if(!self.data.releases)
self.data.releases = [];
self.data.releases.push({'quality_id': data.quality_id, 'status_id': data.status_id});
self.updateReleases();
}
}
App.on('release.update_status', self.global_events['release.update_status']);
}, },
destroy: function(){ destroy: function(){
@ -52,9 +78,8 @@ var Movie = new Class({
self.list.checkIfEmpty(); self.list.checkIfEmpty();
// Remove events // Remove events
App.removeEvents('movie.update.'+self.data.id); self.global_events.each(function(handle, listener){
['movie.busy', 'movie.searcher.started'].each(function(listener){ App.off(listener, handle);
App.removeEvents(listener+'.'+self.data.id);
}) })
}, },
@ -179,21 +204,7 @@ var Movie = new Class({
}); });
// Add releases // Add releases
if(self.data.releases) self.updateReleases();
self.data.releases.each(function(release){
var q = self.quality.getElement('.q_id'+ release.quality_id),
status = Status.get(release.status_id);
if(!q && (status.identifier == 'snatched' || status.identifier == 'seeding' || status.identifier == 'done'))
var q = self.addQuality(release.quality_id)
if (status && q && !q.hasClass(status.identifier)){
q.addClass(status.identifier);
q.set('title', (q.get('title') ? q.get('title') : '') + ' status: '+ status.label)
}
});
Object.each(self.options.actions, function(action, key){ Object.each(self.options.actions, function(action, key){
self.action[key.toLowerCase()] = action = new self.options.actions[key](self) self.action[key.toLowerCase()] = action = new self.options.actions[key](self)
@ -203,6 +214,26 @@ var Movie = new Class({
}, },
updateReleases: function(){
var self = this;
if(!self.data.releases || self.data.releases.length == 0) return;
self.data.releases.each(function(release){
var q = self.quality.getElement('.q_id'+ release.quality_id),
status = Status.get(release.status_id);
if(!q && (status.identifier == 'snatched' || status.identifier == 'seeding' || status.identifier == 'done'))
var q = self.addQuality(release.quality_id)
if (status && q && !q.hasClass(status.identifier)){
q.addClass(status.identifier);
q.set('title', (q.get('title') ? q.get('title') : '') + ' status: '+ status.label)
}
});
},
addQuality: function(quality_id){ addQuality: function(quality_id){
var self = this; var self = this;

2
couchpotato/core/media/movie/_base/static/search.js

@ -107,7 +107,7 @@ Block.Search.MovieItem = new Class({
self.options_el.empty(); self.options_el.empty();
self.options_el.adopt( self.options_el.adopt(
new Element('div.message', { new Element('div.message', {
'text': json.added ? 'Movie successfully added.' : 'Movie didn\'t add properly. Check logs' 'text': json.success ? 'Movie successfully added.' : 'Movie didn\'t add properly. Check logs'
}) })
); );
self.mask.fade('out'); self.mask.fade('out');

2
couchpotato/core/media/movie/library/movie/main.py

@ -154,7 +154,7 @@ class MovieLibraryPlugin(LibraryBase):
else: else:
dates = library.info.get('release_date') dates = library.info.get('release_date')
if dates and dates.get('expires', 0) < time.time() or not dates: if dates and (dates.get('expires', 0) < time.time() or dates.get('expires', 0) > time.time() + (604800 * 4)) or not dates:
dates = fireEvent('movie.release_date', identifier = identifier, merge = True) dates = fireEvent('movie.release_date', identifier = identifier, merge = True)
library.info.update({'release_date': dates }) library.info.update({'release_date': dates })
db.commit() db.commit()

12
couchpotato/core/media/movie/searcher/main.py

@ -117,6 +117,10 @@ class MovieSearcher(SearcherBase, MovieTypeBase):
def single(self, movie, search_protocols = None, manual = False): def single(self, movie, search_protocols = None, manual = False):
# movies don't contain 'type' yet, so just set to default here
if not movie.has_key('type'):
movie['type'] = 'movie'
# Find out search type # Find out search type
try: try:
if not search_protocols: if not search_protocols:
@ -145,7 +149,7 @@ class MovieSearcher(SearcherBase, MovieTypeBase):
fireEvent('movie.delete', movie['id'], single = True) fireEvent('movie.delete', movie['id'], single = True)
return return
fireEvent('notify.frontend', type = 'movie.searcher.started.%s' % movie['id'], data = True, message = 'Searching for "%s"' % default_title) fireEvent('notify.frontend', type = 'movie.searcher.started', data = {'id': movie['id']}, message = 'Searching for "%s"' % default_title)
ret = False ret = False
@ -167,7 +171,7 @@ class MovieSearcher(SearcherBase, MovieTypeBase):
log.info('Search for %s in %s', (default_title, quality_type['quality']['label'])) log.info('Search for %s in %s', (default_title, quality_type['quality']['label']))
quality = fireEvent('quality.single', identifier = quality_type['quality']['identifier'], single = True) quality = fireEvent('quality.single', identifier = quality_type['quality']['identifier'], single = True)
results = fireEvent('searcher.search', search_protocols, movie, quality, single = True) results = fireEvent('searcher.search', search_protocols, movie, quality, single = True) or []
if len(results) == 0: if len(results) == 0:
log.debug('Nothing found for %s in %s', (default_title, quality_type['quality']['label'])) log.debug('Nothing found for %s in %s', (default_title, quality_type['quality']['label']))
@ -179,7 +183,7 @@ class MovieSearcher(SearcherBase, MovieTypeBase):
found_releases += fireEvent('release.create_from_search', results, movie, quality_type, single = True) found_releases += fireEvent('release.create_from_search', results, movie, quality_type, single = True)
# Try find a valid result and download it # Try find a valid result and download it
if fireEvent('searcher.try_download_result', results, movie, quality_type, manual, single = True): if fireEvent('release.try_download_result', results, movie, quality_type, manual, single = True):
ret = True ret = True
# Remove releases that aren't found anymore # Remove releases that aren't found anymore
@ -199,7 +203,7 @@ class MovieSearcher(SearcherBase, MovieTypeBase):
if len(too_early_to_search) > 0: if len(too_early_to_search) > 0:
log.info2('Too early to search for %s, %s', (too_early_to_search, default_title)) log.info2('Too early to search for %s, %s', (too_early_to_search, default_title))
fireEvent('notify.frontend', type = 'movie.searcher.ended.%s' % movie['id'], data = True) fireEvent('notify.frontend', type = 'movie.searcher.ended', data = {'id': movie['id']})
return ret return ret

47
couchpotato/core/media/movie/suggestion/static/suggest.css

@ -30,10 +30,10 @@
} }
.suggestions .media_result .data .info { .suggestions .media_result .data .info {
top: 15px; top: 10px;
left: 15px; left: 15px;
right: 15px; right: 15px;
bottom: 15px; bottom: 10px;
overflow: hidden; overflow: hidden;
} }
@ -74,9 +74,45 @@
font-size: 11px; font-size: 11px;
font-style: italic; font-style: italic;
text-align: right; text-align: right;
} }
.suggestions .media_result .data .info .plot {
display: block;
font-size: 11px;
overflow: hidden;
text-align: justify;
height: 100%;
z-index: 2;
top: 64px;
position: absolute;
background: #4e5969;
cursor: pointer;
transition: all .4s ease-in-out;
padding: 0 3px 10px 0;
}
.suggestions .media_result .data:before {
bottom: 0;
content: '';
display: block;
height: 10px;
right: 0;
left: 0;
bottom: 10px;
position: absolute;
background: linear-gradient(
0deg,
rgba(78, 89, 105, 1) 0%,
rgba(78, 89, 105, 0) 100%
);
z-index: 3;
pointer-events: none;
}
.suggestions .media_result .data .info .plot.full {
top: 0;
overflow: auto;
}
.suggestions .media_result .data { .suggestions .media_result .data {
cursor: default; cursor: default;
} }
@ -102,7 +138,7 @@
.suggestions .media_result .actions { .suggestions .media_result .actions {
position: absolute; position: absolute;
bottom: 10px; top: 10px;
right: 10px; right: 10px;
display: none; display: none;
width: 140px; width: 140px;
@ -110,6 +146,9 @@
.suggestions .media_result:hover .actions { .suggestions .media_result:hover .actions {
display: block; display: block;
} }
.suggestions .media_result:hover h2 .title {
opacity: 0;
}
.suggestions .media_result .data.open .actions { .suggestions .media_result .data.open .actions {
display: none; display: none;
} }

12
couchpotato/core/media/movie/suggestion/static/suggest.js

@ -95,6 +95,10 @@ var SuggestList = new Class({
); );
m.data_container.removeEvents('click'); m.data_container.removeEvents('click');
var plot = false;
if(m.info.plot && m.info.plot.length > 0)
plot = m.info.plot;
// Add rating // Add rating
m.info_container.adopt( m.info_container.adopt(
m.rating = m.info.rating && m.info.rating.imdb.length == 2 && parseFloat(m.info.rating.imdb[0]) > 0 ? new Element('span.rating', { m.rating = m.info.rating && m.info.rating.imdb.length == 2 && parseFloat(m.info.rating.imdb[0]) > 0 ? new Element('span.rating', {
@ -103,6 +107,14 @@ var SuggestList = new Class({
}) : null, }) : null,
m.genre = m.info.genres && m.info.genres.length > 0 ? new Element('span.genres', { m.genre = m.info.genres && m.info.genres.length > 0 ? new Element('span.genres', {
'text': m.info.genres.slice(0, 3).join(', ') 'text': m.info.genres.slice(0, 3).join(', ')
}) : null,
m.plot = plot ? new Element('span.plot', {
'text': plot,
'events': {
'click': function(){
this.toggleClass('full')
}
}
}) : null }) : null
) )

2
couchpotato/core/notifications/base.py

@ -17,7 +17,7 @@ class Notification(Provider):
listen_to = [ listen_to = [
'renamer.after', 'movie.snatched', 'renamer.after', 'movie.snatched',
'updater.available', 'updater.updated', 'updater.available', 'updater.updated',
'core.message', 'core.message.important',
] ]
dont_listen_to = [] dont_listen_to = []

11
couchpotato/core/notifications/core/main.py

@ -21,6 +21,12 @@ class CoreNotifier(Notification):
m_lock = None m_lock = None
listen_to = [
'renamer.after', 'movie.snatched',
'updater.available', 'updater.updated',
'core.message', 'core.message.important',
]
def __init__(self): def __init__(self):
super(CoreNotifier, self).__init__() super(CoreNotifier, self).__init__()
@ -121,7 +127,10 @@ class CoreNotifier(Notification):
for message in messages: for message in messages:
if message.get('time') > last_check: if message.get('time') > last_check:
fireEvent('core.message', message = message.get('message'), data = message) message['sticky'] = True # Always sticky core messages
message_type = 'core.message.important' if message.get('important') else 'core.message'
fireEvent(message_type, message = message.get('message'), data = message)
if last_check < message.get('time'): if last_check < message.get('time'):
last_check = message.get('time') last_check = message.get('time')

10
couchpotato/core/notifications/core/static/notification.js

@ -10,8 +10,8 @@ var NotificationBase = new Class({
// Listener // Listener
App.addEvent('unload', self.stopPoll.bind(self)); App.addEvent('unload', self.stopPoll.bind(self));
App.addEvent('reload', self.startInterval.bind(self, [true])); App.addEvent('reload', self.startInterval.bind(self, [true]));
App.addEvent('notification', self.notify.bind(self)); App.on('notification', self.notify.bind(self));
App.addEvent('message', self.showMessage.bind(self)); App.on('message', self.showMessage.bind(self));
// Add test buttons to settings page // Add test buttons to settings page
App.addEvent('load', self.addTestButtons.bind(self)); App.addEvent('load', self.addTestButtons.bind(self));
@ -50,9 +50,9 @@ var NotificationBase = new Class({
, 'top'); , 'top');
self.notifications.include(result); self.notifications.include(result);
if(result.data.important !== undefined && !result.read){ if((result.data.important !== undefined || result.data.sticky !== undefined) && !result.read){
var sticky = true var sticky = true
App.fireEvent('message', [result.message, sticky, result]) App.trigger('message', [result.message, sticky, result])
} }
else if(!result.read){ else if(!result.read){
self.setBadge(self.notifications.filter(function(n){ return !n.read}).length) self.setBadge(self.notifications.filter(function(n){ return !n.read}).length)
@ -147,7 +147,7 @@ var NotificationBase = new Class({
// Process data // Process data
if(json){ if(json){
Array.each(json.result, function(result){ Array.each(json.result, function(result){
App.fireEvent(result.type, result); App.trigger(result.type, result);
if(result.message && result.read === undefined) if(result.message && result.read === undefined)
self.showMessage(result.message); self.showMessage(result.message);
}) })

11
couchpotato/core/notifications/email/__init__.py

@ -28,6 +28,11 @@ config = [{
'name': 'smtp_server', 'name': 'smtp_server',
'label': 'SMTP server', 'label': 'SMTP server',
}, },
{ 'name': 'smtp_port',
'label': 'SMTP server port',
'default': '25',
'type': 'int',
},
{ {
'name': 'ssl', 'name': 'ssl',
'label': 'Enable SSL', 'label': 'Enable SSL',
@ -35,6 +40,12 @@ config = [{
'type': 'bool', 'type': 'bool',
}, },
{ {
'name': 'starttls',
'label': 'Enable StartTLS',
'default': 0,
'type': 'bool',
},
{
'name': 'smtp_user', 'name': 'smtp_user',
'label': 'SMTP user', 'label': 'SMTP user',
}, },

13
couchpotato/core/notifications/email/main.py

@ -4,6 +4,7 @@ from couchpotato.core.logger import CPLog
from couchpotato.core.notifications.base import Notification from couchpotato.core.notifications.base import Notification
from couchpotato.environment import Env from couchpotato.environment import Env
from email.mime.text import MIMEText from email.mime.text import MIMEText
from email.utils import formatdate, make_msgid
import smtplib import smtplib
import traceback import traceback
@ -22,18 +23,30 @@ class Email(Notification):
smtp_server = self.conf('smtp_server') smtp_server = self.conf('smtp_server')
smtp_user = self.conf('smtp_user') smtp_user = self.conf('smtp_user')
smtp_pass = self.conf('smtp_pass') smtp_pass = self.conf('smtp_pass')
smtp_port = self.conf('smtp_port')
starttls = self.conf('starttls')
# Make the basic message # Make the basic message
message = MIMEText(toUnicode(message), _charset = Env.get('encoding')) message = MIMEText(toUnicode(message), _charset = Env.get('encoding'))
message['Subject'] = self.default_title message['Subject'] = self.default_title
message['From'] = from_address message['From'] = from_address
message['To'] = to_address message['To'] = to_address
message['Date'] = formatdate(localtime = 1)
message['Message-ID'] = make_msgid()
try: try:
# Open the SMTP connection, via SSL if requested # Open the SMTP connection, via SSL if requested
log.debug("Connecting to host %s on port %s" % (smtp_server, smtp_port))
log.debug("SMTP over SSL %s", ("enabled" if ssl == 1 else "disabled")) log.debug("SMTP over SSL %s", ("enabled" if ssl == 1 else "disabled"))
mailserver = smtplib.SMTP_SSL(smtp_server) if ssl == 1 else smtplib.SMTP(smtp_server) mailserver = smtplib.SMTP_SSL(smtp_server) if ssl == 1 else smtplib.SMTP(smtp_server)
if (starttls):
log.debug("Using StartTLS to initiate the connection with the SMTP server")
mailserver.starttls()
# Say hello to the server
mailserver.ehlo()
# Check too see if an login attempt should be attempted # Check too see if an login attempt should be attempted
if len(smtp_user) > 0: if len(smtp_user) > 0:
log.debug("Logging on to SMTP server using username \'%s\'%s", (smtp_user, " and a password" if len(smtp_pass) > 0 else "")) log.debug("Logging on to SMTP server using username \'%s\'%s", (smtp_user, " and a password" if len(smtp_pass) > 0 else ""))

39
couchpotato/core/notifications/notifo/main.py

@ -1,39 +0,0 @@
from couchpotato.core.helpers.encoding import toUnicode
from couchpotato.core.logger import CPLog
from couchpotato.core.notifications.base import Notification
import base64
import json
import traceback
log = CPLog(__name__)
class Notifo(Notification):
url = 'https://api.notifo.com/v1/send_notification'
def notify(self, message = '', data = None, listener = None):
if not data: data = {}
try:
params = {
'label': self.default_title,
'msg': toUnicode(message),
}
headers = {
'Authorization': "Basic %s" % base64.encodestring('%s:%s' % (self.conf('username'), self.conf('api_key')))[:-1]
}
handle = self.urlopen(self.url, params = params, headers = headers)
result = json.loads(handle)
if result['status'] != 'success' or result['response_message'] != 'OK':
raise Exception
except:
log.error('Notification failed: %s', traceback.format_exc())
return False
log.info('Notifo notification successful.')
return True

85
couchpotato/core/notifications/plex/client.py

@ -0,0 +1,85 @@
import json
from couchpotato import CPLog
from couchpotato.core.event import addEvent
from couchpotato.core.helpers.encoding import tryUrlencode
import requests
log = CPLog(__name__)
class PlexClientProtocol(object):
def __init__(self, plex):
self.plex = plex
addEvent('notify.plex.notifyClient', self.notify)
def notify(self, client, message):
raise NotImplementedError()
class PlexClientHTTP(PlexClientProtocol):
def request(self, command, client):
url = 'http://%s:%s/xbmcCmds/xbmcHttp/?%s' % (
client['address'],
client['port'],
tryUrlencode(command)
)
headers = {}
try:
self.plex.urlopen(url, headers = headers, timeout = 3, show_error = False)
except Exception, err:
log.error("Couldn't sent command to Plex: %s", err)
return False
return True
def notify(self, client, message):
if client.get('protocol') != 'xbmchttp':
return None
data = {
'command': 'ExecBuiltIn',
'parameter': 'Notification(CouchPotato, %s)' % message
}
return self.request(data, client)
class PlexClientJSON(PlexClientProtocol):
def request(self, method, params, client):
log.debug('sendJSON("%s", %s, %s)', (method, params, client))
url = 'http://%s:%s/jsonrpc' % (
client['address'],
client['port']
)
headers = {
'Content-Type': 'application/json'
}
request = {
'id': 1,
'jsonrpc': '2.0',
'method': method,
'params': params
}
try:
requests.post(url, headers = headers, timeout = 3, data = json.dumps(request))
except Exception, err:
log.error("Couldn't sent command to Plex: %s", err)
return False
return True
def notify(self, client, message):
if client.get('protocol') not in ['xbmcjson', 'plex']:
return None
params = {
'title': 'CouchPotato',
'message': message
}
return self.request('GUI.ShowNotification', params, client)

205
couchpotato/core/notifications/plex/main.py

@ -1,183 +1,64 @@
from couchpotato.core.event import addEvent from couchpotato.core.event import addEvent, fireEvent
from couchpotato.core.helpers.encoding import tryUrlencode
from couchpotato.core.helpers.variable import cleanHost
from couchpotato.core.logger import CPLog from couchpotato.core.logger import CPLog
from couchpotato.core.notifications.base import Notification from couchpotato.core.notifications.base import Notification
from datetime import datetime from .client import PlexClientHTTP, PlexClientJSON
from urlparse import urlparse from .server import PlexServer
from xml.dom import minidom
import json
import requests
import traceback
try:
import xml.etree.cElementTree as etree
except ImportError:
import xml.etree.ElementTree as etree
log = CPLog(__name__) log = CPLog(__name__)
class Plex(Notification): class Plex(Notification):
client_update_time = 5 * 60
http_time_between_calls = 0 http_time_between_calls = 0
def __init__(self): def __init__(self):
super(Plex, self).__init__() super(Plex, self).__init__()
self.clients = {} self.server = PlexServer(self)
self.clients_updated = None
addEvent('renamer.after', self.addToLibrary) self.client_protocols = {
'http': PlexClientHTTP(self),
def updateClients(self, force = False): 'json': PlexClientJSON(self)
if not self.conf('media_server'): }
log.warning("Plex media server hostname is required")
return
since_update = ((datetime.now() - self.clients_updated).total_seconds())\
if self.clients_updated is not None else None
if force or self.clients_updated is None or since_update > self.client_update_time:
self.clients = {}
data = self.urlopen('%s/clients' % self.createHost(self.conf('media_server'), port = 32400))
client_result = etree.fromstring(data)
clients = [x.strip().lower() for x in self.conf('clients').split(',')]
for server in client_result.findall('Server'):
if server.get('name').lower() in clients:
clients.remove(server.get('name').lower())
protocol = server.get('protocol', 'xbmchttp')
if protocol in ['plex', 'xbmcjson', 'xbmchttp']:
self.clients[server.get('name')] = {
'name': server.get('name'),
'address': server.get('address'),
'port': server.get('port'),
'protocol': protocol
}
if len(clients) > 0:
log.info2('Unable to find plex clients: %s', ', '.join(clients))
log.info2('Found hosts: %s', ', '.join(self.clients.keys()))
self.clients_updated = datetime.now() addEvent('renamer.after', self.addToLibrary)
def addToLibrary(self, message = None, group = {}): def addToLibrary(self, message = None, group = {}):
if self.isDisabled(): return if self.isDisabled(): return
log.info('Sending notification to Plex') return self.server.refresh()
source_type = ['movie']
base_url = '%s/library/sections' % self.createHost(self.conf('media_server'), port = 32400)
refresh_url = '%s/%%s/refresh' % base_url
try:
sections_xml = self.urlopen(base_url)
xml_sections = minidom.parseString(sections_xml)
sections = xml_sections.getElementsByTagName('Directory')
for s in sections:
if s.getAttribute('type') in source_type:
url = refresh_url % s.getAttribute('key')
x = self.urlopen(url)
except:
log.error('Plex library update failed for %s, Media Server not running: %s',
(self.conf('media_server'), traceback.format_exc(1)))
return False
return True
def sendHTTP(self, command, client):
url = 'http://%s:%s/xbmcCmds/xbmcHttp/?%s' % (
client['address'],
client['port'],
tryUrlencode(command)
)
headers = {}
try:
self.urlopen(url, headers = headers, timeout = 3, show_error = False)
except Exception, err:
log.error("Couldn't sent command to Plex: %s", err)
return False
return True
def notifyHTTP(self, message = '', data = {}, listener = None):
total = 0
successful = 0
data = {
'command': 'ExecBuiltIn',
'parameter': 'Notification(CouchPotato, %s)' % message
}
for name, client in self.clients.items(): def getClientNames(self):
if client['protocol'] == 'xbmchttp': return [
total += 1 x.strip().lower()
if self.sendHTTP(data, client): for x in self.conf('clients').split(',')
successful += 1 ]
return successful == total def notifyClients(self, message, client_names):
success = True
def sendJSON(self, method, params, client): for client_name in client_names:
log.debug('sendJSON("%s", %s, %s)', (method, params, client))
url = 'http://%s:%s/jsonrpc' % (
client['address'],
client['port']
)
headers = { client_success = False
'Content-Type': 'application/json' client = self.server.clients.get(client_name)
}
request = { if client and client['found']:
'id':1, client_success = fireEvent('notify.plex.notifyClient', client, message, single = True)
'jsonrpc': '2.0',
'method': method,
'params': params
}
try: if not client_success:
requests.post(url, headers = headers, timeout = 3, data = json.dumps(request)) if self.server.staleClients() or not client:
except Exception, err: log.info('Failed to send notification to client "%s". '
log.error("Couldn't sent command to Plex: %s", err) 'Client list is stale, updating the client list and retrying.', client_name)
return False self.server.updateClients(self.getClientNames())
else:
log.warning('Failed to send notification to client %s, skipping this time', client_name)
success = False
return True return success
def notifyJSON(self, message = '', data = {}, listener = None): def notify(self, message = '', data = {}, listener = None):
total = 0 return self.notifyClients(message, self.getClientNames())
successful = 0
params = {
'title': 'CouchPotato',
'message': message
}
for name, client in self.clients.items():
if client['protocol'] in ['xbmcjson', 'plex']:
total += 1
if self.sendJSON('GUI.ShowNotification', params, client):
successful += 1
return successful == total
def notify(self, message = '', data = {}, listener = None, force = False):
self.updateClients(force)
http_result = self.notifyHTTP(message, data, listener)
json_result = self.notifyJSON(message, data, listener)
return http_result and json_result
def test(self, **kwargs): def test(self, **kwargs):
@ -185,24 +66,12 @@ class Plex(Notification):
log.info('Sending test to %s', test_type) log.info('Sending test to %s', test_type)
success = self.notify( notify_success = self.notify(
message = self.test_message, message = self.test_message,
data = {}, data = {},
listener = 'test', listener = 'test'
force = True
) )
success2 = self.addToLibrary()
return {
'success': success or success2
}
def createHost(self, host, port = None):
h = cleanHost(host) refresh_success = self.addToLibrary()
p = urlparse(h)
h = h.rstrip('/')
if port and not p.port:
h += ':%s' % port
return h return {'success': notify_success or refresh_success}

114
couchpotato/core/notifications/plex/server.py

@ -0,0 +1,114 @@
from datetime import timedelta, datetime
from couchpotato.core.helpers.variable import cleanHost
from couchpotato import CPLog
from urlparse import urlparse
import traceback
try:
import xml.etree.cElementTree as etree
except ImportError:
import xml.etree.ElementTree as etree
log = CPLog(__name__)
class PlexServer(object):
    """Thin client for a Plex Media Server.

    Discovers clients connected to the server and triggers library
    section refreshes on behalf of the Plex notification plugin.
    """

    def __init__(self, plex):
        # `plex` is the owning notification plugin; provides conf() and urlopen()
        self.plex = plex
        self.clients = {}
        self.last_clients_update = None

    def staleClients(self):
        """Return True when the cached client list is missing or older than 15 minutes."""
        if not self.last_clients_update:
            return True

        return self.last_clients_update + timedelta(minutes = 15) < datetime.now()

    def request(self, path, data_type = 'xml'):
        """GET `path` from the media server (port 32400).

        Returns the parsed XML root element (default), the raw response when
        `data_type` != 'xml', or None when no media server host is configured.
        """
        if not self.plex.conf('media_server'):
            log.warning("Plex media server hostname is required")
            return None

        if path.startswith('/'):
            path = path[1:]

        data = self.plex.urlopen('%s/%s' % (
            self.createHost(self.plex.conf('media_server'), port = 32400),
            path
        ))

        if data_type == 'xml':
            return etree.fromstring(data)
        else:
            return data

    def updateClients(self, client_names):
        """Rebuild the cached client map for the given (lowercase) client names.

        Clients the server reports get their connection details cached; names
        the server doesn't know get a dummy {'found': False} entry so lookups
        still succeed. The passed list is left unmodified.
        """
        log.info('Searching for clients on Plex Media Server')

        self.clients = {}

        result = self.request('clients')
        if not result:
            return

        # Work on a copy: the original code removed found names from the
        # caller's list in place, silently destroying it.
        missing = list(client_names)

        found_clients = [
            c for c in result.findall('Server')
            if c.get('name') and c.get('name').lower() in missing
        ]

        # Store client details in cache
        for client in found_clients:
            name = client.get('name').lower()

            self.clients[name] = {
                'name': client.get('name'),
                'found': True,
                'address': client.get('address'),
                'port': client.get('port'),
                # assumes clients without an advertised protocol speak the
                # legacy HTTP API - TODO confirm against Plex docs
                'protocol': client.get('protocol', 'xbmchttp')
            }

            missing.remove(name)

        # Store dummy info for missing clients
        for client_name in missing:
            self.clients[client_name] = {
                'found': False
            }

        if len(missing) > 0:
            log.debug('Unable to find clients: %s', ', '.join(missing))

        self.last_clients_update = datetime.now()

    def refresh(self, section_types = None):
        """Trigger a refresh of each library section whose type is in
        `section_types` (defaults to ['movie']). Returns True on success."""
        if not section_types:
            section_types = ['movie']

        sections = self.request('library/sections')

        try:
            # `sections` may be None when the request failed; the resulting
            # AttributeError is handled below together with network errors.
            for section in sections.findall('Directory'):
                if section.get('type') not in section_types:
                    continue
                self.request('library/sections/%s/refresh' % section.get('key'), 'text')
        except:
            log.error('Plex library update failed for %s, Media Server not running: %s',
                      (self.plex.conf('media_server'), traceback.format_exc(1)))
            return False

        return True

    def createHost(self, host, port = None):
        """Normalise `host` to 'http://host[:port]' without a trailing slash,
        appending `port` only when the host doesn't already carry one."""
        h = cleanHost(host)
        p = urlparse(h)
        h = h.rstrip('/')

        if port and not p.port:
            h += ':%s' % port

        return h

17
couchpotato/core/notifications/notifo/__init__.py → couchpotato/core/notifications/pushbullet/__init__.py

@ -1,16 +1,15 @@
from .main import Notifo from .main import Pushbullet
def start(): def start():
return Notifo() return Pushbullet()
config = [{ config = [{
'name': 'notifo', 'name': 'pushbullet',
'groups': [ 'groups': [
{ {
'tab': 'notifications', 'tab': 'notifications',
'list': 'notification_providers', 'list': 'notification_providers',
'name': 'notifo', 'name': 'pushbullet',
'description': 'Keep in mind that Notifo service will end soon.',
'options': [ 'options': [
{ {
'name': 'enabled', 'name': 'enabled',
@ -18,10 +17,14 @@ config = [{
'type': 'enabler', 'type': 'enabler',
}, },
{ {
'name': 'username', 'name': 'api_key',
'label': 'User API Key'
}, },
{ {
'name': 'api_key', 'name': 'devices',
'default': '',
'advanced': True,
'description': 'IDs of devices to send notifications to, empty = all devices'
}, },
{ {
'name': 'on_snatch', 'name': 'on_snatch',

86
couchpotato/core/notifications/pushbullet/main.py

@ -0,0 +1,86 @@
from couchpotato.core.helpers.encoding import toUnicode
from couchpotato.core.helpers.variable import tryInt
from couchpotato.core.logger import CPLog
from couchpotato.core.notifications.base import Notification
import base64
import json
log = CPLog(__name__)
class Pushbullet(Notification):
    """Push notifications to Pushbullet devices via the REST API."""

    url = 'https://api.pushbullet.com/api/%s'

    def notify(self, message = '', data = None, listener = None):
        """Send `message` as a note to the configured devices.

        When no device IDs are configured, pushes to every device linked to
        the account. Returns True only when every push succeeded (and
        vacuously when there are zero devices).
        """
        if not data: data = {}

        devices = self.getDevices()
        if devices is None:
            return False

        # Get all the device IDs linked to this user
        if not len(devices):
            response = self.request('devices')
            if not response:
                return False

            devices += [device.get('id') for device in response['devices']]

        successful = 0
        for device in devices:
            response = self.request(
                'pushes',
                cache = False,
                device_id = device,
                type = 'note',
                title = self.default_title,
                body = toUnicode(message)
            )

            if response:
                successful += 1
            else:
                log.error('Unable to push notification to Pushbullet device with ID %s' % device)

        return successful == len(devices)

    def getDevices(self):
        """Parse the comma-separated 'devices' setting into a list of ints.

        Returns [] when nothing is configured (meaning "all devices"), or
        None when any configured ID fails integer conversion.
        """
        devices = [d.strip() for d in self.conf('devices').split(',')]

        # Remove empty items
        devices = [d for d in devices if len(d)]

        # Break on any ids that aren't integers
        valid_devices = []
        for device_id in devices:
            d = tryInt(device_id, None)
            if not d:
                log.error('Device ID "%s" is not valid', device_id)
                return None
            valid_devices.append(d)

        return valid_devices

    def request(self, method, cache = True, **kwargs):
        """Call Pushbullet API `method` using HTTP basic auth with the api_key
        as username (empty password). Returns parsed JSON or None on failure."""
        try:
            # NOTE(review): base64.encodestring is deprecated (removed in
            # Python 3.9); encodebytes is the modern spelling.
            base64string = base64.encodestring('%s:' % self.conf('api_key'))[:-1]

            headers = {
                "Authorization": "Basic %s" % base64string
            }

            if cache:
                return self.getJsonData(self.url % method, headers = headers, params = kwargs)
            else:
                data = self.urlopen(self.url % method, headers = headers, params = kwargs)
                return json.loads(data)

        # was `except Exception, ex` - Python-2-only syntax; `as` works on 2.6+ and 3
        except Exception as ex:
            log.error('Pushbullet request failed')
            log.debug(ex)

            return None

52
couchpotato/core/notifications/xmpp/__init__.py

@ -0,0 +1,52 @@
from .main import Xmpp
def start():
    """Plugin entry point: instantiate the XMPP notifier."""
    return Xmpp()

# Settings UI definition for the XMPP notification provider.
config = [{
    'name': 'xmpp',
    'groups': [
        {
            'tab': 'notifications',
            'list': 'notification_providers',
            'name': 'xmpp',
            'label': 'XMPP',
            # fixed: key was misspelled as 'description`' (stray backtick),
            # so the UI would never have shown this text
            'description': 'for Jabber, Hangouts (Google Talk), AIM...',
            'options': [
                {
                    'name': 'enabled',
                    'default': 0,
                    'type': 'enabler',
                },
                {
                    'name': 'username',
                    'description': 'User sending the message. For Hangouts, e-mail of a single-step authentication Google account.',
                },
                {
                    'name': 'password',
                    # NOTE(review): other providers use a lowercase 'password'
                    # type - confirm whether 'Password' is recognised by the UI
                    'type': 'Password',
                },
                {
                    'name': 'hostname',
                    'default': 'talk.google.com',
                },
                {
                    'name': 'to',
                    'description': 'Username (or e-mail for Hangouts) of the person to send the messages to.',
                },
                {
                    # standard XMPP client-to-server port
                    'name': 'port',
                    'type': 'int',
                    'default': 5222,
                },
                {
                    'name': 'on_snatch',
                    'default': 0,
                    'type': 'bool',
                    'advanced': True,
                    'description': 'Also send message when movie is snatched.',
                },
            ],
        }
    ],
}]

43
couchpotato/core/notifications/xmpp/main.py

@ -0,0 +1,43 @@
from couchpotato.core.logger import CPLog
from couchpotato.core.notifications.base import Notification
from time import sleep
import traceback
import xmpp
log = CPLog(__name__)
class Xmpp(Notification):
    """Send notifications over XMPP (Jabber, Hangouts/Google Talk, AIM...)."""

    def notify(self, message = '', data = None, listener = None):
        """Deliver `message` as a chat message to the configured recipient.

        Connects, authenticates, sends, waits briefly, then disconnects.
        Returns True on success, False on any connection/auth/send failure.
        """
        if not data: data = {}

        try:
            jid = xmpp.protocol.JID(self.conf('username'))
            client = xmpp.Client(jid.getDomain(), debug = [])

            # Connect
            if not client.connect(server = (self.conf('hostname'), self.conf('port'))):
                log.error('XMPP failed: Connection to server failed.')
                return False

            # Authenticate
            if not client.auth(jid.getNode(), self.conf('password'), resource = jid.getResource()):
                log.error('XMPP failed: Failed to authenticate.')
                return False

            # Send message
            client.send(xmpp.protocol.Message(to = self.conf('to'), body = message, typ = 'chat'))

            # Disconnect
            # some older servers will not send the message if you disconnect immediately after sending
            sleep(1)

            client.disconnect()

            log.info('XMPP notifications sent.')
            return True

        # was a bare `except:` - don't swallow SystemExit/KeyboardInterrupt
        except Exception:
            log.error('XMPP failed: %s', traceback.format_exc())
            return False

6
couchpotato/core/plugins/base.py

@ -1,7 +1,7 @@
from StringIO import StringIO from StringIO import StringIO
from couchpotato.core.event import fireEvent, addEvent from couchpotato.core.event import fireEvent, addEvent
from couchpotato.core.helpers.encoding import tryUrlencode, ss, toSafeString, \ from couchpotato.core.helpers.encoding import tryUrlencode, ss, toSafeString, \
toUnicode toUnicode, sp
from couchpotato.core.helpers.variable import getExt, md5, isLocalIP from couchpotato.core.helpers.variable import getExt, md5, isLocalIP
from couchpotato.core.logger import CPLog from couchpotato.core.logger import CPLog
from couchpotato.environment import Env from couchpotato.environment import Env
@ -291,10 +291,10 @@ class Plugin(object):
def createNzbName(self, data, movie): def createNzbName(self, data, movie):
tag = self.cpTag(movie) tag = self.cpTag(movie)
return '%s%s' % (toSafeString(data.get('name')[:127 - len(tag)]), tag) return '%s%s' % (toSafeString(toUnicode(data.get('name'))[:127 - len(tag)]), tag)
def createFileName(self, data, filedata, movie): def createFileName(self, data, filedata, movie):
name = os.path.join(self.createNzbName(data, movie)) name = sp(os.path.join(self.createNzbName(data, movie)))
if data.get('protocol') == 'nzb' and 'DOCTYPE nzb' not in filedata and '</nzb>' not in filedata: if data.get('protocol') == 'nzb' and 'DOCTYPE nzb' not in filedata and '</nzb>' not in filedata:
return '%s.%s' % (name, 'rar') return '%s.%s' % (name, 'rar')
return '%s.%s' % (name, data.get('protocol')) return '%s.%s' % (name, data.get('protocol'))

1
couchpotato/core/plugins/manage/main.py

@ -79,6 +79,7 @@ class Manage(Plugin):
try: try:
directories = self.directories() directories = self.directories()
directories.sort()
added_identifiers = [] added_identifiers = []
# Add some progress # Add some progress

167
couchpotato/core/plugins/quality/main.py

@ -2,7 +2,7 @@ from couchpotato import get_session
from couchpotato.api import addApiView from couchpotato.api import addApiView
from couchpotato.core.event import addEvent from couchpotato.core.event import addEvent
from couchpotato.core.helpers.encoding import toUnicode, ss from couchpotato.core.helpers.encoding import toUnicode, ss
from couchpotato.core.helpers.variable import mergeDicts, md5, getExt from couchpotato.core.helpers.variable import mergeDicts, getExt
from couchpotato.core.logger import CPLog from couchpotato.core.logger import CPLog
from couchpotato.core.plugins.base import Plugin from couchpotato.core.plugins.base import Plugin
from couchpotato.core.settings.model import Quality, Profile, ProfileType from couchpotato.core.settings.model import Quality, Profile, ProfileType
@ -38,6 +38,9 @@ class QualityPlugin(Plugin):
] ]
pre_releases = ['cam', 'ts', 'tc', 'r5', 'scr'] pre_releases = ['cam', 'ts', 'tc', 'r5', 'scr']
cached_qualities = None
cached_order = None
def __init__(self): def __init__(self):
addEvent('quality.all', self.all) addEvent('quality.all', self.all)
addEvent('quality.single', self.single) addEvent('quality.single', self.single)
@ -55,6 +58,8 @@ class QualityPlugin(Plugin):
addEvent('app.initialize', self.fill, priority = 10) addEvent('app.initialize', self.fill, priority = 10)
addEvent('app.test', self.doTest)
def preReleases(self): def preReleases(self):
return self.pre_releases return self.pre_releases
@ -67,6 +72,9 @@ class QualityPlugin(Plugin):
def all(self): def all(self):
if self.cached_qualities:
return self.cached_qualities
db = get_session() db = get_session()
qualities = db.query(Quality).all() qualities = db.query(Quality).all()
@ -76,6 +84,7 @@ class QualityPlugin(Plugin):
q = mergeDicts(self.getQuality(quality.identifier), quality.to_dict()) q = mergeDicts(self.getQuality(quality.identifier), quality.to_dict())
temp.append(q) temp.append(q)
self.cached_qualities = temp
return temp return temp
def single(self, identifier = ''): def single(self, identifier = ''):
@ -104,6 +113,8 @@ class QualityPlugin(Plugin):
setattr(quality, kwargs.get('value_type'), kwargs.get('value')) setattr(quality, kwargs.get('value_type'), kwargs.get('value'))
db.commit() db.commit()
self.cached_qualities = None
return { return {
'success': True 'success': True
} }
@ -164,77 +175,149 @@ class QualityPlugin(Plugin):
if not extra: extra = {} if not extra: extra = {}
# Create hash for cache # Create hash for cache
cache_key = md5(str([f.replace('.' + getExt(f), '') for f in files])) cache_key = str([f.replace('.' + getExt(f), '') if len(getExt(f)) < 4 else f for f in files])
cached = self.getCache(cache_key) cached = self.getCache(cache_key)
if cached and len(extra) == 0: return cached if cached and len(extra) == 0:
return cached
qualities = self.all() qualities = self.all()
# Start with 0
score = {}
for quality in qualities:
score[quality.get('identifier')] = 0
for cur_file in files: for cur_file in files:
words = re.split('\W+', cur_file.lower()) words = re.split('\W+', cur_file.lower())
found = {}
for quality in qualities: for quality in qualities:
contains = self.containsTag(quality, words, cur_file) contains_score = self.containsTagScore(quality, words, cur_file)
if contains: self.calcScore(score, quality, contains_score)
found[quality['identifier']] = True
for quality in qualities: # Try again with loose testing
for quality in qualities:
loose_score = self.guessLooseScore(quality, files = files, extra = extra)
self.calcScore(score, quality, loose_score)
# Check identifier
if quality['identifier'] in words:
if len(found) == 0 or len(found) == 1 and found.get(quality['identifier']):
log.debug('Found via identifier "%s" in %s', (quality['identifier'], cur_file))
return self.setCache(cache_key, quality)
# Check alt and tags # Return nothing if all scores are 0
contains = self.containsTag(quality, words, cur_file) has_non_zero = 0
if contains: for s in score:
return self.setCache(cache_key, quality) if score[s] > 0:
has_non_zero += 1
# Try again with loose testing if not has_non_zero:
quality = self.guessLoose(cache_key, files = files, extra = extra) return None
if quality:
return self.setCache(cache_key, quality) heighest_quality = max(score, key = score.get)
if heighest_quality:
for quality in qualities:
if quality.get('identifier') == heighest_quality:
return self.setCache(cache_key, quality)
log.debug('Could not identify quality for: %s', files)
return None return None
def containsTag(self, quality, words, cur_file = ''): def containsTagScore(self, quality, words, cur_file = ''):
cur_file = ss(cur_file) cur_file = ss(cur_file)
score = 0
points = {
'identifier': 10,
'label': 10,
'alternative': 9,
'tags': 9,
'ext': 3,
}
# Check alt and tags # Check alt and tags
for tag_type in ['alternative', 'tags', 'label']: for tag_type in ['identifier', 'alternative', 'tags', 'label']:
qualities = quality.get(tag_type, []) qualities = quality.get(tag_type, [])
qualities = [qualities] if isinstance(qualities, (str, unicode)) else qualities qualities = [qualities] if isinstance(qualities, (str, unicode)) else qualities
for alt in qualities: for alt in qualities:
if (isinstance(alt, tuple) and '.'.join(alt) in '.'.join(words)) or (isinstance(alt, (str, unicode)) and ss(alt.lower()) in cur_file.lower()): if (isinstance(alt, tuple)):
if len(set(words) & set(alt)) == len(alt):
log.debug('Found %s via %s %s in %s', (quality['identifier'], tag_type, quality.get(tag_type), cur_file))
score += points.get(tag_type)
if (isinstance(alt, (str, unicode)) and ss(alt.lower()) in cur_file.lower()):
log.debug('Found %s via %s %s in %s', (quality['identifier'], tag_type, quality.get(tag_type), cur_file)) log.debug('Found %s via %s %s in %s', (quality['identifier'], tag_type, quality.get(tag_type), cur_file))
return True score += points.get(tag_type) / 2
if list(set(qualities) & set(words)): if list(set(qualities) & set(words)):
log.debug('Found %s via %s %s in %s', (quality['identifier'], tag_type, quality.get(tag_type), cur_file)) log.debug('Found %s via %s %s in %s', (quality['identifier'], tag_type, quality.get(tag_type), cur_file))
return True score += points.get(tag_type)
return # Check extention
for ext in quality.get('ext', []):
if ext == words[-1]:
log.debug('Found %s extension in %s', (ext, cur_file))
score += points['ext']
def guessLoose(self, cache_key, files = None, extra = None): return score
def guessLooseScore(self, quality, files = None, extra = None):
score = 0
if extra: if extra:
for quality in self.all():
# Check width resolution, range 20 # Check width resolution, range 20
if quality.get('width') and (quality.get('width') - 20) <= extra.get('resolution_width', 0) <= (quality.get('width') + 20): if quality.get('width') and (quality.get('width') - 20) <= extra.get('resolution_width', 0) <= (quality.get('width') + 20):
log.debug('Found %s via resolution_width: %s == %s', (quality['identifier'], quality.get('width'), extra.get('resolution_width', 0))) log.debug('Found %s via resolution_width: %s == %s', (quality['identifier'], quality.get('width'), extra.get('resolution_width', 0)))
return self.setCache(cache_key, quality) score += 5
# Check height resolution, range 20 # Check height resolution, range 20
if quality.get('height') and (quality.get('height') - 20) <= extra.get('resolution_height', 0) <= (quality.get('height') + 20): if quality.get('height') and (quality.get('height') - 20) <= extra.get('resolution_height', 0) <= (quality.get('height') + 20):
log.debug('Found %s via resolution_height: %s == %s', (quality['identifier'], quality.get('height'), extra.get('resolution_height', 0))) log.debug('Found %s via resolution_height: %s == %s', (quality['identifier'], quality.get('height'), extra.get('resolution_height', 0)))
return self.setCache(cache_key, quality) score += 5
if quality.get('identifier') == 'dvdrip' and 480 <= extra.get('resolution_width', 0) <= 720:
log.debug('Add point for correct dvdrip resolutions')
score += 1
return score
def calcScore(self, score, quality, add_score):
score[quality['identifier']] += add_score
# Set order for allow calculation (and cache)
if not self.cached_order:
self.cached_order = {}
for q in self.qualities:
self.cached_order[q.get('identifier')] = self.qualities.index(q)
if add_score != 0:
for allow in quality.get('allow', []):
score[allow] -= 40 if self.cached_order[allow] < self.cached_order[quality['identifier']] else 5
def doTest(self):
tests = {
'Movie Name (1999)-DVD-Rip.avi': 'dvdrip',
'Movie Name 1999 720p Bluray.mkv': '720p',
'Movie Name 1999 BR-Rip 720p.avi': 'brrip',
'Movie Name 1999 720p Web Rip.avi': 'scr',
'Movie Name 1999 Web DL.avi': 'brrip',
'Movie.Name.1999.1080p.WEBRip.H264-Group': 'scr',
'Movie.Name.1999.DVDRip-Group': 'dvdrip',
'Movie.Name.1999.DVD-Rip-Group': 'dvdrip',
'Movie.Name.1999.DVD-R-Group': 'dvdr',
}
correct = 0
for name in tests:
success = self.guess([name]).get('identifier') == tests[name]
if not success:
log.error('%s failed check, thinks it\'s %s', (name, self.guess([name]).get('identifier')))
correct += success
if correct == len(tests):
log.info('Quality test successful')
return True
else:
log.error('Quality test failed: %s out of %s succeeded', (correct, len(tests)))
if 480 <= extra.get('resolution_width', 0) <= 720:
log.debug('Found as dvdrip')
return self.setCache(cache_key, self.single('dvdrip'))
return None

174
couchpotato/core/plugins/release/main.py

@ -2,10 +2,14 @@ from couchpotato import get_session, md5
from couchpotato.api import addApiView from couchpotato.api import addApiView
from couchpotato.core.event import fireEvent, addEvent from couchpotato.core.event import fireEvent, addEvent
from couchpotato.core.helpers.encoding import ss, toUnicode from couchpotato.core.helpers.encoding import ss, toUnicode
from couchpotato.core.helpers.variable import getTitle
from couchpotato.core.logger import CPLog from couchpotato.core.logger import CPLog
from couchpotato.core.plugins.base import Plugin from couchpotato.core.plugins.base import Plugin
from couchpotato.core.plugins.scanner.main import Scanner from couchpotato.core.plugins.scanner.main import Scanner
from couchpotato.core.settings.model import File, Release as Relea, Media, ReleaseInfo from couchpotato.core.settings.model import File, Release as Relea, Media, \
ReleaseInfo
from couchpotato.environment import Env
from inspect import ismethod, isfunction
from sqlalchemy.exc import InterfaceError from sqlalchemy.exc import InterfaceError
from sqlalchemy.orm import joinedload_all from sqlalchemy.orm import joinedload_all
from sqlalchemy.sql.expression import and_, or_ from sqlalchemy.sql.expression import and_, or_
@ -21,7 +25,7 @@ class Release(Plugin):
def __init__(self): def __init__(self):
addEvent('release.add', self.add) addEvent('release.add', self.add)
addApiView('release.download', self.download, docs = { addApiView('release.manual_download', self.manualDownload, docs = {
'desc': 'Send a release manually to the downloaders', 'desc': 'Send a release manually to the downloaders',
'params': { 'params': {
'id': {'type': 'id', 'desc': 'ID of the release object in release-table'} 'id': {'type': 'id', 'desc': 'ID of the release object in release-table'}
@ -46,12 +50,46 @@ class Release(Plugin):
} }
}) })
addEvent('release.download', self.download)
addEvent('release.try_download_result', self.tryDownloadResult)
addEvent('release.create_from_search', self.createFromSearch) addEvent('release.create_from_search', self.createFromSearch)
addEvent('release.for_movie', self.forMovie) addEvent('release.for_movie', self.forMovie)
addEvent('release.delete', self.delete) addEvent('release.delete', self.delete)
addEvent('release.clean', self.clean) addEvent('release.clean', self.clean)
addEvent('release.update_status', self.updateStatus) addEvent('release.update_status', self.updateStatus)
# Clean releases that didn't have activity in the last week
addEvent('app.load', self.cleanDone)
fireEvent('schedule.interval', 'movie.clean_releases', self.cleanDone, hours = 4)
def cleanDone(self):
log.debug('Removing releases from dashboard')
now = time.time()
week = 262080
done_status, available_status, snatched_status, downloaded_status, ignored_status = \
fireEvent('status.get', ['done', 'available', 'snatched', 'downloaded', 'ignored'], single = True)
db = get_session()
# get movies last_edit more than a week ago
media = db.query(Media) \
.filter(Media.status_id == done_status.get('id'), Media.last_edit < (now - week)) \
.all()
for item in media:
for rel in item.releases:
# Remove all available releases
if rel.status_id in [available_status.get('id')]:
fireEvent('release.delete', id = rel.id, single = True)
# Set all snatched and downloaded releases to ignored to make sure they are ignored when re-adding the move
elif rel.status_id in [snatched_status.get('id'), downloaded_status.get('id')]:
self.updateStatus(id = rel.id, status = ignored_status)
db.expire_all()
def add(self, group): def add(self, group):
db = get_session() db = get_session()
@ -108,7 +146,6 @@ class Release(Plugin):
return True return True
def saveFile(self, filepath, type = 'unknown', include_media_info = False): def saveFile(self, filepath, type = 'unknown', include_media_info = False):
properties = {} properties = {}
@ -169,19 +206,17 @@ class Release(Plugin):
'success': True 'success': True
} }
def download(self, id = None, **kwargs): def manualDownload(self, id = None, **kwargs):
db = get_session() db = get_session()
snatched_status, done_status = fireEvent('status.get', ['snatched', 'done'], single = True)
rel = db.query(Relea).filter_by(id = id).first() rel = db.query(Relea).filter_by(id = id).first()
if rel: if rel:
item = {} item = {}
for info in rel.info: for info in rel.info:
item[info.identifier] = info.value item[info.identifier] = info.value
fireEvent('notify.frontend', type = 'release.download', data = True, message = 'Snatching "%s"' % item['name']) fireEvent('notify.frontend', type = 'release.manual_download', data = True, message = 'Snatching "%s"' % item['name'])
# Get matching provider # Get matching provider
provider = fireEvent('provider.belongs_to', item['url'], provider = item.get('provider'), single = True) provider = fireEvent('provider.belongs_to', item['url'], provider = item.get('provider'), single = True)
@ -193,18 +228,18 @@ class Release(Plugin):
if item.get('protocol') != 'torrent_magnet': if item.get('protocol') != 'torrent_magnet':
item['download'] = provider.loginDownload if provider.urls.get('login') else provider.download item['download'] = provider.loginDownload if provider.urls.get('login') else provider.download
success = fireEvent('searcher.download', data = item, media = rel.media.to_dict({ success = self.download(data = item, media = rel.movie.to_dict({
'profile': {'types': {'quality': {}}}, 'profile': {'types': {'quality': {}}},
'releases': {'status': {}, 'quality': {}}, 'releases': {'status': {}, 'quality': {}},
'library': {'titles': {}, 'files':{}}, 'library': {'titles': {}, 'files':{}},
'files': {} 'files': {}
}), manual = True, single = True) }), manual = True)
if success: if success:
db.expunge_all() db.expunge_all()
rel = db.query(Relea).filter_by(id = id).first() # Get release again @RuudBurger why do we need to get it again?? rel = db.query(Relea).filter_by(id = id).first() # Get release again @RuudBurger why do we need to get it again??
fireEvent('notify.frontend', type = 'release.download', data = True, message = 'Successfully snatched "%s"' % item['name']) fireEvent('notify.frontend', type = 'release.manual_download', data = True, message = 'Successfully snatched "%s"' % item['name'])
return { return {
'success': success 'success': success
} }
@ -215,9 +250,108 @@ class Release(Plugin):
'success': False 'success': False
} }
def download(self, data, media, manual = False):
if not data.get('protocol'):
data['protocol'] = data['type']
data['type'] = 'movie'
# Test to see if any downloaders are enabled for this type
downloader_enabled = fireEvent('download.enabled', manual, data, single = True)
if downloader_enabled:
snatched_status, done_status, active_status = fireEvent('status.get', ['snatched', 'done', 'active'], single = True)
# Download release to temp
filedata = None
if data.get('download') and (ismethod(data.get('download')) or isfunction(data.get('download'))):
filedata = data.get('download')(url = data.get('url'), nzb_id = data.get('id'))
if filedata == 'try_next':
return filedata
download_result = fireEvent('download', data = data, movie = media, manual = manual, filedata = filedata, single = True)
log.debug('Downloader result: %s', download_result)
if download_result:
try:
# Mark release as snatched
db = get_session()
rls = db.query(Relea).filter_by(identifier = md5(data['url'])).first()
if rls:
renamer_enabled = Env.setting('enabled', 'renamer')
# Save download-id info if returned
if isinstance(download_result, dict):
for key in download_result:
rls_info = ReleaseInfo(
identifier = 'download_%s' % key,
value = toUnicode(download_result.get(key))
)
rls.info.append(rls_info)
db.commit()
log_movie = '%s (%s) in %s' % (getTitle(media['library']), media['library']['year'], rls.quality.label)
snatch_message = 'Snatched "%s": %s' % (data.get('name'), log_movie)
log.info(snatch_message)
fireEvent('%s.snatched' % data['type'], message = snatch_message, data = rls.to_dict())
# If renamer isn't used, mark media done
if not renamer_enabled:
try:
if media['status_id'] == active_status.get('id'):
for profile_type in media['profile']['types']:
if profile_type['quality_id'] == rls.quality.id and profile_type['finish']:
log.info('Renamer disabled, marking media as finished: %s', log_movie)
# Mark release done
self.updateStatus(rls.id, status = done_status)
# Mark media done
mdia = db.query(Media).filter_by(id = media['id']).first()
mdia.status_id = done_status.get('id')
mdia.last_edit = int(time.time())
db.commit()
except:
log.error('Failed marking media finished, renamer disabled: %s', traceback.format_exc())
else:
self.updateStatus(rls.id, status = snatched_status)
except:
log.error('Failed marking media finished: %s', traceback.format_exc())
return True
log.info('Tried to download, but none of the "%s" downloaders are enabled or gave an error', (data.get('protocol')))
return False
def tryDownloadResult(self, results, media, quality_type, manual = False):
ignored_status, failed_status = fireEvent('status.get', ['ignored', 'failed'], single = True)
for rel in results:
if not quality_type.get('finish', False) and quality_type.get('wait_for', 0) > 0 and rel.get('age') <= quality_type.get('wait_for', 0):
log.info('Ignored, waiting %s days: %s', (quality_type.get('wait_for'), rel['name']))
continue
if rel['status_id'] in [ignored_status.get('id'), failed_status.get('id')]:
log.info('Ignored: %s', rel['name'])
continue
if rel['score'] <= 0:
log.info('Ignored, score to low: %s', rel['name'])
continue
downloaded = fireEvent('release.download', data = rel, media = media, manual = manual, single = True)
if downloaded is True:
return True
elif downloaded != 'try_next':
break
return False
def createFromSearch(self, search_results, media, quality_type): def createFromSearch(self, search_results, media, quality_type):
available_status, ignored_status, failed_status = fireEvent('status.get', ['available', 'ignored', 'failed'], single = True) available_status = fireEvent('status.get', ['available'], single = True)
db = get_session() db = get_session()
found_releases = [] found_releases = []
@ -231,7 +365,8 @@ class Release(Plugin):
if not rls: if not rls:
rls = Relea( rls = Relea(
identifier = rel_identifier, identifier = rel_identifier,
media_id = media.get('id'), movie_id = media.get('id'),
#media_id = media.get('id'),
quality_id = quality_type.get('quality_id'), quality_id = quality_type.get('quality_id'),
status_id = available_status.get('id') status_id = available_status.get('id')
) )
@ -286,7 +421,7 @@ class Release(Plugin):
} }
def updateStatus(self, id, status = None): def updateStatus(self, id, status = None):
if not status: return if not status: return False
db = get_session() db = get_session()
@ -297,11 +432,20 @@ class Release(Plugin):
for info in rel.info: for info in rel.info:
item[info.identifier] = info.value item[info.identifier] = info.value
if rel.files:
for file_item in rel.files:
if file_item.type.identifier == 'movie':
release_name = os.path.basename(file_item.path)
break
else:
release_name = item['name']
#update status in Db #update status in Db
log.debug('Marking release %s as %s', (item['name'], status.get("label"))) log.debug('Marking release %s as %s', (release_name, status.get("label")))
rel.status_id = status.get('id') rel.status_id = status.get('id')
rel.last_edit = int(time.time()) rel.last_edit = int(time.time())
db.commit() db.commit()
#Update all movie info as there is no release update function #Update all movie info as there is no release update function
fireEvent('notify.frontend', type = 'release.update_status.%s' % rel.id, data = status.get('id')) fireEvent('notify.frontend', type = 'release.update_status', data = rel.to_dict())
return True

1
couchpotato/core/plugins/renamer/__init__.py

@ -28,6 +28,7 @@ rename_options = {
'cd': 'CD number (cd1)', 'cd': 'CD number (cd1)',
'cd_nr': 'Just the cd nr. (1)', 'cd_nr': 'Just the cd nr. (1)',
'mpaa': 'MPAA Rating', 'mpaa': 'MPAA Rating',
'category': 'Category label',
}, },
} }

460
couchpotato/core/plugins/renamer/main.py

@ -1,9 +1,9 @@
from couchpotato import get_session from couchpotato import get_session
from couchpotato.api import addApiView from couchpotato.api import addApiView
from couchpotato.core.event import addEvent, fireEvent, fireEventAsync from couchpotato.core.event import addEvent, fireEvent, fireEventAsync
from couchpotato.core.helpers.encoding import toUnicode, ss from couchpotato.core.helpers.encoding import toUnicode, ss, sp
from couchpotato.core.helpers.variable import getExt, mergeDicts, getTitle, \ from couchpotato.core.helpers.variable import getExt, mergeDicts, getTitle, \
getImdb, link, symlink, tryInt getImdb, link, symlink, tryInt, splitString
from couchpotato.core.logger import CPLog from couchpotato.core.logger import CPLog
from couchpotato.core.plugins.base import Plugin from couchpotato.core.plugins.base import Plugin
from couchpotato.core.settings.model import Library, File, Profile, Release, \ from couchpotato.core.settings.model import Library, File, Profile, Release, \
@ -31,8 +31,10 @@ class Renamer(Plugin):
'params': { 'params': {
'async': {'desc': 'Optional: Set to 1 if you dont want to fire the renamer.scan asynchronous.'}, 'async': {'desc': 'Optional: Set to 1 if you dont want to fire the renamer.scan asynchronous.'},
'movie_folder': {'desc': 'Optional: The folder of the movie to scan. Keep empty for default renamer folder.'}, 'movie_folder': {'desc': 'Optional: The folder of the movie to scan. Keep empty for default renamer folder.'},
'downloader' : {'desc': 'Optional: The downloader this movie has been downloaded with'}, 'files': {'desc': 'Optional: Provide the release files if more releases are in the same movie_folder, delimited with a \'|\'. Note that no dedicated release folder is expected for releases with one file.'},
'download_id': {'desc': 'Optional: The downloader\'s nzb/torrent ID'}, 'downloader' : {'desc': 'Optional: The downloader the release has been downloaded with. \'download_id\' is required with this option.'},
'download_id': {'desc': 'Optional: The nzb/torrent ID of the release in movie_folder. \'downloader\' is required with this option.'},
'status': {'desc': 'Optional: The status of the release: \'completed\' (default) or \'seeding\''},
}, },
}) })
@ -62,23 +64,26 @@ class Renamer(Plugin):
def scanView(self, **kwargs): def scanView(self, **kwargs):
async = tryInt(kwargs.get('async', 0)) async = tryInt(kwargs.get('async', 0))
movie_folder = kwargs.get('movie_folder') movie_folder = sp(kwargs.get('movie_folder'))
downloader = kwargs.get('downloader') downloader = kwargs.get('downloader')
download_id = kwargs.get('download_id') download_id = kwargs.get('download_id')
files = '|'.join([sp(filename) for filename in splitString(kwargs.get('files'), '|')])
status = kwargs.get('status', 'completed')
download_info = {'folder': movie_folder} if movie_folder else None release_download = {'folder': movie_folder} if movie_folder else None
if download_info: if release_download:
download_info.update({'id': download_id, 'downloader': downloader} if download_id else {}) release_download.update({'id': download_id, 'downloader': downloader, 'status': status, 'files': files} if download_id else {})
fire_handle = fireEvent if not async else fireEventAsync fire_handle = fireEvent if not async else fireEventAsync
fire_handle('renamer.scan', download_info) fire_handle('renamer.scan', release_download)
return { return {
'success': True 'success': True
} }
def scan(self, download_info = None): def scan(self, release_download = None):
if not release_download: release_download = {}
if self.isDisabled(): if self.isDisabled():
return return
@ -87,22 +92,66 @@ class Renamer(Plugin):
log.info('Renamer is already running, if you see this often, check the logs above for errors.') log.info('Renamer is already running, if you see this often, check the logs above for errors.')
return return
movie_folder = download_info and download_info.get('folder') from_folder = sp(self.conf('from'))
to_folder = sp(self.conf('to'))
# Check to see if the "to" folder is inside the "from" folder. # Get movie folder to process
if movie_folder and not os.path.isdir(movie_folder) or not os.path.isdir(self.conf('from')) or not os.path.isdir(self.conf('to')): movie_folder = release_download.get('folder')
l = log.debug if movie_folder else log.error
l('Both the "To" and "From" have to exist.') # Get all folders that should not be processed
return no_process = [to_folder]
elif self.conf('from') in self.conf('to'): cat_list = fireEvent('category.all', single = True) or []
log.error('The "to" can\'t be inside of the "from" folder. You\'ll get an infinite loop.') no_process.extend([item['destination'] for item in cat_list])
return try:
elif movie_folder and movie_folder in [self.conf('to'), self.conf('from')]: if Env.setting('library', section = 'manage').strip():
log.error('The "to" and "from" folders can\'t be inside of or the same as the provided movie folder.') no_process.extend([sp(manage_folder) for manage_folder in splitString(Env.setting('library', section = 'manage'), '::')])
except:
pass
# Check to see if the no_process folders are inside the "from" folder.
if not os.path.isdir(from_folder) or not os.path.isdir(to_folder):
log.error('Both the "To" and "From" have to exist.')
return return
else:
for item in no_process:
if from_folder in item:
log.error('To protect your data, the movie libraries can\'t be inside of or the same as the "from" folder.')
return
# Check to see if the no_process folders are inside the provided movie_folder
if movie_folder and not os.path.isdir(movie_folder):
log.debug('The provided movie folder %s does not exist. Trying to find it in the \'from\' folder.', movie_folder)
# Update to the from folder
if len(splitString(release_download.get('files'), '|')) == 1:
new_movie_folder = from_folder
else:
new_movie_folder = os.path.join(from_folder, os.path.basename(movie_folder))
if not os.path.isdir(new_movie_folder):
log.error('The provided movie folder %s does not exist and could also not be found in the \'from\' folder.', movie_folder)
return
# Update the files
new_files = [os.path.join(new_movie_folder, os.path.relpath(filename, movie_folder)) for filename in splitString(release_download.get('files'), '|')]
if new_files and not os.path.isfile(new_files[0]):
log.error('The provided movie folder %s does not exist and its files could also not be found in the \'from\' folder.', movie_folder)
return
# Update release_download info to the from folder
log.debug('Release %s found in the \'from\' folder.', movie_folder)
release_download['folder'] = new_movie_folder
release_download['files'] = '|'.join(new_files)
movie_folder = new_movie_folder
if movie_folder:
for item in no_process:
if movie_folder in item:
log.error('To protect your data, the movie libraries can\'t be inside of or the same as the provided movie folder.')
return
# Make sure a checkSnatched marked all downloads/seeds as such # Make sure a checkSnatched marked all downloads/seeds as such
if not download_info and self.conf('run_every') > 0: if not release_download and self.conf('run_every') > 0:
fireEvent('renamer.check_snatched') fireEvent('renamer.check_snatched')
self.renaming_started = True self.renaming_started = True
@ -112,29 +161,35 @@ class Renamer(Plugin):
files = [] files = []
if movie_folder: if movie_folder:
log.info('Scanning movie folder %s...', movie_folder) log.info('Scanning movie folder %s...', movie_folder)
movie_folder = movie_folder.rstrip(os.path.sep)
folder = os.path.dirname(movie_folder) folder = os.path.dirname(movie_folder)
# Get all files from the specified folder if release_download.get('files', ''):
try: files = splitString(release_download['files'], '|')
for root, folders, names in os.walk(movie_folder):
files.extend([os.path.join(root, name) for name in names]) # If there is only one file in the torrent, the downloader did not create a subfolder
except: if len(files) == 1:
log.error('Failed getting files from %s: %s', (movie_folder, traceback.format_exc())) folder = movie_folder
else:
# Get all files from the specified folder
try:
for root, folders, names in os.walk(movie_folder):
files.extend([sp(os.path.join(root, name)) for name in names])
except:
log.error('Failed getting files from %s: %s', (movie_folder, traceback.format_exc()))
db = get_session() db = get_session()
# Extend the download info with info stored in the downloaded release # Extend the download info with info stored in the downloaded release
download_info = self.extendDownloadInfo(download_info) release_download = self.extendReleaseDownload(release_download)
# Unpack any archives # Unpack any archives
extr_files = None extr_files = None
if self.conf('unrar'): if self.conf('unrar'):
folder, movie_folder, files, extr_files = self.extractFiles(folder = folder, movie_folder = movie_folder, files = files, folder, movie_folder, files, extr_files = self.extractFiles(folder = folder, movie_folder = movie_folder, files = files,
cleanup = self.conf('cleanup') and not self.downloadIsTorrent(download_info)) cleanup = self.conf('cleanup') and not self.downloadIsTorrent(release_download))
groups = fireEvent('scanner.scan', folder = folder if folder else self.conf('from'), groups = fireEvent('scanner.scan', folder = folder if folder else from_folder,
files = files, download_info = download_info, return_ignored = False, single = True) files = files, release_download = release_download, return_ignored = False, single = True) or []
folder_name = self.conf('folder_name') folder_name = self.conf('folder_name')
file_name = self.conf('file_name') file_name = self.conf('file_name')
@ -142,9 +197,9 @@ class Renamer(Plugin):
nfo_name = self.conf('nfo_name') nfo_name = self.conf('nfo_name')
separator = self.conf('separator') separator = self.conf('separator')
# Statusses # Statuses
done_status, active_status, downloaded_status, snatched_status = \ done_status, active_status, downloaded_status, snatched_status, seeding_status = \
fireEvent('status.get', ['done', 'active', 'downloaded', 'snatched'], single = True) fireEvent('status.get', ['done', 'active', 'downloaded', 'snatched', 'seeding'], single = True)
for group_identifier in groups: for group_identifier in groups:
@ -157,7 +212,7 @@ class Renamer(Plugin):
# Add _UNKNOWN_ if no library item is connected # Add _UNKNOWN_ if no library item is connected
if not group['library'] or not movie_title: if not group['library'] or not movie_title:
self.tagDir(group, 'unknown') self.tagRelease(group = group, tag = 'unknown')
continue continue
# Rename the files using the library data # Rename the files using the library data
else: else:
@ -172,8 +227,13 @@ class Renamer(Plugin):
movie_title = getTitle(library) movie_title = getTitle(library)
# Overwrite destination when set in category # Overwrite destination when set in category
destination = self.conf('to') destination = to_folder
for movie in library_ent.media: category_label = ''
for movie in library_ent.movies:
if movie.category and movie.category.label:
category_label = movie.category.label
if movie.category and movie.category.destination and len(movie.category.destination) > 0 and movie.category.destination != 'None': if movie.category and movie.category.destination and len(movie.category.destination) > 0 and movie.category.destination != 'None':
destination = movie.category.destination destination = movie.category.destination
log.debug('Setting category destination for "%s": %s' % (movie_title, destination)) log.debug('Setting category destination for "%s": %s' % (movie_title, destination))
@ -190,7 +250,7 @@ class Renamer(Plugin):
if extr_files: if extr_files:
group['before_rename'].extend(extr_files) group['before_rename'].extend(extr_files)
# Remove weird chars from moviename # Remove weird chars from movie name
movie_name = re.sub(r"[\x00\/\\:\*\?\"<>\|]", '', movie_title) movie_name = re.sub(r"[\x00\/\\:\*\?\"<>\|]", '', movie_title)
# Put 'The' at the end # Put 'The' at the end
@ -217,6 +277,7 @@ class Renamer(Plugin):
'cd': '', 'cd': '',
'cd_nr': '', 'cd_nr': '',
'mpaa': library['info'].get('mpaa', ''), 'mpaa': library['info'].get('mpaa', ''),
'category': category_label,
} }
for file_type in group['files']: for file_type in group['files']:
@ -225,7 +286,7 @@ class Renamer(Plugin):
if file_type is 'nfo' and not self.conf('rename_nfo'): if file_type is 'nfo' and not self.conf('rename_nfo'):
log.debug('Skipping, renaming of %s disabled', file_type) log.debug('Skipping, renaming of %s disabled', file_type)
for current_file in group['files'][file_type]: for current_file in group['files'][file_type]:
if self.conf('cleanup') and (not self.downloadIsTorrent(download_info) or self.fileIsAdded(current_file, group)): if self.conf('cleanup') and (not self.downloadIsTorrent(release_download) or self.fileIsAdded(current_file, group)):
remove_files.append(current_file) remove_files.append(current_file)
continue continue
@ -385,7 +446,7 @@ class Renamer(Plugin):
log.info('Better quality release already exists for %s, with quality %s', (movie.library.titles[0].title, release.quality.label)) log.info('Better quality release already exists for %s, with quality %s', (movie.library.titles[0].title, release.quality.label))
# Add exists tag to the .ignore file # Add exists tag to the .ignore file
self.tagDir(group, 'exists') self.tagRelease(group = group, tag = 'exists')
# Notify on rename fail # Notify on rename fail
download_message = 'Renaming of %s (%s) cancelled, exists in %s already.' % (movie.library.titles[0].title, group['meta_data']['quality']['label'], release.quality.label) download_message = 'Renaming of %s (%s) cancelled, exists in %s already.' % (movie.library.titles[0].title, group['meta_data']['quality']['label'], release.quality.label)
@ -393,10 +454,20 @@ class Renamer(Plugin):
remove_leftovers = False remove_leftovers = False
break break
elif release.status_id is snatched_status.get('id'):
if release.quality.id is group['meta_data']['quality']['id']: elif release.status_id in [snatched_status.get('id'), seeding_status.get('id')]:
# Set the release to downloaded if release_download and release_download.get('rls_id'):
fireEvent('release.update_status', release.id, status = downloaded_status, single = True) if release_download['rls_id'] == release.id:
if release_download['status'] == 'completed':
# Set the release to downloaded
fireEvent('release.update_status', release.id, status = downloaded_status, single = True)
elif release_download['status'] == 'seeding':
# Set the release to seeding
fireEvent('release.update_status', release.id, status = seeding_status, single = True)
elif release.quality.id is group['meta_data']['quality']['id']:
# Set the release to downloaded
fireEvent('release.update_status', release.id, status = downloaded_status, single = True)
# Remove leftover files # Remove leftover files
if not remove_leftovers: # Don't remove anything if not remove_leftovers: # Don't remove anything
@ -405,7 +476,7 @@ class Renamer(Plugin):
log.debug('Removing leftover files') log.debug('Removing leftover files')
for current_file in group['files']['leftover']: for current_file in group['files']['leftover']:
if self.conf('cleanup') and not self.conf('move_leftover') and \ if self.conf('cleanup') and not self.conf('move_leftover') and \
(not self.downloadIsTorrent(download_info) or self.fileIsAdded(current_file, group)): (not self.downloadIsTorrent(release_download) or self.fileIsAdded(current_file, group)):
remove_files.append(current_file) remove_files.append(current_file)
# Remove files # Remove files
@ -421,17 +492,17 @@ class Renamer(Plugin):
log.info('Removing "%s"', src) log.info('Removing "%s"', src)
try: try:
src = ss(src) src = sp(src)
if os.path.isfile(src): if os.path.isfile(src):
os.remove(src) os.remove(src)
parent_dir = os.path.normpath(os.path.dirname(src)) parent_dir = os.path.dirname(src)
if delete_folders.count(parent_dir) == 0 and os.path.isdir(parent_dir) and not parent_dir in [destination, movie_folder] and not self.conf('from') in parent_dir: if delete_folders.count(parent_dir) == 0 and os.path.isdir(parent_dir) and not parent_dir in [destination, movie_folder] and not from_folder in parent_dir:
delete_folders.append(parent_dir) delete_folders.append(parent_dir)
except: except:
log.error('Failed removing %s: %s', (src, traceback.format_exc())) log.error('Failed removing %s: %s', (src, traceback.format_exc()))
self.tagDir(group, 'failed_remove') self.tagRelease(group = group, tag = 'failed_remove')
# Delete leftover folder from older releases # Delete leftover folder from older releases
for delete_folder in delete_folders: for delete_folder in delete_folders:
@ -451,15 +522,15 @@ class Renamer(Plugin):
self.makeDir(os.path.dirname(dst)) self.makeDir(os.path.dirname(dst))
try: try:
self.moveFile(src, dst, forcemove = not self.downloadIsTorrent(download_info) or self.fileIsAdded(src, group)) self.moveFile(src, dst, forcemove = not self.downloadIsTorrent(release_download) or self.fileIsAdded(src, group))
group['renamed_files'].append(dst) group['renamed_files'].append(dst)
except: except:
log.error('Failed moving the file "%s" : %s', (os.path.basename(src), traceback.format_exc())) log.error('Failed moving the file "%s" : %s', (os.path.basename(src), traceback.format_exc()))
self.tagDir(group, 'failed_rename') self.tagRelease(group = group, tag = 'failed_rename')
# Tag folder if it is in the 'from' folder and it will not be removed because it is a torrent # Tag folder if it is in the 'from' folder and it will not be removed because it is a torrent
if self.movieInFromFolder(movie_folder) and self.downloadIsTorrent(download_info): if self.movieInFromFolder(movie_folder) and self.downloadIsTorrent(release_download):
self.tagDir(group, 'renamed_already') self.tagRelease(group = group, tag = 'renamed_already')
# Remove matching releases # Remove matching releases
for release in remove_releases: for release in remove_releases:
@ -469,13 +540,13 @@ class Renamer(Plugin):
except: except:
log.error('Failed removing %s: %s', (release.identifier, traceback.format_exc())) log.error('Failed removing %s: %s', (release.identifier, traceback.format_exc()))
if group['dirname'] and group['parentdir'] and not self.downloadIsTorrent(download_info): if group['dirname'] and group['parentdir'] and not self.downloadIsTorrent(release_download):
if movie_folder: if movie_folder:
# Delete the movie folder # Delete the movie folder
group_folder = movie_folder group_folder = movie_folder
else: else:
# Delete the first empty subfolder in the tree relative to the 'from' folder # Delete the first empty subfolder in the tree relative to the 'from' folder
group_folder = os.path.join(self.conf('from'), os.path.relpath(group['parentdir'], self.conf('from')).split(os.path.sep)[0]) group_folder = sp(os.path.join(from_folder, os.path.relpath(group['parentdir'], from_folder).split(os.path.sep)[0]))
try: try:
log.info('Deleting folder: %s', group_folder) log.info('Deleting folder: %s', group_folder)
@ -516,18 +587,9 @@ class Renamer(Plugin):
return rename_files return rename_files
# This adds a file to ignore / tag a release so it is ignored later # This adds a file to ignore / tag a release so it is ignored later
def tagDir(self, group, tag): def tagRelease(self, tag, group = None, release_download = None):
if not tag:
ignore_file = None return
if isinstance(group, dict):
for movie_file in sorted(list(group['files']['movie'])):
ignore_file = '%s.%s.ignore' % (os.path.splitext(movie_file)[0], tag)
break
else:
if not os.path.isdir(group) or not tag:
return
ignore_file = os.path.join(group, '%s.ignore' % tag)
text = """This file is from CouchPotato text = """This file is from CouchPotato
It has marked this release as "%s" It has marked this release as "%s"
@ -535,25 +597,88 @@ This file hides the release from the renamer
Remove it if you want it to be renamed (again, or at least let it try again) Remove it if you want it to be renamed (again, or at least let it try again)
""" % tag """ % tag
if ignore_file: tag_files = []
self.createFile(ignore_file, text)
def untagDir(self, folder, tag = ''): # Tag movie files if they are known
if not os.path.isdir(folder): if isinstance(group, dict):
tag_files = [sorted(list(group['files']['movie']))[0]]
elif isinstance(release_download, dict):
# Tag download_files if they are known
if release_download['files']:
tag_files = splitString(release_download['files'], '|')
# Tag all files in release folder
else:
for root, folders, names in os.walk(release_download['folder']):
tag_files.extend([os.path.join(root, name) for name in names])
for filename in tag_files:
tag_filename = '%s.%s.ignore' % (os.path.splitext(filename)[0], tag)
if not os.path.isfile(tag_filename):
self.createFile(tag_filename, text)
def untagRelease(self, release_download, tag = ''):
if not release_download:
return return
# Remove any .ignore files tag_files = []
folder = release_download['folder']
if not os.path.isdir(folder):
return False
# Untag download_files if they are known
if release_download['files']:
tag_files = splitString(release_download['files'], '|')
# Untag all files in release folder
else:
for root, folders, names in os.walk(release_download['folder']):
tag_files.extend([sp(os.path.join(root, name)) for name in names if not os.path.splitext(name)[1] == '.ignore'])
# Find all .ignore files in folder
ignore_files = []
for root, dirnames, filenames in os.walk(folder): for root, dirnames, filenames in os.walk(folder):
for filename in fnmatch.filter(filenames, '*%s.ignore' % tag): ignore_files.extend(fnmatch.filter([sp(os.path.join(root, filename)) for filename in filenames], '*%s.ignore' % tag))
os.remove((os.path.join(root, filename)))
def hastagDir(self, folder, tag = ''): # Match all found ignore files with the tag_files and delete if found
for tag_file in tag_files:
ignore_file = fnmatch.filter(ignore_files, '%s.%s.ignore' % (re.escape(os.path.splitext(tag_file)[0]), tag if tag else '*'))
for filename in ignore_file:
try:
os.remove(filename)
except:
log.debug('Unable to remove ignore file: %s. Error: %s.' % (filename, traceback.format_exc()))
def hastagRelease(self, release_download, tag = ''):
if not release_download:
return False
folder = release_download['folder']
if not os.path.isdir(folder): if not os.path.isdir(folder):
return False return False
# Find any .ignore files tag_files = []
ignore_files = []
# Find tag on download_files if they are known
if release_download['files']:
tag_files = splitString(release_download['files'], '|')
# Find tag on all files in release folder
else:
for root, folders, names in os.walk(release_download['folder']):
tag_files.extend([sp(os.path.join(root, name)) for name in names if not os.path.splitext(name)[1] == '.ignore'])
# Find all .ignore files in folder
for root, dirnames, filenames in os.walk(folder): for root, dirnames, filenames in os.walk(folder):
if fnmatch.filter(filenames, '*%s.ignore' % tag): ignore_files.extend(fnmatch.filter([sp(os.path.join(root, filename)) for filename in filenames], '*%s.ignore' % tag))
# Match all found ignore files with the tag_files and return True found
for tag_file in tag_files:
ignore_file = fnmatch.filter(ignore_files, '%s.%s.ignore' % (os.path.splitext(tag_file)[0], tag if tag else '*'))
if ignore_file:
return True return True
return False return False
@ -572,7 +697,7 @@ Remove it if you want it to be renamed (again, or at least let it try again)
link(old, dest) link(old, dest)
except: except:
# Try to simlink next # Try to simlink next
log.debug('Couldn\'t hardlink file "%s" to "%s". Simlinking instead. Error: %s. ', (old, dest, traceback.format_exc())) log.debug('Couldn\'t hardlink file "%s" to "%s". Simlinking instead. Error: %s.', (old, dest, traceback.format_exc()))
shutil.copy(old, dest) shutil.copy(old, dest)
try: try:
symlink(dest, old + '.link') symlink(dest, old + '.link')
@ -616,22 +741,38 @@ Remove it if you want it to be renamed (again, or at least let it try again)
replaced = toUnicode(string) replaced = toUnicode(string)
for x, r in replacements.iteritems(): for x, r in replacements.iteritems():
if x in ['thename', 'namethe']:
continue
if r is not None: if r is not None:
replaced = replaced.replace(u'<%s>' % toUnicode(x), toUnicode(r)) replaced = replaced.replace(u'<%s>' % toUnicode(x), toUnicode(r))
else: else:
#If information is not available, we don't want the tag in the filename #If information is not available, we don't want the tag in the filename
replaced = replaced.replace('<' + x + '>', '') replaced = replaced.replace('<' + x + '>', '')
replaced = self.replaceDoubles(replaced.lstrip('. '))
for x, r in replacements.iteritems():
if x in ['thename', 'namethe']:
replaced = replaced.replace(u'<%s>' % toUnicode(x), toUnicode(r))
replaced = re.sub(r"[\x00:\*\?\"<>\|]", '', replaced) replaced = re.sub(r"[\x00:\*\?\"<>\|]", '', replaced)
sep = self.conf('foldersep') if folder else self.conf('separator') sep = self.conf('foldersep') if folder else self.conf('separator')
return self.replaceDoubles(replaced.lstrip('. ')).replace(' ', ' ' if not sep else sep) return replaced.replace(' ', ' ' if not sep else sep)
def replaceDoubles(self, string): def replaceDoubles(self, string):
return string.replace(' ', ' ').replace(' .', '.')
replaces = [
('\.+', '.'), ('_+', '_'), ('-+', '-'), ('\s+', ' '),
('(\s\.)+', '.'), ('(-\.)+', '.'), ('(\s-)+', '-'),
]
for r in replaces:
reg, replace_with = r
string = re.sub(reg, replace_with, string)
return string
def deleteEmptyFolder(self, folder, show_error = True): def deleteEmptyFolder(self, folder, show_error = True):
folder = ss(folder) folder = sp(folder)
loge = log.error if show_error else log.debug loge = log.error if show_error else log.debug
for root, dirs, files in os.walk(folder): for root, dirs, files in os.walk(folder):
@ -657,22 +798,22 @@ Remove it if you want it to be renamed (again, or at least let it try again)
self.checking_snatched = True self.checking_snatched = True
snatched_status, ignored_status, failed_status, done_status, seeding_status, downloaded_status, missing_status = \ snatched_status, ignored_status, failed_status, seeding_status, downloaded_status, missing_status = \
fireEvent('status.get', ['snatched', 'ignored', 'failed', 'done', 'seeding', 'downloaded', 'missing'], single = True) fireEvent('status.get', ['snatched', 'ignored', 'failed', 'seeding', 'downloaded', 'missing'], single = True)
db = get_session() db = get_session()
rels = db.query(Release).filter( rels = db.query(Release).filter(
Release.status_id.in_([snatched_status.get('id'), seeding_status.get('id'), missing_status.get('id')]) Release.status_id.in_([snatched_status.get('id'), seeding_status.get('id'), missing_status.get('id')])
).all() ).all()
scan_items = [] scan_releases = []
scan_required = False scan_required = False
if rels: if rels:
log.debug('Checking status snatched releases...') log.debug('Checking status snatched releases...')
statuses = fireEvent('download.status', merge = True) release_downloads = fireEvent('download.status', merge = True)
if not statuses: if not release_downloads:
log.debug('Download status functionality is not implemented for active downloaders.') log.debug('Download status functionality is not implemented for active downloaders.')
scan_required = True scan_required = True
else: else:
@ -680,91 +821,91 @@ Remove it if you want it to be renamed (again, or at least let it try again)
for rel in rels: for rel in rels:
rel_dict = rel.to_dict({'info': {}}) rel_dict = rel.to_dict({'info': {}})
movie_dict = fireEvent('movie.get', rel.media_id, single = True) if not isinstance(rel_dict['info'], (dict)):
log.error('Faulty release found without any info, ignoring.')
fireEvent('release.update_status', rel.id, status = ignored_status, single = True)
continue
# check status # check status
nzbname = self.createNzbName(rel_dict['info'], movie_dict) nzbname = self.createNzbName(rel_dict['info'], movie_dict)
found = False found = False
for item in statuses: for release_download in release_downloads:
found_release = False found_release = False
if rel_dict['info'].get('download_id'): if rel_dict['info'].get('download_id'):
if item['id'] == rel_dict['info']['download_id'] and item['downloader'] == rel_dict['info']['download_downloader']: if release_download['id'] == rel_dict['info']['download_id'] and release_download['downloader'] == rel_dict['info']['download_downloader']:
log.debug('Found release by id: %s', item['id']) log.debug('Found release by id: %s', release_download['id'])
found_release = True found_release = True
else: else:
if item['name'] == nzbname or rel_dict['info']['name'] in item['name'] or getImdb(item['name']) == movie_dict['library']['identifier']: if release_download['name'] == nzbname or rel_dict['info']['name'] in release_download['name'] or getImdb(release_download['name']) == movie_dict['library']['identifier']:
found_release = True found_release = True
if found_release: if found_release:
timeleft = 'N/A' if item['timeleft'] == -1 else item['timeleft'] timeleft = 'N/A' if release_download['timeleft'] == -1 else release_download['timeleft']
log.debug('Found %s: %s, time to go: %s', (item['name'], item['status'].upper(), timeleft)) log.debug('Found %s: %s, time to go: %s', (release_download['name'], release_download['status'].upper(), timeleft))
if item['status'] == 'busy': if release_download['status'] == 'busy':
# Set the release to snatched if it was missing before # Set the release to snatched if it was missing before
fireEvent('release.update_status', rel.id, status = snatched_status, single = True) fireEvent('release.update_status', rel.id, status = snatched_status, single = True)
# Tag folder if it is in the 'from' folder and it will not be processed because it is still downloading # Tag folder if it is in the 'from' folder and it will not be processed because it is still downloading
if item['folder'] and self.conf('from') in item['folder']: if self.movieInFromFolder(release_download['folder']):
self.tagDir(item['folder'], 'downloading') self.tagRelease(release_download = release_download, tag = 'downloading')
elif item['status'] == 'seeding':
# Set the release to seeding
fireEvent('release.update_status', rel.id, status = seeding_status, single = True)
elif release_download['status'] == 'seeding':
#If linking setting is enabled, process release #If linking setting is enabled, process release
if self.conf('file_action') != 'move' and not rel.status_id == seeding_status.get('id') and self.statusInfoComplete(item): if self.conf('file_action') != 'move' and not rel.status_id == seeding_status.get('id') and self.statusInfoComplete(release_download):
log.info('Download of %s completed! It is now being processed while leaving the original files alone for seeding. Current ratio: %s.', (item['name'], item['seed_ratio'])) log.info('Download of %s completed! It is now being processed while leaving the original files alone for seeding. Current ratio: %s.', (release_download['name'], release_download['seed_ratio']))
# Remove the downloading tag # Remove the downloading tag
self.untagDir(item['folder'], 'downloading') self.untagRelease(release_download = release_download, tag = 'downloading')
# Scan and set the torrent to paused if required # Scan and set the torrent to paused if required
item.update({'pause': True, 'scan': True, 'process_complete': False}) release_download.update({'pause': True, 'scan': True, 'process_complete': False})
scan_items.append(item) scan_releases.append(release_download)
else: else:
#let it seed #let it seed
log.debug('%s is seeding with ratio: %s', (item['name'], item['seed_ratio'])) log.debug('%s is seeding with ratio: %s', (release_download['name'], release_download['seed_ratio']))
# Set the release to seeding
fireEvent('release.update_status', rel.id, status = seeding_status, single = True)
elif item['status'] == 'failed': elif release_download['status'] == 'failed':
# Set the release to failed # Set the release to failed
fireEvent('release.update_status', rel.id, status = failed_status, single = True) fireEvent('release.update_status', rel.id, status = failed_status, single = True)
fireEvent('download.remove_failed', item, single = True) fireEvent('download.remove_failed', release_download, single = True)
if self.conf('next_on_failed'): if self.conf('next_on_failed'):
fireEvent('movie.searcher.try_next_release', media_id = rel.media_id) fireEvent('movie.searcher.try_next_release', movie_id = rel.movie_id)
elif item['status'] == 'completed': elif release_download['status'] == 'completed':
log.info('Download of %s completed!', item['name']) log.info('Download of %s completed!', release_download['name'])
if self.statusInfoComplete(item): if self.statusInfoComplete(release_download):
# If the release has been seeding, process now the seeding is done # If the release has been seeding, process now the seeding is done
if rel.status_id == seeding_status.get('id'): if rel.status_id == seeding_status.get('id'):
if rel.movie.status_id == done_status.get('id'): if self.conf('file_action') != 'move':
# Set the release to done as the movie has already been renamed # Set the release to done as the movie has already been renamed
fireEvent('release.update_status', rel.id, status = downloaded_status, single = True) fireEvent('release.update_status', rel.id, status = downloaded_status, single = True)
# Allow the downloader to clean-up # Allow the downloader to clean-up
item.update({'pause': False, 'scan': False, 'process_complete': True}) release_download.update({'pause': False, 'scan': False, 'process_complete': True})
scan_items.append(item) scan_releases.append(release_download)
else: else:
# Set the release to snatched so that the renamer can process the release as if it was never seeding
fireEvent('release.update_status', rel.id, status = snatched_status, single = True)
# Scan and Allow the downloader to clean-up # Scan and Allow the downloader to clean-up
item.update({'pause': False, 'scan': True, 'process_complete': True}) release_download.update({'pause': False, 'scan': True, 'process_complete': True})
scan_items.append(item) scan_releases.append(release_download)
else: else:
# Set the release to snatched if it was missing before # Set the release to snatched if it was missing before
fireEvent('release.update_status', rel.id, status = snatched_status, single = True) fireEvent('release.update_status', rel.id, status = snatched_status, single = True)
# Remove the downloading tag # Remove the downloading tag
self.untagDir(item['folder'], 'downloading') self.untagRelease(release_download = release_download, tag = 'downloading')
# Scan and Allow the downloader to clean-up # Scan and Allow the downloader to clean-up
item.update({'pause': False, 'scan': True, 'process_complete': True}) release_download.update({'pause': False, 'scan': True, 'process_complete': True})
scan_items.append(item) scan_releases.append(release_download)
else: else:
scan_required = True scan_required = True
@ -786,21 +927,21 @@ Remove it if you want it to be renamed (again, or at least let it try again)
log.error('Failed checking for release in downloader: %s', traceback.format_exc()) log.error('Failed checking for release in downloader: %s', traceback.format_exc())
# The following can either be done here, or inside the scanner if we pass it scan_items in one go # The following can either be done here, or inside the scanner if we pass it scan_items in one go
for item in scan_items: for release_download in scan_releases:
# Ask the renamer to scan the item # Ask the renamer to scan the item
if item['scan']: if release_download['scan']:
if item['pause'] and self.conf('file_action') == 'link': if release_download['pause'] and self.conf('file_action') == 'link':
fireEvent('download.pause', item = item, pause = True, single = True) fireEvent('download.pause', release_download = release_download, pause = True, single = True)
fireEvent('renamer.scan', download_info = item) fireEvent('renamer.scan', release_download = release_download)
if item['pause'] and self.conf('file_action') == 'link': if release_download['pause'] and self.conf('file_action') == 'link':
fireEvent('download.pause', item = item, pause = False, single = True) fireEvent('download.pause', release_download = release_download, pause = False, single = True)
if item['process_complete']: if release_download['process_complete']:
#First make sure the files were succesfully processed #First make sure the files were succesfully processed
if not self.hastagDir(item['folder'], 'failed_rename'): if not self.hastagRelease(release_download = release_download, tag = 'failed_rename'):
# Remove the seeding tag if it exists # Remove the seeding tag if it exists
self.untagDir(item['folder'], 'renamed_already') self.untagRelease(release_download = release_download, tag = 'renamed_already')
# Ask the downloader to process the item # Ask the downloader to process the item
fireEvent('download.process_complete', item = item, single = True) fireEvent('download.process_complete', release_download = release_download, single = True)
if scan_required: if scan_required:
fireEvent('renamer.scan') fireEvent('renamer.scan')
@ -809,16 +950,16 @@ Remove it if you want it to be renamed (again, or at least let it try again)
return True return True
def extendDownloadInfo(self, download_info): def extendReleaseDownload(self, release_download):
rls = None rls = None
if download_info and download_info.get('id') and download_info.get('downloader'): if release_download and release_download.get('id') and release_download.get('downloader'):
db = get_session() db = get_session()
rlsnfo_dwnlds = db.query(ReleaseInfo).filter_by(identifier = 'download_downloader', value = download_info.get('downloader')).all() rlsnfo_dwnlds = db.query(ReleaseInfo).filter_by(identifier = 'download_downloader', value = release_download.get('downloader')).all()
rlsnfo_ids = db.query(ReleaseInfo).filter_by(identifier = 'download_id', value = download_info.get('id')).all() rlsnfo_ids = db.query(ReleaseInfo).filter_by(identifier = 'download_id', value = release_download.get('id')).all()
for rlsnfo_dwnld in rlsnfo_dwnlds: for rlsnfo_dwnld in rlsnfo_dwnlds:
for rlsnfo_id in rlsnfo_ids: for rlsnfo_id in rlsnfo_ids:
@ -828,32 +969,33 @@ Remove it if you want it to be renamed (again, or at least let it try again)
if rls: break if rls: break
if not rls: if not rls:
log.error('Download ID %s from downloader %s not found in releases', (download_info.get('id'), download_info.get('downloader'))) log.error('Download ID %s from downloader %s not found in releases', (release_download.get('id'), release_download.get('downloader')))
if rls: if rls:
rls_dict = rls.to_dict({'info':{}}) rls_dict = rls.to_dict({'info':{}})
download_info.update({ release_download.update({
'imdb_id': rls.movie.library.identifier, 'imdb_id': rls.movie.library.identifier,
'quality': rls.quality.identifier, 'quality': rls.quality.identifier,
'protocol': rls_dict.get('info', {}).get('protocol') or rls_dict.get('info', {}).get('type'), 'protocol': rls_dict.get('info', {}).get('protocol') or rls_dict.get('info', {}).get('type'),
'rls_id': rls.id,
}) })
return download_info return release_download
def downloadIsTorrent(self, download_info): def downloadIsTorrent(self, release_download):
return download_info and download_info.get('protocol') in ['torrent', 'torrent_magnet'] return release_download and release_download.get('protocol') in ['torrent', 'torrent_magnet']
def fileIsAdded(self, src, group): def fileIsAdded(self, src, group):
if not group or not group.get('before_rename'): if not group or not group.get('before_rename'):
return False return False
return src in group['before_rename'] return src in group['before_rename']
def statusInfoComplete(self, item): def statusInfoComplete(self, release_download):
return item['id'] and item['downloader'] and item['folder'] return release_download['id'] and release_download['downloader'] and release_download['folder']
def movieInFromFolder(self, movie_folder): def movieInFromFolder(self, movie_folder):
return movie_folder and self.conf('from') in movie_folder or not movie_folder return movie_folder and sp(self.conf('from')) in sp(movie_folder) or not movie_folder
def extractFiles(self, folder = None, movie_folder = None, files = None, cleanup = False): def extractFiles(self, folder = None, movie_folder = None, files = None, cleanup = False):
if not files: files = [] if not files: files = []
@ -863,9 +1005,11 @@ Remove it if you want it to be renamed (again, or at least let it try again)
restfile_regex = '(^%s\.(?:part(?!0*1\.rar$)\d+\.rar$|[rstuvw]\d+$))' restfile_regex = '(^%s\.(?:part(?!0*1\.rar$)\d+\.rar$|[rstuvw]\d+$))'
extr_files = [] extr_files = []
from_folder = sp(self.conf('from'))
# Check input variables # Check input variables
if not folder: if not folder:
folder = self.conf('from') folder = from_folder
check_file_date = True check_file_date = True
if movie_folder: if movie_folder:
@ -873,7 +1017,7 @@ Remove it if you want it to be renamed (again, or at least let it try again)
if not files: if not files:
for root, folders, names in os.walk(folder): for root, folders, names in os.walk(folder):
files.extend([os.path.join(root, name) for name in names]) files.extend([sp(os.path.join(root, name)) for name in names])
# Find all archive files # Find all archive files
archives = [re.search(archive_regex, name).groupdict() for name in files if re.search(archive_regex, name)] archives = [re.search(archive_regex, name).groupdict() for name in files if re.search(archive_regex, name)]
@ -881,7 +1025,7 @@ Remove it if you want it to be renamed (again, or at least let it try again)
#Extract all found archives #Extract all found archives
for archive in archives: for archive in archives:
# Check if it has already been processed by CPS # Check if it has already been processed by CPS
if self.hastagDir(os.path.dirname(archive['file'])): if self.hastagRelease(release_download = {'folder': os.path.dirname(archive['file']), 'files': archive['file']}):
continue continue
# Find all related archive files # Find all related archive files
@ -919,13 +1063,13 @@ Remove it if you want it to be renamed (again, or at least let it try again)
log.info('Archive %s found. Extracting...', os.path.basename(archive['file'])) log.info('Archive %s found. Extracting...', os.path.basename(archive['file']))
try: try:
rar_handle = RarFile(archive['file']) rar_handle = RarFile(archive['file'])
extr_path = os.path.join(self.conf('from'), os.path.relpath(os.path.dirname(archive['file']), folder)) extr_path = os.path.join(from_folder, os.path.relpath(os.path.dirname(archive['file']), folder))
self.makeDir(extr_path) self.makeDir(extr_path)
for packedinfo in rar_handle.infolist(): for packedinfo in rar_handle.infolist():
if not packedinfo.isdir and not os.path.isfile(os.path.join(extr_path, os.path.basename(packedinfo.filename))): if not packedinfo.isdir and not os.path.isfile(sp(os.path.join(extr_path, os.path.basename(packedinfo.filename)))):
log.debug('Extracting %s...', packedinfo.filename) log.debug('Extracting %s...', packedinfo.filename)
rar_handle.extract(condition = [packedinfo.index], path = extr_path, withSubpath = False, overwrite = False) rar_handle.extract(condition = [packedinfo.index], path = extr_path, withSubpath = False, overwrite = False)
extr_files.append(os.path.join(extr_path, os.path.basename(packedinfo.filename))) extr_files.append(sp(os.path.join(extr_path, os.path.basename(packedinfo.filename))))
del rar_handle del rar_handle
except Exception, e: except Exception, e:
log.error('Failed to extract %s: %s %s', (archive['file'], e, traceback.format_exc())) log.error('Failed to extract %s: %s %s', (archive['file'], e, traceback.format_exc()))
@ -942,9 +1086,9 @@ Remove it if you want it to be renamed (again, or at least let it try again)
files.remove(filename) files.remove(filename)
# Move the rest of the files and folders if any files are extracted to the from folder (only if folder was provided) # Move the rest of the files and folders if any files are extracted to the from folder (only if folder was provided)
if extr_files and os.path.normpath(os.path.normcase(folder)) != os.path.normpath(os.path.normcase(self.conf('from'))): if extr_files and folder != from_folder:
for leftoverfile in list(files): for leftoverfile in list(files):
move_to = os.path.join(self.conf('from'), os.path.relpath(leftoverfile, folder)) move_to = os.path.join(from_folder, os.path.relpath(leftoverfile, folder))
try: try:
self.makeDir(os.path.dirname(move_to)) self.makeDir(os.path.dirname(move_to))
@ -967,8 +1111,8 @@ Remove it if you want it to be renamed (again, or at least let it try again)
log.debug('Removing old movie folder %s...', movie_folder) log.debug('Removing old movie folder %s...', movie_folder)
self.deleteEmptyFolder(movie_folder) self.deleteEmptyFolder(movie_folder)
movie_folder = os.path.join(self.conf('from'), os.path.relpath(movie_folder, folder)) movie_folder = os.path.join(from_folder, os.path.relpath(movie_folder, folder))
folder = self.conf('from') folder = from_folder
if extr_files: if extr_files:
files.extend(extr_files) files.extend(extr_files)

118
couchpotato/core/plugins/scanner/main.py

@ -1,6 +1,6 @@
from couchpotato import get_session from couchpotato import get_session
from couchpotato.core.event import fireEvent, addEvent from couchpotato.core.event import fireEvent, addEvent
from couchpotato.core.helpers.encoding import toUnicode, simplifyString, ss from couchpotato.core.helpers.encoding import toUnicode, simplifyString, ss, sp
from couchpotato.core.helpers.variable import getExt, getImdb, tryInt, \ from couchpotato.core.helpers.variable import getExt, getImdb, tryInt, \
splitString splitString
from couchpotato.core.logger import CPLog from couchpotato.core.logger import CPLog
@ -21,10 +21,6 @@ log = CPLog(__name__)
class Scanner(Plugin): class Scanner(Plugin):
minimal_filesize = {
'media': 314572800, # 300MB
'trailer': 1048576, # 1MB
}
ignored_in_path = [os.path.sep + 'extracted' + os.path.sep, 'extracting', '_unpack', '_failed_', '_unknown_', '_exists_', '_failed_remove_', ignored_in_path = [os.path.sep + 'extracted' + os.path.sep, 'extracting', '_unpack', '_failed_', '_unknown_', '_exists_', '_failed_remove_',
'_failed_rename_', '.appledouble', '.appledb', '.appledesktop', os.path.sep + '._', '.ds_store', 'cp.cpnfo', '_failed_rename_', '.appledouble', '.appledb', '.appledesktop', os.path.sep + '._', '.ds_store', 'cp.cpnfo',
'thumbs.db', 'ehthumbs.db', 'desktop.ini'] #unpacking, smb-crap, hidden files 'thumbs.db', 'ehthumbs.db', 'desktop.ini'] #unpacking, smb-crap, hidden files
@ -52,6 +48,12 @@ class Scanner(Plugin):
'leftover': ('leftover', 'leftover'), 'leftover': ('leftover', 'leftover'),
} }
file_sizes = { # in MB
'movie': {'min': 300},
'trailer': {'min': 2, 'max': 250},
'backdrop': {'min': 0, 'max': 5},
}
codecs = { codecs = {
'audio': ['dts', 'ac3', 'ac3d', 'mp3'], 'audio': ['dts', 'ac3', 'ac3d', 'mp3'],
'video': ['x264', 'h264', 'divx', 'xvid'] 'video': ['x264', 'h264', 'divx', 'xvid']
@ -104,9 +106,9 @@ class Scanner(Plugin):
addEvent('scanner.name_year', self.getReleaseNameYear) addEvent('scanner.name_year', self.getReleaseNameYear)
addEvent('scanner.partnumber', self.getPartNumber) addEvent('scanner.partnumber', self.getPartNumber)
def scan(self, folder = None, files = None, download_info = None, simple = False, newer_than = 0, return_ignored = True, on_found = None): def scan(self, folder = None, files = None, release_download = None, simple = False, newer_than = 0, return_ignored = True, on_found = None):
folder = ss(os.path.normpath(folder)) folder = sp(folder)
if not folder or not os.path.isdir(folder): if not folder or not os.path.isdir(folder):
log.error('Folder doesn\'t exists: %s', folder) log.error('Folder doesn\'t exists: %s', folder)
@ -122,7 +124,7 @@ class Scanner(Plugin):
try: try:
files = [] files = []
for root, dirs, walk_files in os.walk(folder): for root, dirs, walk_files in os.walk(folder):
files.extend(os.path.join(root, filename) for filename in walk_files) files.extend([sp(os.path.join(root, filename)) for filename in walk_files])
# Break if CP wants to shut down # Break if CP wants to shut down
if self.shuttingDown(): if self.shuttingDown():
@ -132,7 +134,7 @@ class Scanner(Plugin):
log.error('Failed getting files from %s: %s', (folder, traceback.format_exc())) log.error('Failed getting files from %s: %s', (folder, traceback.format_exc()))
else: else:
check_file_date = False check_file_date = False
files = [ss(x) for x in files] files = [sp(x) for x in files]
for file_path in files: for file_path in files:
@ -148,7 +150,7 @@ class Scanner(Plugin):
continue continue
is_dvd_file = self.isDVDFile(file_path) is_dvd_file = self.isDVDFile(file_path)
if os.path.getsize(file_path) > self.minimal_filesize['media'] or is_dvd_file: # Minimal 300MB files or is DVD file if self.filesizeBetween(file_path, self.file_sizes['movie']) or is_dvd_file: # Minimal 300MB files or is DVD file
# Normal identifier # Normal identifier
identifier = self.createStringIdentifier(file_path, folder, exclude_filename = is_dvd_file) identifier = self.createStringIdentifier(file_path, folder, exclude_filename = is_dvd_file)
@ -182,7 +184,6 @@ class Scanner(Plugin):
# files will be grouped first. # files will be grouped first.
leftovers = set(sorted(leftovers, reverse = True)) leftovers = set(sorted(leftovers, reverse = True))
# Group files minus extension # Group files minus extension
ignored_identifiers = [] ignored_identifiers = []
for identifier, group in movie_files.iteritems(): for identifier, group in movie_files.iteritems():
@ -191,7 +192,7 @@ class Scanner(Plugin):
log.debug('Grouping files: %s', identifier) log.debug('Grouping files: %s', identifier)
has_ignored = 0 has_ignored = 0
for file_path in group['unsorted_files']: for file_path in list(group['unsorted_files']):
ext = getExt(file_path) ext = getExt(file_path)
wo_ext = file_path[:-(len(ext) + 1)] wo_ext = file_path[:-(len(ext) + 1)]
found_files = set([i for i in leftovers if wo_ext in i]) found_files = set([i for i in leftovers if wo_ext in i])
@ -200,6 +201,11 @@ class Scanner(Plugin):
has_ignored += 1 if ext == 'ignore' else 0 has_ignored += 1 if ext == 'ignore' else 0
if has_ignored == 0:
for file_path in list(group['unsorted_files']):
ext = getExt(file_path)
has_ignored += 1 if ext == 'ignore' else 0
if has_ignored > 0: if has_ignored > 0:
ignored_identifiers.append(identifier) ignored_identifiers.append(identifier)
@ -232,10 +238,6 @@ class Scanner(Plugin):
# Remove the found files from the leftover stack # Remove the found files from the leftover stack
leftovers = leftovers - set(found_files) leftovers = leftovers - set(found_files)
exts = [getExt(ff) for ff in found_files]
if 'ignore' in exts:
ignored_identifiers.append(identifier)
# Break if CP wants to shut down # Break if CP wants to shut down
if self.shuttingDown(): if self.shuttingDown():
break break
@ -262,14 +264,14 @@ class Scanner(Plugin):
# Remove the found files from the leftover stack # Remove the found files from the leftover stack
leftovers = leftovers - set([ff]) leftovers = leftovers - set([ff])
ext = getExt(ff)
if ext == 'ignore':
ignored_identifiers.append(new_identifier)
# Break if CP wants to shut down # Break if CP wants to shut down
if self.shuttingDown(): if self.shuttingDown():
break break
# leftovers should be empty
if leftovers:
log.debug('Some files are still left over: %s', leftovers)
# Cleaning up used # Cleaning up used
for identifier in delete_identifiers: for identifier in delete_identifiers:
if path_identifiers.get(identifier): if path_identifiers.get(identifier):
@ -339,11 +341,11 @@ class Scanner(Plugin):
total_found = len(valid_files) total_found = len(valid_files)
# Make sure only one movie was found if a download ID is provided # Make sure only one movie was found if a download ID is provided
if download_info and total_found == 0: if release_download and total_found == 0:
log.info('Download ID provided (%s), but no groups found! Make sure the download contains valid media files (fully extracted).', download_info.get('imdb_id')) log.info('Download ID provided (%s), but no groups found! Make sure the download contains valid media files (fully extracted).', release_download.get('imdb_id'))
elif download_info and total_found > 1: elif release_download and total_found > 1:
log.info('Download ID provided (%s), but more than one group found (%s). Ignoring Download ID...', (download_info.get('imdb_id'), len(valid_files))) log.info('Download ID provided (%s), but more than one group found (%s). Ignoring Download ID...', (release_download.get('imdb_id'), len(valid_files)))
download_info = None release_download = None
# Determine file types # Determine file types
db = get_session() db = get_session()
@ -379,7 +381,7 @@ class Scanner(Plugin):
continue continue
log.debug('Getting metadata for %s', identifier) log.debug('Getting metadata for %s', identifier)
group['meta_data'] = self.getMetaData(group, folder = folder, download_info = download_info) group['meta_data'] = self.getMetaData(group, folder = folder, release_download = release_download)
# Subtitle meta # Subtitle meta
group['subtitle_language'] = self.getSubtitleLanguage(group) if not simple else {} group['subtitle_language'] = self.getSubtitleLanguage(group) if not simple else {}
@ -411,7 +413,7 @@ class Scanner(Plugin):
del group['unsorted_files'] del group['unsorted_files']
# Determine movie # Determine movie
group['library'] = self.determineMovie(group, download_info = download_info) group['library'] = self.determineMovie(group, release_download = release_download)
if not group['library']: if not group['library']:
log.error('Unable to determine movie: %s', group['identifiers']) log.error('Unable to determine movie: %s', group['identifiers'])
else: else:
@ -436,13 +438,13 @@ class Scanner(Plugin):
return processed_movies return processed_movies
def getMetaData(self, group, folder = '', download_info = None): def getMetaData(self, group, folder = '', release_download = None):
data = {} data = {}
files = list(group['files']['movie']) files = list(group['files']['movie'])
for cur_file in files: for cur_file in files:
if os.path.getsize(cur_file) < self.minimal_filesize['media']: continue # Ignore smaller files if not self.filesizeBetween(cur_file, self.file_sizes['movie']): continue # Ignore smaller files
meta = self.getMeta(cur_file) meta = self.getMeta(cur_file)
@ -461,8 +463,8 @@ class Scanner(Plugin):
# Use the quality guess first, if that failes use the quality we wanted to download # Use the quality guess first, if that failes use the quality we wanted to download
data['quality'] = None data['quality'] = None
if download_info and download_info.get('quality'): if release_download and release_download.get('quality'):
data['quality'] = fireEvent('quality.single', download_info.get('quality'), single = True) data['quality'] = fireEvent('quality.single', release_download.get('quality'), single = True)
if not data['quality']: if not data['quality']:
data['quality'] = fireEvent('quality.guess', files = files, extra = data, single = True) data['quality'] = fireEvent('quality.guess', files = files, extra = data, single = True)
@ -546,12 +548,12 @@ class Scanner(Plugin):
return detected_languages return detected_languages
def determineMovie(self, group, download_info = None): def determineMovie(self, group, release_download = None):
# Get imdb id from downloader # Get imdb id from downloader
imdb_id = download_info and download_info.get('imdb_id') imdb_id = release_download and release_download.get('imdb_id')
if imdb_id: if imdb_id:
log.debug('Found movie via imdb id from it\'s download id: %s', download_info.get('imdb_id')) log.debug('Found movie via imdb id from it\'s download id: %s', release_download.get('imdb_id'))
files = group['files'] files = group['files']
@ -652,7 +654,7 @@ class Scanner(Plugin):
def getMediaFiles(self, files): def getMediaFiles(self, files):
def test(s): def test(s):
return self.filesizeBetween(s, 300, 100000) and getExt(s.lower()) in self.extensions['movie'] and not self.isSampleFile(s) return self.filesizeBetween(s, self.file_sizes['movie']) and getExt(s.lower()) in self.extensions['movie'] and not self.isSampleFile(s)
return set(filter(test, files)) return set(filter(test, files))
@ -677,7 +679,7 @@ class Scanner(Plugin):
def getTrailers(self, files): def getTrailers(self, files):
def test(s): def test(s):
return re.search('(^|[\W_])trailer\d*[\W_]', s.lower()) and self.filesizeBetween(s, 2, 250) return re.search('(^|[\W_])trailer\d*[\W_]', s.lower()) and self.filesizeBetween(s, self.file_sizes['trailer'])
return set(filter(test, files)) return set(filter(test, files))
@ -688,7 +690,7 @@ class Scanner(Plugin):
files = set(filter(test, files)) files = set(filter(test, files))
images = { images = {
'backdrop': set(filter(lambda s: re.search('(^|[\W_])fanart|backdrop\d*[\W_]', s.lower()) and self.filesizeBetween(s, 0, 5), files)) 'backdrop': set(filter(lambda s: re.search('(^|[\W_])fanart|backdrop\d*[\W_]', s.lower()) and self.filesizeBetween(s, self.file_sizes['backdrop']), files))
} }
# Rest # Rest
@ -716,16 +718,6 @@ class Scanner(Plugin):
log.debug('Ignored "%s" contains "%s".', (filename, i)) log.debug('Ignored "%s" contains "%s".', (filename, i))
return False return False
# Sample file
if self.isSampleFile(filename):
log.debug('Is sample file "%s".', filename)
return False
# Minimal size
if self.filesizeBetween(filename, self.minimal_filesize['media']):
log.debug('File to small: %s', filename)
return False
# All is OK # All is OK
return True return True
@ -734,9 +726,9 @@ class Scanner(Plugin):
if is_sample: log.debug('Is sample file: %s', filename) if is_sample: log.debug('Is sample file: %s', filename)
return is_sample return is_sample
def filesizeBetween(self, file, min = 0, max = 100000): def filesizeBetween(self, file, file_size = []):
try: try:
return (min * 1048576) < os.path.getsize(file) < (max * 1048576) return (file_size.get('min', 0) * 1048576) < os.path.getsize(file) < (file_size.get('max', 100000) * 1048576)
except: except:
log.error('Couldn\'t get filesize of %s.', file) log.error('Couldn\'t get filesize of %s.', file)
@ -830,19 +822,21 @@ class Scanner(Plugin):
def findYear(self, text): def findYear(self, text):
# Search year inside () or [] first # Search year inside () or [] first
matches = re.search('(\(|\[)(?P<year>19[0-9]{2}|20[0-9]{2})(\]|\))', text) matches = re.findall('(\(|\[)(?P<year>19[0-9]{2}|20[0-9]{2})(\]|\))', text)
if matches: if matches:
return matches.group('year') return matches[-1][1]
# Search normal # Search normal
matches = re.search('(?P<year>19[0-9]{2}|20[0-9]{2})', text) matches = re.findall('(?P<year>19[0-9]{2}|20[0-9]{2})', text)
if matches: if matches:
return matches.group('year') return matches[-1]
return '' return ''
def getReleaseNameYear(self, release_name, file_name = None): def getReleaseNameYear(self, release_name, file_name = None):
release_name = release_name.strip(' .-_')
# Use guessit first # Use guessit first
guess = {} guess = {}
if file_name: if file_name:
@ -860,7 +854,7 @@ class Scanner(Plugin):
cleaned = ' '.join(re.split('\W+', simplifyString(release_name))) cleaned = ' '.join(re.split('\W+', simplifyString(release_name)))
cleaned = re.sub(self.clean, ' ', cleaned) cleaned = re.sub(self.clean, ' ', cleaned)
for year_str in [file_name, cleaned]: for year_str in [file_name, release_name, cleaned]:
if not year_str: continue if not year_str: continue
year = self.findYear(year_str) year = self.findYear(year_str)
if year: if year:
@ -870,19 +864,21 @@ class Scanner(Plugin):
if year: # Split name on year if year: # Split name on year
try: try:
movie_name = cleaned.split(year).pop(0).strip() movie_name = cleaned.rsplit(year, 1).pop(0).strip()
cp_guess = { if movie_name:
'name': movie_name, cp_guess = {
'year': int(year), 'name': movie_name,
} 'year': int(year),
}
except: except:
pass pass
else: # Split name on multiple spaces
if not cp_guess: # Split name on multiple spaces
try: try:
movie_name = cleaned.split(' ').pop(0).strip() movie_name = cleaned.split(' ').pop(0).strip()
cp_guess = { cp_guess = {
'name': movie_name, 'name': movie_name,
'year': int(year), 'year': int(year) if movie_name[:4] != year else 0,
} }
except: except:
pass pass

7
couchpotato/core/plugins/score/main.py

@ -1,11 +1,11 @@
from couchpotato.core.event import addEvent from couchpotato.core.event import addEvent, fireEvent
from couchpotato.core.helpers.encoding import toUnicode from couchpotato.core.helpers.encoding import toUnicode
from couchpotato.core.helpers.variable import getTitle, splitString from couchpotato.core.helpers.variable import getTitle, splitString
from couchpotato.core.logger import CPLog from couchpotato.core.logger import CPLog
from couchpotato.core.plugins.base import Plugin from couchpotato.core.plugins.base import Plugin
from couchpotato.core.plugins.score.scores import nameScore, nameRatioScore, \ from couchpotato.core.plugins.score.scores import nameScore, nameRatioScore, \
sizeScore, providerScore, duplicateScore, partialIgnoredScore, namePositionScore, \ sizeScore, providerScore, duplicateScore, partialIgnoredScore, namePositionScore, \
halfMultipartScore halfMultipartScore, sceneScore
from couchpotato.environment import Env from couchpotato.environment import Env
log = CPLog(__name__) log = CPLog(__name__)
@ -62,4 +62,7 @@ class Score(Plugin):
if extra_score: if extra_score:
score += extra_score(nzb) score += extra_score(nzb)
# Scene / Nuke scoring
score += sceneScore(nzb['name'])
return score return score

40
couchpotato/core/plugins/score/scores.py

@ -1,8 +1,13 @@
from couchpotato.core.event import fireEvent from couchpotato.core.event import fireEvent
from couchpotato.core.helpers.encoding import simplifyString from couchpotato.core.helpers.encoding import simplifyString
from couchpotato.core.helpers.variable import tryInt from couchpotato.core.helpers.variable import tryInt
from couchpotato.core.logger import CPLog
from couchpotato.environment import Env from couchpotato.environment import Env
import re import re
import traceback
log = CPLog(__name__)
name_scores = [ name_scores = [
# Tags # Tags
@ -160,3 +165,38 @@ def halfMultipartScore(nzb_name):
return -30 return -30
return 0 return 0
def sceneScore(nzb_name):
check_names = [nzb_name]
# Match names between "
try: check_names.append(re.search(r'([\'"])[^\1]*\1', nzb_name).group(0))
except: pass
# Match longest name between []
try: check_names.append(max(re.findall(r'[^[]*\[([^]]*)\]', nzb_name), key = len).strip())
except: pass
for name in check_names:
# Strip twice, remove possible file extensions
name = name.lower().strip(' "\'\.-_\[\]')
name = re.sub('\.([a-z0-9]{0,4})$', '', name)
name = name.strip(' "\'\.-_\[\]')
# Make sure year and groupname is in there
year = re.findall('(?P<year>19[0-9]{2}|20[0-9]{2})', name)
group = re.findall('\-([a-z0-9]+)$', name)
if len(year) > 0 and len(group) > 0:
try:
validate = fireEvent('release.validate', name, single = True)
if validate and tryInt(validate.get('score')) != 0:
log.debug('Release "%s" scored %s, reason: %s', (nzb_name, validate['score'], validate['reasons']))
return tryInt(validate.get('score'))
except:
log.error('Failed scoring scene: %s', traceback.format_exc())
return 0

8
couchpotato/core/plugins/subtitle/main.py

@ -1,6 +1,6 @@
from couchpotato import get_session from couchpotato import get_session
from couchpotato.core.event import addEvent, fireEvent from couchpotato.core.event import addEvent, fireEvent
from couchpotato.core.helpers.encoding import toUnicode from couchpotato.core.helpers.encoding import toUnicode, sp
from couchpotato.core.helpers.variable import splitString from couchpotato.core.helpers.variable import splitString
from couchpotato.core.logger import CPLog from couchpotato.core.logger import CPLog
from couchpotato.core.plugins.base import Plugin from couchpotato.core.plugins.base import Plugin
@ -58,9 +58,9 @@ class Subtitle(Plugin):
for d_sub in downloaded: for d_sub in downloaded:
log.info('Found subtitle (%s): %s', (d_sub.language.alpha2, files)) log.info('Found subtitle (%s): %s', (d_sub.language.alpha2, files))
group['files']['subtitle'].append(d_sub.path) group['files']['subtitle'].append(sp(d_sub.path))
group['before_rename'].append(d_sub.path) group['before_rename'].append(sp(d_sub.path))
group['subtitle_language'][d_sub.path] = [d_sub.language.alpha2] group['subtitle_language'][sp(d_sub.path)] = [d_sub.language.alpha2]
return True return True

12
couchpotato/core/plugins/userscript/static/userscript.css

@ -14,25 +14,25 @@
padding: 20px; padding: 20px;
} }
.page.userscript .movie_result { .page.userscript .media_result {
height: 140px; height: 140px;
} }
.page.userscript .movie_result .thumbnail { .page.userscript .media_result .thumbnail {
width: 90px; width: 90px;
} }
.page.userscript .movie_result .options { .page.userscript .media_result .options {
left: 90px; left: 90px;
padding: 54px 15px; padding: 54px 15px;
} }
.page.userscript .movie_result .year { .page.userscript .media_result .year {
display: none; display: none;
} }
.page.userscript .movie_result .options select[name="title"] { .page.userscript .media_result .options select[name="title"] {
width: 190px; width: 190px;
} }
.page.userscript .movie_result .options select[name="profile"] { .page.userscript .media_result .options select[name="profile"] {
width: 70px; width: 70px;
} }

2
couchpotato/core/plugins/userscript/static/userscript.js

@ -34,7 +34,7 @@ Page.Userscript = new Class({
if(json.error) if(json.error)
self.frame.set('html', json.error); self.frame.set('html', json.error);
else { else {
var item = new Block.Search.Item(json.movie); var item = new Block.Search.MovieItem(json.movie);
self.frame.adopt(item); self.frame.adopt(item);
item.showOptions(); item.showOptions();
} }

3
couchpotato/core/providers/automation/flixster/main.py

@ -1,7 +1,6 @@
from couchpotato.core.helpers.variable import tryInt, splitString from couchpotato.core.helpers.variable import tryInt, splitString
from couchpotato.core.logger import CPLog from couchpotato.core.logger import CPLog
from couchpotato.core.providers.automation.base import Automation from couchpotato.core.providers.automation.base import Automation
import json
log = CPLog(__name__) log = CPLog(__name__)
@ -40,7 +39,7 @@ class Flixster(Automation):
if not enablers[index]: if not enablers[index]:
continue continue
data = json.loads(self.getHTMLData(self.url % user_id)) data = self.getJsonData(self.url % user_id, decode_from = 'iso-8859-1')
for movie in data: for movie in data:
movies.append({'title': movie['movie']['title'], 'year': movie['movie']['year'] }) movies.append({'title': movie['movie']['title'], 'year': movie['movie']['year'] })

9
couchpotato/core/providers/automation/imdb/__init__.py

@ -55,7 +55,14 @@ config = [{
'label': 'TOP 250', 'label': 'TOP 250',
'description': 'IMDB <a href="http://www.imdb.com/chart/top/">TOP 250</a> chart', 'description': 'IMDB <a href="http://www.imdb.com/chart/top/">TOP 250</a> chart',
'default': True, 'default': True,
}, },
{
'name': 'automation_charts_boxoffice',
'type': 'bool',
'label': 'Box offce TOP 10',
'description': 'IMDB Box office <a href="http://www.imdb.com/chart/">TOP 10</a> chart',
'default': True,
},
], ],
}, },
], ],

11
couchpotato/core/providers/automation/imdb/main.py

@ -70,8 +70,11 @@ class IMDBAutomation(IMDBBase):
chart_urls = { chart_urls = {
'theater': 'http://www.imdb.com/movies-in-theaters/', 'theater': 'http://www.imdb.com/movies-in-theaters/',
'top250': 'http://www.imdb.com/chart/top', 'top250': 'http://www.imdb.com/chart/top',
'boxoffice': 'http://www.imdb.com/chart/',
} }
first_table = ['boxoffice']
def getIMDBids(self): def getIMDBids(self):
movies = [] movies = []
@ -84,6 +87,14 @@ class IMDBAutomation(IMDBBase):
try: try:
result_div = html.find('div', attrs = {'id': 'main'}) result_div = html.find('div', attrs = {'id': 'main'})
try:
if url in self.first_table:
table = result_div.find('table')
result_div = table if table else result_div
except:
pass
imdb_ids = getImdb(str(result_div), multiple = True) imdb_ids = getImdb(str(result_div), multiple = True)
for imdb_id in imdb_ids: for imdb_id in imdb_ids:

3
couchpotato/core/providers/automation/itunes/main.py

@ -16,9 +16,6 @@ class ITunes(Automation, RSS):
def getIMDBids(self): def getIMDBids(self):
if self.isDisabled():
return
movies = [] movies = []
enablers = [tryInt(x) for x in splitString(self.conf('automation_urls_use'))] enablers = [tryInt(x) for x in splitString(self.conf('automation_urls_use'))]

6
couchpotato/core/providers/base.py

@ -62,13 +62,17 @@ class Provider(Plugin):
return self.is_available.get(host, False) return self.is_available.get(host, False)
def getJsonData(self, url, **kwargs): def getJsonData(self, url, decode_from = None, **kwargs):
cache_key = '%s%s' % (md5(url), md5('%s' % kwargs.get('params', {}))) cache_key = '%s%s' % (md5(url), md5('%s' % kwargs.get('params', {})))
data = self.getCache(cache_key, url, **kwargs) data = self.getCache(cache_key, url, **kwargs)
if data: if data:
try: try:
data = data.strip()
if decode_from:
data = data.decode(decode_from)
return json.loads(data) return json.loads(data)
except: except:
log.error('Failed to parsing %s: %s', (self.getName(), traceback.format_exc())) log.error('Failed to parsing %s: %s', (self.getName(), traceback.format_exc()))

16
couchpotato/core/providers/info/_modifier/main.py

@ -49,19 +49,13 @@ class Movie(ModifierBase):
def returnByType(self, results): def returnByType(self, results):
new_results = {'unknown':[]} new_results = {}
for r in results: for r in results:
if r.get('type'): type_name = r.get('type', 'movie') + 's'
type_name = r.get('type') + 's' if not new_results.has_key(type_name):
if not new_results.has_key(type_name): new_results[type_name] = []
new_results[type_name] = []
new_results[type_name].append(r) new_results[type_name].append(r)
else:
new_results['unknown'].append(r)
if len(new_results['unknown']) == 0:
del new_results['unknown']
# Combine movies, needs a cleaner way.. # Combine movies, needs a cleaner way..
if new_results.has_key('movies'): if new_results.has_key('movies'):

13
couchpotato/core/providers/info/couchpotatoapi/main.py

@ -3,6 +3,7 @@ from couchpotato.core.helpers.encoding import tryUrlencode
from couchpotato.core.logger import CPLog from couchpotato.core.logger import CPLog
from couchpotato.core.providers.info.base import MovieProvider from couchpotato.core.providers.info.base import MovieProvider
from couchpotato.environment import Env from couchpotato.environment import Env
import base64
import time import time
log = CPLog(__name__) log = CPLog(__name__)
@ -11,6 +12,7 @@ log = CPLog(__name__)
class CouchPotatoApi(MovieProvider): class CouchPotatoApi(MovieProvider):
urls = { urls = {
'validate': 'https://api.couchpota.to/validate/%s/',
'search': 'https://api.couchpota.to/search/%s/', 'search': 'https://api.couchpota.to/search/%s/',
'info': 'https://api.couchpota.to/info/%s/', 'info': 'https://api.couchpota.to/info/%s/',
'is_movie': 'https://api.couchpota.to/ismovie/%s/', 'is_movie': 'https://api.couchpota.to/ismovie/%s/',
@ -24,11 +26,14 @@ class CouchPotatoApi(MovieProvider):
def __init__(self): def __init__(self):
addEvent('movie.info', self.getInfo, priority = 1) addEvent('movie.info', self.getInfo, priority = 1)
addEvent('info.search', self.search, priority = 1)
addEvent('movie.search', self.search, priority = 1) addEvent('movie.search', self.search, priority = 1)
addEvent('movie.release_date', self.getReleaseDate) addEvent('movie.release_date', self.getReleaseDate)
addEvent('movie.suggest', self.getSuggestions) addEvent('movie.suggest', self.getSuggestions)
addEvent('movie.is_movie', self.isMovie) addEvent('movie.is_movie', self.isMovie)
addEvent('release.validate', self.validate)
addEvent('cp.source_url', self.getSourceUrl) addEvent('cp.source_url', self.getSourceUrl)
addEvent('cp.messages', self.getMessages) addEvent('cp.messages', self.getMessages)
@ -50,6 +55,14 @@ class CouchPotatoApi(MovieProvider):
def search(self, q, limit = 5): def search(self, q, limit = 5):
return self.getJsonData(self.urls['search'] % tryUrlencode(q) + ('?limit=%s' % limit), headers = self.getRequestHeaders()) return self.getJsonData(self.urls['search'] % tryUrlencode(q) + ('?limit=%s' % limit), headers = self.getRequestHeaders())
def validate(self, name = None):
if not name:
return
name_enc = base64.b64encode(name)
return self.getJsonData(self.urls['validate'] % name_enc, headers = self.getRequestHeaders())
def isMovie(self, identifier = None): def isMovie(self, identifier = None):
if not identifier: if not identifier:

1
couchpotato/core/providers/info/omdbapi/main.py

@ -20,6 +20,7 @@ class OMDBAPI(MovieProvider):
http_time_between_calls = 0 http_time_between_calls = 0
def __init__(self): def __init__(self):
addEvent('info.search', self.search)
addEvent('movie.search', self.search) addEvent('movie.search', self.search)
addEvent('movie.info', self.getInfo) addEvent('movie.info', self.getInfo)

1
couchpotato/core/providers/info/themoviedb/main.py

@ -121,6 +121,7 @@ class TheMovieDb(MovieProvider):
'year': year, 'year': year,
'plot': movie.overview, 'plot': movie.overview,
'genres': genres, 'genres': genres,
'collection': getattr(movie.collection, 'name', None),
} }
movie_data = dict((k, v) for k, v in movie_data.iteritems() if v) movie_data = dict((k, v) for k, v in movie_data.iteritems() if v)

7
couchpotato/core/providers/metadata/xbmc/main.py

@ -104,6 +104,13 @@ class XBMC(MetaDataBase):
writers = SubElement(nfoxml, 'credits') writers = SubElement(nfoxml, 'credits')
writers.text = toUnicode(writer) writers.text = toUnicode(writer)
# Sets or collections
collection_name = movie_info.get('collection')
if collection_name:
collection = SubElement(nfoxml, 'set')
collection.text = toUnicode(collection_name)
sorttitle = SubElement(nfoxml, 'sorttitle')
sorttitle.text = '%s %s' % (toUnicode(collection_name), movie_info.get('year'))
# Clean up the xml and return it # Clean up the xml and return it
nfoxml = xml.dom.minidom.parseString(tostring(nfoxml)) nfoxml = xml.dom.minidom.parseString(tostring(nfoxml))

2
couchpotato/core/providers/nzb/binsearch/main.py

@ -65,7 +65,7 @@ class BinSearch(NZBProvider):
total = tryInt(parts.group('total')) total = tryInt(parts.group('total'))
parts = tryInt(parts.group('parts')) parts = tryInt(parts.group('parts'))
if (total / parts) < 0.95 or ((total / parts) >= 0.95 and not 'par2' in info.text.lower()): if (total / parts) < 0.95 or ((total / parts) >= 0.95 and not ('par2' in info.text.lower() or 'pa3' in info.text.lower())):
log.info2('Wrong: \'%s\', not complete: %s out of %s', (item['name'], parts, total)) log.info2('Wrong: \'%s\', not complete: %s out of %s', (item['name'], parts, total))
return False return False

4
couchpotato/core/providers/nzb/newznab/main.py

@ -1,4 +1,4 @@
from couchpotato.core.helpers.encoding import tryUrlencode from couchpotato.core.helpers.encoding import tryUrlencode, toUnicode
from couchpotato.core.helpers.rss import RSS from couchpotato.core.helpers.rss import RSS
from couchpotato.core.helpers.variable import cleanHost, splitString, tryInt from couchpotato.core.helpers.variable import cleanHost, splitString, tryInt
from couchpotato.core.logger import CPLog from couchpotato.core.logger import CPLog
@ -83,7 +83,7 @@ class Newznab(NZBProvider, RSS):
results.append({ results.append({
'id': nzb_id, 'id': nzb_id,
'provider_extra': urlparse(host['host']).hostname or host['host'], 'provider_extra': urlparse(host['host']).hostname or host['host'],
'name': name, 'name': toUnicode(name),
'name_extra': name_extra, 'name_extra': name_extra,
'age': self.calculateAge(int(time.mktime(parse(date).timetuple()))), 'age': self.calculateAge(int(time.mktime(parse(date).timetuple()))),
'size': int(self.getElement(nzb, 'enclosure').attrib['length']) / 1024 / 1024, 'size': int(self.getElement(nzb, 'enclosure').attrib['length']) / 1024 / 1024,

8
couchpotato/core/providers/nzb/omgwtfnzbs/main.py

@ -14,7 +14,8 @@ log = CPLog(__name__)
class OMGWTFNZBs(NZBProvider, RSS): class OMGWTFNZBs(NZBProvider, RSS):
urls = { urls = {
'search': 'http://rss.omgwtfnzbs.org/rss-search.php?%s', 'search': 'https://rss.omgwtfnzbs.org/rss-search.php?%s',
'detail_url': 'https://omgwtfnzbs.org/details.php?id=%s',
} }
http_time_between_calls = 1 #seconds http_time_between_calls = 1 #seconds
@ -49,13 +50,14 @@ class OMGWTFNZBs(NZBProvider, RSS):
for nzb in nzbs: for nzb in nzbs:
enclosure = self.getElement(nzb, 'enclosure').attrib enclosure = self.getElement(nzb, 'enclosure').attrib
nzb_id = parse_qs(urlparse(self.getTextElement(nzb, 'link')).query).get('id')[0]
results.append({ results.append({
'id': parse_qs(urlparse(self.getTextElement(nzb, 'link')).query).get('id')[0], 'id': nzb_id,
'name': toUnicode(self.getTextElement(nzb, 'title')), 'name': toUnicode(self.getTextElement(nzb, 'title')),
'age': self.calculateAge(int(time.mktime(parse(self.getTextElement(nzb, 'pubDate')).timetuple()))), 'age': self.calculateAge(int(time.mktime(parse(self.getTextElement(nzb, 'pubDate')).timetuple()))),
'size': tryInt(enclosure['length']) / 1024 / 1024, 'size': tryInt(enclosure['length']) / 1024 / 1024,
'url': enclosure['url'], 'url': enclosure['url'],
'detail_url': self.getTextElement(nzb, 'link'), 'detail_url': self.urls['detail_url'] % nzb_id,
'description': self.getTextElement(nzb, 'description') 'description': self.getTextElement(nzb, 'description')
}) })

43
couchpotato/core/providers/torrent/base.py

@ -1,6 +1,8 @@
from couchpotato.core.helpers.variable import getImdb, md5 from couchpotato.core.helpers.variable import getImdb, md5, cleanHost
from couchpotato.core.logger import CPLog from couchpotato.core.logger import CPLog
from couchpotato.core.providers.base import YarrProvider from couchpotato.core.providers.base import YarrProvider
from couchpotato.environment import Env
import time
log = CPLog(__name__) log = CPLog(__name__)
@ -9,6 +11,9 @@ class TorrentProvider(YarrProvider):
protocol = 'torrent' protocol = 'torrent'
proxy_domain = None
proxy_list = []
def imdbMatch(self, url, imdbId): def imdbMatch(self, url, imdbId):
if getImdb(url) == imdbId: if getImdb(url) == imdbId:
return True return True
@ -25,6 +30,42 @@ class TorrentProvider(YarrProvider):
return False return False
def getDomain(self, url = ''):
forced_domain = self.conf('domain')
if forced_domain:
return cleanHost(forced_domain).rstrip('/') + url
if not self.proxy_domain:
for proxy in self.proxy_list:
prop_name = 'proxy.%s' % proxy
last_check = float(Env.prop(prop_name, default = 0))
if last_check > time.time() - 1209600:
continue
data = ''
try:
data = self.urlopen(proxy, timeout = 3, show_error = False)
except:
log.debug('Failed %s proxy %s', (self.getName(), proxy))
if self.correctProxy(data):
log.debug('Using proxy for %s: %s', (self.getName(), proxy))
self.proxy_domain = proxy
break
Env.prop(prop_name, time.time())
if not self.proxy_domain:
log.error('No %s proxies left, please add one in settings, or let us know which one to add on the forum.', self.getName())
return None
return cleanHost(self.proxy_domain).rstrip('/') + url
def correctProxy(self):
return True
class TorrentMagnetProvider(TorrentProvider): class TorrentMagnetProvider(TorrentProvider):
protocol = 'torrent_magnet' protocol = 'torrent_magnet'

12
couchpotato/core/providers/torrent/scenehd/__init__.py → couchpotato/core/providers/torrent/bithdtv/__init__.py

@ -1,16 +1,16 @@
from .main import SceneHD from .main import BiTHDTV
def start(): def start():
return SceneHD() return BiTHDTV()
config = [{ config = [{
'name': 'scenehd', 'name': 'bithdtv',
'groups': [ 'groups': [
{ {
'tab': 'searcher', 'tab': 'searcher',
'list': 'torrent_providers', 'list': 'torrent_providers',
'name': 'SceneHD', 'name': 'BiT-HDTV',
'description': 'See <a href="https://scenehd.org">SceneHD</a>', 'description': 'See <a href="http://bit-hdtv.com">BiT-HDTV</a>',
'wizard': True, 'wizard': True,
'options': [ 'options': [
{ {
@ -46,7 +46,7 @@ config = [{
'advanced': True, 'advanced': True,
'label': 'Extra Score', 'label': 'Extra Score',
'type': 'int', 'type': 'int',
'default': 0, 'default': 20,
'description': 'Starting score for each release found via this provider.', 'description': 'Starting score for each release found via this provider.',
} }
], ],

88
couchpotato/core/providers/torrent/bithdtv/main.py

@ -0,0 +1,88 @@
from bs4 import BeautifulSoup
from couchpotato.core.helpers.encoding import tryUrlencode, toUnicode
from couchpotato.core.helpers.variable import tryInt
from couchpotato.core.logger import CPLog
from couchpotato.core.providers.torrent.base import TorrentProvider
import traceback
log = CPLog(__name__)
class BiTHDTV(TorrentProvider):
urls = {
'test' : 'http://www.bit-hdtv.com/',
'login' : 'http://www.bit-hdtv.com/takelogin.php',
'login_check': 'http://www.bit-hdtv.com/messages.php',
'detail' : 'http://www.bit-hdtv.com/details.php?id=%s',
'search' : 'http://www.bit-hdtv.com/torrents.php?',
}
# Searches for movies only - BiT-HDTV's subcategory and resolution search filters appear to be broken
cat_id_movies = 7
http_time_between_calls = 1 #seconds
def _searchOnTitle(self, title, movie, quality, results):
arguments = tryUrlencode({
'search': '%s %s' % (title.replace(':', ''), movie['library']['year']),
'cat': self.cat_id_movies
})
url = "%s&%s" % (self.urls['search'], arguments)
data = self.getHTMLData(url, opener = self.login_opener)
if data:
# Remove BiT-HDTV's output garbage so outdated BS4 versions successfully parse the HTML
split_data = data.partition('-->')
if '## SELECT COUNT(' in split_data[0]:
data = split_data[2]
html = BeautifulSoup(data)
try:
result_table = html.find('table', attrs = {'width' : '750', 'class' : ''})
if result_table is None:
return
entries = result_table.find_all('tr')
for result in entries[1:]:
cells = result.find_all('td')
link = cells[2].find('a')
torrent_id = link['href'].replace('/details.php?id=', '')
results.append({
'id': torrent_id,
'name': link.contents[0].get_text(),
'url': cells[0].find('a')['href'],
'detail_url': self.urls['detail'] % torrent_id,
'size': self.parseSize(cells[6].get_text()),
'seeders': tryInt(cells[8].string),
'leechers': tryInt(cells[9].string),
'get_more_info': self.getMoreInfo,
})
except:
log.error('Failed getting results from %s: %s', (self.getName(), traceback.format_exc()))
def getLoginParams(self):
return tryUrlencode({
'username': self.conf('username'),
'password': self.conf('password'),
})
def getMoreInfo(self, item):
full_description = self.getCache('bithdtv.%s' % item['id'], item['detail_url'], cache_timeout = 25920000)
html = BeautifulSoup(full_description)
nfo_pre = html.find('table', attrs = {'class':'detail'})
description = toUnicode(nfo_pre.text) if nfo_pre else ''
item['description'] = description
return item
def loginSuccess(self, output):
return 'logout.php' in output.lower()
loginCheckSuccess = loginSuccess

6
couchpotato/core/providers/torrent/kickasstorrents/__init__.py

@ -19,6 +19,12 @@ config = [{
'default': True, 'default': True,
}, },
{ {
'name': 'domain',
'advanced': True,
'label': 'Proxy server',
'description': 'Domain for requests, keep empty to let CouchPotato pick.',
},
{
'name': 'seed_ratio', 'name': 'seed_ratio',
'label': 'Seed ratio', 'label': 'Seed ratio',
'type': 'float', 'type': 'float',

27
couchpotato/core/providers/torrent/kickasstorrents/main.py

@ -11,9 +11,8 @@ log = CPLog(__name__)
class KickAssTorrents(TorrentMagnetProvider): class KickAssTorrents(TorrentMagnetProvider):
urls = { urls = {
'test': 'https://kickass.to/', 'detail': '%s/%s',
'detail': 'https://kickass.to/%s', 'search': '%s/%s-i%s/',
'search': 'https://kickass.to/%s-i%s/',
} }
cat_ids = [ cat_ids = [
@ -28,9 +27,16 @@ class KickAssTorrents(TorrentMagnetProvider):
http_time_between_calls = 1 #seconds http_time_between_calls = 1 #seconds
cat_backup_id = None cat_backup_id = None
proxy_list = [
'https://kickass.to',
'http://kickass.pw',
'http://www.kickassunblock.info',
'http://www.kickassproxy.info',
]
def _search(self, movie, quality, results): def _search(self, movie, quality, results):
data = self.getHTMLData(self.urls['search'] % ('m', movie['library']['identifier'].replace('tt', ''))) data = self.getHTMLData(self.urls['search'] % (self.getDomain(), 'm', movie['library']['identifier'].replace('tt', '')))
if data: if data:
@ -41,7 +47,7 @@ class KickAssTorrents(TorrentMagnetProvider):
html = BeautifulSoup(data) html = BeautifulSoup(data)
resultdiv = html.find('div', attrs = {'class':'tabs'}) resultdiv = html.find('div', attrs = {'class':'tabs'})
for result in resultdiv.find_all('div', recursive = False): for result in resultdiv.find_all('div', recursive = False):
if result.get('id').lower() not in cat_ids: if result.get('id').lower().strip('tab-') not in cat_ids:
continue continue
try: try:
@ -56,12 +62,12 @@ class KickAssTorrents(TorrentMagnetProvider):
column_name = table_order[nr] column_name = table_order[nr]
if column_name: if column_name:
if column_name is 'name': if column_name == 'name':
link = td.find('div', {'class': 'torrentname'}).find_all('a')[1] link = td.find('div', {'class': 'torrentname'}).find_all('a')[1]
new['id'] = temp.get('id')[-8:] new['id'] = temp.get('id')[-8:]
new['name'] = link.text new['name'] = link.text
new['url'] = td.find('a', 'imagnet')['href'] new['url'] = td.find('a', 'imagnet')['href']
new['detail_url'] = self.urls['detail'] % link['href'][1:] new['detail_url'] = self.urls['detail'] % (self.getDomain(), link['href'][1:])
new['score'] = 20 if td.find('a', 'iverif') else 0 new['score'] = 20 if td.find('a', 'iverif') else 0
elif column_name is 'size': elif column_name is 'size':
new['size'] = self.parseSize(td.text) new['size'] = self.parseSize(td.text)
@ -100,3 +106,10 @@ class KickAssTorrents(TorrentMagnetProvider):
age += tryInt(nr) * mult age += tryInt(nr) * mult
return tryInt(age) return tryInt(age)
def isEnabled(self):
return super(KickAssTorrents, self).isEnabled() and self.getDomain()
def correctProxy(self, data):
return 'search query' in data.lower()

79
couchpotato/core/providers/torrent/scenehd/main.py

@ -1,79 +0,0 @@
from bs4 import BeautifulSoup
from couchpotato.core.helpers.encoding import simplifyString, tryUrlencode
from couchpotato.core.helpers.variable import tryInt
from couchpotato.core.logger import CPLog
from couchpotato.core.providers.torrent.base import TorrentProvider
import traceback
log = CPLog(__name__)
class SceneHD(TorrentProvider):
urls = {
'test': 'https://scenehd.org/',
'login' : 'https://scenehd.org/takelogin.php',
'login_check': 'https://scenehd.org/my.php',
'detail': 'https://scenehd.org/details.php?id=%s',
'search': 'https://scenehd.org/browse.php?ajax',
'download': 'https://scenehd.org/download.php?id=%s',
}
http_time_between_calls = 1 #seconds
def _searchOnTitle(self, title, movie, quality, results):
q = '"%s %s"' % (simplifyString(title), movie['library']['year'])
arguments = tryUrlencode({
'search': q,
})
url = "%s&%s" % (self.urls['search'], arguments)
data = self.getHTMLData(url, opener = self.login_opener)
if data:
html = BeautifulSoup(data)
try:
resultsTable = html.find_all('table')[6]
entries = resultsTable.find_all('tr')
for result in entries[1:]:
all_cells = result.find_all('td')
detail_link = all_cells[2].find('a')
details = detail_link['href']
torrent_id = details.replace('details.php?id=', '')
leechers = all_cells[11].find('a')
if leechers:
leechers = leechers.string
else:
leechers = all_cells[11].string
results.append({
'id': torrent_id,
'name': detail_link['title'],
'size': self.parseSize(all_cells[7].string),
'seeders': tryInt(all_cells[10].find('a').string),
'leechers': tryInt(leechers),
'url': self.urls['download'] % torrent_id,
'description': all_cells[1].find('a')['href'],
})
except:
log.error('Failed getting results from %s: %s', (self.getName(), traceback.format_exc()))
def getLoginParams(self):
return tryUrlencode({
'username': self.conf('username'),
'password': self.conf('password'),
'ssl': 'yes',
})
def loginSuccess(self, output):
return 'logout.php' in output.lower()
loginCheckSuccess = loginSuccess

41
couchpotato/core/providers/torrent/thepiratebay/main.py

@ -1,11 +1,9 @@
from bs4 import BeautifulSoup from bs4 import BeautifulSoup
from couchpotato.core.helpers.encoding import toUnicode, tryUrlencode from couchpotato.core.helpers.encoding import toUnicode, tryUrlencode
from couchpotato.core.helpers.variable import tryInt, cleanHost from couchpotato.core.helpers.variable import tryInt
from couchpotato.core.logger import CPLog from couchpotato.core.logger import CPLog
from couchpotato.core.providers.torrent.base import TorrentMagnetProvider from couchpotato.core.providers.torrent.base import TorrentMagnetProvider
from couchpotato.environment import Env
import re import re
import time
import traceback import traceback
log = CPLog(__name__) log = CPLog(__name__)
@ -30,8 +28,8 @@ class ThePirateBay(TorrentMagnetProvider):
http_time_between_calls = 0 http_time_between_calls = 0
proxy_list = [ proxy_list = [
'https://thepiratebay.se',
'https://tpb.ipredator.se', 'https://tpb.ipredator.se',
'https://thepiratebay.se',
'https://depiraatbaai.be', 'https://depiraatbaai.be',
'https://piratereverse.info', 'https://piratereverse.info',
'https://tpb.pirateparty.org.uk', 'https://tpb.pirateparty.org.uk',
@ -43,10 +41,6 @@ class ThePirateBay(TorrentMagnetProvider):
'https://kuiken.co', 'https://kuiken.co',
] ]
def __init__(self):
self.domain = self.conf('domain')
super(ThePirateBay, self).__init__()
def _searchOnTitle(self, title, movie, quality, results): def _searchOnTitle(self, title, movie, quality, results):
page = 0 page = 0
@ -108,38 +102,11 @@ class ThePirateBay(TorrentMagnetProvider):
except: except:
log.error('Failed getting results from %s: %s', (self.getName(), traceback.format_exc())) log.error('Failed getting results from %s: %s', (self.getName(), traceback.format_exc()))
def isEnabled(self): def isEnabled(self):
return super(ThePirateBay, self).isEnabled() and self.getDomain() return super(ThePirateBay, self).isEnabled() and self.getDomain()
def getDomain(self, url = ''): def correctProxy(self, data):
return 'title="Pirate Search"' in data
if not self.domain:
for proxy in self.proxy_list:
prop_name = 'tpb_proxy.%s' % proxy
last_check = float(Env.prop(prop_name, default = 0))
if last_check > time.time() - 1209600:
continue
data = ''
try:
data = self.urlopen(proxy, timeout = 3, show_error = False)
except:
log.debug('Failed tpb proxy %s', proxy)
if 'title="Pirate Search"' in data:
log.debug('Using proxy: %s', proxy)
self.domain = proxy
break
Env.prop(prop_name, time.time())
if not self.domain:
log.error('No TPB proxies left, please add one in settings, or let us know which one to add on the forum.')
return None
return cleanHost(self.domain).rstrip('/') + url
def getMoreInfo(self, item): def getMoreInfo(self, item):
full_description = self.getCache('tpb.%s' % item['id'], item['detail_url'], cache_timeout = 25920000) full_description = self.getCache('tpb.%s' % item['id'], item['detail_url'], cache_timeout = 25920000)

2
couchpotato/core/providers/torrent/yify/main.py

@ -23,7 +23,7 @@ class Yify(TorrentProvider):
return super(Yify, self).search(movie, quality) return super(Yify, self).search(movie, quality)
def _searchOnTitle(self, title, movie, quality, results): def _search(self, movie, quality, results):
data = self.getJsonData(self.urls['search'] % (movie['library']['identifier'], quality['identifier'])) data = self.getJsonData(self.urls['search'] % (movie['library']['identifier'], quality['identifier']))

6
couchpotato/core/providers/userscript/flickchart/__init__.py

@ -0,0 +1,6 @@
from .main import Flickchart
def start():
return Flickchart()
config = []

30
couchpotato/core/providers/userscript/flickchart/main.py

@ -0,0 +1,30 @@
from couchpotato.core.event import fireEvent
from couchpotato.core.logger import CPLog
from couchpotato.core.providers.userscript.base import UserscriptBase
import traceback
log = CPLog(__name__)
class Flickchart(UserscriptBase):
includes = ['http://www.flickchart.com/movie/*']
def getMovie(self, url):
try:
data = self.getUrl(url)
except:
return
try:
start = data.find('<title>')
end = data.find('</title>', start)
page_title = data[start + len('<title>'):end].strip().split('-')
year_name = fireEvent('scanner.name_year', page_title[0], single = True)
return self.search(**year_name)
except:
log.error('Failed parsing page for title and year: %s', traceback.format_exc())

76
couchpotato/static/scripts/couchpotato.js

@ -11,6 +11,12 @@
pages: [], pages: [],
block: [], block: [],
initialize: function(){
var self = this;
self.global_events = {};
},
setup: function(options) { setup: function(options) {
var self = this; var self = this;
self.setOptions(options); self.setOptions(options);
@ -30,9 +36,9 @@
History.addEvent('change', self.openPage.bind(self)); History.addEvent('change', self.openPage.bind(self));
self.c.addEvent('click:relay(a[href^=/]:not([target]))', self.pushState.bind(self)); self.c.addEvent('click:relay(a[href^=/]:not([target]))', self.pushState.bind(self));
self.c.addEvent('click:relay(a[href^=http])', self.openDerefered.bind(self)); self.c.addEvent('click:relay(a[href^=http])', self.openDerefered.bind(self));
// Check if device is touchenabled // Check if device is touchenabled
self.touch_device = 'ontouchstart' in document.documentElement; self.touch_device = 'ontouchstart' in window || navigator.msMaxTouchPoints;
if(self.touch_device) if(self.touch_device)
self.c.addClass('touch_enabled'); self.c.addClass('touch_enabled');
@ -55,7 +61,7 @@
History.push(url); History.push(url);
} }
}, },
isMac: function(){ isMac: function(){
return Browser.Platform.mac return Browser.Platform.mac
}, },
@ -111,7 +117,7 @@
} }
}) })
]; ];
setting_links.each(function(a){ setting_links.each(function(a){
self.block.more.addLink(a) self.block.more.addLink(a)
}); });
@ -336,6 +342,66 @@
}) })
) )
); );
},
/*
* Global events
*/
on: function(name, handle){
var self = this;
if(!self.global_events[name])
self.global_events[name] = [];
self.global_events[name].push(handle);
},
trigger: function(name, args, on_complete){
var self = this;
if(!self.global_events[name]){ return; }
if(!on_complete && typeOf(args) == 'function'){
on_complete = args;
args = {};
}
// Create parallel callback
var callbacks = [];
self.global_events[name].each(function(handle, nr){
callbacks.push(function(callback){
var results = handle(args || {});
callback(null, results || null);
});
});
// Fire events
async.parallel(callbacks, function(err, results){
if(err) p(err);
if(on_complete)
on_complete(results);
});
},
off: function(name, handle){
var self = this;
if(!self.global_events[name]) return;
// Remove single
if(handle){
self.global_events[name] = self.global_events[name].erase(handle);
}
// Reset full event
else {
self.global_events[name] = [];
}
} }
}); });
@ -503,7 +569,7 @@ function randomString(length, extra) {
case "string": saveKeyPath(argument.match(/[+-]|[^.]+/g)); break; case "string": saveKeyPath(argument.match(/[+-]|[^.]+/g)); break;
} }
}); });
return this.sort(comparer); return this.stableSort(comparer);
} }
}); });

56
couchpotato/static/scripts/library/Array.stableSort.js

@ -0,0 +1,56 @@
/*
---
script: Array.stableSort.js
description: Add a stable sort algorithm for all browsers
license: MIT-style license.
authors:
- Yorick Sijsling
requires:
core/1.3: '*'
provides:
- [Array.stableSort, Array.mergeSort]
...
*/
(function() {

    // Default comparator mirroring Array.prototype.sort's contract:
    // > 0 when a sorts after b, < 0 when before, 0 when equivalent.
    var defaultSortFunction = function(a, b) {
        return a > b ? 1 : (a < b ? -1 : 0);
    }

    Array.implement({

        stableSort: function(compare) {
            // Browsers whose native Array.sort is known to be unstable get the
            // merge sort; the rest keep the native sort. Real feature detection
            // is impractical: an unstable algorithm often happens to produce
            // the same output as a stable one, so a runtime probe cannot
            // reliably tell them apart.
            return (Browser.chrome || Browser.firefox2 || Browser.opera9) ? this.mergeSort(compare) : this.sort(compare);
        },

        mergeSort: function(compare, token) {
            compare = compare || defaultSortFunction;
            if (this.length > 1) {
                // Split and sort both halves recursively (in place, via splice).
                var right = this.splice(Math.floor(this.length / 2)).mergeSort(compare);
                var left = this.splice(0).mergeSort(compare); // 'this' is now empty.
                // Merge the sorted halves back into 'this'; ties take from the
                // left half first, which is what makes the sort stable.
                while (left.length > 0 || right.length > 0) {
                    this.push(
                        right.length === 0 ? left.shift()
                        : left.length === 0 ? right.shift()
                        : compare(left[0], right[0]) > 0 ? right.shift()
                        : left.shift());
                }
            }
            return this;
        }

    });

})();

955
couchpotato/static/scripts/library/async.js

@ -0,0 +1,955 @@
/*global setImmediate: false, setTimeout: false, console: false */
(function () {
var async = {};
// global on the server, window in the browser
var root, previous_async;
root = this;
if (root != null) {
previous_async = root.async;
}
async.noConflict = function () {
root.async = previous_async;
return async;
};
// Wrap *fn* so the wrapper may be invoked at most once; any further
// invocation throws instead of silently re-running the callback
// (guards async completion handlers against double-calling bugs).
function only_once(fn) {
    var has_fired = false;
    return function () {
        if (!has_fired) {
            has_fired = true;
            fn.apply(root, arguments);
            return;
        }
        throw new Error("Callback was already called.");
    };
}
//// cross-browser compatibility helpers ////

// Iterate *arr*, preferring the native forEach when the host provides it.
var _each = function (arr, iterator) {
    if (arr.forEach) {
        return arr.forEach(iterator);
    }
    for (var idx = 0; idx < arr.length; idx += 1) {
        iterator(arr[idx], idx, arr);
    }
};

// Map *arr* through *iterator*, preferring the native Array.map.
var _map = function (arr, iterator) {
    if (arr.map) {
        return arr.map(iterator);
    }
    var mapped = [];
    _each(arr, function (value, idx, whole) {
        mapped.push(iterator(value, idx, whole));
    });
    return mapped;
};

// Left fold of *arr* with seed *memo*, preferring the native Array.reduce.
var _reduce = function (arr, iterator, memo) {
    if (arr.reduce) {
        return arr.reduce(iterator, memo);
    }
    var acc = memo;
    _each(arr, function (value, idx, whole) {
        acc = iterator(acc, value, idx, whole);
    });
    return acc;
};

// Own enumerable property names of *obj*, preferring native Object.keys.
var _keys = function (obj) {
    if (Object.keys) {
        return Object.keys(obj);
    }
    var found = [];
    for (var prop in obj) {
        if (obj.hasOwnProperty(prop)) {
            found.push(prop);
        }
    }
    return found;
};
//// exported async module functions ////

//// nextTick implementation with browser-compatible fallback ////

// Select the best available "run on the next turn of the event loop"
// primitive for the current host:
//   no process.nextTick (browser) -> setImmediate if present, else setTimeout(fn, 0)
//   Node.js                       -> process.nextTick, plus native setImmediate when available
if (typeof process === 'undefined' || !(process.nextTick)) {
    if (typeof setImmediate === 'function') {
        async.nextTick = function (fn) {
            // not a direct alias for IE10 compatibility
            setImmediate(fn);
        };
        async.setImmediate = async.nextTick;
    }
    else {
        async.nextTick = function (fn) {
            setTimeout(fn, 0);
        };
        async.setImmediate = async.nextTick;
    }
}
else {
    async.nextTick = process.nextTick;
    if (typeof setImmediate !== 'undefined') {
        async.setImmediate = setImmediate;
    }
    else {
        async.setImmediate = async.nextTick;
    }
}
// Apply *iterator* (item, done) to every element of *arr* in parallel.
// *callback(err)* fires once when all items have completed, or immediately
// on the first error — after which callback is swapped for a no-op so
// later completions/errors are ignored.
async.each = function (arr, iterator, callback) {
    callback = callback || function () {};
    if (!arr.length) {
        return callback();
    }
    var completed = 0;
    _each(arr, function (x) {
        // only_once: an iterator that invokes its done-callback twice
        // throws instead of corrupting the completion count.
        iterator(x, only_once(function (err) {
            if (err) {
                callback(err);
                callback = function () {};
            }
            else {
                completed += 1;
                if (completed >= arr.length) {
                    callback(null);
                }
            }
        }));
    });
};
// Pre-1.3 name, kept as an alias.
async.forEach = async.each;
// Like async.each, but strictly sequential: the next iterator call only
// starts after the previous one's done-callback has fired.
async.eachSeries = function (arr, iterator, callback) {
    callback = callback || function () {};
    if (!arr.length) {
        return callback();
    }
    var completed = 0;
    var iterate = function () {
        iterator(arr[completed], function (err) {
            if (err) {
                callback(err);
                // Neutralise callback so any further completions are ignored.
                callback = function () {};
            }
            else {
                completed += 1;
                if (completed >= arr.length) {
                    callback(null);
                }
                else {
                    iterate();
                }
            }
        });
    };
    iterate();
};
// Pre-1.3 name, kept as an alias.
async.forEachSeries = async.eachSeries;
// Public wrapper: iterate *arr* with at most *limit* iterator calls in flight.
async.eachLimit = function (arr, limit, iterator, callback) {
    var fn = _eachLimit(limit);
    fn.apply(null, [arr, iterator, callback]);
};
async.forEachLimit = async.eachLimit;

// Factory producing an each-style function capped at *limit* concurrent
// iterator invocations.
var _eachLimit = function (limit) {

    return function (arr, iterator, callback) {
        callback = callback || function () {};
        // limit <= 0 would never start any work; treat it as "nothing to do".
        if (!arr.length || limit <= 0) {
            return callback();
        }
        var completed = 0;   // items whose iterator has finished
        var started = 0;     // items whose iterator has been launched
        var running = 0;     // iterators currently in flight

        // Launch iterators up to the limit; each completion tops the
        // pool back up by calling replenish() again.
        (function replenish () {
            if (completed >= arr.length) {
                return callback();
            }

            while (running < limit && started < arr.length) {
                started += 1;
                running += 1;
                iterator(arr[started - 1], function (err) {
                    if (err) {
                        callback(err);
                        callback = function () {};
                    }
                    else {
                        completed += 1;
                        running -= 1;
                        if (completed >= arr.length) {
                            callback();
                        }
                        else {
                            replenish();
                        }
                    }
                });
            }
        })();
    };
};
// Factories that specialise a generic collection helper (*fn*) by
// prepending a concrete iteration strategy as its first argument.
var doParallel = function (fn) {
    return function () {
        var forwarded = [async.each];
        forwarded.push.apply(forwarded, arguments);
        return fn.apply(null, forwarded);
    };
};
var doParallelLimit = function(limit, fn) {
    return function () {
        var forwarded = [_eachLimit(limit)];
        forwarded.push.apply(forwarded, arguments);
        return fn.apply(null, forwarded);
    };
};
var doSeries = function (fn) {
    return function () {
        var forwarded = [async.eachSeries];
        forwarded.push.apply(forwarded, arguments);
        return fn.apply(null, forwarded);
    };
};
// Shared implementation behind map/mapSeries/mapLimit: each element is
// tagged with its index so results land in input order even when the
// iteration strategy (*eachfn*) completes items out of order.
var _asyncMap = function (eachfn, arr, iterator, callback) {
    var results = [];
    arr = _map(arr, function (x, i) {
        return {index: i, value: x};
    });
    eachfn(arr, function (x, callback) {
        iterator(x.value, function (err, v) {
            results[x.index] = v;
            callback(err);
        });
    }, function (err) {
        callback(err, results);
    });
};
async.map = doParallel(_asyncMap);
async.mapSeries = doSeries(_asyncMap);
async.mapLimit = function (arr, limit, iterator, callback) {
    return _mapLimit(limit)(arr, iterator, callback);
};
var _mapLimit = function(limit) {
    return doParallelLimit(limit, _asyncMap);
};
// reduce only has a series version, as doing reduce in parallel won't
// work in many situations.
async.reduce = function (arr, memo, iterator, callback) {
async.eachSeries(arr, function (x, callback) {
iterator(memo, x, function (err, v) {
memo = v;
callback(err);
});
}, function (err) {
callback(err, memo);
});
};
// inject alias
async.inject = async.reduce;
// foldl alias
async.foldl = async.reduce;
async.reduceRight = function (arr, memo, iterator, callback) {
var reversed = _map(arr, function (x) {
return x;
}).reverse();
async.reduce(reversed, memo, iterator, callback);
};
// foldr alias
async.foldr = async.reduceRight;
var _filter = function (eachfn, arr, iterator, callback) {
var results = [];
arr = _map(arr, function (x, i) {
return {index: i, value: x};
});
eachfn(arr, function (x, callback) {
iterator(x.value, function (v) {
if (v) {
results.push(x);
}
callback();
});
}, function (err) {
callback(_map(results.sort(function (a, b) {
return a.index - b.index;
}), function (x) {
return x.value;
}));
});
};
async.filter = doParallel(_filter);
async.filterSeries = doSeries(_filter);
// select alias
async.select = async.filter;
async.selectSeries = async.filterSeries;
var _reject = function (eachfn, arr, iterator, callback) {
var results = [];
arr = _map(arr, function (x, i) {
return {index: i, value: x};
});
eachfn(arr, function (x, callback) {
iterator(x.value, function (v) {
if (!v) {
results.push(x);
}
callback();
});
}, function (err) {
callback(_map(results.sort(function (a, b) {
return a.index - b.index;
}), function (x) {
return x.value;
}));
});
};
async.reject = doParallel(_reject);
async.rejectSeries = doSeries(_reject);
var _detect = function (eachfn, arr, iterator, main_callback) {
eachfn(arr, function (x, callback) {
iterator(x, function (result) {
if (result) {
main_callback(x);
main_callback = function () {};
}
else {
callback();
}
});
}, function (err) {
main_callback();
});
};
async.detect = doParallel(_detect);
async.detectSeries = doSeries(_detect);
async.some = function (arr, iterator, main_callback) {
async.each(arr, function (x, callback) {
iterator(x, function (v) {
if (v) {
main_callback(true);
main_callback = function () {};
}
callback();
});
}, function (err) {
main_callback(false);
});
};
// any alias
async.any = async.some;
async.every = function (arr, iterator, main_callback) {
async.each(arr, function (x, callback) {
iterator(x, function (v) {
if (!v) {
main_callback(false);
main_callback = function () {};
}
callback();
});
}, function (err) {
main_callback(true);
});
};
// all alias
async.all = async.every;
async.sortBy = function (arr, iterator, callback) {
async.map(arr, function (x, callback) {
iterator(x, function (err, criteria) {
if (err) {
callback(err);
}
else {
callback(null, {value: x, criteria: criteria});
}
});
}, function (err, results) {
if (err) {
return callback(err);
}
else {
var fn = function (left, right) {
var a = left.criteria, b = right.criteria;
return a < b ? -1 : a > b ? 1 : 0;
};
callback(null, _map(results.sort(fn), function (x) {
return x.value;
}));
}
});
};
async.auto = function (tasks, callback) {
callback = callback || function () {};
var keys = _keys(tasks);
if (!keys.length) {
return callback(null);
}
var results = {};
var listeners = [];
var addListener = function (fn) {
listeners.unshift(fn);
};
var removeListener = function (fn) {
for (var i = 0; i < listeners.length; i += 1) {
if (listeners[i] === fn) {
listeners.splice(i, 1);
return;
}
}
};
var taskComplete = function () {
_each(listeners.slice(0), function (fn) {
fn();
});
};
addListener(function () {
if (_keys(results).length === keys.length) {
callback(null, results);
callback = function () {};
}
});
_each(keys, function (k) {
var task = (tasks[k] instanceof Function) ? [tasks[k]]: tasks[k];
var taskCallback = function (err) {
var args = Array.prototype.slice.call(arguments, 1);
if (args.length <= 1) {
args = args[0];
}
if (err) {
var safeResults = {};
_each(_keys(results), function(rkey) {
safeResults[rkey] = results[rkey];
});
safeResults[k] = args;
callback(err, safeResults);
// stop subsequent errors hitting callback multiple times
callback = function () {};
}
else {
results[k] = args;
async.setImmediate(taskComplete);
}
};
var requires = task.slice(0, Math.abs(task.length - 1)) || [];
var ready = function () {
return _reduce(requires, function (a, x) {
return (a && results.hasOwnProperty(x));
}, true) && !results.hasOwnProperty(k);
};
if (ready()) {
task[task.length - 1](taskCallback, results);
}
else {
var listener = function () {
if (ready()) {
removeListener(listener);
task[task.length - 1](taskCallback, results);
}
};
addListener(listener);
}
});
};
async.waterfall = function (tasks, callback) {
callback = callback || function () {};
if (tasks.constructor !== Array) {
var err = new Error('First argument to waterfall must be an array of functions');
return callback(err);
}
if (!tasks.length) {
return callback();
}
var wrapIterator = function (iterator) {
return function (err) {
if (err) {
callback.apply(null, arguments);
callback = function () {};
}
else {
var args = Array.prototype.slice.call(arguments, 1);
var next = iterator.next();
if (next) {
args.push(wrapIterator(next));
}
else {
args.push(callback);
}
async.setImmediate(function () {
iterator.apply(null, args);
});
}
};
};
wrapIterator(async.iterator(tasks))();
};
var _parallel = function(eachfn, tasks, callback) {
callback = callback || function () {};
if (tasks.constructor === Array) {
eachfn.map(tasks, function (fn, callback) {
if (fn) {
fn(function (err) {
var args = Array.prototype.slice.call(arguments, 1);
if (args.length <= 1) {
args = args[0];
}
callback.call(null, err, args);
});
}
}, callback);
}
else {
var results = {};
eachfn.each(_keys(tasks), function (k, callback) {
tasks[k](function (err) {
var args = Array.prototype.slice.call(arguments, 1);
if (args.length <= 1) {
args = args[0];
}
results[k] = args;
callback(err);
});
}, function (err) {
callback(err, results);
});
}
};
async.parallel = function (tasks, callback) {
_parallel({ map: async.map, each: async.each }, tasks, callback);
};
async.parallelLimit = function(tasks, limit, callback) {
_parallel({ map: _mapLimit(limit), each: _eachLimit(limit) }, tasks, callback);
};
async.series = function (tasks, callback) {
callback = callback || function () {};
if (tasks.constructor === Array) {
async.mapSeries(tasks, function (fn, callback) {
if (fn) {
fn(function (err) {
var args = Array.prototype.slice.call(arguments, 1);
if (args.length <= 1) {
args = args[0];
}
callback.call(null, err, args);
});
}
}, callback);
}
else {
var results = {};
async.eachSeries(_keys(tasks), function (k, callback) {
tasks[k](function (err) {
var args = Array.prototype.slice.call(arguments, 1);
if (args.length <= 1) {
args = args[0];
}
results[k] = args;
callback(err);
});
}, function (err) {
callback(err, results);
});
}
};
async.iterator = function (tasks) {
var makeCallback = function (index) {
var fn = function () {
if (tasks.length) {
tasks[index].apply(null, arguments);
}
return fn.next();
};
fn.next = function () {
return (index < tasks.length - 1) ? makeCallback(index + 1): null;
};
return fn;
};
return makeCallback(0);
};
async.apply = function (fn) {
var args = Array.prototype.slice.call(arguments, 1);
return function () {
return fn.apply(
null, args.concat(Array.prototype.slice.call(arguments))
);
};
};
var _concat = function (eachfn, arr, fn, callback) {
var r = [];
eachfn(arr, function (x, cb) {
fn(x, function (err, y) {
r = r.concat(y || []);
cb(err);
});
}, function (err) {
callback(err, r);
});
};
async.concat = doParallel(_concat);
async.concatSeries = doSeries(_concat);
async.whilst = function (test, iterator, callback) {
if (test()) {
iterator(function (err) {
if (err) {
return callback(err);
}
async.whilst(test, iterator, callback);
});
}
else {
callback();
}
};
async.doWhilst = function (iterator, test, callback) {
iterator(function (err) {
if (err) {
return callback(err);
}
if (test()) {
async.doWhilst(iterator, test, callback);
}
else {
callback();
}
});
};
async.until = function (test, iterator, callback) {
if (!test()) {
iterator(function (err) {
if (err) {
return callback(err);
}
async.until(test, iterator, callback);
});
}
else {
callback();
}
};
async.doUntil = function (iterator, test, callback) {
iterator(function (err) {
if (err) {
return callback(err);
}
if (!test()) {
async.doUntil(iterator, test, callback);
}
else {
callback();
}
});
};
async.queue = function (worker, concurrency) {
if (concurrency === undefined) {
concurrency = 1;
}
function _insert(q, data, pos, callback) {
if(data.constructor !== Array) {
data = [data];
}
_each(data, function(task) {
var item = {
data: task,
callback: typeof callback === 'function' ? callback : null
};
if (pos) {
q.tasks.unshift(item);
} else {
q.tasks.push(item);
}
if (q.saturated && q.tasks.length === concurrency) {
q.saturated();
}
async.setImmediate(q.process);
});
}
var workers = 0;
var q = {
tasks: [],
concurrency: concurrency,
saturated: null,
empty: null,
drain: null,
push: function (data, callback) {
_insert(q, data, false, callback);
},
unshift: function (data, callback) {
_insert(q, data, true, callback);
},
process: function () {
if (workers < q.concurrency && q.tasks.length) {
var task = q.tasks.shift();
if (q.empty && q.tasks.length === 0) {
q.empty();
}
workers += 1;
var next = function () {
workers -= 1;
if (task.callback) {
task.callback.apply(task, arguments);
}
if (q.drain && q.tasks.length + workers === 0) {
q.drain();
}
q.process();
};
var cb = only_once(next);
worker(task.data, cb);
}
},
length: function () {
return q.tasks.length;
},
running: function () {
return workers;
}
};
return q;
};
async.cargo = function (worker, payload) {
var working = false,
tasks = [];
var cargo = {
tasks: tasks,
payload: payload,
saturated: null,
empty: null,
drain: null,
push: function (data, callback) {
if(data.constructor !== Array) {
data = [data];
}
_each(data, function(task) {
tasks.push({
data: task,
callback: typeof callback === 'function' ? callback : null
});
if (cargo.saturated && tasks.length === payload) {
cargo.saturated();
}
});
async.setImmediate(cargo.process);
},
process: function process() {
if (working) return;
if (tasks.length === 0) {
if(cargo.drain) cargo.drain();
return;
}
var ts = typeof payload === 'number'
? tasks.splice(0, payload)
: tasks.splice(0);
var ds = _map(ts, function (task) {
return task.data;
});
if(cargo.empty) cargo.empty();
working = true;
worker(ds, function () {
working = false;
var args = arguments;
_each(ts, function (data) {
if (data.callback) {
data.callback.apply(null, args);
}
});
process();
});
},
length: function () {
return tasks.length;
},
running: function () {
return working;
}
};
return cargo;
};
var _console_fn = function (name) {
return function (fn) {
var args = Array.prototype.slice.call(arguments, 1);
fn.apply(null, args.concat([function (err) {
var args = Array.prototype.slice.call(arguments, 1);
if (typeof console !== 'undefined') {
if (err) {
if (console.error) {
console.error(err);
}
}
else if (console[name]) {
_each(args, function (x) {
console[name](x);
});
}
}
}]));
};
};
async.log = _console_fn('log');
async.dir = _console_fn('dir');
/*async.info = _console_fn('info');
async.warn = _console_fn('warn');
async.error = _console_fn('error');*/
async.memoize = function (fn, hasher) {
var memo = {};
var queues = {};
hasher = hasher || function (x) {
return x;
};
var memoized = function () {
var args = Array.prototype.slice.call(arguments);
var callback = args.pop();
var key = hasher.apply(null, args);
if (key in memo) {
callback.apply(null, memo[key]);
}
else if (key in queues) {
queues[key].push(callback);
}
else {
queues[key] = [callback];
fn.apply(null, args.concat([function () {
memo[key] = arguments;
var q = queues[key];
delete queues[key];
for (var i = 0, l = q.length; i < l; i++) {
q[i].apply(null, arguments);
}
}]));
}
};
memoized.memo = memo;
memoized.unmemoized = fn;
return memoized;
};
async.unmemoize = function (fn) {
return function () {
return (fn.unmemoized || fn).apply(null, arguments);
};
};
// Invoke *iterator* (n, done) *count* times in parallel; results are
// collected in index order via async.map.
async.times = function (count, iterator, callback) {
    var indexes = [], n = 0;
    while (n < count) {
        indexes.push(n);
        n += 1;
    }
    return async.map(indexes, iterator, callback);
};
// Sequential variant of async.times, built on mapSeries.
async.timesSeries = function (count, iterator, callback) {
    var indexes = [], n = 0;
    while (n < count) {
        indexes.push(n);
        n += 1;
    }
    return async.mapSeries(indexes, iterator, callback);
};
async.compose = function (/* functions... */) {
var fns = Array.prototype.reverse.call(arguments);
return function () {
var that = this;
var args = Array.prototype.slice.call(arguments);
var callback = args.pop();
async.reduce(fns, args, function (newargs, fn, cb) {
fn.apply(that, newargs.concat([function () {
var err = arguments[0];
var nextargs = Array.prototype.slice.call(arguments, 1);
cb(err, nextargs);
}]))
},
function (err, results) {
callback.apply(that, [err].concat(results));
});
};
};
var _applyEach = function (eachfn, fns /*args...*/) {
var go = function () {
var that = this;
var args = Array.prototype.slice.call(arguments);
var callback = args.pop();
return eachfn(fns, function (fn, cb) {
fn.apply(that, args.concat([cb]));
},
callback);
};
if (arguments.length > 2) {
var args = Array.prototype.slice.call(arguments, 2);
return go.apply(this, args);
}
else {
return go;
}
};
async.applyEach = doParallel(_applyEach);
async.applyEachSeries = doSeries(_applyEach);
async.forever = function (fn, callback) {
function next(err) {
if (err) {
if (callback) {
return callback(err);
}
throw err;
}
fn(next);
}
next();
};
// AMD / RequireJS
if (typeof define !== 'undefined' && define.amd) {
define([], function () {
return async;
});
}
// Node.js
else if (typeof module !== 'undefined' && module.exports) {
module.exports = async;
}
// included directly via <script> tag
else {
root.async = async;
}
}());

2
couchpotato/static/scripts/page/about.js

@ -106,7 +106,7 @@ var AboutSettingTab = new Class({
new Element('div.donate', { new Element('div.donate', {
'html': 'html':
'Or support me via:' + 'Or support me via:' +
'<iframe src="http://couchpota.to/donate.html" style="border:none; height: 200px;" scrolling="no"></iframe>' '<iframe src="https://couchpota.to/donate.html" style="border:none; height: 200px;" scrolling="no"></iframe>'
}) })
); );

15
couchpotato/static/scripts/page/home.js

@ -52,11 +52,24 @@ Page.Home = new Class({
}) })
), ),
'filter': { 'filter': {
'release_status': 'snatched,available' 'release_status': 'snatched,seeding,missing,available,downloaded'
}, },
'limit': null, 'limit': null,
'onLoaded': function(){ 'onLoaded': function(){
self.chain.callChain(); self.chain.callChain();
},
'onMovieAdded': function(notification){
// Track movie added
var after_search = function(data){
if(notification.data.id != data.data.id) return;
// Force update after search
self.available_list.update();
App.off('movie.searcher.ended', after_search);
}
App.on('movie.searcher.ended', after_search);
} }
}); });

19
couchpotato/static/scripts/page/manage.js

@ -102,6 +102,8 @@ Page.Manage = new Class({
} }
} }
else { else {
// Capture progress so we can use it in our *each* closure
var progress = json.progress
// Don't add loader when page is loading still // Don't add loader when page is loading still
if(!self.list.navigation) if(!self.list.navigation)
@ -112,10 +114,13 @@ Page.Manage = new Class({
self.progress_container.empty(); self.progress_container.empty();
Object.each(json.progress, function(progress, folder){ var sorted_table = self.parseProgress(json.progress)
sorted_table.each(function(folder){
var folder_progress = progress[folder]
new Element('div').adopt( new Element('div').adopt(
new Element('span.folder', {'text': folder}), new Element('span.folder', {'text': folder}),
new Element('span.percentage', {'text': progress.total ? (((progress.total-progress.to_go)/progress.total)*100).round() + '%' : '0%'}) new Element('span.percentage', {'text': folder_progress.total ? (((folder_progress.total-folder_progress.to_go)/folder_progress.total)*100).round() + '%' : '0%'})
).inject(self.progress_container) ).inject(self.progress_container)
}); });
@ -124,7 +129,17 @@ Page.Manage = new Class({
}) })
}, 1000); }, 1000);
},
parseProgress: function (progress_object) {
var folder, temp_array = [];
for (folder in progress_object) {
if (progress_object.hasOwnProperty(folder)) {
temp_array.push(folder)
}
}
return temp_array.stableSort()
} }
}); });

14
couchpotato/static/scripts/page/settings.js

@ -111,6 +111,10 @@ Page.Settings = new Class({
Cookie.write('advanced_toggle_checked', +self.advanced_toggle.checked, {'duration': 365}); Cookie.write('advanced_toggle_checked', +self.advanced_toggle.checked, {'duration': 365});
}, },
sortByOrder: function(a, b){
return (a.order || 100) - (b.order || 100)
},
create: function(json){ create: function(json){
var self = this; var self = this;
@ -141,13 +145,11 @@ Page.Settings = new Class({
options.include(section); options.include(section);
}); });
options.sort(function(a, b){ options.stableSort(self.sortByOrder).each(function(section){
return (a.order || 100) - (b.order || 100)
}).each(function(section){
var section_name = section.section_name; var section_name = section.section_name;
// Add groups to content // Add groups to content
section.groups.sortBy('order').each(function(group){ section.groups.stableSort(self.sortByOrder).each(function(group){
if(group.hidden) return; if(group.hidden) return;
if(self.wizard_only && !group.wizard) if(self.wizard_only && !group.wizard)
@ -184,9 +186,7 @@ Page.Settings = new Class({
} }
// Add options to group // Add options to group
group.options.sort(function(a, b){ group.options.stableSort(self.sortByOrder).each(function(option){
return (a.order || 100) - (b.order || 100)
}).each(function(option){
if(option.hidden) return; if(option.hidden) return;
var class_name = (option.type || 'string').capitalize(); var class_name = (option.type || 'string').capitalize();
var input = new Option[class_name](section_name, option.name, self.getValue(section_name, option.name), option); var input = new Option[class_name](section_name, option.name, self.getValue(section_name, option.name), option);

2
couchpotato/templates/index.html

@ -4,6 +4,8 @@
<head> <head>
<meta name="viewport" content="width=device-width, initial-scale=1.0, maximum-scale=1.0, user-scalable=no"/> <meta name="viewport" content="width=device-width, initial-scale=1.0, maximum-scale=1.0, user-scalable=no"/>
<meta name="apple-mobile-web-app-capable" content="yes"> <meta name="apple-mobile-web-app-capable" content="yes">
<meta name="mobile-web-app-capable" content="yes">
{% for url in fireEvent('clientscript.get_styles', as_html = True, location = 'front', single = True) %} {% for url in fireEvent('clientscript.get_styles', as_html = True, location = 'front', single = True) %}
<link rel="stylesheet" href="{{ Env.get('web_base') }}{{ url }}" type="text/css">{% end %} <link rel="stylesheet" href="{{ Env.get('web_base') }}{{ url }}" type="text/css">{% end %}

2
init/fedora

@ -1,3 +1,5 @@
#!/bin/sh
#
### BEGIN INIT INFO ### BEGIN INIT INFO
# Provides: CouchPotato application instance # Provides: CouchPotato application instance
# Required-Start: $all # Required-Start: $all

2
init/ubuntu

@ -20,6 +20,8 @@ else
echo "/etc/default/couchpotato not found using default settings."; echo "/etc/default/couchpotato not found using default settings.";
fi fi
. /lib/lsb/init-functions
# Script name # Script name
NAME=couchpotato NAME=couchpotato

4
libs/apscheduler/__init__.py

@ -1,3 +1,3 @@
version_info = (2, 0, 2) version_info = (2, 1, 1)
version = '.'.join(str(n) for n in version_info[:3]) version = '.'.join(str(n) for n in version_info[:3])
release = version + ''.join(str(n) for n in version_info[3:]) release = '.'.join(str(n) for n in version_info)

31
libs/apscheduler/job.py

@ -16,22 +16,25 @@ class MaxInstancesReachedError(Exception):
class Job(object): class Job(object):
""" """
Encapsulates the actual Job along with its metadata. Job instances Encapsulates the actual Job along with its metadata. Job instances
are created by the scheduler when adding jobs, and it should not be are created by the scheduler when adding jobs, and should not be
directly instantiated. directly instantiated. These options can be set when adding jobs
to the scheduler (see :ref:`job_options`).
:param trigger: trigger that determines the execution times
:param func: callable to call when the trigger is triggered :var trigger: trigger that determines the execution times
:param args: list of positional arguments to call func with :var func: callable to call when the trigger is triggered
:param kwargs: dict of keyword arguments to call func with :var args: list of positional arguments to call func with
:param name: name of the job (optional) :var kwargs: dict of keyword arguments to call func with
:param misfire_grace_time: seconds after the designated run time that :var name: name of the job
:var misfire_grace_time: seconds after the designated run time that
the job is still allowed to be run the job is still allowed to be run
:param coalesce: run once instead of many times if the scheduler determines :var coalesce: run once instead of many times if the scheduler determines
that the job should be run more than once in succession that the job should be run more than once in succession
:param max_runs: maximum number of times this job is allowed to be :var max_runs: maximum number of times this job is allowed to be
triggered triggered
:param max_instances: maximum number of concurrently running :var max_instances: maximum number of concurrently running
instances allowed for this job instances allowed for this job
:var runs: number of times this job has been triggered
:var instances: number of concurrently running instances of this job
""" """
id = None id = None
next_run_time = None next_run_time = None
@ -130,5 +133,5 @@ class Job(object):
return '<Job (name=%s, trigger=%s)>' % (self.name, repr(self.trigger)) return '<Job (name=%s, trigger=%s)>' % (self.name, repr(self.trigger))
def __str__(self): def __str__(self):
return '%s (trigger: %s, next run at: %s)' % (self.name, return '%s (trigger: %s, next run at: %s)' % (
str(self.trigger), str(self.next_run_time)) self.name, str(self.trigger), str(self.next_run_time))

2
libs/apscheduler/jobstores/ram_store.py

@ -8,7 +8,7 @@ from apscheduler.jobstores.base import JobStore
class RAMJobStore(JobStore): class RAMJobStore(JobStore):
def __init__(self): def __init__(self):
self.jobs = [] self.jobs = []
def add_job(self, job): def add_job(self, job):
self.jobs.append(job) self.jobs.append(job)

91
libs/apscheduler/jobstores/redis_store.py

@ -0,0 +1,91 @@
"""
Stores jobs in a Redis database.
"""
from uuid import uuid4
from datetime import datetime
import logging
from apscheduler.jobstores.base import JobStore
from apscheduler.job import Job
try:
import cPickle as pickle
except ImportError: # pragma: nocover
import pickle
try:
from redis import StrictRedis
except ImportError: # pragma: nocover
raise ImportError('RedisJobStore requires redis installed')
try:
long = long
except NameError:
long = int
logger = logging.getLogger(__name__)
class RedisJobStore(JobStore):
    """Job store that persists jobs as hashes in a Redis database.

    Each job lives under the key ``<key_prefix><uuid>`` as a hash with three
    fields: the pickled job state, the run counter, and the next run time
    (stored separately as an ISO-8601 string so ``update_job`` can refresh it
    without re-pickling the whole job).
    """

    def __init__(self, db=0, key_prefix='jobs.',
                 pickle_protocol=pickle.HIGHEST_PROTOCOL, **connect_args):
        # In-memory mirror of the jobs currently held in Redis.
        self.jobs = []
        self.pickle_protocol = pickle_protocol
        self.key_prefix = key_prefix

        if db is None:
            raise ValueError('The "db" parameter must not be empty')
        if not key_prefix:
            raise ValueError('The "key_prefix" parameter must not be empty')
        self.redis = StrictRedis(db=db, **connect_args)

    def add_job(self, job):
        # Assign a fresh UUID; it doubles as the Redis key suffix.
        job.id = str(uuid4())
        job_state = job.__getstate__()
        # NOTE: the dict values are evaluated top-to-bottom, so the state is
        # pickled *before* next_run_time is popped — the pickled blob still
        # contains it, and load_jobs() overwrites it from the hash field.
        job_dict = {
            'job_state': pickle.dumps(job_state, self.pickle_protocol),
            'runs': '0',
            'next_run_time': job_state.pop('next_run_time').isoformat()}
        self.redis.hmset(self.key_prefix + job.id, job_dict)
        self.jobs.append(job)

    def remove_job(self, job):
        self.redis.delete(self.key_prefix + job.id)
        self.jobs.remove(job)

    def load_jobs(self):
        # Fetch every stored hash in one round trip via a pipeline.
        jobs = []
        keys = self.redis.keys(self.key_prefix + '*')
        pipeline = self.redis.pipeline()
        for key in keys:
            pipeline.hgetall(key)
        results = pipeline.execute()

        for job_dict in results:
            job_state = {}
            try:
                # Bypass Job.__init__ and rebuild from the pickled state.
                job = Job.__new__(Job)
                # Hash field names are encoded: redis-py returns bytes keys
                # (Python 3), so look up with bytes on both versions.
                job_state = pickle.loads(job_dict['job_state'.encode()])
                job_state['runs'] = long(job_dict['runs'.encode()])
                dateval = job_dict['next_run_time'.encode()].decode()
                # The separately stored field wins over the pickled value.
                job_state['next_run_time'] = datetime.strptime(
                    dateval, '%Y-%m-%dT%H:%M:%S')
                job.__setstate__(job_state)
                jobs.append(job)
            except Exception:
                # A single corrupt job must not prevent the rest from loading.
                job_name = job_state.get('name', '(unknown)')
                logger.exception('Unable to restore job "%s"', job_name)
        self.jobs = jobs

    def update_job(self, job):
        # Only the mutable fields change between runs; no re-pickling needed.
        attrs = {
            'next_run_time': job.next_run_time.isoformat(),
            'runs': job.runs}
        self.redis.hmset(self.key_prefix + job.id, attrs)

    def close(self):
        self.redis.connection_pool.disconnect()

    def __repr__(self):
        return '<%s>' % self.__class__.__name__

5
libs/apscheduler/jobstores/shelve_store.py

@ -32,17 +32,20 @@ class ShelveJobStore(JobStore):
def add_job(self, job): def add_job(self, job):
job.id = self._generate_id() job.id = self._generate_id()
self.jobs.append(job)
self.store[job.id] = job.__getstate__() self.store[job.id] = job.__getstate__()
self.store.sync()
self.jobs.append(job)
def update_job(self, job): def update_job(self, job):
job_dict = self.store[job.id] job_dict = self.store[job.id]
job_dict['next_run_time'] = job.next_run_time job_dict['next_run_time'] = job.next_run_time
job_dict['runs'] = job.runs job_dict['runs'] = job.runs
self.store[job.id] = job_dict self.store[job.id] = job_dict
self.store.sync()
def remove_job(self, job): def remove_job(self, job):
del self.store[job.id] del self.store[job.id]
self.store.sync()
self.jobs.remove(job) self.jobs.remove(job)
def load_jobs(self): def load_jobs(self):

18
libs/apscheduler/jobstores/sqlalchemy_store.py

@ -4,6 +4,8 @@ Stores jobs in a database table using SQLAlchemy.
import pickle import pickle
import logging import logging
import sqlalchemy
from apscheduler.jobstores.base import JobStore from apscheduler.jobstores.base import JobStore
from apscheduler.job import Job from apscheduler.job import Job
@ -28,17 +30,19 @@ class SQLAlchemyJobStore(JobStore):
else: else:
raise ValueError('Need either "engine" or "url" defined') raise ValueError('Need either "engine" or "url" defined')
self.jobs_t = Table(tablename, metadata or MetaData(), if sqlalchemy.__version__ < '0.7':
pickle_coltype = PickleType(pickle_protocol, mutable=False)
else:
pickle_coltype = PickleType(pickle_protocol)
self.jobs_t = Table(
tablename, metadata or MetaData(),
Column('id', Integer, Column('id', Integer,
Sequence(tablename + '_id_seq', optional=True), Sequence(tablename + '_id_seq', optional=True),
primary_key=True), primary_key=True),
Column('trigger', PickleType(pickle_protocol, mutable=False), Column('trigger', pickle_coltype, nullable=False),
nullable=False),
Column('func_ref', String(1024), nullable=False), Column('func_ref', String(1024), nullable=False),
Column('args', PickleType(pickle_protocol, mutable=False), Column('args', pickle_coltype, nullable=False),
nullable=False), Column('kwargs', pickle_coltype, nullable=False),
Column('kwargs', PickleType(pickle_protocol, mutable=False),
nullable=False),
Column('name', Unicode(1024)), Column('name', Unicode(1024)),
Column('misfire_grace_time', Integer, nullable=False), Column('misfire_grace_time', Integer, nullable=False),
Column('coalesce', Boolean, nullable=False), Column('coalesce', Boolean, nullable=False),

70
libs/apscheduler/scheduler.py

@ -35,7 +35,7 @@ class Scheduler(object):
their execution. their execution.
""" """
_stopped = False _stopped = True
_thread = None _thread = None
def __init__(self, gconfig={}, **options): def __init__(self, gconfig={}, **options):
@ -60,6 +60,7 @@ class Scheduler(object):
self.misfire_grace_time = int(config.pop('misfire_grace_time', 1)) self.misfire_grace_time = int(config.pop('misfire_grace_time', 1))
self.coalesce = asbool(config.pop('coalesce', True)) self.coalesce = asbool(config.pop('coalesce', True))
self.daemonic = asbool(config.pop('daemonic', True)) self.daemonic = asbool(config.pop('daemonic', True))
self.standalone = asbool(config.pop('standalone', False))
# Configure the thread pool # Configure the thread pool
if 'threadpool' in config: if 'threadpool' in config:
@ -85,6 +86,12 @@ class Scheduler(object):
def start(self): def start(self):
""" """
Starts the scheduler in a new thread. Starts the scheduler in a new thread.
In threaded mode (the default), this method will return immediately
after starting the scheduler thread.
In standalone mode, this method will block until there are no more
scheduled jobs.
""" """
if self.running: if self.running:
raise SchedulerAlreadyRunningError raise SchedulerAlreadyRunningError
@ -99,11 +106,15 @@ class Scheduler(object):
del self._pending_jobs[:] del self._pending_jobs[:]
self._stopped = False self._stopped = False
self._thread = Thread(target=self._main_loop, name='APScheduler') if self.standalone:
self._thread.setDaemon(self.daemonic) self._main_loop()
self._thread.start() else:
self._thread = Thread(target=self._main_loop, name='APScheduler')
self._thread.setDaemon(self.daemonic)
self._thread.start()
def shutdown(self, wait=True, shutdown_threadpool=True): def shutdown(self, wait=True, shutdown_threadpool=True,
close_jobstores=True):
""" """
Shuts down the scheduler and terminates the thread. Shuts down the scheduler and terminates the thread.
Does not interrupt any currently running jobs. Does not interrupt any currently running jobs.
@ -111,6 +122,7 @@ class Scheduler(object):
:param wait: ``True`` to wait until all currently executing jobs have :param wait: ``True`` to wait until all currently executing jobs have
finished (if ``shutdown_threadpool`` is also ``True``) finished (if ``shutdown_threadpool`` is also ``True``)
:param shutdown_threadpool: ``True`` to shut down the thread pool :param shutdown_threadpool: ``True`` to shut down the thread pool
:param close_jobstores: ``True`` to close all job stores after shutdown
""" """
if not self.running: if not self.running:
return return
@ -123,11 +135,19 @@ class Scheduler(object):
self._threadpool.shutdown(wait) self._threadpool.shutdown(wait)
# Wait until the scheduler thread terminates # Wait until the scheduler thread terminates
self._thread.join() if self._thread:
self._thread.join()
# Close all job stores
if close_jobstores:
for jobstore in itervalues(self._jobstores):
jobstore.close()
@property @property
def running(self): def running(self):
return not self._stopped and self._thread and self._thread.isAlive() thread_alive = self._thread and self._thread.isAlive()
standalone = getattr(self, 'standalone', False)
return not self._stopped and (standalone or thread_alive)
def add_jobstore(self, jobstore, alias, quiet=False): def add_jobstore(self, jobstore, alias, quiet=False):
""" """
@ -156,21 +176,25 @@ class Scheduler(object):
if not quiet: if not quiet:
self._wakeup.set() self._wakeup.set()
def remove_jobstore(self, alias): def remove_jobstore(self, alias, close=True):
""" """
Removes the job store by the given alias from this scheduler. Removes the job store by the given alias from this scheduler.
:param close: ``True`` to close the job store after removing it
:type alias: str :type alias: str
""" """
self._jobstores_lock.acquire() self._jobstores_lock.acquire()
try: try:
try: jobstore = self._jobstores.pop(alias)
del self._jobstores[alias] if not jobstore:
except KeyError:
raise KeyError('No such job store: %s' % alias) raise KeyError('No such job store: %s' % alias)
finally: finally:
self._jobstores_lock.release() self._jobstores_lock.release()
# Close the job store if requested
if close:
jobstore.close()
# Notify listeners that a job store has been removed # Notify listeners that a job store has been removed
self._notify_listeners(JobStoreEvent(EVENT_JOBSTORE_REMOVED, alias)) self._notify_listeners(JobStoreEvent(EVENT_JOBSTORE_REMOVED, alias))
@ -245,8 +269,10 @@ class Scheduler(object):
**options): **options):
""" """
Adds the given job to the job list and notifies the scheduler thread. Adds the given job to the job list and notifies the scheduler thread.
Any extra keyword arguments are passed along to the constructor of the
:class:`~apscheduler.job.Job` class (see :ref:`job_options`).
:param trigger: alias of the job store to store the job in :param trigger: trigger that determines when ``func`` is called
:param func: callable to run at the given time :param func: callable to run at the given time
:param args: list of positional arguments to call func with :param args: list of positional arguments to call func with
:param kwargs: dict of keyword arguments to call func with :param kwargs: dict of keyword arguments to call func with
@ -276,6 +302,8 @@ class Scheduler(object):
def add_date_job(self, func, date, args=None, kwargs=None, **options): def add_date_job(self, func, date, args=None, kwargs=None, **options):
""" """
Schedules a job to be completed on a specific date and time. Schedules a job to be completed on a specific date and time.
Any extra keyword arguments are passed along to the constructor of the
:class:`~apscheduler.job.Job` class (see :ref:`job_options`).
:param func: callable to run at the given time :param func: callable to run at the given time
:param date: the date/time to run the job at :param date: the date/time to run the job at
@ -294,6 +322,8 @@ class Scheduler(object):
**options): **options):
""" """
Schedules a job to be completed on specified intervals. Schedules a job to be completed on specified intervals.
Any extra keyword arguments are passed along to the constructor of the
:class:`~apscheduler.job.Job` class (see :ref:`job_options`).
:param func: callable to run :param func: callable to run
:param weeks: number of weeks to wait :param weeks: number of weeks to wait
@ -322,6 +352,8 @@ class Scheduler(object):
""" """
Schedules a job to be completed on times that match the given Schedules a job to be completed on times that match the given
expressions. expressions.
Any extra keyword arguments are passed along to the constructor of the
:class:`~apscheduler.job.Job` class (see :ref:`job_options`).
:param func: callable to run :param func: callable to run
:param year: year to run on :param year: year to run on
@ -352,6 +384,8 @@ class Scheduler(object):
This decorator does not wrap its host function. This decorator does not wrap its host function.
Unscheduling decorated functions is possible by passing the ``job`` Unscheduling decorated functions is possible by passing the ``job``
attribute of the scheduled function to :meth:`unschedule_job`. attribute of the scheduled function to :meth:`unschedule_job`.
Any extra keyword arguments are passed along to the constructor of the
:class:`~apscheduler.job.Job` class (see :ref:`job_options`).
""" """
def inner(func): def inner(func):
func.job = self.add_cron_job(func, **options) func.job = self.add_cron_job(func, **options)
@ -364,6 +398,8 @@ class Scheduler(object):
This decorator does not wrap its host function. This decorator does not wrap its host function.
Unscheduling decorated functions is possible by passing the ``job`` Unscheduling decorated functions is possible by passing the ``job``
attribute of the scheduled function to :meth:`unschedule_job`. attribute of the scheduled function to :meth:`unschedule_job`.
Any extra keyword arguments are passed along to the constructor of the
:class:`~apscheduler.job.Job` class (see :ref:`job_options`).
""" """
def inner(func): def inner(func):
func.job = self.add_interval_job(func, **options) func.job = self.add_interval_job(func, **options)
@ -517,7 +553,8 @@ class Scheduler(object):
job.runs += len(run_times) job.runs += len(run_times)
# Update the job, but don't keep finished jobs around # Update the job, but don't keep finished jobs around
if job.compute_next_run_time(now + timedelta(microseconds=1)): if job.compute_next_run_time(
now + timedelta(microseconds=1)):
jobstore.update_job(job) jobstore.update_job(job)
else: else:
self._remove_job(job, alias, jobstore) self._remove_job(job, alias, jobstore)
@ -550,10 +587,15 @@ class Scheduler(object):
logger.debug('Next wakeup is due at %s (in %f seconds)', logger.debug('Next wakeup is due at %s (in %f seconds)',
next_wakeup_time, wait_seconds) next_wakeup_time, wait_seconds)
self._wakeup.wait(wait_seconds) self._wakeup.wait(wait_seconds)
self._wakeup.clear()
elif self.standalone:
logger.debug('No jobs left; shutting down scheduler')
self.shutdown()
break
else: else:
logger.debug('No jobs; waiting until a job is added') logger.debug('No jobs; waiting until a job is added')
self._wakeup.wait() self._wakeup.wait()
self._wakeup.clear() self._wakeup.clear()
logger.info('Scheduler has been shut down') logger.info('Scheduler has been shut down')
self._notify_listeners(SchedulerEvent(EVENT_SCHEDULER_SHUTDOWN)) self._notify_listeners(SchedulerEvent(EVENT_SCHEDULER_SHUTDOWN))

16
libs/apscheduler/triggers/cron/__init__.py

@ -21,8 +21,10 @@ class CronTrigger(object):
if self.start_date: if self.start_date:
self.start_date = convert_to_datetime(self.start_date) self.start_date = convert_to_datetime(self.start_date)
# Yank out all None valued fields # Check field names and yank out all None valued fields
for key, value in list(iteritems(values)): for key, value in list(iteritems(values)):
if key not in self.FIELD_NAMES:
raise TypeError('Invalid field name: %s' % key)
if value is None: if value is None:
del values[key] del values[key]
@ -111,17 +113,17 @@ class CronTrigger(object):
if next_value is None: if next_value is None:
# No valid value was found # No valid value was found
next_date, fieldnum = self._increment_field_value(next_date, next_date, fieldnum = self._increment_field_value(
fieldnum - 1) next_date, fieldnum - 1)
elif next_value > curr_value: elif next_value > curr_value:
# A valid, but higher than the starting value, was found # A valid, but higher than the starting value, was found
if field.REAL: if field.REAL:
next_date = self._set_field_value(next_date, fieldnum, next_date = self._set_field_value(
next_value) next_date, fieldnum, next_value)
fieldnum += 1 fieldnum += 1
else: else:
next_date, fieldnum = self._increment_field_value(next_date, next_date, fieldnum = self._increment_field_value(
fieldnum) next_date, fieldnum)
else: else:
# A valid value was found, no changes necessary # A valid value was found, no changes necessary
fieldnum += 1 fieldnum += 1

18
libs/apscheduler/triggers/cron/expressions.py

@ -8,7 +8,7 @@ import re
from apscheduler.util import asint from apscheduler.util import asint
__all__ = ('AllExpression', 'RangeExpression', 'WeekdayRangeExpression', __all__ = ('AllExpression', 'RangeExpression', 'WeekdayRangeExpression',
'WeekdayPositionExpression') 'WeekdayPositionExpression', 'LastDayOfMonthExpression')
WEEKDAYS = ['mon', 'tue', 'wed', 'thu', 'fri', 'sat', 'sun'] WEEKDAYS = ['mon', 'tue', 'wed', 'thu', 'fri', 'sat', 'sun']
@ -176,3 +176,19 @@ class WeekdayPositionExpression(AllExpression):
return "%s('%s', '%s')" % (self.__class__.__name__, return "%s('%s', '%s')" % (self.__class__.__name__,
self.options[self.option_num], self.options[self.option_num],
WEEKDAYS[self.weekday]) WEEKDAYS[self.weekday])
class LastDayOfMonthExpression(AllExpression):
value_re = re.compile(r'last', re.IGNORECASE)
def __init__(self):
pass
def get_next_value(self, date, field):
return monthrange(date.year, date.month)[1]
def __str__(self):
return 'last'
def __repr__(self):
return "%s()" % self.__class__.__name__

Some files were not shown because too many files changed in this diff

Loading…
Cancel
Save