
Merge branch 'refs/heads/develop'

pull/2776/head
Ruud committed 11 years ago
commit 47d2b81d1c
  1. contributing.md (28)
  2. couchpotato/core/_base/_core/main.py (4)
  3. couchpotato/core/_base/scheduler/main.py (14)
  4. couchpotato/core/_base/updater/main.py (13)
  5. couchpotato/core/downloaders/base.py (21)
  6. couchpotato/core/downloaders/blackhole/main.py (11)
  7. couchpotato/core/downloaders/deluge/main.py (34)
  8. couchpotato/core/downloaders/nzbget/__init__.py (7)
  9. couchpotato/core/downloaders/nzbget/main.py (96)
  10. couchpotato/core/downloaders/nzbvortex/__init__.py (10)
  11. couchpotato/core/downloaders/nzbvortex/main.py (56)
  12. couchpotato/core/downloaders/pneumatic/main.py (13)
  13. couchpotato/core/downloaders/rtorrent/__init__.py (29)
  14. couchpotato/core/downloaders/rtorrent/main.py (83)
  15. couchpotato/core/downloaders/sabnzbd/__init__.py (7)
  16. couchpotato/core/downloaders/sabnzbd/main.py (71)
  17. couchpotato/core/downloaders/synology/main.py (11)
  18. couchpotato/core/downloaders/transmission/main.py (73)
  19. couchpotato/core/downloaders/utorrent/main.py (142)
  20. couchpotato/core/helpers/encoding.py (14)
  21. couchpotato/core/helpers/variable.py (29)
  22. couchpotato/core/media/__init__.py (10)
  23. couchpotato/core/media/_base/media/main.py (454)
  24. couchpotato/core/media/movie/_base/main.py (383)
  25. couchpotato/core/media/movie/_base/static/list.js (7)
  26. couchpotato/core/media/movie/_base/static/movie.actions.js (6)
  27. couchpotato/core/media/movie/_base/static/movie.js (8)
  28. couchpotato/core/media/movie/searcher/main.py (14)
  29. couchpotato/core/notifications/boxcar/main.py (4)
  30. couchpotato/core/notifications/prowl/main.py (2)
  31. couchpotato/core/notifications/pushalot/main.py (2)
  32. couchpotato/core/notifications/pushbullet/main.py (4)
  33. couchpotato/core/notifications/trakt/main.py (2)
  34. couchpotato/core/notifications/xbmc/__init__.py (8)
  35. couchpotato/core/notifications/xbmc/main.py (30)
  36. couchpotato/core/plugins/automation/__init__.py (2)
  37. couchpotato/core/plugins/automation/main.py (4)
  38. couchpotato/core/plugins/base.py (111)
  39. couchpotato/core/plugins/file/main.py (2)
  40. couchpotato/core/plugins/manage/main.py (14)
  41. couchpotato/core/plugins/quality/main.py (1)
  42. couchpotato/core/plugins/release/main.py (199)
  43. couchpotato/core/plugins/renamer/__init__.py (8)
  44. couchpotato/core/plugins/renamer/main.py (448)
  45. couchpotato/core/plugins/scanner/main.py (8)
  46. couchpotato/core/plugins/score/main.py (4)
  47. couchpotato/core/plugins/subtitle/__init__.py (2)
  48. couchpotato/core/plugins/userscript/main.py (2)
  49. couchpotato/core/plugins/userscript/template.js (2)
  50. couchpotato/core/providers/automation/imdb/__init__.py (2)
  51. couchpotato/core/providers/base.py (45)
  52. couchpotato/core/providers/info/_modifier/main.py (11)
  53. couchpotato/core/providers/info/couchpotatoapi/main.py (4)
  54. couchpotato/core/providers/info/omdbapi/main.py (4)
  55. couchpotato/core/providers/info/themoviedb/main.py (54)
  56. couchpotato/core/providers/metadata/xbmc/main.py (63)
  57. couchpotato/core/providers/nzb/binsearch/main.py (4)
  58. couchpotato/core/providers/nzb/newznab/__init__.py (9)
  59. couchpotato/core/providers/nzb/newznab/main.py (24)
  60. couchpotato/core/providers/torrent/bithdtv/main.py (6)
  61. couchpotato/core/providers/torrent/bitsoup/main.py (9)
  62. couchpotato/core/providers/torrent/hdbits/main.py (11)
  63. couchpotato/core/providers/torrent/ilovetorrents/main.py (8)
  64. couchpotato/core/providers/torrent/iptorrents/main.py (37)
  65. couchpotato/core/providers/torrent/passthepopcorn/main.py (6)
  66. couchpotato/core/providers/torrent/sceneaccess/main.py (6)
  67. couchpotato/core/providers/torrent/torrentbytes/main.py (6)
  68. couchpotato/core/providers/torrent/torrentday/main.py (11)
  69. couchpotato/core/providers/torrent/torrentleech/main.py (6)
  70. couchpotato/core/providers/torrent/torrentshack/main.py (6)
  71. couchpotato/core/providers/torrent/yify/__init__.py (6)
  72. couchpotato/core/providers/torrent/yify/main.py (27)
  73. couchpotato/core/providers/trailer/hdtrailers/main.py (2)
  74. couchpotato/core/providers/userscript/base.py (4)
  75. couchpotato/core/providers/userscript/imdb/main.py (2)
  76. couchpotato/core/providers/userscript/tmdb/main.py (2)
  77. couchpotato/core/settings/__init__.py (4)
  78. couchpotato/environment.py (1)
  79. couchpotato/runner.py (9)
  80. couchpotato/static/scripts/library/mootools_more.js (265)
  81. couchpotato/static/scripts/page/home.js (2)
  82. couchpotato/static/scripts/page/settings.js (62)
  83. couchpotato/static/scripts/page/wanted.js (55)
  84. couchpotato/static/style/settings.css (154)
  85. libs/requests/__init__.py (8)
  86. libs/requests/adapters.py (134)
  87. libs/requests/auth.py (65)
  88. libs/requests/cacert.pem (8212)
  89. libs/requests/compat.py (6)
  90. libs/requests/cookies.py (74)
  91. libs/requests/exceptions.py (10)
  92. libs/requests/models.py (201)
  93. libs/requests/packages/charade/__init__.py (34)
  94. libs/requests/packages/charade/__main__.py (7)
  95. libs/requests/packages/charade/jpcntx.py (2)
  96. libs/requests/packages/charade/latin1prober.py (2)
  97. libs/requests/packages/charade/universaldetector.py (12)
  98. libs/requests/packages/urllib3/__init__.py (2)
  99. libs/requests/packages/urllib3/_collections.py (25)
  100. libs/requests/packages/urllib3/connection.py (107)

28
contributing.md

@@ -1,15 +1,25 @@
#So you feel like posting a bug, sending me a pull request or just telling me how awesome I am. No problem!
## Got an issue/feature request or submitting a pull request?
##Just make sure you think of the following things:
Make sure you think of the following things:
* Search through the existing (and closed) issues first. See if you can get your answer there.
## Issue
* Search through the existing (and closed) issues first, see if you can get your answer there.
* Double check the result manually, because it could be an external issue.
* Post logs! Without seeing what is going on, I can't reproduce the error.
* What is the movie + quality you are searching for.
* What are you settings for the specific problem.
* What providers are you using. (While your logs include these, scanning through hundred of lines of log isn't my hobby).
* Give me a short step by step of how to reproduce.
* Also check the logs before submitting; obvious errors like permission or HTTP errors are often not related to CP.
* What is the movie + quality you are searching for?
* What are your settings for the specific problem?
* What providers are you using? (While your logs include these, scanning through hundreds of lines of log isn't our hobby)
* Post the logs from the config directory; please do not copy-paste from the UI. Use pastebin to store these logs!
* Give a short step by step of how to reproduce the error.
* What hardware / OS are you using and what are the limits? A NAS can be slow and may have a different Python installed than when you use CP on OS X or Windows, for example.
* I will mark issues with the "can't reproduce" tag. Don't go asking me "why closed" if it clearly says the issue in the tag ;)
* I will mark issues with the "can't reproduce" tag. Don't go asking "why closed" if it clearly says the issue in the tag ;)
* If you're running on a NAS (QNAP, Asustor etc.) with pre-made packages, make sure these are set up to use our source repo (RuudBurger/CouchPotatoServer) and nothing else!
**If I don't get enough info, the chance of the issue getting closed is a lot bigger ;)**
## Pull Request
* Make sure your pull request is made against the develop branch (or the relevant feature branch)
* Have you tested your PR? If not, why?
* Are there any limitations of your PR we should know of?
* Make sure to keep your PR up-to-date with the branch you're trying to merge into.
**If we don't get enough info, the chance of the issue getting closed is a lot bigger ;)**

4
couchpotato/core/_base/_core/main.py

@@ -55,6 +55,10 @@ class Core(Plugin):
if not Env.get('desktop'):
self.signalHandler()
# Set default urlopen timeout
import socket
socket.setdefaulttimeout(30)
def md5Password(self, value):
return md5(value) if value else ''
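The new hunk above installs a process-wide socket timeout at startup. A minimal standalone sketch (example URL assumed, not CouchPotato code) of what that default does to any blocking network call that never sets its own timeout:

    import socket
    import urllib2

    socket.setdefaulttimeout(30)  # applies to urllib2/xmlrpclib calls with no explicit timeout

    try:
        urllib2.urlopen('http://example.com/slow-endpoint').read()
    except (socket.timeout, urllib2.URLError):
        # connect failures surface as URLError, mid-read stalls as socket.timeout
        print 'gave up after 30 seconds instead of hanging forever'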

14
couchpotato/core/_base/scheduler/main.py

@@ -17,6 +17,7 @@ class Scheduler(Plugin):
addEvent('schedule.cron', self.cron)
addEvent('schedule.interval', self.interval)
addEvent('schedule.remove', self.remove)
addEvent('schedule.queue', self.queue)
self.sched = Sched(misfire_grace_time = 60)
self.sched.start()
@@ -37,7 +38,7 @@ class Scheduler(Plugin):
def stop(self):
if self.started:
log.debug('Stopping scheduler')
self.sched.shutdown()
self.sched.shutdown(wait = False)
log.debug('Scheduler stopped')
self.started = False
@@ -64,3 +65,14 @@ class Scheduler(Plugin):
'seconds': seconds,
'job': self.sched.add_interval_job(handle, hours = hours, minutes = minutes, seconds = seconds)
}
def queue(self, handlers = None):
if not handlers: handlers = []
for h in handlers:
h()
if self.shuttingDown():
break
return True
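The new `schedule.queue` event runs a list of zero-argument callables back to back, bailing out between items once shutdown starts. This is how it is consumed later in this same commit (see couchpotato/core/media/_base/media/main.py below):

    # handlers are plain callables built elsewhere, e.g. by createRefreshHandler(id)
    handlers = [self.createRefreshHandler(x) for x in ids]
    fireEventAsync('schedule.queue', handlers = handlers)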

13
couchpotato/core/_base/updater/main.py

@@ -32,6 +32,7 @@ class Updater(Plugin):
else:
self.updater = SourceUpdater()
addEvent('app.load', self.logVersion, priority = 10000)
addEvent('app.load', self.setCrons)
addEvent('updater.info', self.info)
@@ -53,6 +54,10 @@
addEvent('setting.save.updater.enabled.after', self.setCrons)
def logVersion(self):
info = self.info()
log.info('=== VERSION %s, using %s ===', (info.get('version', {}).get('repr', 'UNKNOWN'), self.updater.getName()))
def setCrons(self):
fireEvent('schedule.remove', 'updater.check', single = True)
@@ -183,9 +188,6 @@ class GitUpdater(BaseUpdater):
def doUpdate(self):
try:
log.debug('Stashing local changes')
self.repo.saveStash()
log.info('Updating to latest version')
self.repo.pull()
@@ -207,6 +209,7 @@ class GitUpdater(BaseUpdater):
output = self.repo.getHead() # Yes, please
log.debug('Git version output: %s', output.hash)
self.version = {
'repr': 'git:(%s:%s % s) %s (%s)' % (self.repo_user, self.repo_name, self.branch, output.hash[:8], datetime.fromtimestamp(output.getDate())),
'hash': output.hash[:8],
'date': output.getDate(),
'type': 'git',
@@ -234,7 +237,7 @@ class GitUpdater(BaseUpdater):
local = self.repo.getHead()
remote = branch.getHead()
log.info('Versions, local:%s, remote:%s', (local.hash[:8], remote.hash[:8]))
log.debug('Versions, local:%s, remote:%s', (local.hash[:8], remote.hash[:8]))
if local.getDate() < remote.getDate():
self.update_version = {
@@ -362,6 +365,7 @@ class SourceUpdater(BaseUpdater):
log.debug('Source version output: %s', output)
self.version = output
self.version['type'] = 'source'
self.version['repr'] = 'source:(%s:%s % s) %s (%s)' % (self.repo_user, self.repo_name, self.branch, output.get('hash', '')[:8], datetime.fromtimestamp(output.get('date', 0)))
except Exception, e:
log.error('Failed using source updater. %s', e)
return {}
@@ -449,6 +453,7 @@ class DesktopUpdater(BaseUpdater):
def getVersion(self):
return {
'repr': 'desktop: %s' % self.desktop._esky.active_version,
'hash': self.desktop._esky.active_version,
'date': None,
'type': 'desktop',

21
couchpotato/core/downloaders/base.py

@@ -13,6 +13,7 @@ class Downloader(Provider):
protocol = []
http_time_between_calls = 0
status_support = True
torrent_sources = [
'http://torrage.com/torrent/%s.torrent',
@@ -49,22 +50,27 @@ class Downloader(Provider):
return []
def _download(self, data = None, movie = None, manual = False, filedata = None):
if not movie: movie = {}
def _download(self, data = None, media = None, manual = False, filedata = None):
if not media: media = {}
if not data: data = {}
if self.isDisabled(manual, data):
return
return self.download(data = data, movie = movie, filedata = filedata)
return self.download(data = data, media = media, filedata = filedata)
def _getAllDownloadStatus(self):
def _getAllDownloadStatus(self, download_ids):
if self.isDisabled(manual = True, data = {}):
return
return self.getAllDownloadStatus()
ids = [download_id['id'] for download_id in download_ids if download_id['downloader'] == self.getName()]
def getAllDownloadStatus(self):
return
if ids:
return self.getAllDownloadStatus(ids)
else:
return
def getAllDownloadStatus(self, ids):
return []
def _removeFailed(self, release_download):
if self.isDisabled(manual = True, data = {}):
@@ -128,6 +134,7 @@ class Downloader(Provider):
def downloadReturnId(self, download_id):
return {
'downloader': self.getName(),
'status_support': self.status_support,
'id': download_id
}
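`_getAllDownloadStatus` now receives the snatched releases' download info and pre-filters it down to the ids owned by this downloader before calling the subclass. Roughly, with assumed example values (the 'downloader' field holds whatever name downloadReturnId() stored):

    download_ids = [
        {'id': 'a1b2c3', 'downloader': 'Sabnzbd', 'status_support': True},
        {'id': 'd4e5f6', 'downloader': 'Transmission', 'status_support': True},
    ]

    # Inside Sabnzbd._getAllDownloadStatus this reduces to ['a1b2c3'],
    # so getAllDownloadStatus(ids) only has to report on its own items.
    ids = [d['id'] for d in download_ids if d['downloader'] == 'Sabnzbd']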

11
couchpotato/core/downloaders/blackhole/main.py

@@ -11,9 +11,10 @@ log = CPLog(__name__)
class Blackhole(Downloader):
protocol = ['nzb', 'torrent', 'torrent_magnet']
status_support = False
def download(self, data = None, movie = None, filedata = None):
if not movie: movie = {}
def download(self, data = None, media = None, filedata = None):
if not media: media = {}
if not data: data = {}
directory = self.conf('directory')
@@ -33,7 +34,7 @@ class Blackhole(Downloader):
log.error('No nzb/torrent available: %s', data.get('url'))
return False
file_name = self.createFileName(data, filedata, movie)
file_name = self.createFileName(data, filedata, media)
full_path = os.path.join(directory, file_name)
if self.conf('create_subdir'):
@@ -51,10 +52,10 @@ class Blackhole(Downloader):
with open(full_path, 'wb') as f:
f.write(filedata)
os.chmod(full_path, Env.getPermission('file'))
return True
return self.downloadReturnId('')
else:
log.info('File %s already exists.', full_path)
return True
return self.downloadReturnId('')
except:
log.error('Failed to download to blackhole %s', traceback.format_exc())

34
couchpotato/core/downloaders/deluge/main.py

@@ -2,7 +2,7 @@ from base64 import b64encode, b16encode, b32decode
from bencode import bencode as benc, bdecode
from couchpotato.core.downloaders.base import Downloader, ReleaseDownloadList
from couchpotato.core.helpers.encoding import isInt, sp
from couchpotato.core.helpers.variable import tryFloat
from couchpotato.core.helpers.variable import tryFloat, cleanHost
from couchpotato.core.logger import CPLog
from datetime import timedelta
from hashlib import sha1
@@ -22,7 +22,7 @@ class Deluge(Downloader):
def connect(self):
# Load host from config and split out port.
host = self.conf('host').split(':')
host = cleanHost(self.conf('host'), protocol = False).split(':')
if not isInt(host[1]):
log.error('Config properties are not filled in correctly, port is missing.')
return False
@@ -32,7 +32,10 @@
return self.drpc
def download(self, data, movie, filedata = None):
def download(self, data = None, media = None, filedata = None):
if not media: media = {}
if not data: data = {}
log.info('Sending "%s" (%s) to Deluge.', (data.get('name'), data.get('protocol')))
if not self.connect():
@@ -73,7 +76,7 @@
if data.get('protocol') == 'torrent_magnet':
remote_torrent = self.drpc.add_torrent_magnet(data.get('url'), options)
else:
filename = self.createFileName(data, filedata, movie)
filename = self.createFileName(data, filedata, media)
remote_torrent = self.drpc.add_torrent_file(filename, filedata, options)
if not remote_torrent:
@@ -83,25 +86,30 @@
log.info('Torrent sent to Deluge successfully.')
return self.downloadReturnId(remote_torrent)
def getAllDownloadStatus(self):
def getAllDownloadStatus(self, ids):
log.debug('Checking Deluge download status.')
if not self.connect():
return False
return []
release_downloads = ReleaseDownloadList(self)
queue = self.drpc.get_alltorrents()
queue = self.drpc.get_alltorrents(ids)
if not queue:
log.debug('Nothing in queue or error')
return False
return []
for torrent_id in queue:
torrent = queue[torrent_id]
log.debug('name=%s / id=%s / save_path=%s / move_completed_path=%s / hash=%s / progress=%s / state=%s / eta=%s / ratio=%s / stop_ratio=%s / is_seed=%s / is_finished=%s / paused=%s', (torrent['name'], torrent['hash'], torrent['save_path'], torrent['move_completed_path'], torrent['hash'], torrent['progress'], torrent['state'], torrent['eta'], torrent['ratio'], torrent['stop_ratio'], torrent['is_seed'], torrent['is_finished'], torrent['paused']))
if not 'hash' in torrent:
# When given a list of ids, deluge will return an empty item for a non-existant torrent.
continue
log.debug('name=%s / id=%s / save_path=%s / move_on_completed=%s / move_completed_path=%s / hash=%s / progress=%s / state=%s / eta=%s / ratio=%s / stop_ratio=%s / is_seed=%s / is_finished=%s / paused=%s', (torrent['name'], torrent['hash'], torrent['save_path'], torrent['move_on_completed'], torrent['move_completed_path'], torrent['hash'], torrent['progress'], torrent['state'], torrent['eta'], torrent['ratio'], torrent['stop_ratio'], torrent['is_seed'], torrent['is_finished'], torrent['paused']))
# Deluge has no easy way to work out if a torrent is stalled or failing.
#status = 'failed'
status = 'busy'
@@ -117,11 +125,11 @@
download_dir = sp(torrent['save_path'])
if torrent['move_on_completed']:
download_dir = torrent['move_completed_path']
torrent_files = []
for file_item in torrent['files']:
torrent_files.append(sp(os.path.join(download_dir, file_item['path'])))
release_downloads.append({
'id': torrent['hash'],
'name': torrent['name'],
@@ -205,11 +213,11 @@ class DelugeRPC(object):
return torrent_id
def get_alltorrents(self):
def get_alltorrents(self, ids):
ret = False
try:
self.connect()
ret = self.client.core.get_torrents_status({}, {}).get()
ret = self.client.core.get_torrents_status({'id': ids}, ('name', 'hash', 'save_path', 'move_completed_path', 'progress', 'state', 'eta', 'ratio', 'stop_ratio', 'is_seed', 'is_finished', 'paused', 'move_on_completed', 'files')).get()
except Exception, err:
log.error('Failed to get all torrents: %s %s', (err, traceback.format_exc()))
finally:
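`get_alltorrents` now asks the Deluge daemon to filter by hash and return only the fields the status loop actually reads, instead of pulling every torrent with every field. A hedged sketch of the narrowed call, as used above:

    filter_dict = {'id': ids}  # only the torrent hashes CouchPotato snatched
    fields = ('name', 'hash', 'save_path', 'move_completed_path', 'progress', 'state')
    ret = self.client.core.get_torrents_status(filter_dict, fields).get()
    # Unknown hashes come back as empty dicts, hence the
    # "if not 'hash' in torrent: continue" guard in the status loop.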

7
couchpotato/core/downloaders/nzbget/__init__.py

@@ -26,6 +26,13 @@ config = [{
'description': 'Hostname with port. Usually <strong>localhost:6789</strong>',
},
{
'name': 'ssl',
'default': 0,
'type': 'bool',
'advanced': True,
'description': 'Use HyperText Transfer Protocol Secure, or <strong>https</strong>',
},
{
'name': 'username',
'default': 'nzbget',
'advanced': True,

96
couchpotato/core/downloaders/nzbget/main.py

@@ -1,7 +1,7 @@
from base64 import standard_b64encode
from couchpotato.core.downloaders.base import Downloader, ReleaseDownloadList
from couchpotato.core.helpers.encoding import ss, sp
from couchpotato.core.helpers.variable import tryInt, md5
from couchpotato.core.helpers.variable import tryInt, md5, cleanHost
from couchpotato.core.logger import CPLog
from datetime import timedelta
import re
@@ -17,10 +17,10 @@
protocol = ['nzb']
url = 'http://%(username)s:%(password)s@%(host)s/xmlrpc'
rpc = 'xmlrpc'
def download(self, data = None, movie = None, filedata = None):
if not movie: movie = {}
def download(self, data = None, media = None, filedata = None):
if not media: media = {}
if not data: data = {}
if not filedata:
@@ -29,10 +29,11 @@ class NZBGet(Downloader):
log.info('Sending "%s" to NZBGet.', data.get('name'))
url = self.url % {'host': self.conf('host'), 'username': self.conf('username'), 'password': self.conf('password')}
nzb_name = ss('%s.nzb' % self.createNzbName(data, movie))
nzb_name = ss('%s.nzb' % self.createNzbName(data, media))
url = cleanHost(host = self.conf('host'), ssl = self.conf('ssl'), username = self.conf('username'), password = self.conf('password')) + self.rpc
rpc = xmlrpclib.ServerProxy(url)
try:
if rpc.writelog('INFO', 'CouchPotato connected to drop off %s.' % nzb_name):
log.debug('Successfully connected to NZBGet')
@@ -67,13 +68,13 @@
log.error('NZBGet could not add %s to the queue.', nzb_name)
return False
def getAllDownloadStatus(self):
def getAllDownloadStatus(self, ids):
log.debug('Checking NZBGet download status.')
url = self.url % {'host': self.conf('host'), 'username': self.conf('username'), 'password': self.conf('password')}
url = cleanHost(host = self.conf('host'), ssl = self.conf('ssl'), username = self.conf('username'), password = self.conf('password')) + self.rpc
rpc = xmlrpclib.ServerProxy(url)
try:
if rpc.writelog('INFO', 'CouchPotato connected to check status'):
log.debug('Successfully connected to NZBGet')
@@ -81,13 +82,13 @@
log.info('Successfully connected to NZBGet, but unable to send a message')
except socket.error:
log.error('NZBGet is not responding. Please ensure that NZBGet is running and host setting is correct.')
return False
return []
except xmlrpclib.ProtocolError, e:
if e.errcode == 401:
log.error('Password is incorrect.')
else:
log.error('Protocol Error: %s', e)
return False
return []
# Get NZBGet data
try:
@@ -97,56 +98,59 @@
history = rpc.history()
except:
log.error('Failed getting data: %s', traceback.format_exc(1))
return False
return []
release_downloads = ReleaseDownloadList(self)
for nzb in groups:
log.debug('Found %s in NZBGet download queue', nzb['NZBFilename'])
try:
nzb_id = [param['Value'] for param in nzb['Parameters'] if param['Name'] == 'couchpotato'][0]
except:
nzb_id = nzb['NZBID']
timeleft = -1
try:
if nzb['ActiveDownloads'] > 0 and nzb['DownloadRate'] > 0 and not (status['DownloadPaused'] or status['Download2Paused']):
timeleft = str(timedelta(seconds = nzb['RemainingSizeMB'] / status['DownloadRate'] * 2 ^ 20))
except:
pass
release_downloads.append({
'id': nzb_id,
'name': nzb['NZBFilename'],
'original_status': 'DOWNLOADING' if nzb['ActiveDownloads'] > 0 else 'QUEUED',
# Seems to have no native API function for time left. This will return the time left after NZBGet started downloading this item
'timeleft': timeleft,
})
if nzb_id in ids:
log.debug('Found %s in NZBGet download queue', nzb['NZBFilename'])
timeleft = -1
try:
if nzb['ActiveDownloads'] > 0 and nzb['DownloadRate'] > 0 and not (status['DownloadPaused'] or status['Download2Paused']):
timeleft = str(timedelta(seconds = nzb['RemainingSizeMB'] / status['DownloadRate'] * 2 ^ 20))
except:
pass
release_downloads.append({
'id': nzb_id,
'name': nzb['NZBFilename'],
'original_status': 'DOWNLOADING' if nzb['ActiveDownloads'] > 0 else 'QUEUED',
# Seems to have no native API function for time left. This will return the time left after NZBGet started downloading this item
'timeleft': timeleft,
})
for nzb in queue: # 'Parameters' is not passed in rpc.postqueue
log.debug('Found %s in NZBGet postprocessing queue', nzb['NZBFilename'])
release_downloads.append({
'id': nzb['NZBID'],
'name': nzb['NZBFilename'],
'original_status': nzb['Stage'],
'timeleft': str(timedelta(seconds = 0)) if not status['PostPaused'] else -1,
})
if nzb['NZBID'] in ids:
log.debug('Found %s in NZBGet postprocessing queue', nzb['NZBFilename'])
release_downloads.append({
'id': nzb['NZBID'],
'name': nzb['NZBFilename'],
'original_status': nzb['Stage'],
'timeleft': str(timedelta(seconds = 0)) if not status['PostPaused'] else -1,
})
for nzb in history:
log.debug('Found %s in NZBGet history. ParStatus: %s, ScriptStatus: %s, Log: %s', (nzb['NZBFilename'] , nzb['ParStatus'], nzb['ScriptStatus'] , nzb['Log']))
try:
nzb_id = [param['Value'] for param in nzb['Parameters'] if param['Name'] == 'couchpotato'][0]
except:
nzb_id = nzb['NZBID']
release_downloads.append({
'id': nzb_id,
'name': nzb['NZBFilename'],
'status': 'completed' if nzb['ParStatus'] in ['SUCCESS', 'NONE'] and nzb['ScriptStatus'] in ['SUCCESS', 'NONE'] else 'failed',
'original_status': nzb['ParStatus'] + ', ' + nzb['ScriptStatus'],
'timeleft': str(timedelta(seconds = 0)),
'folder': sp(nzb['DestDir'])
})
if nzb_id in ids:
log.debug('Found %s in NZBGet history. ParStatus: %s, ScriptStatus: %s, Log: %s', (nzb['NZBFilename'] , nzb['ParStatus'], nzb['ScriptStatus'] , nzb['Log']))
release_downloads.append({
'id': nzb_id,
'name': nzb['NZBFilename'],
'status': 'completed' if nzb['ParStatus'] in ['SUCCESS', 'NONE'] and nzb['ScriptStatus'] in ['SUCCESS', 'NONE'] else 'failed',
'original_status': nzb['ParStatus'] + ', ' + nzb['ScriptStatus'],
'timeleft': str(timedelta(seconds = 0)),
'folder': sp(nzb['DestDir'])
})
return release_downloads
@@ -154,9 +158,9 @@
log.info('%s failed downloading, deleting...', release_download['name'])
url = self.url % {'host': self.conf('host'), 'username': self.conf('username'), 'password': self.conf('password')}
url = cleanHost(host = self.conf('host'), ssl = self.conf('ssl'), username = self.conf('username'), password = self.conf('password')) + self.rpc
rpc = xmlrpclib.ServerProxy(url)
try:
if rpc.writelog('INFO', 'CouchPotato connected to delete some history'):
log.debug('Successfully connected to NZBGet')
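Throughout this file the hand-rolled `http://user:pass@host/xmlrpc` template is replaced by `cleanHost()` plus the new `rpc` path, with http/https chosen by the new `ssl` setting. With assumed example values (see the cleanHost examples under couchpotato/core/helpers/variable.py further down):

    url = cleanHost(host = 'localhost:6789', ssl = 0,
                    username = 'nzbget', password = 'secret') + 'xmlrpc'
    # -> 'http://nzbget:secret@localhost:6789/xmlrpc'; ssl = 1 gives https://...
    rpc = xmlrpclib.ServerProxy(url)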

10
couchpotato/core/downloaders/nzbvortex/__init__.py

@@ -22,7 +22,15 @@ config = [{
},
{
'name': 'host',
'default': 'https://localhost:4321',
'default': 'localhost:4321',
'description': 'Hostname with port. Usually <strong>localhost:4321</strong>',
},
{
'name': 'ssl',
'default': 1,
'type': 'bool',
'advanced': True,
'description': 'Use HyperText Transfer Protocol Secure, or <strong>https</strong>',
},
{
'name': 'api_key',

56
couchpotato/core/downloaders/nzbvortex/main.py

@@ -8,9 +8,11 @@ from uuid import uuid4
import hashlib
import httplib
import json
import os
import socket
import ssl
import sys
import time
import traceback
import urllib2
@@ -23,44 +25,46 @@ class NZBVortex(Downloader):
api_level = None
session_id = None
def download(self, data = None, movie = None, filedata = None):
if not movie: movie = {}
def download(self, data = None, media = None, filedata = None):
if not media: media = {}
if not data: data = {}
# Send the nzb
try:
nzb_filename = self.createFileName(data, filedata, movie)
self.call('nzb/add', params = {'file': (nzb_filename, filedata)}, multipart = True)
nzb_filename = self.createFileName(data, filedata, media)
self.call('nzb/add', files = {'file': (nzb_filename, filedata)})
time.sleep(10)
raw_statuses = self.call('nzb')
nzb_id = [nzb['id'] for nzb in raw_statuses.get('nzbs', []) if nzb['name'] == nzb_filename][0]
nzb_id = [nzb['id'] for nzb in raw_statuses.get('nzbs', []) if os.path.basename(nzb['nzbFileName']) == nzb_filename][0]
return self.downloadReturnId(nzb_id)
except:
log.error('Something went wrong sending the NZB file: %s', traceback.format_exc())
return False
def getAllDownloadStatus(self):
def getAllDownloadStatus(self, ids):
raw_statuses = self.call('nzb')
release_downloads = ReleaseDownloadList(self)
for nzb in raw_statuses.get('nzbs', []):
# Check status
status = 'busy'
if nzb['state'] == 20:
status = 'completed'
elif nzb['state'] in [21, 22, 24]:
status = 'failed'
release_downloads.append({
'id': nzb['id'],
'name': nzb['uiTitle'],
'status': status,
'original_status': nzb['state'],
'timeleft':-1,
'folder': sp(nzb['destinationPath']),
})
if nzb['id'] in ids:
# Check status
status = 'busy'
if nzb['state'] == 20:
status = 'completed'
elif nzb['state'] in [21, 22, 24]:
status = 'failed'
release_downloads.append({
'id': nzb['id'],
'name': nzb['uiTitle'],
'status': status,
'original_status': nzb['state'],
'timeleft':-1,
'folder': sp(nzb['destinationPath']),
})
return release_downloads
@@ -112,11 +116,10 @@ class NZBVortex(Downloader):
params = tryUrlencode(parameters)
url = cleanHost(self.conf('host')) + 'api/' + call
url_opener = urllib2.build_opener(HTTPSHandler())
url = cleanHost(self.conf('host'), ssl = self.conf('ssl')) + 'api/' + call
try:
data = self.urlopen('%s?%s' % (url, params), opener = url_opener, *args, **kwargs)
data = self.urlopen('%s?%s' % (url, params), *args, **kwargs)
if data:
return json.loads(data)
@@ -138,10 +141,9 @@
if not self.api_level:
url = cleanHost(self.conf('host')) + 'api/app/apilevel'
url_opener = urllib2.build_opener(HTTPSHandler())
try:
data = self.urlopen(url, opener = url_opener, show_error = False)
data = self.urlopen(url, show_error = False)
self.api_level = float(json.loads(data).get('apilevel'))
except URLError, e:
if hasattr(e, 'code') and e.code == 403:
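The `multipart = True` uploads here switch to a `files` parameter, matching the requests-style API the plugin base moves to in this commit (libs/requests is updated alongside). A hedged sketch of the equivalent raw call, not CouchPotato code:

    import requests

    response = requests.post('https://localhost:4321/api/nzb/add',
                             files = {'file': (nzb_filename, filedata)},
                             verify = False)  # assumption: skip cert check for a self-signed local cert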

13
couchpotato/core/downloaders/pneumatic/main.py

@@ -11,9 +11,10 @@ class Pneumatic(Downloader):
protocol = ['nzb']
strm_syntax = 'plugin://plugin.program.pneumatic/?mode=strm&type=add_file&nzb=%s&nzbname=%s'
status_support = False
def download(self, data = None, movie = None, filedata = None):
if not movie: movie = {}
def download(self, data = None, media = None, filedata = None):
if not media: media = {}
if not data: data = {}
directory = self.conf('directory')
@@ -25,7 +26,7 @@
log.error('No nzb available!')
return False
fullPath = os.path.join(directory, self.createFileName(data, filedata, movie))
fullPath = os.path.join(directory, self.createFileName(data, filedata, media))
try:
if not os.path.isfile(fullPath):
@@ -33,7 +34,7 @@
with open(fullPath, 'wb') as f:
f.write(filedata)
nzb_name = self.createNzbName(data, movie)
nzb_name = self.createNzbName(data, media)
strm_path = os.path.join(directory, nzb_name)
strm_file = open(strm_path + '.strm', 'wb')
@@ -41,11 +42,11 @@
strm_file.write(strmContent)
strm_file.close()
return True
return self.downloadReturnId('')
else:
log.info('File %s already exists.', fullPath)
return True
return self.downloadReturnId('')
except:
log.error('Failed to download .strm: %s', traceback.format_exc())

29
couchpotato/core/downloaders/rtorrent/__init__.py

@@ -20,11 +20,32 @@ config = [{
'type': 'enabler',
'radio_group': 'torrent',
},
# @RuudBurger: How do I migrate this?
# {
# 'name': 'url',
# 'default': 'http://localhost:80/RPC2',
# 'description': 'XML-RPC Endpoint URI. Usually <strong>scgi://localhost:5000</strong> '
# 'or <strong>http://localhost:80/RPC2</strong>'
# },
{
'name': 'url',
'default': 'http://localhost:80/RPC2',
'description': 'XML-RPC Endpoint URI. Usually <strong>scgi://localhost:5000</strong> '
'or <strong>http://localhost:80/RPC2</strong>'
'name': 'host',
'default': 'localhost:80',
'description': 'Hostname with port or XML-RPC Endpoint URI. Usually <strong>scgi://localhost:5000</strong> '
'or <strong>localhost:80</strong>'
},
{
'name': 'ssl',
'default': 0,
'type': 'bool',
'advanced': True,
'description': 'Use HyperText Transfer Protocol Secure, or <strong>https</strong>',
},
{
'name': 'rpc_url',
'type': 'string',
'default': 'RPC2',
'advanced': True,
'description': 'Change if you don\'t run rTorrent RPC at the default url.',
},
{
'name': 'username',

83
couchpotato/core/downloaders/rtorrent/main.py

@@ -1,7 +1,9 @@
from base64 import b16encode, b32decode
from bencode import bencode, bdecode
from couchpotato.core.downloaders.base import Downloader, ReleaseDownloadList
from couchpotato.core.event import fireEvent, addEvent
from couchpotato.core.helpers.encoding import sp
from couchpotato.core.helpers.variable import cleanHost, splitString
from couchpotato.core.logger import CPLog
from datetime import timedelta
from hashlib import sha1
@@ -17,24 +19,39 @@ class rTorrent(Downloader):
protocol = ['torrent', 'torrent_magnet']
rt = None
# Migration url to host options
def __init__(self):
super(rTorrent, self).__init__()
addEvent('app.load', self.migrate)
def migrate(self):
url = self.conf('url')
if url:
host_split = splitString(url.split('://')[-1], split_on = '/')
self.conf('ssl', value = url.startswith('https'))
self.conf('host', value = host_split[0].strip())
self.conf('rpc_url', value = '/'.join(host_split[1:]))
self.deleteConf('url')
def connect(self):
# Already connected?
if self.rt is not None:
return self.rt
# Ensure url is set
if not self.conf('url'):
log.error('Config properties are not filled in correctly, url is missing.')
return False
url = cleanHost(self.conf('host'), protocol = True, ssl = self.conf('ssl')) + '/' + self.conf('rpc_url').strip('/ ') + '/'
if self.conf('username') and self.conf('password'):
self.rt = RTorrent(
self.conf('url'),
url,
self.conf('username'),
self.conf('password')
)
else:
self.rt = RTorrent(self.conf('url'))
self.rt = RTorrent(url)
return self.rt
@@ -77,7 +94,10 @@
return True
def download(self, data, movie, filedata = None):
def download(self, data = None, media = None, filedata = None):
if not media: media = {}
if not data: data = {}
log.debug('Sending "%s" to rTorrent.', (data.get('name')))
if not self.connect():
@@ -140,11 +160,11 @@
log.error('Failed to send torrent to rTorrent: %s', err)
return False
def getAllDownloadStatus(self):
def getAllDownloadStatus(self, ids):
log.debug('Checking rTorrent download status.')
if not self.connect():
return False
return []
try:
torrents = self.rt.get_torrents()
@@ -152,33 +172,34 @@
release_downloads = ReleaseDownloadList(self)
for torrent in torrents:
torrent_files = []
for file_item in torrent.get_files():
torrent_files.append(sp(os.path.join(torrent.directory, file_item.path)))
status = 'busy'
if torrent.complete:
if torrent.active:
status = 'seeding'
else:
status = 'completed'
release_downloads.append({
'id': torrent.info_hash,
'name': torrent.name,
'status': status,
'seed_ratio': torrent.ratio,
'original_status': torrent.state,
'timeleft': str(timedelta(seconds = float(torrent.left_bytes) / torrent.down_rate)) if torrent.down_rate > 0 else -1,
'folder': sp(torrent.directory),
'files': '|'.join(torrent_files)
})
if torrent.info_hash in ids:
torrent_files = []
for file_item in torrent.get_files():
torrent_files.append(sp(os.path.join(torrent.directory, file_item.path)))
status = 'busy'
if torrent.complete:
if torrent.active:
status = 'seeding'
else:
status = 'completed'
release_downloads.append({
'id': torrent.info_hash,
'name': torrent.name,
'status': status,
'seed_ratio': torrent.ratio,
'original_status': torrent.state,
'timeleft': str(timedelta(seconds = float(torrent.left_bytes) / torrent.down_rate)) if torrent.down_rate > 0 else -1,
'folder': sp(torrent.directory),
'files': '|'.join(torrent_files)
})
return release_downloads
except Exception, err:
log.error('Failed to get status from rTorrent: %s', err)
return False
return []
def pause(self, release_download, pause = True):
if not self.connect():
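The new `migrate()` hook splits the old single `url` setting into the `host`/`ssl`/`rpc_url` trio on first load. Tracing it with an assumed legacy value:

    url = 'https://seedbox.example.com:443/RPC2'
    host_split = splitString(url.split('://')[-1], split_on = '/')
    # ssl     -> True ('https' prefix)
    # host    -> 'seedbox.example.com:443' (host_split[0])
    # rpc_url -> 'RPC2' ('/'.join(host_split[1:]))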

7
couchpotato/core/downloaders/sabnzbd/__init__.py

@@ -25,6 +25,13 @@ config = [{
'default': 'localhost:8080',
},
{
'name': 'ssl',
'default': 0,
'type': 'bool',
'advanced': True,
'description': 'Use HyperText Transfer Protocol Secure, or <strong>https</strong>',
},
{
'name': 'api_key',
'label': 'Api Key',
'description': 'Used for all calls to Sabnzbd.',

71
couchpotato/core/downloaders/sabnzbd/main.py

@@ -16,8 +16,8 @@ class Sabnzbd(Downloader):
protocol = ['nzb']
def download(self, data = None, movie = None, filedata = None):
if not movie: movie = {}
def download(self, data = None, media = None, filedata = None):
if not media: media = {}
if not data: data = {}
log.info('Sending "%s" to SABnzbd.', data.get('name'))
@@ -25,7 +25,7 @@ class Sabnzbd(Downloader):
req_params = {
'cat': self.conf('category'),
'mode': 'addurl',
'nzbname': self.createNzbName(data, movie),
'nzbname': self.createNzbName(data, media),
'priority': self.conf('priority'),
}
@@ -36,14 +36,14 @@
return False
# If it's a .rar, it adds the .rar extension, otherwise it stays .nzb
nzb_filename = self.createFileName(data, filedata, movie)
nzb_filename = self.createFileName(data, filedata, media)
req_params['mode'] = 'addfile'
else:
req_params['name'] = data.get('url')
try:
if nzb_filename and req_params.get('mode') is 'addfile':
sab_data = self.call(req_params, params = {'nzbfile': (ss(nzb_filename), filedata)}, multipart = True)
sab_data = self.call(req_params, files = {'nzbfile': (ss(nzb_filename), filedata)})
else:
sab_data = self.call(req_params)
except URLError:
@@ -64,7 +64,7 @@
log.error('Error getting data from SABNZBd: %s', sab_data)
return False
def getAllDownloadStatus(self):
def getAllDownloadStatus(self, ids):
log.debug('Checking SABnzbd download status.')
@@ -75,7 +75,7 @@
})
except:
log.error('Failed getting queue: %s', traceback.format_exc(1))
return False
return []
# Go through history items
try:
@@ -85,41 +85,42 @@
})
except:
log.error('Failed getting history json: %s', traceback.format_exc(1))
return False
return []
release_downloads = ReleaseDownloadList(self)
# Get busy releases
for nzb in queue.get('slots', []):
status = 'busy'
if 'ENCRYPTED / ' in nzb['filename']:
status = 'failed'
release_downloads.append({
'id': nzb['nzo_id'],
'name': nzb['filename'],
'status': status,
'original_status': nzb['status'],
'timeleft': nzb['timeleft'] if not queue['paused'] else -1,
})
if nzb['nzo_id'] in ids:
status = 'busy'
if 'ENCRYPTED / ' in nzb['filename']:
status = 'failed'
release_downloads.append({
'id': nzb['nzo_id'],
'name': nzb['filename'],
'status': status,
'original_status': nzb['status'],
'timeleft': nzb['timeleft'] if not queue['paused'] else -1,
})
# Get old releases
for nzb in history.get('slots', []):
status = 'busy'
if nzb['status'] == 'Failed' or (nzb['status'] == 'Completed' and nzb['fail_message'].strip()):
status = 'failed'
elif nzb['status'] == 'Completed':
status = 'completed'
release_downloads.append({
'id': nzb['nzo_id'],
'name': nzb['name'],
'status': status,
'original_status': nzb['status'],
'timeleft': str(timedelta(seconds = 0)),
'folder': sp(os.path.dirname(nzb['storage']) if os.path.isfile(nzb['storage']) else nzb['storage']),
})
if nzb['nzo_id'] in ids:
status = 'busy'
if nzb['status'] == 'Failed' or (nzb['status'] == 'Completed' and nzb['fail_message'].strip()):
status = 'failed'
elif nzb['status'] == 'Completed':
status = 'completed'
release_downloads.append({
'id': nzb['nzo_id'],
'name': nzb['name'],
'status': status,
'original_status': nzb['status'],
'timeleft': str(timedelta(seconds = 0)),
'folder': sp(os.path.dirname(nzb['storage']) if os.path.isfile(nzb['storage']) else nzb['storage']),
})
return release_downloads
@@ -164,7 +165,7 @@ class Sabnzbd(Downloader):
def call(self, request_params, use_json = True, **kwargs):
url = cleanHost(self.conf('host')) + 'api?' + tryUrlencode(mergeDicts(request_params, {
url = cleanHost(self.conf('host'), ssl = self.conf('ssl')) + 'api?' + tryUrlencode(mergeDicts(request_params, {
'apikey': self.conf('api_key'),
'output': 'json'
}))
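One pre-existing line kept above, `req_params.get('mode') is 'addfile'`, compares strings with `is`. That only works because CPython happens to intern short literals; value comparison is the reliable spelling:

    'addfile' is 'addfile'                   # usually True, but an implementation detail
    ''.join(['add', 'file']) is 'addfile'    # False: same value, different object
    ''.join(['add', 'file']) == 'addfile'    # True: use == for string comparison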

11
couchpotato/core/downloaders/synology/main.py

@@ -1,5 +1,6 @@
from couchpotato.core.downloaders.base import Downloader
from couchpotato.core.helpers.encoding import isInt
from couchpotato.core.helpers.variable import cleanHost
from couchpotato.core.logger import CPLog
import json
import requests
@@ -11,17 +12,17 @@ log = CPLog(__name__)
class Synology(Downloader):
protocol = ['nzb', 'torrent', 'torrent_magnet']
log = CPLog(__name__)
status_support = False
def download(self, data = None, movie = None, filedata = None):
if not movie: movie = {}
def download(self, data = None, media = None, filedata = None):
if not media: media = {}
if not data: data = {}
response = False
log.error('Sending "%s" (%s) to Synology.', (data['name'], data['protocol']))
# Load host from config and split out port.
host = self.conf('host').split(':')
host = cleanHost(self.conf('host'), protocol = False).split(':')
if not isInt(host[1]):
log.error('Config properties are not filled in correctly, port is missing.')
return False
@@ -42,7 +43,7 @@ class Synology(Downloader):
except:
log.error('Exception while adding torrent: %s', traceback.format_exc())
finally:
return response
return self.downloadReturnId('') if response else False
def getEnabledProtocol(self):
if self.conf('use_for') == 'both':

73
couchpotato/core/downloaders/transmission/main.py

@@ -1,7 +1,7 @@
from base64 import b64encode
from couchpotato.core.downloaders.base import Downloader, ReleaseDownloadList
from couchpotato.core.helpers.encoding import isInt, sp
from couchpotato.core.helpers.variable import tryInt, tryFloat
from couchpotato.core.helpers.variable import tryInt, tryFloat, cleanHost
from couchpotato.core.logger import CPLog
from datetime import timedelta
import httplib
@@ -21,17 +21,19 @@ class Transmission(Downloader):
def connect(self):
# Load host from config and split out port.
host = self.conf('host').split(':')
host = cleanHost(self.conf('host'), protocol = False).split(':')
if not isInt(host[1]):
log.error('Config properties are not filled in correctly, port is missing.')
return False
if not self.trpc:
self.trpc = TransmissionRPC(host[0], port = host[1], rpc_url = self.conf('rpc_url'), username = self.conf('username'), password = self.conf('password'))
self.trpc = TransmissionRPC(host[0], port = host[1], rpc_url = self.conf('rpc_url').strip('/ '), username = self.conf('username'), password = self.conf('password'))
return self.trpc
def download(self, data, movie, filedata = None):
def download(self, data = None, media = None, filedata = None):
if not media: media = {}
if not data: data = {}
log.info('Sending "%s" (%s) to Transmission.', (data.get('name'), data.get('protocol')))
@@ -81,12 +83,12 @@
log.info('Torrent sent to Transmission successfully.')
return self.downloadReturnId(remote_torrent['torrent-added']['hashString'])
def getAllDownloadStatus(self):
def getAllDownloadStatus(self, ids):
log.debug('Checking Transmission download status.')
if not self.connect():
return False
return []
release_downloads = ReleaseDownloadList(self)
@@ -94,37 +96,44 @@
'fields': ['id', 'name', 'hashString', 'percentDone', 'status', 'eta', 'isStalled', 'isFinished', 'downloadDir', 'uploadRatio', 'secondsSeeding', 'seedIdleLimit', 'files']
}
session = self.trpc.get_session()
queue = self.trpc.get_alltorrents(return_params)
if not (queue and queue.get('torrents')):
log.debug('Nothing in queue or error')
return False
return []
for torrent in queue['torrents']:
log.debug('name=%s / id=%s / downloadDir=%s / hashString=%s / percentDone=%s / status=%s / eta=%s / uploadRatio=%s / isFinished=%s',
(torrent['name'], torrent['id'], torrent['downloadDir'], torrent['hashString'], torrent['percentDone'], torrent['status'], torrent['eta'], torrent['uploadRatio'], torrent['isFinished']))
torrent_files = []
for file_item in torrent['files']:
torrent_files.append(sp(os.path.join(torrent['downloadDir'], file_item['name'])))
status = 'busy'
if torrent.get('isStalled') and self.conf('stalled_as_failed'):
status = 'failed'
elif torrent['status'] == 0 and torrent['percentDone'] == 1:
status = 'completed'
elif torrent['status'] in [5, 6]:
status = 'seeding'
release_downloads.append({
'id': torrent['hashString'],
'name': torrent['name'],
'status': status,
'original_status': torrent['status'],
'seed_ratio': torrent['uploadRatio'],
'timeleft': str(timedelta(seconds = torrent['eta'])),
'folder': sp(torrent['downloadDir'] if len(torrent_files) == 1 else os.path.join(torrent['downloadDir'], torrent['name'])),
'files': '|'.join(torrent_files)
})
if torrent['hashString'] in ids:
log.debug('name=%s / id=%s / downloadDir=%s / hashString=%s / percentDone=%s / status=%s / isStalled=%s / eta=%s / uploadRatio=%s / isFinished=%s / incomplete-dir-enabled=%s / incomplete-dir=%s',
(torrent['name'], torrent['id'], torrent['downloadDir'], torrent['hashString'], torrent['percentDone'], torrent['status'], torrent.get('isStalled', 'N/A'), torrent['eta'], torrent['uploadRatio'], torrent['isFinished'], session['incomplete-dir-enabled'], session['incomplete-dir']))
status = 'busy'
if torrent.get('isStalled') and not torrent['percentDone'] == 1 and self.conf('stalled_as_failed'):
status = 'failed'
elif torrent['status'] == 0 and torrent['percentDone'] == 1:
status = 'completed'
elif torrent['status'] in [5, 6]:
status = 'seeding'
if session['incomplete-dir-enabled'] and status == 'busy':
torrent_folder = session['incomplete-dir']
else:
torrent_folder = torrent['downloadDir']
torrent_files = []
for file_item in torrent['files']:
torrent_files.append(sp(os.path.join(torrent_folder, file_item['name'])))
release_downloads.append({
'id': torrent['hashString'],
'name': torrent['name'],
'status': status,
'original_status': torrent['status'],
'seed_ratio': torrent['uploadRatio'],
'timeleft': str(timedelta(seconds = torrent['eta'])),
'folder': sp(torrent_folder if len(torrent_files) == 1 else os.path.join(torrent_folder, torrent['name'])),
'files': '|'.join(torrent_files)
})
return release_downloads

142
couchpotato/core/downloaders/utorrent/main.py

@@ -2,7 +2,7 @@ from base64 import b16encode, b32decode
from bencode import bencode as benc, bdecode
from couchpotato.core.downloaders.base import Downloader, ReleaseDownloadList
from couchpotato.core.helpers.encoding import isInt, ss, sp
from couchpotato.core.helpers.variable import tryInt, tryFloat
from couchpotato.core.helpers.variable import tryInt, tryFloat, cleanHost
from couchpotato.core.logger import CPLog
from datetime import timedelta
from hashlib import sha1
@@ -24,10 +24,20 @@ class uTorrent(Downloader):
protocol = ['torrent', 'torrent_magnet']
utorrent_api = None
status_flags = {
'STARTED' : 1,
'CHECKING' : 2,
'CHECK-START' : 4,
'CHECKED' : 8,
'ERROR' : 16,
'PAUSED' : 32,
'QUEUED' : 64,
'LOADED' : 128
}
def connect(self):
# Load host from config and split out port.
host = self.conf('host').split(':')
host = cleanHost(self.conf('host'), protocol = False).split(':')
if not isInt(host[1]):
log.error('Config properties are not filled in correctly, port is missing.')
return False
@@ -36,11 +46,11 @@
return self.utorrent_api
def download(self, data = None, movie = None, filedata = None):
if not movie: movie = {}
def download(self, data = None, media = None, filedata = None):
if not media: media = {}
if not data: data = {}
log.debug('Sending "%s" (%s) to uTorrent.', (data.get('name'), data.get('protocol')))
log.debug("Sending '%s' (%s) to uTorrent.", (data.get('name'), data.get('protocol')))
if not self.connect():
return False
@@ -75,10 +85,10 @@
torrent_hash = re.findall('urn:btih:([\w]{32,40})', data.get('url'))[0].upper()
torrent_params['trackers'] = '%0D%0A%0D%0A'.join(self.torrent_trackers)
else:
info = bdecode(filedata)["info"]
info = bdecode(filedata)['info']
torrent_hash = sha1(benc(info)).hexdigest().upper()
torrent_filename = self.createFileName(data, filedata, movie)
torrent_filename = self.createFileName(data, filedata, media)
if data.get('seed_ratio'):
torrent_params['seed_override'] = 1
@@ -105,72 +115,62 @@
return self.downloadReturnId(torrent_hash)
def getAllDownloadStatus(self):
def getAllDownloadStatus(self, ids):
log.debug('Checking uTorrent download status.')
if not self.connect():
return False
return []
release_downloads = ReleaseDownloadList(self)
data = self.utorrent_api.get_status()
if not data:
log.error('Error getting data from uTorrent')
return False
return []
queue = json.loads(data)
if queue.get('error'):
log.error('Error getting data from uTorrent: %s', queue.get('error'))
return False
return []
if not queue.get('torrents'):
log.debug('Nothing in queue')
return False
return []
# Get torrents
for torrent in queue['torrents']:
#Get files of the torrent
torrent_files = []
try:
torrent_files = json.loads(self.utorrent_api.get_files(torrent[0]))
torrent_files = [sp(os.path.join(torrent[26], torrent_file[0])) for torrent_file in torrent_files['files'][1]]
except:
log.debug('Failed getting files from torrent: %s', torrent[2])
status_flags = {
"STARTED" : 1,
"CHECKING" : 2,
"CHECK-START" : 4,
"CHECKED" : 8,
"ERROR" : 16,
"PAUSED" : 32,
"QUEUED" : 64,
"LOADED" : 128
}
status = 'busy'
if (torrent[1] & status_flags["STARTED"] or torrent[1] & status_flags["QUEUED"]) and torrent[4] == 1000:
status = 'seeding'
elif (torrent[1] & status_flags["ERROR"]):
status = 'failed'
elif torrent[4] == 1000:
status = 'completed'
if not status == 'busy':
self.removeReadOnly(torrent_files)
release_downloads.append({
'id': torrent[0],
'name': torrent[2],
'status': status,
'seed_ratio': float(torrent[7]) / 1000,
'original_status': torrent[1],
'timeleft': str(timedelta(seconds = torrent[10])),
'folder': sp(torrent[26]),
'files': '|'.join(torrent_files)
})
if torrent[0] in ids:
#Get files of the torrent
torrent_files = []
try:
torrent_files = json.loads(self.utorrent_api.get_files(torrent[0]))
torrent_files = [sp(os.path.join(torrent[26], torrent_file[0])) for torrent_file in torrent_files['files'][1]]
except:
log.debug('Failed getting files from torrent: %s', torrent[2])
status = 'busy'
if (torrent[1] & self.status_flags['STARTED'] or torrent[1] & self.status_flags['QUEUED']) and torrent[4] == 1000:
status = 'seeding'
elif (torrent[1] & self.status_flags['ERROR']):
status = 'failed'
elif torrent[4] == 1000:
status = 'completed'
if not status == 'busy':
self.removeReadOnly(torrent_files)
release_downloads.append({
'id': torrent[0],
'name': torrent[2],
'status': status,
'seed_ratio': float(torrent[7]) / 1000,
'original_status': torrent[1],
'timeleft': str(timedelta(seconds = torrent[10])),
'folder': sp(torrent[26]),
'files': '|'.join(torrent_files)
})
return release_downloads
@@ -223,7 +223,7 @@ class uTorrentAPI(object):
if time.time() > self.last_time + 1800:
self.last_time = time.time()
self.token = self.get_token()
request = urllib2.Request(self.url + "?token=" + self.token + "&" + action, data)
request = urllib2.Request(self.url + '?token=' + self.token + '&' + action, data)
try:
open_request = self.opener.open(request)
response = open_request.read()
@@ -243,52 +243,52 @@ class uTorrentAPI(object):
return False
def get_token(self):
request = self.opener.open(self.url + "token.html")
token = re.findall("<div.*?>(.*?)</", request.read())[0]
request = self.opener.open(self.url + 'token.html')
token = re.findall('<div.*?>(.*?)</', request.read())[0]
return token
def add_torrent_uri(self, filename, torrent, add_folder = False):
action = "action=add-url&s=%s" % urllib.quote(torrent)
action = 'action=add-url&s=%s' % urllib.quote(torrent)
if add_folder:
action += "&path=%s" % urllib.quote(filename)
action += '&path=%s' % urllib.quote(filename)
return self._request(action)
def add_torrent_file(self, filename, filedata, add_folder = False):
action = "action=add-file"
action = 'action=add-file'
if add_folder:
action += "&path=%s" % urllib.quote(filename)
return self._request(action, {"torrent_file": (ss(filename), filedata)})
action += '&path=%s' % urllib.quote(filename)
return self._request(action, {'torrent_file': (ss(filename), filedata)})
def set_torrent(self, hash, params):
action = "action=setprops&hash=%s" % hash
action = 'action=setprops&hash=%s' % hash
for k, v in params.iteritems():
action += "&s=%s&v=%s" % (k, v)
action += '&s=%s&v=%s' % (k, v)
return self._request(action)
def pause_torrent(self, hash, pause = True):
if pause:
action = "action=pause&hash=%s" % hash
action = 'action=pause&hash=%s' % hash
else:
action = "action=unpause&hash=%s" % hash
action = 'action=unpause&hash=%s' % hash
return self._request(action)
def stop_torrent(self, hash):
action = "action=stop&hash=%s" % hash
action = 'action=stop&hash=%s' % hash
return self._request(action)
def remove_torrent(self, hash, remove_data = False):
if remove_data:
action = "action=removedata&hash=%s" % hash
action = 'action=removedata&hash=%s' % hash
else:
action = "action=remove&hash=%s" % hash
action = 'action=remove&hash=%s' % hash
return self._request(action)
def get_status(self):
action = "list=1"
action = 'list=1'
return self._request(action)
def get_settings(self):
action = "action=getsettings"
action = 'action=getsettings'
settings_dict = {}
try:
utorrent_settings = json.loads(self._request(action))
@@ -320,5 +320,5 @@ class uTorrentAPI(object):
return self._request(action)
def get_files(self, hash):
action = "action=getfiles&hash=%s" % hash
action = 'action=getfiles&hash=%s' % hash
return self._request(action)
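Hoisting `status_flags` to a class attribute avoids rebuilding the dict for every torrent. uTorrent reports status as a bitfield in `torrent[1]`, so individual states are tested with a bitwise AND. A small example with an assumed value:

    status_flags = {'STARTED': 1, 'CHECKING': 2, 'ERROR': 16, 'PAUSED': 32, 'QUEUED': 64, 'LOADED': 128}
    value = 201  # 128 + 64 + 8 + 1: LOADED | QUEUED | CHECKED | STARTED
    active = [name for name, bit in status_flags.items() if value & bit]
    # -> ['STARTED', 'QUEUED', 'LOADED'] (dict iteration order not guaranteed)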

14
couchpotato/core/helpers/encoding.py

@@ -54,11 +54,23 @@ def sp(path, *args):
if not path or len(path) == 0:
return path
path = os.path.normcase(os.path.normpath(ss(path, *args)))
# convert windows path (from remote box) to *nix path
if os.path.sep == '/' and '\\' in path:
path = '/' + path.replace(':', '').replace('\\', '/')
path = os.path.normpath(ss(path, *args))
# Remove any trailing path separators
if path != os.path.sep:
path = path.rstrip(os.path.sep)
# Add a trailing separator in case it is a root folder on windows (crashes guessit)
if len(path) == 2 and path[1] == ':':
path = path + os.path.sep
# Replace *NIX ambiguous '//' at the beginning of a path with '/' (crashes guessit)
path = re.sub('^//', '/', path)
return path
def ek(original, *args):
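Examples (assumed inputs) of what the reworked `sp()` now yields, per the comments above:

    sp('D:\\Movies\\Finished\\')  # on *nix -> '/D/Movies/Finished' (Windows path from a remote box)
    sp('//mnt/movies')            # on *nix -> '/mnt/movies' (leading '//' crashes guessit)
    sp('D:')                      # on Windows -> 'D:\\' (bare drive roots also trip up guessit)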

29
couchpotato/core/helpers/variable.py

@@ -2,7 +2,7 @@ from couchpotato.core.helpers.encoding import simplifyString, toSafeString, ss
from couchpotato.core.logger import CPLog
import collections
import hashlib
import os.path
import os
import platform
import random
import re
@@ -11,6 +11,9 @@ import sys
log = CPLog(__name__)
def fnEscape(pattern):
return pattern.replace('[','[[').replace(']','[]]').replace('[[','[[]')
def link(src, dst):
if os.name == 'nt':
import ctypes
@@ -115,12 +118,22 @@ def isLocalIP(ip):
def getExt(filename):
return os.path.splitext(filename)[1][1:]
def cleanHost(host):
if not host.startswith(('http://', 'https://')):
host = 'http://' + host
def cleanHost(host, protocol = True, ssl = False, username = None, password = None):
if not '://' in host and protocol:
host = ('https://' if ssl else 'http://') + host
if not protocol:
host = host.split('://', 1)[-1]
host = host.rstrip('/')
host += '/'
if protocol and username and password:
login = '%s:%s@' % (username, password)
if not login in host:
host = host.replace('://', '://' + login, 1)
host = host.rstrip('/ ')
if protocol:
host += '/'
return host
@@ -216,3 +229,7 @@ def splitString(str, split_on = ',', clean = True):
def dictIsSubset(a, b):
return all([k in b and b[k] == v for k, v in a.items()])
def isSubFolder(sub_folder, base_folder):
# Returns True if sub_folder is the same as or inside base_folder
return base_folder and sub_folder and os.path.normpath(base_folder).rstrip(os.path.sep) + os.path.sep in os.path.normpath(sub_folder).rstrip(os.path.sep) + os.path.sep
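With assumed example values, the extended `cleanHost()` and the new `isSubFolder()` behave like this:

    cleanHost('localhost:8080')                           # -> 'http://localhost:8080/'
    cleanHost('localhost:6789', ssl = True)               # -> 'https://localhost:6789/'
    cleanHost('https://x:443/base', protocol = False)     # -> 'x:443/base'
    cleanHost('host:80', username = 'u', password = 'p')  # -> 'http://u:p@host:80/'

    isSubFolder('/downloads/movies', '/downloads')        # -> True
    isSubFolder('/downloads-tmp', '/downloads')           # -> False (no plain-substring trap)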

10
couchpotato/core/media/__init__.py

@@ -28,9 +28,12 @@ class MediaBase(Plugin):
def onComplete():
db = get_session()
media = db.query(Media).filter_by(id = id).first()
fireEventAsync('%s.searcher.single' % media.type, media.to_dict(self.default_dict), on_complete = self.createNotifyFront(id))
media_dict = media.to_dict(self.default_dict)
event_name = '%s.searcher.single' % media.type
db.expire_all()
fireEvent(event_name, media_dict, on_complete = self.createNotifyFront(id))
return onComplete
def createNotifyFront(self, media_id):
@@ -38,7 +41,10 @@ class MediaBase(Plugin):
def notifyFront():
db = get_session()
media = db.query(Media).filter_by(id = media_id).first()
fireEvent('notify.frontend', type = '%s.update' % media.type, data = media.to_dict(self.default_dict))
media_dict = media.to_dict(self.default_dict)
event_name = '%s.update' % media.type
db.expire_all()
fireEvent('notify.frontend', type = event_name, data = media_dict)
return notifyFront
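The two hunks above share one pattern: copy what you need out of the ORM row, expire the SQLAlchemy session, and only then fire the event, so the handler never touches session state owned by another thread. In isolation (a hedged sketch, names as used above):

    db = get_session()
    media = db.query(Media).filter_by(id = media_id).first()
    media_dict = media.to_dict()       # detach plain data from the ORM object first...
    event_name = '%s.update' % media.type
    db.expire_all()                    # ...then drop cached state before handing off
    fireEvent('notify.frontend', type = event_name, data = media_dict)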

454
couchpotato/core/media/_base/media/main.py

@@ -1,10 +1,15 @@
from couchpotato import get_session
from couchpotato import get_session, tryInt
from couchpotato.api import addApiView
from couchpotato.core.event import fireEvent, fireEventAsync, addEvent
from couchpotato.core.helpers.variable import splitString
from couchpotato.core.helpers.encoding import toUnicode
from couchpotato.core.helpers.variable import mergeDicts, splitString, getImdb, getTitle
from couchpotato.core.logger import CPLog
from couchpotato.core.media import MediaBase
from couchpotato.core.settings.model import Media
from couchpotato.core.settings.model import Library, LibraryTitle, Release, \
Media
from sqlalchemy.orm import joinedload_all
from sqlalchemy.sql.expression import or_, asc, not_, desc
from string import ascii_lowercase
log = CPLog(__name__)
@@ -20,30 +25,449 @@ class MediaPlugin(MediaBase):
}
})
addEvent('app.load', self.addSingleRefresh)
addApiView('media.list', self.listView, docs = {
'desc': 'List media',
'params': {
'type': {'type': 'string', 'desc': 'Media type to filter on.'},
'status': {'type': 'array or csv', 'desc': 'Filter movie by status. Example:"active,done"'},
'release_status': {'type': 'array or csv', 'desc': 'Filter movie by status of its releases. Example:"snatched,available"'},
'limit_offset': {'desc': 'Limit and offset the movie list. Examples: "50" or "50,30"'},
'starts_with': {'desc': 'Starts with these characters. Example: "a" returns all movies starting with the letter "a"'},
'search': {'desc': 'Search movie title'},
},
'return': {'type': 'object', 'example': """{
'success': True,
'empty': bool, any movies returned or not,
'media': array, media found,
}"""}
})
addApiView('media.get', self.getView, docs = {
'desc': 'Get media by id',
'params': {
'id': {'desc': 'The id of the media'},
}
})
addApiView('media.delete', self.deleteView, docs = {
'desc': 'Delete media from the wanted list',
'params': {
'id': {'desc': 'Media ID(s) you want to delete.', 'type': 'int (comma separated)'},
'delete_from': {'desc': 'Delete media from this page', 'type': 'string: all (default), wanted, manage'},
}
})
addApiView('media.available_chars', self.charView)
addEvent('app.load', self.addSingleRefreshView)
addEvent('app.load', self.addSingleListView)
addEvent('app.load', self.addSingleCharView)
addEvent('app.load', self.addSingleDeleteView)
addEvent('media.get', self.get)
addEvent('media.list', self.list)
addEvent('media.delete', self.delete)
addEvent('media.restatus', self.restatus)
def refresh(self, id = '', **kwargs):
db = get_session()
for x in splitString(id):
media = db.query(Media).filter_by(id = x).first()
handlers = []
ids = splitString(id)
if media:
# Get current selected title
default_title = ''
for title in media.library.titles:
if title.default: default_title = title.title
for x in ids:
fireEvent('notify.frontend', type = '%s.busy' % media.type, data = {'id': x})
fireEventAsync('library.update.%s' % media.type, identifier = media.library.identifier, default_title = default_title, force = True, on_complete = self.createOnComplete(x))
refresh_handler = self.createRefreshHandler(x)
if refresh_handler:
handlers.append(refresh_handler)
db.expire_all()
fireEvent('notify.frontend', type = 'media.busy', data = {'id': [tryInt(x) for x in ids]})
fireEventAsync('schedule.queue', handlers = handlers)
return {
'success': True,
}
def addSingleRefresh(self):
def createRefreshHandler(self, id):
db = get_session()
media = db.query(Media).filter_by(id = id).first()
if media:
default_title = getTitle(media.library)
identifier = media.library.identifier
db.expire_all()
def handler():
fireEvent('library.update.%s' % media.type, identifier = identifier, default_title = default_title, force = True, on_complete = self.createOnComplete(id))
return handler
def addSingleRefreshView(self):
for media_type in fireEvent('media.types', merge = True):
addApiView('%s.refresh' % media_type, self.refresh)
def get(self, media_id):
db = get_session()
imdb_id = getImdb(str(media_id))
if imdb_id:
m = db.query(Media).filter(Media.library.has(identifier = imdb_id)).first()
else:
m = db.query(Media).filter_by(id = media_id).first()
results = None
if m:
results = m.to_dict(self.default_dict)
db.expire_all()
return results
def getView(self, id = None, **kwargs):
media = self.get(id) if id else None
return {
'success': media is not None,
'media': media,
}
def list(self, types = None, status = None, release_status = None, limit_offset = None, starts_with = None, search = None, order = None):
db = get_session()
# Make a list from string
if status and not isinstance(status, (list, tuple)):
status = [status]
if release_status and not isinstance(release_status, (list, tuple)):
release_status = [release_status]
if types and not isinstance(types, (list, tuple)):
types = [types]
# query movie ids
q = db.query(Media) \
.with_entities(Media.id) \
.group_by(Media.id)
# Filter on movie status
if status and len(status) > 0:
statuses = fireEvent('status.get', status, single = len(status) > 1)
statuses = [s.get('id') for s in statuses]
q = q.filter(Media.status_id.in_(statuses))
# Filter on release status
if release_status and len(release_status) > 0:
q = q.join(Media.releases)
statuses = fireEvent('status.get', release_status, single = len(release_status) > 1)
statuses = [s.get('id') for s in statuses]
q = q.filter(Release.status_id.in_(statuses))
# Filter on type
if types and len(types) > 0:
try: q = q.filter(Media.type.in_(types))
except: pass
# Only join when searching / ordering
if starts_with or search or order != 'release_order':
q = q.join(Media.library, Library.titles) \
.filter(LibraryTitle.default == True)
# Add search filters
filter_or = []
if starts_with:
starts_with = toUnicode(starts_with.lower())
if starts_with in ascii_lowercase:
filter_or.append(LibraryTitle.simple_title.startswith(starts_with))
else:
ignore = []
for letter in ascii_lowercase:
ignore.append(LibraryTitle.simple_title.startswith(toUnicode(letter)))
filter_or.append(not_(or_(*ignore)))
if search:
filter_or.append(LibraryTitle.simple_title.like('%%' + search + '%%'))
if len(filter_or) > 0:
q = q.filter(or_(*filter_or))
total_count = q.count()
if total_count == 0:
return 0, []
if order == 'release_order':
q = q.order_by(desc(Release.last_edit))
else:
q = q.order_by(asc(LibraryTitle.simple_title))
if limit_offset:
splt = splitString(limit_offset) if isinstance(limit_offset, (str, unicode)) else limit_offset
limit = splt[0]
offset = 0 if len(splt) == 1 else splt[1]
q = q.limit(limit).offset(offset)
# Get all media_ids in sorted order
media_ids = [m.id for m in q.all()]
# List release statuses
releases = db.query(Release) \
.filter(Release.movie_id.in_(media_ids)) \
.all()
release_statuses = dict((m, set()) for m in media_ids)
releases_count = dict((m, 0) for m in media_ids)
for release in releases:
release_statuses[release.movie_id].add('%d,%d' % (release.status_id, release.quality_id))
releases_count[release.movie_id] += 1
# Get main movie data
q2 = db.query(Media) \
.options(joinedload_all('library.titles')) \
.options(joinedload_all('library.files')) \
.options(joinedload_all('status')) \
.options(joinedload_all('files'))
q2 = q2.filter(Media.id.in_(media_ids))
results = q2.all()
# Create dict by movie id
movie_dict = {}
for movie in results:
movie_dict[movie.id] = movie
# List movies based on media_ids order
movies = []
for media_id in media_ids:
releases = []
for r in release_statuses.get(media_id):
x = splitString(r)
releases.append({'status_id': x[0], 'quality_id': x[1]})
# Merge releases with movie dict
movies.append(mergeDicts(movie_dict[media_id].to_dict({
'library': {'titles': {}, 'files':{}},
'files': {},
}), {
'releases': releases,
'releases_count': releases_count.get(media_id),
}))
db.expire_all()
return total_count, movies
def listView(self, **kwargs):
types = splitString(kwargs.get('types'))
status = splitString(kwargs.get('status'))
release_status = splitString(kwargs.get('release_status'))
limit_offset = kwargs.get('limit_offset')
starts_with = kwargs.get('starts_with')
search = kwargs.get('search')
order = kwargs.get('order')
total_movies, movies = self.list(
types = types,
status = status,
release_status = release_status,
limit_offset = limit_offset,
starts_with = starts_with,
search = search,
order = order
)
return {
'success': True,
'empty': len(movies) == 0,
'total': total_movies,
'movies': movies,
}
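For illustration, a hypothetical client call against the consolidated endpoint (host, port and API key are placeholders):

    import requests

    resp = requests.get('http://localhost:5050/api/API_KEY/media.list', params = {
        'types': 'movie',
        'status': 'active',
        'limit_offset': '50,0',  # 50 results starting at offset 0
    })
    data = resp.json()
    print data['total'], len(data['movies'])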
def addSingleListView(self):
for media_type in fireEvent('media.types', merge = True):
def tempList(*args, **kwargs):
return self.listView(types = media_type, *args, **kwargs)
addApiView('%s.list' % media_type, tempList)
def availableChars(self, types = None, status = None, release_status = None):
types = types or []
status = status or []
release_status = release_status or []
db = get_session()
# Make a list from string
if not isinstance(status, (list, tuple)):
status = [status]
if release_status and not isinstance(release_status, (list, tuple)):
release_status = [release_status]
if types and not isinstance(types, (list, tuple)):
types = [types]
q = db.query(Media)
# Filter on movie status
if status and len(status) > 0:
statuses = fireEvent('status.get', status, single = len(status) > 1)
statuses = [s.get('id') for s in statuses]
q = q.filter(Media.status_id.in_(statuses))
# Filter on release status
if release_status and len(release_status) > 0:
statuses = fireEvent('status.get', release_status, single = len(release_status) > 1)
statuses = [s.get('id') for s in statuses]
q = q.join(Media.releases) \
.filter(Release.status_id.in_(statuses))
# Filter on type
if types and len(types) > 0:
try: q = q.filter(Media.type.in_(types))
except: pass
q = q.join(Library, LibraryTitle) \
.with_entities(LibraryTitle.simple_title) \
.filter(LibraryTitle.default == True)
titles = q.all()
chars = set()
for title in titles:
try:
char = title[0][0]
char = char if char in ascii_lowercase else '#'
chars.add(str(char))
except:
log.error('Failed getting title for %s', title.libraries_id)
if len(chars) == 25:
break
db.expire_all()
return ''.join(sorted(chars))
def charView(self, **kwargs):
type = splitString(kwargs.get('type', 'movie'))
status = splitString(kwargs.get('status', None))
release_status = splitString(kwargs.get('release_status', None))
chars = self.availableChars(type, status, release_status)
return {
'success': True,
'empty': len(chars) == 0,
'chars': chars,
}
def addSingleCharView(self):
for media_type in fireEvent('media.types', merge = True):
def tempChar(*args, **kwargs):
return self.charView(type = media_type, *args, **kwargs)
addApiView('%s.available_chars' % media_type, tempChar)
def delete(self, media_id, delete_from = None):
db = get_session()
media = db.query(Media).filter_by(id = media_id).first()
if media:
deleted = False
if delete_from == 'all':
db.delete(media)
db.commit()
deleted = True
else:
done_status = fireEvent('status.get', 'done', single = True)
total_releases = len(media.releases)
total_deleted = 0
new_movie_status = None
for release in media.releases:
if delete_from in ['wanted', 'snatched', 'late']:
if release.status_id != done_status.get('id'):
db.delete(release)
total_deleted += 1
new_movie_status = 'done'
elif delete_from == 'manage':
if release.status_id == done_status.get('id'):
db.delete(release)
total_deleted += 1
new_movie_status = 'active'
db.commit()
if total_releases == total_deleted:
db.delete(media)
db.commit()
deleted = True
elif new_movie_status:
new_status = fireEvent('status.get', new_movie_status, single = True)
media.profile_id = None
media.status_id = new_status.get('id')
db.commit()
else:
fireEvent('media.restatus', media.id, single = True)
if deleted:
fireEvent('notify.frontend', type = 'movie.deleted', data = media.to_dict())
db.expire_all()
return True
def deleteView(self, id = '', **kwargs):
ids = splitString(id)
for media_id in ids:
self.delete(media_id, delete_from = kwargs.get('delete_from', 'all'))
return {
'success': True,
}
def addSingleDeleteView(self):
for media_type in fireEvent('media.types', merge = True):
def tempDelete(*args, **kwargs):
return self.deleteView(types = media_type, *args, **kwargs)
addApiView('%s.delete' % media_type, tempDelete)
def restatus(self, media_id):
active_status, done_status = fireEvent('status.get', ['active', 'done'], single = True)
db = get_session()
m = db.query(Media).filter_by(id = media_id).first()
if not m or len(m.library.titles) == 0:
log.debug('Can\'t restatus movie, doesn\'t seem to exist.')
return False
log.debug('Changing status for %s', m.library.titles[0].title)
if not m.profile:
m.status_id = done_status.get('id')
else:
move_to_wanted = True
for t in m.profile.types:
for release in m.releases:
if t.quality.identifier == release.quality.identifier and (release.status_id == done_status.get('id') and t.finish):
move_to_wanted = False
m.status_id = active_status.get('id') if move_to_wanted else done_status.get('id')
db.commit()
return True
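The rule restatus implements boils down to: a media item stays 'done' only when some profile type that is marked to finish has a matching release in 'done' status; otherwise it moves back to 'active'. A minimal standalone sketch of that decision (names are illustrative):

    def should_stay_done(profile_types, releases, done_status_id):
        # True if any wanted-and-finishing quality already has a done release
        return any(
            t['finish'] and any(
                r['quality'] == t['quality'] and r['status_id'] == done_status_id
                for r in releases
            )
            for t in profile_types
        )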

383
couchpotato/core/media/movie/_base/main.py

@ -2,15 +2,10 @@ from couchpotato import get_session
from couchpotato.api import addApiView
from couchpotato.core.event import fireEvent, fireEventAsync, addEvent
from couchpotato.core.helpers.encoding import toUnicode
from couchpotato.core.helpers.variable import getImdb, splitString, tryInt, \
mergeDicts
from couchpotato.core.helpers.variable import splitString, tryInt, getTitle
from couchpotato.core.logger import CPLog
from couchpotato.core.media.movie import MovieTypeBase
from couchpotato.core.settings.model import Library, LibraryTitle, Media, \
Release
from sqlalchemy.orm import joinedload_all
from sqlalchemy.sql.expression import or_, asc, not_, desc
from string import ascii_lowercase
from couchpotato.core.settings.model import Media
import time
log = CPLog(__name__)
@ -26,28 +21,6 @@ class MovieBase(MovieTypeBase):
super(MovieBase, self).__init__()
self.initType()
addApiView('movie.list', self.listView, docs = {
'desc': 'List movies in wanted list',
'params': {
'status': {'type': 'array or csv', 'desc': 'Filter movie by status. Example:"active,done"'},
'release_status': {'type': 'array or csv', 'desc': 'Filter movie by status of its releases. Example:"snatched,available"'},
'limit_offset': {'desc': 'Limit and offset the movie list. Examples: "50" or "50,30"'},
'starts_with': {'desc': 'Starts with these characters. Example: "a" returns all movies starting with the letter "a"'},
'search': {'desc': 'Search movie title'},
},
'return': {'type': 'object', 'example': """{
'success': True,
'empty': bool, any movies returned or not,
'movies': array, movies found,
}"""}
})
addApiView('movie.get', self.getView, docs = {
'desc': 'Get a movie by id',
'params': {
'id': {'desc': 'The id of the movie'},
}
})
addApiView('movie.available_chars', self.charView)
addApiView('movie.add', self.addView, docs = {
'desc': 'Add new movie to the wanted list',
'params': {
@ -62,258 +35,12 @@ class MovieBase(MovieTypeBase):
'params': {
'id': {'desc': 'Movie ID(s) you want to edit.', 'type': 'int (comma separated)'},
'profile_id': {'desc': 'ID of quality profile you want to change the movie to.'},
'category_id': {'desc': 'ID of category you want to add the movie to. If empty, no category is used.'},
'default_title': {'desc': 'Movie title to use for searches. Has to be one of the titles returned by movie.search.'},
}
})
addApiView('movie.delete', self.deleteView, docs = {
'desc': 'Delete a movie from the wanted list',
'params': {
'id': {'desc': 'Movie ID(s) you want to delete.', 'type': 'int (comma separated)'},
'delete_from': {'desc': 'Delete movie from this page', 'type': 'string: all (default), wanted, manage'},
}
})
addEvent('movie.add', self.add)
addEvent('movie.delete', self.delete)
addEvent('movie.get', self.get)
addEvent('movie.list', self.list)
addEvent('movie.restatus', self.restatus)
def getView(self, id = None, **kwargs):
movie = self.get(id) if id else None
return {
'success': movie is not None,
'movie': movie,
}
def get(self, movie_id):
db = get_session()
imdb_id = getImdb(str(movie_id))
if imdb_id:
m = db.query(Media).filter(Media.library.has(identifier = imdb_id)).first()
else:
m = db.query(Media).filter_by(id = movie_id).first()
results = None
if m:
results = m.to_dict(self.default_dict)
db.expire_all()
return results
def list(self, status = None, release_status = None, limit_offset = None, starts_with = None, search = None, order = None):
db = get_session()
# Make a list from string
if status and not isinstance(status, (list, tuple)):
status = [status]
if release_status and not isinstance(release_status, (list, tuple)):
release_status = [release_status]
# query movie ids
q = db.query(Media) \
.with_entities(Media.id) \
.group_by(Media.id)
# Filter on movie status
if status and len(status) > 0:
statuses = fireEvent('status.get', status, single = len(status) > 1)
statuses = [s.get('id') for s in statuses]
q = q.filter(Media.status_id.in_(statuses))
# Filter on release status
if release_status and len(release_status) > 0:
q = q.join(Media.releases)
statuses = fireEvent('status.get', release_status, single = len(release_status) > 1)
statuses = [s.get('id') for s in statuses]
q = q.filter(Release.status_id.in_(statuses))
# Only join when searching / ordering
if starts_with or search or order != 'release_order':
q = q.join(Media.library, Library.titles) \
.filter(LibraryTitle.default == True)
# Add search filters
filter_or = []
if starts_with:
starts_with = toUnicode(starts_with.lower())
if starts_with in ascii_lowercase:
filter_or.append(LibraryTitle.simple_title.startswith(starts_with))
else:
ignore = []
for letter in ascii_lowercase:
ignore.append(LibraryTitle.simple_title.startswith(toUnicode(letter)))
filter_or.append(not_(or_(*ignore)))
if search:
filter_or.append(LibraryTitle.simple_title.like('%%' + search + '%%'))
if len(filter_or) > 0:
q = q.filter(or_(*filter_or))
total_count = q.count()
if total_count == 0:
return 0, []
if order == 'release_order':
q = q.order_by(desc(Release.last_edit))
else:
q = q.order_by(asc(LibraryTitle.simple_title))
if limit_offset:
splt = splitString(limit_offset) if isinstance(limit_offset, (str, unicode)) else limit_offset
limit = splt[0]
offset = 0 if len(splt) is 1 else splt[1]
q = q.limit(limit).offset(offset)
# Get all movie_ids in sorted order
movie_ids = [m.id for m in q.all()]
# List release statuses
releases = db.query(Release) \
.filter(Release.movie_id.in_(movie_ids)) \
.all()
release_statuses = dict((m, set()) for m in movie_ids)
releases_count = dict((m, 0) for m in movie_ids)
for release in releases:
release_statuses[release.movie_id].add('%d,%d' % (release.status_id, release.quality_id))
releases_count[release.movie_id] += 1
# Get main movie data
q2 = db.query(Media) \
.options(joinedload_all('library.titles')) \
.options(joinedload_all('library.files')) \
.options(joinedload_all('status')) \
.options(joinedload_all('files'))
q2 = q2.filter(Media.id.in_(movie_ids))
results = q2.all()
# Create dict by movie id
movie_dict = {}
for movie in results:
movie_dict[movie.id] = movie
# List movies based on movie_ids order
movies = []
for movie_id in movie_ids:
releases = []
for r in release_statuses.get(movie_id):
x = splitString(r)
releases.append({'status_id': x[0], 'quality_id': x[1]})
# Merge releases with movie dict
movies.append(mergeDicts(movie_dict[movie_id].to_dict({
'library': {'titles': {}, 'files':{}},
'files': {},
}), {
'releases': releases,
'releases_count': releases_count.get(movie_id),
}))
db.expire_all()
return total_count, movies
def availableChars(self, status = None, release_status = None):
status = status or []
release_status = release_status or []
db = get_session()
# Make a list from string
if not isinstance(status, (list, tuple)):
status = [status]
if release_status and not isinstance(release_status, (list, tuple)):
release_status = [release_status]
q = db.query(Media)
# Filter on movie status
if status and len(status) > 0:
statuses = fireEvent('status.get', status, single = len(release_status) > 1)
statuses = [s.get('id') for s in statuses]
q = q.filter(Media.status_id.in_(statuses))
# Filter on release status
if release_status and len(release_status) > 0:
statuses = fireEvent('status.get', release_status, single = len(release_status) > 1)
statuses = [s.get('id') for s in statuses]
q = q.join(Media.releases) \
.filter(Release.status_id.in_(statuses))
q = q.join(Library, LibraryTitle) \
.with_entities(LibraryTitle.simple_title) \
.filter(LibraryTitle.default == True)
titles = q.all()
chars = set()
for title in titles:
try:
char = title[0][0]
char = char if char in ascii_lowercase else '#'
chars.add(str(char))
except:
log.error('Failed getting title for %s', title.libraries_id)
if len(chars) == 25:
break
db.expire_all()
return ''.join(sorted(chars))
def listView(self, **kwargs):
status = splitString(kwargs.get('status'))
release_status = splitString(kwargs.get('release_status'))
limit_offset = kwargs.get('limit_offset')
starts_with = kwargs.get('starts_with')
search = kwargs.get('search')
order = kwargs.get('order')
total_movies, movies = self.list(
status = status,
release_status = release_status,
limit_offset = limit_offset,
starts_with = starts_with,
search = search,
order = order
)
return {
'success': True,
'empty': len(movies) == 0,
'total': total_movies,
'movies': movies,
}
def charView(self, **kwargs):
status = splitString(kwargs.get('status', None))
release_status = splitString(kwargs.get('release_status', None))
chars = self.availableChars(status, release_status)
return {
'success': True,
'empty': len(chars) == 0,
'chars': chars,
}
def add(self, params = None, force_readd = True, search_after = True, update_library = False, status_id = None):
if not params: params = {}
@ -402,7 +129,15 @@ class MovieBase(MovieTypeBase):
onComplete()
if added:
fireEvent('notify.frontend', type = 'movie.added', data = movie_dict, message = 'Successfully added "%s" to your wanted list.' % params.get('title', ''))
if params.get('title'):
message = 'Successfully added "%s" to your wanted list.' % params.get('title', '')
else:
title = getTitle(m.library)
if title:
message = 'Successfully added "%s" to your wanted list.' % title
else:
message = 'Successfully added to your wanted list.'
fireEvent('notify.frontend', type = 'movie.added', data = movie_dict, message = message)
db.expire_all()
return movie_dict
@ -422,9 +157,9 @@ class MovieBase(MovieTypeBase):
available_status = fireEvent('status.get', 'available', single = True)
ids = splitString(id)
for movie_id in ids:
for media_id in ids:
m = db.query(Media).filter_by(id = movie_id).first()
m = db.query(Media).filter_by(id = media_id).first()
if not m:
continue
@ -447,98 +182,12 @@ class MovieBase(MovieTypeBase):
db.commit()
fireEvent('movie.restatus', m.id)
fireEvent('media.restatus', m.id)
movie_dict = m.to_dict(self.default_dict)
fireEventAsync('movie.searcher.single', movie_dict, on_complete = self.createNotifyFront(movie_id))
fireEventAsync('movie.searcher.single', movie_dict, on_complete = self.createNotifyFront(media_id))
db.expire_all()
return {
'success': True,
}
def deleteView(self, id = '', **kwargs):
ids = splitString(id)
for movie_id in ids:
self.delete(movie_id, delete_from = kwargs.get('delete_from', 'all'))
return {
'success': True,
}
def delete(self, movie_id, delete_from = None):
db = get_session()
movie = db.query(Media).filter_by(id = movie_id).first()
if movie:
deleted = False
if delete_from == 'all':
db.delete(movie)
db.commit()
deleted = True
else:
done_status = fireEvent('status.get', 'done', single = True)
total_releases = len(movie.releases)
total_deleted = 0
new_movie_status = None
for release in movie.releases:
if delete_from in ['wanted', 'snatched', 'late']:
if release.status_id != done_status.get('id'):
db.delete(release)
total_deleted += 1
new_movie_status = 'done'
elif delete_from == 'manage':
if release.status_id == done_status.get('id'):
db.delete(release)
total_deleted += 1
new_movie_status = 'active'
db.commit()
if total_releases == total_deleted:
db.delete(movie)
db.commit()
deleted = True
elif new_movie_status:
new_status = fireEvent('status.get', new_movie_status, single = True)
movie.profile_id = None
movie.status_id = new_status.get('id')
db.commit()
else:
fireEvent('movie.restatus', movie.id, single = True)
if deleted:
fireEvent('notify.frontend', type = 'movie.deleted', data = movie.to_dict())
db.expire_all()
return True
def restatus(self, movie_id):
active_status, done_status = fireEvent('status.get', ['active', 'done'], single = True)
db = get_session()
m = db.query(Media).filter_by(id = movie_id).first()
if not m or len(m.library.titles) == 0:
log.debug('Can\'t restatus movie, doesn\'t seem to exist.')
return False
log.debug('Changing status for %s', m.library.titles[0].title)
if not m.profile:
m.status_id = done_status.get('id')
else:
move_to_wanted = True
for t in m.profile.types:
for release in m.releases:
if t.quality.identifier is release.quality.identifier and (release.status_id is done_status.get('id') and t.finish):
move_to_wanted = False
m.status_id = active_status.get('id') if move_to_wanted else done_status.get('id')
db.commit()
return True

7
couchpotato/core/media/movie/_base/static/list.js

@ -281,7 +281,7 @@ var MovieList = new Class({
// Get available chars and highlight
if(!available_chars && (self.navigation.isDisplayed() || self.navigation.isVisible()))
Api.request('movie.available_chars', {
Api.request('media.available_chars', {
'data': Object.merge({
'status': self.options.status
}, self.filter),
@ -372,7 +372,7 @@ var MovieList = new Class({
'click': function(e){
(e).preventDefault();
this.set('text', 'Deleting..')
Api.request('movie.delete', {
Api.request('media.delete', {
'data': {
'id': ids.join(','),
'delete_from': self.options.identifier
@ -550,8 +550,9 @@ var MovieList = new Class({
}
Api.request(self.options.api_call || 'movie.list', {
Api.request(self.options.api_call || 'media.list', {
'data': Object.merge({
'type': 'movie',
'status': self.options.status,
'limit_offset': self.options.limit ? self.options.limit + ',' + self.offset : null
}, self.filter),

6
couchpotato/core/media/movie/_base/static/movie.actions.js

@ -431,7 +431,7 @@ MA.Release = new Class({
markMovieDone: function(){
var self = this;
Api.request('movie.delete', {
Api.request('media.delete', {
'data': {
'id': self.movie.get('id'),
'delete_from': 'wanted'
@ -450,7 +450,7 @@ MA.Release = new Class({
},
tryNextRelease: function(movie_id){
tryNextRelease: function(){
var self = this;
Api.request('movie.searcher.try_next', {
@ -821,7 +821,7 @@ MA.Delete = new Class({
self.callChain();
},
function(){
Api.request('movie.delete', {
Api.request('media.delete', {
'data': {
'id': self.movie.get('id'),
'delete_from': self.movie.list.options.identifier

8
couchpotato/core/media/movie/_base/static/movie.js

@ -36,10 +36,10 @@ var Movie = new Class({
App.on('movie.update', self.global_events['movie.update']);
// Add spinner on load / search
['movie.busy', 'movie.searcher.started'].each(function(listener){
['media.busy', 'movie.searcher.started'].each(function(listener){
self.global_events[listener] = function(notification){
if(notification.data && self.data.id == notification.data.id)
self.busy(true)
if(notification.data && (self.data.id == notification.data.id || (typeOf(notification.data.id) == 'array' && notification.data.id.indexOf(self.data.id) > -1)))
self.busy(true);
}
App.on(listener, self.global_events[listener]);
})
@ -329,4 +329,4 @@ var Movie = new Class({
return this.el;
}
});
});

14
couchpotato/core/media/movie/searcher/main.py

@ -145,7 +145,7 @@ class MovieSearcher(SearcherBase, MovieTypeBase):
default_title = getTitle(movie['library'])
if not default_title:
log.error('No proper info found for movie, removing it from library to prevent it from having more issues.')
fireEvent('movie.delete', movie['id'], single = True)
fireEvent('media.delete', movie['id'], single = True)
return
fireEvent('notify.frontend', type = 'movie.searcher.started', data = {'id': movie['id']}, message = 'Searching for "%s"' % default_title)
@ -192,7 +192,7 @@ class MovieSearcher(SearcherBase, MovieTypeBase):
else:
log.info('Better quality (%s) already available or snatched for %s', (quality_type['quality']['label'], default_title))
fireEvent('movie.restatus', movie['id'])
fireEvent('media.restatus', movie['id'])
break
# Break if CP wants to shut down
@ -284,6 +284,10 @@ class MovieSearcher(SearcherBase, MovieTypeBase):
return True
else:
# Don't allow movies with years to far in the future
if year is not None and year > now_year + 1:
return False
# For movies before 1972
if not dates or dates.get('theater', 0) < 0 or dates.get('dvd', 0) < 0:
return True
@ -318,14 +322,14 @@ class MovieSearcher(SearcherBase, MovieTypeBase):
'success': trynext
}
def tryNextRelease(self, movie_id, manual = False):
def tryNextRelease(self, media_id, manual = False):
snatched_status, done_status, ignored_status = fireEvent('status.get', ['snatched', 'done', 'ignored'], single = True)
try:
db = get_session()
rels = db.query(Release) \
.filter_by(movie_id = movie_id) \
.filter_by(movie_id = media_id) \
.filter(Release.status_id.in_([snatched_status.get('id'), done_status.get('id')])) \
.all()
@ -333,7 +337,7 @@ class MovieSearcher(SearcherBase, MovieTypeBase):
rel.status_id = ignored_status.get('id')
db.commit()
movie_dict = fireEvent('movie.get', movie_id, single = True)
movie_dict = fireEvent('media.get', media_id = media_id, single = True)
log.info('Trying next release for: %s', getTitle(movie_dict['library']))
fireEvent('movie.searcher.single', movie_dict, manual = manual)

4
couchpotato/core/notifications/boxcar/main.py

@ -16,14 +16,14 @@ class Boxcar(Notification):
try:
message = message.strip()
params = {
data = {
'email': self.conf('email'),
'notification[from_screen_name]': self.default_title,
'notification[message]': toUnicode(message),
'notification[from_remote_service_id]': int(time.time()),
}
self.urlopen(self.url, params = params)
self.urlopen(self.url, data = data)
except:
log.error('Check your email and added services on boxcar.io')
return False
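This same `params` -> `data` switch repeats in the Prowl, Pushalot, Pushbullet and Trakt hunks below: with the new requests-based `urlopen()`, `params` means query-string arguments while `data` is a form-encoded POST body, which is what these notification APIs expect. In plain requests terms:

    import requests

    url = 'http://example.com/api'  # placeholder endpoint

    requests.get(url, params = {'key': 'value'})   # GET /api?key=value
    requests.post(url, data = {'key': 'value'})    # POST with key=value in the body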

2
couchpotato/core/notifications/prowl/main.py

@ -26,7 +26,7 @@ class Prowl(Notification):
}
try:
self.urlopen(self.urls['api'], headers = headers, params = data, multipart = True, show_error = False)
self.urlopen(self.urls['api'], headers = headers, data = data, show_error = False)
log.info('Prowl notifications sent.')
return True
except:

2
couchpotato/core/notifications/pushalot/main.py

@ -29,7 +29,7 @@ class Pushalot(Notification):
}
try:
self.urlopen(self.urls['api'], headers = headers, params = data, multipart = True, show_error = False)
self.urlopen(self.urls['api'], headers = headers, data = data, show_error = False)
return True
except:
log.error('PushAlot failed: %s', traceback.format_exc())

4
couchpotato/core/notifications/pushbullet/main.py

@ -74,9 +74,9 @@ class Pushbullet(Notification):
}
if cache:
return self.getJsonData(self.url % method, headers = headers, params = kwargs)
return self.getJsonData(self.url % method, headers = headers, data = kwargs)
else:
data = self.urlopen(self.url % method, headers = headers, params = kwargs)
data = self.urlopen(self.url % method, headers = headers, data = kwargs)
return json.loads(data)
except Exception, ex:

2
couchpotato/core/notifications/trakt/main.py

@ -35,7 +35,7 @@ class Trakt(Notification):
def call(self, method_url, post_data):
try:
response = self.getJsonData(self.urls['base'] % method_url, params = post_data, cache_timeout = 1)
response = self.getJsonData(self.urls['base'] % method_url, data = post_data, cache_timeout = 1)
if response:
if response.get('status') == "success":
log.info('Successfully called Trakt')

8
couchpotato/core/notifications/xbmc/__init__.py

@ -47,6 +47,14 @@ config = [{
'description': 'Only scan new movie folder at remote XBMC servers. Works if movie location is the same.',
},
{
'name': 'force_full_scan',
'label': 'Always do a full scan',
'default': 0,
'type': 'bool',
'advanced': True,
'description': 'Do a full scan instead of only the new movie. Useful if the XBMC path is different from the path CPS uses.',
},
{
'name': 'on_snatch',
'default': 0,
'type': 'bool',

30
couchpotato/core/notifications/xbmc/main.py

@ -7,6 +7,8 @@ import json
import socket
import traceback
import urllib
import requests
from requests.packages.urllib3.exceptions import MaxRetryError
log = CPLog(__name__)
@ -36,7 +38,7 @@ class XBMC(Notification):
if data and data.get('destination_dir') and (not self.conf('only_first') or hosts.index(host) == 0):
param = {}
if self.conf('remote_dir_scan') or socket.getfqdn('localhost') == socket.getfqdn(host.split(':')[0]):
if not self.conf('force_full_scan') and (self.conf('remote_dir_scan') or socket.getfqdn('localhost') == socket.getfqdn(host.split(':')[0])):
param = {'directory': data['destination_dir']}
calls.append(('VideoLibrary.Scan', param))
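The two branches only differ in the JSON-RPC `params` sent to XBMC; roughly (the folder path is a placeholder):

    # Targeted scan of only the new movie's folder:
    scan_new = {'jsonrpc': '2.0', 'id': 1, 'method': 'VideoLibrary.Scan',
                'params': {'directory': '/movies/New Movie (2013)/'}}

    # Full library scan, used when force_full_scan is set or the paths differ:
    scan_all = {'jsonrpc': '2.0', 'id': 1, 'method': 'VideoLibrary.Scan',
                'params': {}}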
@ -167,22 +169,18 @@ class XBMC(Notification):
# manually fake expected response array
return [{'result': 'Error'}]
except URLError, e:
if isinstance(e.reason, socket.timeout):
log.info('Couldn\'t send request to XBMC, assuming it\'s turned off')
return [{'result': 'Error'}]
else:
log.error('Failed sending non-JSON-type request to XBMC: %s', traceback.format_exc())
return [{'result': 'Error'}]
except (MaxRetryError, requests.exceptions.Timeout):
log.info2('Couldn\'t send request to XBMC, assuming it\'s turned off')
return [{'result': 'Error'}]
except:
log.error('Failed sending non-JSON-type request to XBMC: %s', traceback.format_exc())
return [{'result': 'Error'}]
def request(self, host, requests):
def request(self, host, do_requests):
server = 'http://%s/jsonrpc' % host
data = []
for req in requests:
for req in do_requests:
method, kwargs = req
data.append({
'method': method,
@ -202,17 +200,13 @@ class XBMC(Notification):
try:
log.debug('Sending request to %s: %s', (host, data))
response = self.getJsonData(server, headers = headers, params = data, timeout = 3, show_error = False)
response = self.getJsonData(server, headers = headers, data = data, timeout = 3, show_error = False)
log.debug('Returned from request %s: %s', (host, response))
return response
except URLError, e:
if isinstance(e.reason, socket.timeout):
log.info('Couldn\'t send request to XBMC, assuming it\'s turned off')
return []
else:
log.error('Failed sending request to XBMC: %s', traceback.format_exc())
return []
except (MaxRetryError, requests.exceptions.Timeout):
log.info2('Couldn\'t send request to XBMC, assuming it\'s turned off')
return []
except:
log.error('Failed sending request to XBMC: %s', traceback.format_exc())
return []

2
couchpotato/core/plugins/automation/__init__.py

@ -41,7 +41,7 @@ config = [{
'label': 'Required Genres',
'default': '',
'placeholder': 'Example: Action, Crime & Drama',
'description': 'Ignore movies that don\'t contain at least one set of genres. Sets are separated by "," and each word within a set must be separated with "&"'
'description': ('Ignore movies that don\'t contain at least one set of genres.', 'Sets are separated by "," and each word within a set must be separated with "&"')
},
{
'name': 'ignored_genres',

4
couchpotato/core/plugins/automation/main.py

@ -43,7 +43,7 @@ class Automation(Plugin):
if self.shuttingDown():
break
movie_dict = fireEvent('movie.get', movie_id, single = True)
movie_dict = fireEvent('media.get', movie_id, single = True)
fireEvent('movie.searcher.single', movie_dict)
return True
return True

111
couchpotato/core/plugins/base.py

@ -1,19 +1,17 @@
from StringIO import StringIO
from couchpotato.core.event import fireEvent, addEvent
from couchpotato.core.helpers.encoding import tryUrlencode, ss, toSafeString, \
from couchpotato.core.helpers.encoding import ss, toSafeString, \
toUnicode, sp
from couchpotato.core.helpers.variable import getExt, md5, isLocalIP
from couchpotato.core.logger import CPLog
from couchpotato.environment import Env
from multipartpost import MultipartPostHandler
import requests
from requests.packages.urllib3 import Timeout
from requests.packages.urllib3.exceptions import MaxRetryError
from tornado import template
from tornado.web import StaticFileHandler
from urlparse import urlparse
import cookielib
import glob
import gzip
import inspect
import math
import os.path
import re
import time
@ -39,6 +37,7 @@ class Plugin(object):
http_time_between_calls = 0
http_failed_request = {}
http_failed_disabled = {}
http_opener = requests.Session()
def __new__(typ, *args, **kwargs):
new_plugin = super(Plugin, typ).__new__(typ)
@ -55,8 +54,11 @@ class Plugin(object):
self.registerStatic(inspect.getfile(self.__class__))
def conf(self, attr, value = None, default = None, section = None):
class_name = self.getName().lower().split(':')
return Env.setting(attr, section = section if section else class_name[0].lower(), value = value, default = default)
class_name = self.getName().lower().split(':')[0].lower()
return Env.setting(attr, section = section if section else class_name, value = value, default = default)
def deleteConf(self, attr):
return Env._settings.delete(attr, section = self.getName().lower().split(':')[0].lower())
def getName(self):
return self._class_name or self.__class__.__name__
@ -100,13 +102,18 @@ class Plugin(object):
self.makeDir(os.path.dirname(path))
if os.path.exists(path):
log.debug('%s already exists, overwriting file with new version', path)
try:
f = open(path, 'w+' if not binary else 'w+b')
f.write(content)
f.close()
os.chmod(path, Env.getPermission('file'))
except Exception, e:
log.error('Unable writing to file "%s": %s', (path, e))
log.error('Unable to write file "%s": %s', (path, traceback.format_exc()))
if os.path.isfile(path):
os.remove(path)
def makeDir(self, path):
path = ss(path)
@ -120,11 +127,11 @@ class Plugin(object):
return False
# http request
def urlopen(self, url, timeout = 30, params = None, headers = None, opener = None, multipart = False, show_error = True):
def urlopen(self, url, timeout = 30, data = None, headers = None, files = None, show_error = True, return_raw = False):
url = urllib2.quote(ss(url), safe = "%/:=&?~#+!$,;'@()*[]")
if not headers: headers = {}
if not params: params = {}
if not data: data = {}
# Fill in some headers
parsed_url = urlparse(url)
@ -137,6 +144,8 @@ class Plugin(object):
headers['Connection'] = headers.get('Connection', 'keep-alive')
headers['Cache-Control'] = headers.get('Cache-Control', 'max-age=0')
r = self.http_opener
# Don't try for failed requests
if self.http_failed_disabled.get(host, 0) > 0:
if self.http_failed_disabled[host] > (time.time() - 900):
@ -152,50 +161,23 @@ class Plugin(object):
self.wait(host)
try:
# Make sure opener has the correct headers
if opener:
opener.add_headers = headers
if multipart:
log.info('Opening multipart url: %s, params: %s', (url, [x for x in params.iterkeys()] if isinstance(params, dict) else 'with data'))
request = urllib2.Request(url, params, headers)
if opener:
opener.add_handler(MultipartPostHandler())
else:
cookies = cookielib.CookieJar()
opener = urllib2.build_opener(urllib2.HTTPCookieProcessor(cookies), MultipartPostHandler)
response = opener.open(request, timeout = timeout)
else:
log.info('Opening url: %s, params: %s', (url, [x for x in params.iterkeys()] if isinstance(params, dict) else 'with data'))
kwargs = {
'headers': headers,
'data': data if len(data) > 0 else None,
'timeout': timeout,
'files': files,
}
method = 'post' if len(data) > 0 or files else 'get'
if isinstance(params, (str, unicode)) and len(params) > 0:
data = params
else:
data = tryUrlencode(params) if len(params) > 0 else None
log.info('Opening url: %s %s, data: %s', (method, url, [x for x in data.iterkeys()] if isinstance(data, dict) else 'with data'))
response = r.request(method, url, verify = False, **kwargs)
request = urllib2.Request(url, data, headers)
if opener:
response = opener.open(request, timeout = timeout)
else:
response = urllib2.urlopen(request, timeout = timeout)
# unzip if needed
if response.info().get('Content-Encoding') == 'gzip':
buf = StringIO(response.read())
f = gzip.GzipFile(fileobj = buf)
data = f.read()
f.close()
else:
data = response.read()
response.close()
data = response.content if return_raw else response.text
self.http_failed_request[host] = 0
except IOError:
except (IOError, MaxRetryError, Timeout):
if show_error:
log.error('Failed opening url in %s: %s %s', (self.getName(), url, traceback.format_exc(1)))
log.error('Failed opening url in %s: %s %s', (self.getName(), url, traceback.format_exc(0)))
# Save failed requests by hosts
try:
@ -218,15 +200,19 @@ class Plugin(object):
return data
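The net effect of this rewrite: one shared `requests.Session` per plugin class (keeping cookies and keep-alive connections), a POST whenever there is a body or files, a GET otherwise. A condensed sketch of the idea, not the full method:

    import requests

    session = requests.Session()  # shared across calls, like http_opener above

    def fetch(url, data = None, files = None, headers = None, timeout = 30):
        method = 'post' if data or files else 'get'
        response = session.request(method, url, data = data, files = files,
                                   headers = headers, timeout = timeout, verify = False)
        return response.text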
def wait(self, host = ''):
if self.http_time_between_calls == 0:
return
now = time.time()
last_use = self.http_last_use.get(host, 0)
if last_use > 0:
wait = math.ceil(last_use - now + self.http_time_between_calls)
wait = (last_use - now) + self.http_time_between_calls
if wait > 0:
log.debug('Waiting for %s, %d seconds', (self.getName(), wait))
time.sleep(last_use - now + self.http_time_between_calls)
if wait > 0:
log.debug('Waiting for %s, %d seconds', (self.getName(), wait))
time.sleep(wait)
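A worked example of the new throttle: with `http_time_between_calls = 10` and the previous request made 4 seconds ago, the remaining 6 seconds are slept, instead of the old `math.ceil`'d value.

    import time

    http_time_between_calls = 10           # seconds to keep between calls per host
    last_use = time.time() - 4             # previous request happened 4s ago
    wait = (last_use - time.time()) + http_time_between_calls
    if wait > 0:
        time.sleep(wait)                   # sleeps roughly 6 seconds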
def beforeCall(self, handler):
self.isRunning('%s.%s' % (self.getName(), handler.__name__))
@ -269,18 +255,19 @@ class Plugin(object):
try:
cache_timeout = 300
if kwargs.get('cache_timeout'):
if kwargs.has_key('cache_timeout'):
cache_timeout = kwargs.get('cache_timeout')
del kwargs['cache_timeout']
data = self.urlopen(url, **kwargs)
if data:
if data and cache_timeout > 0:
self.setCache(cache_key, data, timeout = cache_timeout)
return data
except:
if not kwargs.get('show_error', True):
raise
log.debug('Failed getting cache: %s', (traceback.format_exc(0)))
return ''
def setCache(self, cache_key, value, timeout = 300):
@ -289,19 +276,19 @@ class Plugin(object):
Env.get('cache').set(cache_key_md5, value, timeout)
return value
def createNzbName(self, data, movie):
tag = self.cpTag(movie)
def createNzbName(self, data, media):
tag = self.cpTag(media)
return '%s%s' % (toSafeString(toUnicode(data.get('name'))[:127 - len(tag)]), tag)
def createFileName(self, data, filedata, movie):
name = sp(os.path.join(self.createNzbName(data, movie)))
def createFileName(self, data, filedata, media):
name = sp(os.path.join(self.createNzbName(data, media)))
if data.get('protocol') == 'nzb' and 'DOCTYPE nzb' not in filedata and '</nzb>' not in filedata:
return '%s.%s' % (name, 'rar')
return '%s.%s' % (name, data.get('protocol'))
def cpTag(self, movie):
def cpTag(self, media):
if Env.setting('enabled', 'renamer'):
return '.cp(' + movie['library'].get('identifier') + ')' if movie['library'].get('identifier') else ''
return '.cp(' + media['library'].get('identifier') + ')' if media['library'].get('identifier') else ''
return ''
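Given a media dict with an IMDB identifier, the tag and resulting name come out as below (a sketch; the title and id are made up, and the tag is only added while the renamer is enabled):

    media = {'library': {'identifier': 'tt0133093'}}
    data = {'name': 'The Matrix 1999 720p', 'protocol': 'nzb'}

    # cpTag(media)               -> '.cp(tt0133093)'
    # createNzbName(data, media) -> 'The Matrix 1999 720p.cp(tt0133093)'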

2
couchpotato/core/plugins/file/main.py

@ -93,7 +93,7 @@ class FileManager(Plugin):
return dest
try:
filedata = self.urlopen(url, **urlopen_kwargs)
filedata = self.urlopen(url, return_raw = True, **urlopen_kwargs)
except:
log.error('Failed downloading file %s: %s', (url, traceback.format_exc()))
return False

14
couchpotato/core/plugins/manage/main.py

@ -1,6 +1,6 @@
from couchpotato.api import addApiView
from couchpotato.core.event import fireEvent, addEvent, fireEventAsync
from couchpotato.core.helpers.encoding import ss
from couchpotato.core.helpers.encoding import sp
from couchpotato.core.helpers.variable import splitString, getTitle
from couchpotato.core.logger import CPLog
from couchpotato.core.plugins.base import Plugin
@ -112,22 +112,20 @@ class Manage(Plugin):
if self.conf('cleanup') and full and not self.shuttingDown():
# Get movies with done status
total_movies, done_movies = fireEvent('movie.list', status = 'done', single = True)
total_movies, done_movies = fireEvent('media.list', types = 'movie', status = 'done', single = True)
for done_movie in done_movies:
if done_movie['library']['identifier'] not in added_identifiers:
fireEvent('movie.delete', movie_id = done_movie['id'], delete_from = 'all')
fireEvent('media.delete', media_id = done_movie['id'], delete_from = 'all')
else:
releases = fireEvent('release.for_movie', id = done_movie.get('id'), single = True)
for release in releases:
if len(release.get('files', [])) == 0:
fireEvent('release.delete', release['id'])
else:
if len(release.get('files', [])) > 0:
for release_file in release.get('files', []):
# Remove release not available anymore
if not os.path.isfile(ss(release_file['path'])):
if not os.path.isfile(sp(release_file['path'])):
fireEvent('release.clean', release['id'])
break
@ -202,7 +200,7 @@ class Manage(Plugin):
self.in_progress[folder]['to_go'] -= 1
total = self.in_progress[folder]['total']
movie_dict = fireEvent('movie.get', identifier, single = True)
movie_dict = fireEvent('media.get', identifier, single = True)
fireEvent('notify.frontend', type = 'movie.added', data = movie_dict, message = None if total > 5 else 'Added "%s" to manage.' % getTitle(movie_dict['library']))

1
couchpotato/core/plugins/quality/main.py

@ -296,6 +296,7 @@ class QualityPlugin(Plugin):
'Movie.Name.1999.DVDRip-Group': 'dvdrip',
'Movie.Name.1999.DVD-Rip-Group': 'dvdrip',
'Movie.Name.1999.DVD-R-Group': 'dvdr',
'Movie.Name.Camelie.1999.720p.BluRay.x264-Group': '720p',
}
correct = 0

199
couchpotato/core/plugins/release/main.py

@ -100,14 +100,14 @@ class Release(Plugin):
done_status, snatched_status = fireEvent('status.get', ['done', 'snatched'], single = True)
# Add movie
movie = db.query(Media).filter_by(library_id = group['library'].get('id')).first()
if not movie:
movie = Media(
media = db.query(Media).filter_by(library_id = group['library'].get('id')).first()
if not media:
media = Media(
library_id = group['library'].get('id'),
profile_id = 0,
status_id = done_status.get('id')
)
db.add(movie)
db.add(media)
db.commit()
# Add Release
@ -120,7 +120,7 @@ class Release(Plugin):
if not rel:
rel = Relea(
identifier = identifier,
movie = movie,
movie = media,
quality_id = group['meta_data']['quality'].get('id'),
status_id = done_status.get('id')
)
@ -142,7 +142,7 @@ class Release(Plugin):
except:
log.debug('Failed to attach "%s" to release: %s', (added_files, traceback.format_exc()))
fireEvent('movie.restatus', movie.id)
fireEvent('media.restatus', media.id)
return True
@ -211,119 +211,136 @@ class Release(Plugin):
db = get_session()
rel = db.query(Relea).filter_by(id = id).first()
if rel:
item = {}
for info in rel.info:
item[info.identifier] = info.value
if not rel:
log.error('Couldn\'t find release with id: %s', id)
return {
'success': False
}
fireEvent('notify.frontend', type = 'release.manual_download', data = True, message = 'Snatching "%s"' % item['name'])
item = {}
for info in rel.info:
item[info.identifier] = info.value
# Get matching provider
provider = fireEvent('provider.belongs_to', item['url'], provider = item.get('provider'), single = True)
fireEvent('notify.frontend', type = 'release.manual_download', data = True, message = 'Snatching "%s"' % item['name'])
if not item.get('protocol'):
item['protocol'] = item['type']
item['type'] = 'movie'
# Get matching provider
provider = fireEvent('provider.belongs_to', item['url'], provider = item.get('provider'), single = True)
if item.get('protocol') != 'torrent_magnet':
item['download'] = provider.loginDownload if provider.urls.get('login') else provider.download
# Backwards compatibility code
if not item.get('protocol'):
item['protocol'] = item['type']
item['type'] = 'movie'
success = self.download(data = item, media = rel.movie.to_dict({
'profile': {'types': {'quality': {}}},
'releases': {'status': {}, 'quality': {}},
'library': {'titles': {}, 'files':{}},
'files': {}
}), manual = True)
if item.get('protocol') != 'torrent_magnet':
item['download'] = provider.loginDownload if provider.urls.get('login') else provider.download
if success:
db.expunge_all()
rel = db.query(Relea).filter_by(id = id).first() # Get release again @RuudBurger why do we need to get it again??
success = self.download(data = item, media = rel.movie.to_dict({
'profile': {'types': {'quality': {}}},
'releases': {'status': {}, 'quality': {}},
'library': {'titles': {}, 'files':{}},
'files': {}
}), manual = True)
fireEvent('notify.frontend', type = 'release.manual_download', data = True, message = 'Successfully snatched "%s"' % item['name'])
return {
'success': success
}
else:
log.error('Couldn\'t find release with id: %s', id)
if success == True:
db.expunge_all()
rel = db.query(Relea).filter_by(id = id).first() # Get release again @RuudBurger why do we need to get it again??
fireEvent('notify.frontend', type = 'release.manual_download', data = True, message = 'Successfully snatched "%s"' % item['name'])
return {
'success': False
'success': success == True
}
def download(self, data, media, manual = False):
# Backwards compatibility code
if not data.get('protocol'):
data['protocol'] = data['type']
data['type'] = 'movie'
# Test to see if any downloaders are enabled for this type
downloader_enabled = fireEvent('download.enabled', manual, data, single = True)
if not downloader_enabled:
log.info('Tried to download, but none of the "%s" downloaders are enabled or gave an error', data.get('protocol'))
return False
# Download NZB or torrent file
filedata = None
if data.get('download') and (ismethod(data.get('download')) or isfunction(data.get('download'))):
try:
filedata = data.get('download')(url = data.get('url'), nzb_id = data.get('id'))
except:
log.error('Tried to download, but the "%s" provider gave an error: %s', (data.get('protocol'), traceback.format_exc()))
return False
if downloader_enabled:
snatched_status, done_status, active_status = fireEvent('status.get', ['snatched', 'done', 'active'], single = True)
if filedata == 'try_next':
return filedata
elif not filedata:
return False
# Download release to temp
filedata = None
if data.get('download') and (ismethod(data.get('download')) or isfunction(data.get('download'))):
filedata = data.get('download')(url = data.get('url'), nzb_id = data.get('id'))
if filedata == 'try_next':
return filedata
# Send NZB or torrent file to downloader
download_result = fireEvent('download', data = data, media = media, manual = manual, filedata = filedata, single = True)
if not download_result:
log.info('Tried to download, but the "%s" downloader gave an error', data.get('protocol'))
return False
log.debug('Downloader result: %s', download_result)
download_result = fireEvent('download', data = data, movie = media, manual = manual, filedata = filedata, single = True)
log.debug('Downloader result: %s', download_result)
snatched_status, done_status, downloaded_status, active_status = fireEvent('status.get', ['snatched', 'done', 'downloaded', 'active'], single = True)
if download_result:
try:
# Mark release as snatched
db = get_session()
rls = db.query(Relea).filter_by(identifier = md5(data['url'])).first()
if rls:
renamer_enabled = Env.setting('enabled', 'renamer')
# Save download-id info if returned
if isinstance(download_result, dict):
for key in download_result:
rls_info = ReleaseInfo(
identifier = 'download_%s' % key,
value = toUnicode(download_result.get(key))
)
rls.info.append(rls_info)
try:
db = get_session()
rls = db.query(Relea).filter_by(identifier = md5(data['url'])).first()
if not rls:
log.error('No release found to store download information in')
return False
renamer_enabled = Env.setting('enabled', 'renamer')
# Save download-id info if returned
if isinstance(download_result, dict):
for key in download_result:
rls_info = ReleaseInfo(
identifier = 'download_%s' % key,
value = toUnicode(download_result.get(key))
)
rls.info.append(rls_info)
db.commit()
log_movie = '%s (%s) in %s' % (getTitle(media['library']), media['library']['year'], rls.quality.label)
snatch_message = 'Snatched "%s": %s' % (data.get('name'), log_movie)
log.info(snatch_message)
fireEvent('%s.snatched' % data['type'], message = snatch_message, data = rls.to_dict())
# Mark release as snatched
if renamer_enabled:
self.updateStatus(rls.id, status = snatched_status)
# If renamer isn't used, mark media done if finished or release downloaded
else:
if media['status_id'] == active_status.get('id'):
finished = next((True for profile_type in media['profile']['types'] if \
profile_type['quality_id'] == rls.quality.id and profile_type['finish']), False)
if finished:
log.info('Renamer disabled, marking media as finished: %s', log_movie)
# Mark release done
self.updateStatus(rls.id, status = done_status)
# Mark media done
mdia = db.query(Media).filter_by(id = media['id']).first()
mdia.status_id = done_status.get('id')
mdia.last_edit = int(time.time())
db.commit()
log_movie = '%s (%s) in %s' % (getTitle(media['library']), media['library']['year'], rls.quality.label)
snatch_message = 'Snatched "%s": %s' % (data.get('name'), log_movie)
log.info(snatch_message)
fireEvent('%s.snatched' % data['type'], message = snatch_message, data = rls.to_dict())
# If renamer isn't used, mark media done
if not renamer_enabled:
try:
if media['status_id'] == active_status.get('id'):
for profile_type in media['profile']['types']:
if profile_type['quality_id'] == rls.quality.id and profile_type['finish']:
log.info('Renamer disabled, marking media as finished: %s', log_movie)
# Mark release done
self.updateStatus(rls.id, status = done_status)
# Mark media done
mdia = db.query(Media).filter_by(id = media['id']).first()
mdia.status_id = done_status.get('id')
mdia.last_edit = int(time.time())
db.commit()
except:
log.error('Failed marking media finished, renamer disabled: %s', traceback.format_exc())
else:
self.updateStatus(rls.id, status = snatched_status)
except:
log.error('Failed marking media finished: %s', traceback.format_exc())
return True
return True
# Assume release downloaded
self.updateStatus(rls.id, status = downloaded_status)
log.info('Tried to download, but none of the "%s" downloaders are enabled or gave an error', (data.get('protocol')))
except:
log.error('Failed storing download status: %s', traceback.format_exc())
return False
return False
return True
def tryDownloadResult(self, results, media, quality_type, manual = False):
ignored_status, failed_status = fireEvent('status.get', ['ignored', 'failed'], single = True)

8
couchpotato/core/plugins/renamer/__init__.py

@ -93,7 +93,7 @@ config = [{
'default': 1,
'type': 'int',
'unit': 'min(s)',
'description': 'Detect movie status every X minutes. Will start the renamer if movie is <strong>completed</strong> or handle <strong>failed</strong> download if these options are enabled',
'description': ('Detect movie status every X minutes.', 'Will start the renamer if movie is <strong>completed</strong> or handle <strong>failed</strong> download if these options are enabled'),
},
{
'advanced': True,
@ -122,13 +122,13 @@ config = [{
'advanced': True,
'name': 'separator',
'label': 'File-Separator',
'description': 'Replace all the spaces with a character. Example: ".", "-" (without quotes). Leave empty to use spaces.',
'description': ('Replace all the spaces with a character.', 'Example: ".", "-" (without quotes). Leave empty to use spaces.'),
},
{
'advanced': True,
'name': 'foldersep',
'label': 'Folder-Separator',
'description': 'Replace all the spaces with a character. Example: ".", "-" (without quotes). Leave empty to use spaces.',
'description': ('Replace all the spaces with a character.', 'Example: ".", "-" (without quotes). Leave empty to use spaces.'),
},
{
'name': 'file_action',
@ -136,7 +136,7 @@ config = [{
'default': 'link',
'type': 'dropdown',
'values': [('Link', 'link'), ('Copy', 'copy'), ('Move', 'move')],
'description': '<strong>Link</strong> or <strong>Copy</strong> after downloading completed (and allow for seeding), or <strong>Move</strong> after seeding completed. Link first tries <a href="http://en.wikipedia.org/wiki/Hard_link">hard link</a>, then <a href="http://en.wikipedia.org/wiki/Sym_link">sym link</a> and falls back to Copy.',
'description': ('<strong>Link</strong>, <strong>Copy</strong> or <strong>Move</strong> after download completed.', 'Link first tries <a href="http://en.wikipedia.org/wiki/Hard_link">hard link</a>, then <a href="http://en.wikipedia.org/wiki/Sym_link">sym link</a> and falls back to Copy. It is preferred to use link when downloading torrents, as it saves space while still being able to seed.'),
'advanced': True,
},
{

448
couchpotato/core/plugins/renamer/main.py

@ -3,7 +3,7 @@ from couchpotato.api import addApiView
from couchpotato.core.event import addEvent, fireEvent, fireEventAsync
from couchpotato.core.helpers.encoding import toUnicode, ss, sp
from couchpotato.core.helpers.variable import getExt, mergeDicts, getTitle, \
getImdb, link, symlink, tryInt, splitString
getImdb, link, symlink, tryInt, splitString, fnEscape, isSubFolder
from couchpotato.core.logger import CPLog
from couchpotato.core.plugins.base import Plugin
from couchpotato.core.settings.model import Library, File, Profile, Release, \
@ -30,10 +30,11 @@ class Renamer(Plugin):
'desc': 'For the renamer to check for new files to rename in a folder',
'params': {
'async': {'desc': 'Optional: Set to 1 to fire the renamer.scan asynchronously.'},
'movie_folder': {'desc': 'Optional: The folder of the movie to scan. Keep empty for default renamer folder.'},
'files': {'desc': 'Optional: Provide the release files if more releases are in the same movie_folder, delimited with a \'|\'. Note that no dedicated release folder is expected for releases with one file.'},
'media_folder': {'desc': 'Optional: The folder of the media to scan. Keep empty for default renamer folder.'},
'files': {'desc': 'Optional: Provide the release files if more releases are in the same media_folder, delimited with a \'|\'. Note that no dedicated release folder is expected for releases with one file.'},
'base_folder': {'desc': 'Optional: The folder to find releases in. Leave empty for default folder.'},
'downloader' : {'desc': 'Optional: The downloader the release has been downloaded with. \'download_id\' is required with this option.'},
'download_id': {'desc': 'Optional: The nzb/torrent ID of the release in movie_folder. \'downloader\' is required with this option.'},
'download_id': {'desc': 'Optional: The nzb/torrent ID of the release in media_folder. \'downloader\' is required with this option.'},
'status': {'desc': 'Optional: The status of the release: \'completed\' (default) or \'seeding\''},
},
})
@ -64,25 +65,32 @@ class Renamer(Plugin):
def scanView(self, **kwargs):
async = tryInt(kwargs.get('async', 0))
movie_folder = sp(kwargs.get('movie_folder'))
base_folder = kwargs.get('base_folder')
media_folder = sp(kwargs.get('media_folder'))
# Backwards compatibility, to be removed after a few versions :)
if not media_folder:
media_folder = sp(kwargs.get('movie_folder'))
downloader = kwargs.get('downloader')
download_id = kwargs.get('download_id')
files = '|'.join([sp(filename) for filename in splitString(kwargs.get('files'), '|')])
status = kwargs.get('status', 'completed')
release_download = {'folder': movie_folder} if movie_folder else None
if release_download:
release_download = None
if not base_folder and media_folder:
release_download = {'folder': media_folder}
release_download.update({'id': download_id, 'downloader': downloader, 'status': status, 'files': files} if download_id else {})
fire_handle = fireEvent if not async else fireEventAsync
fire_handle('renamer.scan', release_download)
fire_handle('renamer.scan', base_folder = base_folder, release_download = release_download)
return {
'success': True
}
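The movie_folder fallback above is the whole backwards-compatibility story: prefer the new keyword, silently accept the deprecated one. A condensed sketch of the pattern, independent of this plugin:

def folder_from_kwargs(kwargs):
    # Prefer the new name, fall back to the deprecated one
    folder = kwargs.get('media_folder')
    if not folder:
        folder = kwargs.get('movie_folder')
    return folder

# Old and new API clients resolve to the same value:
assert folder_from_kwargs({'media_folder': '/downloads/x'}) == '/downloads/x'
assert folder_from_kwargs({'movie_folder': '/downloads/x'}) == '/downloads/x'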
def scan(self, release_download = None):
def scan(self, base_folder = None, release_download = None):
if not release_download: release_download = {}
if self.isDisabled():
@ -92,11 +100,14 @@ class Renamer(Plugin):
log.info('Renamer is already running. If you see this often, check the logs above for errors.')
return
if not base_folder:
base_folder = self.conf('from')
from_folder = sp(self.conf('from'))
to_folder = sp(self.conf('to'))
# Get movie folder to process
movie_folder = release_download.get('folder')
# Get media folder to process
media_folder = release_download.get('folder')
# Get all folders that should not be processed
no_process = [to_folder]
@ -109,73 +120,73 @@ class Renamer(Plugin):
pass
# Check to see if the no_process folders are inside the "from" folder.
if not os.path.isdir(from_folder) or not os.path.isdir(to_folder):
log.error('Both the "To" and "From" have to exist.')
if not os.path.isdir(base_folder) or not os.path.isdir(to_folder):
log.error('Both the "To" and "From" folders have to exist.')
return
else:
for item in no_process:
if from_folder in item:
log.error('To protect your data, the movie libraries can\'t be inside of or the same as the "from" folder.')
if isSubFolder(item, base_folder):
log.error('To protect your data, the media libraries can\'t be inside of or the same as the "from" folder.')
return
# Check to see if the no_process folders are inside the provided movie_folder
if movie_folder and not os.path.isdir(movie_folder):
log.debug('The provided movie folder %s does not exist. Trying to find it in the \'from\' folder.', movie_folder)
# Check to see if the no_process folders are inside the provided media_folder
if media_folder and not os.path.isdir(media_folder):
log.debug('The provided media folder %s does not exist. Trying to find it in the \'from\' folder.', media_folder)
# Update to the from folder
if len(splitString(release_download.get('files'), '|')) == 1:
new_movie_folder = from_folder
new_media_folder = from_folder
else:
new_movie_folder = os.path.join(from_folder, os.path.basename(movie_folder))
new_media_folder = os.path.join(from_folder, os.path.basename(media_folder))
if not os.path.isdir(new_movie_folder):
log.error('The provided movie folder %s does not exist and could also not be found in the \'from\' folder.', movie_folder)
if not os.path.isdir(new_media_folder):
log.error('The provided media folder %s does not exist and could also not be found in the \'from\' folder.', media_folder)
return
# Update the files
new_files = [os.path.join(new_movie_folder, os.path.relpath(filename, movie_folder)) for filename in splitString(release_download.get('files'), '|')]
new_files = [os.path.join(new_media_folder, os.path.relpath(filename, media_folder)) for filename in splitString(release_download.get('files'), '|')]
if new_files and not os.path.isfile(new_files[0]):
log.error('The provided movie folder %s does not exist and its files could also not be found in the \'from\' folder.', movie_folder)
log.error('The provided media folder %s does not exist and its files could also not be found in the \'from\' folder.', media_folder)
return
# Update release_download info to the from folder
log.debug('Release %s found in the \'from\' folder.', movie_folder)
release_download['folder'] = new_movie_folder
log.debug('Release %s found in the \'from\' folder.', media_folder)
release_download['folder'] = new_media_folder
release_download['files'] = '|'.join(new_files)
movie_folder = new_movie_folder
media_folder = new_media_folder
if movie_folder:
if media_folder:
for item in no_process:
if movie_folder in item:
log.error('To protect your data, the movie libraries can\'t be inside of or the same as the provided movie folder.')
if isSubFolder(item, media_folder):
log.error('To protect your data, the media libraries can\'t be inside of or the same as the provided media folder.')
return
# Make sure checkSnatched has marked all downloads/seeds as such
if not release_download and self.conf('run_every') > 0:
fireEvent('renamer.check_snatched')
self.checkSnatched(fire_scan = False)
self.renaming_started = True
# make sure the movie folder name is included in the search
# make sure the media folder name is included in the search
folder = None
files = []
if movie_folder:
log.info('Scanning movie folder %s...', movie_folder)
folder = os.path.dirname(movie_folder)
if media_folder:
log.info('Scanning media folder %s...', media_folder)
folder = os.path.dirname(media_folder)
if release_download.get('files', ''):
files = splitString(release_download['files'], '|')
# If there is only one file in the torrent, the downloader did not create a subfolder
if len(files) == 1:
folder = movie_folder
folder = media_folder
else:
# Get all files from the specified folder
try:
for root, folders, names in os.walk(movie_folder):
for root, folders, names in os.walk(media_folder):
files.extend([sp(os.path.join(root, name)) for name in names])
except:
log.error('Failed getting files from %s: %s', (movie_folder, traceback.format_exc()))
log.error('Failed getting files from %s: %s', (media_folder, traceback.format_exc()))
db = get_session()
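The old substring tests (from_folder in item, movie_folder in item) would treat /data/movies2 as living inside /data/movies. isSubFolder from couchpotato.core.helpers.variable fixes that; its implementation is not shown in this diff, but a minimal sketch of a path-safe check looks like this:

import os

def is_sub_folder(sub, parent):
    # Normalize both paths and append a separator, so the prefix test
    # can only match on a real directory boundary.
    parent = os.path.join(os.path.realpath(parent), '')
    sub = os.path.join(os.path.realpath(sub), '')
    return sub == parent or sub.startswith(parent)

print is_sub_folder('/data/movies/library', '/data/movies')  # True
print is_sub_folder('/data/movies2', '/data/movies')         # False; a plain 'in' test would say True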
@ -185,10 +196,10 @@ class Renamer(Plugin):
# Unpack any archives
extr_files = None
if self.conf('unrar'):
folder, movie_folder, files, extr_files = self.extractFiles(folder = folder, movie_folder = movie_folder, files = files,
folder, media_folder, files, extr_files = self.extractFiles(folder = folder, media_folder = media_folder, files = files,
cleanup = self.conf('cleanup') and not self.downloadIsTorrent(release_download))
groups = fireEvent('scanner.scan', folder = folder if folder else from_folder,
groups = fireEvent('scanner.scan', folder = folder if folder else base_folder,
files = files, release_download = release_download, return_ignored = False, single = True) or []
folder_name = self.conf('folder_name')
@ -201,6 +212,10 @@ class Renamer(Plugin):
done_status, active_status, downloaded_status, snatched_status, seeding_status = \
fireEvent('status.get', ['done', 'active', 'downloaded', 'snatched', 'seeding'], single = True)
# Tag release folder as failed_rename in case no groups were found. This prevents check_snatched from removing the release from the downloader.
if not groups and self.statusInfoComplete(release_download):
self.tagRelease(release_download = release_download, tag = 'failed_rename')
for group_identifier in groups:
group = groups[group_identifier]
@ -497,7 +512,10 @@ class Renamer(Plugin):
os.remove(src)
parent_dir = os.path.dirname(src)
if delete_folders.count(parent_dir) == 0 and os.path.isdir(parent_dir) and not parent_dir in [destination, movie_folder] and not from_folder in parent_dir:
if delete_folders.count(parent_dir) == 0 and os.path.isdir(parent_dir) and \
not isSubFolder(destination, parent_dir) and not isSubFolder(media_folder, parent_dir) and \
not isSubFolder(parent_dir, base_folder):
delete_folders.append(parent_dir)
except:
@ -513,6 +531,7 @@ class Renamer(Plugin):
# Rename all files marked
group['renamed_files'] = []
failed_rename = False
for src in rename_files:
if rename_files[src]:
dst = rename_files[src]
@ -525,11 +544,20 @@ class Renamer(Plugin):
self.moveFile(src, dst, forcemove = not self.downloadIsTorrent(release_download) or self.fileIsAdded(src, group))
group['renamed_files'].append(dst)
except:
log.error('Failed moving the file "%s" : %s', (os.path.basename(src), traceback.format_exc()))
self.tagRelease(group = group, tag = 'failed_rename')
log.error('Failed renaming the file "%s" : %s', (os.path.basename(src), traceback.format_exc()))
failed_rename = True
break
# If renaming failed, tag the release folder as failed and continue with the next group. Note that all old files have already been deleted.
if failed_rename:
self.tagRelease(group = group, tag = 'failed_rename')
continue
# If renaming succeeded, make sure it is not tagged as failed (scanner didn't return a group, but a download_ID was provided in an earlier attempt)
else:
self.untagRelease(group = group, tag = 'failed_rename')
# Tag folder if it is in the 'from' folder and it will not be removed because it is a torrent
if self.movieInFromFolder(movie_folder) and self.downloadIsTorrent(release_download):
if self.movieInFromFolder(media_folder) and self.downloadIsTorrent(release_download):
self.tagRelease(group = group, tag = 'renamed_already')
# Remove matching releases
@ -541,12 +569,12 @@ class Renamer(Plugin):
log.error('Failed removing %s: %s', (release.identifier, traceback.format_exc()))
if group['dirname'] and group['parentdir'] and not self.downloadIsTorrent(release_download):
if movie_folder:
if media_folder:
# Delete the movie folder
group_folder = movie_folder
group_folder = media_folder
else:
# Delete the first empty subfolder in the tree relative to the 'from' folder
group_folder = sp(os.path.join(from_folder, os.path.relpath(group['parentdir'], from_folder).split(os.path.sep)[0]))
group_folder = sp(os.path.join(base_folder, os.path.relpath(group['parentdir'], base_folder).split(os.path.sep)[0]))
try:
log.info('Deleting folder: %s', group_folder)
@ -564,7 +592,7 @@ class Renamer(Plugin):
# Break if CP wants to shut down
if self.shuttingDown():
break
self.renaming_started = False
def getRenameExtras(self, extra_type = '', replacements = None, folder_name = '', file_name = '', destination = '', group = None, current_file = '', remove_multiple = False):
@ -614,28 +642,46 @@ Remove it if you want it to be renamed (again, or at least let it try again)
tag_files.extend([os.path.join(root, name) for name in names])
for filename in tag_files:
# Dont tag .ignore files
if os.path.splitext(filename)[1] == '.ignore':
continue
tag_filename = '%s.%s.ignore' % (os.path.splitext(filename)[0], tag)
if not os.path.isfile(tag_filename):
self.createFile(tag_filename, text)
def untagRelease(self, release_download, tag = ''):
def untagRelease(self, group = None, release_download = None, tag = ''):
if not release_download:
return
tag_files = []
folder = None
folder = release_download['folder']
if not os.path.isdir(folder):
return False
# Untag movie files if they are known
if isinstance(group, dict):
tag_files = [sorted(list(group['files']['movie']))[0]]
# Untag download_files if they are known
if release_download['files']:
tag_files = splitString(release_download['files'], '|')
folder = group['parentdir']
if not group.get('dirname') or not os.path.isdir(folder):
return False
# Untag all files in release folder
else:
for root, folders, names in os.walk(release_download['folder']):
tag_files.extend([sp(os.path.join(root, name)) for name in names if not os.path.splitext(name)[1] == '.ignore'])
elif isinstance(release_download, dict):
# Untag download_files if they are known
if release_download['files']:
tag_files = splitString(release_download['files'], '|')
# Untag all files in release folder
else:
for root, folders, names in os.walk(release_download['folder']):
tag_files.extend([sp(os.path.join(root, name)) for name in names if not os.path.splitext(name)[1] == '.ignore'])
folder = release_download['folder']
if not os.path.isdir(folder):
return False
if not folder:
return False
# Find all .ignore files in folder
ignore_files = []
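tagRelease and untagRelease communicate through sidecar files placed next to the media files, named &lt;base name&gt;.&lt;tag&gt;.ignore (see the tag_filename construction earlier in this file). A tiny sketch of the scheme:

import os

def tag_filename(media_file, tag):
    # 'Some.Movie.2013.mkv' tagged 'downloading' becomes
    # 'Some.Movie.2013.downloading.ignore' in the same folder
    return '%s.%s.ignore' % (os.path.splitext(media_file)[0], tag)

print tag_filename('/downloads/Some.Movie.2013.mkv', 'failed_rename')
# /downloads/Some.Movie.2013.failed_rename.ignore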
@ -644,7 +690,7 @@ Remove it if you want it to be renamed (again, or at least let it try again)
# Match all found ignore files with the tag_files and delete if found
for tag_file in tag_files:
ignore_file = fnmatch.filter(ignore_files, '%s.%s.ignore' % (re.escape(os.path.splitext(tag_file)[0]), tag if tag else '*'))
ignore_file = fnmatch.filter(ignore_files, fnEscape('%s.%s.ignore' % (os.path.splitext(tag_file)[0], tag if tag else '*')))
for filename in ignore_file:
try:
os.remove(filename)
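The switch from re.escape to fnEscape matters because fnmatch has no backslash escaping: the backslashes re.escape inserts are matched literally, so release names containing brackets never matched their own .ignore files. fnEscape lives in couchpotato.core.helpers.variable and is not shown in this diff; one way to write such a helper is to wrap fnmatch's special bracket characters in character classes while leaving the '*' wildcard used above intact:

import fnmatch
import re

def fn_escape(pattern):
    # '[' -> '[[]' and ']' -> '[]]' make the brackets match literally;
    # '*' and '?' are left alone so they keep working as wildcards.
    return re.sub(r'([\[\]])', r'[\1]', pattern)

name = 'Movie [2013].failed_rename.ignore'
print fnmatch.filter([name], fn_escape('Movie [2013].*.ignore'))  # [name]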
@ -677,7 +723,7 @@ Remove it if you want it to be renamed (again, or at least let it try again)
# Match all found ignore files with the tag_files and return True found
for tag_file in tag_files:
ignore_file = fnmatch.filter(ignore_files, '%s.%s.ignore' % (os.path.splitext(tag_file)[0], tag if tag else '*'))
ignore_file = fnmatch.filter(ignore_files, fnEscape('%s.%s.ignore' % (os.path.splitext(tag_file)[0], tag if tag else '*')))
if ignore_file:
return True
@ -790,7 +836,7 @@ Remove it if you want it to be renamed (again, or at least let it try again)
except:
loge('Couldn\'t remove empty directory %s: %s', (folder, traceback.format_exc()))
def checkSnatched(self):
def checkSnatched(self, fire_scan = True):
if self.checking_snatched:
log.debug('Already checking snatched')
@ -806,126 +852,169 @@ Remove it if you want it to be renamed (again, or at least let it try again)
Release.status_id.in_([snatched_status.get('id'), seeding_status.get('id'), missing_status.get('id')])
).all()
if not rels:
#No releases found that need status checking
self.checking_snatched = False
return True
# Collect all download information with the download IDs from the releases
download_ids = []
no_status_support = []
try:
for rel in rels:
rel_dict = rel.to_dict({'info': {}})
if rel_dict['info'].get('download_id') and rel_dict['info'].get('download_downloader'):
download_ids.append({'id': rel_dict['info']['download_id'], 'downloader': rel_dict['info']['download_downloader']})
ds = rel_dict['info'].get('download_status_support')
if ds == False or ds == 'False':
no_status_support.append(ss(rel_dict['info'].get('download_downloader')))
except:
log.error('Error getting download IDs from database')
self.checking_snatched = False
return False
release_downloads = fireEvent('download.status', download_ids, merge = True) if download_ids else []
if len(no_status_support) > 0:
log.debug('Download status functionality is not implemented for one of the active downloaders: %s', no_status_support)
if not release_downloads:
if fire_scan:
self.scan()
self.checking_snatched = False
return True
scan_releases = []
scan_required = False
if rels:
log.debug('Checking status snatched releases...')
log.debug('Checking status of snatched releases...')
release_downloads = fireEvent('download.status', merge = True)
if not release_downloads:
log.debug('Download status functionality is not implemented for active downloaders.')
scan_required = True
else:
try:
for rel in rels:
rel_dict = rel.to_dict({'info': {}})
movie_dict = fireEvent('movie.get', rel.movie_id, single = True)
try:
for rel in rels:
rel_dict = rel.to_dict({'info': {}})
movie_dict = fireEvent('media.get', media_id = rel.movie_id, single = True)
if not isinstance(rel_dict['info'], dict):
log.error('Faulty release found without any info, ignoring.')
fireEvent('release.update_status', rel.id, status = ignored_status, single = True)
continue
if not isinstance(rel_dict['info'], (dict)):
log.error('Faulty release found without any info, ignoring.')
# Check if download ID is available
if not rel_dict['info'].get('download_id') or not rel_dict['info'].get('download_downloader'):
log.debug('Download status functionality is not implemented for downloader (%s) of release %s.', (rel_dict['info'].get('download_downloader', 'unknown'), rel_dict['info']['name']))
scan_required = True
# Continue with next release
continue
# Find release in downloaders
nzbname = self.createNzbName(rel_dict['info'], movie_dict)
found_release = False
for release_download in release_downloads:
found_release = False
if rel_dict['info'].get('download_id'):
if release_download['id'] == rel_dict['info']['download_id'] and release_download['downloader'] == rel_dict['info']['download_downloader']:
log.debug('Found release by id: %s', release_download['id'])
found_release = True
break
else:
if release_download['name'] == nzbname or rel_dict['info']['name'] in release_download['name'] or getImdb(release_download['name']) == movie_dict['library']['identifier']:
log.debug('Found release by release name or imdb ID: %s', release_download['name'])
found_release = True
break
if not found_release:
log.info('%s not found in downloaders', nzbname)
#Check status if already missing and for how long; if > 1 week, set to ignored, else to missing
if rel.status_id == missing_status.get('id'):
if rel.last_edit < int(time.time()) - 7 * 24 * 60 * 60:
fireEvent('release.update_status', rel.id, status = ignored_status, single = True)
continue
# check status
nzbname = self.createNzbName(rel_dict['info'], movie_dict)
found = False
for release_download in release_downloads:
found_release = False
if rel_dict['info'].get('download_id'):
if release_download['id'] == rel_dict['info']['download_id'] and release_download['downloader'] == rel_dict['info']['download_downloader']:
log.debug('Found release by id: %s', release_download['id'])
found_release = True
else:
if release_download['name'] == nzbname or rel_dict['info']['name'] in release_download['name'] or getImdb(release_download['name']) == movie_dict['library']['identifier']:
found_release = True
else:
# Set the release to missing
fireEvent('release.update_status', rel.id, status = missing_status, single = True)
if found_release:
timeleft = 'N/A' if release_download['timeleft'] == -1 else release_download['timeleft']
log.debug('Found %s: %s, time to go: %s', (release_download['name'], release_download['status'].upper(), timeleft))
# Continue with next release
continue
if release_download['status'] == 'busy':
# Set the release to snatched if it was missing before
fireEvent('release.update_status', rel.id, status = snatched_status, single = True)
# Log that we found the release
timeleft = 'N/A' if release_download['timeleft'] == -1 else release_download['timeleft']
log.debug('Found %s: %s, time to go: %s', (release_download['name'], release_download['status'].upper(), timeleft))
# Tag folder if it is in the 'from' folder and it will not be processed because it is still downloading
if self.movieInFromFolder(release_download['folder']):
self.tagRelease(release_download = release_download, tag = 'downloading')
# Check status of release
if release_download['status'] == 'busy':
# Set the release to snatched if it was missing before
fireEvent('release.update_status', rel.id, status = snatched_status, single = True)
elif release_download['status'] == 'seeding':
#If linking setting is enabled, process release
if self.conf('file_action') != 'move' and not rel.status_id == seeding_status.get('id') and self.statusInfoComplete(release_download):
log.info('Download of %s completed! It is now being processed while leaving the original files alone for seeding. Current ratio: %s.', (release_download['name'], release_download['seed_ratio']))
# Tag folder if it is in the 'from' folder and it will not be processed because it is still downloading
if self.movieInFromFolder(release_download['folder']):
self.tagRelease(release_download = release_download, tag = 'downloading')
# Remove the downloading tag
self.untagRelease(release_download = release_download, tag = 'downloading')
elif release_download['status'] == 'seeding':
#If linking setting is enabled, process release
if self.conf('file_action') != 'move' and not rel.status_id == seeding_status.get('id') and self.statusInfoComplete(release_download):
log.info('Download of %s completed! It is now being processed while leaving the original files alone for seeding. Current ratio: %s.', (release_download['name'], release_download['seed_ratio']))
# Scan and set the torrent to paused if required
release_download.update({'pause': True, 'scan': True, 'process_complete': False})
scan_releases.append(release_download)
else:
#let it seed
log.debug('%s is seeding with ratio: %s', (release_download['name'], release_download['seed_ratio']))
# Remove the downloading tag
self.untagRelease(release_download = release_download, tag = 'downloading')
# Set the release to seeding
fireEvent('release.update_status', rel.id, status = seeding_status, single = True)
elif release_download['status'] == 'failed':
# Set the release to failed
fireEvent('release.update_status', rel.id, status = failed_status, single = True)
fireEvent('download.remove_failed', release_download, single = True)
if self.conf('next_on_failed'):
fireEvent('movie.searcher.try_next_release', movie_id = rel.movie_id)
elif release_download['status'] == 'completed':
log.info('Download of %s completed!', release_download['name'])
if self.statusInfoComplete(release_download):
# If the release has been seeding, process now that the seeding is done
if rel.status_id == seeding_status.get('id'):
if self.conf('file_action') != 'move':
# Set the release to done as the movie has already been renamed
fireEvent('release.update_status', rel.id, status = downloaded_status, single = True)
# Allow the downloader to clean-up
release_download.update({'pause': False, 'scan': False, 'process_complete': True})
scan_releases.append(release_download)
else:
# Scan and Allow the downloader to clean-up
release_download.update({'pause': False, 'scan': True, 'process_complete': True})
scan_releases.append(release_download)
else:
# Set the release to snatched if it was missing before
fireEvent('release.update_status', rel.id, status = snatched_status, single = True)
# Remove the downloading tag
self.untagRelease(release_download = release_download, tag = 'downloading')
# Scan and Allow the downloader to clean-up
release_download.update({'pause': False, 'scan': True, 'process_complete': True})
scan_releases.append(release_download)
else:
scan_required = True
found = True
break
# Scan and set the torrent to paused if required
release_download.update({'pause': True, 'scan': True, 'process_complete': False})
scan_releases.append(release_download)
else:
#let it seed
log.debug('%s is seeding with ratio: %s', (release_download['name'], release_download['seed_ratio']))
# Set the release to seeding
fireEvent('release.update_status', rel.id, status = seeding_status, single = True)
elif release_download['status'] == 'failed':
# Set the release to failed
fireEvent('release.update_status', rel.id, status = failed_status, single = True)
fireEvent('download.remove_failed', release_download, single = True)
if self.conf('next_on_failed'):
fireEvent('movie.searcher.try_next_release', media_id = rel.movie_id)
elif release_download['status'] == 'completed':
log.info('Download of %s completed!', release_download['name'])
if not found:
log.info('%s not found in downloaders', nzbname)
#Make sure the downloader sent over a path to look in
if self.statusInfoComplete(release_download):
#Check status if already missing and for how long; if > 1 week, set to ignored, else to missing
if rel.status_id == missing_status.get('id'):
if rel.last_edit < int(time.time()) - 7 * 24 * 60 * 60:
fireEvent('release.update_status', rel.id, status = ignored_status, single = True)
# If the release has been seeding, process now that the seeding is done
if rel.status_id == seeding_status.get('id'):
if self.conf('file_action') != 'move':
# Set the release to done as the movie has already been renamed
fireEvent('release.update_status', rel.id, status = downloaded_status, single = True)
# Allow the downloader to clean-up
release_download.update({'pause': False, 'scan': False, 'process_complete': True})
scan_releases.append(release_download)
else:
# Set the release to missing
fireEvent('release.update_status', rel.id, status = missing_status, single = True)
# Scan and Allow the downloader to clean-up
release_download.update({'pause': False, 'scan': True, 'process_complete': True})
scan_releases.append(release_download)
except:
log.error('Failed checking for release in downloader: %s', traceback.format_exc())
else:
# Set the release to snatched if it was missing before
fireEvent('release.update_status', rel.id, status = snatched_status, single = True)
# Remove the downloading tag
self.untagRelease(release_download = release_download, tag = 'downloading')
# Scan and Allow the downloader to clean-up
release_download.update({'pause': False, 'scan': True, 'process_complete': True})
scan_releases.append(release_download)
else:
scan_required = True
except:
log.error('Failed checking for release in downloader: %s', traceback.format_exc())
# The following can either be done here, or inside the scanner if we pass it scan_items in one go
for release_download in scan_releases:
@ -933,7 +1022,7 @@ Remove it if you want it to be renamed (again, or at least let it try again)
if release_download['scan']:
if release_download['pause'] and self.conf('file_action') == 'link':
fireEvent('download.pause', release_download = release_download, pause = True, single = True)
fireEvent('renamer.scan', release_download = release_download)
self.scan(release_download = release_download)
if release_download['pause'] and self.conf('file_action') == 'link':
fireEvent('download.pause', release_download = release_download, pause = False, single = True)
if release_download['process_complete']:
@ -944,11 +1033,10 @@ Remove it if you want it to be renamed (again, or at least let it try again)
# Ask the downloader to process the item
fireEvent('download.process_complete', release_download = release_download, single = True)
if scan_required:
fireEvent('renamer.scan')
if fire_scan and (scan_required or len(no_status_support) > 0):
self.scan()
self.checking_snatched = False
return True
def extendReleaseDownload(self, release_download):
@ -993,12 +1081,12 @@ Remove it if you want it to be renamed (again, or at least let it try again)
return src in group['before_rename']
def statusInfoComplete(self, release_download):
return release_download['id'] and release_download['downloader'] and release_download['folder']
return release_download.get('id') and release_download.get('downloader') and release_download.get('folder')
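statusInfoComplete switching from item access to .get() is a small but real fix: release_download dicts built from a bare folder (see scanView above) carry no 'id' or 'downloader' keys at all, and [] access would raise rather than return falsy:

incomplete = {'folder': '/downloads/x'}   # as built when only media_folder is passed
print incomplete.get('id')                # None, so the check quietly evaluates False
# incomplete['id'] would raise KeyError and abort checkSnatched with a traceback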
def movieInFromFolder(self, movie_folder):
return movie_folder and sp(self.conf('from')) in sp(movie_folder) or not movie_folder
def movieInFromFolder(self, media_folder):
return media_folder and isSubFolder(media_folder, sp(self.conf('from'))) or not media_folder
def extractFiles(self, folder = None, movie_folder = None, files = None, cleanup = False):
def extractFiles(self, folder = None, media_folder = None, files = None, cleanup = False):
if not files: files = []
# RegEx for finding rar files
@ -1013,7 +1101,7 @@ Remove it if you want it to be renamed (again, or at least let it try again)
folder = from_folder
check_file_date = True
if movie_folder:
if media_folder:
check_file_date = False
if not files:
@ -1109,18 +1197,18 @@ Remove it if you want it to be renamed (again, or at least let it try again)
if cleanup:
# Remove all left over folders
log.debug('Removing old movie folder %s...', movie_folder)
self.deleteEmptyFolder(movie_folder)
log.debug('Removing old movie folder %s...', media_folder)
self.deleteEmptyFolder(media_folder)
movie_folder = os.path.join(from_folder, os.path.relpath(movie_folder, folder))
media_folder = os.path.join(from_folder, os.path.relpath(media_folder, folder))
folder = from_folder
if extr_files:
files.extend(extr_files)
# Cleanup files and folder if movie_folder was not provided
if not movie_folder:
# Cleanup files and folder if media_folder was not provided
if not media_folder:
files = []
folder = None
return folder, movie_folder, files, extr_files
return folder, media_folder, files, extr_files

8
couchpotato/core/plugins/scanner/main.py

@ -80,7 +80,8 @@ class Scanner(Plugin):
'hdtv': ['hdtv']
}
clean = '[ _\,\.\(\)\[\]\-](extended.cut|directors.cut|french|swedisch|danish|dutch|swesub|spanish|german|ac3|dts|custom|dc|divx|divx5|dsr|dsrip|dutch|dvd|dvdr|dvdrip|dvdscr|dvdscreener|screener|dvdivx|cam|fragment|fs|hdtv|hdrip|hdtvrip|internal|limited|multisubs|ntsc|ogg|ogm|pal|pdtv|proper|repack|rerip|retail|r3|r5|bd5|se|svcd|swedish|german|read.nfo|nfofix|unrated|ws|telesync|ts|telecine|tc|brrip|bdrip|video_ts|audio_ts|480p|480i|576p|576i|720p|720i|1080p|1080i|hrhd|hrhdtv|hddvd|bluray|x264|h264|xvid|xvidvd|xxx|www.www|cd[1-9]|\[.*\])([ _\,\.\(\)\[\]\-]|$)'
clean = '[ _\,\.\(\)\[\]\-]?(extended.cut|directors.cut|french|swedisch|danish|dutch|swesub|spanish|german|ac3|dts|custom|dc|divx|divx5|dsr|dsrip|dutch|dvd|dvdr|dvdrip|dvdscr|dvdscreener|screener|dvdivx|cam|fragment|fs|hdtv|hdrip' \
'|hdtvrip|internal|limited|multisubs|ntsc|ogg|ogm|pal|pdtv|proper|repack|rerip|retail|r3|r5|bd5|se|svcd|swedish|german|read.nfo|nfofix|unrated|ws|telesync|ts|telecine|tc|brrip|bdrip|video_ts|audio_ts|480p|480i|576p|576i|720p|720i|1080p|1080i|hrhd|hrhdtv|hddvd|bluray|x264|h264|xvid|xvidvd|xxx|www.www|cd[1-9]|\[.*\])([ _\,\.\(\)\[\]\-]|$)'
multipart_regex = [
'[ _\.-]+cd[ _\.-]*([0-9a-d]+)', #*cd1
'[ _\.-]+dvd[ _\.-]*([0-9a-d]+)', #*dvd1
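Making the leading separator class optional ([...]? instead of [...]) lets the cleaner also strip quality tags glued directly to the title. A trimmed-down illustration with only a few of the tags from the pattern above:

import re

clean = '[ _\,\.\(\)\[\]\-]?(720p|1080p|x264|bluray)([ _\,\.\(\)\[\]\-]|$)'
print bool(re.search(clean, 'Some.Movie.2013.720p.mkv', re.I))  # True, separator present
print bool(re.search(clean, 'SomeMovie720p.mkv', re.I))         # True only with the optional '?'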
@ -454,7 +455,7 @@ class Scanner(Plugin):
data['resolution_width'] = meta.get('resolution_width', 720)
data['resolution_height'] = meta.get('resolution_height', 480)
data['audio_channels'] = meta.get('audio_channels', 2.0)
data['aspect'] = meta.get('resolution_width', 720) / meta.get('resolution_height', 480)
data['aspect'] = round(float(meta.get('resolution_width', 720)) / meta.get('resolution_height', 480), 2)
except:
log.debug('Error parsing metadata: %s %s', (cur_file, traceback.format_exc()))
pass
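The aspect fix is a classic Python 2 integer-division bug: both metadata values are ints, so the old expression floored to 1 for virtually every file. Worked out:

print 720 / 480                         # 1    (Python 2 floor division on ints)
print round(float(720) / 480, 2)        # 1.5  (the corrected 3:2 aspect)
print round(float(1920) / 1080, 2)      # 1.78 (16:9)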
@ -760,7 +761,8 @@ class Scanner(Plugin):
# Year
if year and identifier[:4] != year:
identifier = '%s %s' % (identifier.split(year)[0].strip(), year)
split_by = ':::' if ':::' in identifier else year
identifier = '%s %s' % (identifier.split(split_by)[0].strip(), year)
else:
identifier = identifier.split('::')[0]

4
couchpotato/core/plugins/score/main.py

@ -35,8 +35,8 @@ class Score(Plugin):
# Torrents only
if nzb.get('seeders'):
try:
score += nzb.get('seeders') / 5
score += nzb.get('leechers') / 10
score += nzb.get('seeders') * 100 / 15
score += nzb.get('leechers') * 100 / 30
except:
pass
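The new weighting makes swarm health dominate the score far more than before; under Python 2 integer arithmetic the two formulas work out as follows for a healthy torrent:

seeders, leechers = 30, 10
print seeders / 5 + leechers / 10                 # 7,   old weighting
print seeders * 100 / 15 + leechers * 100 / 30    # 233, new weighting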

2
couchpotato/core/plugins/subtitle/__init__.py

@ -20,7 +20,7 @@ config = [{
},
{
'name': 'languages',
'description': 'Comma separated, 2 letter country code. Example: en, nl. See the codes at <a href="http://en.wikipedia.org/wiki/List_of_ISO_639-1_codes">on Wikipedia</a>',
'description': ('Comma separated, 2 letter country code.', 'Example: en, nl. See the codes <a href="http://en.wikipedia.org/wiki/List_of_ISO_639-1_codes">on Wikipedia</a>'),
},
# {
# 'name': 'automatic',

2
couchpotato/core/plugins/userscript/main.py

@ -13,7 +13,7 @@ log = CPLog(__name__)
class Userscript(Plugin):
version = 3
version = 4
def __init__(self):
addApiView('userscript.get/(.*)/(.*)', self.getUserScript, static = True)

2
couchpotato/core/plugins/userscript/template.js

@ -64,7 +64,7 @@ var addStyle = function(css) {
// Styles
addStyle('\
#cp_popup { font-family: "Helvetica Neue", Helvetica, Arial, Geneva, sans-serif; -moz-border-radius: 6px 0px 0px 6px; -webkit-border-radius: 6px 0px 0px 6px; border-radius: 6px 0px 0px 6px; -moz-box-shadow: 0 0 20px rgba(0,0,0,0.5); -webkit-box-shadow: 0 0 20px rgba(0,0,0,0.5); box-shadow: 0 0 20px rgba(0,0,0,0.5); position:fixed; z-index:9999; bottom:0; right:0; font-size:15px; margin: 20px 0; display: block; background:#4E5969; } \
#cp_popup { font-family: "Helvetica Neue", Helvetica, Arial, Geneva, sans-serif; -moz-border-radius: 6px 0px 0px 6px; -webkit-border-radius: 6px 0px 0px 6px; border-radius: 6px 0px 0px 6px; -moz-box-shadow: 0 0 20px rgba(0,0,0,0.5); -webkit-box-shadow: 0 0 20px rgba(0,0,0,0.5); box-shadow: 0 0 20px rgba(0,0,0,0.5); position:fixed; z-index:20000; bottom:0; right:0; font-size:15px; margin: 20px 0; display: block; background:#4E5969; } \
#cp_popup.opened { width: 492px; } \
#cp_popup a#add_to { cursor:pointer; text-align:center; text-decoration:none; color: #000; display:block; padding:5px 0 5px 5px; } \
#cp_popup a#close_button { cursor:pointer; float: right; padding:120px 10px 10px; } \

2
couchpotato/core/providers/automation/imdb/__init__.py

@ -59,7 +59,7 @@ config = [{
{
'name': 'automation_charts_boxoffice',
'type': 'bool',
'label': 'Box offce TOP 10',
'label': 'Box office TOP 10',
'description': 'IMDB Box office <a href="http://www.imdb.com/chart/">TOP 10</a> chart',
'default': True,
},

45
couchpotato/core/providers/base.py

@ -1,16 +1,15 @@
from couchpotato.core.event import addEvent, fireEvent
from couchpotato.core.helpers.encoding import ss
from couchpotato.core.helpers.variable import tryFloat, mergeDicts, md5, \
possibleTitles, getTitle
from couchpotato.core.logger import CPLog
from couchpotato.core.plugins.base import Plugin
from couchpotato.environment import Env
from urlparse import urlparse
import cookielib
import json
import re
import time
import traceback
import urllib2
import xml.etree.ElementTree as XMLTree
log = CPLog(__name__)
@ -86,7 +85,7 @@ class Provider(Plugin):
if data and len(data) > 0:
try:
data = XMLTree.fromstring(data)
data = XMLTree.fromstring(ss(data))
return self.getElements(data, item_path)
except:
log.error('Failed parsing %s: %s', (self.getName(), traceback.format_exc()))
@ -95,7 +94,7 @@ class Provider(Plugin):
def getHTMLData(self, url, **kwargs):
cache_key = '%s%s' % (md5(url), md5('%s' % kwargs.get('params', {})))
cache_key = '%s%s' % (md5(url), md5('%s' % kwargs.get('data', {})))
return self.getCache(cache_key, url, **kwargs)
@ -111,8 +110,7 @@ class YarrProvider(Provider):
sizeMb = ['mb', 'mib']
sizeKb = ['kb', 'kib']
login_opener = None
last_login_check = 0
last_login_check = None
def __init__(self):
addEvent('provider.enabled_protocols', self.getEnabledProtocol)
@ -129,35 +127,30 @@ class YarrProvider(Provider):
# Check if we are still logged in every hour
now = time.time()
if self.login_opener and self.last_login_check < (now - 3600):
if self.last_login_check and self.last_login_check < (now - 3600):
try:
output = self.urlopen(self.urls['login_check'], opener = self.login_opener)
output = self.urlopen(self.urls['login_check'])
if self.loginCheckSuccess(output):
self.last_login_check = now
return True
else:
self.login_opener = None
except:
self.login_opener = None
except: pass
self.last_login_check = None
if self.login_opener:
if self.last_login_check:
return True
try:
cookiejar = cookielib.CookieJar()
opener = urllib2.build_opener(urllib2.HTTPCookieProcessor(cookiejar))
output = self.urlopen(self.urls['login'], params = self.getLoginParams(), opener = opener)
output = self.urlopen(self.urls['login'], data = self.getLoginParams())
if self.loginSuccess(output):
self.last_login_check = now
self.login_opener = opener
return True
error = 'unknown'
except:
error = traceback.format_exc()
self.login_opener = None
self.last_login_check = None
log.error('Failed to login %s: %s', (self.getName(), error))
return False
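With the per-provider login_opener gone, session cookies are presumably kept by the shared urlopen machinery, and the provider itself only tracks when the session was last verified. A condensed sketch of the hourly re-check logic above, with the network calls stubbed out:

import time

class ProviderSession(object):

    last_login_check = None

    def login(self):
        now = time.time()
        # Re-validate an existing session at most once per hour
        if self.last_login_check and self.last_login_check < now - 3600:
            if self.check_session():            # stub for urls['login_check']
                self.last_login_check = now
                return True
            self.last_login_check = None
        if self.last_login_check:
            return True                         # verified less than an hour ago
        if self.do_login():                     # stub for posting getLoginParams()
            self.last_login_check = now
            return True
        return False

    def check_session(self):
        return False  # stub

    def do_login(self):
        return True   # stub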
@ -171,16 +164,16 @@ class YarrProvider(Provider):
try:
if not self.login():
log.error('Failed downloading from %s', self.getName())
return self.urlopen(url, opener = self.login_opener)
return self.urlopen(url, return_raw = True)
except:
log.error('Failed downloading from %s: %s', (self.getName(), traceback.format_exc()))
def getLoginParams(self):
return ''
return {}
def download(self, url = '', nzb_id = ''):
try:
return self.urlopen(url, headers = {'User-Agent': Env.getIdentifier()}, show_error = False)
return self.urlopen(url, headers = {'User-Agent': Env.getIdentifier()}, show_error = False, return_raw = True)
except:
log.error('Failed getting nzb from %s: %s', (self.getName(), traceback.format_exc()))
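Note the recurring pattern in the provider hunks below: every getLoginParams now returns a plain dict instead of a tryUrlencode()d string, and urlopen receives it via the data keyword, which presumably makes urlopen responsible for the form encoding (and for sending a POST). The old and new call sites are equivalent in what goes over the wire:

import urllib

params = {'username': 'user', 'password': 'secret', 'login': 'submit'}

# Before: each provider pre-encoded the body itself
body = urllib.urlencode(params)
print body  # e.g. username=user&login=submit&password=secret (dict order varies)

# After: self.urlopen(url, data = params) is handed the raw dict instead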
@ -264,14 +257,14 @@ class ResultList(list):
result_ids = None
provider = None
movie = None
media = None
quality = None
def __init__(self, provider, movie, quality, **kwargs):
def __init__(self, provider, media, quality, **kwargs):
self.result_ids = []
self.provider = provider
self.movie = movie
self.media = media
self.quality = quality
self.kwargs = kwargs
@ -285,13 +278,13 @@ class ResultList(list):
new_result = self.fillResult(result)
is_correct = fireEvent('searcher.correct_release', new_result, self.movie, self.quality,
is_correct = fireEvent('searcher.correct_release', new_result, self.media, self.quality,
imdb_results = self.kwargs.get('imdb_results', False), single = True)
if is_correct and new_result['id'] not in self.result_ids:
is_correct_weight = float(is_correct)
new_result['score'] += fireEvent('score.calculate', new_result, self.movie, single = True)
new_result['score'] += fireEvent('score.calculate', new_result, self.media, single = True)
old_score = new_result['score']
new_result['score'] = int(old_score * is_correct_weight)

11
couchpotato/core/providers/info/_modifier/main.py

@ -21,14 +21,17 @@ class MovieResultModifier(Plugin):
'poster': [],
'backdrop': [],
'poster_original': [],
'backdrop_original': []
'backdrop_original': [],
'actors': {}
},
'runtime': 0,
'plot': '',
'tagline': '',
'imdb': '',
'genres': [],
'mpaa': None
'mpaa': None,
'actors': [],
'actor_roles': {}
}
def __init__(self):
@ -93,11 +96,11 @@ class MovieResultModifier(Plugin):
for movie in l.movies:
if movie.status_id == active_status['id']:
temp['in_wanted'] = fireEvent('movie.get', movie.id, single = True)
temp['in_wanted'] = fireEvent('media.get', movie.id, single = True)
for release in movie.releases:
if release.status_id == done_status['id']:
temp['in_library'] = fireEvent('movie.get', movie.id, single = True)
temp['in_library'] = fireEvent('media.get', movie.id, single = True)
except:
log.error('Tried getting more info on searched movies: %s', traceback.format_exc())

4
couchpotato/core/providers/info/couchpotatoapi/main.py

@ -74,7 +74,7 @@ class CouchPotatoApi(MovieProvider):
return True
def getInfo(self, identifier = None):
def getInfo(self, identifier = None, **kwargs):
if not identifier:
return
@ -97,7 +97,7 @@ class CouchPotatoApi(MovieProvider):
if not ignore: ignore = []
if not movies: movies = []
suggestions = self.getJsonData(self.urls['suggest'], params = {
suggestions = self.getJsonData(self.urls['suggest'], data = {
'movies': ','.join(movies),
'ignore': ','.join(ignore),
}, headers = self.getRequestHeaders())

4
couchpotato/core/providers/info/omdbapi/main.py

@ -39,14 +39,14 @@ class OMDBAPI(MovieProvider):
if cached:
result = self.parseMovie(cached)
if result.get('titles') and len(result.get('titles')) > 0:
log.info('Found: %s', result['titles'][0] + ' (' + str(result['year']) + ')')
log.info('Found: %s', result['titles'][0] + ' (' + str(result.get('year')) + ')')
return [result]
return []
return []
def getInfo(self, identifier = None):
def getInfo(self, identifier = None, **kwargs):
if not identifier:
return {}

54
couchpotato/core/providers/info/themoviedb/main.py

@ -11,8 +11,8 @@ log = CPLog(__name__)
class TheMovieDb(MovieProvider):
def __init__(self):
addEvent('info.search', self.search, priority = 2)
addEvent('movie.search', self.search, priority = 2)
#addEvent('info.search', self.search, priority = 2)
#addEvent('movie.search', self.search, priority = 2)
addEvent('movie.info', self.getInfo, priority = 2)
addEvent('movie.info_by_tmdb', self.getInfo)
@ -45,7 +45,7 @@ class TheMovieDb(MovieProvider):
nr = 0
for movie in raw:
results.append(self.parseMovie(movie, with_titles = False))
results.append(self.parseMovie(movie, extended = False))
nr += 1
if nr == limit:
@ -61,7 +61,7 @@ class TheMovieDb(MovieProvider):
return results
def getInfo(self, identifier = None):
def getInfo(self, identifier = None, extended = True):
if not identifier:
return {}
@ -73,14 +73,20 @@ class TheMovieDb(MovieProvider):
try:
log.debug('Getting info: %s', cache_key)
movie = tmdb3.Movie(identifier)
result = self.parseMovie(movie)
self.setCache(cache_key, result)
try: exists = movie.title is not None
except: exists = False
if exists:
result = self.parseMovie(movie, extended = extended)
self.setCache(cache_key, result)
else:
result = {}
except:
pass
log.error('Failed getting info for %s: %s', (identifier, traceback.format_exc()))
return result
def parseMovie(self, movie, with_titles = True):
def parseMovie(self, movie, extended = True):
cache_key = 'tmdb.cache.%s' % movie.id
movie_data = self.getCache(cache_key)
@ -92,6 +98,14 @@ class TheMovieDb(MovieProvider):
poster_original = self.getImage(movie, type = 'poster', size = 'original')
backdrop_original = self.getImage(movie, type = 'backdrop', size = 'original')
images = {
'poster': [poster] if poster else [],
#'backdrop': [backdrop] if backdrop else [],
'poster_original': [poster_original] if poster_original else [],
'backdrop_original': [backdrop_original] if backdrop_original else [],
'actors': {}
}
# Genres
try:
genres = [genre.name for genre in movie.genres]
@ -103,18 +117,23 @@ class TheMovieDb(MovieProvider):
if not movie.releasedate or year == '1900' or year.lower() == 'none':
year = None
# Gather actors data
actors = {}
if extended:
for cast_item in movie.cast:
try:
actors[toUnicode(cast_item.name)] = toUnicode(cast_item.character)
images['actors'][toUnicode(cast_item.name)] = self.getImage(cast_item, type = 'profile', size = 'original')
except:
log.debug('Error getting cast info for %s: %s', (cast_item, traceback.format_exc()))
movie_data = {
'type': 'movie',
'via_tmdb': True,
'tmdb_id': movie.id,
'titles': [toUnicode(movie.title)],
'original_title': movie.originaltitle,
'images': {
'poster': [poster] if poster else [],
#'backdrop': [backdrop] if backdrop else [],
'poster_original': [poster_original] if poster_original else [],
'backdrop_original': [backdrop_original] if backdrop_original else [],
},
'images': images,
'imdb': movie.imdb,
'runtime': movie.runtime,
'released': str(movie.releasedate),
@ -122,12 +141,13 @@ class TheMovieDb(MovieProvider):
'plot': movie.overview,
'genres': genres,
'collection': getattr(movie.collection, 'name', None),
'actor_roles': actors
}
movie_data = dict((k, v) for k, v in movie_data.iteritems() if v)
# Add alternative names
if with_titles:
if extended:
movie_data['titles'].append(movie.originaltitle)
for alt in movie.alternate_titles:
alt_name = alt.title
@ -143,9 +163,9 @@ class TheMovieDb(MovieProvider):
image_url = ''
try:
image_url = getattr(movie, type).geturl(size = 'original')
image_url = getattr(movie, type).geturl(size = size)
except:
log.debug('Failed getting %s.%s for "%s"', (type, size, movie.title))
log.debug('Failed getting %s.%s for "%s"', (type, size, str(movie)))
return image_url
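The geturl(size = size) change fixes a silent bug: the size argument was accepted but the hard-coded 'original' meant every caller got the full-resolution URL. A hypothetical usage sketch, assuming the pytmdb3 API this provider imports (set_key and the image geturl method are taken from the calls visible in this diff; the key is a placeholder):

import tmdb3  # pytmdb3, as used by this provider

tmdb3.set_key('YOUR_TMDB_API_KEY')     # hypothetical placeholder key
movie = tmdb3.Movie(603)               # look up by TMDB id

# Before the fix both calls returned the original-size URL;
# now the requested size is honoured:
print movie.poster.geturl(size = 'w154')      # thumbnail-sized poster
print movie.poster.geturl(size = 'original')  # full size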

63
couchpotato/core/providers/metadata/xbmc/main.py

@ -65,7 +65,7 @@ class XBMC(MetaDataBase):
name = type
try:
if data['library'].get(type):
if movie_info.get(type):
el = SubElement(nfoxml, name)
el.text = toUnicode(movie_info.get(type, ''))
except:
@ -89,10 +89,18 @@ class XBMC(MetaDataBase):
genres.text = toUnicode(genre)
# Actors
for actor in movie_info.get('actors', []):
actors = SubElement(nfoxml, 'actor')
name = SubElement(actors, 'name')
name.text = toUnicode(actor)
for actor_name in movie_info.get('actor_roles', {}):
role_name = movie_info['actor_roles'][actor_name]
actor = SubElement(nfoxml, 'actor')
name = SubElement(actor, 'name')
name.text = toUnicode(actor_name)
if role_name:
role = SubElement(actor, 'role')
role.text = toUnicode(role_name)
if movie_info['images']['actors'].get(actor_name):
thumb = SubElement(actor, 'thumb')
thumb.text = toUnicode(movie_info['images']['actors'].get(actor_name))
# Directors
for director_name in movie_info.get('directors', []):
@ -112,6 +120,51 @@ class XBMC(MetaDataBase):
sorttitle = SubElement(nfoxml, 'sorttitle')
sorttitle.text = '%s %s' % (toUnicode(collection_name), movie_info.get('year'))
# Images
for image_url in movie_info['images']['poster_original']:
image = SubElement(nfoxml, 'thumb')
image.text = toUnicode(image_url)
fanart = SubElement(nfoxml, 'fanart')
for image_url in movie_info['images']['backdrop_original']:
image = SubElement(fanart, 'thumb')
image.text = toUnicode(image_url)
# Add trailer if found
trailer_found = False
if data.get('renamed_files'):
for filename in data.get('renamed_files'):
if 'trailer' in filename:
trailer = SubElement(nfoxml, 'trailer')
trailer.text = toUnicode(filename)
trailer_found = True
if not trailer_found and data['files'].get('trailer'):
trailer = SubElement(nfoxml, 'trailer')
trailer.text = toUnicode(data['files']['trailer'][0])
# Add file metadata
fileinfo = SubElement(nfoxml, 'fileinfo')
streamdetails = SubElement(fileinfo, 'streamdetails')
# Video data
if data['meta_data'].get('video'):
video = SubElement(streamdetails, 'video')
codec = SubElement(video, 'codec')
codec.text = toUnicode(data['meta_data']['video'])
aspect = SubElement(video, 'aspect')
aspect.text = str(data['meta_data']['aspect'])
width = SubElement(video, 'width')
width.text = str(data['meta_data']['resolution_width'])
height = SubElement(video, 'height')
height.text = str(data['meta_data']['resolution_height'])
# Audio data
if data['meta_data'].get('audio'):
audio = SubElement(streamdetails, 'audio')
codec = SubElement(audio, 'codec')
codec.text = toUnicode(data['meta_data'].get('audio'))
channels = SubElement(audio, 'channels')
channels.text = toUnicode(data['meta_data'].get('audio_channels'))
# Clean up the xml and return it
nfoxml = xml.dom.minidom.parseString(tostring(nfoxml))
xml_string = nfoxml.toprettyxml(indent = ' ')
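With the new fileinfo/streamdetails block, the written .nfo now carries the scanner's metadata in the layout XBMC expects. An illustrative fragment of the output (values invented for the example):

<fileinfo>
    <streamdetails>
        <video>
            <codec>x264</codec>
            <aspect>1.78</aspect>
            <width>1920</width>
            <height>1080</height>
        </video>
        <audio>
            <codec>dts</codec>
            <channels>6</channels>
        </audio>
    </streamdetails>
</fileinfo>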

4
couchpotato/core/providers/nzb/binsearch/main.py

@ -90,13 +90,13 @@ class BinSearch(NZBProvider):
def download(self, url = '', nzb_id = ''):
params = {
data = {
'action': 'nzb',
nzb_id: 'on'
}
try:
return self.urlopen(url, params = params, show_error = False)
return self.urlopen(url, data = data, show_error = False)
except:
log.error('Failed getting nzb from %s: %s', (self.getName(), traceback.format_exc()))

9
couchpotato/core/providers/nzb/newznab/__init__.py

@ -39,12 +39,19 @@ config = [{
'description': 'Starting score for each release found via this provider.',
},
{
'name': 'custom_tag',
'advanced': True,
'label': 'Custom tag',
'default': ',,,,,',
'description': 'Add custom tags, for example add rls=1 to get only scene releases from nzbs.org',
},
{
'name': 'api_key',
'default': ',,,,,',
'label': 'Api Key',
'description': 'Can be found on your profile page',
'type': 'combined',
'combine': ['use', 'host', 'api_key', 'extra_score'],
'combine': ['use', 'host', 'api_key', 'extra_score', 'custom_tag'],
},
],
},

24
couchpotato/core/providers/nzb/newznab/main.py

@ -10,6 +10,7 @@ from urllib2 import HTTPError
from urlparse import urlparse
import time
import traceback
import urllib2
log = CPLog(__name__)
@ -45,7 +46,7 @@ class Newznab(NZBProvider, RSS):
'imdbid': movie['library']['identifier'].replace('tt', ''),
'apikey': host['api_key'],
'extended': 1
})
}) + ('&%s' % host['custom_tag'] if host.get('custom_tag') else '')
url = '%s&%s' % (self.getUrl(host['host'], self.urls['search']), arguments)
nzbs = self.getRSSData(url, cache_timeout = 1800, headers = {'User-Agent': Env.getIdentifier()})
@ -99,6 +100,7 @@ class Newznab(NZBProvider, RSS):
hosts = splitString(self.conf('host'), clean = False)
api_keys = splitString(self.conf('api_key'), clean = False)
extra_score = splitString(self.conf('extra_score'), clean = False)
custom_tags = splitString(self.conf('custom_tag'), clean = False)
list = []
for nr in range(len(hosts)):
@ -109,11 +111,18 @@ class Newznab(NZBProvider, RSS):
try: host = hosts[nr]
except: host = ''
try: score = tryInt(extra_score[nr])
except: score = 0
try: custom_tag = custom_tags[nr]
except: custom_tag = ''
list.append({
'use': uses[nr],
'host': host,
'api_key': key,
'extra_score': tryInt(extra_score[nr]) if len(extra_score) > nr else 0
'extra_score': score,
'custom_tag': custom_tag
})
return list
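Newznab provider settings are 'combined' options: each field is stored as one comma separated string with one slot per configured host, which is why custom_tag defaults to ',,,,,' above. The loop rebuilds per-host dicts by index, tolerating missing slots; a stripped-down illustration:

hosts       = 'api.nzbs.org,my.indexer.tld'.split(',')
custom_tags = 'rls=1,'.split(',')   # only the first host carries a tag

for nr in range(len(hosts)):
    try: custom_tag = custom_tags[nr]
    except IndexError: custom_tag = ''
    print hosts[nr], custom_tag or '(no tag)'

# The non-empty tag is later appended verbatim to the search URL: '...&rls=1'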
@ -159,7 +168,16 @@ class Newznab(NZBProvider, RSS):
return 'try_next'
try:
data = self.urlopen(url, show_error = False)
# Get final redirected url
log.debug('Checking %s for redirects.', url)
req = urllib2.Request(url)
req.add_header('User-Agent', self.user_agent)
res = urllib2.urlopen(req)
finalurl = res.geturl()
if finalurl != url:
log.debug('Redirect url used: %s', finalurl)
data = self.urlopen(finalurl, show_error = False)
self.limits_reached[host] = False
return data
except HTTPError, e:

6
couchpotato/core/providers/torrent/bithdtv/main.py

@ -31,7 +31,7 @@ class BiTHDTV(TorrentProvider):
url = "%s&%s" % (self.urls['search'], arguments)
data = self.getHTMLData(url, opener = self.login_opener)
data = self.getHTMLData(url)
if data:
# Remove BiT-HDTV's output garbage so outdated BS4 versions successfully parse the HTML
@ -68,10 +68,10 @@ class BiTHDTV(TorrentProvider):
log.error('Failed getting results from %s: %s', (self.getName(), traceback.format_exc()))
def getLoginParams(self):
return tryUrlencode({
return {
'username': self.conf('username'),
'password': self.conf('password'),
})
}
def getMoreInfo(self, item):
full_description = self.getCache('bithdtv.%s' % item['id'], item['detail_url'], cache_timeout = 25920000)

9
couchpotato/core/providers/torrent/bitsoup/main.py

@ -28,13 +28,16 @@ class Bitsoup(TorrentProvider):
})
url = "%s&%s" % (self.urls['search'], arguments)
data = self.getHTMLData(url, opener = self.login_opener)
data = self.getHTMLData(url)
if data:
html = BeautifulSoup(data)
try:
result_table = html.find('table', attrs = {'class': 'koptekst'})
if not result_table or 'nothing found!' in data.lower():
return
entries = result_table.find_all('tr')
for result in entries[1:]:
@ -70,11 +73,11 @@ class Bitsoup(TorrentProvider):
def getLoginParams(self):
return tryUrlencode({
return {
'username': self.conf('username'),
'password': self.conf('password'),
'ssl': 'yes',
})
}
def loginSuccess(self, output):

11
couchpotato/core/providers/torrent/hdbits/main.py

@ -1,5 +1,4 @@
from bs4 import BeautifulSoup
from couchpotato.core.helpers.encoding import tryUrlencode
from couchpotato.core.helpers.variable import tryInt
from couchpotato.core.logger import CPLog
from couchpotato.core.providers.torrent.base import TorrentProvider
@ -23,7 +22,7 @@ class HDBits(TorrentProvider):
def _search(self, movie, quality, results):
data = self.getJsonData(self.urls['search'] % movie['library']['identifier'], opener = self.login_opener)
data = self.getJsonData(self.urls['search'] % movie['library']['identifier'])
if data:
try:
@ -42,15 +41,17 @@ class HDBits(TorrentProvider):
log.error('Failed getting results from %s: %s', (self.getName(), traceback.format_exc()))
def getLoginParams(self):
data = self.getHTMLData('https://hdbits.org/login')
data = self.getHTMLData('https://hdbits.org/login', cache_timeout = 0)
bs = BeautifulSoup(data)
secret = bs.find('input', attrs = {'name': 'lol'})['value']
return tryUrlencode({
return {
'uname': self.conf('username'),
'password': self.conf('password'),
'returnto': '/',
'lol': secret
})
}
def loginSuccess(self, output):
return '/logout.php' in output.lower()

8
couchpotato/core/providers/torrent/ilovetorrents/main.py

@ -42,7 +42,7 @@ class ILoveTorrents(TorrentProvider):
search_url = self.urls['search'] % (movieTitle, page, cats[0])
page += 1
data = self.getHTMLData(search_url, opener = self.login_opener)
data = self.getHTMLData(search_url)
if data:
try:
soup = BeautifulSoup(data)
@ -96,11 +96,11 @@ class ILoveTorrents(TorrentProvider):
log.error('Failed getting results from %s: %s', (self.getName(), traceback.format_exc()))
def getLoginParams(self):
return tryUrlencode({
return {
'username': self.conf('username'),
'password': self.conf('password'),
'submit': 'Welcome to ILT',
})
}
def getMoreInfo(self, item):
cache_key = 'ilt.%s' % item['id']
@ -109,7 +109,7 @@ class ILoveTorrents(TorrentProvider):
if not description:
try:
full_description = self.getHTMLData(item['detail_url'], opener = self.login_opener)
full_description = self.getHTMLData(item['detail_url'])
html = BeautifulSoup(full_description)
nfo_pre = html.find('td', attrs = {'class':'main'}).findAll('table')[1]
description = toUnicode(nfo_pre.text) if nfo_pre else ''

37
couchpotato/core/providers/torrent/iptorrents/main.py

@ -1,5 +1,5 @@
from bs4 import BeautifulSoup
from couchpotato.core.helpers.encoding import tryUrlencode
from couchpotato.core.helpers.encoding import tryUrlencode, toSafeString
from couchpotato.core.helpers.variable import tryInt
from couchpotato.core.logger import CPLog
from couchpotato.core.providers.torrent.base import TorrentProvider
@ -37,7 +37,7 @@ class IPTorrents(TorrentProvider):
while current_page <= pages and not self.shuttingDown():
url = self.urls['search'] % (self.getCatId(quality['identifier'])[0], freeleech, tryUrlencode('%s %s' % (title.replace(':', ''), movie['library']['year'])), current_page)
data = self.getHTMLData(url, opener = self.login_opener)
data = self.getHTMLData(url)
if data:
html = BeautifulSoup(data)
@ -57,21 +57,27 @@ class IPTorrents(TorrentProvider):
entries = result_table.find_all('tr')
columns = self.getColumns(entries)
if 'seeders' not in columns or 'leechers' not in columns:
log.warning('Unrecognized table format returned')
return
for result in entries[1:]:
torrent = result.find_all('td')
if len(torrent) <= 1:
cells = result.find_all('td')
if len(cells) <= 1:
break
torrent = torrent[1].find('a')
torrent = cells[1].find('a')
torrent_id = torrent['href'].replace('/details.php?id=', '')
torrent_name = torrent.string
torrent_download_url = self.urls['base_url'] + (result.find_all('td')[3].find('a'))['href'].replace(' ', '.')
torrent_details_url = self.urls['base_url'] + torrent['href']
torrent_size = self.parseSize(result.find_all('td')[5].string)
torrent_seeders = tryInt(result.find('td', attrs = {'class' : 'ac t_seeders'}).string)
torrent_leechers = tryInt(result.find('td', attrs = {'class' : 'ac t_leechers'}).string)
torrent_seeders = tryInt(cells[columns['seeders']].string)
torrent_leechers = tryInt(cells[columns['leechers']].string)
results.append({
'id': torrent_id,
@ -89,12 +95,25 @@ class IPTorrents(TorrentProvider):
current_page += 1
def getColumns(self, entries):
result = {}
for x, col in enumerate(entries[0].find_all('th')):
key = toSafeString(col.text).strip().lower()
if not key:
continue
result[key] = x
return result
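getColumns replaces hard-coded cell indexes with a header-driven lookup, so a reshuffled results table on the site degrades into the explicit warning above instead of silently mis-parsing seeders and leechers. The core of the technique in self-contained form (plain .text instead of the toSafeString helper):

from bs4 import BeautifulSoup

header = '<table><tr><th>Name</th><th>Size</th><th>Seeders</th><th>Leechers</th></tr></table>'
row = BeautifulSoup(header).find('tr')

columns = {}
for x, col in enumerate(row.find_all('th')):
    key = col.text.strip().lower()
    if key:
        columns[key] = x

print columns  # e.g. {'name': 0, 'size': 1, 'seeders': 2, 'leechers': 3}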
def getLoginParams(self):
return tryUrlencode({
return {
'username': self.conf('username'),
'password': self.conf('password'),
'login': 'submit',
})
}
def loginSuccess(self, output):
return 'don\'t have an account' not in output.lower()

6
couchpotato/core/providers/torrent/passthepopcorn/main.py

@ -65,7 +65,7 @@ class PassThePopcorn(TorrentProvider):
})
url = '%s?json=noredirect&%s' % (self.urls['torrent'], tryUrlencode(params))
res = self.getJsonData(url, opener = self.login_opener)
res = self.getJsonData(url)
try:
if not 'Movies' in res:
@ -188,13 +188,13 @@ class PassThePopcorn(TorrentProvider):
return self.unicodeToASCII(self.htmlToUnicode(text))
def getLoginParams(self):
return tryUrlencode({
return {
'username': self.conf('username'),
'password': self.conf('password'),
'passkey': self.conf('passkey'),
'keeplogged': '1',
'login': 'Login'
})
}
def loginSuccess(self, output):
try:

6
couchpotato/core/providers/torrent/sceneaccess/main.py

@ -45,7 +45,7 @@ class SceneAccess(TorrentProvider):
url = "%s&%s" % (url, arguments)
data = self.getHTMLData(url, opener = self.login_opener)
data = self.getHTMLData(url)
if data:
html = BeautifulSoup(data)
@ -78,11 +78,11 @@ class SceneAccess(TorrentProvider):
log.error('Failed getting results from %s: %s', (self.getName(), traceback.format_exc()))
def getLoginParams(self):
return tryUrlencode({
return {
'username': self.conf('username'),
'password': self.conf('password'),
'submit': 'come on in',
})
}
def getMoreInfo(self, item):
full_description = self.getCache('sceneaccess.%s' % item['id'], item['detail_url'], cache_timeout = 25920000)

6
couchpotato/core/providers/torrent/torrentbytes/main.py

@ -35,7 +35,7 @@ class TorrentBytes(TorrentProvider):
def _searchOnTitle(self, title, movie, quality, results):
url = self.urls['search'] % (tryUrlencode('%s %s' % (title.replace(':', ''), movie['library']['year'])), self.getCatId(quality['identifier'])[0])
data = self.getHTMLData(url, opener = self.login_opener)
data = self.getHTMLData(url)
if data:
html = BeautifulSoup(data)
@ -69,11 +69,11 @@ class TorrentBytes(TorrentProvider):
log.error('Failed parsing %s: %s', (self.getName(), traceback.format_exc()))
def getLoginParams(self):
return tryUrlencode({
return {
'username': self.conf('username'),
'password': self.conf('password'),
'login': 'submit',
})
}
def loginSuccess(self, output):
return 'logout.php' in output.lower() or 'Welcome' in output.lower()

11
couchpotato/core/providers/torrent/torrentday/main.py

@ -1,4 +1,3 @@
from couchpotato.core.helpers.encoding import tryUrlencode
from couchpotato.core.helpers.variable import tryInt
from couchpotato.core.logger import CPLog
from couchpotato.core.providers.torrent.base import TorrentProvider
@ -30,7 +29,7 @@ class TorrentDay(TorrentProvider):
q = '"%s %s"' % (title, movie['library']['year'])
params = {
data = {
'/browse.php?': None,
'cata': 'yes',
'jxt': 8,
@ -38,7 +37,7 @@ class TorrentDay(TorrentProvider):
'search': q,
}
data = self.getJsonData(self.urls['search'], params = params, opener = self.login_opener)
data = self.getJsonData(self.urls['search'], data = data)
try: torrents = data.get('Fs', [])[0].get('Cn', {}).get('torrents', [])
except: return
@ -54,11 +53,13 @@ class TorrentDay(TorrentProvider):
})
def getLoginParams(self):
return tryUrlencode({
return {
'username': self.conf('username'),
'password': self.conf('password'),
'submit.x': 18,
'submit.y': 11,
'submit': 'submit',
})
}
def loginSuccess(self, output):
return 'Password not correct' not in output

6
couchpotato/core/providers/torrent/torrentleech/main.py

@ -36,7 +36,7 @@ class TorrentLeech(TorrentProvider):
def _searchOnTitle(self, title, movie, quality, results):
url = self.urls['search'] % (tryUrlencode('%s %s' % (title.replace(':', ''), movie['library']['year'])), self.getCatId(quality['identifier'])[0])
data = self.getHTMLData(url, opener = self.login_opener)
data = self.getHTMLData(url)
if data:
html = BeautifulSoup(data)
@ -68,12 +68,12 @@ class TorrentLeech(TorrentProvider):
log.error('Failed parsing %s: %s', (self.getName(), traceback.format_exc()))
def getLoginParams(self):
return tryUrlencode({
return {
'username': self.conf('username'),
'password': self.conf('password'),
'remember_me': 'on',
'login': 'submit',
})
}
def loginSuccess(self, output):
return '/user/account/logout' in output.lower() or 'welcome back' in output.lower()

6
couchpotato/core/providers/torrent/torrentshack/main.py

@ -34,7 +34,7 @@ class TorrentShack(TorrentProvider):
scene_only = '1' if self.conf('scene_only') else ''
url = self.urls['search'] % (tryUrlencode('%s %s' % (title.replace(':', ''), movie['library']['year'])), scene_only, self.getCatId(quality['identifier'])[0])
data = self.getHTMLData(url, opener = self.login_opener)
data = self.getHTMLData(url)
if data:
html = BeautifulSoup(data)
@ -65,12 +65,12 @@ class TorrentShack(TorrentProvider):
log.error('Failed parsing %s: %s', (self.getName(), traceback.format_exc()))
def getLoginParams(self):
return tryUrlencode({
return {
'username': self.conf('username'),
'password': self.conf('password'),
'keeplogged': '1',
'login': 'Login',
})
}
def loginSuccess(self, output):
return 'logout.php' in output.lower()

6
couchpotato/core/providers/torrent/yify/__init__.py

@ -19,6 +19,12 @@ config = [{
'default': 0
},
{
'name': 'domain',
'advanced': True,
'label': 'Proxy server',
'description': 'Domain for requests; leave empty to let CouchPotato pick one.',
},
{
'name': 'seed_ratio',
'label': 'Seed ratio',
'type': 'float',

27
couchpotato/core/providers/torrent/yify/main.py

@ -1,20 +1,27 @@
from couchpotato.core.helpers.variable import tryInt
from couchpotato.core.logger import CPLog
from couchpotato.core.providers.torrent.base import TorrentProvider
from couchpotato.core.providers.torrent.base import TorrentMagnetProvider
import traceback
log = CPLog(__name__)
class Yify(TorrentProvider):
class Yify(TorrentMagnetProvider):
urls = {
'test' : 'https://yify-torrents.com/api',
'search' : 'https://yify-torrents.com/api/list.json?keywords=%s&quality=%s',
'detail': 'https://yify-torrents.com/api/movie.json?id=%s'
'test' : '%s/api',
'search' : '%s/api/list.json?keywords=%s&quality=%s',
'detail': '%s/api/movie.json?id=%s'
}
http_time_between_calls = 1 #seconds
proxy_list = [
'https://yify-torrents.im',
'http://yify.unlocktorrent.com',
'http://yify.ftwnet.co.uk',
'http://yify-torrents.com.come.in',
]
def search(self, movie, quality):
@ -25,7 +32,9 @@ class Yify(TorrentProvider):
def _search(self, movie, quality, results):
data = self.getJsonData(self.urls['search'] % (movie['library']['identifier'], quality['identifier']))
search_url = self.urls['search'] % (self.getDomain(), movie['library']['identifier'], quality['identifier'])
data = self.getJsonData(search_url)
if data and data.get('MovieList'):
try:
@ -41,8 +50,8 @@ class Yify(TorrentProvider):
results.append({
'id': result['MovieID'],
'name': title,
'url': result['TorrentUrl'],
'detail_url': self.urls['detail'] % result['MovieID'],
'url': result['TorrentMagnetUrl'],
'detail_url': self.urls['detail'] % (self.getDomain(), result['MovieID']),
'size': self.parseSize(result['Size']),
'seeders': tryInt(result['TorrentSeeds']),
'leechers': tryInt(result['TorrentPeers'])
@ -51,3 +60,5 @@ class Yify(TorrentProvider):
except:
log.error('Failed getting results from %s: %s', (self.getName(), traceback.format_exc()))
def correctProxy(self, data):
return 'title="YIFY-Torrents RSS feed"' in data

2
couchpotato/core/providers/trailer/hdtrailers/main.py

@ -100,7 +100,7 @@ class HDTrailers(TrailerProvider):
continue
resolutions = tr.find_all('td', attrs = {'class':'bottomTableResolution'})
for res in resolutions:
if res.a:
if res.a and str(res.a.contents[0]) in results:
results[str(res.a.contents[0])].insert(0, res.a['href'])
except AttributeError:

4
couchpotato/core/providers/userscript/base.py

@ -25,7 +25,7 @@ class UserscriptBase(Plugin):
result = fireEvent('movie.search', q = '%s %s' % (name, year), limit = 1, merge = True)
if len(result) > 0:
movie = fireEvent('movie.info', identifier = result[0].get('imdb'), merge = True)
movie = fireEvent('movie.info', identifier = result[0].get('imdb'), extended = False, merge = True)
return movie
else:
return None
@ -54,7 +54,7 @@ class UserscriptBase(Plugin):
return self.getInfo(getImdb(data))
def getInfo(self, identifier):
return fireEvent('movie.info', identifier = identifier, merge = True)
return fireEvent('movie.info', identifier = identifier, extended = False, merge = True)
def getInclude(self):
return self.includes

2
couchpotato/core/providers/userscript/imdb/main.py

@ -8,4 +8,4 @@ class IMDB(UserscriptBase):
includes = ['*://*.imdb.com/title/tt*', '*://imdb.com/title/tt*']
def getMovie(self, url):
return fireEvent('movie.info', identifier = getImdb(url), merge = True)
return self.getInfo(getImdb(url))

2
couchpotato/core/providers/userscript/tmdb/main.py

@ -9,7 +9,7 @@ class TMDB(UserscriptBase):
def getMovie(self, url):
match = re.search('(?P<id>\d+)', url)
movie = fireEvent('movie.info_by_tmdb', identifier = match.group('id'), merge = True)
movie = fireEvent('movie.info_by_tmdb', identifier = match.group('id'), extended = False, merge = True)
if movie['imdb']:
return self.getInfo(movie['imdb'])

4
couchpotato/core/settings/__init__.py

@ -110,6 +110,10 @@ class Settings(object):
except:
return default
def delete(self, option = '', section = 'core'):
self.p.remove_option(section, option)
self.save()
def getEnabler(self, section, option):
return self.getBool(section, option)
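
The new delete() just forwards to the wrapped parser and persists. The stdlib call it relies on behaves like this (sketch; self.p is assumed to be a RawConfigParser, as elsewhere in CouchPotato):

from ConfigParser import RawConfigParser   # configparser on Python 3

p = RawConfigParser()
p.add_section('core')
p.set('core', 'stale_option', '1')
p.remove_option('core', 'stale_option')       # what delete() calls before save()
print(p.has_option('core', 'stale_option'))   # False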

1
couchpotato/environment.py

@ -78,6 +78,7 @@ class Env(object):
return s.get(attr, default = default, section = section, type = type)
# Set setting
s.addSection(section)
s.set(section, attr, value)
s.save()

9
couchpotato/runner.py

@ -8,6 +8,7 @@ from couchpotato.core.helpers.variable import getDataDir, tryInt
from logging import handlers
from tornado.httpserver import HTTPServer
from tornado.web import Application, StaticFileHandler, RedirectHandler
from uuid import uuid4
import locale
import logging
import os.path
@ -144,7 +145,7 @@ def runCouchPotato(options, base_path, args, data_dir = None, log_dir = None, En
Env.set('dev', development)
# Disable logging for some modules
for logger_name in ['enzyme', 'guessit', 'subliminal', 'apscheduler']:
for logger_name in ['enzyme', 'guessit', 'subliminal', 'apscheduler', 'tornado', 'requests']:
logging.getLogger(logger_name).setLevel(logging.ERROR)
for logger_name in ['gntp', 'migrate']:
@ -167,7 +168,7 @@ def runCouchPotato(options, base_path, args, data_dir = None, log_dir = None, En
logger.addHandler(hdlr)
# To file
hdlr2 = handlers.RotatingFileHandler(Env.get('log_path'), 'a', 500000, 10)
hdlr2 = handlers.RotatingFileHandler(Env.get('log_path'), 'a', 500000, 10, encoding = Env.get('encoding'))
hdlr2.setFormatter(formatter)
logger.addHandler(hdlr2)
@ -215,6 +216,10 @@ def runCouchPotato(options, base_path, args, data_dir = None, log_dir = None, En
Env.set('web_base', web_base)
api_key = Env.setting('api_key')
if not api_key:
api_key = uuid4().hex
Env.setting('api_key', value = api_key)
api_base = r'%sapi/%s/' % (web_base, api_key)
Env.set('api_base', api_base)

265
couchpotato/static/scripts/library/mootools_more.js

@ -1,6 +1,6 @@
// MooTools: the javascript framework.
// Load this file's selection again by visiting: http://mootools.net/more/7a819726f7f5e85fc48bef295ff78dbe
// Or build this file again with packager using: packager build More/Events.Pseudos More/Date More/Date.Extras More/Element.Forms More/Element.Position More/Element.Shortcuts More/Fx.Scroll More/Fx.Slide More/Sortables More/Request.JSONP More/Request.Periodical More/Tips
// Load this file's selection again by visiting: http://mootools.net/more/0f75cfbac1aabbedaba7630beef8d10c
// Or build this file again with packager using: packager build More/Events.Pseudos More/Date More/Date.Extras More/Element.Forms More/Element.Position More/Element.Shortcuts More/Fx.Scroll More/Fx.Slide More/Sortables More/Request.JSONP More/Request.Periodical
/*
---
@ -3161,264 +3161,3 @@ Request.implement({
});
/*
---
script: Tips.js
name: Tips
description: Class for creating nice tips that follow the mouse cursor when hovering an element.
license: MIT-style license
authors:
- Valerio Proietti
- Christoph Pojer
- Luis Merino
requires:
- Core/Options
- Core/Events
- Core/Element.Event
- Core/Element.Style
- Core/Element.Dimensions
- /MooTools.More
provides: [Tips]
...
*/
(function(){
var read = function(option, element){
return (option) ? (typeOf(option) == 'function' ? option(element) : element.get(option)) : '';
};
this.Tips = new Class({
Implements: [Events, Options],
options: {/*
id: null,
onAttach: function(element){},
onDetach: function(element){},
onBound: function(coords){},*/
onShow: function(){
this.tip.setStyle('display', 'block');
},
onHide: function(){
this.tip.setStyle('display', 'none');
},
title: 'title',
text: function(element){
return element.get('rel') || element.get('href');
},
showDelay: 100,
hideDelay: 100,
className: 'tip-wrap',
offset: {x: 16, y: 16},
windowPadding: {x:0, y:0},
fixed: false,
waiAria: true
},
initialize: function(){
var params = Array.link(arguments, {
options: Type.isObject,
elements: function(obj){
return obj != null;
}
});
this.setOptions(params.options);
if (params.elements) this.attach(params.elements);
this.container = new Element('div', {'class': 'tip'});
if (this.options.id){
this.container.set('id', this.options.id);
if (this.options.waiAria) this.attachWaiAria();
}
},
toElement: function(){
if (this.tip) return this.tip;
this.tip = new Element('div', {
'class': this.options.className,
styles: {
position: 'absolute',
top: 0,
left: 0
}
}).adopt(
new Element('div', {'class': 'tip-top'}),
this.container,
new Element('div', {'class': 'tip-bottom'})
);
return this.tip;
},
attachWaiAria: function(){
var id = this.options.id;
this.container.set('role', 'tooltip');
if (!this.waiAria){
this.waiAria = {
show: function(element){
if (id) element.set('aria-describedby', id);
this.container.set('aria-hidden', 'false');
},
hide: function(element){
if (id) element.erase('aria-describedby');
this.container.set('aria-hidden', 'true');
}
};
}
this.addEvents(this.waiAria);
},
detachWaiAria: function(){
if (this.waiAria){
this.container.erase('role');
this.container.erase('aria-hidden');
this.removeEvents(this.waiAria);
}
},
attach: function(elements){
$$(elements).each(function(element){
var title = read(this.options.title, element),
text = read(this.options.text, element);
element.set('title', '').store('tip:native', title).retrieve('tip:title', title);
element.retrieve('tip:text', text);
this.fireEvent('attach', [element]);
var events = ['enter', 'leave'];
if (!this.options.fixed) events.push('move');
events.each(function(value){
var event = element.retrieve('tip:' + value);
if (!event) event = function(event){
this['element' + value.capitalize()].apply(this, [event, element]);
}.bind(this);
element.store('tip:' + value, event).addEvent('mouse' + value, event);
}, this);
}, this);
return this;
},
detach: function(elements){
$$(elements).each(function(element){
['enter', 'leave', 'move'].each(function(value){
element.removeEvent('mouse' + value, element.retrieve('tip:' + value)).eliminate('tip:' + value);
});
this.fireEvent('detach', [element]);
if (this.options.title == 'title'){ // This is necessary to check if we can revert the title
var original = element.retrieve('tip:native');
if (original) element.set('title', original);
}
}, this);
return this;
},
elementEnter: function(event, element){
clearTimeout(this.timer);
this.timer = (function(){
this.container.empty();
['title', 'text'].each(function(value){
var content = element.retrieve('tip:' + value);
var div = this['_' + value + 'Element'] = new Element('div', {
'class': 'tip-' + value
}).inject(this.container);
if (content) this.fill(div, content);
}, this);
this.show(element);
this.position((this.options.fixed) ? {page: element.getPosition()} : event);
}).delay(this.options.showDelay, this);
},
elementLeave: function(event, element){
clearTimeout(this.timer);
this.timer = this.hide.delay(this.options.hideDelay, this, element);
this.fireForParent(event, element);
},
setTitle: function(title){
if (this._titleElement){
this._titleElement.empty();
this.fill(this._titleElement, title);
}
return this;
},
setText: function(text){
if (this._textElement){
this._textElement.empty();
this.fill(this._textElement, text);
}
return this;
},
fireForParent: function(event, element){
element = element.getParent();
if (!element || element == document.body) return;
if (element.retrieve('tip:enter')) element.fireEvent('mouseenter', event);
else this.fireForParent(event, element);
},
elementMove: function(event, element){
this.position(event);
},
position: function(event){
if (!this.tip) document.id(this);
var size = window.getSize(), scroll = window.getScroll(),
tip = {x: this.tip.offsetWidth, y: this.tip.offsetHeight},
props = {x: 'left', y: 'top'},
bounds = {y: false, x2: false, y2: false, x: false},
obj = {};
for (var z in props){
obj[props[z]] = event.page[z] + this.options.offset[z];
if (obj[props[z]] < 0) bounds[z] = true;
if ((obj[props[z]] + tip[z] - scroll[z]) > size[z] - this.options.windowPadding[z]){
obj[props[z]] = event.page[z] - this.options.offset[z] - tip[z];
bounds[z+'2'] = true;
}
}
this.fireEvent('bound', bounds);
this.tip.setStyles(obj);
},
fill: function(element, contents){
if (typeof contents == 'string') element.set('html', contents);
else element.adopt(contents);
},
show: function(element){
if (!this.tip) document.id(this);
if (!this.tip.getParent()) this.tip.inject(document.body);
this.fireEvent('show', [this.tip, element]);
},
hide: function(element){
if (!this.tip) document.id(this);
this.fireEvent('hide', [this.tip, element]);
}
});
})();

2
couchpotato/static/scripts/page/home.js

@ -184,4 +184,4 @@ Page.Home = new Class({
}
});
});

62
couchpotato/static/scripts/page/settings.js

@ -268,20 +268,10 @@ Page.Settings = new Class({
if((typeOf(group.description) == 'array')){
var hint = new Element('span.hint.more_hint', {
'html': group.description[0],
'title': group.description[1]
});
var tip = new Tips(hint, {
'fixed': true,
'offset': {'x': 0, 'y': 0},
'onShow': function(tip, hint){
tip.setStyles({
'margin-top': hint.getSize().y,
'visibility': 'hidden',
'display': 'block'
}).fade('in');
}
'html': group.description[0]
});
createTooltip(group.description[1]).inject(hint, 'top');
}
else {
var hint = new Element('span.hint', {
@ -369,21 +359,10 @@ var OptionBase = new Class({
if((typeOf(self.options.description) == 'array')){
var hint = new Element('p.formHint.more_hint', {
'html': self.options.description[0],
'title': self.options.description[1]
'html': self.options.description[0]
}).inject(self.el);
var tip = new Tips(hint, {
'fixed': true,
'offset': {'x': 0, 'y': 0},
'onShow': function(tip, hint){
tip.setStyles({
'margin-left': 13,
'margin-top': hint.getSize().y+3,
'visibility': 'hidden',
'display': 'block'
}).fade('in');
}
});
createTooltip(self.options.description[1]).inject(hint, 'top');
}
else {
var hint = new Element('p.formHint', {
@ -1308,6 +1287,7 @@ Option.Combined = new Class({
self.inputs = {};
self.items = [];
self.labels = {};
self.descriptions = {};
self.options.combine.each(function(name){
@ -1328,9 +1308,12 @@ Option.Combined = new Class({
Object.each(self.inputs, function(input, name){
self.labels[name] = input.getPrevious().get('text');
self.descriptions[name] = (_in = input.getNext()) ? _in.get('text') : '';
new Element('abbr', {
'class': name,
'text': self.labels[name]
'text': self.labels[name],
'title': self.descriptions[name]
}).inject(head)
});
@ -1456,4 +1439,25 @@ Option.Combined = new Class({
self.saveCombined();
}
});
});
var createTooltip = function(description){
var tip = new Element('div.tooltip', {
'events': {
'mouseenter': function(){
tip.addClass('shown')
},
'mouseleave': function(){
tip.removeClass('shown')
}
}
}).adopt(
new Element('a.icon2.info'),
new Element('div.tip', {
'html': description
})
);
return tip;
}

55
couchpotato/static/scripts/page/wanted.js

@ -4,6 +4,7 @@ Page.Wanted = new Class({
name: 'wanted',
title: 'Gimmy gimmy gimmy!',
folder_browser: null,
indexAction: function(){
var self = this;
@ -18,13 +19,22 @@ Page.Wanted = new Class({
}
});
self.scan_folder = new Element('a', {
'title': 'Scan a folder and rename all movies in it',
'text': 'Manual folder scan',
'events':{
'click': self.scanFolder.bind(self)
}
});
// Wanted movies
self.wanted = new MovieList({
'identifier': 'wanted',
'status': 'active',
'actions': [MA.IMDB, MA.Trailer, MA.Release, MA.Edit, MA.Refresh, MA.Readd, MA.Delete],
'add_new': true,
'menu': [self.manual_search],
'menu': [self.manual_search, self.scan_folder],
'on_empty_element': App.createUserscriptButtons().addClass('empty_wanted')
});
$(self.wanted).inject(self.el);
@ -69,6 +79,45 @@ Page.Wanted = new Class({
});
}, 1000);
}
},
});
scanFolder: function(e) {
(e).stop();
var self = this;
var options = {
'name': 'Scan_folder'
}
if(!self.folder_browser){
self.folder_browser = new Option['Directory']("Scan", "folder", "", options);
self.folder_browser.save = function() {
var folder = self.folder_browser.getValue();
Api.request('renamer.scan', {
'data': {
'base_folder': folder,
},
});
};
self.folder_browser.inject(self.el, 'top');
self.folder_browser.fireEvent('injected');
// Hide the settings box
self.folder_browser.directory_inlay.hide();
self.folder_browser.el.removeChild(self.folder_browser.el.firstChild);
self.folder_browser.showBrowser();
// Make adjustments to the browser
self.folder_browser.browser.getElements('.clear.button').hide();
self.folder_browser.save_button.text = "Select";
self.folder_browser.browser.style.zIndex=1000;
}
else{
self.folder_browser.showBrowser();
}
}
});

154
couchpotato/static/style/settings.css

@ -1,5 +1,5 @@
.page.settings {
min-width: 960px;
min-width: 960px;
}
.page.settings:after {
@ -34,7 +34,7 @@
color: rgba(255, 255, 255, 0.8);
text-shadow: none;
}
.page.settings .tabs a:hover,
.page.settings .tabs a:hover,
.page.settings .tabs .active a {
background: rgb(78, 89, 105);
color: #fff;
@ -113,6 +113,8 @@
width: 20px;
}
.Scan_folder { padding: 0 !important; }
.page .ctrlHolder {
line-height: 25px;
padding: 10px 10px 10px 30px;
@ -128,15 +130,15 @@
.page .ctrlHolder.focused:first-child, .page .ctrlHolder:first-child{ background-color: transparent; }
.page .ctrlHolder .formHint {
width: 47%;
width: 46%;
margin: -18px 0;
color: #fff !important;
color: #fff !important;
display: inline-block;
vertical-align: middle;
padding: 0 0 0 2%;
line-height: 14px;
padding: 0 0 0 2%;
line-height: 14px;
}
.page .check {
margin-top: 6px;
}
@ -159,32 +161,32 @@
}
.page .xsmall { width: 20px !important; text-align: center; }
.page .enabler {
display: block;
}
.page .option_list {
margin-bottom: 20px;
}
.page .option_list .check {
margin-top: 5px;
}
.page .option_list .enabler {
padding: 0;
margin-left: 5px !important;
}
.page .option_list .enabler:not(.disabled) {
margin: 0 0 0 30px;
}
.page .option_list .enabler:not(.disabled) .ctrlHolder:first-child {
margin: 10px 0 -33px 0;
}
.page .option_list h3 {
padding: 0;
margin: 10px 5px 0;
@ -195,7 +197,7 @@
font-size: 12px;
background: rgba(255,255,255,0.03);
}
.page .option_list .enabler.disabled {
display: inline-block;
margin: 3px 3px 3px 20px;
@ -203,14 +205,14 @@
width: 173px;
vertical-align: top;
}
.page .option_list .enabler.disabled h2 {
border: none;
box-shadow: none;
padding: 0 10px 0 25px;
font-size: 16px;
}
.page .option_list .enabler:not(.disabled) h2 {
font-size: 16px;
font-weight: bold;
@ -224,7 +226,7 @@
border: none;
box-shadow: none;
}
.page .option_list .enabler.disabled h2 .hint {
display: none;
}
@ -334,7 +336,7 @@
.page .directory_list li:hover {
background-color: #515c68;
}
.page .directory_list li.empty {
background: none;
height: 100px;
@ -524,7 +526,7 @@
);
background-size: 65%;
}
.page .tag_input .choice:hover .delete,
.page .tag_input .choice:hover .delete,
.page .tag_input .choice.selected .delete { display: inline-block; }
.page .tag_input .choice .delete:hover {
height: 14px;
@ -547,33 +549,25 @@
}
.page .combined_table .head abbr.host { margin-right: 120px; }
.page .combined_table input.host { width: 140px; }
.page .section_newznab .combined_table .head abbr.host { margin-right: 200px; }
.page .section_newznab .combined_table input.host { width: 220px; }
.page .section_newznab .combined_table .head abbr.host { margin-right: 180px; }
.page .section_newznab .combined_table input.host { width: 200px; }
.page .combined_table .head abbr.name { margin-right: 57px; }
.page .combined_table input.name { width: 120px; }
.page .combined_table .head abbr.api_key { margin-right: 75px; }
.page .combined_table .head abbr.pass_key { margin-right: 71px; }
.page .combined_table input.pass_key { width: 113px; }
.page .section_newznab .combined_table .head abbr.api_key { margin-right: 185px; }
.page .section_newznab .combined_table input.api_key { width: 223px; }
.page .combined_table .seed_ratio,
.page .combined_table .seed_time {
width: 70px;
text-align: center;
margin-left: 10px;
}
.page .combined_table .seed_time {
margin-right: 10px;
.page .section_newznab .combined_table .head abbr.api_key { margin-right: 170px; }
.page .section_newznab .combined_table input.api_key { width: 203px; }
.page .combined_table .head abbr.extra_score {
margin-right: 15px;
display: none;
}
.page .combined_table .head .extra_score,
.page .combined_table .extra_score {
width: 70px;
text-align: center;
.page .combined_table input.extra_score {
width: 75px;
display: none;
}
.page.show_advanced .combined_table .head .extra_score,
@ -581,6 +575,30 @@
display: inline-block;
}
.page .combined_table .head abbr.custom_tag {
margin-right: 15px;
display: none;
}
.page .combined_table input.custom_tag {
width: 140px;
display: none;
}
.page.show_advanced .combined_table .head .custom_tag,
.page.show_advanced .combined_table .custom_tag {
display: inline-block;
}
.page .combined_table .seed_ratio,
.page .combined_table .seed_time {
width: 70px;
text-align: center;
margin-left: 10px;
}
.page .combined_table .seed_time {
margin-right: 10px;
}
.page .combined_table .ctrlHolder {
padding-top: 2px;
padding-bottom: 3px;
@ -714,24 +732,50 @@
margin-left: 10px;
display: inline-block;
}
.active .group_imdb_automation:not(.disabled) {
background: url('../images/imdb_watchlist.png') no-repeat right 50px;
min-height: 210px;
}
.tip-wrap {
background: #FFF;
color: #000;
padding: 10px;
width: 300px;
z-index: 200;
.tooltip {
position: absolute;
right: 0px;
width: 30px;
height: 30px;
}
.more_hint:after {
position: relative;
font-family: 'Elusive-Icons';
content: "\e089";
display: inline-block;
top: 1px;
left: 6px;
.tooltip > a {
opacity: .3;
font-size: 11px;
cursor: pointer;
}
.tooltip:hover > a {
opacity: 1;
}
.tooltip div {
background: #FFF;
color: #000;
padding: 10px;
width: 380px;
z-index: 200;
position: absolute;
transition: all .4s cubic-bezier(0.9,0,0.1,1);
margin-top: 40px;
right: 0;
opacity: 0;
visibility: hidden;
}
.tooltip.shown div {
margin-top: 10px;
opacity: 1;
visibility: visible;
}
.tooltip div a {
color: #5b9bd1;
}

8
libs/requests/__init__.py

@ -23,7 +23,7 @@ usage:
>>> payload = dict(key1='value1', key2='value2')
>>> r = requests.post("http://httpbin.org/post", data=payload)
>>> print r.text
>>> print(r.text)
{
...
"form": {
@ -42,15 +42,15 @@ is at <http://python-requests.org>.
"""
__title__ = 'requests'
__version__ = '1.2.3'
__build__ = 0x010203
__version__ = '2.1.0'
__build__ = 0x020100
__author__ = 'Kenneth Reitz'
__license__ = 'Apache 2.0'
__copyright__ = 'Copyright 2013 Kenneth Reitz'
# Attempt to enable urllib3's SNI support, if possible
try:
from requests.packages.urllib3.contrib import pyopenssl
from .packages.urllib3.contrib import pyopenssl
pyopenssl.inject_into_urllib3()
except ImportError:
pass

134
libs/requests/adapters.py

@ -11,18 +11,20 @@ and maintain connections.
import socket
from .models import Response
from .packages.urllib3.poolmanager import PoolManager, ProxyManager
from .packages.urllib3.poolmanager import PoolManager, proxy_from_url
from .packages.urllib3.response import HTTPResponse
from .packages.urllib3.util import Timeout as TimeoutSauce
from .compat import urlparse, basestring, urldefrag, unquote
from .utils import (DEFAULT_CA_BUNDLE_PATH, get_encoding_from_headers,
prepend_scheme_if_needed, get_auth_from_url)
except_on_missing_scheme, get_auth_from_url)
from .structures import CaseInsensitiveDict
from .packages.urllib3.exceptions import MaxRetryError
from .packages.urllib3.exceptions import TimeoutError
from .packages.urllib3.exceptions import SSLError as _SSLError
from .packages.urllib3.exceptions import HTTPError as _HTTPError
from .packages.urllib3.exceptions import ProxyError as _ProxyError
from .cookies import extract_cookies_to_jar
from .exceptions import ConnectionError, Timeout, SSLError
from .exceptions import ConnectionError, Timeout, SSLError, ProxyError
from .auth import _basic_auth_str
DEFAULT_POOLBLOCK = False
@ -71,6 +73,7 @@ class HTTPAdapter(BaseAdapter):
pool_block=DEFAULT_POOLBLOCK):
self.max_retries = max_retries
self.config = {}
self.proxy_manager = {}
super(HTTPAdapter, self).__init__()
@ -118,7 +121,7 @@ class HTTPAdapter(BaseAdapter):
:param verify: Whether we should actually verify the certificate.
:param cert: The SSL certificate to verify.
"""
if url.startswith('https') and verify:
if url.lower().startswith('https') and verify:
cert_loc = None
@ -184,18 +187,28 @@ class HTTPAdapter(BaseAdapter):
def get_connection(self, url, proxies=None):
"""Returns a urllib3 connection for the given URL. This should not be
called from user code, and is only exposed for use when subclassing the
:class:`HTTPAdapter <reqeusts.adapters.HTTPAdapter>`.
:class:`HTTPAdapter <requests.adapters.HTTPAdapter>`.
:param url: The URL to connect to.
:param proxies: (optional) A Requests-style dictionary of proxies used on this request.
"""
proxies = proxies or {}
proxy = proxies.get(urlparse(url).scheme)
proxy = proxies.get(urlparse(url.lower()).scheme)
if proxy:
proxy = prepend_scheme_if_needed(proxy, urlparse(url).scheme)
conn = ProxyManager(self.poolmanager.connection_from_url(proxy))
except_on_missing_scheme(proxy)
proxy_headers = self.proxy_headers(proxy)
if not proxy in self.proxy_manager:
self.proxy_manager[proxy] = proxy_from_url(
proxy,
proxy_headers=proxy_headers)
conn = self.proxy_manager[proxy].connection_from_url(url)
else:
# Only scheme should be lower case
parsed = urlparse(url)
url = parsed.geturl()
conn = self.poolmanager.connection_from_url(url)
return conn
@ -211,10 +224,10 @@ class HTTPAdapter(BaseAdapter):
def request_url(self, request, proxies):
"""Obtain the url to use when making the final request.
If the message is being sent through a proxy, the full URL has to be
used. Otherwise, we should only use the path portion of the URL.
If the message is being sent through a HTTP proxy, the full URL has to
be used. Otherwise, we should only use the path portion of the URL.
This shoudl not be called from user code, and is only exposed for use
This should not be called from user code, and is only exposed for use
when subclassing the
:class:`HTTPAdapter <requests.adapters.HTTPAdapter>`.
@ -222,9 +235,10 @@ class HTTPAdapter(BaseAdapter):
:param proxies: A dictionary of schemes to proxy URLs.
"""
proxies = proxies or {}
proxy = proxies.get(urlparse(request.url).scheme)
scheme = urlparse(request.url).scheme
proxy = proxies.get(scheme)
if proxy:
if proxy and scheme != 'https':
url, _ = urldefrag(request.url)
else:
url = request.path_url
@ -232,8 +246,9 @@ class HTTPAdapter(BaseAdapter):
return url
def add_headers(self, request, **kwargs):
"""Add any headers needed by the connection. Currently this adds a
Proxy-Authorization header.
"""Add any headers needed by the connection. As of v2.0 this does
nothing by default, but is left for overriding by users that subclass
the :class:`HTTPAdapter <requests.adapters.HTTPAdapter>`.
This should not be called from user code, and is only exposed for use
when subclassing the
@ -242,12 +257,22 @@ class HTTPAdapter(BaseAdapter):
:param request: The :class:`PreparedRequest <PreparedRequest>` to add headers to.
:param kwargs: The keyword arguments from the call to send().
"""
proxies = kwargs.get('proxies', {})
pass
if proxies is None:
proxies = {}
def proxy_headers(self, proxy):
"""Returns a dictionary of the headers to add to any request sent
through a proxy. This works with urllib3 magic to ensure that they are
correctly sent to the proxy, rather than in a tunnelled request if
CONNECT is being used.
proxy = proxies.get(urlparse(request.url).scheme)
This should not be called from user code, and is only exposed for use
when subclassing the
:class:`HTTPAdapter <requests.adapters.HTTPAdapter>`.
:param proxies: The url of the proxy being used for this request.
:param kwargs: Optional additional keyword arguments.
"""
headers = {}
username, password = get_auth_from_url(proxy)
if username and password:
@ -255,8 +280,10 @@ class HTTPAdapter(BaseAdapter):
# to decode them.
username = unquote(username)
password = unquote(password)
request.headers['Proxy-Authorization'] = _basic_auth_str(username,
password)
headers['Proxy-Authorization'] = _basic_auth_str(username,
password)
return headers
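
In practice proxy_headers() is what turns credentials embedded in a proxy URL into a Proxy-Authorization header. A small usage sketch with made-up values:

import requests

session = requests.Session()
proxies = {'http': 'http://user:secret@proxy.example.com:3128'}
# HTTPAdapter.proxy_headers() adds
#   Proxy-Authorization: Basic dXNlcjpzZWNyZXQ=
# to requests routed through this proxy.
response = session.get('http://httpbin.org/ip', proxies = proxies)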
def send(self, request, stream=False, timeout=None, verify=True, cert=None, proxies=None):
"""Sends PreparedRequest object. Returns Response object.
@ -265,7 +292,7 @@ class HTTPAdapter(BaseAdapter):
:param stream: (optional) Whether to stream the request content.
:param timeout: (optional) The timeout on the request.
:param verify: (optional) Whether to verify SSL certificates.
:param vert: (optional) Any user-provided SSL certificate to be trusted.
:param cert: (optional) Any user-provided SSL certificate to be trusted.
:param proxies: (optional) The proxies dictionary to apply to the request.
"""
@ -273,10 +300,15 @@ class HTTPAdapter(BaseAdapter):
self.cert_verify(conn, request.url, verify, cert)
url = self.request_url(request, proxies)
self.add_headers(request, proxies=proxies)
self.add_headers(request)
chunked = not (request.body is None or 'Content-Length' in request.headers)
if stream:
timeout = TimeoutSauce(connect=timeout)
else:
timeout = TimeoutSauce(connect=timeout, read=timeout)
try:
if not chunked:
resp = conn.urlopen(
@ -298,27 +330,40 @@ class HTTPAdapter(BaseAdapter):
conn = conn.proxy_pool
low_conn = conn._get_conn(timeout=timeout)
low_conn.putrequest(request.method, url, skip_accept_encoding=True)
for header, value in request.headers.items():
low_conn.putheader(header, value)
low_conn.endheaders()
for i in request.body:
low_conn.send(hex(len(i))[2:].encode('utf-8'))
low_conn.send(b'\r\n')
low_conn.send(i)
low_conn.send(b'\r\n')
low_conn.send(b'0\r\n\r\n')
r = low_conn.getresponse()
resp = HTTPResponse.from_httplib(r,
pool=conn,
connection=low_conn,
preload_content=False,
decode_content=False
)
try:
low_conn.putrequest(request.method,
url,
skip_accept_encoding=True)
for header, value in request.headers.items():
low_conn.putheader(header, value)
low_conn.endheaders()
for i in request.body:
low_conn.send(hex(len(i))[2:].encode('utf-8'))
low_conn.send(b'\r\n')
low_conn.send(i)
low_conn.send(b'\r\n')
low_conn.send(b'0\r\n\r\n')
r = low_conn.getresponse()
resp = HTTPResponse.from_httplib(
r,
pool=conn,
connection=low_conn,
preload_content=False,
decode_content=False
)
except:
# If we hit any problems here, clean up the connection.
# Then, reraise so that we can handle the actual exception.
low_conn.close()
raise
else:
# All is well, return the connection to the pool.
conn._put_conn(low_conn)
except socket.error as sockerr:
raise ConnectionError(sockerr)
@ -326,6 +371,9 @@ class HTTPAdapter(BaseAdapter):
except MaxRetryError as e:
raise ConnectionError(e)
except _ProxyError as e:
raise ProxyError(e)
except (_SSLError, _HTTPError) as e:
if isinstance(e, _SSLError):
raise SSLError(e)

65
libs/requests/auth.py

@ -16,9 +16,9 @@ import logging
from base64 import b64encode
from .compat import urlparse, str
from .cookies import extract_cookies_to_jar
from .utils import parse_dict_header
log = logging.getLogger(__name__)
CONTENT_TYPE_FORM_URLENCODED = 'application/x-www-form-urlencoded'
@ -64,6 +64,7 @@ class HTTPDigestAuth(AuthBase):
self.last_nonce = ''
self.nonce_count = 0
self.chal = {}
self.pos = None
def build_digest_header(self, method, url):
@ -78,7 +79,7 @@ class HTTPDigestAuth(AuthBase):
else:
_algorithm = algorithm.upper()
# lambdas assume digest modules are imported at the top level
if _algorithm == 'MD5':
if _algorithm == 'MD5' or _algorithm == 'MD5-SESS':
def md5_utf8(x):
if isinstance(x, str):
x = x.encode('utf-8')
@ -90,7 +91,7 @@ class HTTPDigestAuth(AuthBase):
x = x.encode('utf-8')
return hashlib.sha1(x).hexdigest()
hash_utf8 = sha_utf8
# XXX MD5-sess
KD = lambda s, d: hash_utf8("%s:%s" % (s, d))
if hash_utf8 is None:
@ -106,23 +107,28 @@ class HTTPDigestAuth(AuthBase):
A1 = '%s:%s:%s' % (self.username, realm, self.password)
A2 = '%s:%s' % (method, path)
if qop == 'auth':
if nonce == self.last_nonce:
self.nonce_count += 1
else:
self.nonce_count = 1
ncvalue = '%08x' % self.nonce_count
s = str(self.nonce_count).encode('utf-8')
s += nonce.encode('utf-8')
s += time.ctime().encode('utf-8')
s += os.urandom(8)
cnonce = (hashlib.sha1(s).hexdigest()[:16])
noncebit = "%s:%s:%s:%s:%s" % (nonce, ncvalue, cnonce, qop, hash_utf8(A2))
respdig = KD(hash_utf8(A1), noncebit)
elif qop is None:
respdig = KD(hash_utf8(A1), "%s:%s" % (nonce, hash_utf8(A2)))
HA1 = hash_utf8(A1)
HA2 = hash_utf8(A2)
if nonce == self.last_nonce:
self.nonce_count += 1
else:
self.nonce_count = 1
ncvalue = '%08x' % self.nonce_count
s = str(self.nonce_count).encode('utf-8')
s += nonce.encode('utf-8')
s += time.ctime().encode('utf-8')
s += os.urandom(8)
cnonce = (hashlib.sha1(s).hexdigest()[:16])
noncebit = "%s:%s:%s:%s:%s" % (nonce, ncvalue, cnonce, qop, HA2)
if _algorithm == 'MD5-SESS':
HA1 = hash_utf8('%s:%s:%s' % (HA1, nonce, cnonce))
if qop is None:
respdig = KD(HA1, "%s:%s" % (nonce, HA2))
elif qop == 'auth' or 'auth' in qop.split(','):
respdig = KD(HA1, noncebit)
else:
# XXX handle auth-int.
return None
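
The MD5-SESS branch added above follows RFC 2617: HA1 is hashed once more with the nonce and client nonce before the response digest is formed. A worked sketch with illustrative values:

import hashlib

def md5_hex(s):
    return hashlib.md5(s.encode('utf-8')).hexdigest()

username, realm, password = 'user', 'example', 'secret'
method, path = 'GET', '/protected'
nonce, cnonce, ncvalue, qop = 'abc123', 'def456', '00000001', 'auth'

HA1 = md5_hex('%s:%s:%s' % (username, realm, password))
HA1 = md5_hex('%s:%s:%s' % (HA1, nonce, cnonce))   # the extra MD5-SESS step
HA2 = md5_hex('%s:%s' % (method, path))
noncebit = '%s:%s:%s:%s:%s' % (nonce, ncvalue, cnonce, qop, HA2)
respdig = md5_hex('%s:%s' % (HA1, noncebit))       # goes into response="..."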
@ -139,13 +145,17 @@ class HTTPDigestAuth(AuthBase):
if entdig:
base += ', digest="%s"' % entdig
if qop:
base += ', qop=auth, nc=%s, cnonce="%s"' % (ncvalue, cnonce)
base += ', qop="auth", nc=%s, cnonce="%s"' % (ncvalue, cnonce)
return 'Digest %s' % (base)
def handle_401(self, r, **kwargs):
"""Takes the given response and tries digest-auth, if needed."""
if self.pos is not None:
# Rewind the file position indicator of the body to where
# it was to resend the request.
r.request.body.seek(self.pos)
num_401_calls = getattr(self, 'num_401_calls', 1)
s_auth = r.headers.get('www-authenticate', '')
@ -159,10 +169,15 @@ class HTTPDigestAuth(AuthBase):
# to allow our new request to reuse the same one.
r.content
r.raw.release_conn()
prep = r.request.copy()
extract_cookies_to_jar(prep._cookies, r.request, r.raw)
prep.prepare_cookies(prep._cookies)
r.request.headers['Authorization'] = self.build_digest_header(r.request.method, r.request.url)
_r = r.connection.send(r.request, **kwargs)
prep.headers['Authorization'] = self.build_digest_header(
prep.method, prep.url)
_r = r.connection.send(prep, **kwargs)
_r.history.append(r)
_r.request = prep
return _r
@ -173,5 +188,9 @@ class HTTPDigestAuth(AuthBase):
# If we have a saved nonce, skip the 401
if self.last_nonce:
r.headers['Authorization'] = self.build_digest_header(r.method, r.url)
try:
self.pos = r.body.tell()
except AttributeError:
pass
r.register_hook('response', self.handle_401)
return r
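
The new self.pos bookkeeping lets a seekable request body be rewound in handle_401() and resent after the challenge. Typical digest usage through the public API (httpbin endpoint used for illustration):

import requests
from requests.auth import HTTPDigestAuth

# the first exchange draws a 401 challenge; handle_401() answers it
r = requests.get('http://httpbin.org/digest-auth/auth/user/pass',
                 auth = HTTPDigestAuth('user', 'pass'))
print(r.status_code)   # 200 once the digest response is accepted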

8212
libs/requests/cacert.pem

File diff suppressed because it is too large

6
libs/requests/compat.py

@ -83,13 +83,14 @@ except ImportError:
# ---------
if is_py2:
from urllib import quote, unquote, quote_plus, unquote_plus, urlencode
from urllib import quote, unquote, quote_plus, unquote_plus, urlencode, getproxies, proxy_bypass
from urlparse import urlparse, urlunparse, urljoin, urlsplit, urldefrag
from urllib2 import parse_http_list
import cookielib
from Cookie import Morsel
from StringIO import StringIO
from .packages.urllib3.packages.ordered_dict import OrderedDict
from httplib import IncompleteRead
builtin_str = str
bytes = str
@ -100,11 +101,12 @@ if is_py2:
elif is_py3:
from urllib.parse import urlparse, urlunparse, urljoin, urlsplit, urlencode, quote, unquote, quote_plus, unquote_plus, urldefrag
from urllib.request import parse_http_list
from urllib.request import parse_http_list, getproxies, proxy_bypass
from http import cookiejar as cookielib
from http.cookies import Morsel
from io import StringIO
from collections import OrderedDict
from http.client import IncompleteRead
builtin_str = str
str = str

74
libs/requests/cookies.py

@ -6,8 +6,9 @@ Compatibility code to be able to use `cookielib.CookieJar` with requests.
requests.utils imports from here, so be careful with imports.
"""
import time
import collections
from .compat import cookielib, urlparse, Morsel
from .compat import cookielib, urlparse, urlunparse, Morsel
try:
import threading
@ -44,7 +45,18 @@ class MockRequest(object):
return self.get_host()
def get_full_url(self):
return self._r.url
# Only return the response's URL if the user hadn't set the Host
# header
if not self._r.headers.get('Host'):
return self._r.url
# If they did set it, retrieve it and reconstruct the expected domain
host = self._r.headers['Host']
parsed = urlparse(self._r.url)
# Reconstruct the URL as we expect it
return urlunparse([
parsed.scheme, host, parsed.path, parsed.params, parsed.query,
parsed.fragment
])
def is_unverifiable(self):
return True
@ -73,6 +85,10 @@ class MockRequest(object):
def origin_req_host(self):
return self.get_origin_req_host()
@property
def host(self):
return self.get_host()
class MockResponse(object):
"""Wraps a `httplib.HTTPMessage` to mimic a `urllib.addinfourl`.
@ -102,6 +118,9 @@ def extract_cookies_to_jar(jar, request, response):
:param request: our own requests.Request object
:param response: urllib3.HTTPResponse object
"""
if not (hasattr(response, '_original_response') and
response._original_response):
return
# the _original_response field is the wrapped httplib.HTTPResponse object,
req = MockRequest(request)
# pull out the HTTPMessage with the headers and put it in the mock:
@ -258,6 +277,11 @@ class RequestsCookieJar(cookielib.CookieJar, collections.MutableMapping):
"""Deletes a cookie given a name. Wraps cookielib.CookieJar's remove_cookie_by_name()."""
remove_cookie_by_name(self, name)
def set_cookie(self, cookie, *args, **kwargs):
if hasattr(cookie.value, 'startswith') and cookie.value.startswith('"') and cookie.value.endswith('"'):
cookie.value = cookie.value.replace('\\"', '')
return super(RequestsCookieJar, self).set_cookie(cookie, *args, **kwargs)
def update(self, other):
"""Updates this jar with cookies from another CookieJar or dict-like"""
if isinstance(other, cookielib.CookieJar):
@ -354,19 +378,23 @@ def create_cookie(name, value, **kwargs):
def morsel_to_cookie(morsel):
"""Convert a Morsel object into a Cookie containing the one k/v pair."""
expires = None
if morsel["max-age"]:
expires = time.time() + morsel["max-age"]
elif morsel['expires']:
expires = morsel['expires']
if type(expires) == type(""):
time_template = "%a, %d-%b-%Y %H:%M:%S GMT"
expires = time.mktime(time.strptime(expires, time_template))
c = create_cookie(
name=morsel.key,
value=morsel.value,
version=morsel['version'] or 0,
port=None,
port_specified=False,
domain=morsel['domain'],
domain_specified=bool(morsel['domain']),
domain_initial_dot=morsel['domain'].startswith('.'),
path=morsel['path'],
path_specified=bool(morsel['path']),
secure=bool(morsel['secure']),
expires=morsel['max-age'] or morsel['expires'],
expires=expires,
discard=False,
comment=morsel['comment'],
comment_url=bool(morsel['comment']),
@ -375,15 +403,43 @@ def morsel_to_cookie(morsel):
return c
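
The rewritten morsel_to_cookie() converts a relative max-age into an absolute expiry and parses string expires values against the GMT template. Illustrative round-trip:

import time
from Cookie import Morsel                    # http.cookies on Python 3
from requests.cookies import morsel_to_cookie

m = Morsel()
m.set('session', 'abc', 'abc')
m['max-age'] = 3600                          # relative lifetime in seconds
c = morsel_to_cookie(m)
print(int(c.expires - time.time()))          # ~3600: now an absolute timestamp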
def cookiejar_from_dict(cookie_dict, cookiejar=None):
def cookiejar_from_dict(cookie_dict, cookiejar=None, overwrite=True):
"""Returns a CookieJar from a key/value dictionary.
:param cookie_dict: Dict of key/values to insert into CookieJar.
:param cookiejar: (optional) A cookiejar to add the cookies to.
:param overwrite: (optional) If False, will not replace cookies
already in the jar with new ones.
"""
if cookiejar is None:
cookiejar = RequestsCookieJar()
if cookie_dict is not None:
names_from_jar = [cookie.name for cookie in cookiejar]
for name in cookie_dict:
cookiejar.set_cookie(create_cookie(name, cookie_dict[name]))
if overwrite or (name not in names_from_jar):
cookiejar.set_cookie(create_cookie(name, cookie_dict[name]))
return cookiejar
def merge_cookies(cookiejar, cookies):
"""Add cookies to cookiejar and returns a merged CookieJar.
:param cookiejar: CookieJar object to add the cookies to.
:param cookies: Dictionary or CookieJar object to be added.
"""
if not isinstance(cookiejar, cookielib.CookieJar):
raise ValueError('You can only merge into CookieJar')
if isinstance(cookies, dict):
cookiejar = cookiejar_from_dict(
cookies, cookiejar=cookiejar, overwrite=False)
elif isinstance(cookies, cookielib.CookieJar):
try:
cookiejar.update(cookies)
except AttributeError:
for cookie_in_jar in cookies:
cookiejar.set_cookie(cookie_in_jar)
return cookiejar
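
merge_cookies() gives session code one entry point for dicts and jars alike, and the overwrite=False path above means per-request dict cookies no longer clobber what the jar already holds:

from requests.cookies import RequestsCookieJar, merge_cookies

jar = RequestsCookieJar()
jar.set('token', 'from-session')
merged = merge_cookies(jar, {'token': 'from-call', 'extra': '1'})
print(merged['token'])   # 'from-session' is kept; 'extra' is added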

10
libs/requests/exceptions.py

@ -9,7 +9,7 @@ This module contains the set of Requests' exceptions.
"""
class RequestException(RuntimeError):
class RequestException(IOError):
"""There was an ambiguous exception that occurred while handling your
request."""
@ -27,6 +27,10 @@ class ConnectionError(RequestException):
"""A Connection error occurred."""
class ProxyError(ConnectionError):
"""A proxy error occurred."""
class SSLError(ConnectionError):
"""An SSL error occurred."""
@ -53,3 +57,7 @@ class InvalidSchema(RequestException, ValueError):
class InvalidURL(RequestException, ValueError):
""" The URL provided was somehow invalid. """
class ChunkedEncodingError(RequestException):
"""The server declared chunked encoding but sent an invalid chunk."""

201
libs/requests/models.py

@ -11,22 +11,25 @@ import collections
import logging
import datetime
from io import BytesIO
from io import BytesIO, UnsupportedOperation
from .hooks import default_hooks
from .structures import CaseInsensitiveDict
from .auth import HTTPBasicAuth
from .cookies import cookiejar_from_dict, get_cookie_header
from .packages.urllib3.fields import RequestField
from .packages.urllib3.filepost import encode_multipart_formdata
from .packages.urllib3.util import parse_url
from .exceptions import HTTPError, RequestException, MissingSchema, InvalidURL
from .exceptions import (
HTTPError, RequestException, MissingSchema, InvalidURL,
ChunkedEncodingError)
from .utils import (
guess_filename, get_auth_from_url, requote_uri,
stream_decode_response_unicode, to_key_val_list, parse_header_links,
iter_slices, guess_json_utf, super_len)
iter_slices, guess_json_utf, super_len, to_native_string)
from .compat import (
cookielib, urlparse, urlunparse, urlsplit, urlencode, str, bytes, StringIO,
is_py2, chardet, json, builtin_str, basestring)
cookielib, urlunparse, urlsplit, urlencode, str, bytes, StringIO,
is_py2, chardet, json, builtin_str, basestring, IncompleteRead)
CONTENT_CHUNK_SIZE = 10 * 1024
ITER_CHUNK_SIZE = 512
@ -88,12 +91,14 @@ class RequestEncodingMixin(object):
"""Build the body for a multipart/form-data request.
Will successfully encode files when passed as a dict or a list of
2-tuples. Order is retained if data is a list of 2-tuples but abritrary
2-tuples. Order is retained if data is a list of 2-tuples but arbitrary
if parameters are supplied as a dict.
"""
if (not files) or isinstance(data, str):
return None
if (not files):
raise ValueError("Files must be provided.")
elif isinstance(data, basestring):
raise ValueError("Data must not be a string.")
new_fields = []
fields = to_key_val_list(data or {})
@ -104,6 +109,10 @@ class RequestEncodingMixin(object):
val = [val]
for v in val:
if v is not None:
# Don't call str() on bytestrings: in Py3 it all goes wrong.
if not isinstance(v, bytes):
v = str(v)
new_fields.append(
(field.decode('utf-8') if isinstance(field, bytes) else field,
v.encode('utf-8') if isinstance(v, str) else v))
@ -111,11 +120,14 @@ class RequestEncodingMixin(object):
for (k, v) in files:
# support for explicit filename
ft = None
fh = None
if isinstance(v, (tuple, list)):
if len(v) == 2:
fn, fp = v
else:
elif len(v) == 3:
fn, fp, ft = v
else:
fn, fp, ft, fh = v
else:
fn = guess_filename(v) or k
fp = v
@ -124,11 +136,10 @@ class RequestEncodingMixin(object):
if isinstance(fp, bytes):
fp = BytesIO(fp)
if ft:
new_v = (fn, fp.read(), ft)
else:
new_v = (fn, fp.read())
new_fields.append((k, new_v))
rf = RequestField(name=k, data=fp.read(),
filename=fn, headers=fh)
rf.make_multipart(content_type=ft)
new_fields.append(rf)
body, content_type = encode_multipart_formdata(new_fields)
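
The new RequestField-based branch accepts several tuple shapes per file part. Through the public API that looks like this (endpoint and field names are illustrative):

import io
import requests

files = {
    'named': ('report.csv', io.BytesIO(b'a,b\n')),                 # 2-tuple
    'typed': ('report.csv', io.BytesIO(b'a,b\n'), 'text/csv'),     # 3-tuple
    'full':  ('report.csv', io.BytesIO(b'a,b\n'), 'text/csv',
              {'X-Extra': '1'}),                                   # new 4-tuple: custom part headers
}
r = requests.post('http://httpbin.org/post', files = files)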
@ -139,6 +150,9 @@ class RequestHooksMixin(object):
def register_hook(self, event, hook):
"""Properly register a hook."""
if event not in self.hooks:
raise ValueError('Unsupported event specified, with event name "%s"' % (event))
if isinstance(hook, collections.Callable):
self.hooks[event].append(hook)
elif hasattr(hook, '__iter__'):
@ -184,8 +198,8 @@ class Request(RequestHooksMixin):
url=None,
headers=None,
files=None,
data=dict(),
params=dict(),
data=None,
params=None,
auth=None,
cookies=None,
hooks=None):
@ -209,7 +223,6 @@ class Request(RequestHooksMixin):
self.params = params
self.auth = auth
self.cookies = cookies
self.hooks = hooks
def __repr__(self):
return '<Request [%s]>' % (self.method)
@ -217,19 +230,17 @@ class Request(RequestHooksMixin):
def prepare(self):
"""Constructs a :class:`PreparedRequest <PreparedRequest>` for transmission and returns it."""
p = PreparedRequest()
p.prepare_method(self.method)
p.prepare_url(self.url, self.params)
p.prepare_headers(self.headers)
p.prepare_cookies(self.cookies)
p.prepare_body(self.data, self.files)
p.prepare_auth(self.auth, self.url)
# Note that prepare_auth must be last to enable authentication schemes
# such as OAuth to work on a fully prepared request.
# This MUST go after prepare_auth. Authenticators could add a hook
p.prepare_hooks(self.hooks)
p.prepare(
method=self.method,
url=self.url,
headers=self.headers,
files=self.files,
data=self.data,
params=self.params,
auth=self.auth,
cookies=self.cookies,
hooks=self.hooks,
)
return p
@ -259,14 +270,43 @@ class PreparedRequest(RequestEncodingMixin, RequestHooksMixin):
self.url = None
#: dictionary of HTTP headers.
self.headers = None
# The `CookieJar` used to create the Cookie header will be stored here
# after prepare_cookies is called
self._cookies = None
#: request body to send to the server.
self.body = None
#: dictionary of callback hooks, for internal usage.
self.hooks = default_hooks()
def prepare(self, method=None, url=None, headers=None, files=None,
data=None, params=None, auth=None, cookies=None, hooks=None):
"""Prepares the entire request with the given parameters."""
self.prepare_method(method)
self.prepare_url(url, params)
self.prepare_headers(headers)
self.prepare_cookies(cookies)
self.prepare_body(data, files)
self.prepare_auth(auth, url)
# Note that prepare_auth must be last to enable authentication schemes
# such as OAuth to work on a fully prepared request.
# This MUST go after prepare_auth. Authenticators could add a hook
self.prepare_hooks(hooks)
def __repr__(self):
return '<PreparedRequest [%s]>' % (self.method)
def copy(self):
p = PreparedRequest()
p.method = self.method
p.url = self.url
p.headers = self.headers.copy()
p._cookies = self._cookies.copy()
p.body = self.body
p.hooks = self.hooks
return p
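
Moving the individual prepare_* calls onto PreparedRequest.prepare(), plus the new copy(), supports the explicit prepared-request flow (and the digest-auth resend seen earlier):

import requests

req = requests.Request('GET', 'http://httpbin.org/get', params = {'q': 'test'})
prepared = req.prepare()     # Request.prepare() now delegates here
retry = prepared.copy()      # independent copy, as used by handle_401()

session = requests.Session()
response = session.send(prepared)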
def prepare_method(self, method):
"""Prepares the given HTTP method."""
self.method = method
@ -284,11 +324,17 @@ class PreparedRequest(RequestEncodingMixin, RequestHooksMixin):
except UnicodeDecodeError:
pass
# Don't do any URL preparation for oddball schemes
if ':' in url and not url.lower().startswith('http'):
self.url = url
return
# Support for unicode domain names and paths.
scheme, auth, host, port, path, query, fragment = parse_url(url)
if not scheme:
raise MissingSchema("Invalid URL %r: No schema supplied" % url)
raise MissingSchema("Invalid URL {0!r}: No schema supplied. "
"Perhaps you meant http://{0}?".format(url))
if not host:
raise InvalidURL("Invalid URL %r: No host supplied" % url)
@ -337,8 +383,7 @@ class PreparedRequest(RequestEncodingMixin, RequestHooksMixin):
"""Prepares the given HTTP headers."""
if headers:
headers = dict((name.encode('ascii'), value) for name, value in headers.items())
self.headers = CaseInsensitiveDict(headers)
self.headers = CaseInsensitiveDict((to_native_string(name), value) for name, value in headers.items())
else:
self.headers = CaseInsensitiveDict()
@ -352,7 +397,6 @@ class PreparedRequest(RequestEncodingMixin, RequestHooksMixin):
body = None
content_type = None
length = None
is_stream = False
is_stream = all([
hasattr(data, '__iter__'),
@ -363,8 +407,8 @@ class PreparedRequest(RequestEncodingMixin, RequestHooksMixin):
try:
length = super_len(data)
except (TypeError, AttributeError):
length = False
except (TypeError, AttributeError, UnsupportedOperation):
length = None
if is_stream:
body = data
@ -372,13 +416,10 @@ class PreparedRequest(RequestEncodingMixin, RequestHooksMixin):
if files:
raise NotImplementedError('Streamed bodies and files are mutually exclusive.')
if length:
self.headers['Content-Length'] = str(length)
if length is not None:
self.headers['Content-Length'] = builtin_str(length)
else:
self.headers['Transfer-Encoding'] = 'chunked'
# Check if file, fo, generator, iterator.
# If not, run through normal process.
else:
# Multi-part file uploads.
if files:
@ -402,12 +443,12 @@ class PreparedRequest(RequestEncodingMixin, RequestHooksMixin):
def prepare_content_length(self, body):
if hasattr(body, 'seek') and hasattr(body, 'tell'):
body.seek(0, 2)
self.headers['Content-Length'] = str(body.tell())
self.headers['Content-Length'] = builtin_str(body.tell())
body.seek(0, 0)
elif body is not None:
l = super_len(body)
if l:
self.headers['Content-Length'] = str(l)
self.headers['Content-Length'] = builtin_str(l)
elif self.method not in ('GET', 'HEAD'):
self.headers['Content-Length'] = '0'
@ -437,14 +478,13 @@ class PreparedRequest(RequestEncodingMixin, RequestHooksMixin):
"""Prepares the given HTTP cookie data."""
if isinstance(cookies, cookielib.CookieJar):
cookies = cookies
self._cookies = cookies
else:
cookies = cookiejar_from_dict(cookies)
self._cookies = cookiejar_from_dict(cookies)
if 'cookie' not in self.headers:
cookie_header = get_cookie_header(cookies, self)
if cookie_header is not None:
self.headers['Cookie'] = cookie_header
cookie_header = get_cookie_header(self._cookies, self)
if cookie_header is not None:
self.headers['Cookie'] = cookie_header
def prepare_hooks(self, hooks):
"""Prepares the given hooks."""
@ -457,6 +497,19 @@ class Response(object):
server's response to an HTTP request.
"""
__attrs__ = [
'_content',
'status_code',
'headers',
'url',
'history',
'encoding',
'reason',
'cookies',
'elapsed',
'request',
]
def __init__(self):
super(Response, self).__init__()
@ -496,6 +549,24 @@ class Response(object):
#: and the arrival of the response (as a timedelta)
self.elapsed = datetime.timedelta(0)
def __getstate__(self):
# Consume everything; accessing the content attribute makes
# sure the content has been fully read.
if not self._content_consumed:
self.content
return dict(
(attr, getattr(self, attr, None))
for attr in self.__attrs__
)
def __setstate__(self, state):
for name, value in state.items():
setattr(self, name, value)
# pickled objects do not have .raw
setattr(self, '_content_consumed', True)
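
__getstate__/__setstate__ make Response objects picklable: the body is forced into _content first and the non-picklable .raw is dropped on restore:

import pickle
import requests

response = requests.get('http://httpbin.org/get')
restored = pickle.loads(pickle.dumps(response))
print(restored.status_code, len(restored.text))   # content survives the round-trip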
def __repr__(self):
return '<Response [%s]>' % (self.status_code)
@ -537,11 +608,22 @@ class Response(object):
return iter_slices(self._content, chunk_size)
def generate():
while 1:
chunk = self.raw.read(chunk_size, decode_content=True)
if not chunk:
break
yield chunk
try:
# Special case for urllib3.
try:
for chunk in self.raw.stream(chunk_size,
decode_content=True):
yield chunk
except IncompleteRead as e:
raise ChunkedEncodingError(e)
except AttributeError:
# Standard file-like object.
while True:
chunk = self.raw.read(chunk_size)
if not chunk:
break
yield chunk
self._content_consumed = True
gen = generate()
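
With the urllib3 stream() path above, a body cut off mid-chunk now surfaces as a requests-level ChunkedEncodingError instead of a bare httplib IncompleteRead:

import requests
from requests.exceptions import ChunkedEncodingError

try:
    r = requests.get('http://httpbin.org/stream/20', stream = True)
    for chunk in r.iter_content(chunk_size = 1024):
        pass   # process each chunk
except ChunkedEncodingError:
    pass   # server closed mid-chunk; retry or give up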
@ -605,8 +687,8 @@ class Response(object):
def text(self):
"""Content of the response, in unicode.
if Response.encoding is None and chardet module is available, encoding
will be guessed.
If Response.encoding is None, encoding will be guessed using
``charade``.
"""
# Try charset from content-type
@ -648,7 +730,7 @@ class Response(object):
encoding = guess_json_utf(self.content)
if encoding is not None:
return json.loads(self.content.decode(encoding), **kwargs)
return json.loads(self.text or self.content, **kwargs)
return json.loads(self.text, **kwargs)
@property
def links(self):
@ -683,4 +765,9 @@ class Response(object):
raise HTTPError(http_error_msg, response=self)
def close(self):
"""Closes the underlying file descriptor and releases the connection
back to the pool.
*Note: Should not normally need to be called explicitly.*
"""
return self.raw.release_conn()

34
libs/requests/packages/charade/__init__.py

@ -30,3 +30,37 @@ def detect(aBuf):
u.feed(aBuf)
u.close()
return u.result
def _description_of(path):
"""Return a string describing the probable encoding of a file."""
from charade.universaldetector import UniversalDetector
u = UniversalDetector()
for line in open(path, 'rb'):
u.feed(line)
u.close()
result = u.result
if result['encoding']:
return '%s: %s with confidence %s' % (path,
result['encoding'],
result['confidence'])
else:
return '%s: no result' % path
def charade_cli():
"""
Script which takes one or more file paths and reports on their detected
encodings
Example::
% chardetect.py somefile someotherfile
somefile: windows-1252 with confidence 0.5
someotherfile: ascii with confidence 1.0
"""
from sys import argv
for path in argv[1:]:
print(_description_of(path))
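
charade_cli() is a thin wrapper over the same detect() entry point; the programmatic equivalent (confidence value illustrative):

from charade import detect

print(detect(u'héllo wörld'.encode('utf-8')))
# e.g. {'encoding': 'utf-8', 'confidence': 0.87}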

7
libs/requests/packages/charade/__main__.py

@ -0,0 +1,7 @@
'''
support 'python -m charade <file1> [file2] ...' package execution syntax (2.7+)
'''
from charade import charade_cli
charade_cli()

2
libs/requests/packages/charade/jpcntx.py

@ -169,7 +169,7 @@ class JapaneseContextAnalysis:
def get_confidence(self):
# This is just one way to calculate confidence. It works well for me.
if self._mTotalRel > MINIMUM_DATA_THRESHOLD:
return (self._mTotalRel - self._mRelSample[0]) / self._mTotalRel
return float(self._mTotalRel - self._mRelSample[0]) / self._mTotalRel
else:
return DONT_KNOW

2
libs/requests/packages/charade/latin1prober.py

@ -129,7 +129,7 @@ class Latin1Prober(CharSetProber):
if total < 0.01:
confidence = 0.0
else:
confidence = ((self._mFreqCounter[3] / total)
confidence = ((float(self._mFreqCounter[3]) / total)
- (self._mFreqCounter[1] * 20.0 / total))
if confidence < 0.0:
confidence = 0.0

12
libs/requests/packages/charade/universaldetector.py

@ -74,12 +74,10 @@ class UniversalDetector:
if aBuf[:3] == codecs.BOM:
# EF BB BF UTF-8 with BOM
self.result = {'encoding': "UTF-8", 'confidence': 1.0}
elif aBuf[:4] == codecs.BOM_UTF32_LE:
elif aBuf[:4] in (codecs.BOM_UTF32_LE, codecs.BOM_UTF32_BE):
# FF FE 00 00 UTF-32, little-endian BOM
self.result = {'encoding': "UTF-32LE", 'confidence': 1.0}
elif aBuf[:4] == codecs.BOM_UTF32_BE:
# 00 00 FE FF UTF-32, big-endian BOM
self.result = {'encoding': "UTF-32BE", 'confidence': 1.0}
self.result = {'encoding': "UTF-32", 'confidence': 1.0}
elif aBuf[:4] == b'\xFE\xFF\x00\x00':
# FE FF 00 00 UCS-4, unusual octet order BOM (3412)
self.result = {
@@ -92,12 +90,10 @@ class UniversalDetector:
'encoding': "X-ISO-10646-UCS-4-2143",
'confidence': 1.0
}
elif aBuf[:2] == codecs.BOM_LE:
elif aBuf[:2] == codecs.BOM_LE or aBuf[:2] == codecs.BOM_BE:
# FF FE UTF-16, little endian BOM
self.result = {'encoding': "UTF-16LE", 'confidence': 1.0}
elif aBuf[:2] == codecs.BOM_BE:
# FE FF UTF-16, big endian BOM
self.result = {'encoding': "UTF-16BE", 'confidence': 1.0}
self.result = {'encoding': "UTF-16", 'confidence': 1.0}
self._mGotData = True
if self.result['encoding'] and (self.result['confidence'] > 0.0):
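Collapsing the LE/BE branches into plain "UTF-16"/"UTF-32" works because Python's endian-agnostic codecs read the BOM themselves, as this check shows:

import codecs

for bom, enc in ((codecs.BOM_UTF16_LE, 'utf-16-le'),
                 (codecs.BOM_UTF16_BE, 'utf-16-be')):
    data = bom + u'hi'.encode(enc)
    # Plain 'utf-16' consumes the BOM and picks the right byte order.
    print(data.decode('utf-16'))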

2
libs/requests/packages/urllib3/__init__.py

@@ -23,7 +23,7 @@ from . import exceptions
from .filepost import encode_multipart_formdata
from .poolmanager import PoolManager, ProxyManager, proxy_from_url
from .response import HTTPResponse
from .util import make_headers, get_host
from .util import make_headers, get_host, Timeout
# Set default logging handler to avoid "No handler found" warnings.
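Re-exporting Timeout makes urllib3's fine-grained timeouts part of the package surface; a usage sketch against the vendored copy (example.com is a placeholder):

from requests.packages.urllib3 import PoolManager, Timeout

# Separate connect and read budgets instead of one blanket timeout.
http = PoolManager(timeout=Timeout(connect=2.0, read=7.0))
r = http.request('GET', 'http://example.com/')
print(r.status)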

25
libs/requests/packages/urllib3/_collections.py

@@ -5,7 +5,16 @@
# the MIT License: http://www.opensource.org/licenses/mit-license.php
from collections import MutableMapping
from threading import Lock
try:
from threading import RLock
except ImportError: # Platform-specific: No threads available
class RLock:
def __enter__(self):
pass
def __exit__(self, exc_type, exc_value, traceback):
pass
try: # Python 2.7+
from collections import OrderedDict
@@ -40,18 +49,18 @@ class RecentlyUsedContainer(MutableMapping):
self.dispose_func = dispose_func
self._container = self.ContainerCls()
self._lock = Lock()
self.lock = RLock()
def __getitem__(self, key):
# Re-insert the item, moving it to the end of the eviction line.
with self._lock:
with self.lock:
item = self._container.pop(key)
self._container[key] = item
return item
def __setitem__(self, key, value):
evicted_value = _Null
with self._lock:
with self.lock:
# Possibly evict the existing value of 'key'
evicted_value = self._container.get(key, _Null)
self._container[key] = value
@@ -65,21 +74,21 @@ class RecentlyUsedContainer(MutableMapping):
self.dispose_func(evicted_value)
def __delitem__(self, key):
with self._lock:
with self.lock:
value = self._container.pop(key)
if self.dispose_func:
self.dispose_func(value)
def __len__(self):
with self._lock:
with self.lock:
return len(self._container)
def __iter__(self):
raise NotImplementedError('Iteration over this class is unlikely to be threadsafe.')
def clear(self):
with self._lock:
with self.lock:
# Copy pointers to all values, then wipe the mapping
# under Python 2, this copies the list of values twice :-|
values = list(self._container.values())
@@ -90,5 +99,5 @@ class RecentlyUsedContainer(MutableMapping):
self.dispose_func(value)
def keys(self):
with self._lock:
with self.lock:
return self._container.keys()
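Renaming _lock to a public, reentrant lock lets callers wrap compound operations around the container's own locking without deadlocking; a sketch:

from requests.packages.urllib3._collections import RecentlyUsedContainer

evicted = []
cache = RecentlyUsedContainer(maxsize=2, dispose_func=evicted.append)
cache['a'] = 1
cache['b'] = 2
cache['c'] = 3                    # 'a' is least recently used; disposed
with cache.lock:                  # reentrant: item access inside is safe
    cache['b'] = cache['b'] + 1
print('%s %s' % (len(cache), evicted))  # 2 [1]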

107
libs/requests/packages/urllib3/connection.py

@@ -0,0 +1,107 @@
# urllib3/connection.py
# Copyright 2008-2013 Andrey Petrov and contributors (see CONTRIBUTORS.txt)
#
# This module is part of urllib3 and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
import socket
from socket import timeout as SocketTimeout
try: # Python 3
from http.client import HTTPConnection, HTTPException
except ImportError:
from httplib import HTTPConnection, HTTPException
class DummyConnection(object):
"Used to detect a failed ConnectionCls import."
pass
try: # Compiled with SSL?
ssl = None
HTTPSConnection = DummyConnection
class BaseSSLError(BaseException):
pass
try: # Python 3
from http.client import HTTPSConnection
except ImportError:
from httplib import HTTPSConnection
import ssl
BaseSSLError = ssl.SSLError
except (ImportError, AttributeError): # Platform-specific: No SSL.
pass
from .exceptions import (
ConnectTimeoutError,
)
from .packages.ssl_match_hostname import match_hostname
from .util import (
assert_fingerprint,
resolve_cert_reqs,
resolve_ssl_version,
ssl_wrap_socket,
)
class VerifiedHTTPSConnection(HTTPSConnection):
"""
Based on httplib.HTTPSConnection but wraps the socket with
SSL certification.
"""
cert_reqs = None
ca_certs = None
ssl_version = None
def set_cert(self, key_file=None, cert_file=None,
cert_reqs=None, ca_certs=None,
assert_hostname=None, assert_fingerprint=None):
self.key_file = key_file
self.cert_file = cert_file
self.cert_reqs = cert_reqs
self.ca_certs = ca_certs
self.assert_hostname = assert_hostname
self.assert_fingerprint = assert_fingerprint
def connect(self):
# Add certificate verification
try:
sock = socket.create_connection(
address=(self.host, self.port),
timeout=self.timeout,
)
except SocketTimeout:
raise ConnectTimeoutError(
self, "Connection to %s timed out. (connect timeout=%s)" %
(self.host, self.timeout))
resolved_cert_reqs = resolve_cert_reqs(self.cert_reqs)
resolved_ssl_version = resolve_ssl_version(self.ssl_version)
if self._tunnel_host:
self.sock = sock
# Calls self._set_hostport(), so self.host is
# self._tunnel_host below.
self._tunnel()
# Wrap socket using verification with the root certs in
# trusted_root_certs
self.sock = ssl_wrap_socket(sock, self.key_file, self.cert_file,
cert_reqs=resolved_cert_reqs,
ca_certs=self.ca_certs,
server_hostname=self.host,
ssl_version=resolved_ssl_version)
if resolved_cert_reqs != ssl.CERT_NONE:
if self.assert_fingerprint:
assert_fingerprint(self.sock.getpeercert(binary_form=True),
self.assert_fingerprint)
elif self.assert_hostname is not False:
match_hostname(self.sock.getpeercert(),
self.assert_hostname or self.host)
if ssl:
HTTPSConnection = VerifiedHTTPSConnection
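The new module is exercised indirectly through HTTPSConnectionPool, which uses VerifiedHTTPSConnection whenever ssl is available; a sketch with a placeholder CA bundle path:

from requests.packages.urllib3 import HTTPSConnectionPool

pool = HTTPSConnectionPool('example.com', port=443,
                           cert_reqs='CERT_REQUIRED',
                           ca_certs='/etc/ssl/certs/ca-certificates.crt')
r = pool.request('GET', '/')  # raises SSLError if verification fails
print(r.status)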

Some files were not shown because too many files changed in this diff
