Browse Source

Merge pull request #1 from CouchPotato/develop

Develop
pull/7240/head
whitter 8 years ago
committed by GitHub
parent
commit
dac86eef26
  1. 4
      .gitignore
  2. 5
      README.md
  3. 2
      couchpotato/core/downloaders/deluge.py
  4. 6
      couchpotato/core/downloaders/putio/__init__.py
  5. 8
      couchpotato/core/downloaders/putio/main.py
  6. 22
      couchpotato/core/downloaders/utorrent.py
  7. 14
      couchpotato/core/media/_base/providers/nzb/newznab.py
  8. 9
      couchpotato/core/media/_base/providers/torrent/bithdtv.py
  9. 14
      couchpotato/core/media/_base/providers/torrent/iptorrents.py
  10. 25
      couchpotato/core/media/_base/providers/torrent/kickasstorrents.py
  11. 14
      couchpotato/core/media/_base/providers/torrent/passthepopcorn.py
  12. 21
      couchpotato/core/media/_base/providers/torrent/thepiratebay.py
  13. 14
      couchpotato/core/media/_base/providers/torrent/torrentday.py
  14. 130
      couchpotato/core/media/_base/providers/torrent/yts.py
  15. 5
      couchpotato/core/media/movie/providers/automation/base.py
  16. 54
      couchpotato/core/media/movie/providers/automation/kinepolis.py
  17. 48
      couchpotato/core/media/movie/providers/automation/moviemeter.py
  18. 72
      couchpotato/core/media/movie/providers/automation/movies_io.py
  19. 48
      couchpotato/core/media/movie/providers/info/omdbapi.py
  20. 1
      couchpotato/core/media/movie/providers/info/themoviedb.py
  21. 3
      couchpotato/core/media/movie/providers/nzb/newznab.py
  22. 2
      couchpotato/core/media/movie/providers/nzb/nzbclub.py
  23. 8
      couchpotato/core/media/movie/providers/torrent/iptorrents.py
  24. 2
      couchpotato/core/media/movie/providers/torrent/passthepopcorn.py
  25. 10
      couchpotato/core/media/movie/providers/torrent/yts.py
  26. 9
      couchpotato/core/media/movie/providers/userscript/filmstarts.py
  27. 93
      couchpotato/core/notifications/discord.py
  28. 2
      couchpotato/core/notifications/emby.py
  29. 84
      couchpotato/core/notifications/join.py
  30. 3
      couchpotato/core/notifications/plex/server.py
  31. 68
      couchpotato/core/notifications/script.py
  32. 2
      couchpotato/core/plugins/log/static/log.js
  33. 12
      couchpotato/core/plugins/profile/main.py
  34. 11
      couchpotato/core/plugins/profile/static/profile.js
  35. 5
      couchpotato/core/plugins/quality/main.py
  36. 4
      couchpotato/core/plugins/quality/static/quality.js
  37. 53
      couchpotato/core/plugins/renamer.py
  38. 11
      couchpotato/core/plugins/scanner.py
  39. 2
      couchpotato/core/plugins/subtitle.py
  40. 13
      couchpotato/static/scripts/combined.plugins.min.js
  41. 2
      couchpotato/static/scripts/couchpotato.js
  42. 2
      couchpotato/templates/login.html
  43. 8
      libs/guessit/fileutils.py
  44. 214
      libs/pio/api.py
  45. 48
      libs/rtorrent/__init__.py
  46. 28
      libs/rtorrent/file.py
  47. 26
      libs/rtorrent/peer.py
  48. 6
      libs/rtorrent/rpc/__init__.py
  49. 168
      libs/rtorrent/torrent.py
  50. 22
      libs/rtorrent/tracker.py
  51. 3
      libs/subliminal/core.py
  52. 13
      libs/subliminal/services/subscenter.py
  53. 138
      libs/subliminal/services/wizdom.py
  54. 190
      libs/tus/__init__.py
  55. 8
      libs/xmpp/transports.py

4
.gitignore

@ -10,3 +10,7 @@
.coverage
coverage.xml
nosetests.xml
# Visual Studio
/.vs

5
README.md

@ -13,7 +13,7 @@ Once a movie is found, it will send it to SABnzbd or download the torrent to a s
CouchPotatoServer can be run from source. This will use *git* as updater, so make sure that is installed.
Windows, see [the CP forum](http://couchpota.to/forum/showthread.php?tid=14) for more details:
Windows, see [the CP forum](http://couchpota.to/forum/viewtopic.php?t=14) for more details:
* Install [Python 2.7](http://www.python.org/download/releases/2.7.3/)
* Then install [PyWin32 2.7](http://sourceforge.net/projects/pywin32/files/pywin32/Build%20217/) and [GIT](http://git-scm.com/)
@ -56,9 +56,6 @@ Linux:
Docker:
* You can use [linuxserver.io](https://github.com/linuxserver/docker-couchpotato) or [razorgirl's](https://github.com/razorgirl/docker-couchpotato) to quickly build your own isolated app container. It's based on the Linux instructions above. For more info about Docker check out the [official website](https://www.docker.com).
Ansible:
* You can use [peerster's](https://github.com/peerster/ansible-couchpotato) [ansible](http://www.ansible.com) role to deploy couchpotato.
FreeBSD:
* Become root with `su`

2
couchpotato/core/downloaders/deluge.py

@ -159,7 +159,7 @@ class Deluge(DownloaderBase):
# If a user opts to seed a torrent forever (usually associated with private tracker usage), stop_ratio will be 0 or -1 (depending on Deluge version).
# In this scenario the status of the torrent would never change from BUSY to SEEDING.
# The last check takes care of this case.
if torrent['is_seed'] and ((tryFloat(torrent['ratio']) < tryFloat(torrent['stop_ratio'])) or (tryFloat(torrent['stop_ratio']) <= 0)):
if torrent['is_seed'] and ((tryFloat(torrent['ratio']) < tryFloat(torrent['stop_ratio'])) or (tryFloat(torrent['stop_ratio']) < 0)):
# We have torrent['seeding_time'] to work out what the seeding time is, but we do not
# have access to the downloader seed_time, as with deluge we have no way to pass it
# when the torrent is added. So Deluge will only look at the ratio.

6
couchpotato/core/downloaders/putio/__init__.py

@ -34,6 +34,12 @@ config = [{
'default': 0,
},
{
'name': 'https',
'description': 'Set to true if your callback host accepts https instead of http',
'type': 'bool',
'default': 0,
},
{
'name': 'callback_host',
'description': 'External reachable url to CP so put.io can do its thing',
},

8
couchpotato/core/downloaders/putio/main.py

@ -61,9 +61,13 @@ class PutIO(DownloaderBase):
# Note callback_host is NOT our address, it's the internet host that putio can call too
callbackurl = None
if self.conf('download'):
callbackurl = 'http://' + self.conf('callback_host') + '%sdownloader.putio.getfrom/' %Env.get('api_base'.strip('/'))
pre = 'http://'
if self.conf('https'):
pre = 'https://'
callbackurl = pre + self.conf('callback_host') + '%sdownloader.putio.getfrom/' %Env.get('api_base'.strip('/'))
log.debug('callbackurl is %s', callbackurl)
resp = client.Transfer.add_url(url, callback_url = callbackurl, parent_id = putioFolder)
log.debug('resp is %s', resp.id);
log.debug('resp is %s', resp.id)
return self.downloadReturnId(resp.id)
def test(self):

22
couchpotato/core/downloaders/utorrent.py

@ -1,4 +1,4 @@
from base64 import b16encode, b32decode
from base64 import b16encode, b32decode
from datetime import timedelta
from hashlib import sha1
import cookielib
@ -74,24 +74,6 @@ class uTorrent(DownloaderBase):
if not self.connect():
return False
settings = self.utorrent_api.get_settings()
if not settings:
return False
#Fix settings in case they are not set for CPS compatibility
new_settings = {}
if not (settings.get('seed_prio_limitul') == 0 and settings['seed_prio_limitul_flag']):
new_settings['seed_prio_limitul'] = 0
new_settings['seed_prio_limitul_flag'] = True
log.info('Updated uTorrent settings to set a torrent to complete after it the seeding requirements are met.')
if settings.get('bt.read_only_on_complete'): #This doesn't work as this option seems to be not available through the api. Mitigated with removeReadOnly function
new_settings['bt.read_only_on_complete'] = False
log.info('Updated uTorrent settings to not set the files to read only after completing.')
if new_settings:
self.utorrent_api.set_settings(new_settings)
torrent_params = {}
if self.conf('label'):
torrent_params['label'] = self.conf('label')
@ -194,7 +176,7 @@ class uTorrent(DownloaderBase):
status = 'busy'
if (torrent[1] & self.status_flags['STARTED'] or torrent[1] & self.status_flags['QUEUED']) and torrent[4] == 1000:
status = 'seeding'
elif torrent[1] & self.status_flags['ERROR']:
elif torrent[1] & self.status_flags['ERROR'] and 'There is not enough space on the disk' not in torrent[21]:
status = 'failed'
elif torrent[4] == 1000:
status = 'completed'

14
couchpotato/core/media/_base/providers/nzb/newznab.py

@ -128,6 +128,7 @@ class Base(NZBProvider, RSS):
api_keys = splitString(self.conf('api_key'), clean = False)
extra_score = splitString(self.conf('extra_score'), clean = False)
custom_tags = splitString(self.conf('custom_tag'), clean = False)
custom_categories = splitString(self.conf('custom_categories'), clean = False)
list = []
for nr in range(len(hosts)):
@ -144,12 +145,16 @@ class Base(NZBProvider, RSS):
try: custom_tag = custom_tags[nr]
except: custom_tag = ''
try: custom_category = custom_categories[nr].replace(" ", ",")
except: custom_category = ''
list.append({
'use': uses[nr],
'host': host,
'api_key': key,
'extra_score': score,
'custom_tag': custom_tag
'custom_tag': custom_tag,
'custom_category' : custom_category
})
return list
@ -266,6 +271,13 @@ config = [{
'description': 'Add custom tags, for example add rls=1 to get only scene releases from nzbs.org',
},
{
'name': 'custom_categories',
'advanced': True,
'label': 'Custom Categories',
'default': '2000,2000,2000,2000,2000,2000',
'description': 'Specify categories to search in separated by a single space, defaults to all movies. EG: "2030 2040 2060" would only search in HD, SD, and 3D movie categories',
},
{
'name': 'api_key',
'default': ',,,,,',
'label': 'Api Key',

9
couchpotato/core/media/_base/providers/torrent/bithdtv.py

@ -39,10 +39,15 @@ class Base(TorrentProvider):
if '## SELECT COUNT(' in split_data[0]:
data = split_data[2]
html = BeautifulSoup(data)
html = BeautifulSoup(data, 'html.parser')
try:
result_table = html.find('table', attrs = {'width': '750', 'class': ''})
result_tables = html.find_all('table', attrs = {'width': '750', 'class': ''})
if result_tables is None:
return
result_table = result_tables[1]
if result_table is None:
return

14
couchpotato/core/media/_base/providers/torrent/iptorrents.py

@ -14,11 +14,11 @@ log = CPLog(__name__)
class Base(TorrentProvider):
urls = {
'test': 'https://iptorrents.eu/',
'base_url': 'https://iptorrents.eu',
'login': 'https://iptorrents.eu/take_login.php',
'login_check': 'https://iptorrents.eu/oldinbox.php',
'search': 'https://iptorrents.eu/t?%s%%s&q=%s&qf=ti#torrents&p=%%d',
'test': 'https://iptorrents.com/',
'base_url': 'https://iptorrents.com',
'login': 'https://iptorrents.com/take_login.php',
'login_check': 'https://iptorrents.com/oldinbox.php',
'search': 'https://iptorrents.com/t?%s%%s&q=%s&qf=ti#torrents&p=%%d',
}
http_time_between_calls = 1 # Seconds
@ -36,6 +36,8 @@ class Base(TorrentProvider):
log.warning('Unable to find category ids for identifier "%s"', quality.get('identifier'))
return None
query = query.replace('"', '')
return self.urls['search'] % ("&".join(("%d=" % x) for x in cat_ids), tryUrlencode(query).replace('%', '%%'))
def _searchOnTitle(self, title, media, quality, results):
@ -121,7 +123,7 @@ config = [{
'tab': 'searcher',
'list': 'torrent_providers',
'name': 'IPTorrents',
'description': '<a href="https://iptorrents.eu" target="_blank">IPTorrents</a>',
'description': '<a href="https://iptorrents.com" target="_blank">IPTorrents</a>',
'wizard': True,
'icon': 'iVBORw0KGgoAAAANSUhEUgAAABAAAAAQCAYAAAAf8/9hAAABRklEQVR42qWQO0vDUBiG8zeKY3EqQUtNO7g0J6ZJ1+ifKIIFQXAqDYKCyaaYxM3udrZLHdRFhXrZ6liCW6mubfk874EESgqaeOCF7/Y8hEh41aq6yZi2nyZgBGya9XKtZs4No05pAkZV2YbEmyMMsoSxLQeC46wCTdPPY4HruPQyGIhF97qLWsS78Miydn4XdK46NJ9OsQAYBzMIMf8MQ9wtCnTdWCaIDx/u7uljOIQEe0hiIWPamSTLay3+RxOCSPI9+RJAo7Er9r2bnqjBFAqyK+VyK4f5/Cr5ni8OFKVCz49PFI5GdNvvU7ttE1M1zMU+8AMqFksEhrMnQsBDzqmDAwzx2ehRLwT7yyCI+vSC99c3mozH1NxrJgWWtR1BOECfEJSVCm6WCzJGCA7+IWhBsM4zywDPwEp4vCjx2DzBH2ODAfsDb33Ps6dQwJgAAAAASUVORK5CYII=',
'options': [

25
couchpotato/core/media/_base/providers/torrent/kickasstorrents.py

@ -30,9 +30,28 @@ class Base(TorrentMagnetProvider):
cat_backup_id = None
proxy_list = [
'https://kat.cr',
'https://kickass.unblocked.pw/',
'https://katproxy.com',
'http://flowtorrent.com',
'http://katcr.to/span',
'http://dx-torrente.com',
'https://kickass.unblocked.vip',
'https://katcr.co',
'https://kat.how',
'https://kickass.cd',
'https://kickass.unlockproject.online',
'https://kickasstorrents.video',
'https://kat.al',
'https://katproxy.al',
'https://kattor.xyz',
'https://kickass.unblocked.video',
'https://kickass.unblocked.rocks',
'https://kickass.immunicity.live',
'https://kickass.immunicity.red',
'https://kickass.immunicity.video',
'https://kickass.bypassed.live',
'https://kickass.bypassed.video',
'https://kickass.bypassed.red',
'https://kickass.unblocked.pw',
'https://katproxy.com'
]
def _search(self, media, quality, results):

14
couchpotato/core/media/_base/providers/torrent/passthepopcorn.py

@ -18,12 +18,12 @@ log = CPLog(__name__)
class Base(TorrentProvider):
urls = {
'domain': 'https://tls.passthepopcorn.me',
'detail': 'https://tls.passthepopcorn.me/torrents.php?torrentid=%s',
'torrent': 'https://tls.passthepopcorn.me/torrents.php',
'login': 'https://tls.passthepopcorn.me/ajax.php?action=login',
'login_check': 'https://tls.passthepopcorn.me/ajax.php?action=login',
'search': 'https://tls.passthepopcorn.me/search/%s/0/7/%d'
'domain': 'https://passthepopcorn.me',
'detail': 'https://passthepopcorn.me/torrents.php?torrentid=%s',
'torrent': 'https://passthepopcorn.me/torrents.php',
'login': 'https://passthepopcorn.me/ajax.php?action=login',
'login_check': 'https://passthepopcorn.me/ajax.php?action=login',
'search': 'https://passthepopcorn.me/search/%s/0/7/%d'
}
login_errors = 0
@ -218,7 +218,7 @@ config = [{
'name': 'domain',
'advanced': True,
'label': 'Proxy server',
'description': 'Domain for requests (HTTPS only!), keep empty to use default (tls.passthepopcorn.me).',
'description': 'Domain for requests (HTTPS only!), keep empty to use default (passthepopcorn.me).',
},
{
'name': 'username',

21
couchpotato/core/media/_base/providers/torrent/thepiratebay.py

@ -25,35 +25,18 @@ class Base(TorrentMagnetProvider):
http_time_between_calls = 0
proxy_list = [
'https://thepiratebay.mn',
'https://thepiratebay.gd',
'https://thepiratebay.la',
'https://pirateproxy.sx',
'https://piratebay.host',
'https://thepiratebay.expert',
'https://pirateproxy.cat',
'https://pirateproxy.wf',
'https://pirateproxy.tf',
'https://urbanproxy.eu',
'https://pirate.guru',
'https://piratebays.co',
'https://pirateproxy.yt',
'https://thepiratebay.uk.net',
'https://tpb.ninja',
'https://thehiddenbay.me',
'https://ukunlocked.com',
'https://thebay.tv',
'https://tpb.freed0m4all.net',
'https://piratebays.eu',
'https://thepirateproxy.co',
'https://thepiratebayz.com',
'https://zaatoka.eu',
'https://piratemirror.net',
'https://theproxypirate.pw',
'https://torrentdr.com',
'https://tpbproxy.co',
'https://arrr.xyz',
'https://www.cleantpbproxy.com',
'http://tpb.dashitz.com',
'https://tpb.dashitz.com'
]
def __init__(self):

14
couchpotato/core/media/_base/providers/torrent/torrentday.py

@ -9,12 +9,12 @@ log = CPLog(__name__)
class Base(TorrentProvider):
urls = {
'test': 'https://classic.torrentday.com/',
'login': 'https://classic.torrentday.com/torrents/',
'login_check': 'https://classic.torrentday.com/userdetails.php',
'detail': 'https://classic.torrentday.com/details.php?id=%s',
'search': 'https://classic.torrentday.com/V3/API/API.php',
'download': 'https://classic.torrentday.com/download.php/%s/%s',
'test': 'https://www.torrentday.com/',
'login': 'https://www.torrentday.com/torrents/',
'login_check': 'https://www.torrentday.com/userdetails.php',
'detail': 'https://www.torrentday.com/details.php?id=%s',
'search': 'https://www.torrentday.com/V3/API/API.php',
'download': 'https://www.torrentday.com/download.php/%s/%s',
}
http_time_between_calls = 1 # Seconds
@ -86,7 +86,7 @@ config = [{
'tab': 'searcher',
'list': 'torrent_providers',
'name': 'TorrentDay',
'description': '<a href="https://classic.torrentday.com/" target="_blank">TorrentDay</a>',
'description': '<a href="https://www.torrentday.com/" target="_blank">TorrentDay</a>',
'wizard': True,
'icon': 'iVBORw0KGgoAAAANSUhEUgAAABAAAAAQCAYAAAAf8/9hAAAC5ElEQVQ4y12TXUgUURTH//fO7Di7foeQJH6gEEEIZZllVohfSG/6UA+RSFAQQj74VA8+Bj30lmAlRVSEvZRfhNhaka5ZUG1paKaW39tq5O6Ou+PM3M4o6m6X+XPPzD3zm/+dcy574r515WfIW8CZBM4YAA5Gc/aQC3yd7oXYEONcsISE5dTDh91HS0t7FEWhBUAeN9ynV/d9qJAgE4AECURAcVsGlCCnly26LMA0IQwTa52dje3d3e3hcPi8qqrrMjcVYI3EHCQZlkFOHBwR2QHh2ASAAIJxWGAQEDxjePhs3527XjJwnb37OHBq0T+Tyyjh+9KnEzNJ7nouc1Q/3A3HGsOvnJy+PSUlj81w2Lny9WuJ6+3AmTjD4HOcrdR2dWXLRQePvyaSLfQOPMPC8mC9iHCsOxSyzJCelzdSXlNzD5ujpb25Wbfc/XXJemTXF4+nnCNq+AMLe50uFfEJTiw4GXSFtiHL0SnIq66+p0kSArqO+eH3RdsAv9+f5vW7L7GICq6rmM8XBCAXlBw90rOyxibn5yzfkg/L09M52/jxqdESaIrBXHYZZbB1GX8cEpySxKIB8S5XcOnvqpli1zuwmrTtoLjw5LOK/eeuWsE4JH5IRPaPZKiKigmPp+5pa+u1aEjIMhEgrRkmi9mgxGUhM7LNJSzOzsE3+cOeExovXOjdytE0LV4zqNZUtV0uZzAGoGkhDH/2YHZiErmv4uyWQnZZWc+hoqL3WzlTExN5hhA8IEwkZWZOxwB++30YG/9GkYCPvqAaHAW5uWPROW86OmqCprUR7z1yZDAGQNuCvkoB/baIKUBWMTYymv+gra3eJNvjXu+B562tFyXqTJ6YuHK8rKwvBmC3vR7cOCPQLWFz8LnfXWUrJo9U19BwMyUlJRjTSMJ2ENxUiGxq9KXQfwqYlnWstvbR5aamG9g0uzM8Q4OFt++3NNixQ2NgYmeN03FOTUv7XVpV9aKisvLl1vN/WVhNc/Fi1NEAAAAASUVORK5CYII=',
'options': [

130
couchpotato/core/media/_base/providers/torrent/yts.py

@ -0,0 +1,130 @@
from datetime import datetime
from couchpotato.core.helpers.variable import tryInt, getIdentifier
from couchpotato.core.logger import CPLog
from couchpotato.core.media._base.providers.torrent.base import TorrentMagnetProvider
import random
log = CPLog(__name__)
class Base(TorrentMagnetProvider):
# Only qualities allowed: 720p/1080p/3D - the rest will fail.
# All YTS.ag torrents are verified
urls = {
'detail': 'https://yts.ag/api#list_movies',
'search': 'https://yts.ag/api/v2/list_movies.json?query_term=%s&limit=%s&page=%s'
}
def _search(self, movie, quality, results):
limit = 10
page = 1
data = self.getJsonData(self.urls['search'] % (getIdentifier(movie), limit, page))
if data:
movie_count = tryInt(data['data']['movie_count'])
if movie_count == 0:
log.debug('%s - found no results', (self.getName()))
else:
movie_results = data['data']['movies']
for i in range(0,len(movie_results)):
result = data['data']['movies'][i]
name = result['title']
year = result['year']
detail_url = result['url']
for torrent in result['torrents']:
t_quality = torrent['quality']
if t_quality in quality['label']:
hash = torrent['hash']
size = tryInt(torrent['size_bytes'] / 1048576)
seeders = tryInt(torrent['seeds'])
leechers = tryInt(torrent['peers'])
pubdate = torrent['date_uploaded'] # format: 2017-02-17 18:40:03
pubdate = datetime.strptime(pubdate, '%Y-%m-%d %H:%M:%S')
age = (datetime.now() - pubdate).days
results.append({
'id': random.randint(100, 9999),
'name': '%s (%s) %s %s %s' % (name, year, 'YTS', t_quality, 'BR-Rip'),
'url': self.make_magnet(hash, name),
'size': size,
'seeders': seeders,
'leechers': leechers,
'age': age,
'detail_url': detail_url,
'score': 1
})
return
def make_magnet(self, hash, name):
url_encoded_trackers = 'udp%3A%2F%2Fopen.demonii.com%3A1337%2Fannounce&tr=%0Audp%3A%2F%2Ftracker.openbittorr' \
'ent.com%3A80&tr=%0Audp%3A%2F%2Ftracker.coppersurfer.tk%3A6969&tr=%0Audp%3A%2F%2Fglot' \
'orrents.pw%3A6969%2Fannounce&tr=%0Audp%3A%2F%2Ftracker.opentrackr.org%3A1337%2Fannou' \
'nce&tr=%0Audp%3A%2F%2Ftorrent.gresille.org%3A80%2Fannounce&tr=%0Audp%3A%2F%2Fp4p.are' \
'nabg.com%3A1337&tr=%0Audp%3A%2F%2Ftracker.leechers-paradise.org%3A6969]'
return 'magnet:?xt=urn:btih:%s&dn=%s&tr=%s' % (hash, name.replace(' ', '+'), url_encoded_trackers)
config = [{
'name': 'yts',
'groups': [
{
'tab': 'searcher',
'list': 'torrent_providers',
'name': 'YTS',
'description': '<a href="https://yts.ag/" target="_blank">YTS</a>',
'wizard': True,
'icon': 'iVBORw0KGgoAAAANSUhEUgAAABAAAAAQCAIAAACQkWg2AAACL0lEQVR4AS1SPW/UQBAd23fxne/Ld2dvzvHuzPocEBAKokCBqG'
'iQ6IgACYmvUKRBFEQgKKGg4BAlUoggggYUEQpSHOI7CIEoQs/fYcbLaU/efTvvvZlnA1qydoxU5kcxX0CkgmQZtPy0hCUjvK+W'
'gEByOZ5dns1O5bzna8fRVkgsxH8B0YouIvBhdD5T11NiVOoKrsttyUcpRW0InUrFnwe9HzuP2uaQZYhF2LQ76TTXw2RVMTK8mY'
'Ybjfh+zNquMVCrqn93aArLSixPxnafdGDLaz1tjY5rmNa8z5BczEQOxQfCl1GyoqoWxYRN1bkh7ELw3q/vhP6HIL4TG9Kumpjg'
'vwuyM7OsjSj98E/vszMfZ7xvPtMaWxGO5crwIumKCR5HxDtJ0AWKGG204RfUd/3smJYqwem/Q7BTS1ZGfM4LNpVwuKAz6cMeRO'
'st0S2EwNE7GjTehO2H3dxqIpdkydat15G3F8SXBi4GlpBNlSz012L/k2+W0CLLk/jbcf13rf41yJeMQ8QWUZiHCfCA9ad+81nE'
'KPtoS9mJOf9v0NmMJHgUT6xayheK9EIK7JJeU/AF4scDF7Y5SPlJrRcxJ+um4ibNEdObxLiIwJim+eT2AL5D9CIcnZ5zvSJi9e'
'IlNHVVtZ831dk5svPgvjPWTq+ktWkd/kD0qtm71x+sDQe3kt6DXnM7Ct+GajmTxKlkAokWljyAKSm5oWa2w+BH4P2UuVub7eTy'
'iGOQYapY/wEztHduSDYz5gAAAABJRU5ErkJggg==',
'options': [
{
'name': 'enabled',
'type': 'enabler',
'default': False
},
{
'name': 'seed_ratio',
'label': 'Seed ratio',
'type': 'float',
'default': 1,
'description': 'Will not be (re)moved until this seed ratio is met.',
},
{
'name': 'seed_time',
'label': 'Seed time',
'type': 'int',
'default': 40,
'description': 'Will not be (re)moved until this seed time (in hours) is met.',
},
{
'name': 'info',
'label': 'Info',
'type':'bool',
'default':'False',
'description': 'YTS will only work if you set the minimum size for 720p to 500 and 1080p to 800',
},
{
'name': 'extra_score',
'advanced': True,
'label': 'Extra Score',
'type': 'int',
'default': 0,
'description': 'Starting score for each release found via this provider.',
}
],
}
]
}]

5
couchpotato/core/media/movie/providers/automation/base.py

@ -1,4 +1,5 @@
import time
import unicodedata
from couchpotato.core.event import addEvent, fireEvent
from couchpotato.core.logger import CPLog
@ -45,7 +46,11 @@ class Automation(AutomationBase):
def search(self, name, year = None, imdb_only = False):
try:
cache_name = name.decode('utf-8').encode('ascii', 'ignore')
except UnicodeEncodeError:
cache_name = unicodedata.normalize('NFKD', name).encode('ascii','ignore')
prop_name = 'automation.cached.%s.%s' % (cache_name, year)
cached_imdb = Env.prop(prop_name, default = False)
if cached_imdb and imdb_only:

54
couchpotato/core/media/movie/providers/automation/kinepolis.py

@ -1,54 +0,0 @@
import datetime
from couchpotato.core.helpers.rss import RSS
from couchpotato.core.logger import CPLog
from couchpotato.core.media.movie.providers.automation.base import Automation
log = CPLog(__name__)
autoload = 'Kinepolis'
class Kinepolis(Automation, RSS):
interval = 1800
rss_url = 'http://kinepolis.be/nl/top10-box-office/feed'
def getIMDBids(self):
movies = []
rss_movies = self.getRSSData(self.rss_url)
for movie in rss_movies:
name = self.getTextElement(movie, 'title')
year = datetime.datetime.now().strftime('%Y')
imdb = self.search(name, year)
if imdb and self.isMinimalMovie(imdb):
movies.append(imdb['imdb'])
return movies
config = [{
'name': 'kinepolis',
'groups': [
{
'tab': 'automation',
'list': 'automation_providers',
'name': 'kinepolis_automation',
'label': 'Kinepolis',
'description': 'Imports movies from the current top 10 of kinepolis.',
'options': [
{
'name': 'automation_enabled',
'default': False,
'type': 'enabler',
},
],
},
],
}]

48
couchpotato/core/media/movie/providers/automation/moviemeter.py

@ -1,48 +0,0 @@
from couchpotato.core.helpers.rss import RSS
from couchpotato.core.logger import CPLog
from couchpotato.core.media.movie.providers.automation.base import Automation
log = CPLog(__name__)
autoload = 'Moviemeter'
class Moviemeter(Automation, RSS):
interval = 1800
rss_url = 'http://www.moviemeter.nl/rss/cinema'
def getIMDBids(self):
movies = []
rss_movies = self.getRSSData(self.rss_url)
for movie in rss_movies:
imdb = self.search(self.getTextElement(movie, 'title'))
if imdb and self.isMinimalMovie(imdb):
movies.append(imdb['imdb'])
return movies
config = [{
'name': 'moviemeter',
'groups': [
{
'tab': 'automation',
'list': 'automation_providers',
'name': 'moviemeter_automation',
'label': 'Moviemeter',
'description': 'Imports movies from the current top 10 of moviemeter.nl.',
'options': [
{
'name': 'automation_enabled',
'default': False,
'type': 'enabler',
},
],
},
],
}]

72
couchpotato/core/media/movie/providers/automation/movies_io.py

@ -1,72 +0,0 @@
from couchpotato.core.event import fireEvent
from couchpotato.core.helpers.rss import RSS
from couchpotato.core.helpers.variable import tryInt, splitString
from couchpotato.core.logger import CPLog
from couchpotato.core.media.movie.providers.automation.base import Automation
log = CPLog(__name__)
autoload = 'MoviesIO'
class MoviesIO(Automation, RSS):
interval = 1800
def getIMDBids(self):
movies = []
enablers = [tryInt(x) for x in splitString(self.conf('automation_urls_use'))]
index = -1
for rss_url in splitString(self.conf('automation_urls')):
index += 1
if not enablers[index]:
continue
rss_movies = self.getRSSData(rss_url, headers = {'Referer': ''})
for movie in rss_movies:
nameyear = fireEvent('scanner.name_year', self.getTextElement(movie, 'title'), single = True)
imdb = self.search(nameyear.get('name'), nameyear.get('year'), imdb_only = True)
if not imdb:
continue
movies.append(imdb)
return movies
config = [{
'name': 'moviesio',
'groups': [
{
'tab': 'automation',
'list': 'watchlist_providers',
'name': 'moviesio',
'label': 'Movies.IO',
'description': 'Imports movies from <a href="http://movies.io" target="_blank">Movies.io</a> RSS watchlists',
'options': [
{
'name': 'automation_enabled',
'default': False,
'type': 'enabler',
},
{
'name': 'automation_urls_use',
'label': 'Use',
},
{
'name': 'automation_urls',
'label': 'url',
'type': 'combined',
'combine': ['automation_urls_use', 'automation_urls'],
},
],
},
],
}]

48
couchpotato/core/media/movie/providers/info/omdbapi.py

@ -18,8 +18,8 @@ autoload = 'OMDBAPI'
class OMDBAPI(MovieProvider):
urls = {
'search': 'http://www.omdbapi.com/?type=movie&%s',
'info': 'http://www.omdbapi.com/?type=movie&i=%s',
'search': 'https://www.omdbapi.com/?apikey=%s&type=movie&%s',
'info': 'https://www.omdbapi.com/?apikey=%s&type=movie&i=%s',
}
http_time_between_calls = 0
@ -30,6 +30,8 @@ class OMDBAPI(MovieProvider):
addEvent('movie.info', self.getInfo)
def search(self, q, limit = 12):
if self.isDisabled():
return []
name_year = fireEvent('scanner.name_year', q, single = True)
@ -39,7 +41,7 @@ class OMDBAPI(MovieProvider):
}
cache_key = 'omdbapi.cache.%s' % q
url = self.urls['search'] % tryUrlencode({'t': name_year.get('name'), 'y': name_year.get('year', '')})
url = self.urls['search'] % (self.getApiKey(), tryUrlencode({'t': name_year.get('name'), 'y': name_year.get('year', '')}))
cached = self.getCache(cache_key, url, timeout = 3, headers = {'User-Agent': Env.getIdentifier()})
if cached:
@ -53,12 +55,12 @@ class OMDBAPI(MovieProvider):
return []
def getInfo(self, identifier = None, **kwargs):
if not identifier:
if self.isDisabled() or not identifier:
return {}
cache_key = 'omdbapi.cache.%s' % identifier
cached = self.getCache(cache_key, self.urls['info'] % identifier, timeout = 3, headers = {'User-Agent': Env.getIdentifier()})
url = self.urls['info'] % (self.getApiKey(), identifier)
cached = self.getCache(cache_key, url, timeout = 3, headers = {'User-Agent': Env.getIdentifier()})
if cached:
result = self.parseMovie(cached)
@ -88,7 +90,8 @@ class OMDBAPI(MovieProvider):
tmp_movie = movie.copy()
for key in tmp_movie:
if tmp_movie.get(key).lower() == 'n/a':
tmp_movie_elem = tmp_movie.get(key)
if not isinstance(tmp_movie_elem, (str, unicode)) or tmp_movie_elem.lower() == 'n/a':
del movie[key]
year = tryInt(movie.get('Year', ''))
@ -122,6 +125,16 @@ class OMDBAPI(MovieProvider):
return movie_data
def isDisabled(self):
if self.getApiKey() == '':
log.error('No API key provided.')
return True
return False
def getApiKey(self):
apikey = self.conf('api_key')
return apikey
def runtimeToMinutes(self, runtime_str):
runtime = 0
@ -132,3 +145,24 @@ class OMDBAPI(MovieProvider):
runtime += tryInt(nr) * (60 if 'h' is str(size)[0] else 1)
return runtime
config = [{
'name': 'omdbapi',
'groups': [
{
'tab': 'providers',
'name': 'tmdb',
'label': 'OMDB API',
'hidden': True,
'description': 'Used for all calls to TheMovieDB.',
'options': [
{
'name': 'api_key',
'default': 'bbc0e412', # Don't be a dick and use this somewhere else
'label': 'Api Key',
},
],
},
],
}]

1
couchpotato/core/media/movie/providers/info/themoviedb.py

@ -54,6 +54,7 @@ class TheMovieDb(MovieProvider):
languages.remove('en')
# default language has a special management
if self.default_language in languages:
languages.remove(self.default_language)
self.languages = languages

3
couchpotato/core/media/movie/providers/nzb/newznab.py

@ -23,4 +23,7 @@ class Newznab(MovieProvider, Base):
if len(host.get('custom_tag', '')) > 0:
query = '%s&%s' % (query, host.get('custom_tag'))
if len(host['custom_category']) > 0:
query = '%s&cat=%s' % (query, host['custom_category'])
return query

2
couchpotato/core/media/movie/providers/nzb/nzbclub.py

@ -14,7 +14,7 @@ class NZBClub(MovieProvider, Base):
def buildUrl(self, media):
q = tryUrlencode({
'q': '"%s"' % fireEvent('library.query', media, single = True),
'q': '%s' % fireEvent('library.query', media, single = True),
})
query = tryUrlencode({

8
couchpotato/core/media/movie/providers/torrent/iptorrents.py

@ -11,12 +11,12 @@ class IPTorrents(MovieProvider, Base):
cat_ids = [
([87], ['3d']),
([48], ['720p', '1080p']),
([89], ['bd50']),
([96], ['cam', 'ts', 'tc', 'r5', 'scr']),
([48, 20, 90], ['brrip']),
([48], ['720p', '1080p']),
([48, 20], ['brrip']),
([7, 77], ['dvdrip']),
([6], ['dvdr'])
([6], ['dvdr']),
([96], ['cam', 'ts', 'tc', 'r5', 'scr']),
]
def buildUrl(self, title, media, quality):

2
couchpotato/core/media/movie/providers/torrent/passthepopcorn.py

@ -10,6 +10,7 @@ autoload = 'PassThePopcorn'
class PassThePopcorn(MovieProvider, Base):
quality_search_params = {
'2160p': {'resolution': '4K'},
'bd50': {'media': 'Blu-ray', 'format': 'BD50'},
'1080p': {'resolution': '1080p'},
'720p': {'resolution': '720p'},
@ -24,6 +25,7 @@ class PassThePopcorn(MovieProvider, Base):
}
post_search_filters = {
'2160p': {'Resolution': ['4K']},
'bd50': {'Codec': ['BD50']},
'1080p': {'Resolution': ['1080p']},
'720p': {'Resolution': ['720p']},

10
couchpotato/core/media/movie/providers/torrent/yts.py

@ -0,0 +1,10 @@
from couchpotato.core.logger import CPLog
from couchpotato.core.media._base.providers.torrent.yts import Base
from couchpotato.core.media.movie.providers.base import MovieProvider
log = CPLog(__name__)
autoload = 'Yts'
class Yts(MovieProvider, Base):
pass

9
couchpotato/core/media/movie/providers/userscript/filmstarts.py

@ -1,5 +1,6 @@
from bs4 import BeautifulSoup
from couchpotato.core.media._base.providers.userscript.base import UserscriptBase
import re
autoload = 'Filmstarts'
@ -15,16 +16,16 @@ class Filmstarts(UserscriptBase):
return
html = BeautifulSoup(data)
table = html.find("table", attrs={"class": "table table-standard thead-standard table-striped_2 fs11"})
table = html.find("section", attrs={"class": "section ovw ovw-synopsis", "id": "synopsis-details"})
if table.find(text='Originaltitel'):
if table.find(text=re.compile('Originaltitel')): #some trailing whitespaces on some pages
# Get original film title from the table specified above
name = table.find("div", text="Originaltitel").parent.parent.parent.td.text
name = name = table.find("span", text=re.compile("Originaltitel")).findNext('h2').text
else:
# If none is available get the title from the meta data
name = html.find("meta", {"property":"og:title"})['content']
# Year of production is not available in the meta data, so get it from the table
year = table.find(text="Produktionsjahr").parent.parent.next_sibling.text
year = table.find("span", text=re.compile("Produktionsjahr")).findNext('span').text
return self.search(name, year)

93
couchpotato/core/notifications/discord.py

@ -0,0 +1,93 @@
from couchpotato.core.logger import CPLog
from couchpotato.core.notifications.base import Notification
import json
import requests
log = CPLog(__name__)
autoload = 'Discord'
class Discord(Notification):
    """Send CouchPotato notifications to a Discord channel via a webhook."""

    # Settings that must be non-empty for notifications to be sent.
    required_confs = ('webhook_url',)

    def notify(self, message='', data=None, listener=None):
        """Post `message` to the configured Discord webhook.

        :param message: text to send (optionally suffixed with an IMDB link
                        when `include_imdb` is enabled and `data` carries an
                        `identifier`).
        :param data: release/media info dict (may be None).
        :param listener: unused, kept for the Notification interface.
        :return: True when the webhook accepted the message, False when
                 configuration is missing or the HTTP request failed.
        """
        for key in self.required_confs:
            if not self.conf(key):
                log.warning('Discord notifications are enabled, but '
                            '"{0}" is not specified.'.format(key))
                return False

        data = data or {}
        message = message.strip()

        if self.conf('include_imdb') and 'identifier' in data:
            template = ' http://www.imdb.com/title/{0[identifier]}/'
            message += template.format(data)

        headers = {b"Content-Type": b"application/json"}
        payload = json.dumps(dict(
            content=message,
            username=self.conf('bot_name'),
            avatar_url=self.conf('avatar_url'),
            tts=self.conf('discord_tts'),
        ))
        try:
            r = requests.post(self.conf('webhook_url'), data=payload, headers=headers)
            # Treat HTTP 4xx/5xx as failures; the original evaluated
            # `r.status_code` as a bare no-op, silently accepting errors.
            r.raise_for_status()
        except Exception as e:
            # Log the exception itself: the original logged `r.status_code`,
            # which raised NameError whenever requests.post failed before
            # `r` was ever assigned.
            log.warning('Error sending Discord notification: {0}'.format(e))
            return False

        return True
# Settings UI definition for the Discord notifier.
config = [{
    'name': 'discord',
    'groups': [
        {
            'tab': 'notifications',
            'list': 'notification_providers',
            'name': 'discord',
            'options': [
                {
                    'name': 'enabled',
                    'default': 0,
                    'type': 'enabler',
                },
                {
                    'name': 'webhook_url',
                    'description': (
                        'Your Discord authentication webhook URL.',
                        'Created under channel settings.'
                    )
                },
                {
                    'name': 'include_imdb',
                    'default': True,
                    'type': 'bool',
                    # Fixed key: was misspelled 'descrpition', so the help
                    # text never showed up in the settings UI.
                    'description': 'Include a link to the movie page on IMDB.'
                },
                {
                    'name': 'bot_name',
                    'description': 'Name of bot.',
                    'default': 'CouchPotato',
                    'advanced': True,
                },
                {
                    'name': 'avatar_url',
                    'description': 'URL to an image to use as the avatar for '
                                   'notifications.',
                    'default': 'https://couchpota.to/media/images/couch.png',
                    'advanced': True,
                },
                {
                    'name': 'discord_tts',
                    'default': 0,
                    'type': 'bool',
                    'advanced': True,
                    'description': 'Send notification using text-to-speech.',
                },
                {
                    'name': 'on_snatch',
                    'default': 0,
                    'type': 'bool',
                    'advanced': True,
                    'description': 'Also send message when movie is snatched.',
                },
            ],
        }
    ],
}]

2
couchpotato/core/notifications/emby.py

@ -18,7 +18,7 @@ class Emby(Notification):
apikey = self.conf('apikey')
host = cleanHost(host)
url = '%semby/Library/Series/Updated' % (host)
url = '%semby/Library/Movies/Updated' % (host)
values = {}
data = urllib.urlencode(values)

84
couchpotato/core/notifications/join.py

@ -0,0 +1,84 @@
from couchpotato.core.helpers.encoding import toUnicode
from couchpotato.core.helpers.encoding import tryUrlencode
from couchpotato.core.helpers.variable import splitString
from couchpotato.core.logger import CPLog
from couchpotato.core.notifications.base import Notification
log = CPLog(__name__)
autoload = 'Join'
class Join(Notification):
    """Push CouchPotato notifications to devices via the Join API."""

    # Base URL for push requests; title, text, deviceId and icon are
    # interpolated per device.
    url = 'https://joinjoaomgcd.appspot.com/_ah/api/messaging/v1/sendPush?title=%s&text=%s&deviceId=%s&icon=%s'

    # URL for notification icon
    icon = tryUrlencode('https://raw.githubusercontent.com/CouchPotato/CouchPotatoServer/master/couchpotato/static/images/icons/android.png')

    def notify(self, message = '', data = None, listener = None):
        """Send `message` to each configured device; True iff all succeed."""
        if not data: data = {}

        # default for devices
        device_default = [None]

        # Build the request URL in a local: the original appended the apikey
        # to self.url in place, so every subsequent notify() call grew the
        # shared URL with another duplicate '&apikey=...' segment.
        url = self.url
        apikey = self.conf('apikey')
        if apikey is not None:
            url = url + '&apikey=' + apikey
            # If api key is present, default to sending to all devices
            device_default = ['group.all']

        devices = self.getDevices() or device_default
        successful = 0
        for device in devices:
            response = self.urlopen(url % (self.default_title, tryUrlencode(toUnicode(message)), device, self.icon))
            if response:
                successful += 1
            else:
                log.error('Unable to push notification to Join device with ID %s' % device)

        return successful == len(devices)

    def getDevices(self):
        """Return the user-configured device IDs as a list (may be empty)."""
        return splitString(self.conf('devices'))
# Settings UI definition for the Join notifier.
_join_options = [
    {
        'name': 'enabled',
        'default': 0,
        'type': 'enabler',
    },
    {
        'name': 'devices',
        'default': '',
        'description': 'IDs of devices to notify, or group to send to if API key is specified (ex: group.all)'
    },
    {
        'name': 'apikey',
        'default': '',
        'advanced': True,
        'description': 'API Key for sending to all devices, or group'
    },
    {
        'name': 'on_snatch',
        'default': 0,
        'type': 'bool',
        'advanced': True,
        'description': 'Also send message when movie is snatched.',
    },
]

config = [{
    'name': 'join',
    'groups': [{
        'tab': 'notifications',
        'list': 'notification_providers',
        'name': 'join',
        'options': _join_options,
    }],
}]

3
couchpotato/core/notifications/plex/server.py

@ -51,7 +51,8 @@ class PlexServer(object):
req = urllib2.Request("https://plex.tv/users/sign_in.xml", data="")
authheader = "Basic %s" % base64.encodestring('%s:%s' % (username, password))[:-1]
req.add_header("Authorization", authheader)
req.add_header("X-Plex-Product", "Couchpotato Notifier")
req.add_header("X-Plex-Device-Name", "CouchPotato")
req.add_header("X-Plex-Product", "CouchPotato Notifier")
req.add_header("X-Plex-Client-Identifier", "b3a6b24dcab2224bdb101fc6aa08ea5e2f3147d6")
req.add_header("X-Plex-Version", "1.0")

68
couchpotato/core/notifications/script.py

@ -0,0 +1,68 @@
import traceback
import subprocess
import os
from couchpotato.core.helpers.encoding import toUnicode
from couchpotato.core.helpers.variable import getIdentifier
from couchpotato.api import addApiView
from couchpotato.core.event import addEvent
from couchpotato.core.logger import CPLog
from couchpotato.core.notifications.base import Notification
log = CPLog(__name__)
autoload = 'Script'
class Script(Notification):
    """Run a user-configured script after the renamer finishes."""

    def __init__(self):
        addApiView(self.testNotifyName(), self.test)
        addEvent('renamer.after', self.runScript)

    def runScript(self, message = None, group = None):
        """Execute the configured script with the destination dir as argv[1].

        :param message: unused, present for the event signature.
        :param group: renamer group info; `destination_dir` is passed to
                      the script.
        :return: True when the script ran, False on failure, None if disabled.
        """
        if self.isDisabled(): return
        if not group: group = {}

        script_path = self.conf('path')
        if not script_path:
            # Guard: Popen([None, ...]) fails with a cryptic TypeError when
            # no script path has been configured.
            log.error('Script notification is enabled, but no script path is configured.')
            return False

        command = [script_path, group.get('destination_dir')]
        log.info('Executing script command: %s ', command)
        try:
            p = subprocess.Popen(command, stdout = subprocess.PIPE, stderr = subprocess.STDOUT)
            out = p.communicate()
            log.info('Result from script: %s', str(out))
            return True
        except OSError as e:
            log.error('Unable to run script: %s', e)

        return False

    def test(self, **kwargs):
        """API endpoint: report whether the configured script path exists."""
        return {
            'success': os.path.isfile(self.conf('path'))
        }
# Settings UI definition for the Script notifier.
config = [{
    'name': 'script',
    'groups': [{
        'tab': 'notifications',
        'list': 'notification_providers',
        'name': 'script',
        'label': 'Script',
        'options': [
            {
                'name': 'enabled',
                'default': 0,
                'type': 'enabler',
            },
            {
                'name': 'path',
                'description': 'The path to the script to execute.'
            },
        ],
    }],
}]

2
couchpotato/core/plugins/log/static/log.js

@ -250,7 +250,7 @@ Page.Log = new Class({
new Element('a.button', {
'target': '_blank',
'text': 'the contributing guide',
'href': 'https://github.com/CouchPotato/CouchPotatoServer/blob/develop/contributing.md'
'href': 'https://github.com/CouchPotato/CouchPotatoServer/wiki/Developer-branch'
}),
new Element('span', {
'html': ' before posting, then copy the text below and <strong>FILL IN</strong> the dots.'

12
couchpotato/core/plugins/profile/main.py

@ -62,6 +62,18 @@ class ProfilePlugin(Plugin):
except:
log.error('Failed: %s', traceback.format_exc())
# Cleanup profiles that have empty qualites
profiles = self.all()
for profile in profiles:
try:
if '' in profile.get('qualities') or '-1' in profile.get('qualities'):
log.warning('Found profile with empty qualities, cleaning it up')
p = db.get('id', profile.get('_id'))
p['qualities'] = [x for x in p['qualities'] if (x != '' and x != '-1')]
db.update(p)
except:
log.error('Failed: %s', traceback.format_exc())
def allView(self, **kwargs):
return {

11
couchpotato/core/plugins/profile/static/profile.js

@ -140,7 +140,7 @@ var Profile = new Class({
};
Array.each(self.type_container.getElements('.type'), function(type){
if(!type.hasClass('deleted') && type.getElement('select').get('value') != -1)
if(!type.hasClass('deleted') && type.getElement('select').get('value') != -1 && type.getElement('select').get('value') != "")
data.types.include({
'quality': type.getElement('select').get('value'),
'finish': +type.getElement('input.finish[type=checkbox]').checked,
@ -258,9 +258,10 @@ Profile.Type = new Class({
self.create();
self.addEvent('change', function(){
self.el[self.qualities.get('value') == '-1' ? 'addClass' : 'removeClass']('is_empty');
self.el[Quality.getQuality(self.qualities.get('value')).allow_3d ? 'addClass': 'removeClass']('allow_3d');
self.deleted = self.qualities.get('value') == '-1';
var has_quality = !(self.qualities.get('value') == '-1' || self.qualities.get('value') == '');
self.el[!has_quality ? 'addClass' : 'removeClass']('is_empty');
self.el[has_quality && Quality.getQuality(self.qualities.get('value')).allow_3d ? 'addClass': 'removeClass']('allow_3d');
self.deleted = !has_quality;
});
},
@ -337,7 +338,7 @@ Profile.Type = new Class({
}).inject(self.qualities);
});
self.qualities.set('value', self.data.quality);
self.qualities.set('value', self.data.quality || -1);
return self.qualities;

5
couchpotato/core/plugins/quality/main.py

@ -114,7 +114,12 @@ class QualityPlugin(Plugin):
db = get_db()
quality_dict = {}
try:
quality = db.get('quality', identifier, with_doc = True)['doc']
except RecordNotFound:
log.error("Unable to find '%s' in the quality DB", indentifier)
quality = None
if quality:
quality_dict = mergeDicts(self.getQuality(quality['identifier']), quality)

4
couchpotato/core/plugins/quality/static/quality.js

@ -31,9 +31,9 @@ var QualityBase = new Class({
getQuality: function(identifier){
try {
return this.qualities.filter(function(q){
return (this.qualities.filter(function(q){
return q.identifier == identifier;
}).pick();
}).pick() || {});
}
catch(e){}

53
couchpotato/core/plugins/renamer.py

@ -216,6 +216,9 @@ class Renamer(Plugin):
except:
log.error('Failed getting files from %s: %s', (media_folder, traceback.format_exc()))
# post_filter files from configuration; this is a ":"-separated list of globs
files = self.filesAfterIgnoring(files)
db = get_db()
# Extend the download info with info stored in the downloaded release
@ -347,11 +350,22 @@ class Renamer(Plugin):
'category': category_label,
'3d': '3D' if group['meta_data']['quality'].get('is_3d', 0) else '',
'3d_type': group['meta_data'].get('3d_type'),
'3d_type_short': group['meta_data'].get('3d_type'),
}
if replacements['mpaa_only'] not in ('G', 'PG', 'PG-13', 'R', 'NC-17'):
replacements['mpaa_only'] = 'Not Rated'
if replacements['3d_type_short']:
replacements['3d_type_short'] = replacements['3d_type_short'].replace('Half ', 'H').replace('Full ', '')
if self.conf('use_tab_threed') and replacements['3d_type']:
if 'OU' in replacements['3d_type']:
replacements['3d_type'] = replacements['3d_type'].replace('OU','TAB')
if self.conf('use_tab_threed') and replacements['3d_type_short']:
if 'OU' in replacements['3d_type_short']:
replacements['3d_type_short'] = replacements['3d_type_short'].replace('OU','TAB')
for file_type in group['files']:
# Move nfo depending on settings
@ -1165,6 +1179,30 @@ Remove it if you want it to be renamed (again, or at least let it try again)
def movieInFromFolder(self, media_folder):
return media_folder and isSubFolder(media_folder, sp(self.conf('from'))) or not media_folder
@property
def ignored_in_path(self):
return self.conf('ignored_in_path').split(":") if self.conf('ignored_in_path') else []
def filesAfterIgnoring(self, original_file_list):
kept_files = []
for path in original_file_list:
if self.keepFile(path):
kept_files.append(path)
else:
log.debug('Ignored "%s" during renaming', path)
return kept_files
def keepFile(self, filename):
# ignoredpaths
for i in self.ignored_in_path:
if i in filename.lower():
log.debug('Ignored "%s" contains "%s".', (filename, i))
return False
# All is OK
return True
def extractFiles(self, folder = None, media_folder = None, files = None, cleanup = False):
if not files: files = []
@ -1298,6 +1336,7 @@ rename_options = {
'quality_type': '(HD) or (SD)',
'3d': '3D',
'3d_type': '3D Type (Full SBS)',
'3d_type_short' : 'Short 3D Type (FSBS)',
'video': 'Video (x264)',
'audio': 'Audio (DTS)',
'group': 'Releasegroup name',
@ -1360,6 +1399,14 @@ config = [{
},
{
'advanced': True,
'name': 'use_tab_threed',
'type': 'bool',
'label': 'Use TAB 3D',
'description': ('Use TAB (Top And Bottom) instead of OU (Over Under).','This will allow Kodi to recognize vertical formatted 3D movies properly.'),
'default': True
},
{
'advanced': True,
'name': 'replace_doubles',
'type': 'bool',
'label': 'Clean Name',
@ -1367,6 +1414,12 @@ config = [{
'default': True
},
{
'name': 'ignored_in_path',
'label': 'Ignored file patterns',
'description': ('A list of globs to path match when scanning, separated by ":"', 'anything on this list will be skipped during rename operations'),
'default': '*/.sync/*',
},
{
'name': 'unrar',
'type': 'bool',
'description': 'Extract rar files if found.',

11
couchpotato/core/plugins/scanner.py

@ -28,6 +28,7 @@ class Scanner(Plugin):
'_failed_rename_', '.appledouble', '.appledb', '.appledesktop', os.path.sep + '._', '.ds_store', 'cp.cpnfo',
'thumbs.db', 'ehthumbs.db', 'desktop.ini'] # unpacking, smb-crap, hidden files
ignore_names = ['extract', 'extracting', 'extracted', 'movie', 'movies', 'film', 'films', 'download', 'downloads', 'video_ts', 'audio_ts', 'bdmv', 'certificate']
ignored_extensions = ['ignore', 'lftp-pget-status']
extensions = {
'movie': ['mkv', 'wmv', 'avi', 'mpg', 'mpeg', 'mp4', 'm2ts', 'iso', 'img', 'mdf', 'ts', 'm4v', 'flv'],
'movie_extra': ['mds'],
@ -42,9 +43,9 @@ class Scanner(Plugin):
'Half SBS': [('half', 'sbs'), ('h', 'sbs'), 'hsbs'],
'Full SBS': [('full', 'sbs'), ('f', 'sbs'), 'fsbs'],
'SBS': ['sbs'],
'Half OU': [('half', 'ou'), ('h', 'ou'), 'hou'],
'Full OU': [('full', 'ou'), ('h', 'ou'), 'fou'],
'OU': ['ou'],
'Half OU': [('half', 'ou'), ('h', 'ou'), ('half', 'tab'), ('h', 'tab'), 'htab', 'hou'],
'Full OU': [('full', 'ou'), ('f', 'ou'), ('full', 'tab'), ('f', 'tab'), 'ftab', 'fou'],
'OU': ['ou', 'tab'],
'Frame Packed': ['mvc', ('complete', 'bluray')],
'3D': ['3d']
}
@ -225,12 +226,12 @@ class Scanner(Plugin):
group['unsorted_files'].extend(found_files)
leftovers = leftovers - found_files
has_ignored += 1 if ext == 'ignore' else 0
has_ignored += 1 if ext in self.ignored_extensions else 0
if has_ignored == 0:
for file_path in list(group['unsorted_files']):
ext = getExt(file_path)
has_ignored += 1 if ext == 'ignore' else 0
has_ignored += 1 if ext in self.ignored_extensions else 0
if has_ignored > 0:
ignored_identifiers.append(identifier)

2
couchpotato/core/plugins/subtitle.py

@ -16,7 +16,7 @@ autoload = 'Subtitle'
class Subtitle(Plugin):
services = ['opensubtitles', 'thesubdb', 'subswiki', 'subscenter']
services = ['opensubtitles', 'thesubdb', 'subswiki', 'subscenter', 'wizdom']
def __init__(self):
addEvent('renamer.before', self.searchSingle)

13
couchpotato/static/scripts/combined.plugins.min.js

@ -3093,7 +3093,7 @@ Page.Log = new Class({
}), new Element("a.button", {
target: "_blank",
text: "the contributing guide",
href: "https://github.com/CouchPotato/CouchPotatoServer/blob/develop/contributing.md"
href: "https://github.com/CouchPotato/CouchPotatoServer/wiki/Developer-branch"
}), new Element("span", {
html: " before posting, then copy the text below and <strong>FILL IN</strong> the dots."
})), textarea = new Element("textarea", {
@ -3223,7 +3223,7 @@ var Profile = new Class({
types: []
};
Array.each(self.type_container.getElements(".type"), function(type) {
if (!type.hasClass("deleted") && type.getElement("select").get("value") != -1) data.types.include({
if (!type.hasClass("deleted") && type.getElement("select").get("value") != -1 && type.getElement("select").get("value") != "") data.types.include({
quality: type.getElement("select").get("value"),
finish: +type.getElement("input.finish[type=checkbox]").checked,
"3d": +type.getElement("input.3d[type=checkbox]").checked
@ -3313,9 +3313,10 @@ Profile.Type = new Class({
self.data = data || {};
self.create();
self.addEvent("change", function() {
self.el[self.qualities.get("value") == "-1" ? "addClass" : "removeClass"]("is_empty");
self.el[Quality.getQuality(self.qualities.get("value")).allow_3d ? "addClass" : "removeClass"]("allow_3d");
self.deleted = self.qualities.get("value") == "-1";
var has_quality = !(self.qualities.get("value") == "-1" || self.qualities.get("value") == "");
self.el[!has_quality ? "addClass" : "removeClass"]("is_empty");
self.el[has_quality && Quality.getQuality(self.qualities.get("value")).allow_3d ? "addClass" : "removeClass"]("allow_3d");
self.deleted = !has_quality;
});
},
create: function() {
@ -3364,7 +3365,7 @@ Profile.Type = new Class({
"data-allow_3d": q.allow_3d
}).inject(self.qualities);
});
self.qualities.set("value", self.data.quality);
self.qualities.set("value", self.data.quality || -1);
return self.qualities;
},
getData: function() {

2
couchpotato/static/scripts/couchpotato.js

@ -182,7 +182,7 @@
'click': self.checkForUpdate.bind(self, null)
}
}));
};
}
setting_links.each(function(a){
self.block.more.addLink(a);

2
couchpotato/templates/login.html

@ -25,7 +25,7 @@
<body class="page login">
<form action="" method="post">
<h1>CouchPotato</h1>
<div class="ctrlHolder"><input class="username" name="username" type="text" placeholder="Username" autocomplete="off" autocorrect="off" autocapitalize="off" spellcheck="false" /></div>
<div class="ctrlHolder"><input class="username" name="username" type="text" placeholder="Username" autocomplete="off" autocorrect="off" autocapitalize="off" spellcheck="false" autofocus="autofocus" /></div>
<div class="ctrlHolder"><input class="password" name="password" type="password" placeholder="Password" autocomplete="off" autocorrect="off" autocapitalize="off" spellcheck="false" /></div>
<div class="ctrlHolder">
<label class="remember_me" title="for 30 days"><input id="remember_me" name="remember_me" type="checkbox" value="1" checked="checked" /> Remember me</label>

8
libs/guessit/fileutils.py

@ -23,6 +23,7 @@ from guessit import s, u
import os.path
import zipfile
import io
import re
def split_path(path):
@ -46,6 +47,13 @@ def split_path(path):
head, tail = os.path.split(path)
headlen = len(head)
# if a string has a : in position 1 it gets splitted in everycase, also if
# there is not a valid drive letter and also if : is not followed by \
if headlen >= 2 and headlen <= 3 and head[1] == ':' and ( head + tail == path ) and ( head[1:] != ':\\' or not re.match("^[a-zA-Z]:\\\\", head) ):
tail = path
head = ''
headlen = 0
# on Unix systems, the root folder is '/'
if head and head == '/'*headlen and tail == '':
return ['/'] + result

214
libs/pio/api.py

@ -1,26 +1,53 @@
# -*- coding: utf-8 -*-
# Changed
# Removed iso8601 library requirement
# Added CP logging
import os
import re
import json
import binascii
import webbrowser
try:
from urllib import urlencode
from couchpotato import CPLog
from dateutil.parser import parse
except ImportError:
from urllib.parse import urlencode
from datetime import datetime
import tus
import requests
from requests.adapters import HTTPAdapter
from requests.packages.urllib3.util.retry import Retry
from couchpotato import CPLog
KB = 1024
MB = 1024 * KB
# Read and write operations are limited to this chunk size.
# This can make a big difference when dealing with large files.
CHUNK_SIZE = 256 * KB
BASE_URL = 'https://api.put.io/v2'
UPLOAD_URL = 'https://upload.put.io/v2/files/upload'
TUS_UPLOAD_URL = 'https://upload.put.io/files/'
ACCESS_TOKEN_URL = 'https://api.put.io/v2/oauth2/access_token'
AUTHENTICATION_URL = 'https://api.put.io/v2/oauth2/authenticate'
log = CPLog(__name__)
class APIError(Exception):
pass
class ClientError(APIError):
pass
class ServerError(APIError):
pass
class AuthHelper(object):
def __init__(self, client_id, client_secret, redirect_uri, type='code'):
@ -58,10 +85,21 @@ class AuthHelper(object):
class Client(object):
def __init__(self, access_token):
def __init__(self, access_token, use_retry=False):
self.access_token = access_token
self.session = requests.session()
if use_retry:
# Retry maximum 10 times, backoff on each retry
# Sleeps 1s, 2s, 4s, 8s, etc to a maximum of 120s between retries
# Retries on HTTP status codes 500, 502, 503, 504
retries = Retry(total=10,
backoff_factor=1,
status_forcelist=[500, 502, 503, 504])
# Use the retry strategy for all HTTPS requests
self.session.mount('https://', HTTPAdapter(max_retries=retries))
# Keep resource classes as attributes of client.
# Pass client to resource classes so resource object
# can use the client.
@ -71,7 +109,7 @@ class Client(object):
self.Account = type('Account', (_Account,), attributes)
def request(self, path, method='GET', params=None, data=None, files=None,
headers=None, raw=False, stream=False):
headers=None, raw=False, allow_redirects=True, stream=False):
"""
Wrapper around requests.request()
@ -91,27 +129,31 @@ class Client(object):
headers['Accept'] = 'application/json'
if path.startswith('https://'):
url = path
else:
url = BASE_URL + path
log.debug('url: %s', url)
response = self.session.request(
method, url, params=params, data=data, files=files,
headers=headers, allow_redirects=True, stream=stream)
headers=headers, allow_redirects=allow_redirects, stream=stream)
log.debug('response: %s', response)
if raw:
return response
log.debug('content: %s', response.content)
try:
response = json.loads(response.content)
body = json.loads(response.content.decode())
except ValueError:
raise Exception('Server didn\'t send valid JSON:\n%s\n%s' % (
response, response.content))
raise ServerError('InvalidJSON', response.content)
if response['status'] == 'ERROR':
raise Exception(response['error_type'])
if body['status'] == 'ERROR':
log.error("API returned error: %s", body)
exception_class = {'4': ClientError, '5': ServerError}[str(response.status_code)[0]]
raise exception_class(body['error_type'], body['error_message'])
return response
return body
class _BaseResource(object):
@ -125,8 +167,8 @@ class _BaseResource(object):
self.name = None
self.__dict__.update(resource_dict)
try:
self.created_at = parse(self.created_at)
except AttributeError:
self.created_at = strptime(self.created_at)
except Exception:
self.created_at = None
def __str__(self):
@ -135,7 +177,7 @@ class _BaseResource(object):
def __repr__(self):
# shorten name for display
name = self.name[:17] + '...' if len(self.name) > 20 else self.name
return '<%s id=%r, name="%r">' % (
return '<%s id=%r, name=%r>' % (
self.__class__.__name__, self.id, name)
@ -160,59 +202,113 @@ class _File(_BaseResource):
files = {'file': (name, f)}
else:
files = {'file': f}
d = cls.client.request('/files/upload', method='POST',
d = cls.client.request(UPLOAD_URL, method='POST',
data={'parent_id': parent_id}, files=files)
f = d['file']
return cls(f)
@classmethod
def upload_tus(cls, path, name=None, parent_id=0):
headers = {'Authorization': 'token %s' % cls.client.access_token}
metadata = {'parent_id': str(parent_id)}
if name:
metadata['name'] = name
with open(path) as f:
tus.upload(f, TUS_UPLOAD_URL, file_name=name, headers=headers, metadata=metadata)
def dir(self):
"""List the files under directory."""
return self.list(parent_id=self.id)
def download(self, dest='.', delete_after_download=False):
def download(self, dest='.', delete_after_download=False, chunk_size=CHUNK_SIZE):
if self.content_type == 'application/x-directory':
self._download_directory(dest, delete_after_download)
self._download_directory(dest, delete_after_download, chunk_size)
else:
self._download_file(dest, delete_after_download)
self._download_file(dest, delete_after_download, chunk_size)
def _download_directory(self, dest='.', delete_after_download=False):
name = self.name
if isinstance(name, unicode):
name = name.encode('utf-8', 'replace')
def _download_directory(self, dest, delete_after_download, chunk_size):
name = _str(self.name)
dest = os.path.join(dest, name)
if not os.path.exists(dest):
os.mkdir(dest)
for sub_file in self.dir():
sub_file.download(dest, delete_after_download)
sub_file.download(dest, delete_after_download, chunk_size)
if delete_after_download:
self.delete()
def _download_file(self, dest='.', delete_after_download=False):
response = self.client.request(
'/files/%s/download' % self.id, raw=True, stream=True)
def _verify_file(self, filepath):
log.info('verifying crc32...')
filesize = os.path.getsize(filepath)
if self.size != filesize:
logging.error('file %s is %d bytes, should be %s bytes' % (filepath, filesize, self.size))
return False
crcbin = 0
with open(filepath, 'rb') as f:
while True:
chunk = f.read(CHUNK_SIZE)
if not chunk:
break
crcbin = binascii.crc32(chunk, crcbin) & 0xffffffff
filename = re.match(
'attachment; filename=(.*)',
response.headers['content-disposition']).groups()[0]
# If file name has spaces, it must have quotes around.
filename = filename.strip('"')
crc32 = '%08x' % crcbin
with open(os.path.join(dest, filename), 'wb') as f:
for chunk in response.iter_content(chunk_size=1024):
if crc32 != self.crc32:
logging.error('file %s CRC32 is %s, should be %s' % (filepath, crc32, self.crc32))
return False
return True
def _download_file(self, dest, delete_after_download, chunk_size):
name = _str(self.name)
filepath = os.path.join(dest, name)
if os.path.exists(filepath):
first_byte = os.path.getsize(filepath)
if first_byte == self.size:
log.warning('file %s exists and is the correct size %d' % (filepath, self.size))
else:
first_byte = 0
log.debug('file %s is currently %d, should be %d' % (filepath, first_byte, self.size))
if self.size == 0:
# Create an empty file
open(filepath, 'w').close()
log.debug('created empty file %s' % filepath)
else:
if first_byte < self.size:
with open(filepath, 'ab') as f:
headers = {'Range': 'bytes=%d-' % first_byte}
log.debug('request range: bytes=%d-' % first_byte)
response = self.client.request('/files/%s/download' % self.id,
headers=headers,
raw=True,
stream=True)
for chunk in response.iter_content(chunk_size=chunk_size):
if chunk: # filter out keep-alive new chunks
f.write(chunk)
f.flush()
if self._verify_file(filepath):
if delete_after_download:
self.delete()
def delete(self):
return self.client.request('/files/delete', method='POST',
data={'file_ids': str(self.id)})
data={'file_id': str(self.id)})
@classmethod
def delete_multi(cls, ids):
return cls.client.request('/files/delete', method='POST',
data={'file_ids': ','.join(map(str, ids))})
def move(self, parent_id):
return self.client.request('/files/move', method='POST',
@ -239,6 +335,7 @@ class _Transfer(_BaseResource):
@classmethod
def add_url(cls, url, parent_id=0, extract=False, callback_url=None):
log.debug('callback_url is %s', callback_url)
d = cls.client.request('/transfers/add', method='POST', data=dict(
url=url, save_parent_id=parent_id, extract=extract,
callback_url=callback_url))
@ -247,10 +344,10 @@ class _Transfer(_BaseResource):
@classmethod
def add_torrent(cls, path, parent_id=0, extract=False, callback_url=None):
with open(path) as f:
with open(path, 'rb') as f:
files = {'file': f}
d = cls.client.request('/files/upload', method='POST', files=files,
data=dict(save_parent_id=parent_id,
data=dict(parent_id=parent_id,
extract=extract,
callback_url=callback_url))
t = d['transfer']
@ -260,6 +357,17 @@ class _Transfer(_BaseResource):
def clean(cls):
return cls.client.request('/transfers/clean', method='POST')
def cancel(self):
return self.client.request('/transfers/cancel',
method='POST',
data={'transfer_ids': self.id})
@classmethod
def cancel_multi(cls, ids):
return cls.client.request('/transfers/cancel',
method='POST',
data={'transfer_ids': ','.join(map(str, ids))})
class _Account(_BaseResource):
@ -270,3 +378,31 @@ class _Account(_BaseResource):
@classmethod
def settings(cls):
return cls.client.request('/account/settings', method='GET')
# Due to a nasty bug in datetime module, datetime.strptime calls
# are not thread-safe and can throw a TypeError. Details: https://bugs.python.org/issue7980
# Here we are implementing simple RFC3339 parser which is used in Put.io APIv2.
def strptime(date):
    """Parse a 'YYYY-MM-ddTHH:mm:ss' timestamp into a datetime.

    Implemented by slicing instead of datetime.strptime because
    datetime.strptime is not thread-safe (https://bugs.python.org/issue7980);
    this is the format used by the Put.io APIv2 (RFC3339 without timezone).
    """
    d = {
        'year': date[0:4],
        'month': date[5:7],
        'day': date[8:10],
        'hour': date[11:13],
        'minute': date[14:16],
        'second': date[17:],
    }
    # dict.items() works on both Python 2 and 3; the original used
    # iteritems(), which does not exist on Python 3.
    d = dict((k, int(v)) for k, v in d.items())
    return datetime(**d)
def _str(s):
"""Python 3 compatibility function for converting to str."""
try:
if isinstance(s, unicode):
return s.encode('utf-8', 'replace')
except NameError:
pass
return s

48
libs/rtorrent/__init__.py

@ -74,7 +74,9 @@ class RTorrent:
if m.is_retriever() and m.is_available(self)]
m = rtorrent.rpc.Multicall(self)
m.add("d.multicall", view, "d.get_hash=",
# multicall2 wants .. something .. as its first argument. It accepts a blank string, so let's go with that.
MCFirstArg = ""
m.add("d.multicall2", MCFirstArg, view, "d.hash=",
*[method.rpc_call + "=" for method in retriever_methods])
results = m.call()[0] # only sent one call, only need first result
@ -116,7 +118,7 @@ class RTorrent:
elif verbose:
func_name = "load.verbose"
else:
func_name = "load"
func_name = "load.normal"
elif file_type in ["file", "raw"]:
if start and verbose:
func_name = "load.raw_start_verbose"
@ -137,31 +139,49 @@ class RTorrent:
func_name = self._get_load_function("url", start, verbose)
# rtorrent > 0.9.6 requires first parameter @target
target = ""
# load magnet
getattr(p, func_name)(magneturl)
getattr(p, func_name)(target, magneturl)
if verify_load:
magnet = False
i = 0
while i < verify_retries:
for torrent in self.get_torrents():
if torrent.info_hash != info_hash:
continue
for m in self.get_torrents():
# This block finds the magnet that was just added, starts it, breaks
# out of the for loop, and then out of the while loop.
# If it can't find the magnet, magnet won't get defined.
if m.info_hash == info_hash:
magnet = m
magnet.start()
i += 999
break
# If torrent hasn't been defined, sleep for a second and check again.
if not magnet:
time.sleep(1)
i += 1
# Resolve magnet to torrent
# This bit waits for the magnet to be resolved into an actual
# torrent, and then starts it.
torrent = False
i = 0
while i < verify_retries:
for t in self.get_torrents():
if t.info_hash == info_hash:
if str(info_hash) not in str(t.name):
torrent = t
torrent.start()
i += 999
break
if not torrent:
time.sleep(1)
i += 1
assert info_hash in [t.info_hash for t in self.torrents],\
"Adding magnet was unsuccessful."
i = 0
while i < verify_retries:
for torrent in self.get_torrents():
if torrent.info_hash == info_hash:
if str(info_hash) not in str(torrent.name):
time.sleep(1)
i += 1
return(torrent)

28
libs/rtorrent/file.py

@ -59,29 +59,29 @@ class File:
methods = [
# RETRIEVERS
Method(File, 'get_last_touched', 'f.get_last_touched'),
Method(File, 'get_range_second', 'f.get_range_second'),
Method(File, 'get_size_bytes', 'f.get_size_bytes'),
Method(File, 'get_priority', 'f.get_priority'),
Method(File, 'get_match_depth_next', 'f.get_match_depth_next'),
Method(File, 'get_last_touched', 'f.last_touched'),
Method(File, 'get_range_second', 'f.range_second'),
Method(File, 'get_size_bytes', 'f.size_bytes'),
Method(File, 'get_priority', 'f.priority'),
Method(File, 'get_match_depth_next', 'f.match_depth_next'),
Method(File, 'is_resize_queued', 'f.is_resize_queued',
boolean=True,
),
Method(File, 'get_range_first', 'f.get_range_first'),
Method(File, 'get_match_depth_prev', 'f.get_match_depth_prev'),
Method(File, 'get_path', 'f.get_path'),
Method(File, 'get_completed_chunks', 'f.get_completed_chunks'),
Method(File, 'get_path_components', 'f.get_path_components'),
Method(File, 'get_range_first', 'f.range_first'),
Method(File, 'get_match_depth_prev', 'f.match_depth_prev'),
Method(File, 'get_path', 'f.path'),
Method(File, 'get_completed_chunks', 'f.completed_chunks'),
Method(File, 'get_path_components', 'f.path_components'),
Method(File, 'is_created', 'f.is_created',
boolean=True,
),
Method(File, 'is_open', 'f.is_open',
boolean=True,
),
Method(File, 'get_size_chunks', 'f.get_size_chunks'),
Method(File, 'get_offset', 'f.get_offset'),
Method(File, 'get_frozen_path', 'f.get_frozen_path'),
Method(File, 'get_path_depth', 'f.get_path_depth'),
Method(File, 'get_size_chunks', 'f.size_chunks'),
Method(File, 'get_offset', 'f.offset'),
Method(File, 'get_frozen_path', 'f.frozen_path'),
Method(File, 'get_path_depth', 'f.path_depth'),
Method(File, 'is_create_queued', 'f.is_create_queued',
boolean=True,
),

26
libs/rtorrent/peer.py

@ -60,39 +60,39 @@ methods = [
Method(Peer, 'is_preferred', 'p.is_preferred',
boolean=True,
),
Method(Peer, 'get_down_rate', 'p.get_down_rate'),
Method(Peer, 'get_down_rate', 'p.down_rate'),
Method(Peer, 'is_unwanted', 'p.is_unwanted',
boolean=True,
),
Method(Peer, 'get_peer_total', 'p.get_peer_total'),
Method(Peer, 'get_peer_rate', 'p.get_peer_rate'),
Method(Peer, 'get_port', 'p.get_port'),
Method(Peer, 'get_peer_total', 'p.peer_total'),
Method(Peer, 'get_peer_rate', 'p.peer_rate'),
Method(Peer, 'get_port', 'p.port'),
Method(Peer, 'is_snubbed', 'p.is_snubbed',
boolean=True,
),
Method(Peer, 'get_id_html', 'p.get_id_html'),
Method(Peer, 'get_up_rate', 'p.get_up_rate'),
Method(Peer, 'get_id_html', 'p.id_html'),
Method(Peer, 'get_up_rate', 'p.up_rate'),
Method(Peer, 'is_banned', 'p.banned',
boolean=True,
),
Method(Peer, 'get_completed_percent', 'p.get_completed_percent'),
Method(Peer, 'get_completed_percent', 'p.completed_percent'),
Method(Peer, 'completed_percent', 'p.completed_percent'),
Method(Peer, 'get_id', 'p.get_id'),
Method(Peer, 'get_id', 'p.id'),
Method(Peer, 'is_obfuscated', 'p.is_obfuscated',
boolean=True,
),
Method(Peer, 'get_down_total', 'p.get_down_total'),
Method(Peer, 'get_client_version', 'p.get_client_version'),
Method(Peer, 'get_address', 'p.get_address'),
Method(Peer, 'get.down.total', 'p.down_total'),
Method(Peer, 'get_client_version', 'p.client_version'),
Method(Peer, 'get_address', 'p.address'),
Method(Peer, 'is_incoming', 'p.is_incoming',
boolean=True,
),
Method(Peer, 'is_encrypted', 'p.is_encrypted',
boolean=True,
),
Method(Peer, 'get_options_str', 'p.get_options_str'),
Method(Peer, 'get_options_str', 'p.options_str'),
Method(Peer, 'get_client_version', 'p.client_version'),
Method(Peer, 'get_up_total', 'p.get_up_total'),
Method(Peer, 'get_up_total', 'p.up_total'),
# MODIFIERS
]

6
libs/rtorrent/rpc/__init__.py

@ -38,13 +38,13 @@ def get_varname(rpc_call):
r = re.search(
"([ptdf]\.|system\.|get\_|is\_|set\_)+([^=]*)", rpc_call, re.I)
if r:
return(r.groups()[-1])
return(r.groups()[-1].replace(".","_"))
else:
return(None)
def _handle_unavailable_rpc_method(method, rt_obj):
msg = "Method isn't available."
msg = "Method " + str(method) + " isn't available."
if rt_obj.connection._get_client_version_tuple() < method.min_version:
msg = "This method is only available in " \
"RTorrent version v{0} or later".format(
@ -91,7 +91,7 @@ class Method:
def _get_method_type(self):
"""Determine whether method is a modifier or a retriever"""
if self.method_name[:4] == "set_": return('m') # modifier
if self.method_name[:4] == "set_" or self.method_name[-4:] == ".set": return('m') # modifier
else:
return('r') # retriever

168
libs/rtorrent/torrent.py

@ -139,7 +139,7 @@ class Torrent:
results = m.call()[0] # only sent one call, only need first result
offset_method_index = retriever_methods.index(
rtorrent.rpc.find_method("f.get_offset"))
rtorrent.rpc.find_method("f.offset"))
# make a list of the offsets of all the files, sort appropriately
offset_list = sorted([r[offset_method_index] for r in results])
@ -168,7 +168,7 @@ class Torrent:
"""
m = rtorrent.rpc.Multicall(self)
self.multicall_add(m, "d.try_stop")
self.multicall_add(m, "d.set_directory", d)
self.multicall_add(m, "d.directory.set", d)
self.directory = m.call()[-1]
@ -181,7 +181,7 @@ class Torrent:
"""
m = rtorrent.rpc.Multicall(self)
self.multicall_add(m, "d.try_stop")
self.multicall_add(m, "d.set_directory_base", d)
self.multicall_add(m, "d.directory_base.set", d)
def start(self):
"""Start the torrent"""
@ -304,7 +304,7 @@ class Torrent:
m = rtorrent.rpc.Multicall(self)
field = "custom{0}".format(key)
self.multicall_add(m, "d.get_{0}".format(field))
self.multicall_add(m, "d.{0}".format(field))
setattr(self, field, m.call()[-1])
return (getattr(self, field))
@ -326,7 +326,7 @@ class Torrent:
self._assert_custom_key_valid(key)
m = rtorrent.rpc.Multicall(self)
self.multicall_add(m, "d.set_custom{0}".format(key), value)
self.multicall_add(m, "d.custom{0}.set".format(key), value)
return(m.call()[-1])
@ -355,7 +355,7 @@ class Torrent:
@note: Variable where the result for this method is stored Torrent.hash_checking_queued"""
m = rtorrent.rpc.Multicall(self)
self.multicall_add(m, "d.get_hashing")
self.multicall_add(m, "d.hashing")
self.multicall_add(m, "d.is_hash_checking")
results = m.call()
@ -397,86 +397,86 @@ methods = [
Method(Torrent, 'is_hash_checking', 'd.is_hash_checking',
boolean=True,
),
Method(Torrent, 'get_peers_max', 'd.get_peers_max'),
Method(Torrent, 'get_tracker_focus', 'd.get_tracker_focus'),
Method(Torrent, 'get_skip_total', 'd.get_skip_total'),
Method(Torrent, 'get_state', 'd.get_state'),
Method(Torrent, 'get_peer_exchange', 'd.get_peer_exchange'),
Method(Torrent, 'get_down_rate', 'd.get_down_rate'),
Method(Torrent, 'get_connection_seed', 'd.get_connection_seed'),
Method(Torrent, 'get_uploads_max', 'd.get_uploads_max'),
Method(Torrent, 'get_priority_str', 'd.get_priority_str'),
Method(Torrent, 'get_peers_max', 'd.peers_max'),
Method(Torrent, 'get_tracker_focus', 'd.tracker_focus'),
Method(Torrent, 'get_skip_total', 'd.skip.total'),
Method(Torrent, 'get_state', 'd.state'),
Method(Torrent, 'get_peer_exchange', 'd.peer_exchange'),
Method(Torrent, 'get_down_rate', 'd.down.rate'),
Method(Torrent, 'get_connection_seed', 'd.connection_seed'),
Method(Torrent, 'get_uploads_max', 'd.uploads_max'),
Method(Torrent, 'get_priority_str', 'd.priority_str'),
Method(Torrent, 'is_open', 'd.is_open',
boolean=True,
),
Method(Torrent, 'get_peers_min', 'd.get_peers_min'),
Method(Torrent, 'get_peers_complete', 'd.get_peers_complete'),
Method(Torrent, 'get_tracker_numwant', 'd.get_tracker_numwant'),
Method(Torrent, 'get_connection_current', 'd.get_connection_current'),
Method(Torrent, 'is_complete', 'd.get_complete',
Method(Torrent, 'get_peers_min', 'd.peers_min'),
Method(Torrent, 'get_peers_complete', 'd.peers_complete'),
Method(Torrent, 'get_tracker_numwant', 'd.tracker_numwant'),
Method(Torrent, 'get_connection_current', 'd.connection_current'),
Method(Torrent, 'is_complete', 'd.complete',
boolean=True,
),
Method(Torrent, 'get_peers_connected', 'd.get_peers_connected'),
Method(Torrent, 'get_chunk_size', 'd.get_chunk_size'),
Method(Torrent, 'get_state_counter', 'd.get_state_counter'),
Method(Torrent, 'get_base_filename', 'd.get_base_filename'),
Method(Torrent, 'get_state_changed', 'd.get_state_changed'),
Method(Torrent, 'get_peers_not_connected', 'd.get_peers_not_connected'),
Method(Torrent, 'get_directory', 'd.get_directory'),
Method(Torrent, 'get_peers_connected', 'd.peers_connected'),
Method(Torrent, 'get_chunk_size', 'd.chunk_size'),
Method(Torrent, 'get_state_counter', 'd.state_counter'),
Method(Torrent, 'get_base_filename', 'd.base_filename'),
Method(Torrent, 'get_state_changed', 'd.state_changed'),
Method(Torrent, 'get_peers_not_connected', 'd.peers_not_connected'),
Method(Torrent, 'get_directory', 'd.directory'),
Method(Torrent, 'is_incomplete', 'd.incomplete',
boolean=True,
),
Method(Torrent, 'get_tracker_size', 'd.get_tracker_size'),
Method(Torrent, 'get_tracker_size', 'd.tracker_size'),
Method(Torrent, 'is_multi_file', 'd.is_multi_file',
boolean=True,
),
Method(Torrent, 'get_local_id', 'd.get_local_id'),
Method(Torrent, 'get_ratio', 'd.get_ratio',
Method(Torrent, 'get_local_id', 'd.local_id'),
Method(Torrent, 'get_ratio', 'd.ratio',
post_process_func=lambda x: x / 1000.0,
),
Method(Torrent, 'get_loaded_file', 'd.get_loaded_file'),
Method(Torrent, 'get_max_file_size', 'd.get_max_file_size'),
Method(Torrent, 'get_size_chunks', 'd.get_size_chunks'),
Method(Torrent, 'get_loaded_file', 'd.loaded_file'),
Method(Torrent, 'get_max_file_size', 'd.max_file_size'),
Method(Torrent, 'get_size_chunks', 'd.size_chunks'),
Method(Torrent, 'is_pex_active', 'd.is_pex_active',
boolean=True,
),
Method(Torrent, 'get_hashing', 'd.get_hashing'),
Method(Torrent, 'get_bitfield', 'd.get_bitfield'),
Method(Torrent, 'get_local_id_html', 'd.get_local_id_html'),
Method(Torrent, 'get_connection_leech', 'd.get_connection_leech'),
Method(Torrent, 'get_peers_accounted', 'd.get_peers_accounted'),
Method(Torrent, 'get_message', 'd.get_message'),
Method(Torrent, 'get_hashing', 'd.hashing'),
Method(Torrent, 'get_bitfield', 'd.bitfield'),
Method(Torrent, 'get_local_id_html', 'd.local_id_html'),
Method(Torrent, 'get_connection_leech', 'd.connection_leech'),
Method(Torrent, 'get_peers_accounted', 'd.peers_accounted'),
Method(Torrent, 'get_message', 'd.message'),
Method(Torrent, 'is_active', 'd.is_active',
boolean=True,
),
Method(Torrent, 'get_size_bytes', 'd.get_size_bytes'),
Method(Torrent, 'get_ignore_commands', 'd.get_ignore_commands'),
Method(Torrent, 'get_creation_date', 'd.get_creation_date'),
Method(Torrent, 'get_base_path', 'd.get_base_path'),
Method(Torrent, 'get_left_bytes', 'd.get_left_bytes'),
Method(Torrent, 'get_size_files', 'd.get_size_files'),
Method(Torrent, 'get_size_pex', 'd.get_size_pex'),
Method(Torrent, 'get_size_bytes', 'd.size_bytes'),
Method(Torrent, 'get_ignore_commands', 'd.ignore_commands'),
Method(Torrent, 'get_creation_date', 'd.creation_date'),
Method(Torrent, 'get_base_path', 'd.base_path'),
Method(Torrent, 'get_left_bytes', 'd.left_bytes'),
Method(Torrent, 'get_size_files', 'd.size_files'),
Method(Torrent, 'get_size_pex', 'd.size_pex'),
Method(Torrent, 'is_private', 'd.is_private',
boolean=True,
),
Method(Torrent, 'get_max_size_pex', 'd.get_max_size_pex'),
Method(Torrent, 'get_num_chunks_hashed', 'd.get_chunks_hashed',
Method(Torrent, 'get_max_size_pex', 'd.max_size_pex'),
Method(Torrent, 'get_num_chunks_hashed', 'd.chunks_hashed',
aliases=("get_chunks_hashed",)),
Method(Torrent, 'get_num_chunks_wanted', 'd.wanted_chunks'),
Method(Torrent, 'get_priority', 'd.get_priority'),
Method(Torrent, 'get_skip_rate', 'd.get_skip_rate'),
Method(Torrent, 'get_completed_bytes', 'd.get_completed_bytes'),
Method(Torrent, 'get_name', 'd.get_name'),
Method(Torrent, 'get_completed_chunks', 'd.get_completed_chunks'),
Method(Torrent, 'get_throttle_name', 'd.get_throttle_name'),
Method(Torrent, 'get_free_diskspace', 'd.get_free_diskspace'),
Method(Torrent, 'get_directory_base', 'd.get_directory_base'),
Method(Torrent, 'get_hashing_failed', 'd.get_hashing_failed'),
Method(Torrent, 'get_tied_to_file', 'd.get_tied_to_file'),
Method(Torrent, 'get_down_total', 'd.get_down_total'),
Method(Torrent, 'get_bytes_done', 'd.get_bytes_done'),
Method(Torrent, 'get_up_rate', 'd.get_up_rate'),
Method(Torrent, 'get_up_total', 'd.get_up_total'),
Method(Torrent, 'get_priority', 'd.priority'),
Method(Torrent, 'get_skip_rate', 'd.skip.rate'),
Method(Torrent, 'get_completed_bytes', 'd.completed_bytes'),
Method(Torrent, 'get_name', 'd.name'),
Method(Torrent, 'get_completed_chunks', 'd.completed_chunks'),
Method(Torrent, 'get_throttle_name', 'd.throttle_name'),
Method(Torrent, 'get_free_diskspace', 'd.free_diskspace'),
Method(Torrent, 'get_directory_base', 'd.directory_base'),
Method(Torrent, 'get_hashing_failed', 'd.hashing_failed'),
Method(Torrent, 'get_tied_to_file', 'd.tied_to_file'),
Method(Torrent, 'get_down_total', 'd.down.total'),
Method(Torrent, 'get_bytes_done', 'd.bytes_done'),
Method(Torrent, 'get_up_rate', 'd.up.rate'),
Method(Torrent, 'get_up_total', 'd.up.total'),
Method(Torrent, 'is_accepting_seeders', 'd.accepting_seeders',
boolean=True,
),
@ -490,28 +490,28 @@ methods = [
boolean=True,
),
Method(Torrent, "get_time_started", "d.timestamp.started"),
Method(Torrent, "get_custom1", "d.get_custom1"),
Method(Torrent, "get_custom2", "d.get_custom2"),
Method(Torrent, "get_custom3", "d.get_custom3"),
Method(Torrent, "get_custom4", "d.get_custom4"),
Method(Torrent, "get_custom5", "d.get_custom5"),
Method(Torrent, "get_custom1", "d.custom1"),
Method(Torrent, "get_custom2", "d.custom2"),
Method(Torrent, "get_custom3", "d.custom3"),
Method(Torrent, "get_custom4", "d.custom4"),
Method(Torrent, "get_custom5", "d.custom5"),
# MODIFIERS
Method(Torrent, 'set_uploads_max', 'd.set_uploads_max'),
Method(Torrent, 'set_tied_to_file', 'd.set_tied_to_file'),
Method(Torrent, 'set_tracker_numwant', 'd.set_tracker_numwant'),
Method(Torrent, 'set_priority', 'd.set_priority'),
Method(Torrent, 'set_peers_max', 'd.set_peers_max'),
Method(Torrent, 'set_hashing_failed', 'd.set_hashing_failed'),
Method(Torrent, 'set_message', 'd.set_message'),
Method(Torrent, 'set_throttle_name', 'd.set_throttle_name'),
Method(Torrent, 'set_peers_min', 'd.set_peers_min'),
Method(Torrent, 'set_ignore_commands', 'd.set_ignore_commands'),
Method(Torrent, 'set_max_file_size', 'd.set_max_file_size'),
Method(Torrent, 'set_custom5', 'd.set_custom5'),
Method(Torrent, 'set_custom4', 'd.set_custom4'),
Method(Torrent, 'set_custom2', 'd.set_custom2'),
Method(Torrent, 'set_custom1', 'd.set_custom1'),
Method(Torrent, 'set_custom3', 'd.set_custom3'),
Method(Torrent, 'set_connection_current', 'd.set_connection_current'),
Method(Torrent, 'set_uploads_max', 'd.uploads_max.set'),
Method(Torrent, 'set_tied_to_file', 'd.tied_to_file.set'),
Method(Torrent, 'set_tracker_numwant', 'd.tracker_numwant.set'),
Method(Torrent, 'set_priority', 'd.priority.set'),
Method(Torrent, 'set_peers_max', 'd.peers_max.set'),
Method(Torrent, 'set_hashing_failed', 'd.hashing_failed.set'),
Method(Torrent, 'set_message', 'd.message.set'),
Method(Torrent, 'set_throttle_name', 'd.throttle_name.set'),
Method(Torrent, 'set_peers_min', 'd.peers_min.set'),
Method(Torrent, 'set_ignore_commands', 'd.ignore_commands.set'),
Method(Torrent, 'set_max_file_size', 'd.max_file_size.set'),
Method(Torrent, 'set_custom5', 'd.custom5.set'),
Method(Torrent, 'set_custom4', 'd.custom4.set'),
Method(Torrent, 'set_custom2', 'd.custom2.set'),
Method(Torrent, 'set_custom1', 'd.custom1.set'),
Method(Torrent, 'set_custom3', 'd.custom3.set'),
Method(Torrent, 'set_connection_current', 'd.connection_current.set'),
]

22
libs/rtorrent/tracker.py

@ -70,17 +70,17 @@ class Tracker:
methods = [
# RETRIEVERS
Method(Tracker, 'is_enabled', 't.is_enabled', boolean=True),
Method(Tracker, 'get_id', 't.get_id'),
Method(Tracker, 'get_scrape_incomplete', 't.get_scrape_incomplete'),
Method(Tracker, 'get_id', 't.id'),
Method(Tracker, 'get_scrape_incomplete', 't.scrape_incomplete'),
Method(Tracker, 'is_open', 't.is_open', boolean=True),
Method(Tracker, 'get_min_interval', 't.get_min_interval'),
Method(Tracker, 'get_scrape_downloaded', 't.get_scrape_downloaded'),
Method(Tracker, 'get_group', 't.get_group'),
Method(Tracker, 'get_scrape_time_last', 't.get_scrape_time_last'),
Method(Tracker, 'get_type', 't.get_type'),
Method(Tracker, 'get_normal_interval', 't.get_normal_interval'),
Method(Tracker, 'get_url', 't.get_url'),
Method(Tracker, 'get_scrape_complete', 't.get_scrape_complete',
Method(Tracker, 'get_min_interval', 't.min_interval'),
Method(Tracker, 'get_scrape_downloaded', 't.scrape_downloaded'),
Method(Tracker, 'get_group', 't.group'),
Method(Tracker, 'get_scrape_time_last', 't.scrape_time_last'),
Method(Tracker, 'get_type', 't.type'),
Method(Tracker, 'get_normal_interval', 't.normal_interval'),
Method(Tracker, 'get_url', 't.url'),
Method(Tracker, 'get_scrape_complete', 't.scrape_complete',
min_version=(0, 8, 9),
),
Method(Tracker, 'get_activity_time_last', 't.activity_time_last',
@ -134,5 +134,5 @@ methods = [
),
# MODIFIERS
Method(Tracker, 'set_enabled', 't.set_enabled'),
Method(Tracker, 'set_enabled', 't.is_enabled.set'),
]

3
libs/subliminal/core.py

@ -32,7 +32,8 @@ __all__ = ['SERVICES', 'LANGUAGE_INDEX', 'SERVICE_INDEX', 'SERVICE_CONFIDENCE',
'create_list_tasks', 'create_download_tasks', 'consume_task', 'matching_confidence',
'key_subtitles', 'group_by_video']
logger = logging.getLogger(__name__)
SERVICES = ['opensubtitles', 'bierdopje', 'subswiki', 'subtitulos', 'thesubdb', 'addic7ed', 'tvsubtitles', 'subscenter']
SERVICES = ['opensubtitles', 'bierdopje', 'subswiki', 'subtitulos', 'thesubdb', 'addic7ed', 'tvsubtitles',
'subscenter', 'wizdom']
LANGUAGE_INDEX, SERVICE_INDEX, SERVICE_CONFIDENCE, MATCHING_CONFIDENCE = range(4)

13
libs/subliminal/services/subscenter.py

@ -1,5 +1,5 @@
# -*- coding: utf-8 -*-
# Copyright 2012 Ofir Brukner <ofirbrukner@gmail.com>
# Copyright 2012 Ofir123 <ofirbrukner@gmail.com>
#
# This file is part of subliminal.
#
@ -16,7 +16,7 @@
# You should have received a copy of the GNU Lesser General Public License
# along with subliminal. If not, see <http://www.gnu.org/licenses/>.
from . import ServiceBase
from ..exceptions import ServiceError
from ..exceptions import DownloadFailedError, ServiceError
from ..language import language_set
from ..subtitles import get_subtitle_path, ResultSubtitle
from ..videos import Episode, Movie
@ -30,7 +30,7 @@ logger = logging.getLogger(__name__)
class Subscenter(ServiceBase):
server = 'http://www.subscenter.co/he/'
server = 'http://www.subscenter.info/he/'
api_based = False
languages = language_set(['he'])
videos = [Episode, Movie]
@ -110,10 +110,11 @@ class Subscenter(ServiceBase):
# Read the item.
subtitle_id = subtitle_item['id']
subtitle_key = subtitle_item['key']
subtitle_version = subtitle_item['h_version']
release = subtitle_item['subtitle_version']
subtitle_path = get_subtitle_path(filepath, language_object, self.config.multi)
download_link = self.server_url + 'subtitle/download/{0}/{1}/?v={2}&key={3}'.format(
language_code, subtitle_id, release, subtitle_key)
language_code, subtitle_id, subtitle_version, subtitle_key)
# Add the release and increment downloaded count if we already have the subtitle.
if subtitle_id in subtitles:
logger.debug('Found additional release {0} for subtitle {1}'.format(
@ -128,7 +129,11 @@ class Subscenter(ServiceBase):
return subtitles.values()
def download(self, subtitle):
try:
self.download_zip_file(subtitle.link, subtitle.path)
except DownloadFailedError:
# If no zip file was retrieved, daily downloads limit has exceeded.
raise ServiceError('Daily limit exceeded')
return subtitle

138
libs/subliminal/services/wizdom.py

@ -0,0 +1,138 @@
# -*- coding: utf-8 -*-
# Copyright 2017 Ofir123 <ofirbrukner@gmail.com>
#
# This file is part of subliminal.
#
# subliminal is free software; you can redistribute it and/or modify it under
# the terms of the GNU Lesser General Public License as published by
# the Free Software Foundation; either version 3 of the License, or
# (at your option) any later version.
#
# subliminal is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with subliminal. If not, see <http://www.gnu.org/licenses/>.
from . import ServiceBase
from ..exceptions import ServiceError
from ..language import language_set
from ..subtitles import get_subtitle_path, ResultSubtitle
from ..videos import Episode, Movie
from ..utils import to_unicode
import bisect
import logging
logger = logging.getLogger(__name__)
class Wizdom(ServiceBase):
    """Subliminal service for wizdom.xyz, a Hebrew subtitle catalogue.

    API based: resolves the video's IMDB ID (querying TMDB when the caller
    did not supply one) and then fetches a per-title JSON listing of the
    available subtitles.
    """
    server = 'http://wizdom.xyz'
    api_based = True
    # Only Hebrew subtitles are indexed by this service.
    languages = language_set(['he'])
    videos = [Episode, Movie]
    require_video = False
    # TMDB API key used by _search_imdb_id() for title -> IMDB ID lookups.
    _tmdb_api_key = 'a51ee051bcd762543373903de296e0a3'

    def _search_imdb_id(self, title, year, is_movie):
        """Search the IMDB ID for the given `title` and `year`.

        :param str title: title to search for.
        :param int year: year to search for (or 0 if not relevant).
        :param bool is_movie: If True, IMDB ID will be searched for in TMDB instead of Wizdom.
        :return: the IMDB ID for the given title and year (or None if not found).
        :rtype: str
        """
        # make the search
        # NOTE(review): '%r%r' renders the year suffix with quotes around it
        # (repr of a str) -- looks like '%r%s' was intended; confirm.
        logger.info('Searching IMDB ID for %r%r', title, '' if not year else ' ({})'.format(year))
        category = 'movie' if is_movie else 'tv'
        # Apostrophes break the TMDB query string, so strip them.
        title = title.replace('\'', '')
        # get TMDB ID first
        r = self.session.get('http://api.tmdb.org/3/search/{}?api_key={}&query={}{}&language=en'.format(
            category, self._tmdb_api_key, title, '' if not year else '&year={}'.format(year)))
        r.raise_for_status()
        tmdb_results = r.json().get('results')
        if tmdb_results:
            tmdb_id = tmdb_results[0].get('id')
            if tmdb_id:
                # get actual IMDB ID from TMDB
                # Movies expose imdb_id directly; TV shows expose it via the
                # /external_ids sub-resource.
                r = self.session.get('http://api.tmdb.org/3/{}/{}{}?api_key={}&language=en'.format(
                    category, tmdb_id, '' if is_movie else '/external_ids', self._tmdb_api_key))
                r.raise_for_status()
                # Empty-string IMDB IDs are normalized to None.
                return str(r.json().get('imdb_id', '')) or None
        return None

    def list_checked(self, video, languages):
        """Extract query parameters from *video* and delegate to query()."""
        series = None
        season = None
        episode = None
        title = video.title
        imdb_id = video.imdbid
        year = video.year
        if isinstance(video, Episode):
            series = video.series
            season = video.season
            episode = video.episode
        return self.query(video.path or video.release, languages, series, season,
                          episode, title, imdb_id, year)

    def query(self, filepath, languages=None, series=None, season=None, episode=None, title=None, imdbid=None,
              year=None):
        """Query wizdom.xyz and return a mapping of subtitle id -> ResultSubtitle.

        Movies are looked up by title/year; episodes additionally filter the
        returned listing by season and episode number.
        """
        logger.debug(u'Getting subtitles for {0} season {1} episode {2} with languages {3}'.format(
            series, season, episode, languages))
        # search for the IMDB ID if needed
        is_movie = not (series and season and episode)
        if is_movie and not title:
            raise ServiceError('One or more parameters are missing')
        # for TV series, we need the series IMDB ID, and not the specific episode ID
        imdb_id = imdbid or self._search_imdb_id(title, year, is_movie)
        # search
        logger.debug(u'Using IMDB ID {0}'.format(imdb_id))
        # NOTE(review): self.server_url is provided by ServiceBase; this URL
        # assumes it yields a bare host (e.g. 'wizdom.xyz') -- confirm it does
        # not include the 'http://' prefix from `server` above.
        url = 'http://json.{}/{}.json'.format(self.server_url, imdb_id)
        # get the list of subtitles
        logger.debug('Getting the list of subtitles')
        r = self.session.get(url)
        r.raise_for_status()
        try:
            results = r.json()
        except ValueError:
            # Not JSON (e.g. an HTML error page): treat as "no subtitles".
            return {}
        # filter irrelevant results
        if not is_movie:
            results = results.get('subs', {}).get(str(season), {}).get(str(episode), [])
        else:
            results = results.get('subs', [])
        # loop over results
        subtitles = dict()
        for result in results:
            language_object = self.get_language('heb')
            subtitle_id = result['id']
            release = result['version']
            subtitle_path = get_subtitle_path(filepath, language_object, self.config.multi)
            download_link = 'http://zip.{}/{}.zip'.format(self.server_url, subtitle_id)
            # add the release and increment downloaded count if we already have the subtitle
            if subtitle_id in subtitles:
                logger.debug(u'Found additional release {0} for subtitle {1}'.format(release, subtitle_id))
                bisect.insort_left(subtitles[subtitle_id].releases, release)  # deterministic order
                subtitles[subtitle_id].downloaded += 1
                continue
            # otherwise create it
            subtitle = ResultSubtitle(subtitle_path, language_object, self.__class__.__name__.lower(),
                                      download_link, release=to_unicode(release))
            logger.debug(u'Found subtitle {0}'.format(subtitle))
            subtitles[subtitle_id] = subtitle
        return subtitles.values()

    def download(self, subtitle):
        """Download *subtitle* (a zip archive) to its target path."""
        self.download_zip_file(subtitle.link, subtitle.path)
        return subtitle


# Name under which subliminal's service loader discovers this class.
Service = Wizdom

190
libs/tus/__init__.py

@ -0,0 +1,190 @@
import os
import base64
import logging
import argparse
import requests
# Log level applied to the stderr handler installed by _init() for CLI use.
LOG_LEVEL = logging.INFO
# Default number of bytes sent per PATCH request (4 MiB).
DEFAULT_CHUNK_SIZE = 4 * 1024 * 1024
# Version of the tus resumable-upload protocol spoken by this client.
TUS_VERSION = '1.0.0'

# Library logger: the NullHandler keeps it silent unless the host
# application (or _init(), when run as a command-line tool) adds a handler.
logger = logging.getLogger(__name__)
logger.setLevel(logging.DEBUG)
logger.addHandler(logging.NullHandler())


class TusError(Exception):
    """Raised when the tus server responds with an unexpected status code."""
    pass
def _init():
    """Attach a stderr handler so the module logs when used as a CLI tool."""
    handler = logging.StreamHandler()
    handler.setLevel(LOG_LEVEL)
    handler.setFormatter(
        logging.Formatter("[%(asctime)s] %(levelname)s %(message)s"))
    logger.addHandler(handler)
def _create_parser():
    """Build the argument parser shared by the upload and resume commands."""
    common = argparse.ArgumentParser()
    common.add_argument('file', type=argparse.FileType('rb'))
    common.add_argument('--chunk-size', type=int, default=DEFAULT_CHUNK_SIZE)
    common.add_argument(
        '--header',
        action='append',
        help="A single key/value pair"
        " to be sent with all requests as HTTP header."
        " Can be specified multiple times to send more then one header."
        " Key and value must be separated with \":\".")
    return common
def _cmd_upload():
    """CLI entry point: create a new tus upload and send the whole file."""
    _init()
    parser = _create_parser()
    parser.add_argument('tus_endpoint')
    parser.add_argument('--file_name')
    parser.add_argument(
        '--metadata',
        action='append',
        help="A single key/value pair to be sent in Upload-Metadata header."
        " Can be specified multiple times to send more than one pair."
        " Key and value must be separated with space.")
    args = parser.parse_args()
    # Bug fix: argparse leaves append-actions as None when the option is
    # never supplied, so iterating them raised TypeError.  Also split on the
    # first separator only, so header/metadata *values* may contain ':'/' '.
    headers = dict(x.split(':', 1) for x in (args.header or []))
    metadata = dict(x.split(' ', 1) for x in (args.metadata or []))
    upload(
        args.file,
        args.tus_endpoint,
        chunk_size=args.chunk_size,
        file_name=args.file_name,
        headers=headers,
        metadata=metadata)
def _cmd_resume():
    """CLI entry point: resume an already-created tus upload."""
    _init()
    parser = _create_parser()
    parser.add_argument('file_endpoint')
    args = parser.parse_args()
    # Bug fix: --header is optional, so args.header is None when absent;
    # iterating None raised TypeError.  maxsplit=1 keeps ':' in values intact.
    headers = dict(x.split(':', 1) for x in (args.header or []))
    resume(
        args.file,
        args.file_endpoint,
        chunk_size=args.chunk_size,
        headers=headers)
def upload(file_obj,
           tus_endpoint,
           chunk_size=DEFAULT_CHUNK_SIZE,
           file_name=None,
           headers=None,
           metadata=None):
    """Create a tus upload for *file_obj* and send its full contents.

    :param file_obj: binary file object opened for reading.
    :param tus_endpoint: URL of the tus creation endpoint.
    :param chunk_size: bytes sent per PATCH request.
    :param file_name: name to register on the server; defaults to the
        basename of ``file_obj.name``.
    :param headers: extra HTTP headers sent with every request.
    :param metadata: key/value pairs for the Upload-Metadata header.
    :raises TusError: if the server rejects the creation request.
    """
    # Bug fix: the explicit file_name argument was previously ignored --
    # it was unconditionally overwritten with the file object's basename.
    if file_name is None:
        file_name = os.path.basename(file_obj.name)
    file_size = _get_file_size(file_obj)
    location = _create_file(
        tus_endpoint,
        file_name,
        file_size,
        extra_headers=headers,
        metadata=metadata)
    resume(
        file_obj, location, chunk_size=chunk_size, headers=headers, offset=0)
def _get_file_size(f):
pos = f.tell()
f.seek(0, 2)
size = f.tell()
f.seek(pos)
return size
def _create_file(tus_endpoint,
                 file_name,
                 file_size,
                 extra_headers=None,
                 metadata=None):
    """POST a creation request to the tus endpoint and return the file URL.

    :raises TusError: if the server does not answer with 201 Created.
    """
    logger.info("Creating file endpoint")
    request_headers = {
        "Tus-Resumable": TUS_VERSION,
        "Upload-Length": str(file_size),
    }
    if extra_headers:
        request_headers.update(extra_headers)
    if metadata:
        # NOTE(review): base64.b64encode requires bytes on Python 3; this
        # concatenation assumes Python 2 str values -- confirm before porting.
        pairs = [key + ' ' + base64.b64encode(value)
                 for key, value in metadata.items()]
        request_headers["Upload-Metadata"] = ','.join(pairs)
    response = requests.post(tus_endpoint, headers=request_headers)
    if response.status_code != 201:
        raise TusError("Create failed: %s" % response)
    location = response.headers["Location"]
    logger.info("Created: %s", location)
    return location
def resume(file_obj,
           file_endpoint,
           chunk_size=DEFAULT_CHUNK_SIZE,
           headers=None,
           offset=None):
    """Send the remainder of *file_obj* to *file_endpoint* in chunks.

    When *offset* is None, the server is asked (via HEAD) how many bytes it
    already has, so a previously interrupted upload picks up where it left off.
    """
    if offset is None:
        offset = _get_offset(file_endpoint, extra_headers=headers)
    bytes_sent = 0
    size = _get_file_size(file_obj)
    while offset < size:
        file_obj.seek(offset)
        chunk = file_obj.read(chunk_size)
        # The server returns the new offset after each chunk.
        offset = _upload_chunk(
            chunk, offset, file_endpoint, extra_headers=headers)
        bytes_sent += len(chunk)
    logger.info("Total bytes sent: %i", bytes_sent)
def _get_offset(file_endpoint, extra_headers=None):
    """Ask the server (HEAD request) how many bytes it already holds."""
    logger.info("Getting offset")
    request_headers = {"Tus-Resumable": TUS_VERSION}
    if extra_headers:
        request_headers.update(extra_headers)
    response = requests.head(file_endpoint, headers=request_headers)
    response.raise_for_status()
    current_offset = int(response.headers["Upload-Offset"])
    logger.info("offset=%i", current_offset)
    return current_offset
def _upload_chunk(data, offset, file_endpoint, extra_headers=None):
    """PATCH one chunk at *offset* and return the server's new offset.

    :raises TusError: if the server does not answer with 204 No Content.
    """
    logger.info("Uploading chunk from offset: %i", offset)
    request_headers = {
        'Content-Type': 'application/offset+octet-stream',
        'Upload-Offset': str(offset),
        'Tus-Resumable': TUS_VERSION,
    }
    if extra_headers:
        request_headers.update(extra_headers)
    response = requests.patch(file_endpoint, headers=request_headers, data=data)
    if response.status_code != 204:
        raise TusError("Upload chunk failed: %s" % response)
    return int(response.headers["Upload-Offset"])

8
libs/xmpp/transports.py

@ -27,7 +27,7 @@ Transports are stackable so you - f.e. TLS use HTPPROXYsocket or TCPsocket as mo
Also exception 'error' is defined to allow capture of this module specific exceptions.
"""
import socket, select, base64, dispatcher, sys
import socket, ssl, select, base64, dispatcher, sys
from simplexml import ustr
from client import PlugIn
from protocol import *
@ -312,9 +312,9 @@ class TLS(PlugIn):
""" Immidiatedly switch socket to TLS mode. Used internally."""
""" Here we should switch pending_data to hint mode."""
tcpsock = self._owner.Connection
tcpsock._sslObj = socket.ssl(tcpsock._sock, None, None)
tcpsock._sslIssuer = tcpsock._sslObj.issuer()
tcpsock._sslServer = tcpsock._sslObj.server()
tcpsock._sslObj = ssl.wrap_socket(tcpsock._sock, None, None)
tcpsock._sslIssuer = tcpsock._sslObj.getpeercert().get('issuer')
tcpsock._sslServer = tcpsock._sslObj.getpeercert().get('server')
tcpsock._recv = tcpsock._sslObj.read
tcpsock._send = tcpsock._sslObj.write

Loading…
Cancel
Save