committed by
GitHub
55 changed files with 1357 additions and 514 deletions
@ -0,0 +1,130 @@ |
|||||
|
from datetime import datetime |
||||
|
from couchpotato.core.helpers.variable import tryInt, getIdentifier |
||||
|
from couchpotato.core.logger import CPLog |
||||
|
from couchpotato.core.media._base.providers.torrent.base import TorrentMagnetProvider |
||||
|
import random |
||||
|
|
||||
|
log = CPLog(__name__) |
||||
|
|
||||
|
|
||||
|
class Base(TorrentMagnetProvider):
    """YTS torrent provider.

    Only qualities 720p/1080p/3D are listed by YTS - the rest will fail.
    All YTS.ag torrents are verified.
    """

    urls = {
        'detail': 'https://yts.ag/api#list_movies',
        'search': 'https://yts.ag/api/v2/list_movies.json?query_term=%s&limit=%s&page=%s'
    }

    def _search(self, movie, quality, results):
        """Query the YTS API for *movie* and append matching releases to *results*."""
        limit = 10
        page = 1
        data = self.getJsonData(self.urls['search'] % (getIdentifier(movie), limit, page))

        if not data:
            return

        movie_count = tryInt(data['data']['movie_count'])
        if movie_count == 0:
            log.debug('%s - found no results', (self.getName()))
            return

        for result in data['data']['movies']:
            name = result['title']
            year = result['year']
            detail_url = result['url']

            for torrent in result['torrents']:
                t_quality = torrent['quality']

                # Substring match, e.g. '720p' inside the quality label.
                if t_quality not in quality['label']:
                    continue

                info_hash = torrent['hash']
                size = tryInt(torrent['size_bytes'] / 1048576)  # bytes -> MB
                seeders = tryInt(torrent['seeds'])
                leechers = tryInt(torrent['peers'])
                pubdate = torrent['date_uploaded']  # format: 2017-02-17 18:40:03
                pubdate = datetime.strptime(pubdate, '%Y-%m-%d %H:%M:%S')
                age = (datetime.now() - pubdate).days

                results.append({
                    # YTS exposes no usable release id; generate one so
                    # entries stay distinct within a search.
                    'id': random.randint(100, 9999),
                    'name': '%s (%s) %s %s %s' % (name, year, 'YTS', t_quality, 'BR-Rip'),
                    'url': self.make_magnet(info_hash, name),
                    'size': size,
                    'seeders': seeders,
                    'leechers': leechers,
                    'age': age,
                    'detail_url': detail_url,
                    'score': 1
                })

    def make_magnet(self, hash, name):
        """Build a magnet URI for the given info *hash* and display *name*."""
        # Fixed: the tracker list previously ended with a stray ']' which
        # corrupted the final tracker URL in every generated magnet link.
        url_encoded_trackers = 'udp%3A%2F%2Fopen.demonii.com%3A1337%2Fannounce&tr=%0Audp%3A%2F%2Ftracker.openbittorr' \
                               'ent.com%3A80&tr=%0Audp%3A%2F%2Ftracker.coppersurfer.tk%3A6969&tr=%0Audp%3A%2F%2Fglot' \
                               'orrents.pw%3A6969%2Fannounce&tr=%0Audp%3A%2F%2Ftracker.opentrackr.org%3A1337%2Fannou' \
                               'nce&tr=%0Audp%3A%2F%2Ftorrent.gresille.org%3A80%2Fannounce&tr=%0Audp%3A%2F%2Fp4p.are' \
                               'nabg.com%3A1337&tr=%0Audp%3A%2F%2Ftracker.leechers-paradise.org%3A6969'

        return 'magnet:?xt=urn:btih:%s&dn=%s&tr=%s' % (hash, name.replace(' ', '+'), url_encoded_trackers)
|
|
||||
|
# Settings UI declaration for the YTS provider.
config = [{
    'name': 'yts',
    'groups': [
        {
            'tab': 'searcher',
            'list': 'torrent_providers',
            'name': 'YTS',
            'description': '<a href="https://yts.ag/" target="_blank">YTS</a>',
            'wizard': True,
            'icon': 'iVBORw0KGgoAAAANSUhEUgAAABAAAAAQCAIAAACQkWg2AAACL0lEQVR4AS1SPW/UQBAd23fxne/Ld2dvzvHuzPocEBAKokCBqG'
                    'iQ6IgACYmvUKRBFEQgKKGg4BAlUoggggYUEQpSHOI7CIEoQs/fYcbLaU/efTvvvZlnA1qydoxU5kcxX0CkgmQZtPy0hCUjvK+W'
                    'gEByOZ5dns1O5bzna8fRVkgsxH8B0YouIvBhdD5T11NiVOoKrsttyUcpRW0InUrFnwe9HzuP2uaQZYhF2LQ76TTXw2RVMTK8mY'
                    'Ybjfh+zNquMVCrqn93aArLSixPxnafdGDLaz1tjY5rmNa8z5BczEQOxQfCl1GyoqoWxYRN1bkh7ELw3q/vhP6HIL4TG9Kumpjg'
                    'vwuyM7OsjSj98E/vszMfZ7xvPtMaWxGO5crwIumKCR5HxDtJ0AWKGG204RfUd/3smJYqwem/Q7BTS1ZGfM4LNpVwuKAz6cMeRO'
                    'st0S2EwNE7GjTehO2H3dxqIpdkydat15G3F8SXBi4GlpBNlSz012L/k2+W0CLLk/jbcf13rf41yJeMQ8QWUZiHCfCA9ad+81nE'
                    'KPtoS9mJOf9v0NmMJHgUT6xayheK9EIK7JJeU/AF4scDF7Y5SPlJrRcxJ+um4ibNEdObxLiIwJim+eT2AL5D9CIcnZ5zvSJi9e'
                    'IlNHVVtZ831dk5svPgvjPWTq+ktWkd/kD0qtm71x+sDQe3kt6DXnM7Ct+GajmTxKlkAokWljyAKSm5oWa2w+BH4P2UuVub7eTy'
                    'iGOQYapY/wEztHduSDYz5gAAAABJRU5ErkJggg==',
            'options': [
                {
                    'name': 'enabled',
                    'type': 'enabler',
                    'default': False
                },
                {
                    'name': 'seed_ratio',
                    'label': 'Seed ratio',
                    'type': 'float',
                    'default': 1,
                    'description': 'Will not be (re)moved until this seed ratio is met.',
                },
                {
                    'name': 'seed_time',
                    'label': 'Seed time',
                    'type': 'int',
                    'default': 40,
                    'description': 'Will not be (re)moved until this seed time (in hours) is met.',
                },
                {
                    'name': 'info',
                    'label': 'Info',
                    'type': 'bool',
                    # Fixed: default was the *string* 'False', which is truthy
                    # and wrong for a bool option.
                    'default': False,
                    'description': 'YTS will only work if you set the minimum size for 720p to 500 and 1080p to 800',
                },
                {
                    'name': 'extra_score',
                    'advanced': True,
                    'label': 'Extra Score',
                    'type': 'int',
                    'default': 0,
                    'description': 'Starting score for each release found via this provider.',
                }
            ],
        }
    ]
}]
@ -1,54 +0,0 @@ |
|||||
import datetime |
|
||||
|
|
||||
from couchpotato.core.helpers.rss import RSS |
|
||||
from couchpotato.core.logger import CPLog |
|
||||
from couchpotato.core.media.movie.providers.automation.base import Automation |
|
||||
|
|
||||
|
|
||||
log = CPLog(__name__) |
|
||||
|
|
||||
autoload = 'Kinepolis' |
|
||||
|
|
||||
|
|
||||
class Kinepolis(Automation, RSS):
    """Automation provider: imports the current Kinepolis box-office top 10."""

    interval = 1800
    rss_url = 'http://kinepolis.be/nl/top10-box-office/feed'

    def getIMDBids(self):
        """Return IMDB ids for feed entries that pass the minimal-movie filter."""
        imdb_ids = []

        for entry in self.getRSSData(self.rss_url):
            title = self.getTextElement(entry, 'title')
            # The feed carries no year; assume the current one.
            current_year = datetime.datetime.now().strftime('%Y')

            found = self.search(title, current_year)
            if not found or not self.isMinimalMovie(found):
                continue
            imdb_ids.append(found['imdb'])

        return imdb_ids
|
||||
|
|
||||
|
|
||||
# Settings UI declaration for the Kinepolis automation provider.
config = [{
    'name': 'kinepolis',
    'groups': [
        {
            # Shown under Settings > Automation.
            'tab': 'automation',
            'list': 'automation_providers',
            'name': 'kinepolis_automation',
            'label': 'Kinepolis',
            'description': 'Imports movies from the current top 10 of kinepolis.',
            'options': [
                {
                    # Master on/off switch for this provider.
                    'name': 'automation_enabled',
                    'default': False,
                    'type': 'enabler',
                },
            ],
        },
    ],
}]
|
@ -1,48 +0,0 @@ |
|||||
from couchpotato.core.helpers.rss import RSS |
|
||||
from couchpotato.core.logger import CPLog |
|
||||
from couchpotato.core.media.movie.providers.automation.base import Automation |
|
||||
|
|
||||
log = CPLog(__name__) |
|
||||
|
|
||||
autoload = 'Moviemeter' |
|
||||
|
|
||||
|
|
||||
class Moviemeter(Automation, RSS):
    """Automation provider: imports the moviemeter.nl cinema top 10."""

    interval = 1800
    rss_url = 'http://www.moviemeter.nl/rss/cinema'

    def getIMDBids(self):
        """Return IMDB ids for feed entries that pass the minimal-movie filter."""
        imdb_ids = []

        for entry in self.getRSSData(self.rss_url):
            found = self.search(self.getTextElement(entry, 'title'))
            if found and self.isMinimalMovie(found):
                imdb_ids.append(found['imdb'])

        return imdb_ids
|
||||
|
|
||||
|
|
||||
# Settings UI declaration for the Moviemeter automation provider.
config = [{
    'name': 'moviemeter',
    'groups': [
        {
            # Shown under Settings > Automation.
            'tab': 'automation',
            'list': 'automation_providers',
            'name': 'moviemeter_automation',
            'label': 'Moviemeter',
            'description': 'Imports movies from the current top 10 of moviemeter.nl.',
            'options': [
                {
                    # Master on/off switch for this provider.
                    'name': 'automation_enabled',
                    'default': False,
                    'type': 'enabler',
                },
            ],
        },
    ],
}]
|
@ -1,72 +0,0 @@ |
|||||
from couchpotato.core.event import fireEvent |
|
||||
from couchpotato.core.helpers.rss import RSS |
|
||||
from couchpotato.core.helpers.variable import tryInt, splitString |
|
||||
from couchpotato.core.logger import CPLog |
|
||||
from couchpotato.core.media.movie.providers.automation.base import Automation |
|
||||
|
|
||||
log = CPLog(__name__) |
|
||||
|
|
||||
autoload = 'MoviesIO' |
|
||||
|
|
||||
|
|
||||
class MoviesIO(Automation, RSS):
    """Watchlist provider that imports movies from Movies.IO RSS feeds."""

    interval = 1800

    def getIMDBids(self):
        """Return IMDB ids found in every enabled configured RSS watchlist.

        Each URL in 'automation_urls' is paired positionally with a 0/1 flag
        in 'automation_urls_use'; disabled or unpaired URLs are skipped.
        """
        movies = []

        enablers = [tryInt(x) for x in splitString(self.conf('automation_urls_use'))]

        for index, rss_url in enumerate(splitString(self.conf('automation_urls'))):
            # Guard against the two comma-separated settings being out of
            # sync; the old manual counter raised IndexError in that case.
            if index >= len(enablers) or not enablers[index]:
                continue

            rss_movies = self.getRSSData(rss_url, headers = {'Referer': ''})

            for movie in rss_movies:
                nameyear = fireEvent('scanner.name_year', self.getTextElement(movie, 'title'), single = True)
                imdb = self.search(nameyear.get('name'), nameyear.get('year'), imdb_only = True)

                if not imdb:
                    continue

                movies.append(imdb)

        return movies
|
||||
|
|
||||
|
|
||||
# Settings UI declaration for the Movies.IO watchlist provider.
config = [{
    'name': 'moviesio',
    'groups': [
        {
            # Shown under Settings > Automation (watchlists).
            'tab': 'automation',
            'list': 'watchlist_providers',
            'name': 'moviesio',
            'label': 'Movies.IO',
            'description': 'Imports movies from <a href="http://movies.io" target="_blank">Movies.io</a> RSS watchlists',
            'options': [
                {
                    # Master on/off switch for this provider.
                    'name': 'automation_enabled',
                    'default': False,
                    'type': 'enabler',
                },
                {
                    # Per-URL 0/1 enable flags, combined with the URLs below.
                    'name': 'automation_urls_use',
                    'label': 'Use',
                },
                {
                    # Comma-separated RSS watchlist URLs.
                    'name': 'automation_urls',
                    'label': 'url',
                    'type': 'combined',
                    'combine': ['automation_urls_use', 'automation_urls'],
                },
            ],
        },
    ],
}]
|
@ -0,0 +1,10 @@ |
|||||
|
from couchpotato.core.logger import CPLog |
||||
|
from couchpotato.core.media._base.providers.torrent.yts import Base |
||||
|
from couchpotato.core.media.movie.providers.base import MovieProvider |
||||
|
|
||||
|
log = CPLog(__name__) |
||||
|
|
||||
|
autoload = 'Yts' |
||||
|
|
||||
|
class Yts(MovieProvider, Base):
    # Movie-search provider: wires the shared YTS torrent Base (which holds
    # all the search logic) into CouchPotato's movie-provider registry.
    pass
@ -0,0 +1,93 @@ |
|||||
|
from couchpotato.core.logger import CPLog |
||||
|
from couchpotato.core.notifications.base import Notification |
||||
|
import json |
||||
|
import requests |
||||
|
|
||||
|
log = CPLog(__name__) |
||||
|
autoload = 'Discord' |
||||
|
|
||||
|
|
||||
|
class Discord(Notification):
    """Posts notifications to a Discord channel through a webhook."""

    # Settings that must be non-empty before anything is sent.
    required_confs = ('webhook_url',)

    def notify(self, message='', data=None, listener=None):
        """Send *message* to the configured webhook; return True on success."""
        for key in self.required_confs:
            if not self.conf(key):
                log.warning('Discord notifications are enabled, but '
                            '"{0}" is not specified.'.format(key))
                return False

        data = data or {}
        message = message.strip()

        if self.conf('include_imdb') and 'identifier' in data:
            template = ' http://www.imdb.com/title/{0[identifier]}/'
            message += template.format(data)

        headers = {b"Content-Type": b"application/json"}
        try:
            r = requests.post(self.conf('webhook_url'),
                              data=json.dumps(dict(content=message,
                                                   username=self.conf('bot_name'),
                                                   avatar_url=self.conf('avatar_url'),
                                                   tts=self.conf('discord_tts'))),
                              headers=headers)
            # Fail on HTTP error responses too; the old code only evaluated
            # r.status_code without checking it.
            r.raise_for_status()
        except Exception as e:
            # Fixed: the old handler formatted r.status_code, which raised
            # NameError whenever the request itself failed before r was bound.
            log.warning('Error sending Discord notification: {0}'.format(e))
            return False
        return True
||||
|
|
||||
|
|
||||
|
# Settings UI declaration for the Discord notification provider.
config = [{
    'name': 'discord',
    'groups': [
        {
            'tab': 'notifications',
            'list': 'notification_providers',
            'name': 'discord',
            'options': [
                {
                    'name': 'enabled',
                    'default': 0,
                    'type': 'enabler',
                },
                {
                    'name': 'webhook_url',
                    'description': (
                        'Your Discord authentication webhook URL.',
                        'Created under channel settings.'
                    )
                },
                {
                    'name': 'include_imdb',
                    'default': True,
                    'type': 'bool',
                    # Fixed: key was misspelled 'descrpition', so the help
                    # text never appeared in the settings UI.
                    'description': 'Include a link to the movie page on IMDB.'
                },
                {
                    'name': 'bot_name',
                    'description': 'Name of bot.',
                    'default': 'CouchPotato',
                    'advanced': True,
                },
                {
                    'name': 'avatar_url',
                    'description': 'URL to an image to use as the avatar for '
                                   'notifications.',
                    'default': 'https://couchpota.to/media/images/couch.png',
                    'advanced': True,
                },
                {
                    'name': 'discord_tts',
                    'default': 0,
                    'type': 'bool',
                    'advanced': True,
                    'description': 'Send notification using text-to-speech.',
                },
                {
                    'name': 'on_snatch',
                    'default': 0,
                    'type': 'bool',
                    'advanced': True,
                    'description': 'Also send message when movie is snatched.',
                },
            ],
        }
    ],
}]
@ -0,0 +1,84 @@ |
|||||
|
from couchpotato.core.helpers.encoding import toUnicode |
||||
|
from couchpotato.core.helpers.encoding import tryUrlencode |
||||
|
from couchpotato.core.helpers.variable import splitString |
||||
|
from couchpotato.core.logger import CPLog |
||||
|
from couchpotato.core.notifications.base import Notification |
||||
|
|
||||
|
|
||||
|
log = CPLog(__name__) |
||||
|
|
||||
|
autoload = 'Join' |
||||
|
|
||||
|
|
||||
|
class Join(Notification):
    """Push notifications to Android devices via the Join service."""

    # URL for request
    url = 'https://joinjoaomgcd.appspot.com/_ah/api/messaging/v1/sendPush?title=%s&text=%s&deviceId=%s&icon=%s'

    # URL for notification icon
    icon = tryUrlencode('https://raw.githubusercontent.com/CouchPotato/CouchPotatoServer/master/couchpotato/static/images/icons/android.png')

    def notify(self, message = '', data = None, listener = None):
        """Push *message* to every configured device.

        Returns True only when every device was notified successfully.
        """
        if not data: data = {}

        # default for devices
        device_default = [None]

        # Fixed: build the request URL in a local; the old code appended the
        # API key to the shared class attribute self.url, so every subsequent
        # notification grew the URL by another '&apikey=...'.
        url = self.url

        apikey = self.conf('apikey')
        # Fixed: the setting defaults to '', so truthiness (not `is not None`)
        # decides whether an API key was actually configured.
        if apikey:
            # Add apikey to request url
            url = url + '&apikey=' + apikey
            # If api key is present, default to sending to all devices
            device_default = ['group.all']

        devices = self.getDevices() or device_default
        successful = 0
        for device in devices:
            response = self.urlopen(url % (self.default_title, tryUrlencode(toUnicode(message)), device, self.icon))

            if response:
                successful += 1
            else:
                log.error('Unable to push notification to Join device with ID %s' % device)

        return successful == len(devices)

    def getDevices(self):
        """Return the comma-separated device ids configured in settings."""
        return splitString(self.conf('devices'))
||||
|
|
||||
|
|
||||
|
# Settings UI declaration for the Join notification provider.
config = [{
    'name': 'join',
    'groups': [
        {
            'tab': 'notifications',
            'list': 'notification_providers',
            'name': 'join',
            'options': [
                {
                    # Master on/off switch.
                    'name': 'enabled',
                    'default': 0,
                    'type': 'enabler',
                },
                {
                    # Target devices (or a group, when an API key is set).
                    'name': 'devices',
                    'default': '',
                    'description': 'IDs of devices to notify, or group to send to if API key is specified (ex: group.all)'
                },
                {
                    'name': 'apikey',
                    'default': '',
                    'advanced': True,
                    'description': 'API Key for sending to all devices, or group'
                },
                {
                    'name': 'on_snatch',
                    'default': 0,
                    'type': 'bool',
                    'advanced': True,
                    'description': 'Also send message when movie is snatched.',
                },
            ],
        }
    ],
}]
@ -0,0 +1,68 @@ |
|||||
|
import traceback |
||||
|
import subprocess |
||||
|
import os |
||||
|
|
||||
|
from couchpotato.core.helpers.encoding import toUnicode |
||||
|
from couchpotato.core.helpers.variable import getIdentifier |
||||
|
from couchpotato.api import addApiView |
||||
|
from couchpotato.core.event import addEvent |
||||
|
from couchpotato.core.logger import CPLog |
||||
|
from couchpotato.core.notifications.base import Notification |
||||
|
|
||||
|
|
||||
|
|
||||
|
|
||||
|
log = CPLog(__name__) |
||||
|
|
||||
|
autoload = 'Script' |
||||
|
|
||||
|
class Script(Notification):
    """Runs a user-configured script after the renamer finishes."""

    def __init__(self):
        # Expose the standard notification-test endpoint and hook the
        # renamer pipeline.
        addApiView(self.testNotifyName(), self.test)

        addEvent('renamer.after', self.runScript)

    def runScript(self, message = None, group = None):
        """Execute the configured script with the destination dir as argument."""
        if self.isDisabled():
            return
        group = group or {}

        command = [self.conf('path'), group.get('destination_dir')]
        log.info('Executing script command: %s ', command)
        try:
            process = subprocess.Popen(command, stdout = subprocess.PIPE, stderr = subprocess.STDOUT)
            output = process.communicate()
        except OSError as e:
            log.error('Unable to run script: %s', e)
            return False

        log.info('Result from script: %s', str(output))
        return True

    def test(self, **kwargs):
        """API handler: report whether the configured script path exists."""
        return {
            'success': os.path.isfile(self.conf('path'))
        }
||||
|
|
||||
|
# Settings UI declaration for the Script notification provider.
config = [{
    'name': 'script',
    'groups': [
        {
            'tab': 'notifications',
            'list': 'notification_providers',
            'name': 'script',
            'label': 'Script',
            'options': [
                {
                    # Master on/off switch.
                    'name': 'enabled',
                    'default': 0,
                    'type': 'enabler',
                },
                {
                    'name': 'path',
                    'description': 'The path to the script to execute.'
                }
            ]
        }
    ]
}]
@ -0,0 +1,138 @@ |
|||||
|
# -*- coding: utf-8 -*- |
||||
|
# Copyright 2017 Ofir123 <ofirbrukner@gmail.com> |
||||
|
# |
||||
|
# This file is part of subliminal. |
||||
|
# |
||||
|
# subliminal is free software; you can redistribute it and/or modify it under |
||||
|
# the terms of the GNU Lesser General Public License as published by |
||||
|
# the Free Software Foundation; either version 3 of the License, or |
||||
|
# (at your option) any later version. |
||||
|
# |
||||
|
# subliminal is distributed in the hope that it will be useful, |
||||
|
# but WITHOUT ANY WARRANTY; without even the implied warranty of |
||||
|
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the |
||||
|
# GNU Lesser General Public License for more details. |
||||
|
# |
||||
|
# You should have received a copy of the GNU Lesser General Public License |
||||
|
# along with subliminal. If not, see <http://www.gnu.org/licenses/>. |
||||
|
from . import ServiceBase |
||||
|
from ..exceptions import ServiceError |
||||
|
from ..language import language_set |
||||
|
from ..subtitles import get_subtitle_path, ResultSubtitle |
||||
|
from ..videos import Episode, Movie |
||||
|
from ..utils import to_unicode |
||||
|
import bisect |
||||
|
import logging |
||||
|
|
||||
|
logger = logging.getLogger(__name__) |
||||
|
|
||||
|
|
||||
|
class Wizdom(ServiceBase):
    # Subliminal service for wizdom.xyz Hebrew subtitles. Searches by IMDB ID,
    # resolving it through TMDB when the video doesn't already carry one.
    #
    # NOTE(review): the class defines `server`, but query() reads
    # `self.server_url` — unless ServiceBase provides server_url, this raises
    # AttributeError. Also, 'http://json.{}' expects a bare host while `server`
    # includes the scheme. Confirm against ServiceBase.
    server = 'http://wizdom.xyz'
    api_based = True
    languages = language_set(['he'])
    videos = [Episode, Movie]
    require_video = False

    # TMDB API key used only to resolve title/year -> IMDB ID.
    _tmdb_api_key = 'a51ee051bcd762543373903de296e0a3'

    def _search_imdb_id(self, title, year, is_movie):
        """Search the IMDB ID for the given `title` and `year`.

        :param str title: title to search for.
        :param int year: year to search for (or 0 if not relevant).
        :param bool is_movie: If True, IMDB ID will be searched for in TMDB instead of Wizdom.
        :return: the IMDB ID for the given title and year (or None if not found).
        :rtype: str
        """
        # make the search
        # NOTE(review): '%r%r' renders the ' (year)' suffix with quotes;
        # '%r%s' was probably intended.
        logger.info('Searching IMDB ID for %r%r', title, '' if not year else ' ({})'.format(year))
        category = 'movie' if is_movie else 'tv'
        # Apostrophes tend to break the TMDB search query.
        title = title.replace('\'', '')
        # get TMDB ID first
        r = self.session.get('http://api.tmdb.org/3/search/{}?api_key={}&query={}{}&language=en'.format(
            category, self._tmdb_api_key, title, '' if not year else '&year={}'.format(year)))
        r.raise_for_status()
        tmdb_results = r.json().get('results')
        if tmdb_results:
            # Take the first (best-ranked) TMDB hit.
            tmdb_id = tmdb_results[0].get('id')
            if tmdb_id:
                # get actual IMDB ID from TMDB
                # (TV shows expose it via the /external_ids sub-resource.)
                r = self.session.get('http://api.tmdb.org/3/{}/{}{}?api_key={}&language=en'.format(
                    category, tmdb_id, '' if is_movie else '/external_ids', self._tmdb_api_key))
                r.raise_for_status()
                # Empty/missing imdb_id collapses to None.
                return str(r.json().get('imdb_id', '')) or None
        return None

    def list_checked(self, video, languages):
        # Adapt a subliminal Video object into query() keyword data.
        series = None
        season = None
        episode = None
        title = video.title
        imdb_id = video.imdbid
        year = video.year
        if isinstance(video, Episode):
            series = video.series
            season = video.season
            episode = video.episode
        return self.query(video.path or video.release, languages, series, season,
                          episode, title, imdb_id, year)

    def query(self, filepath, languages=None, series=None, season=None, episode=None, title=None, imdbid=None,
              year=None):
        # Fetch and filter the wizdom.xyz subtitle listing for one video.
        logger.debug(u'Getting subtitles for {0} season {1} episode {2} with languages {3}'.format(
            series, season, episode, languages))
        # search for the IMDB ID if needed
        is_movie = not (series and season and episode)
        if is_movie and not title:
            raise ServiceError('One or more parameters are missing')
        # for TV series, we need the series IMDB ID, and not the specific episode ID
        imdb_id = imdbid or self._search_imdb_id(title, year, is_movie)

        # search
        logger.debug(u'Using IMDB ID {0}'.format(imdb_id))
        # NOTE(review): relies on self.server_url (see class-level note).
        url = 'http://json.{}/{}.json'.format(self.server_url, imdb_id)

        # get the list of subtitles
        logger.debug('Getting the list of subtitles')
        r = self.session.get(url)
        r.raise_for_status()
        try:
            results = r.json()
        except ValueError:
            # Non-JSON body (e.g. HTML error page): behave as "no results".
            return {}

        # filter irrelevant results
        if not is_movie:
            results = results.get('subs', {}).get(str(season), {}).get(str(episode), [])
        else:
            results = results.get('subs', [])

        # loop over results
        subtitles = dict()
        for result in results:
            language_object = self.get_language('heb')
            subtitle_id = result['id']
            release = result['version']
            subtitle_path = get_subtitle_path(filepath, language_object, self.config.multi)
            download_link = 'http://zip.{}/{}.zip'.format(self.server_url, subtitle_id)
            # add the release and increment downloaded count if we already have the subtitle
            if subtitle_id in subtitles:
                logger.debug(u'Found additional release {0} for subtitle {1}'.format(release, subtitle_id))
                bisect.insort_left(subtitles[subtitle_id].releases, release)  # deterministic order
                subtitles[subtitle_id].downloaded += 1
                continue
            # otherwise create it
            subtitle = ResultSubtitle(subtitle_path, language_object, self.__class__.__name__.lower(),
                                      download_link, release=to_unicode(release))
            logger.debug(u'Found subtitle {0}'.format(subtitle))
            subtitles[subtitle_id] = subtitle

        return subtitles.values()

    def download(self, subtitle):
        # Fetch the zipped subtitle and extract it to subtitle.path.
        self.download_zip_file(subtitle.link, subtitle.path)
        return subtitle
||||
|
|
||||
|
|
||||
|
# Entry point looked up by the subliminal service loader.
Service = Wizdom
@ -0,0 +1,190 @@ |
|||||
|
import os |
||||
|
import base64 |
||||
|
import logging |
||||
|
import argparse |
||||
|
|
||||
|
import requests |
||||
|
|
||||
|
LOG_LEVEL = logging.INFO  # level of the stderr handler installed by _init()
DEFAULT_CHUNK_SIZE = 4 * 1024 * 1024  # 4 MiB per PATCH request
TUS_VERSION = '1.0.0'  # value sent in the Tus-Resumable header

# Module logger: silent by default (NullHandler); _init() attaches a real handler.
logger = logging.getLogger(__name__)
logger.setLevel(logging.DEBUG)
logger.addHandler(logging.NullHandler())
||||
|
|
||||
|
|
||||
|
class TusError(Exception):
    """Raised when the tus server responds with an unexpected status code."""
    pass
||||
|
|
||||
|
|
||||
|
def _init():
    """Attach a stderr logging handler for the CLI entry points."""
    handler = logging.StreamHandler()
    handler.setLevel(LOG_LEVEL)
    handler.setFormatter(logging.Formatter("[%(asctime)s] %(levelname)s %(message)s"))
    logger.addHandler(handler)
||||
|
|
||||
|
|
||||
|
def _create_parser():
    """Build the argparse parser shared by the upload and resume entry points."""
    parser = argparse.ArgumentParser()
    # Positional: the file to send, opened in binary mode by argparse itself.
    parser.add_argument('file', type=argparse.FileType('rb'))
    parser.add_argument('--chunk-size', type=int, default=DEFAULT_CHUNK_SIZE)
    parser.add_argument(
        '--header',
        action='append',
        help=("A single key/value pair"
              " to be sent with all requests as HTTP header."
              " Can be specified multiple times to send more then one header."
              " Key and value must be separated with \":\"."))
    return parser
||||
|
|
||||
|
|
||||
|
def _cmd_upload():
    """CLI entry point: create a new upload on the tus endpoint and send the file."""
    _init()

    parser = _create_parser()
    parser.add_argument('tus_endpoint')
    parser.add_argument('--file_name')
    parser.add_argument(
        '--metadata',
        action='append',
        help="A single key/value pair to be sent in Upload-Metadata header."
             " Can be specified multiple times to send more than one pair."
             " Key and value must be separated with space.")
    args = parser.parse_args()

    # Fixed: 'append' options are None when never supplied, which made the
    # dict() conversions raise TypeError. Also split on the first separator
    # only, so header values may themselves contain ':'.
    headers = dict([x.split(':', 1) for x in args.header or []])
    metadata = dict([x.split(' ', 1) for x in args.metadata or []])

    upload(
        args.file,
        args.tus_endpoint,
        chunk_size=args.chunk_size,
        file_name=args.file_name,
        headers=headers,
        metadata=metadata)
||||
|
|
||||
|
|
||||
|
def _cmd_resume():
    """CLI entry point: resume a previously created upload at its file endpoint."""
    _init()

    parser = _create_parser()
    parser.add_argument('file_endpoint')
    args = parser.parse_args()

    # Fixed: args.header is None when --header was never supplied, which made
    # dict() raise TypeError. Split on the first ':' only so header values
    # may contain colons.
    headers = dict([x.split(':', 1) for x in args.header or []])

    resume(
        args.file,
        args.file_endpoint,
        chunk_size=args.chunk_size,
        headers=headers)
||||
|
|
||||
|
|
||||
|
def upload(file_obj,
           tus_endpoint,
           chunk_size=DEFAULT_CHUNK_SIZE,
           file_name=None,
           headers=None,
           metadata=None):
    """Create an upload for *file_obj* at *tus_endpoint* and send all its data.

    :param file_obj: binary file object opened for reading.
    :param tus_endpoint: tus collection URL to POST the creation request to.
    :param chunk_size: bytes sent per PATCH request.
    :param file_name: name reported to the server; defaults to the file's basename.
    :param headers: extra HTTP headers sent with every request.
    :param metadata: key/value pairs for the Upload-Metadata header.
    """
    # Fixed: the explicit file_name argument was previously ignored — it was
    # unconditionally overwritten with the file's basename.
    file_name = file_name or os.path.basename(file_obj.name)
    file_size = _get_file_size(file_obj)
    location = _create_file(
        tus_endpoint,
        file_name,
        file_size,
        extra_headers=headers,
        metadata=metadata)
    resume(
        file_obj, location, chunk_size=chunk_size, headers=headers, offset=0)
||||
|
|
||||
|
|
||||
|
def _get_file_size(f): |
||||
|
pos = f.tell() |
||||
|
f.seek(0, 2) |
||||
|
size = f.tell() |
||||
|
f.seek(pos) |
||||
|
return size |
||||
|
|
||||
|
|
||||
|
def _create_file(tus_endpoint,
                 file_name,
                 file_size,
                 extra_headers=None,
                 metadata=None):
    """POST a tus creation request; return the URL of the new file resource.

    NOTE(review): file_name is currently unused here — confirm whether it
    should be included in Upload-Metadata.
    """
    logger.info("Creating file endpoint")

    request_headers = {
        "Tus-Resumable": TUS_VERSION,
        "Upload-Length": str(file_size),
    }
    if extra_headers:
        request_headers.update(extra_headers)

    if metadata:
        # Upload-Metadata: comma-separated "key base64(value)" pairs.
        encoded_pairs = [key + ' ' + base64.b64encode(value) for key, value in metadata.items()]
        request_headers["Upload-Metadata"] = ','.join(encoded_pairs)

    response = requests.post(tus_endpoint, headers=request_headers)
    if response.status_code != 201:
        raise TusError("Create failed: %s" % response)

    location = response.headers["Location"]
    logger.info("Created: %s", location)
    return location
||||
|
|
||||
|
|
||||
|
def resume(file_obj,
           file_endpoint,
           chunk_size=DEFAULT_CHUNK_SIZE,
           headers=None,
           offset=None):
    """Send the remainder of *file_obj* to *file_endpoint*, chunk by chunk.

    When *offset* is None, the server is asked for the current offset first.
    """
    if offset is None:
        offset = _get_offset(file_endpoint, extra_headers=headers)

    bytes_sent = 0
    total_size = _get_file_size(file_obj)
    while offset < total_size:
        file_obj.seek(offset)
        chunk = file_obj.read(chunk_size)
        offset = _upload_chunk(
            chunk, offset, file_endpoint, extra_headers=headers)
        bytes_sent += len(chunk)
        logger.info("Total bytes sent: %i", bytes_sent)
||||
|
|
||||
|
|
||||
|
def _get_offset(file_endpoint, extra_headers=None):
    """HEAD the file endpoint and return the server-side Upload-Offset."""
    logger.info("Getting offset")

    request_headers = {"Tus-Resumable": TUS_VERSION}
    if extra_headers:
        request_headers.update(extra_headers)

    response = requests.head(file_endpoint, headers=request_headers)
    response.raise_for_status()

    current_offset = int(response.headers["Upload-Offset"])
    logger.info("offset=%i", current_offset)
    return current_offset
||||
|
|
||||
|
|
||||
|
def _upload_chunk(data, offset, file_endpoint, extra_headers=None):
    """PATCH one chunk at *offset*; return the server's new Upload-Offset."""
    logger.info("Uploading chunk from offset: %i", offset)

    request_headers = {
        'Content-Type': 'application/offset+octet-stream',
        'Upload-Offset': str(offset),
        'Tus-Resumable': TUS_VERSION,
    }
    if extra_headers:
        request_headers.update(extra_headers)

    response = requests.patch(file_endpoint, headers=request_headers, data=data)
    if response.status_code != 204:
        raise TusError("Upload chunk failed: %s" % response)

    return int(response.headers["Upload-Offset"])
Loading…
Reference in new issue