Added ignore/required words option to be set individually for each show. Fixed issue with global ignore words not properly matching against releases. Fixed issue with
28 changed files with 654 additions and 124 deletions
@@ -0,0 +1,216 @@
# -*- coding: utf-8 -*-
# Copyright 2012 Mr_Orange <mr_orange@hotmail.it>
#
# This file is part of subliminal.
#
# subliminal is free software; you can redistribute it and/or modify it under
# the terms of the GNU Lesser General Public License as published by
# the Free Software Foundation; either version 3 of the License, or
# (at your option) any later version.
#
# subliminal is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with subliminal. If not, see <http://www.gnu.org/licenses/>.
from . import ServiceBase
from ..exceptions import DownloadFailedError, ServiceError
from ..cache import cachedmethod
from ..language import language_set, Language
from ..subtitles import get_subtitle_path, ResultSubtitle, EXTENSIONS
from ..utils import get_keywords
from ..videos import Episode
from bs4 import BeautifulSoup
import logging
import re
import os
import requests
import zipfile
import StringIO
import guessit

from sickbeard.common import Quality

logger = logging.getLogger("subliminal")


class Itasa(ServiceBase):
    server_url = 'http://www.italiansubs.net/'
    site_url = 'http://www.italiansubs.net/'
    api_based = False
    languages = language_set(['it'])
    videos = [Episode]
    require_video = False
    required_features = ['permissive']
    # Map SickBeard quality constants to the quality labels used on the site's download pages
    quality_dict = {Quality.SDTV: '',
                    Quality.SDDVD: 'dvdrip',
                    Quality.RAWHDTV: '1080i',
                    Quality.HDTV: '720p',
                    Quality.FULLHDTV: ('1080p', '720p'),
                    Quality.HDWEBDL: 'web-dl',
                    Quality.FULLHDWEBDL: 'web-dl',
                    Quality.HDBLURAY: ('bdrip', 'bluray'),
                    Quality.FULLHDBLURAY: ('bdrip', 'bluray'),
                    Quality.UNKNOWN: 'unknown'  # Any subtitle will be downloaded
                    }

    def init(self):
        super(Itasa, self).init()
        login_pattern = '<input type="hidden" name="return" value="([^\n\r\t ]+?)" /><input type="hidden" name="([^\n\r\t ]+?)" value="([^\n\r\t ]+?)" />'

        response = requests.get(self.server_url + 'index.php')
        if response.status_code != 200:
            raise ServiceError('Initiate failed')

        match = re.search(login_pattern, response.content, re.IGNORECASE | re.DOTALL)
        if not match:
            raise ServiceError('Can not find unique id parameter on page')

        login_parameter = {'username': 'sickbeard',
                           'passwd': 'subliminal',
                           'remember': 'yes',
                           'Submit': 'Login',
                           'option': 'com_user',
                           'task': 'login',
                           'silent': 'true',
                           'return': match.group(1),
                           match.group(2): match.group(3)
                           }

        self.session = requests.session()
        r = self.session.post(self.server_url + 'index.php', data=login_parameter)
        if not re.search('logouticon.png', r.content, re.IGNORECASE | re.DOTALL):
            raise ServiceError('Itasa Login Failed')

    @cachedmethod
    def get_series_id(self, name):
        """Get the show page and cache every show found in it"""
        r = self.session.get(self.server_url + 'index.php?option=com_remository&Itemid=9')
        soup = BeautifulSoup(r.content, self.required_features)
        all_series = soup.find('div', attrs={'id': 'remositorycontainerlist'})
        for tv_series in all_series.find_all(href=re.compile('func=select')):
            series_name = tv_series.text.lower().strip().replace(':', '')
            match = re.search('&id=([0-9]+)', tv_series['href'])
            if match is None:
                continue
            series_id = int(match.group(1))
            self.cache_for(self.get_series_id, args=(series_name,), result=series_id)
        return self.cached_value(self.get_series_id, args=(name,))

    def get_episode_id(self, series, series_id, season, episode, quality):
        """Get the subtitle id for the episode with the given quality"""

        season_link = None
        quality_link = None
        episode_id = None

        r = self.session.get(self.server_url + 'index.php?option=com_remository&Itemid=6&func=select&id=' + str(series_id))
        soup = BeautifulSoup(r.content, self.required_features)
        all_seasons = soup.find('div', attrs={'id': 'remositorycontainerlist'})
        for seasons in all_seasons.find_all(href=re.compile('func=select')):
            if seasons.text.lower().strip() == 'stagione %s' % str(season):
                season_link = seasons['href']
                break

        if not season_link:
            logger.debug(u'Could not find season %s for series %s' % (str(season), series))
            return None

        r = self.session.get(season_link)
        soup = BeautifulSoup(r.content, self.required_features)

        all_qualities = soup.find('div', attrs={'id': 'remositorycontainerlist'})
        for qualities in all_qualities.find_all(href=re.compile('func=select')):
            if qualities.text.lower().strip() in self.quality_dict[quality]:
                quality_link = qualities['href']
                r = self.session.get(qualities['href'])
                soup = BeautifulSoup(r.content, self.required_features)
                break

        # If we want SDTV we are already on the right page, so quality_link will be None
        if not quality == Quality.SDTV and not quality_link:
            logger.debug(u'Could not find a subtitle with the required quality for series %s season %s' % (series, str(season)))
            return None

        all_episodes = soup.find('div', attrs={'id': 'remositoryfilelisting'})
        for episodes in all_episodes.find_all(href=re.compile('func=fileinfo')):
            ep_string = "%(seasonnumber)dx%(episodenumber)02d" % {'seasonnumber': season, 'episodenumber': episode}
            if re.search(ep_string, episodes.text, re.I) or re.search('completa$', episodes.text, re.I):
                match = re.search('&id=([0-9]+)', episodes['href'])
                if match:
                    episode_id = match.group(1)
                    return episode_id

        return episode_id

    def list_checked(self, video, languages):
        return self.query(video.path or video.release, languages, get_keywords(video.guess), video.series, video.season, video.episode)

    def query(self, filepath, languages, keywords, series, season, episode):

        logger.debug(u'Getting subtitles for %s season %d episode %d with languages %r' % (series, season, episode, languages))
        self.init_cache()
        try:
            series = series.lower().replace('(', '').replace(')', '')
            series_id = self.get_series_id(series)
        except KeyError:
            logger.debug(u'Could not find series id for %s' % series)
            return []

        episode_id = self.get_episode_id(series, series_id, season, episode, Quality.nameQuality(filepath))
        if not episode_id:
            logger.debug(u'Could not find subtitle for series %s' % series)
            return []

        r = self.session.get(self.server_url + 'index.php?option=com_remository&Itemid=6&func=fileinfo&id=' + episode_id)
        soup = BeautifulSoup(r.content)

        sub_link = soup.find('div', attrs={'id': 'remositoryfileinfo'}).find(href=re.compile('func=download'))['href']
        sub_language = self.get_language('it')
        path = get_subtitle_path(filepath, sub_language, self.config.multi)
        subtitle = ResultSubtitle(path, sub_language, self.__class__.__name__.lower(), sub_link)

        return [subtitle]

    def download(self, subtitle):

        logger.info(u'Downloading %s in %s' % (subtitle.link, subtitle.path))
        try:
            r = self.session.get(subtitle.link, headers={'Referer': self.server_url, 'User-Agent': self.user_agent})
            zipcontent = StringIO.StringIO(r.content)
            zipsub = zipfile.ZipFile(zipcontent)

            # if not zipsub.is_zipfile(zipcontent):
            #     raise DownloadFailedError('Downloaded file is not a zip file')

            subfile = ''
            if len(zipsub.namelist()) == 1:
                subfile = zipsub.namelist()[0]
            else:
                # Season zip: retrieve the season and episode numbers from the path
                guess = guessit.guess_file_info(subtitle.path, 'episode')
                ep_string = "s%(seasonnumber)02de%(episodenumber)02d" % {'seasonnumber': guess['season'], 'episodenumber': guess['episodeNumber']}
                for file in zipsub.namelist():
                    if re.search(ep_string, file, re.I):
                        subfile = file
                        break
            if os.path.splitext(subfile)[1] in EXTENSIONS:
                with open(subtitle.path, 'wb') as f:
                    f.write(zipsub.open(subfile).read())
            else:
                zipsub.close()
                raise DownloadFailedError('No subtitles found in zip file')

            zipsub.close()
        except Exception as e:
            if os.path.exists(subtitle.path):
                os.remove(subtitle.path)
            raise DownloadFailedError(str(e))

        logger.debug(u'Download finished')


Service = Itasa
@@ -0,0 +1,124 @@
# -*- coding: utf-8 -*-
# Copyright 2011-2012 Antoine Bertin <diaoulael@gmail.com>
#
# This file is part of subliminal.
#
# subliminal is free software; you can redistribute it and/or modify it under
# the terms of the GNU Lesser General Public License as published by
# the Free Software Foundation; either version 3 of the License, or
# (at your option) any later version.
#
# subliminal is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with subliminal. If not, see <http://www.gnu.org/licenses/>.
from . import ServiceBase
from ..exceptions import DownloadFailedError
from ..language import Language, language_set
from ..subtitles import ResultSubtitle
from ..utils import get_keywords
from ..videos import Episode, Movie
from bs4 import BeautifulSoup
import guessit
import logging
import re
from subliminal.subtitles import get_subtitle_path


logger = logging.getLogger("subliminal")


class PodnapisiWeb(ServiceBase):
    server_url = 'http://simple.podnapisi.net'
    site_url = 'http://www.podnapisi.net'
    api_based = True
    user_agent = 'Subliminal/0.6'
    videos = [Episode, Movie]
    require_video = False
    required_features = ['xml']
    languages = language_set(['Albanian', 'Arabic', 'Spanish (Argentina)', 'Belarusian', 'Bosnian', 'Portuguese (Brazil)', 'Bulgarian', 'Catalan',
                              'Chinese', 'Croatian', 'Czech', 'Danish', 'Dutch', 'English', 'Estonian', 'Persian',
                              'Finnish', 'French', 'German', 'gre', 'Kalaallisut', 'Hebrew', 'Hindi', 'Hungarian',
                              'Icelandic', 'Indonesian', 'Irish', 'Italian', 'Japanese', 'Kazakh', 'Korean', 'Latvian',
                              'Lithuanian', 'Macedonian', 'Malay', 'Norwegian', 'Polish', 'Portuguese', 'Romanian',
                              'Russian', 'Serbian', 'Sinhala', 'Slovak', 'Slovenian', 'Spanish', 'Swedish', 'Thai',
                              'Turkish', 'Ukrainian', 'Vietnamese'])
    # Bidirectional map between Language objects and the numeric codes used by the Podnapisi search API
    language_map = {Language('Albanian'): 29, Language('Arabic'): 12, Language('Spanish (Argentina)'): 14, Language('Belarusian'): 50,
                    Language('Bosnian'): 10, Language('Portuguese (Brazil)'): 48, Language('Bulgarian'): 33, Language('Catalan'): 53,
                    Language('Chinese'): 17, Language('Croatian'): 38, Language('Czech'): 7, Language('Danish'): 24,
                    Language('Dutch'): 23, Language('English'): 2, Language('Estonian'): 20, Language('Persian'): 52,
                    Language('Finnish'): 31, Language('French'): 8, Language('German'): 5, Language('gre'): 16,
                    Language('Kalaallisut'): 57, Language('Hebrew'): 22, Language('Hindi'): 42, Language('Hungarian'): 15,
                    Language('Icelandic'): 6, Language('Indonesian'): 54, Language('Irish'): 49, Language('Italian'): 9,
                    Language('Japanese'): 11, Language('Kazakh'): 58, Language('Korean'): 4, Language('Latvian'): 21,
                    Language('Lithuanian'): 19, Language('Macedonian'): 35, Language('Malay'): 55,
                    Language('Norwegian'): 3, Language('Polish'): 26, Language('Portuguese'): 32, Language('Romanian'): 13,
                    Language('Russian'): 27, Language('Serbian'): 36, Language('Sinhala'): 56, Language('Slovak'): 37,
                    Language('Slovenian'): 1, Language('Spanish'): 28, Language('Swedish'): 25, Language('Thai'): 44,
                    Language('Turkish'): 30, Language('Ukrainian'): 46, Language('Vietnamese'): 51,
                    29: Language('Albanian'), 12: Language('Arabic'), 14: Language('Spanish (Argentina)'), 50: Language('Belarusian'),
                    10: Language('Bosnian'), 48: Language('Portuguese (Brazil)'), 33: Language('Bulgarian'), 53: Language('Catalan'),
                    17: Language('Chinese'), 38: Language('Croatian'), 7: Language('Czech'), 24: Language('Danish'),
                    23: Language('Dutch'), 2: Language('English'), 20: Language('Estonian'), 52: Language('Persian'),
                    31: Language('Finnish'), 8: Language('French'), 5: Language('German'), 16: Language('gre'),
                    57: Language('Kalaallisut'), 22: Language('Hebrew'), 42: Language('Hindi'), 15: Language('Hungarian'),
                    6: Language('Icelandic'), 54: Language('Indonesian'), 49: Language('Irish'), 9: Language('Italian'),
                    11: Language('Japanese'), 58: Language('Kazakh'), 4: Language('Korean'), 21: Language('Latvian'),
                    19: Language('Lithuanian'), 35: Language('Macedonian'), 55: Language('Malay'), 40: Language('Chinese'),
                    3: Language('Norwegian'), 26: Language('Polish'), 32: Language('Portuguese'), 13: Language('Romanian'),
                    27: Language('Russian'), 36: Language('Serbian'), 47: Language('Serbian'), 56: Language('Sinhala'),
                    37: Language('Slovak'), 1: Language('Slovenian'), 28: Language('Spanish'), 25: Language('Swedish'),
                    44: Language('Thai'), 30: Language('Turkish'), 46: Language('Ukrainian'), 51: Language('Vietnamese')}

    def list_checked(self, video, languages):
        if isinstance(video, Movie):
            return self.query(video.path or video.release, languages, video.title, year=video.year,
                              keywords=get_keywords(video.guess))
        if isinstance(video, Episode):
            return self.query(video.path or video.release, languages, video.series, season=video.season,
                              episode=video.episode, keywords=get_keywords(video.guess))

    def query(self, filepath, languages, title, season=None, episode=None, year=None, keywords=None):
        params = {'sXML': 1, 'sK': title, 'sJ': ','.join([str(self.get_code(l)) for l in languages])}
        if season is not None:
            params['sTS'] = season
        if episode is not None:
            params['sTE'] = episode
        if year is not None:
            params['sY'] = year
        if keywords is not None:
            params['sR'] = keywords
        r = self.session.get(self.server_url + '/ppodnapisi/search', params=params)
        if r.status_code != 200:
            logger.error(u'Request %s returned status code %d' % (r.url, r.status_code))
            return []
        subtitles = []
        soup = BeautifulSoup(r.content, self.required_features)
        for sub in soup('subtitle'):
            if 'n' in sub.flags:
                logger.debug(u'Skipping hearing impaired')
                continue
            language = self.get_language(sub.languageId.text)
            confidence = float(sub.rating.text) / 5.0
            sub_keywords = set()
            for release in sub.release.text.split():
                sub_keywords |= get_keywords(guessit.guess_file_info(release + '.srt', 'autodetect'))
            sub_path = get_subtitle_path(filepath, language, self.config.multi)
            subtitle = ResultSubtitle(sub_path, language, self.__class__.__name__.lower(),
                                      sub.url.text, confidence=confidence, keywords=sub_keywords)
            subtitles.append(subtitle)
        return subtitles

    def download(self, subtitle):
        r = self.session.get(subtitle.link)
        if r.status_code != 200:
            raise DownloadFailedError()
        soup = BeautifulSoup(r.content)
        self.download_zip_file(self.server_url + soup.find('a', href=re.compile('download'))['href'], subtitle.path)
        return subtitle


Service = PodnapisiWeb
@@ -0,0 +1,99 @@
# -*- coding: utf-8 -*-
# Copyright 2013 Julien Goret <jgoret@gmail.com>
#
# This file is part of subliminal.
#
# subliminal is free software; you can redistribute it and/or modify it under
# the terms of the GNU Lesser General Public License as published by
# the Free Software Foundation; either version 3 of the License, or
# (at your option) any later version.
#
# subliminal is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with subliminal. If not, see <http://www.gnu.org/licenses/>.
from . import ServiceBase
from ..exceptions import ServiceError
from ..language import language_set, Language
from ..subtitles import get_subtitle_path, ResultSubtitle
from ..utils import get_keywords, split_keyword
from ..videos import Episode
from bs4 import BeautifulSoup
import logging
import urllib


logger = logging.getLogger("subliminal")


class Usub(ServiceBase):
    server_url = 'http://www.u-sub.net/sous-titres'
    site_url = 'http://www.u-sub.net/'
    api_based = False
    languages = language_set(['fr'])
    videos = [Episode]
    require_video = False
    #required_features = ['permissive']

    def list_checked(self, video, languages):
        return self.query(video.path or video.release, languages, get_keywords(video.guess), series=video.series, season=video.season, episode=video.episode)

    def query(self, filepath, languages, keywords=None, series=None, season=None, episode=None):

        ## Check that we really got information about our episode
        if series and season and episode:
            request_series = series.lower().replace(' ', '-')
            if isinstance(request_series, unicode):
                request_series = request_series.encode('utf-8')
            logger.debug(u'Getting subtitles for %s season %d episode %d with language %r' % (series, season, episode, languages))
            r = self.session.get('%s/%s/saison_%s' % (self.server_url, urllib.quote(request_series), season))
            if r.status_code == 404:
                logger.debug(u'Could not find subtitles for %s' % series)
                return []
        else:
            raise ServiceError('One or more parameters missing')

        ## Check that we did not get a big and nasty http error
        if r.status_code != 200:
            logger.error(u'Request %s returned status code %d' % (r.url, r.status_code))
            return []

        ## Build the episode naming patterns used in the subtitle file names
        if episode < 10:
            episode_num = '0' + str(episode)
        else:
            episode_num = str(episode)
        season_num = str(season)
        series_name = series.lower().replace(' ', '.')
        possible_episode_naming = [season_num + 'x' + episode_num, season_num + episode_num]

        ## Actually parse the page for the right subtitles
        soup = BeautifulSoup(r.content, self.required_features)
        subtitles = []
        subtitles_list = soup.find('table', {'id': 'subtitles_list'})
        link_list = subtitles_list.findAll('a', {'class': 'dl_link'})

        for link in link_list:
            link_url = link.get('href')
            splited_link = link_url.split('/')
            filename = splited_link[len(splited_link) - 1]
            for episode_naming in possible_episode_naming:
                if episode_naming in filename:
                    for language in languages:
                        path = get_subtitle_path(filepath, language, self.config.multi)
                        subtitle = ResultSubtitle(path, language, self.__class__.__name__.lower(), '%s' % link_url)
                        subtitles.append(subtitle)
        return subtitles

    def download(self, subtitle):
        ## All downloaded files are zip files
        self.download_zip_file(subtitle.link, subtitle.path)
        return subtitle


Service = Usub