Browse Source

Added new SubsCenter provider.

pull/7248/head
ofir123 8 years ago
parent
commit
6106332baa
  1. 18
      libs/subliminal/services/__init__.py
  2. 183
      libs/subliminal/services/subscenter.py

18
libs/subliminal/services/__init__.py

@ -183,16 +183,21 @@ class ServiceBase(object):
return False
return True
def download_file(self, url, filepath):
def download_file(self, url, filepath, data=None):
"""Attempt to download a file and remove it in case of failure
:param string url: URL to download
:param string filepath: destination path
:param string data: data to add to the post request
"""
logger.info(u'Downloading %s in %s' % (url, filepath))
try:
r = self.session.get(url, timeout = 10, headers = {'Referer': url, 'User-Agent': self.user_agent})
headers = {'Referer': url, 'User-Agent': self.user_agent}
if data:
r = self.session.post(url, data=data, timeout=10, headers=headers)
else:
r = self.session.get(url, timeout=10, headers=headers)
with open(filepath, 'wb') as f:
f.write(r.content)
except Exception as e:
@ -202,18 +207,23 @@ class ServiceBase(object):
raise DownloadFailedError(str(e))
logger.debug(u'Download finished')
def download_zip_file(self, url, filepath):
def download_zip_file(self, url, filepath, data=None):
"""Attempt to download a zip file and extract any subtitle file from it, if any.
This cleans up after itself if anything fails.
:param string url: URL of the zip file to download
:param string filepath: destination path for the subtitle
:param string data: data to add to the post request
"""
logger.info(u'Downloading %s in %s' % (url, filepath))
try:
zippath = filepath + '.zip'
r = self.session.get(url, timeout = 10, headers = {'Referer': url, 'User-Agent': self.user_agent})
headers = {'Referer': url, 'User-Agent': self.user_agent}
if data:
r = self.session.post(url, data=data, timeout=10, headers=headers)
else:
r = self.session.get(url, timeout=10, headers=headers)
with open(zippath, 'wb') as f:
f.write(r.content)
if not zipfile.is_zipfile(zippath):

183
libs/subliminal/services/subscenter.py

@ -16,124 +16,147 @@
# You should have received a copy of the GNU Lesser General Public License
# along with subliminal. If not, see <http://www.gnu.org/licenses/>.
from . import ServiceBase
from ..exceptions import DownloadFailedError, ServiceError
from ..exceptions import ServiceError
from ..language import language_set
from ..subtitles import get_subtitle_path, ResultSubtitle
from ..videos import Episode, Movie
from ..utils import to_unicode, get_keywords
from bs4 import BeautifulSoup
from ..utils import to_unicode
import bisect
import json
import logging
from urllib import urlencode
logger = logging.getLogger(__name__)
class Subscenter(ServiceBase):
    """Subtitle service backed by the SubsCenter (cinemast.org) JSON API.

    Replaces the old HTML-scraping implementation: the service now logs in
    with a shared account, keeps the returned ``user``/``token`` pair, and
    sends them with every search and download request.
    """

    server = 'http://www.cinemast.org/he/cinemast/api/'
    api_based = True
    languages = language_set(['he'])
    videos = [Episode, Movie]
    require_video = False

    # Shared credentials used for the API login handshake.
    default_username = 'subliminal@gmail.com'
    default_password = 'subliminal'

    def __init__(self, config=None):
        super(Subscenter, self).__init__(config)
        # Session credentials obtained by init(); None until login succeeds.
        self.token = None
        self.user_id = None

    def init(self):
        """Log in to the API and store the session ``user``/``token`` pair.

        :raise ServiceError: if the login request fails or the response
            does not contain a token.
        """
        super(Subscenter, self).init()
        logger.debug('Logging in')
        url = self.server_url + 'login/'
        # actual login
        data = {'username': self.default_username, 'password': self.default_password}
        r = self.session.post(url, data=urlencode(data), allow_redirects=False, timeout=10)
        if r.status_code != 200:
            raise ServiceError('Login failed')
        try:
            result = r.json()
            # A successful login always carries a token; anything else is a failure.
            if 'token' not in result:
                raise ServiceError('Login failed')
            logger.info('Logged in')
            self.user_id = result.get('user')
            self.token = result.get('token')
        except ValueError:
            # Non-JSON body means the API rejected us in an unexpected way.
            raise ServiceError('Login failed')

    def terminate(self):
        """Drop the stored session credentials."""
        super(Subscenter, self).terminate()
        if self.token or self.user_id:
            logger.info('Logged out')
            self.token = None
            self.user_id = None

    def list_checked(self, video, languages):
        """Build query parameters from *video* and delegate to :meth:`query`."""
        series = None
        season = None
        episode = None
        title = video.title
        year = video.year
        if isinstance(video, Episode):
            series = video.series
            season = video.season
            episode = video.episode
        return self.query(video.path or video.release, languages, series, season, episode, title, year)

    def query(self, filepath, languages=None, series=None, season=None, episode=None, title=None, year=None):
        """Search the API and return the matching :class:`ResultSubtitle` objects.

        :param string filepath: path of the video the subtitles are for.
        :param languages: languages to search for.
        :param string series: series name, for episode queries.
        :param int season: season number, for episode queries.
        :param int episode: episode number, for episode queries.
        :param string title: movie title, for movie queries.
        :param int year: movie release year, used to narrow movie searches.
        :raise ServiceError: if neither an episode nor a movie query can be built.
        """
        logger.debug(u'Getting subtitles for {0} season {1} episode {2} with languages {3}'.format(
            series, season, episode, languages))
        # Every request must carry the credentials obtained at login.
        query = {
            'user': self.user_id,
            'token': self.token
        }
        # episode
        if season and episode:
            query['q'] = series
            query['type'] = 'series'
            query['season'] = season
            query['episode'] = episode
        elif title:
            query['q'] = title
            query['type'] = 'movies'
            if year:
                # Widen the window by one year to tolerate off-by-one metadata.
                query['year_start'] = year - 1
                query['year_end'] = year
        else:
            raise ServiceError('One or more parameters are missing')
        # get the list of subtitles
        logger.debug('Getting the list of subtitles')
        url = self.server_url + 'search/'
        r = self.session.post(url, data=urlencode(query))
        r.raise_for_status()
        try:
            results = r.json()
        except ValueError:
            # Non-JSON response: treat as "no results" rather than crash.
            return {}
        # loop over results
        subtitles = {}
        for group_data in results.get('data', []):
            for language_code, subtitles_data in group_data.get('subtitles', {}).items():
                language_object = self.get_language(language_code)
                for subtitle_item in subtitles_data:
                    # read the item
                    subtitle_id = subtitle_item['id']
                    subtitle_key = subtitle_item['key']
                    release = subtitle_item['version']
                    subtitle_path = get_subtitle_path(filepath, language_object, self.config.multi)
                    download_link = self.server_url + 'subtitle/download/{0}/?v={1}&key={2}&sub_id={3}'.format(
                        language_code, release, subtitle_key, subtitle_id)
                    # Add the release and increment downloaded count if we already have the subtitle.
                    if subtitle_id in subtitles:
                        logger.debug('Found additional release {0} for subtitle {1}'.format(
                            release, subtitle_id))
                        bisect.insort_left(subtitles[subtitle_id].release, release)  # Deterministic order.
                        continue
                    # Otherwise create it.
                    subtitle = ResultSubtitle(subtitle_path, language_object, self.__class__.__name__.lower(),
                                              download_link, release=to_unicode(release))
                    logger.debug('Found subtitle %r', subtitle)
                    subtitles[subtitle_id] = subtitle
        return subtitles.values()

    def download(self, subtitle):
        """Download *subtitle*, POSTing the session credentials with the request."""
        data = {
            'user': self.user_id,
            'token': self.token
        }
        self.download_zip_file(subtitle.link, subtitle.path, data=urlencode(data))
        return subtitle

Loading…
Cancel
Save