From ac48077374c693987d3843eb40b4cd1c61820681 Mon Sep 17 00:00:00 2001 From: Prinz23 Date: Thu, 5 Aug 2021 19:00:49 +0100 Subject: [PATCH] Change api interface folders to api_* to fix legacy tmdb_api folder cleanup on new installs. Rename logger use of libtrakt to api_trakt. --- CHANGES.md | 1 + _cleaner.py | 5 + .../interfaces/default/config_notifications.tmpl | 2 +- lib/api_imdb/__init__.py | 0 lib/api_imdb/imdb_api.py | 217 ++++ lib/api_imdb/imdb_exceptions.py | 62 + lib/api_tmdb/__init__.py | 0 lib/api_tmdb/tmdb_api.py | 655 ++++++++++ lib/api_tmdb/tmdb_exceptions.py | 62 + lib/api_trakt/__init__.py | 2 + lib/api_trakt/exceptions.py | 49 + lib/api_trakt/indexerapiinterface.py | 348 ++++++ lib/api_trakt/trakt.py | 381 ++++++ lib/api_tvdb/UNLICENSE | 26 + lib/api_tvdb/__init__.py | 1 + lib/api_tvdb/tvdb_api.py | 1262 ++++++++++++++++++++ lib/api_tvdb/tvdb_cache.py | 251 ++++ lib/api_tvdb/tvdb_exceptions.py | 66 + lib/api_tvdb/tvdb_ui.py | 156 +++ lib/api_tvmaze/__init__.py | 0 lib/api_tvmaze/tvmaze_api.py | 666 +++++++++++ lib/api_tvmaze/tvmaze_exceptions.py | 62 + lib/imdb_api/__init__.py | 0 lib/imdb_api/imdb_api.py | 217 ---- lib/imdb_api/imdb_exceptions.py | 62 - lib/libtrakt/__init__.py | 2 - lib/libtrakt/exceptions.py | 49 - lib/libtrakt/indexerapiinterface.py | 348 ------ lib/libtrakt/trakt.py | 381 ------ lib/tmdb_api/__init__.py | 0 lib/tmdb_api/tmdb_api.py | 655 ---------- lib/tmdb_api/tmdb_exceptions.py | 62 - lib/tvdb_api/UNLICENSE | 26 - lib/tvdb_api/__init__.py | 1 - lib/tvdb_api/tvdb_api.py | 1262 -------------------- lib/tvdb_api/tvdb_cache.py | 251 ---- lib/tvdb_api/tvdb_exceptions.py | 66 - lib/tvdb_api/tvdb_ui.py | 156 --- lib/tvmaze_api/__init__.py | 0 lib/tvmaze_api/tvmaze_api.py | 666 ----------- lib/tvmaze_api/tvmaze_exceptions.py | 62 - sickbeard/__init__.py | 4 +- sickbeard/config.py | 2 +- sickbeard/indexers/indexer_config.py | 10 +- sickbeard/logger.py | 2 +- sickbeard/notifiers/__init__.py | 2 +- sickbeard/notifiers/trakt.py | 2 +- 
sickbeard/webserve.py | 4 +- 48 files changed, 4286 insertions(+), 4280 deletions(-) create mode 100644 lib/api_imdb/__init__.py create mode 100644 lib/api_imdb/imdb_api.py create mode 100644 lib/api_imdb/imdb_exceptions.py create mode 100644 lib/api_tmdb/__init__.py create mode 100644 lib/api_tmdb/tmdb_api.py create mode 100644 lib/api_tmdb/tmdb_exceptions.py create mode 100644 lib/api_trakt/__init__.py create mode 100644 lib/api_trakt/exceptions.py create mode 100644 lib/api_trakt/indexerapiinterface.py create mode 100644 lib/api_trakt/trakt.py create mode 100644 lib/api_tvdb/UNLICENSE create mode 100644 lib/api_tvdb/__init__.py create mode 100644 lib/api_tvdb/tvdb_api.py create mode 100644 lib/api_tvdb/tvdb_cache.py create mode 100644 lib/api_tvdb/tvdb_exceptions.py create mode 100644 lib/api_tvdb/tvdb_ui.py create mode 100644 lib/api_tvmaze/__init__.py create mode 100644 lib/api_tvmaze/tvmaze_api.py create mode 100644 lib/api_tvmaze/tvmaze_exceptions.py delete mode 100644 lib/imdb_api/__init__.py delete mode 100644 lib/imdb_api/imdb_api.py delete mode 100644 lib/imdb_api/imdb_exceptions.py delete mode 100644 lib/libtrakt/__init__.py delete mode 100644 lib/libtrakt/exceptions.py delete mode 100644 lib/libtrakt/indexerapiinterface.py delete mode 100644 lib/libtrakt/trakt.py delete mode 100644 lib/tmdb_api/__init__.py delete mode 100644 lib/tmdb_api/tmdb_api.py delete mode 100644 lib/tmdb_api/tmdb_exceptions.py delete mode 100644 lib/tvdb_api/UNLICENSE delete mode 100644 lib/tvdb_api/__init__.py delete mode 100644 lib/tvdb_api/tvdb_api.py delete mode 100644 lib/tvdb_api/tvdb_cache.py delete mode 100644 lib/tvdb_api/tvdb_exceptions.py delete mode 100644 lib/tvdb_api/tvdb_ui.py delete mode 100644 lib/tvmaze_api/__init__.py delete mode 100644 lib/tvmaze_api/tvmaze_api.py delete mode 100644 lib/tvmaze_api/tvmaze_exceptions.py diff --git a/CHANGES.md b/CHANGES.md index e87e75e..97eb5e7 100644 --- a/CHANGES.md +++ b/CHANGES.md @@ -90,6 +90,7 @@ * Change revert all IMDb 
ids to 7 chars * Fix new unique_name in templates * Fix genre field in tvmaze_api +* Change api interface folders to api_* to fix legacy tmdb_api folder cleanup on new installs ### 0.24.15 (2021-08-05 11:45:00 UTC) diff --git a/_cleaner.py b/_cleaner.py index 4aaa1de..ada93b5 100644 --- a/_cleaner.py +++ b/_cleaner.py @@ -46,6 +46,11 @@ if old_magic != magic_number: # skip cleaned005 as used during dev by testers cleanups = [ + ['.cleaned007.tmp', ('lib', 'tvmaze_api'), [ + ('lib', 'imdb_api', '__pycache__'), ('lib', 'imdb_api'), + ('lib', 'libtrakt', '__pycache__'), ('lib', 'libtrakt'), + ('lib', 'tvdb_api', '__pycache__'), ('lib', 'tvdb_api'), + ('lib', 'tvmaze_api', '__pycache__'), ('lib', 'tvmaze_api')]], ['.cleaned006.tmp', ('lib', 'boto'), [ ('lib', 'boto'), ('lib', 'growl'), ('lib', 'hachoir', 'core'), ('lib', 'hachoir', 'field'), ('lib', 'hachoir', 'metadata'), diff --git a/gui/slick/interfaces/default/config_notifications.tmpl b/gui/slick/interfaces/default/config_notifications.tmpl index 328363a..d141ffa 100644 --- a/gui/slick/interfaces/default/config_notifications.tmpl +++ b/gui/slick/interfaces/default/config_notifications.tmpl @@ -1,7 +1,7 @@ #import base64 #import sickbeard #import re -#from lib.libtrakt import TraktAPI +#from lib.api_trakt import TraktAPI #from sickbeard.helpers import anon_url, starify #from sickbeard.notifiers import NotifierFactory <% def sg_var(varname, default=False): return getattr(sickbeard, varname, default) %>#slurp# diff --git a/lib/api_imdb/__init__.py b/lib/api_imdb/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/lib/api_imdb/imdb_api.py b/lib/api_imdb/imdb_api.py new file mode 100644 index 0000000..8c86514 --- /dev/null +++ b/lib/api_imdb/imdb_api.py @@ -0,0 +1,217 @@ +# encoding:utf-8 +# author:Prinz23 +# project:imdb_api + +__author__ = 'Prinz23' +__version__ = '1.0' +__api_version__ = '1.0.0' + +import logging +import re +from .imdb_exceptions import * +from exceptions_helper import ex +from six 
import iteritems +from bs4_parser import BS4Parser +from lib import imdbpie +from lib.tvinfo_base.exceptions import BaseTVinfoShownotfound +from lib.tvinfo_base import TVInfoBase, TVINFO_TRAKT, TVINFO_TMDB, TVINFO_TVDB, TVINFO_TVRAGE, TVINFO_IMDB, \ + Person, PersonGenders, TVINFO_TWITTER, TVINFO_FACEBOOK, TVINFO_WIKIPEDIA, TVINFO_INSTAGRAM, Character, TVInfoShow, \ + TVInfoIDs +from sg_helpers import get_url, try_int +from lib.dateutil.parser import parser + +# noinspection PyUnreachableCode +if False: + from typing import Any, AnyStr, Dict, List, Optional, Union + from six import integer_types + +tz_p = parser() +log = logging.getLogger('imdb.api') +log.addHandler(logging.NullHandler()) + + +class IMDbIndexer(TVInfoBase): + # supported_id_searches = [TVINFO_IMDB] + supported_person_id_searches = [TVINFO_IMDB] + supported_id_searches = [TVINFO_IMDB] + + # noinspection PyUnusedLocal + # noinspection PyDefaultArgument + def __init__(self, *args, **kwargs): + super(IMDbIndexer, self).__init__(*args, **kwargs) + + def search(self, series): + # type: (AnyStr) -> List + """This searches for the series name + and returns the result list + """ + result = [] + cache_name_key = 's-title-%s' % series + is_none, shows = self._get_cache_entry(cache_name_key) + if not self.config.get('cache_search') or (None is shows and not is_none): + try: + result = imdbpie.Imdb().search_for_title(series) + except (BaseException, Exception): + pass + self._set_cache_entry(cache_name_key, result, expire=self.search_cache_expire) + else: + result = shows + return result + + def _search_show(self, name=None, ids=None, **kwargs): + # type: (AnyStr, Dict[integer_types, integer_types], Optional[Any]) -> List[TVInfoShow] + """This searches IMDB for the series name, + """ + def _make_result_dict(s): + imdb_id = try_int(re.search(r'tt(\d+)', s.get('id') or s.get('imdb_id')).group(1), None) + tvs = TVInfoShow() + tvs.seriesname, tvs.id, tvs.firstaired, tvs.genre_list, tvs.overview, tvs.poster, tvs.ids 
= \ + s['title'], imdb_id, s.get('releaseDetails', {}).get('date') or s.get('year'), s.get('genres'), \ + s.get('plot', {}).get('outline', {}).get('text'), s.get('image') and s['image'].get('url'), \ + TVInfoIDs(imdb=imdb_id) + return tvs + + results = [] + if ids: + for t, p in iteritems(ids): + if t in self.supported_id_searches: + if t == TVINFO_IMDB: + cache_id_key = 's-id-%s-%s' % (TVINFO_IMDB, p) + is_none, shows = self._get_cache_entry(cache_id_key) + if not self.config.get('cache_search') or (None is shows and not is_none): + try: + show = imdbpie.Imdb().get_title_auxiliary('tt%07d' % p) + except (BaseException, Exception): + continue + self._set_cache_entry(cache_id_key, show, expire=self.search_cache_expire) + else: + show = shows + if show: + results.extend([_make_result_dict(show)]) + if name: + for n in ([name], name)[isinstance(name, list)]: + try: + shows = self.search(n) + results.extend([_make_result_dict(s) for s in shows]) + except (BaseException, Exception) as e: + log.debug('Error searching for show: %s' % ex(e)) + seen = set() + results = [seen.add(r.id) or r for r in results if r.id not in seen] + return results + + @staticmethod + def _convert_person(person_obj, filmography=None, bio=None): + if isinstance(person_obj, dict) and 'imdb_id' in person_obj: + imdb_id = try_int(re.search(r'(\d+)', person_obj['imdb_id']).group(1)) + return Person(p_id=imdb_id, name=person_obj['name'], ids={TVINFO_IMDB: imdb_id}) + characters = [] + for known_for in (filmography and filmography['filmography']) or []: + if known_for['titleType'] not in ('tvSeries', 'tvMiniSeries'): + continue + for character in known_for.get('characters') or []: + show = TVInfoShow() + show.id = try_int(re.search(r'(\d+)', known_for.get('id')).group(1)) + show.ids.imdb = show.id + show.seriesname = known_for.get('title') + show.firstaired = known_for.get('year') + characters.append( + Character(name=character, show=show, start_year=known_for.get('startYear'), + 
end_year=known_for.get('endYear')) + ) + try: + birthdate = person_obj['base']['birthDate'] and tz_p.parse(person_obj['base']['birthDate']).date() + except (BaseException, Exception): + birthdate = None + try: + deathdate = person_obj['base']['deathDate'] and tz_p.parse(person_obj['base']['deathDate']).date() + except (BaseException, Exception): + deathdate = None + imdb_id = try_int(re.search(r'(\d+)', person_obj['id']).group(1)) + return Person(p_id=imdb_id, name=person_obj['base'].get('name'), ids={TVINFO_IMDB: imdb_id}, + gender=PersonGenders.imdb_map.get(person_obj['base'].get('gender'), PersonGenders.unknown), + image=person_obj['base'].get('image', {}).get('url'), + birthplace=person_obj['base'].get('birthPlace'), birthdate=birthdate, deathdate=deathdate, + height=person_obj['base'].get('heightCentimeters'), characters=characters, + deathplace=person_obj['base'].get('deathPlace'), + nicknames=set((person_obj['base'].get('nicknames') and person_obj['base'].get('nicknames')) + or []), + real_name=person_obj['base'].get('realName'), + akas=set((person_obj['base'].get('akas') and person_obj['base'].get('akas')) or []), bio=bio + ) + + def _search_person(self, name=None, ids=None): + # type: (AnyStr, Dict[integer_types, integer_types]) -> List[Person] + """ + search for person by name + :param name: name to search for + :param ids: dict of ids to search + :return: list of found person's + """ + results, ids = [], ids or {} + for tv_src in self.supported_person_id_searches: + if tv_src in ids: + if TVINFO_IMDB == tv_src: + try: + p = self.get_person(ids[tv_src]) + except (BaseException, Exception): + p = None + if p: + results.append(p) + if name: + cache_name_key = 'p-name-%s' % name + is_none, ps = self._get_cache_entry(cache_name_key) + if None is ps and not is_none: + try: + ps = imdbpie.Imdb().search_for_name(name) + except (BaseException, Exception): + ps = None + self._set_cache_entry(cache_name_key, ps) + if ps: + for cp in ps: + if not any(1 for c in 
results if cp['imdb_id'] == 'nm%07d' % c.id): + results.append(self._convert_person(cp)) + return results + + def _get_bio(self, p_id): + try: + bio = get_url('https://www.imdb.com/name/nm%07d/bio' % p_id, headers={'Accept-Language': 'en'}) + if not bio: + return + with BS4Parser(bio) as bio_item: + bv = bio_item.find(string='Mini Bio', recursive=True).find_next('p') + for a in bv.findAll('a'): + a.replaceWithChildren() + for b in bv.findAll('br'): + b.replaceWith('\n') + return bv.get_text().strip() + except (BaseException, Exception): + return + + def get_person(self, p_id, get_show_credits=False, get_images=False, **kwargs): + # type: (integer_types, bool, bool, Any) -> Optional[Person] + if not p_id: + return + cache_main_key, cache_bio_key, cache_credits_key = 'p-main-%s' % p_id, 'p-bio-%s' % p_id, 'p-credits-%s' % p_id + is_none, p = self._get_cache_entry(cache_main_key) + if None is p and not is_none: + try: + p = imdbpie.Imdb().get_name(imdb_id='nm%07d' % p_id) + except (BaseException, Exception): + p = None + self._set_cache_entry(cache_main_key, p) + is_none, bio = self._get_cache_entry(cache_bio_key) + if None is bio and not is_none: + bio = self._get_bio(p_id) + self._set_cache_entry(cache_bio_key, bio) + fg = None + if get_show_credits: + is_none, fg = self._get_cache_entry(cache_credits_key) + if None is fg and not is_none: + try: + fg = imdbpie.Imdb().get_name_filmography(imdb_id='nm%07d' % p_id) + except (BaseException, Exception): + fg = None + self._set_cache_entry(cache_credits_key, fg) + if p: + return self._convert_person(p, filmography=fg, bio=bio) + diff --git a/lib/api_imdb/imdb_exceptions.py b/lib/api_imdb/imdb_exceptions.py new file mode 100644 index 0000000..eaf267a --- /dev/null +++ b/lib/api_imdb/imdb_exceptions.py @@ -0,0 +1,62 @@ +# encoding:utf-8 + +"""Custom exceptions used or raised by tvmaze_api +""" + +__author__ = 'Prinz23' +__version__ = '1.0' + +__all__ = ['IMDbException', 'IMDbError', 'IMDbUserabort', 'IMDbShownotfound', + 
'IMDbSeasonnotfound', 'IMDbEpisodenotfound', 'IMDbAttributenotfound', 'IMDbTokenexpired'] + +from lib.tvinfo_base.exceptions import * + + +class IMDbException(BaseTVinfoException): + """Any exception generated by tvdb_api + """ + pass + + +class IMDbError(BaseTVinfoError, IMDbException): + """An error with thetvdb.com (Cannot connect, for example) + """ + pass + + +class IMDbUserabort(BaseTVinfoUserabort, IMDbError): + """User aborted the interactive selection (via + the q command, ^c etc) + """ + pass + + +class IMDbShownotfound(BaseTVinfoShownotfound, IMDbError): + """Show cannot be found on thetvdb.com (non-existant show) + """ + pass + + +class IMDbSeasonnotfound(BaseTVinfoSeasonnotfound, IMDbError): + """Season cannot be found on thetvdb.com + """ + pass + + +class IMDbEpisodenotfound(BaseTVinfoEpisodenotfound, IMDbError): + """Episode cannot be found on thetvdb.com + """ + pass + + +class IMDbAttributenotfound(BaseTVinfoAttributenotfound, IMDbError): + """Raised if an episode does not have the requested + attribute (such as a episode name) + """ + pass + + +class IMDbTokenexpired(BaseTVinfoAuthenticationerror, IMDbError): + """token expired or missing thetvdb.com + """ + pass diff --git a/lib/api_tmdb/__init__.py b/lib/api_tmdb/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/lib/api_tmdb/tmdb_api.py b/lib/api_tmdb/tmdb_api.py new file mode 100644 index 0000000..ec7b78f --- /dev/null +++ b/lib/api_tmdb/tmdb_api.py @@ -0,0 +1,655 @@ +# encoding:utf-8 +# author:Prinz23 +# project:tmdb_api + +__author__ = 'Prinz23' +__version__ = '1.0' +__api_version__ = '1.0.0' + +import json +import logging +import datetime +import re + +from six import iteritems +from sg_helpers import get_url, try_int +from lib.dateutil.parser import parser +from lib.dateutil.tz.tz import _datetime_to_timestamp +from lib.exceptions_helper import ConnectionSkipException, ex +from .tmdb_exceptions import * +from lib.tvinfo_base import TVInfoBase, TVInfoImage, 
TVInfoImageSize, TVInfoImageType, Character, Crew, \ + crew_type_names, Person, RoleTypes, TVInfoEpisode, TVInfoIDs, TVInfoSeason, PersonGenders, TVINFO_TVMAZE, \ + TVINFO_TVDB, TVINFO_IMDB, TVINFO_TMDB, TVINFO_TWITTER, TVINFO_INSTAGRAM, TVINFO_FACEBOOK, TVInfoShow, \ + TVInfoSocialIDs, TVInfoNetwork +from lib import tmdbsimple + +# noinspection PyUnreachableCode +if False: + from typing import Any, AnyStr, Dict, List, Optional, Union + from six import integer_types + +log = logging.getLogger('tmdb.api') +log.addHandler(logging.NullHandler()) +tz_p = parser() +tmdbsimple.API_KEY = 'edc5f123313769de83a71e157758030b' + +id_map = {TVINFO_IMDB: 'imdb_id', TVINFO_TVDB: 'tvdb_id', TVINFO_FACEBOOK: 'facebook_id', TVINFO_TWITTER: 'twitter_id', + TVINFO_INSTAGRAM: 'instagram_id'} + +tv_show_map = {'name': 'seriesname', 'id': 'id', 'first_air_date': 'firstaired', 'status': 'status', + 'original_language': 'language'} + + +def tmdb_GET(self, path, params=None): + url = self._get_complete_url(path) + params = self._get_params(params) + return get_url(url=url, params=params, json=True, raise_skip_exception=True) + + +def tmdb_POST(self, path, params=None, payload=None): + url = self._get_complete_url(path) + params = self._get_params(params) + data = json.dumps(payload) if payload else payload + return get_url(url=url, params=params, post_data=data, json=True, raise_skip_exception=True) + + +tmdbsimple.base.TMDB._GET = tmdb_GET +tmdbsimple.base.TMDB._POST = tmdb_POST + +_TMDB_CONSTANTS_CACHE = {'date': datetime.datetime(2000, 1, 1), 'data': {}} + + +def get_tmdb_constants(): + # type: (...) 
-> Dict + """return tmdbsimple Configuration().info() or cached copy""" + global _TMDB_CONSTANTS_CACHE + # only retrieve info data if older then 3 days + if 3 < (datetime.datetime.now() - _TMDB_CONSTANTS_CACHE['date']).days or not _TMDB_CONSTANTS_CACHE['data']: + try: + tv_genres = {g['id']: g['name'] for g in tmdbsimple.Genres().tv_list()['genres']} + response = tmdbsimple.Configuration().info() + sorted_poster_sizes = sorted((try_int(_p.replace('w', '')) for _p in response['images']['poster_sizes'] + if 'original' != _p), reverse=True) + sorted_backdrop_sizes = sorted((try_int(_p.replace('w', '')) for _p in response['images']['backdrop_sizes'] + if 'original' != _p), reverse=True) + sorted_profile_sizes = sorted((try_int(_p.replace('w', '')) for _p in response['images']['profile_sizes'] + if 'original' != _p and not _p.startswith('h')), reverse=True) + _TMDB_CONSTANTS_CACHE = { + 'date': datetime.datetime.now(), + 'data': { + 'genres': tv_genres, + 'img_base_url': response['images']['secure_base_url'], + 'img_profile_sizes': response['images']['profile_sizes'], + 'poster_sizes': response['images']['poster_sizes'], + 'backdrop_sizes': response['images']['backdrop_sizes'], + 'logo_sizes': response['images']['logo_sizes'], + 'still_sizes': response['images']['still_sizes'], + 'change_keys': response['change_keys'], + 'size_map': { + TVInfoImageType.poster: { + TVInfoImageSize.original: 'original', + TVInfoImageSize.medium: 'w%s' % next((s for s in sorted_poster_sizes if s < 400), 342), + TVInfoImageSize.small: 'w%s' % next((s for s in sorted_poster_sizes if s < 200), 185) + }, + TVInfoImageType.fanart: { + TVInfoImageSize.original: 'original', + TVInfoImageSize.medium: 'w%s' % next((s for s in sorted_backdrop_sizes if s < 1000), 780), + TVInfoImageSize.small: 'w%s' % next((s for s in sorted_backdrop_sizes if s < 500), 300) + }, + TVInfoImageType.person_poster: { + TVInfoImageSize.original: 'original', + TVInfoImageSize.medium: 'w%s' % next((s for s in 
sorted_profile_sizes if s < 400), 185), + TVInfoImageSize.small: 'w%s' % next((s for s in sorted_profile_sizes if s < 150), 45) + } + } + } + } + except (BaseException, Exception): + poster_sizes = ['w92', 'w154', 'w185', 'w342', 'w500', 'w780', 'original'] + sorted_poster_sizes = sorted((try_int(_p.replace('w', '')) for _p in poster_sizes + if 'original' != _p), reverse=True) + backdrop_sizes = ['w300', 'w780', 'w1280', 'original'] + sorted_backdrop_sizes = sorted((try_int(_p.replace('w', '')) for _p in backdrop_sizes + if 'original' != _p), reverse=True) + profile_sizes = ['w45', 'w185', 'h632', 'original'] + sorted_profile_sizes = sorted((try_int(_p.replace('w', '')) for _p in profile_sizes + if 'original' != _p and not _p.startswith('h')), reverse=True) + _TMDB_CONSTANTS_CACHE['data'] = { + 'genres': {10759: 'Action & Adventure', 16: 'Animation', 35: 'Comedy', 80: 'Crime', 99: 'Documentary', + 18: 'Drama', 10751: 'Family', 10762: 'Kids', 9648: 'Mystery', 10763: 'News', + 10764: 'Reality', 10765: 'Sci-Fi & Fantasy', 10766: 'Soap', 10767: 'Talk', + 10768: 'War & Politics', 37: 'Western'}, + 'img_base_url': r'https://image.tmdb.org/t/p/', + 'img_profile_sizes': ['w45', 'w185', 'h632', 'original'], + 'poster_sizes': poster_sizes, + 'backdrop_sizes': backdrop_sizes, + 'logo_sizes': ['w45', 'w92', 'w154', 'w185', 'w300', 'w500', 'original'], + 'still_sizes': ['w92', 'w185', 'w300', 'original'], + 'change_keys': ['adult', 'air_date', 'also_known_as', 'alternative_titles', 'biography', 'birthday', + 'budget', 'cast', 'certifications', 'character_names', 'created_by', 'crew', 'deathday', + 'episode', 'episode_number', 'episode_run_time', 'freebase_id', 'freebase_mid', + 'general', 'genres', 'guest_stars', 'homepage', 'images', 'imdb_id', 'languages', + 'name', 'network', 'origin_country', 'original_name', 'original_title', 'overview', + 'parts', 'place_of_birth', 'plot_keywords', 'production_code', 'production_companies', + 'production_countries', 'releases', 'revenue', 
'runtime', 'season', 'season_number', + 'season_regular', 'spoken_languages', 'status', 'tagline', 'title', 'translations', + 'tvdb_id', 'tvrage_id', 'type', 'video', 'videos'], + 'size_map': { + TVInfoImageType.poster: { + TVInfoImageSize.original: 'original', + TVInfoImageSize.medium: 'w%s' % next((s for s in sorted_poster_sizes if s < 400), 342), + TVInfoImageSize.small: 'w%s' % next((s for s in sorted_poster_sizes if s < 200), 185) + }, + TVInfoImageType.fanart: { + TVInfoImageSize.original: 'original', + TVInfoImageSize.medium: 'w%s' % next((s for s in sorted_backdrop_sizes if s < 1000), 780), + TVInfoImageSize.small: 'w%s' % next((s for s in sorted_backdrop_sizes if s < 500), 300) + }, + TVInfoImageType.person_poster: { + TVInfoImageSize.original: 'original', + TVInfoImageSize.medium: 'w%s' % next((s for s in sorted_profile_sizes if s < 400), 185), + TVInfoImageSize.small: 'w%s' % next((s for s in sorted_profile_sizes if s < 150), 45) + } + } + } + pass + return _TMDB_CONSTANTS_CACHE['data'] + + +class TmdbIndexer(TVInfoBase): + API_KEY = tmdbsimple.API_KEY + supported_person_id_searches = [TVINFO_TMDB, TVINFO_IMDB, TVINFO_TWITTER, TVINFO_INSTAGRAM, TVINFO_FACEBOOK] + supported_id_searches = [TVINFO_TMDB, TVINFO_IMDB, TVINFO_TVDB] + + # noinspection PyUnusedLocal + # noinspection PyDefaultArgument + def __init__(self, *args, **kwargs): + super(TmdbIndexer, self).__init__(*args, **kwargs) + response = get_tmdb_constants() + self.img_base_url = response.get('img_base_url') + self.size_map = response.get('size_map') + self.tv_genres = response.get('genres') + + def _search_show(self, name=None, ids=None, **kwargs): + # type: (AnyStr, Dict[integer_types, integer_types], Optional[Any]) -> List[TVInfoShow] + """This searches TMDB for the series name, + """ + def _make_result_dict(s): + tvs = TVInfoShow() + tvs.seriesname, tvs.id, tvs.firstaired, tvs.genre_list, tvs.overview, tvs.poster, tvs.ids = \ + s['name'], s['id'], s.get('first_air_date'), \ + 
[self.tv_genres.get(g) for g in s.get('genre_ids') or []], \ + s.get('overview'), s.get('poster_path') and '%s%s%s' % ( + self.img_base_url, self.size_map[TVInfoImageType.poster][TVInfoImageSize.original], + s.get('poster_path')), \ + TVInfoIDs(tvdb=s.get('external_ids') and s['external_ids'].get('tvdb_id'), + tmdb=s['id'], rage=s.get('external_ids') and s['external_ids'].get('tvrage_id'), + imdb=s.get('external_ids') and s['external_ids'].get('imdb_id') and + try_int(s['external_ids'].get('imdb_id', '').replace('tt', ''), None)) + return tvs + + results = [] + if ids: + for t, p in iteritems(ids): + if t in self.supported_id_searches: + if t == TVINFO_TMDB: + cache_id_key = 's-id-%s-%s' % (TVINFO_TMDB, p) + is_none, shows = self._get_cache_entry(cache_id_key) + if not self.config.get('cache_search') or (None is shows and not is_none): + try: + show = tmdbsimple.TV(id=p).info(append_to_response='external_ids') + except (BaseException, Exception): + continue + self._set_cache_entry(cache_id_key, show, expire=self.search_cache_expire) + else: + show = shows + if show: + results.extend([_make_result_dict(show)]) + elif t in (TVINFO_IMDB, TVINFO_TVDB): + cache_id_key = 's-id-%s-%s' % (t, p) + is_none, shows = self._get_cache_entry(cache_id_key) + if not self.config.get('cache_search') or (None is shows and not is_none): + try: + show = tmdbsimple.Find(id=(p, 'tt%07d' % p)[t == TVINFO_IMDB]).info( + external_source=id_map[t]) + if show.get('tv_results') and 1 == len(show['tv_results']): + show = tmdbsimple.TV(id=show['tv_results'][0]['id']).info( + append_to_response='external_ids') + except (BaseException, Exception): + continue + self._set_cache_entry(cache_id_key, show, expire=self.search_cache_expire) + else: + show = shows + if show: + results.extend([_make_result_dict(s) + for s in show.get('tv_results') or (show.get('id') and [show]) or []]) + if name: + for n in ([name], name)[isinstance(name, list)]: + cache_name_key = 's-name-%s' % n + is_none, shows = 
self._get_cache_entry(cache_name_key) + if not self.config.get('cache_search') or (None is shows and not is_none): + try: + shows = tmdbsimple.Search().tv(query=n) + self._set_cache_entry(cache_name_key, shows, expire=self.search_cache_expire) + results.extend([_make_result_dict(s) for s in shows.get('results') or []]) + except (BaseException, Exception) as e: + log.debug('Error searching for show: %s' % ex(e)) + else: + results.extend([_make_result_dict(s) for s in (shows and shows.get('results')) or []]) + seen = set() + results = [seen.add(r.id) or r for r in results if r.id not in seen] + return results + + def _convert_person_obj(self, person_obj): + gender = PersonGenders.tmdb_map.get(person_obj.get('gender'), PersonGenders.unknown) + try: + birthdate = person_obj.get('birthday') and tz_p.parse(person_obj.get('birthday')).date() + except (BaseException, Exception): + birthdate = None + try: + deathdate = person_obj.get('deathday') and tz_p.parse(person_obj.get('deathday')).date() + except (BaseException, Exception): + deathdate = None + + cast = person_obj.get('cast') or person_obj.get('tv_credits', {}).get('cast') + + characters = [] + for character in cast or []: + show = TVInfoShow() + show.id = character.get('id') + show.ids = TVInfoIDs(ids={TVINFO_TMDB: show.id}) + show.seriesname = character.get('original_name') + show.overview = character.get('overview') + show.firstaired = character.get('first_air_date') + characters.append( + Character(name=character.get('character'), show=show) + ) + + pi = person_obj.get('images') + image_url, main_image, thumb_url, main_thumb, image_list = None, None, None, None, [] + if pi: + for i in sorted(pi['profiles'], key=lambda a: a['vote_average'] or 0, reverse=True): + if not any((main_image, main_thumb)): + if 500 < i['height'] and not image_url: + image_url = '%s%s%s' % \ + (self.img_base_url, self.size_map[TVInfoImageType.person_poster][TVInfoImageSize.original], + i['file_path']) + thumb_url = '%s%s%s' % \ + 
(self.img_base_url, self.size_map[TVInfoImageType.person_poster][TVInfoImageSize.medium], + i['file_path']) + elif not thumb_url: + thumb_url = '%s%s%s' % \ + (self.img_base_url, self.size_map[TVInfoImageType.person_poster][TVInfoImageSize.original], + i['file_path']) + if image_url and thumb_url: + main_image_url, main_thumb = image_url, thumb_url + image_list.append( + TVInfoImage( + image_type=TVInfoImageType.person_poster, + sizes={_s: '%s%s%s' % (self.img_base_url, + self.size_map[TVInfoImageType.person_poster][_s], i['file_path']) + for _s in (TVInfoImageSize.original, TVInfoImageSize.medium, TVInfoImageSize.small)}, + aspect_ratio=i['aspect_ratio'], + height=i['height'], + width=i['width'], + lang=i['iso_639_1'], + rating=i['vote_average'], + votes=i['vote_count'] + ) + ) + + return Person(p_id=person_obj.get('id'), gender=gender, name=person_obj.get('name'), birthdate=birthdate, + deathdate=deathdate, bio=person_obj.get('biography'), birthplace=person_obj.get('place_of_birth'), + homepage=person_obj.get('homepage'), characters=characters, image=main_image, + thumb_url=main_thumb, images=image_list, akas=set(person_obj.get('also_known_as') or []), + ids={TVINFO_TMDB: person_obj.get('id'), + TVINFO_IMDB: + person_obj.get('imdb_id') and try_int(person_obj['imdb_id'].replace('nm', ''), None)}) + + def _search_person(self, name=None, ids=None): + # type: (AnyStr, Dict[integer_types, integer_types]) -> List[Person] + """ + search for person by name + :param name: name to search for + :param ids: dict of ids to search + :return: list of found person's + """ + results, ids = [], ids or {} + search_text_obj = tmdbsimple.Search() + for tv_src in self.supported_person_id_searches: + if tv_src in ids: + if TVINFO_TMDB == tv_src: + try: + people_obj = self.get_person(ids[tv_src]) + except ConnectionSkipException as e: + raise e + except (BaseException, Exception): + people_obj = None + if people_obj and not any(1 for r in results if r.id == people_obj.id): + 
results.append(people_obj) + elif tv_src in (TVINFO_IMDB, TVINFO_TMDB): + try: + cache_key_name = 'p-src-%s-%s' % (tv_src, ids.get(tv_src)) + is_none, result_objs = self._get_cache_entry(cache_key_name) + if None is result_objs and not is_none: + result_objs = tmdbsimple.Find(id=(ids.get(tv_src), + 'nm%07d' % ids.get(tv_src))[TVINFO_IMDB == tv_src]).info( + external_source=id_map[tv_src]).get('person_results') + self._set_cache_entry(cache_key_name, result_objs) + except ConnectionSkipException as e: + raise e + except (BaseException, Exception): + result_objs = None + if result_objs: + for person_obj in result_objs: + if not any(1 for r in results if r.id == person_obj['id']): + results.append(self._convert_person_obj(person_obj)) + else: + continue + if name: + cache_key_name = 'p-src-text-%s' % name + is_none, people_objs = self._get_cache_entry(cache_key_name) + if None is people_objs and not is_none: + try: + people_objs = search_text_obj.person(query=name, include_adult=True) + self._set_cache_entry(cache_key_name, people_objs) + except ConnectionSkipException as e: + raise e + except (BaseException, Exception): + people_objs = None + if people_objs and people_objs.get('results'): + for person_obj in people_objs['results']: + if not any(1 for r in results if r.id == person_obj['id']): + results.append(self._convert_person_obj(person_obj)) + + return results + + def get_person(self, p_id, get_show_credits=False, get_images=False, **kwargs): + # type: (integer_types, bool, bool, Any) -> Optional[Person] + kw = {} + to_append = [] + if get_show_credits: + to_append.append('tv_credits') + if get_images: + to_append.append('images') + if to_append: + kw['append_to_response'] = ','.join(to_append) + + cache_key_name = 'p-%s-%s' % (p_id, '-'.join(to_append)) + is_none, people_obj = self._get_cache_entry(cache_key_name) + if None is people_obj and not is_none: + try: + people_obj = tmdbsimple.People(id=p_id).info(**kw) + except ConnectionSkipException as e: + raise e 
+ except (BaseException, Exception): + people_obj = None + self._set_cache_entry(cache_key_name, people_obj) + + if people_obj: + return self._convert_person_obj(people_obj) + + def _convert_show(self, show_dict): + # type: (Dict) -> TVInfoShow + tv_s = TVInfoShow() + if show_dict: + tv_s.seriesname = show_dict.get('name') or show_dict.get('original_name') or show_dict.get('original_title') + org_title = show_dict.get('original_name') or show_dict.get('original_title') + if org_title != tv_s.seriesname: + tv_s.aliases = [org_title] + tv_s.id = show_dict.get('id') + tv_s.seriesid = tv_s.id + tv_s.language = show_dict.get('original_language') + tv_s.overview = show_dict.get('overview') + tv_s.firstaired = show_dict.get('first_air_date') + tv_s.vote_count = show_dict.get('vote_count') + tv_s.vote_average = show_dict.get('vote_average') + tv_s.popularity = show_dict.get('popularity') + tv_s.origin_countries = show_dict.get('origin_country') or [] + tv_s.genre_list = [] + for g in show_dict.get('genre_ids') or []: + if g in self.tv_genres: + tv_s.genre_list.append(self.tv_genres.get(g)) + tv_s.genre = ', '.join(tv_s.genre_list) + image_url = show_dict.get('poster_path') and '%s%s%s' % \ + (self.img_base_url, self.size_map[TVInfoImageType.poster][TVInfoImageSize.original], + show_dict.get('poster_path')) + thumb_image_url = show_dict.get('poster_path') and '%s%s%s' % \ + (self.img_base_url, self.size_map[TVInfoImageType.poster][TVInfoImageSize.small], + show_dict.get('poster_path')) + backdrop_url = show_dict.get('backdrop_path') and '%s%s%s' % \ + (self.img_base_url, self.size_map[TVInfoImageType.fanart][TVInfoImageSize.original], + show_dict.get('backdrop_path')) + tv_s.poster = image_url + tv_s.poster_thumb = thumb_image_url + tv_s.fanart = backdrop_url + tv_s.ids = TVInfoIDs(tmdb=tv_s.id) + return tv_s + + def _get_show_list(self, src_method, result_count, **kwargs): + result = [] + try: + c_page = 1 + while len(result) < result_count: + results = 
src_method(page=c_page, **kwargs) + t_pages = results.get('total_pages') + if c_page != results.get('page') or c_page >= t_pages: + break + c_page += 1 + if results and 'results' in results: + result += [self._convert_show(t) for t in results['results']] + else: + break + except (BaseException, Exception): + pass + return result[:result_count] + + def get_trending(self, result_count=100, time_window='day', **kwargs): + """ + list of trending tv shows for day or week + :param result_count: + :param time_window: valid values: 'day', 'week' + """ + t_windows = ('day', 'week')['week' == time_window] + return self._get_show_list(tmdbsimple.Trending(media_type='tv', time_window=t_windows).info, result_count) + + def get_popular(self, result_count=100, **kwargs): + return self._get_show_list(tmdbsimple.TV().popular, result_count) + + def get_top_rated(self, result_count=100, **kwargs): + return self._get_show_list(tmdbsimple.TV().top_rated, result_count) + + def discover(self, result_count=100, **kwargs): + """ + Discover TV shows by different types of data like average rating, + number of votes, genres, the network they aired on and air dates. + + Discover also supports a nice list of sort options. See below for all + of the available options. + + Also note that a number of filters support being comma (,) or pipe (|) + separated. Comma's are treated like an AND and query while pipe's are + an OR. + + Some examples of what can be done with discover can be found at + https://www.themoviedb.org/documentation/api/discover. + + kwargs: + language: (optional) ISO 639-1 code. 
+ sort_by: (optional) Available options are 'vote_average.desc', + 'vote_average.asc', 'first_air_date.desc', + 'first_air_date.asc', 'popularity.desc', 'popularity.asc' + sort_by: (optional) Allowed values: vote_average.desc, + vote_average.asc, first_air_date.desc, first_air_date.asc, + popularity.desc, popularity.asc + Default: popularity.desc + air_date.gte: (optional) Filter and only include TV shows that have + a air date (by looking at all episodes) that is greater or + equal to the specified value. + air_date.lte: (optional) Filter and only include TV shows that have + a air date (by looking at all episodes) that is less than or + equal to the specified value. + first_air_date.gte: (optional) Filter and only include TV shows + that have a original air date that is greater or equal to the + specified value. Can be used in conjunction with the + "include_null_first_air_dates" filter if you want to include + items with no air date. + first_air_date.lte: (optional) Filter and only include TV shows + that have a original air date that is less than or equal to the + specified value. Can be used in conjunction with the + "include_null_first_air_dates" filter if you want to include + items with no air date. + first_air_date_year: (optional) Filter and only include TV shows + that have a original air date year that equal to the specified + value. Can be used in conjunction with the + "include_null_first_air_dates" filter if you want to include + items with no air date. + timezone: (optional) Used in conjunction with the air_date.gte/lte + filter to calculate the proper UTC offset. Default + America/New_York. + vote_average.gte: (optional) Filter and only include movies that + have a rating that is greater or equal to the specified value. + Minimum 0. + vote_count.gte: (optional) Filter and only include movies that have + a rating that is less than or equal to the specified value. + Minimum 0. 
+ with_genres: (optional) Comma separated value of genre ids that you + want to include in the results. + with_networks: (optional) Comma separated value of network ids that + you want to include in the results. + without_genres: (optional) Comma separated value of genre ids that + you want to exclude from the results. + with_runtime.gte: (optional) Filter and only include TV shows with + an episode runtime that is greater than or equal to a value. + with_runtime.lte: (optional) Filter and only include TV shows with + an episode runtime that is less than or equal to a value. + include_null_first_air_dates: (optional) Use this filter to include + TV shows that don't have an air date while using any of the + "first_air_date" filters. + with_original_language: (optional) Specify an ISO 639-1 string to + filter results by their original language value. + without_keywords: (optional) Exclude items with certain keywords. + You can comma and pipe seperate these values to create an 'AND' + or 'OR' logic. + screened_theatrically: (optional) Filter results to include items + that have been screened theatrically. + with_companies: (optional) A comma separated list of production + company ID's. Only include movies that have one of the ID's + added as a production company. + with_keywords: (optional) A comma separated list of keyword ID's. + Only includes TV shows that have one of the ID's added as a + keyword. 
+ + :param result_count: + """ + return self._get_show_list(tmdbsimple.Discover().tv, result_count, **kwargs) + + def _get_show_data(self, sid, language, get_ep_info=False, banners=False, posters=False, seasons=False, + seasonwides=False, fanart=False, actors=False, **kwargs): + # type: (integer_types, AnyStr, bool, bool, bool, bool, bool, bool, bool, Optional[Any]) -> bool + # note: this is only working for images fetching currently + self.show_not_found = False + to_append = ['external_ids', 'alternative_titles', 'content_ratings'] + if any((banners, posters, seasons, seasonwides, fanart)): + to_append.append('images') + if actors: + to_append.append('aggregate_credits') + if get_ep_info: + to_append.append('episode_groups') + try: + tmdb = tmdbsimple.TV(sid) + show_data = tmdb.info(append_to_response=','.join(to_append)) + except (BaseException, Exception): + self.show_not_found = True + return False + + if not show_data: + self.show_not_found = True + return False + + self._set_show_data(sid, 'seriesid', show_data['id']) + + runtime = None + for r in sorted(show_data['episode_run_time'], reverse=True): + if 40 < r < 50: + runtime = r + break + if 20 < r < 40: + runtime = r + break + if not runtime and show_data['episode_run_time']: + runtime = max(show_data['episode_run_time'] or [0]) or None + self._set_show_data(sid, 'runtime', runtime) + + image_url = show_data.get('poster_path') and '%s%s%s' % \ + (self.img_base_url, self.size_map[TVInfoImageType.poster][TVInfoImageSize.original], + show_data.get('poster_path')) + if image_url: + self._set_show_data(sid, 'poster', image_url) + thumb_image_url = show_data.get('poster_path') and '%s%s%s' % \ + (self.img_base_url, self.size_map[TVInfoImageType.poster][TVInfoImageSize.small], + show_data.get('poster_path')) + self._set_show_data(sid, 'poster_thumb', thumb_image_url) + + backdrop_url = show_data.get('backdrop_path') and '%s%s%s' % \ + (self.img_base_url, 
self.size_map[TVInfoImageType.fanart][TVInfoImageSize.original], + show_data.get('backdrop_path')) + if backdrop_url: + self._set_show_data(sid, 'fanart', backdrop_url) + + self.shows[sid].genre_list = [] + for g in show_data.get('genre_ids') or []: + if g in self.tv_genres: + self.shows[sid].genre_list.append(self.tv_genres.get(g)) + self._set_show_data(sid, 'genre', ', '.join(self.shows[sid].genre_list)) + + self.shows[sid].networks = [ + TVInfoNetwork(name=n.get('name'), n_id=n.get('id'), country_code=n.get('origin_country')) + for n in show_data['networks'] or [] + ] + + if show_data['networks']: + self.shows[sid].network = show_data['networks'][0]['name'] + self.shows[sid].network_id = show_data['networks'][0].get('id') + self.shows[sid].network_country_code = show_data['networks'][0].get('origin_country') + + for k, v in iteritems(show_data): + if k in tv_show_map: + self._set_show_data(sid, tv_show_map.get(k, k), v) + + self._set_show_data(sid, 'ids', + TVInfoIDs( + tvdb=show_data['external_ids'].get('tvdb_id'), + tmdb=show_data['id'], + rage=show_data['external_ids'].get('tvrage_id'), + imdb=show_data['external_ids'].get('imdb_id') + and try_int(show_data['external_ids'].get('imdb_id', '').replace('tt', ''), None))) + self._set_show_data(sid, 'social_ids', + TVInfoSocialIDs(twitter=show_data['external_ids'].get('twitter_id'), + instagram=show_data['external_ids'].get('instagram_id'), + facebook=show_data['external_ids'].get('facebook_id'))) + if 'images' in show_data: + show_obj = self.shows[sid] # type: TVInfoShow + show_obj.poster_loaded = True + show_obj.banner_loaded = True + show_obj.fanart_loaded = True + for img_type, img_list in iteritems(show_data['images']): + img_type = {'backdrops': TVInfoImageType.fanart, 'posters': TVInfoImageType.poster}.get(img_type) + for img in img_list: + show_obj.images.setdefault(img_type, []).append( + TVInfoImage( + image_type=img_type, + sizes={ + t_s: '%s%s%s' % (self.img_base_url, self.size_map[img_type][t_s], 
# encoding:utf-8

"""Custom exceptions used or raised by tmdb_api
"""

__author__ = 'Prinz23'
__version__ = '1.0'

__all__ = ['TmdbException', 'TmdbError', 'TmdbUserabort', 'TmdbShownotfound',
           'TmdbSeasonnotfound', 'TmdbEpisodenotfound', 'TmdbAttributenotfound', 'TmdbTokenexpired']

from lib.tvinfo_base.exceptions import *


# NOTE: docstrings previously referred to tvdb_api/thetvdb.com — this module
# wraps themoviedb.org (TMDB); wording corrected accordingly.
class TmdbException(BaseTVinfoException):
    """Any exception generated by tmdb_api
    """
    pass


class TmdbError(BaseTVinfoError, TmdbException):
    """An error with themoviedb.org (Cannot connect, for example)
    """
    pass


class TmdbUserabort(BaseTVinfoUserabort, TmdbError):
    """User aborted the interactive selection (via
    the q command, ^c etc)
    """
    pass


class TmdbShownotfound(BaseTVinfoShownotfound, TmdbError):
    """Show cannot be found on themoviedb.org (non-existant show)
    """
    pass


class TmdbSeasonnotfound(BaseTVinfoSeasonnotfound, TmdbError):
    """Season cannot be found on themoviedb.org
    """
    pass


class TmdbEpisodenotfound(BaseTVinfoEpisodenotfound, TmdbError):
    """Episode cannot be found on themoviedb.org
    """
    pass


class TmdbAttributenotfound(BaseTVinfoAttributenotfound, TmdbError):
    """Raised if an episode does not have the requested
    attribute (such as a episode name)
    """
    pass


class TmdbTokenexpired(BaseTVinfoAuthenticationerror, TmdbError):
    """token expired or missing themoviedb.org
    """
    pass
"""Exception hierarchy raised by the api_trakt package.

Every error derives from TraktException so callers may catch the whole
family with a single except clause.
"""


class TraktException(Exception):
    """Base class for all Trakt API errors."""
    pass


class TraktAuthException(TraktException):
    """Authentication/authorisation with trakt.tv failed."""
    pass


class TraktServerBusy(TraktException):
    """The trakt.tv service reported it is busy."""
    pass


class TraktShowNotFound(TraktException):
    """The requested show does not exist on trakt.tv."""
    pass


class TraktCloudFlareException(TraktException):
    """CloudFlare repeatedly blocked the request (proxying issue)."""
    pass


class TraktMethodNotExisting(TraktException):
    """The requested API resource does not exist (HTTP 404)."""
    pass


class TraktTimeout(TraktException):
    """Connecting to trakt.tv timed out after all retries."""
    pass


class TraktValueError(TraktException):
    """A Trakt response contained an unexpected value."""
    pass


class TraktServerError(TraktException):
    """Server-side failure; the HTTP status is kept in `error_code`."""

    def __init__(self, *args, **kwargs):
        self.error_code = kwargs.get('error_code')
        suffix = 'Server Error: %s' % self.error_code
        if args:
            message = '%s, %s' % (args[0], suffix)
        else:
            message = suffix
        super(TraktServerError, self).__init__(message)


class TraktLockedUserAccount(TraktException):
    """trakt.tv has locked API access for this user account (HTTP 423)."""
    pass


class TraktInvalidGrant(TraktException):
    """OAuth token exchange failed with invalid_grant."""
    pass
# noinspection PyUnreachableCode
if False:
    # typing-only imports; never executed at runtime (py2-compatible pattern)
    from typing import Any, AnyStr, Dict, List, Optional, Union
    from six import integer_types

# mapping of Trakt id-namespace names to internal TVINFO_* source constants
id_map = {
    'trakt': TVINFO_TRAKT,
    'slug': TVINFO_SLUG,
    'tvdb': TVINFO_TVDB,
    'imdb': TVINFO_IMDB,
    'tmdb': TVINFO_TMDB,
    'tvrage': TVINFO_TVRAGE
}

id_map_reverse = {v: k for k, v in iteritems(id_map)}

tz_p = parser()
log = logging.getLogger('api_trakt.api')
log.addHandler(logging.NullHandler())


def _convert_imdb_id(src, s_id):
    """Return the numeric part of an IMDb id ('tt0123456' -> 123456);
    other sources (or unparsable values) are passed through unchanged."""
    if TVINFO_IMDB == src:
        try:
            return try_int(re.search(r'(\d+)', s_id).group(1), s_id)
        except (BaseException, Exception):
            pass
    return s_id


class TraktSearchTypes(object):
    """Enumeration of search modes accepted by TraktIndexer.search."""
    text = 1
    trakt_id = 'trakt'
    trakt_slug = 'trakt_slug'
    tvdb_id = 'tvdb'
    imdb_id = 'imdb'
    tmdb_id = 'tmdb'
    tvrage_id = 'tvrage'
    all = [text, trakt_id, tvdb_id, imdb_id, tmdb_id, tvrage_id, trakt_slug]

    def __init__(self):
        pass


map_id_search = {TVINFO_TVDB: TraktSearchTypes.tvdb_id, TVINFO_IMDB: TraktSearchTypes.imdb_id,
                 TVINFO_TMDB: TraktSearchTypes.tmdb_id, TVINFO_TRAKT: TraktSearchTypes.trakt_id,
                 TVINFO_TRAKT_SLUG: TraktSearchTypes.trakt_slug}


class TraktResultTypes(object):
    """Enumeration of Trakt result record types."""
    show = 'show'
    episode = 'episode'
    movie = 'movie'
    person = 'person'
    list = 'list'
    all = [show, episode, movie, person, list]

    def __init__(self):
        pass


class TraktIndexer(TVInfoBase):
    """TVInfoBase implementation backed by the trakt.tv API."""
    supported_id_searches = [TVINFO_TVDB, TVINFO_IMDB, TVINFO_TMDB, TVINFO_TRAKT, TVINFO_TRAKT_SLUG]
    supported_person_id_searches = [TVINFO_TRAKT, TVINFO_IMDB, TVINFO_TMDB]

    # noinspection PyUnusedLocal
    def __init__(self, custom_ui=None, sleep_retry=None, search_type=TraktSearchTypes.text,
                 result_types=None, *args, **kwargs):
        # fix: the previous revision used a mutable default argument
        # (result_types=[TraktResultTypes.show]); None is now the default and
        # the validation below yields the identical effective value.
        super(TraktIndexer, self).__init__(*args, **kwargs)
        self.config.update({
            'apikey': '',
            'debug_enabled': False,
            'custom_ui': custom_ui,
            'proxy': None,
            'cache_enabled': False,
            'cache_location': '',
            'valid_languages': [],
            'langabbv_to_id': {},
            'language': 'en',
            'base_url': '',
            'search_type': search_type if search_type in TraktSearchTypes.all else TraktSearchTypes.text,
            'sleep_retry': sleep_retry,
            'result_types': result_types if isinstance(result_types, list) and all(
                [x in TraktResultTypes.all for x in result_types]) else [TraktResultTypes.show],
        })

    @staticmethod
    def _make_result_obj(shows, results):
        """Append shows to results (deduped on trakt id), replacing the raw
        'ids' dict with a TVInfoIDs object."""
        if shows:
            try:
                for s in shows:
                    if s['ids']['trakt'] not in [i['ids'].trakt for i in results]:
                        s['id'] = s['ids']['trakt']
                        s['ids'] = TVInfoIDs(
                            trakt=s['ids']['trakt'], tvdb=s['ids']['tvdb'], tmdb=s['ids']['tmdb'],
                            rage=s['ids']['tvrage'],
                            imdb=s['ids']['imdb'] and try_int(s['ids']['imdb'].replace('tt', ''), None))
                        results.append(s)
            except (BaseException, Exception) as e:
                log.debug('Error creating result dict: %s' % ex(e))

    def _search_show(self, name=None, ids=None, **kwargs):
        # type: (AnyStr, Dict[integer_types, integer_types], Optional[Any]) -> List[TVInfoShow]
        """This searches Trakt for the series name,
        If a custom_ui UI is configured, it uses this to select the correct
        series.
        """
        results = []
        if ids:
            for t, p in iteritems(ids):
                if t in self.supported_id_searches:
                    if t in (TVINFO_TVDB, TVINFO_IMDB, TVINFO_TMDB, TVINFO_TRAKT, TVINFO_TRAKT_SLUG):
                        cache_id_key = 's-id-%s-%s' % (t, p)
                        is_none, shows = self._get_cache_entry(cache_id_key)
                        if not self.config.get('cache_search') or (None is shows and not is_none):
                            try:
                                show = self.search(p, search_type=map_id_search[t])
                            except (BaseException, Exception):
                                continue
                            self._set_cache_entry(cache_id_key, show, expire=self.search_cache_expire)
                        else:
                            show = shows
                    else:
                        continue
                    self._make_result_obj(show, results)
        if name:
            names = ([name], name)[isinstance(name, list)]
            len_names = len(names)
            for i, n in enumerate(names, 1):
                cache_name_key = 's-name-%s' % n
                is_none, shows = self._get_cache_entry(cache_name_key)
                if not self.config.get('cache_search') or (None is shows and not is_none):
                    try:
                        all_series = self.search(n)
                        self._set_cache_entry(cache_name_key, all_series, expire=self.search_cache_expire)
                    except (BaseException, Exception):
                        all_series = []
                else:
                    all_series = shows
                if not isinstance(all_series, list):
                    all_series = [all_series]

                if i == len_names and 0 == len(all_series) and not results:
                    log.debug('Series result returned zero')
                    # fix: message previously said "TVDB" although this is the Trakt indexer
                    raise BaseTVinfoShownotfound('Show-name search returned zero results (cannot find show on Trakt)')

                if all_series:
                    if None is not self.config['custom_ui']:
                        log.debug('Using custom UI %s' % self.config['custom_ui'].__name__)
                        custom_ui = self.config['custom_ui']
                        ui = custom_ui(config=self.config)
                        self._make_result_obj(ui.select_series(all_series), results)
                    else:
                        self._make_result_obj(all_series, results)

        # de-duplicate on id, keeping first occurrence order
        seen = set()
        results = [seen.add(r['id']) or r for r in results if r['id'] not in seen]
        return results

    @staticmethod
    def _dict_prevent_none(d, key, default):
        """dict.get that also maps an explicit None value to the default."""
        v = None
        if isinstance(d, dict):
            v = d.get(key, default)
        return (v, default)[None is v]

    def search(self, series, search_type=None):
        # type: (AnyStr, Union[int, AnyStr]) -> List
        """Query Trakt for `series` using the configured (or given) search
        type and return a list of cleaned raw result dicts."""
        search_type = search_type or self.config['search_type']
        if TraktSearchTypes.trakt_slug == search_type:
            url = '/shows/%s?extended=full' % series
        elif TraktSearchTypes.text != search_type:
            url = '/search/%s/%s?type=%s&extended=full&limit=100' % (search_type, (series, 'tt%07d' % series)[
                TraktSearchTypes.imdb_id == search_type and not str(series).startswith('tt')],
                ','.join(self.config['result_types']))
        else:
            url = '/search/%s?query=%s&extended=full&limit=100' % (','.join(self.config['result_types']), series)
        filtered = []
        kwargs = {}
        if None is not self.config['sleep_retry']:
            kwargs['sleep_retry'] = self.config['sleep_retry']
        try:
            from sickbeard.helpers import clean_data
            resp = TraktAPI().trakt_request(url, failure_monitor=False, raise_skip_exception=False, **kwargs)
            if len(resp):
                if isinstance(resp, dict):
                    # slug lookups return a single show dict; normalise to a list
                    resp = [{'type': 'show', 'score': 1, 'show': resp}]
                for d in resp:
                    if isinstance(d, dict) and 'type' in d and d['type'] in self.config['result_types']:
                        for k, v in iteritems(d):
                            d[k] = clean_data(v)
                        if 'show' in d and TraktResultTypes.show == d['type']:
                            d.update(d['show'])
                            del d['show']
                            d['seriesname'] = self._dict_prevent_none(d, 'title', '')
                            d['genres_list'] = d.get('genres', [])
                            d['genres'] = ', '.join(['%s' % v for v in d.get('genres', []) or [] if v])
                            d['firstaired'] = (d.get('first_aired') and
                                               re.sub(r'T.*$', '', str(d.get('first_aired'))) or d.get('year'))
                        filtered.append(d)
        except (ConnectionSkipException, TraktException) as e:
            log.debug('Could not connect to Trakt service: %s' % ex(e))

        return filtered

    @staticmethod
    def _convert_person_obj(person_obj):
        # type: (Dict) -> Person
        """Convert a raw Trakt person dict into a Person object."""
        try:
            birthdate = person_obj['birthday'] and tz_p.parse(person_obj['birthday']).date()
        except (BaseException, Exception):
            birthdate = None
        try:
            deathdate = person_obj['death'] and tz_p.parse(person_obj['death']).date()
        except (BaseException, Exception):
            deathdate = None

        return Person(p_id=person_obj['ids']['trakt'],
                      name=person_obj['name'],
                      bio=person_obj['biography'],
                      birthdate=birthdate,
                      deathdate=deathdate,
                      homepage=person_obj['homepage'],
                      birthplace=person_obj['birthplace'],
                      social_ids={TVINFO_TWITTER: person_obj['social_ids']['twitter'],
                                  TVINFO_FACEBOOK: person_obj['social_ids']['facebook'],
                                  TVINFO_INSTAGRAM: person_obj['social_ids']['instagram'],
                                  TVINFO_WIKIPEDIA: person_obj['social_ids']['wikipedia']
                                  },
                      ids={TVINFO_TRAKT: person_obj['ids']['trakt'], TVINFO_SLUG: person_obj['ids']['slug'],
                           TVINFO_IMDB:
                               person_obj['ids']['imdb'] and
                               try_int(person_obj['ids']['imdb'].replace('nm', ''), None),
                           TVINFO_TMDB: person_obj['ids']['tmdb'],
                           TVINFO_TVRAGE: person_obj['ids']['tvrage']})

    def get_person(self, p_id, get_show_credits=False, get_images=False, **kwargs):
        # type: (integer_types, bool, bool, Any) -> Optional[Person]
        """
        get person's data for id or list of matching persons for name

        :param p_id: persons id
        :param get_show_credits: get show credits (only for native id)
        :param get_images: get images for person
        :return: person object
        """
        if not p_id:
            return

        urls = [('/people/%s?extended=full' % p_id, False)]
        if get_show_credits:
            urls.append(('/people/%s/shows?extended=full' % p_id, True))

        result = None

        for url, show_credits in urls:
            try:
                cache_key_name = 'p-%s-%s' % (('main', 'credits')[show_credits], p_id)
                is_none, resp = self._get_cache_entry(cache_key_name)
                if None is resp and not is_none:
                    resp = TraktAPI().trakt_request(url, **kwargs)
                    self._set_cache_entry(cache_key_name, resp)
                if resp:
                    if show_credits:
                        pc = []
                        for c in resp.get('cast') or []:
                            show = TVInfoShow()
                            show.id = c['show']['ids'].get('trakt')
                            show.seriesname = c['show']['title']
                            show.ids = TVInfoIDs(ids={id_map[src]: _convert_imdb_id(id_map[src], sid)
                                                      for src, sid in iteritems(c['show']['ids'])
                                                      if src in id_map})
                            show.network = c['show']['network']
                            show.firstaired = c['show']['first_aired']
                            show.overview = c['show']['overview']
                            show.status = c['show']['status']
                            show.imdb_id = c['show']['ids'].get('imdb')
                            show.runtime = c['show']['runtime']
                            show.genre_list = c['show']['genres']
                            for ch in c.get('characters') or []:
                                pc.append(
                                    Character(
                                        name=ch, regular=c.get('series_regular'),
                                        show=show
                                    )
                                )
                        # fix: previously `result.characters = pc` ran unconditionally and
                        # raised AttributeError when the main profile request returned nothing
                        if None is not result:
                            result.characters = pc
                    else:
                        result = self._convert_person_obj(resp)
            except ConnectionSkipException as e:
                raise e
            except TraktException as e:
                log.debug('Could not connect to Trakt service: %s' % ex(e))
        return result

    def _search_person(self, name=None, ids=None):
        # type: (AnyStr, Dict[integer_types, integer_types]) -> List[Person]
        """Search for persons by external id and/or free-text name."""
        urls, result, ids = [], [], ids or {}
        for tv_src in self.supported_person_id_searches:
            if tv_src in ids:
                if TVINFO_TRAKT == tv_src:
                    url = '/people/%s?extended=full' % ids.get(tv_src)
                elif tv_src in (TVINFO_IMDB, TVINFO_TMDB):
                    url = '/search/%s/%s?type=person&extended=full&limit=100' % \
                          (id_map_reverse[tv_src], (ids.get(tv_src), 'nm%07d' % ids.get(tv_src))[TVINFO_IMDB == tv_src])
                else:
                    continue
                urls.append((tv_src, ids.get(tv_src), url))
        if name:
            urls.append(('text', name, '/search/person?query=%s&extended=full&limit=100' % name))

        for src, s_id, url in urls:
            try:
                cache_key_name = 'p-src-%s-%s' % (src, s_id)
                is_none, resp = self._get_cache_entry(cache_key_name)
                if None is resp and not is_none:
                    resp = TraktAPI().trakt_request(url)
                    self._set_cache_entry(cache_key_name, resp)
                if resp:
                    # /people lookups return one bare dict; wrap to the search shape
                    for per in (resp, [{'person': resp, 'type': 'person'}])[url.startswith('/people')]:
                        if 'person' != per['type']:
                            continue
                        person = per['person']
                        if not any(1 for p in result if person['ids']['trakt'] == p.id):
                            result.append(self._convert_person_obj(person))
            except ConnectionSkipException as e:
                raise e
            except TraktException as e:
                log.debug('Could not connect to Trakt service: %s' % ex(e))

        return result
ConnectionSkipException as e: + raise e + except TraktException as e: + log.debug('Could not connect to Trakt service: %s' % ex(e)) + + return result diff --git a/lib/api_trakt/trakt.py b/lib/api_trakt/trakt.py new file mode 100644 index 0000000..589f14c --- /dev/null +++ b/lib/api_trakt/trakt.py @@ -0,0 +1,381 @@ +import requests +import certifi +import json +import sickbeard +import time +import datetime +import logging +from exceptions_helper import ex, ConnectionSkipException +from sg_helpers import get_url, try_int + +from .exceptions import * + +# noinspection PyUnreachableCode +if False: + from typing import Any, AnyStr, Dict + +log = logging.getLogger('api_trakt') +log.addHandler(logging.NullHandler()) + + +class TraktAccount(object): + max_auth_fail = 9 + + def __init__(self, account_id=None, token='', refresh_token='', auth_fail=0, last_fail=None, token_valid_date=None): + self.account_id = account_id + self._name = '' + self._slug = '' + self.token = token + self.refresh_token = refresh_token + self.auth_fail = auth_fail + self.last_fail = last_fail + self.token_valid_date = token_valid_date + + def get_name_slug(self): + try: + resp = TraktAPI().trakt_request('users/settings', send_oauth=self.account_id, sleep_retry=20) + self.reset_auth_failure() + if 'user' in resp: + self._name = resp['user']['username'] + self._slug = resp['user']['ids']['slug'] + except TraktAuthException: + self.inc_auth_failure() + self._name = '' + except TraktException: + pass + + @property + def slug(self): + if self.token and self.active: + if not self._slug: + self.get_name_slug() + else: + self._slug = '' + return self._slug + + @property + def name(self): + if self.token and self.active: + if not self._name: + self.get_name_slug() + else: + self._name = '' + + return self._name + + def reset_name(self): + self._name = '' + + @property + def active(self): + return self.auth_fail < self.max_auth_fail and self.token + + @property + def needs_refresh(self): + return not 
            self.token_valid_date or self.token_valid_date - datetime.datetime.now() < datetime.timedelta(days=3)

    @property
    def token_expired(self):
        # True once the stored expiry timestamp is in the past
        return self.token_valid_date and self.token_valid_date < datetime.datetime.now()

    def reset_auth_failure(self):
        # clear the failure counter after a successful authenticated call
        if 0 != self.auth_fail:
            self.auth_fail = 0
            self.last_fail = None

    def inc_auth_failure(self):
        self.auth_fail += 1
        self.last_fail = datetime.datetime.now()

    def auth_failure(self):
        # Record an auth failure with back-off: every 3rd failure needs a day
        # between increments, otherwise 15 minutes; config is persisted at the
        # cap or after long gaps so the counter survives restarts.
        if self.auth_fail < self.max_auth_fail:
            if self.last_fail:
                time_diff = datetime.datetime.now() - self.last_fail
                if 0 == self.auth_fail % 3:
                    if datetime.timedelta(days=1) < time_diff:
                        self.inc_auth_failure()
                        sickbeard.save_config()
                elif datetime.timedelta(minutes=15) < time_diff:
                    self.inc_auth_failure()
                    if self.auth_fail == self.max_auth_fail or datetime.timedelta(hours=6) < time_diff:
                        sickbeard.save_config()
            else:
                self.inc_auth_failure()


class TraktAPI(object):
    """Thin client for the trakt.tv HTTP API (OAuth account management plus
    request plumbing with retry/back-off)."""
    max_retrys = 3

    def __init__(self, timeout=None):
        # type: (int) -> None

        self.session = requests.Session()
        # verify is False when TRAKT_VERIFY is off, else the certifi CA bundle path
        self.verify = sickbeard.TRAKT_VERIFY and certifi.where()
        self.timeout = timeout or sickbeard.TRAKT_TIMEOUT
        self.auth_url = sickbeard.TRAKT_BASE_URL
        self.api_url = sickbeard.TRAKT_BASE_URL
        self.headers = {'Content-Type': 'application/json',
                        'trakt-api-version': '2',
                        'trakt-api-key': sickbeard.TRAKT_CLIENT_ID}

    @staticmethod
    def build_config_string(data):
        # Serialise {account_id: TraktAccount} to the '|'-separated, '!!!'-joined
        # config format; '0' stands in for missing datetimes.
        return '!!!'.join('%s|%s|%s|%s|%s|%s' % (
            value.account_id, value.token, value.refresh_token, value.auth_fail,
            value.last_fail.strftime('%Y%m%d%H%M') if value.last_fail else '0',
            value.token_valid_date.strftime('%Y%m%d%H%M%S') if value.token_valid_date else '0')
            for (key, value) in data.items())

    @staticmethod
    def read_config_string(data):
        # Inverse of build_config_string: parse the config string back into
        # a dict of TraktAccount objects keyed by account id.
        return dict((int(a.split('|')[0]), TraktAccount(
            int(a.split('|')[0]), a.split('|')[1], a.split('|')[2], int(a.split('|')[3]),
            datetime.datetime.strptime(a.split('|')[4], '%Y%m%d%H%M') if a.split('|')[4] != '0' else
            None,
            datetime.datetime.strptime(a.split('|')[5], '%Y%m%d%H%M%S') if a.split('|')[5] != '0' else None))
            for a in data.split('!!!') if data)

    @staticmethod
    def add_account(token, refresh_token, token_valid_date):
        # next free numeric account id
        k = max(sickbeard.TRAKT_ACCOUNTS.keys() or [0]) + 1
        sickbeard.TRAKT_ACCOUNTS[k] = TraktAccount(account_id=k, token=token, refresh_token=refresh_token,
                                                   token_valid_date=token_valid_date)
        sickbeard.save_config()
        return k

    @staticmethod
    def replace_account(account, token, refresh_token, token_valid_date, refresh):
        # Update tokens of an existing account; returns False when unknown.
        if account in sickbeard.TRAKT_ACCOUNTS:
            sickbeard.TRAKT_ACCOUNTS[account].token = token
            sickbeard.TRAKT_ACCOUNTS[account].refresh_token = refresh_token
            sickbeard.TRAKT_ACCOUNTS[account].token_valid_date = token_valid_date
            if not refresh:
                # fresh PIN auth: force the cached username to be re-fetched
                sickbeard.TRAKT_ACCOUNTS[account].reset_name()
            sickbeard.TRAKT_ACCOUNTS[account].reset_auth_failure()
            sickbeard.save_config()
            return True
        return False

    @staticmethod
    def delete_account(account):
        # Revoke the token remotely (best effort) and drop the account locally.
        if account in sickbeard.TRAKT_ACCOUNTS:
            try:
                TraktAPI().trakt_request('/oauth/revoke', send_oauth=account, method='POST')
            except TraktException:
                log.info('Failed to remove account from trakt.tv')
            sickbeard.TRAKT_ACCOUNTS.pop(account)
            sickbeard.save_config()
            return True
        return False

    def trakt_token(self, trakt_pin=None, refresh=False, count=0, account=None):
        # Exchange a PIN for tokens, or refresh an existing account's token.
        # Returns an account id (add) / True (replace) on success, False otherwise.
        if self.max_retrys <= count:
            return False
        # brief pause between retries
        0 < count and time.sleep(3)

        data = {
            'client_id': sickbeard.TRAKT_CLIENT_ID,
            'client_secret': sickbeard.TRAKT_CLIENT_SECRET,
            'redirect_uri': 'urn:ietf:wg:oauth:2.0:oob'
        }

        if refresh:
            if None is not account and account in sickbeard.TRAKT_ACCOUNTS:
                data['grant_type'] = 'refresh_token'
                data['refresh_token'] = sickbeard.TRAKT_ACCOUNTS[account].refresh_token
            else:
                return False
        else:
            data['grant_type'] = 'authorization_code'
            if trakt_pin:
                data['code'] = trakt_pin

        headers = {'Content-Type': 'application/json'}

        try:
            now
= datetime.datetime.now() + resp = self.trakt_request('oauth/token', data=data, headers=headers, url=self.auth_url, + count=count, sleep_retry=0) + except TraktInvalidGrant: + if None is not account and account in sickbeard.TRAKT_ACCOUNTS: + sickbeard.TRAKT_ACCOUNTS[account].token = '' + sickbeard.TRAKT_ACCOUNTS[account].refresh_token = '' + sickbeard.TRAKT_ACCOUNTS[account].token_valid_date = None + sickbeard.save_config() + return False + except (TraktAuthException, TraktException): + return False + + if 'access_token' in resp and 'refresh_token' in resp and 'expires_in' in resp: + token_valid_date = now + datetime.timedelta(seconds=try_int(resp['expires_in'])) + if refresh or (not refresh and None is not account and account in sickbeard.TRAKT_ACCOUNTS): + return self.replace_account(account, resp['access_token'], resp['refresh_token'], + token_valid_date, refresh) + return self.add_account(resp['access_token'], resp['refresh_token'], token_valid_date) + + return False + + def trakt_request(self, path, data=None, headers=None, url=None, count=0, sleep_retry=60, + send_oauth=None, method=None, raise_skip_exception=True, failure_monitor=True, **kwargs): + # type: (AnyStr, Dict, Dict, AnyStr, int, int, AnyStr, AnyStr, bool, bool, Any) -> Dict + + if method not in ['GET', 'POST', 'PUT', 'DELETE', None]: + return {} + if None is method: + method = ('GET', 'POST')['data' in kwargs.keys() or None is not data] + if 'oauth/token' != path and None is send_oauth and method in ['POST', 'PUT', 'DELETE']: + return {} + + count += 1 + if count > self.max_retrys: + return {} + + # wait before retry + if 'users/settings' != path: + 1 < count and time.sleep(sleep_retry) + + headers = headers or self.headers + if None is not send_oauth and send_oauth in sickbeard.TRAKT_ACCOUNTS: + if sickbeard.TRAKT_ACCOUNTS[send_oauth].active: + if sickbeard.TRAKT_ACCOUNTS[send_oauth].needs_refresh: + self.trakt_token(refresh=True, count=0, account=send_oauth) + if 
sickbeard.TRAKT_ACCOUNTS[send_oauth].token_expired or \ + not sickbeard.TRAKT_ACCOUNTS[send_oauth].active: + return {} + headers['Authorization'] = 'Bearer %s' % sickbeard.TRAKT_ACCOUNTS[send_oauth].token + else: + return {} + + kwargs = dict(headers=headers, timeout=self.timeout, verify=self.verify) + if data: + kwargs['data'] = json.dumps(data) + + url = url or self.api_url + try: + resp = get_url('%s%s' % (url, path), session=self.session, use_method=method, return_response=True, + raise_exceptions=True, raise_status_code=True, raise_skip_exception=raise_skip_exception, + failure_monitor=failure_monitor, **kwargs) + # resp = self.session.request(method, '%s%s' % (url, path), **kwargs) + + if 'DELETE' == method: + result = None + if 204 == resp.status_code: + result = {'result': 'success'} + elif 404 == resp.status_code: + result = {'result': 'failed'} + if result and None is not send_oauth and send_oauth in sickbeard.TRAKT_ACCOUNTS: + sickbeard.TRAKT_ACCOUNTS[send_oauth].reset_auth_failure() + return result + resp.raise_for_status() + return {} + + # check for http errors and raise if any are present + resp.raise_for_status() + + # convert response to json + resp = resp.json() + + except requests.RequestException as e: + code = getattr(e.response, 'status_code', None) + if not code: + if 'timed out' in ex(e): + log.warning(u'Timeout connecting to Trakt') + if count >= self.max_retrys: + raise TraktTimeout() + return self.trakt_request(path, data, headers, url, count=count, sleep_retry=sleep_retry, + send_oauth=send_oauth, method=method) + # This is pretty much a fatal error if there is no status_code + # It means there basically was no response at all + else: + log.warning(u'Could not connect to Trakt. Error: %s' % ex(e)) + raise TraktException('Could not connect to Trakt. 
Error: %s' % ex(e)) + + elif 502 == code: + # Retry the request, Cloudflare had a proxying issue + log.warning(u'Retrying Trakt api request: %s' % path) + if count >= self.max_retrys: + raise TraktCloudFlareException() + return self.trakt_request(path, data, headers, url, count=count, sleep_retry=sleep_retry, + send_oauth=send_oauth, method=method) + + elif 401 == code and 'oauth/token' != path: + if None is not send_oauth: + if sickbeard.TRAKT_ACCOUNTS[send_oauth].needs_refresh: + if self.trakt_token(refresh=True, count=count, account=send_oauth): + return self.trakt_request(path, data, headers, url, count=count, sleep_retry=sleep_retry, + send_oauth=send_oauth, method=method) + + log.warning(u'Unauthorized. Please check your Trakt settings') + sickbeard.TRAKT_ACCOUNTS[send_oauth].auth_failure() + raise TraktAuthException() + + # sometimes the trakt server sends invalid token error even if it isn't + sickbeard.TRAKT_ACCOUNTS[send_oauth].auth_failure() + if count >= self.max_retrys: + raise TraktAuthException() + + return self.trakt_request(path, data, headers, url, count=count, sleep_retry=sleep_retry, + send_oauth=send_oauth, method=method) + + raise TraktAuthException() + elif code in (500, 501, 503, 504, 520, 521, 522): + if count >= self.max_retrys: + log.warning(u'Trakt may have some issues and it\'s unavailable. Code: %s' % code) + raise TraktServerError(error_code=code) + # http://docs.trakt.apiary.io/#introduction/status-codes + log.warning(u'Trakt may have some issues and it\'s unavailable. 
Trying again') + return self.trakt_request(path, data, headers, url, count=count, sleep_retry=sleep_retry, + send_oauth=send_oauth, method=method) + elif 404 == code: + log.warning(u'Trakt error (404) the resource does not exist: %s%s' % (url, path)) + raise TraktMethodNotExisting('Trakt error (404) the resource does not exist: %s%s' % (url, path)) + elif 429 == code: + if count >= self.max_retrys: + log.warning(u'Trakt replied with Rate-Limiting, maximum retries exceeded.') + raise TraktServerError(error_code=code) + r_headers = getattr(e.response, 'headers', None) + if None is not r_headers: + wait_seconds = min(try_int(r_headers.get('Retry-After', 60), 60), 150) + else: + wait_seconds = 60 + log.warning('Trakt replied with Rate-Limiting, waiting %s seconds.' % wait_seconds) + wait_seconds = (wait_seconds, 60)[0 > wait_seconds] + wait_seconds -= sleep_retry + if 0 < wait_seconds: + time.sleep(wait_seconds) + return self.trakt_request(path, data, headers, url, count=count, sleep_retry=sleep_retry, + send_oauth=send_oauth, method=method) + elif 423 == code: + # locked account + log.error('An application that is NOT SickGear has flooded the Trakt API and they have locked access' + ' to your account. They request you contact their support at https://support.trakt.tv/' + ' This is not a fault of SickGear because it does *not* sync data or send the type of data' + ' that triggers a Trakt access lock.' + ' SickGear may only send a notification on a media process completion if set up for it.') + raise TraktLockedUserAccount() + elif 400 == code and 'invalid_grant' in getattr(e, 'text', ''): + raise TraktInvalidGrant('Error: invalid_grant. The provided authorization grant is invalid, expired, ' + 'revoked, does not match the redirection URI used in the authorization request,' + ' or was issued to another client.') + else: + log.error(u'Could not connect to Trakt. Code error: {0}'.format(code)) + raise TraktException('Could not connect to Trakt. 
Code error: %s' % code) + except ConnectionSkipException as e: + log.error('Failure handling error') + raise e + except ValueError as e: + log.error(u'Value Error: %s' % ex(e)) + raise TraktValueError(u'Value Error: %s' % ex(e)) + except (BaseException, Exception) as e: + log.error('Exception: %s' % ex(e)) + raise TraktException('Could not connect to Trakt. Code error: %s' % ex(e)) + + # check and confirm Trakt call did not fail + if isinstance(resp, dict) and 'failure' == resp.get('status', None): + if 'message' in resp: + raise TraktException(resp['message']) + if 'error' in resp: + raise TraktException(resp['error']) + raise TraktException('Unknown Error') + + if None is not send_oauth and send_oauth in sickbeard.TRAKT_ACCOUNTS: + sickbeard.TRAKT_ACCOUNTS[send_oauth].reset_auth_failure() + return resp diff --git a/lib/api_tvdb/UNLICENSE b/lib/api_tvdb/UNLICENSE new file mode 100644 index 0000000..c4205d4 --- /dev/null +++ b/lib/api_tvdb/UNLICENSE @@ -0,0 +1,26 @@ +Copyright 2011-2012 Ben Dickson (dbr) + +This is free and unencumbered software released into the public domain. + +Anyone is free to copy, modify, publish, use, compile, sell, or +distribute this software, either in source code form or as a compiled +binary, for any purpose, commercial or non-commercial, and by any +means. + +In jurisdictions that recognize copyright laws, the author or authors +of this software dedicate any and all copyright interest in the +software to the public domain. We make this dedication for the benefit +of the public at large and to the detriment of our heirs and +successors. We intend this dedication to be an overt act of +relinquishment in perpetuity of all present and future rights to this +software under copyright law. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, +EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF +MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
+IN NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY CLAIM, DAMAGES OR +OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, +ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR +OTHER DEALINGS IN THE SOFTWARE. + +For more information, please refer to diff --git a/lib/api_tvdb/__init__.py b/lib/api_tvdb/__init__.py new file mode 100644 index 0000000..8b13789 --- /dev/null +++ b/lib/api_tvdb/__init__.py @@ -0,0 +1 @@ + diff --git a/lib/api_tvdb/tvdb_api.py b/lib/api_tvdb/tvdb_api.py new file mode 100644 index 0000000..3c73441 --- /dev/null +++ b/lib/api_tvdb/tvdb_api.py @@ -0,0 +1,1262 @@ +# !/usr/bin/env python2 +# encoding:utf-8 +# author:dbr/Ben +# project:tvdb_api +# repository:http://github.com/dbr/tvdb_api +# license:un license (http://unlicense.org/) + +from __future__ import absolute_import +from functools import wraps + +__author__ = 'dbr/Ben' +__version__ = '2.0' +__api_version__ = '3.0.0' + +import copy +import datetime +import getpass +import logging +import os +import random +import re +import requests +import requests.exceptions +import tempfile +import time +import warnings + +from bs4_parser import BS4Parser +from collections import OrderedDict +from sg_helpers import clean_data, get_url, try_int +from sickbeard import ENV + +from lib.cachecontrol import CacheControl, caches +from lib.dateutil.parser import parse +from lib.exceptions_helper import ConnectionSkipException +from lib.tvinfo_base import CastList, Character, CrewList, Person, RoleTypes, \ + TVINFO_TVDB, TVINFO_TVDB_SLUG, TVInfoBase, TVInfoIDs + +from .tvdb_exceptions import TvdbError, TvdbShownotfound, TvdbTokenexpired +from .tvdb_ui import BaseUI, ConsoleUI + +from _23 import filter_list, list_keys, list_values, map_list +from six import integer_types, iteritems, PY2, string_types + +# noinspection PyUnreachableCode +if False: + # noinspection PyUnresolvedReferences + from typing import Any, AnyStr, Dict, List, Optional, Union + from lib.tvinfo_base import 
TVInfoShow + + +THETVDB_V2_API_TOKEN = {'token': None, 'datetime': datetime.datetime.fromordinal(1)} +log = logging.getLogger('tvdb.api') +log.addHandler(logging.NullHandler()) + + +# noinspection PyUnusedLocal +def _record_hook(r, *args, **kwargs): + r.hook_called = True + if 301 == r.status_code and isinstance(r.headers.get('Location'), string_types) \ + and r.headers.get('Location').startswith('http://api.thetvdb.com/'): + r.headers['Location'] = r.headers['Location'].replace('http://', 'https://') + return r + + +def retry(exception_to_check, tries=4, delay=3, backoff=2): + """Retry calling the decorated function using an exponential backoff. + + http://www.saltycrane.com/blog/2009/11/trying-out-retry-decorator-python/ + original from: http://wiki.python.org/moin/PythonDecoratorLibrary#Retry + + :param exception_to_check: the exception to check. may be a tuple of + exceptions to check + :type exception_to_check: Exception or tuple + :param tries: number of times to try (not retry) before giving up + :type tries: int + :param delay: initial delay between retries in seconds + :type delay: int + :param backoff: backoff multiplier e.g. value of 2 will double the delay + each retry + :type backoff: int + """ + + def deco_retry(f): + + @wraps(f) + def f_retry(*args, **kwargs): + mtries, mdelay = tries, delay + auth_error = 0 + while 1 < mtries: + try: + return f(*args, **kwargs) + except exception_to_check as e: + msg = '%s, Retrying in %d seconds...' 
% (str(e), mdelay) + log.warning(msg) + time.sleep(mdelay) + if isinstance(e, TvdbTokenexpired) and not auth_error: + auth_error += 1 + else: + mtries -= 1 + mdelay *= backoff + except ConnectionSkipException as e: + raise e + try: + return f(*args, **kwargs) + except TvdbTokenexpired: + if not auth_error: + return f(*args, **kwargs) + raise TvdbTokenexpired + except ConnectionSkipException as e: + raise e + + return f_retry # true decorator + + return deco_retry + + +class Actors(list): + """Holds all Actor instances for a show + """ + pass + + +class Actor(dict): + """Represents a single actor. Should contain.. + + id, + image, + name, + role, + sortorder + """ + + def __repr__(self): + return '' % self.get('name') + + +class Tvdb(TVInfoBase): + """Create easy-to-use interface to name of season/episode name + >> t = Tvdb() + >> t['Scrubs'][1][24]['episodename'] + u'My Last Day' + """ + supported_id_searches = [TVINFO_TVDB, TVINFO_TVDB_SLUG] + + # noinspection PyUnusedLocal + def __init__(self, + interactive=False, + select_first=False, + debug=False, + cache=True, + banners=False, + fanart=False, + posters=False, + seasons=False, + seasonwides=False, + actors=False, + custom_ui=None, + language=None, + search_all_languages=False, + apikey=None, + dvdorder=False, + proxy=None, + *args, + **kwargs): + + """interactive (True/False): + When True, uses built-in console UI is used to select the correct show. + When False, the first search result is used. + + select_first (True/False): + Automatically selects the first series search result (rather + than showing the user a list of more than one series). + Is overridden by interactive = False, or specifying a custom_ui + + debug (True/False) DEPRECATED: + Replaced with proper use of logging module. To show debug messages: + + >> import logging + >> logging.basicConfig(level = logging.DEBUG) + + cache (True/False/str/unicode/urllib2 opener): + Retrieved XML are persisted to to disc. 
If true, stores in + tvdb_api folder under your systems TEMP_DIR, if set to + str/unicode instance it will use this as the cache + location. If False, disables caching. Can also be passed + an arbitrary Python object, which is used as a urllib2 + opener, which should be created by urllib2.build_opener + + banners (True/False): + Retrieves the banners for a show. These are accessed + via the banners key of a Show(), for example: + + >> Tvdb(banners=True)['scrubs']['banners'].keys() + ['fanart', 'poster', 'series', 'season'] + + actors (True/False): + Retrieves a list of the actors for a show. These are accessed + via the actors key of a Show(), for example: + + >> t = Tvdb(actors=True) + >> t['scrubs']['actors'][0]['name'] + u'Zach Braff' + + custom_ui (tvdb_ui.BaseUI subclass): + A callable subclass of tvdb_ui.BaseUI (overrides interactive option) + + language (2 character language abbreviation): + The language of the returned data. Is also the language search + uses. Default is "en" (English). For full list, run.. + + >> Tvdb().config['valid_languages'] #doctest: +ELLIPSIS + ['da', 'fi', 'nl', ...] + + search_all_languages (True/False): + By default, Tvdb will only search in the language specified using + the language option. When this is True, it will search for the + show in and language + + apikey (str/unicode): + Override the default thetvdb.com API key. 
By default it will use + tvdb_api's own key (fine for small scripts), but you can use your + own key if desired - this is recommended if you are embedding + tvdb_api in a larger application) + See http://thetvdb.com/?tab=apiregister to get your own key + + """ + + super(Tvdb, self).__init__(*args, **kwargs) + self.config = {} + + if None is not apikey: + self.config['apikey'] = apikey + else: + self.config['apikey'] = '0629B785CE550C8D' # tvdb_api's API key + + self.config['debug_enabled'] = debug # show debugging messages + + self.config['custom_ui'] = custom_ui + + self.config['interactive'] = interactive # prompt for correct series? + + self.config['select_first'] = select_first + + self.config['search_all_languages'] = search_all_languages + + self.config['dvdorder'] = dvdorder + + self.config['proxy'] = proxy + + if cache is True: + self.config['cache_enabled'] = True + self.config['cache_location'] = self._get_temp_dir() + elif cache is False: + self.config['cache_enabled'] = False + elif isinstance(cache, string_types): + self.config['cache_enabled'] = True + self.config['cache_location'] = cache + else: + raise ValueError('Invalid value for Cache %r (type was %s)' % (cache, type(cache))) + + self.config['banners_enabled'] = banners + self.config['posters_enabled'] = posters + self.config['seasons_enabled'] = seasons + self.config['seasonwides_enabled'] = seasonwides + self.config['fanart_enabled'] = fanart + self.config['actors_enabled'] = actors + + if self.config['debug_enabled']: + warnings.warn('The debug argument to tvdb_api.__init__ will be removed in the next version. 
' + + 'To enable debug messages, use the following code before importing: ' + + 'import logging; logging.basicConfig(level=logging.DEBUG)') + logging.basicConfig(level=logging.DEBUG) + + # List of language from http://thetvdb.com/api/0629B785CE550C8D/languages.xml + # Hard-coded here as it is relatively static, and saves another HTTP request, as + # recommended on http://thetvdb.com/wiki/index.php/API:languages.xml + self.config['valid_languages'] = [ + 'cs', 'da', 'de', 'el', 'en', 'es', 'fi', 'fr', + 'he', 'hr', 'hu', 'it', 'ja', 'ko', 'nl', 'no', + 'pl', 'pt', 'ru', 'sl', 'sv', 'tr', 'zh' + ] + + # not mapped: el, sl, tr. added as guess: fin, pol. unknown: _1 + self.config['langabbv_23'] = { + 'cs': 'ces', 'da': 'dan', 'de': 'deu', 'en': 'eng', 'es': 'spa', 'fi': 'fin', 'fr': 'fra', + 'he': 'heb', 'hr': 'hrv', 'hu': 'hun', 'it': 'ita', 'ja': 'jpn', 'ko': 'kor', 'nb': 'nor', + 'nl': 'nld', 'no': 'nor', + 'pl': 'pol', 'pt': 'pot', 'ru': 'rus', 'sk': 'slv', 'sv': 'swe', 'zh': 'zho', '_1': 'srp', + } + self.config['valid_languages_3'] = list_values(self.config['langabbv_23']) + + # TheTvdb.com should be based around numeric language codes, + # but to link to a series like http://thetvdb.com/?tab=series&id=79349&lid=16 + # requires the language ID, thus this mapping is required (mainly + # for usage in tvdb_ui - internally tvdb_api will use the language abbreviations) + self.config['langabbv_to_id'] = { + 'cs': 28, 'da': 10, 'de': 14, 'el': 20, 'en': 7, 'es': 16, 'fi': 11, 'fr': 17, + 'he': 24, 'hr': 31, 'hu': 19, 'it': 15, 'ja': 25, 'ko': 32, 'nl': 13, 'no': 9, + 'pl': 18, 'pt': 26, 'ru': 22, 'sl': 30, 'sv': 8, 'tr': 21, 'zh': 27 + } + + if not language: + self.config['language'] = 'en' + else: + if language not in self.config['valid_languages']: + raise ValueError('Invalid language %s, options are: %s' % (language, self.config['valid_languages'])) + else: + self.config['language'] = language + + # The following url_ configs are based of the + # 
http://thetvdb.com/wiki/index.php/Programmers_API + self.config['base_url'] = 'https://thetvdb.com/' + self.config['api3_url'] = 'https://api.thetvdb.com/' + + self.config['url_search_series'] = '%(api3_url)ssearch/series' % self.config + self.config['params_search_series'] = {'name': ''} + + self.config['url_series_episodes_info'] = '%(api3_url)sseries/%%s/episodes?page=%%s' % self.config + + self.config['url_series_info'] = '%(api3_url)sseries/%%s' % self.config + self.config['url_episodes_info'] = '%(api3_url)sepisodes/%%s' % self.config + self.config['url_actors_info'] = '%(api3_url)sseries/%%s/actors' % self.config + + self.config['url_series_images'] = '%(api3_url)sseries/%%s/images/query?keyType=%%s' % self.config + self.config['url_artworks'] = 'https://artworks.thetvdb.com/banners/%s' + + self.config['url_people'] = '%(base_url)speople/%%s' % self.config + self.config['url_series_people'] = '%(base_url)sseries/%%s/people' % self.config + self.config['url_series_all'] = '%(base_url)sseries/%%s/allseasons/official' % self.config + self.config['url_series_dvd'] = '%(base_url)sseries/%%s/allseasons/dvd' % self.config + self.config['url_series_abs'] = '%(base_url)sseries/%%s/seasons/absolute/1' % self.config + + def _search_show(self, name=None, ids=None, **kwargs): + # type: (AnyStr, Dict[integer_types, integer_types], Optional[Any]) -> List[TVInfoShow] + def map_data(data): + data['poster'] = data.get('image') + data['ids'] = TVInfoIDs( + tvdb=data.get('id'), + imdb=data.get('imdb_id') and try_int(data.get('imdb_id', '').replace('tt', ''), None)) + return data + + results = [] + if ids: + if ids.get(TVINFO_TVDB): + cache_id_key = 's-id-%s-%s' % (TVINFO_TVDB, ids[TVINFO_TVDB]) + is_none, shows = self._get_cache_entry(cache_id_key) + if not self.config.get('cache_search') or (None is shows and not is_none): + try: + d_m = self._get_show_data(ids.get(TVINFO_TVDB), self.config['language'], direct_data=True) + self._set_cache_entry(cache_id_key, d_m, 
expire=self.search_cache_expire) + except (BaseException, Exception): + d_m = None + else: + d_m = shows + if d_m: + results = map_list(map_data, [d_m['data']]) + if ids.get(TVINFO_TVDB_SLUG): + cache_id_key = 's-id-%s-%s' % (TVINFO_TVDB, ids[TVINFO_TVDB_SLUG]) + is_none, shows = self._get_cache_entry(cache_id_key) + if not self.config.get('cache_search') or (None is shows and not is_none): + try: + d_m = self.get_series(ids.get(TVINFO_TVDB_SLUG).replace('-', ' ')) + self._set_cache_entry(cache_id_key, d_m, expire=self.search_cache_expire) + except (BaseException, Exception): + d_m = None + else: + d_m = shows + if d_m: + for r in d_m: + if ids.get(TVINFO_TVDB_SLUG) == r['slug']: + results = map_list(map_data, [r]) + break + if name: + for n in ([name], name)[isinstance(name, list)]: + cache_name_key = 's-name-%s' % n + is_none, shows = self._get_cache_entry(cache_name_key) + if not self.config.get('cache_search') or (None is shows and not is_none): + try: + r = self.get_series(n) + self._set_cache_entry(cache_name_key, r, expire=self.search_cache_expire) + except (BaseException, Exception): + r = None + else: + r = shows + if r: + results.extend(map_list(map_data, r)) + + seen = set() + results = [seen.add(r['id']) or r for r in results if r['id'] not in seen] + return results + + def get_new_token(self): + global THETVDB_V2_API_TOKEN + token = THETVDB_V2_API_TOKEN.get('token', None) + dt = THETVDB_V2_API_TOKEN.get('datetime', datetime.datetime.fromordinal(1)) + url = '%s%s' % (self.config['api3_url'], 'login') + params = {'apikey': self.config['apikey']} + resp = get_url(url.strip(), post_json=params, parse_json=True, raise_skip_exception=True) + if resp: + if 'token' in resp: + token = resp['token'] + dt = datetime.datetime.now() + + return {'token': token, 'datetime': dt} + + def get_token(self): + global THETVDB_V2_API_TOKEN + if None is THETVDB_V2_API_TOKEN.get( + 'token') or datetime.datetime.now() - THETVDB_V2_API_TOKEN.get( + 'datetime', 
datetime.datetime.fromordinal(1)) > datetime.timedelta(hours=23): + THETVDB_V2_API_TOKEN = self.get_new_token() + if not THETVDB_V2_API_TOKEN.get('token'): + raise TvdbError('Could not get Authentification Token') + return THETVDB_V2_API_TOKEN.get('token') + + @staticmethod + def _get_temp_dir(): + """Returns the [system temp dir]/tvdb_api-u501 (or + tvdb_api-myuser) + """ + if hasattr(os, 'getuid'): + uid = 'u%d' % (os.getuid()) + else: + # For Windows + try: + uid = getpass.getuser() + except ImportError: + return os.path.join(tempfile.gettempdir(), 'tvdb_api') + + return os.path.join(tempfile.gettempdir(), 'tvdb_api-%s' % uid) + + def _match_url_pattern(self, pattern, url): + if pattern in self.config: + try: + if PY2: + return None is not re.search('^%s$' % re.escape(self.config[pattern]).replace('\\%s', '[^/]+'), url) + else: + return None is not re.search('^%s$' % re.escape(self.config[pattern]).replace(r'%s', '[^/]+'), url) + except (BaseException, Exception): + pass + return False + + def is_apikey(self, check_url=None): + return bool(self.config['apikey']) and (None is check_url or '://api' in check_url) + + @retry((TvdbError, TvdbTokenexpired)) + def _load_url(self, url, params=None, language=None, parse_json=False, **kwargs): + log.debug('Retrieving URL %s' % url) + + parse_json = parse_json or self.is_apikey(url) + session = requests.session() + + if self.config['cache_enabled']: + session = CacheControl(session, cache=caches.FileCache(self.config['cache_location'])) + + if self.config['proxy']: + log.debug('Using proxy for URL: %s' % url) + session.proxies = {'http': self.config['proxy'], 'https': self.config['proxy']} + + headers = {'Accept-Encoding': 'gzip,deflate'} + if self.is_apikey(url): + headers.update({'Authorization': 'Bearer %s' % self.get_token(), + 'Accept': 'application/vnd.thetvdb.v%s' % __api_version__}) + + if None is not language and language in self.config['valid_languages']: + headers.update({'Accept-Language': language}) + + resp = 
None + is_series_info = self._match_url_pattern('url_series_info', url) + if is_series_info: + self.show_not_found = False + self.not_found = False + try: + resp = get_url(url.strip(), params=params, session=session, headers=headers, parse_json=parse_json, + raise_status_code=True, raise_exceptions=True, raise_skip_exception=True, **kwargs) + except ConnectionSkipException as e: + raise e + except requests.exceptions.HTTPError as e: + if 401 == e.response.status_code: + if self.is_apikey(url): + # token expired, get new token, raise error to retry + global THETVDB_V2_API_TOKEN + THETVDB_V2_API_TOKEN = self.get_new_token() + raise TvdbTokenexpired + elif 404 == e.response.status_code: + if is_series_info: + self.show_not_found = True + elif self._match_url_pattern('url_series_episodes_info', url): + resp = {'data': []} + self.not_found = True + elif 404 != e.response.status_code: + raise TvdbError + except (BaseException, Exception): + raise TvdbError + + if is_series_info and isinstance(resp, dict) and isinstance(resp.get('data'), dict) and \ + isinstance(resp['data'].get('seriesName'), string_types) and \ + re.search(r'^[*]\s*[*]\s*[*]', resp['data'].get('seriesName', ''), flags=re.I): + self.show_not_found = True + self.not_found = True + + map_show = {'airstime': 'airs_time', 'airsdayofweek': 'airs_dayofweek', 'imdbid': 'imdb_id', + 'writers': 'writer', 'siterating': 'rating'} + + def map_show_keys(data): + keep_data = {} + del_keys = [] + new_data = {} + for k, v in iteritems(data): + k_org = k + k = k.lower() + if None is not v: + if k in ['banner', 'fanart', 'poster'] and v: + v = self.config['url_artworks'] % v + elif 'genre' == k: + keep_data['genre_list'] = v + v = '|%s|' % '|'.join([clean_data(c) for c in v if isinstance(c, string_types)]) + elif 'gueststars' == k: + keep_data['gueststars_list'] = v + v = '|%s|' % '|'.join([clean_data(c) for c in v if isinstance(c, string_types)]) + elif 'writers' == k: + keep_data[k] = v + v = '|%s|' % 
'|'.join([clean_data(c) for c in v if isinstance(c, string_types)]) + elif 'rating' == k: + new_data['contentrating'] = v + elif 'firstaired' == k: + if v: + try: + v = parse(v, fuzzy=True).strftime('%Y-%m-%d') + except (BaseException, Exception): + v = None + else: + v = None + elif 'imdbid' == k: + if v: + if re.search(r'^(tt)?\d{1,9}$', v, flags=re.I): + v = clean_data(v) + else: + v = '' + else: + v = clean_data(v) + + if not v and 'seriesname' == k: + if isinstance(data.get('aliases'), list) and 0 < len(data.get('aliases')): + v = data['aliases'].pop(0) + # this is a invalid show, it has no Name + if not v: + return None + + if k in map_show: + k = map_show[k] + if k_org is not k: + del_keys.append(k_org) + new_data[k] = v + else: + data[k] = v + for d in del_keys: + del (data[d]) + if isinstance(data, dict): + data.update(new_data) + data.update(keep_data) + return data + + if resp and isinstance(resp, dict): + if isinstance(resp.get('data'), dict): + resp['data'] = map_show_keys(resp['data']) + elif isinstance(resp.get('data'), list): + data_list = [] + for idx, row in enumerate(resp['data']): + if isinstance(row, dict): + cr = map_show_keys(row) + if None is not cr: + data_list.append(cr) + resp['data'] = data_list + return resp + return dict([(u'data', (None, resp)[isinstance(resp, string_types)])]) + + def _getetsrc(self, url, params=None, language=None, parse_json=False): + """Loads a URL using caching + """ + try: + src = self._load_url(url, params=params, language=language, parse_json=parse_json) + if isinstance(src, dict): + if None is not src['data']: + data = src['data'] + else: + data = {} + # data = src['data'] or {} + if isinstance(data, list): + if 0 < len(data): + data = data[0] + # data = data[0] or {} + if None is data or (isinstance(data, dict) and 1 > len(data.keys())): + raise ValueError + return src + except (KeyError, IndexError, Exception): + pass + + @staticmethod + def clean_overview(text): + """replace newlines with period and space, 
remove multiple spaces""" + return ' '.join(['%s.' % re.sub(r'[\s][\s]+', r' ', x).strip().rstrip('.') for x in text.split('\r\n')]) + + def get_show_info(self, sid, language=None): + # type: (int, Optional[str]) -> Optional[dict] + results = self.search_tvs(sid, language=language) + for cur_result in (isinstance(results, dict) and results.get('results') or []): + result = filter_list(lambda r: 'series' == r['type'] and sid == r['id'], + cur_result.get('nbHits') and cur_result.get('hits') or []) + if 1 == len(result): + result[0]['overview'] = self.clean_overview( + result[0]['overviews'][self.config['langabbv_23'].get(language) or 'eng']) + # remap + for from_key, to_key in iteritems({ + 'name': 'seriesname', 'first_air_date': 'firstaired' + }): + result[0][to_key] = result[0][from_key] + del result[0][from_key] # delete also prevents false +ve with the following new key notifier + + # notify of new keys + if ENV.get('SG_DEV_MODE'): + new_keys = set(list_keys(result[0])).difference({ + '_highlightResult', 'aliases', 'banner', + 'fanart', 'firstaired', 'follower_count', + 'id', 'image', 'is_tvdb_searchable', 'is_tvt_searchable', + 'seriesname', 'network', + 'objectID', 'overviews', 'poster', 'release_year', + 'slug', 'status', + 'translations', 'type', + 'url', 'uuid' + }) + if new_keys: + log.warning('DEV_MODE: New get_show_info tvdb attrs for %s %r' % (sid, new_keys)) + + return result[0] + + # fallback : e.g. 
https://thetvdb.com/?tab=series&id=349309&lid=7 + response = self._load_url(self.config['base_url'], params={ + 'tab': 'series', 'id': sid, 'lid': self.config['langabbv_to_id'].get(language, 7)}) + series = {} + + def get_value(tag, contains): + try: + rc_contains = re.compile(r'(?i)%s' % contains) + parent = copy.copy(tag.find(string=rc_contains, recursive=True).find_parent(class_=re.compile('item'))) + return ', '.join(re.sub(r'(?i)(\s)([\s]+)', r'\1', i.get_text(strip=True)) + for i in parent.find_all('span')) + except(BaseException, Exception): + pass + + with BS4Parser(response.get('data', '')) as soup: + basic_info = soup.find(id='series_basic_info') + series_id = try_int(get_value(basic_info, r'series\sid'), None) + if None is not series_id: + series['id'] = series_id + series['firstaired'] = None # fill from ep listings page + series['genrelist'] = get_value(basic_info, 'genres').split(', ') # extra field + series['genre'] = '|%s|' % '|'.join(series['genrelist']) + series['language'] = language + series['seriesname'] = soup.find(id='series_title').get_text(strip=True) + series['networklist'] = get_value(basic_info, 'network').split(', ') # extra field + series['network'] = '|%s|' % '|'.join(series['networklist']) # e.g. 
'|network|network n|network 10|' + series['status'] = get_value(basic_info, 'status') + series['type'] = 'series' # extra field + + airs_at = get_value(basic_info, 'airs') + airs = airs_at and airs_at.split(', ') or [] + if 0 < len(airs): + series['airs_time'] = 'at ' in airs[-1] \ + and re.sub(r'(?i)\s+([ap]m)', r'\1', airs[-1]).split()[-1] or '' + series['airs_dayofweek'] = ', '.join(airs[0:-1]) + else: + series['airs_time'] = airs_at + series['airs_dayofweek'] = '' + + # alias list + series['aliases'] = [] + try: + lang_tag = soup.find(id='translations').select('.change_translation_text[data-language="%s"]' % ( + self.config['langabbv_23'].get(language) or 'eng'))[0] + series['aliases'] = [t.get_text(strip=True) for t in lang_tag + .find(string=re.compile('(?i)alias'), recursive=True).find_parent() + .find_next_sibling('ul').find_all('li')] + except(BaseException, Exception): + pass + + # images + series['image'] = series['poster'] = (soup.find(rel=re.compile('artwork_posters')) or {}).get('href') + series['banner'] = (soup.find(rel=re.compile('artwork_banners')) or {}).get('href') + series['fanart'] = (soup.find(rel=re.compile('artwork_backgrounds')) or {}).get('href') + + series['imdb_id'] = re.sub(r'.*(tt\d+)', r'\1', + (soup.find(href=re.compile(r'imdb\.com')) or {}).get('href', '')) + + # {lang: overview} + series.setdefault('overviews', {}) + for cur_tag in soup.find_all(class_='change_translation_text'): + try: + lang = cur_tag.attrs.get('data-language') + if None is not lang: + text = cur_tag.p.get_text(strip=True) + if text: + text = self.clean_overview(text) + series['overviews'].setdefault(lang, text) # extra field + if lang == self.config['langabbv_23'].get(language): + series['overview'] = text + except(BaseException, Exception): + pass + + runtime = get_value(basic_info, 'runtime') + runtime_often = None + if ', ' in runtime: + try: + # sort runtimes by most number of episodes (e.g. 
'25 minutes (700 episodes)') + runtime_often = sorted([re.findall(r'([^(]+)\((\d+).*', i)[0] for i in runtime.split(', ')], + key=lambda x: try_int(x[1]), reverse=True) + runtime_often = next(iter(runtime_often))[0].strip() # first item is most frequent runtime + except(BaseException, Exception): + runtime_often = None + series['runtime'] = runtime_often and re.sub('^([0-9]+).*', r'\1', runtime_often) or runtime + + series['season'] = None + try: + last_season = sorted([x.get('href') + for x in soup.find_all(href=re.compile(r'/seasons/official/(\d+)'))])[-1] + series['season'] = re.findall(r'(\d+)$', last_season)[0] + except(BaseException, Exception): + pass + + series['slug'] = series['url'] = '' + try: + rc_slug = re.compile('(?i)/series/(?P[^/]+)/(?:episode|season)') + series['slug'] = rc_slug.search(soup.find(href=rc_slug).get('href')).group('slug') + series['url'] = '%sseries/%s' % (self.config['base_url'], series['slug']) # extra field + except(BaseException, Exception): + pass + + # {lang: show title in lang} # extra field + series['translations'] = {t.attrs.get('data-language'): t.attrs.get('data-title') + for t in soup.find_all(class_='change_translation_text') + if all(t.attrs.get(a) for a in ('data-title', 'data-language'))} + + return series + + def search_tvs(self, terms, language=None): + # type: (Union[int, str], Optional[str]) -> Optional[dict] + try: + src = self._load_url( + 'https://tvshow''time-%s.algo''lia.net/1/' + 'indexes/*/queries' % random.choice([1, 2, 3, 'dsn']), + params={'x-algo''lia-agent': 'Alg''olia for vani''lla JavaScript (lite) 3.3''2.0;' + 'instant''search.js (3.5''.3);JS Helper (2.2''8.0)', + 'x-algo''lia''-app''lication-id': 'tvshow''time', + 'x-algo''lia''-ap''i-key': '3d''978dd96c457390f21cec6131ce5d''9c'[::-1]}, + post_json={'requests': [ + {'indexName': 'TVDB', + 'params': '&'.join( + ['query=%s' % terms, 'maxValuesPerFacet=10', 'page=0', + 'facetFilters=[["type:series", "type:person"]]', + 'tagFilters=', 
'analytics=false', 'advancedSyntax=true', + 'highlightPreTag=__ais-highlight__', 'highlightPostTag=__/ais-highlight__' + ]) + }]}, + language=language, parse_json=True, failure_monitor=False) + return src + except (KeyError, IndexError, Exception): + pass + + def search(self, series): + # type: (AnyStr) -> List + """This searches TheTVDB.com for the series name + and returns the result list + """ + if PY2: + series = series.encode('utf-8') + self.config['params_search_series']['name'] = series + log.debug('Searching for show %s' % series) + + try: + series_found = self._getetsrc(self.config['url_search_series'], params=self.config['params_search_series'], + language=self.config['language']) + if series_found: + return list_values(series_found)[0] + except (BaseException, Exception): + pass + + return [] + + def get_series(self, series): + """This searches TheTVDB.com for the series name, + If a custom_ui UI is configured, it uses this to select the correct + series. If not, and interactive == True, ConsoleUI is used, if not + BaseUI is used to select the first result. 
+ """ + all_series = self.search(series) + if not isinstance(all_series, list): + all_series = [all_series] + + if 0 == len(all_series): + log.debug('Series result returned zero') + raise TvdbShownotfound('Show-name search returned zero results (cannot find show on TVDB)') + + if None is not self.config['custom_ui']: + log.debug('Using custom UI %s' % self.config['custom_ui'].__name__) + custom_ui = self.config['custom_ui'] + ui = custom_ui(config=self.config) + else: + if not self.config['interactive']: + log.debug('Auto-selecting first search result using BaseUI') + ui = BaseUI(config=self.config) + else: + log.debug('Interactively selecting show using ConsoleUI') + ui = ConsoleUI(config=self.config) + + return ui.select_series(all_series) + + def _parse_banners(self, sid, img_list): + banners = {} + + try: + for cur_banner in img_list: + bid = cur_banner['id'] + btype = (cur_banner['keytype'], 'banner')['series' == cur_banner['keytype']] + btype2 = (cur_banner['resolution'], try_int(cur_banner['subkey'], cur_banner['subkey']))[ + btype in ('season', 'seasonwide')] + if None is btype or None is btype2: + continue + + for k, v in iteritems(cur_banner): + if None is k or None is v: + continue + + k, v = k.lower(), v.lower() if isinstance(v, string_types) else v + if 'filename' == k: + k = 'bannerpath' + v = self.config['url_artworks'] % v + elif 'thumbnail' == k: + k = 'thumbnailpath' + v = self.config['url_artworks'] % v + elif 'keytype' == k: + k = 'bannertype' + banners.setdefault(btype, OrderedDict()).setdefault(btype2, OrderedDict()).setdefault(bid, {})[ + k] = v + + except (BaseException, Exception): + pass + + self._set_show_data(sid, '_banners', banners, add=True) + + def _parse_actors(self, sid, actor_list, actor_list_alt): + + a = [] + cast = CastList() + try: + alts = {} + if actor_list_alt: + with BS4Parser(actor_list_alt) as soup: + rc_role = re.compile(r'/series/(?P[^/]+)/people/(?P\d+)/?$') + rc_img = 
re.compile(r'/(?Pperson/(?P[0-9]+)/(?P[^/]+)\..*)') + rc_img_v3 = re.compile(r'/(?Pactors/(?P[^/]+)\..*)') + max_people = 5 + rc_clean = re.compile(r'[^a-z0-9]') + for cur_enum, cur_role in enumerate(soup.find_all('a', href=rc_role) or []): + try: + image = person_id = None + for cur_rc in (rc_img, rc_img_v3): + img_tag = cur_role.find('img', src=cur_rc) + if img_tag: + img_parsed = cur_rc.search(img_tag.get('src')) + image, person_id = [x in img_parsed.groupdict() and img_parsed.group(x) + for x in ('url', 'person_id')] + break + lines = [x.strip() for x in cur_role.get_text().split('\n') if x.strip()][0:2] + name = role = '' + if len(lines): + name = lines[0] + for line in lines[1:]: + if line.lower().startswith('as '): + role = line[3:] + break + if not person_id and max_people: + max_people -= 1 + results = self.search_tvs(name) + try: + for cur_result in (isinstance(results, dict) and results.get('results') or []): + # sorts 'banners/images/missing/' to last before filter + people = filter_list( + lambda r: 'person' == r['type'] + and rc_clean.sub(name, '') == rc_clean.sub(r['name'], ''), + cur_result.get('nbHits') + and sorted(cur_result.get('hits'), + key=lambda x: len(x['image']), reverse=True) or []) + if ENV.get('SG_DEV_MODE'): + for person in people: + new_keys = set(list_keys(person)).difference({ + '_highlightResult', 'banner', 'id', 'image', + 'is_tvdb_searchable', 'is_tvt_searchable', 'name', + 'objectID', 'people_birthdate', 'people_died', + 'poster', 'type', 'url' + }) + if new_keys: + log.warning('DEV_MODE: New _parse_actors tvdb attrs for %s %r' + % (person['id'], new_keys)) + + person_ok = False + for person in people: + if image: + people_data = self._load_url(person['url'])['data'] + person_ok = re.search(re.escape(image), people_data) + if not image or person_ok: + person_id = person['id'] + raise ValueError('value okay, id found') + except (BaseException, Exception): + pass + + rid = int(rc_role.search(cur_role.get('href')).group('role_id')) 
+ alts.setdefault(rid, {'id': rid, 'person_id': person_id or None, 'name': name, 'role': role, + 'image': image, 'sortorder': cur_enum, 'lastupdated': 0}) + except(BaseException, Exception): + pass + if not self.is_apikey(): # for the future when apikey == '' + actor_list = sorted([d for _, d in iteritems(alts)], key=lambda x: x.get('sortorder')) + + unique_c_p, c_p_list, new_actor_list = set(), [], [] + for actor in sorted(actor_list, key=lambda x: x.get('lastupdated'), reverse=True): + c_p_list.append((actor['name'], actor['role'])) + if (actor['name'], actor['role']) not in unique_c_p: + unique_c_p.add((actor['name'], actor['role'])) + new_actor_list.append(actor) + for n in sorted(new_actor_list, key=lambda x: x['sortorder']): + role_image = (alts.get(n['id'], {}).get('image'), n.get('image'))[ + any([n.get('image')]) and 1 == c_p_list.count((n['name'], n['role']))] + if role_image: + role_image = self.config['url_artworks'] % role_image + character_name = n.get('role', '').strip() or alts.get(n['id'], {}).get('role', '') + person_name = n.get('name', '').strip() or alts.get(n['id'], {}).get('name', '') + try: + person_id = try_int(re.search(r'^person/(\d+)/', n.get('image', '')).group(1), None) + except (BaseException, Exception): + person_id = None + person_id = person_id or alts.get(n['id'], {}).get('person_id') + character_id = n.get('id', None) or alts.get(n['id'], {}).get('rid') + a.append({'character': {'id': character_id, + 'name': character_name, + 'url': None, # not supported by tvdb + 'image': role_image, + }, + 'person': {'id': person_id, + 'name': person_name, + 'url': person_id and (self.config['url_people'] % person_id) or None, + 'image': None, # not supported by tvdb + 'birthday': None, # not supported by tvdb + 'deathday': None, # not supported by tvdb + 'gender': None, # not supported by tvdb + 'country': None, # not supported by tvdb + }, + }) + cast[RoleTypes.ActorMain].append( + Character(p_id=character_id, name=character_name, + 
person=[Person(p_id=person_id, name=person_name)], image=role_image)) + except (BaseException, Exception): + pass + self._set_show_data(sid, 'actors', a) + self._set_show_data(sid, 'cast', cast) + self.shows[sid].actors_loaded = True + + def get_episode_data(self, epid): + # Parse episode information + data = None + log.debug('Getting all episode data for %s' % epid) + url = self.config['url_episodes_info'] % epid + episode_data = self._getetsrc(url, language=self.config['language']) + + if episode_data and 'data' in episode_data: + data = episode_data['data'] + if isinstance(data, dict): + for k, v in iteritems(data): + k = k.lower() + + if None is not v: + if 'filename' == k and v: + v = self.config['url_artworks'] % v + else: + v = clean_data(v) + data[k] = v + + return data + + def _parse_images(self, sid, language, show_data, image_type, enabled_type, type_bool): + mapped_img_types = {'banner': 'series'} + excluded_main_data = enabled_type in ['seasons_enabled', 'seasonwides_enabled'] + loaded_name = '%s_loaded' % image_type + if (type_bool or self.config[enabled_type]) and not getattr(self.shows.get(sid), loaded_name, False): + image_data = self._getetsrc(self.config['url_series_images'] % + (sid, mapped_img_types.get(image_type, image_type)), language=language) + if image_data and 0 < len(image_data.get('data', '') or ''): + image_data['data'] = sorted(image_data['data'], reverse=True, + key=lambda x: (x['ratingsinfo']['average'], x['ratingsinfo']['count'])) + if not excluded_main_data: + url_image = self.config['url_artworks'] % image_data['data'][0]['filename'] + url_thumb = self.config['url_artworks'] % image_data['data'][0]['thumbnail'] + self._set_show_data(sid, image_type, url_image) + self._set_show_data(sid, u'%s_thumb' % image_type, url_thumb) + excluded_main_data = True # artwork found so prevent fallback + self._parse_banners(sid, image_data['data']) + self.shows[sid].__dict__[loaded_name] = True + + # fallback image thumbnail for none 
excluded_main_data if artwork is not found + if not excluded_main_data and show_data['data'].get(image_type): + self._set_show_data(sid, u'%s_thumb' % image_type, + re.sub(r'\.jpg$', '_t.jpg', show_data['data'][image_type], flags=re.I)) + + def _get_show_data(self, + sid, # type: integer_types + language, # type: AnyStr + get_ep_info=False, # type: bool + banners=False, # type: bool + posters=False, # type: bool + seasons=False, # type: bool + seasonwides=False, # type: bool + fanart=False, # type: bool + actors=False, # type: bool + direct_data=False, # type: bool + **kwargs # type: Optional[Any] + ): # type: (...) -> Optional[bool, dict] + """Takes a series ID, gets the epInfo URL and parses the TVDB + XML file into the shows dict in layout: + shows[series_id][season_number][episode_number] + """ + + # Parse show information + url = self.config['url_series_info'] % sid + if direct_data or sid not in self.shows or None is self.shows[sid].id: + log.debug('Getting all series data for %s' % sid) + show_data = self._getetsrc(url, language=language) + if not show_data or not show_data.get('data'): + show_data = {'data': self.get_show_info(sid, language=language)} + if direct_data: + return show_data + + # check and make sure we have data to process and that it contains a series name + if not (show_data and 'seriesname' in show_data.get('data', {}) or {}): + return False + + for k, v in iteritems(show_data['data']): + self._set_show_data(sid, k, v) + self._set_show_data(sid, 'ids', + TVInfoIDs( + tvdb=show_data['data'].get('id'), + imdb=show_data['data'].get('imdb_id') + and try_int(show_data['data'].get('imdb_id', '').replace('tt', ''), None))) + else: + show_data = {'data': {}} + + for img_type, en_type, p_type in [(u'poster', 'posters_enabled', posters), + (u'banner', 'banners_enabled', banners), + (u'fanart', 'fanart_enabled', fanart), + (u'season', 'seasons_enabled', seasons), + (u'seasonwide', 'seasonwides_enabled', seasonwides)]: + self._parse_images(sid, 
language, show_data, img_type, en_type, p_type) + + if (actors or self.config['actors_enabled']) and not getattr(self.shows.get(sid), 'actors_loaded', False): + actor_data = self._getetsrc(self.config['url_actors_info'] % sid, language=language) + actor_data_alt = self._getetsrc(self.config['url_series_people'] % sid, language=language) + if actor_data and 0 < len(actor_data.get('data', '') or '') or actor_data_alt and actor_data_alt['data']: + self._parse_actors(sid, actor_data['data'], actor_data_alt and actor_data_alt['data']) + + if get_ep_info and not getattr(self.shows.get(sid), 'ep_loaded', False): + # Parse episode data + log.debug('Getting all episodes of %s' % sid) + + page = 1 + episodes = [] + while page <= 400: + episode_data = {} + if self.is_apikey(): + episode_data = self._getetsrc( + self.config['url_series_episodes_info'] % (sid, page), language=language) + + if not episode_data: + response = {'data': None} + items_found = False + # fallback to page 'all' if dvd is enabled and response has no items + for page_type in ('url_series_dvd', 'url_series_all'): + if 'dvd' not in page_type or self.config['dvdorder']: + response = self._load_url(self.config[page_type] % show_data.get('data').get('slug')) + with BS4Parser(response.get('data') or '') as soup: + items_found = bool(soup.find_all(class_='list-group-item')) + if items_found: + break + if not items_found: + break + + episode_data = {'data': []} + with BS4Parser(response.get('data')) as soup: + items = soup.find_all(class_='list-group-item') + rc_sxe = re.compile(r'(?i)s(?:pecial\s*)?(\d+)\s*[xe]\s*(\d+)') # Special nxn or SnnEnn + rc_episode = re.compile(r'(?i)/series/%s/episodes?/(?P\d+)' % show_data['data']['slug']) + rc_date = re.compile(r'\s\d{4}\s*$') + season_type, episode_type = ['%s%s' % (('aired', 'dvd')['dvd' in page_type], x) + for x in ('season', 'episodenumber')] + for cur_item in items: + try: + heading_tag = cur_item.find(class_='list-group-item-heading') + sxe = 
heading_tag.find(class_='episode-label').get_text(strip=True) + ep_season, ep_episode = [try_int(x) for x in rc_sxe.findall(sxe)[0]] + link_ep_tag = heading_tag.find(href=rc_episode) or {} + link_match = rc_episode.search(link_ep_tag.get('href', '')) + ep_id = link_match and try_int(link_match.group('ep_id'), None) + ep_name = link_ep_tag.get_text(strip=True) + # ep_network = None # extra field + ep_aired = None + for cur_tag in cur_item.find('ul').find_all('li'): + text = cur_tag.get_text(strip=True) + if rc_date.search(text): + ep_aired = parse(text).strftime('%Y-%m-%d') + # elif text in show_data['data']['network']: # unreliable data + # ep_network = text + ep_overview = None + item_tag = cur_item.find(class_='list-group-item-text') + if item_tag: + ep_overview = self.clean_overview(item_tag.get_text() or '') + ep_filename = None + link_ep_tag = item_tag.find(href=rc_episode) or None + if link_ep_tag: + ep_filename = (link_ep_tag.find('img') or {}).get('src', '') + + episode_data['data'].append({ + 'id': ep_id, season_type: ep_season, episode_type: ep_episode, + 'episodename': ep_name, 'firstaired': ep_aired, 'overview': ep_overview, + 'filename': ep_filename, # 'network': ep_network + }) + + if not show_data['data']['firstaired'] and ep_aired \ + and (1, 1) == (ep_season, ep_episode): + show_data['data']['firstaired'] = ep_aired + + episode_data['fallback'] = True + except (BaseException, Exception): + continue + + if None is episode_data: + raise TvdbError('Exception retrieving episodes for show') + if isinstance(episode_data, dict) and not episode_data.get('data', []): + if 1 != page: + self.not_found = False + break + if not getattr(self, 'not_found', False) and None is not episode_data.get('data'): + episodes.extend(episode_data['data']) + next_link = episode_data.get('links', {}).get('next', None) + # check if page is a valid following page + if not isinstance(next_link, integer_types) or next_link <= page: + next_link = None + if not next_link and 
isinstance(episode_data, dict) \ + and isinstance(episode_data.get('data', []), list) and \ + (100 > len(episode_data.get('data', [])) or episode_data.get('fallback')): + break + if next_link: + page = next_link + else: + page += 1 + + ep_map_keys = {'absolutenumber': u'absolute_number', 'airedepisodenumber': u'episodenumber', + 'airedseason': u'seasonnumber', 'airedseasonid': u'seasonid', + 'dvdepisodenumber': u'dvd_episodenumber', 'dvdseason': u'dvd_season'} + + for cur_ep in episodes: + if self.config['dvdorder']: + log.debug('Using DVD ordering.') + use_dvd = None is not cur_ep.get('dvdseason') and None is not cur_ep.get('dvdepisodenumber') + else: + use_dvd = False + + if use_dvd: + elem_seasnum, elem_epno = cur_ep.get('dvdseason'), cur_ep.get('dvdepisodenumber') + else: + elem_seasnum, elem_epno = cur_ep.get('airedseason'), cur_ep.get('airedepisodenumber') + + if None is elem_seasnum or None is elem_epno: + log.warning('An episode has incomplete season/episode number (season: %r, episode: %r)' % ( + elem_seasnum, elem_epno)) + continue # Skip to next episode + + # float() is because https://github.com/dbr/tvnamer/issues/95 - should probably be fixed in TVDB data + seas_no = int(float(elem_seasnum)) + ep_no = int(float(elem_epno)) + + if not cur_ep.get('network'): + cur_ep['network'] = self.shows[sid].network + for k, v in iteritems(cur_ep): + k = k.lower() + + if None is not v: + if 'filename' == k and v: + if '://' not in v: + v = self.config['url_artworks'] % v + else: + v = clean_data(v) + + if k in ep_map_keys: + k = ep_map_keys[k] + self._set_item(sid, seas_no, ep_no, k, v) + + crew = CrewList() + cast = CastList() + try: + for director in cur_ep.get('directors', []): + crew[RoleTypes.CrewDirector].append(Person(name=director)) + except (BaseException, Exception): + pass + try: + for guest in cur_ep.get('gueststars_list', []): + cast[RoleTypes.ActorGuest].append(Character(person=[Person(name=guest)])) + except (BaseException, Exception): + pass + try: + 
for writers in cur_ep.get('writers', []): + crew[RoleTypes.CrewWriter].append(Person(name=writers)) + except (BaseException, Exception): + pass + self._set_item(sid, seas_no, ep_no, 'crew', crew) + self._set_item(sid, seas_no, ep_no, 'cast', cast) + + self.shows[sid].ep_loaded = True + + return True + + def _name_to_sid(self, name): + """Takes show name, returns the correct series ID (if the show has + already been grabbed), or grabs all episodes and returns + the correct SID. + """ + if name in self.corrections: + log.debug('Correcting %s to %s' % (name, self.corrections[name])) + return self.corrections[name] + else: + log.debug('Getting show %s' % name) + selected_series = self.get_series(name) + if isinstance(selected_series, dict): + selected_series = [selected_series] + sids = [int(x['id']) for x in selected_series if + self._get_show_data(int(x['id']), self.config['language'])] + self.corrections.update(dict([(x['seriesname'], int(x['id'])) for x in selected_series])) + return sids + + +def main(): + """Simple example of using tvdb_api - it just + grabs an episode name interactively. 
+ """ + import logging + + logging.basicConfig(level=logging.DEBUG) + + tvdb_instance = Tvdb(interactive=True, cache=False) + print(tvdb_instance['Lost']['seriesname']) + print(tvdb_instance['Lost'][1][4]['episodename']) + + +if '__main__' == __name__: + main() diff --git a/lib/api_tvdb/tvdb_cache.py b/lib/api_tvdb/tvdb_cache.py new file mode 100644 index 0000000..9edc9b9 --- /dev/null +++ b/lib/api_tvdb/tvdb_cache.py @@ -0,0 +1,251 @@ +#!/usr/bin/env python2 +#encoding:utf-8 +#author:dbr/Ben +#project:tvdb_api +#repository:http://github.com/dbr/tvdb_api +#license:unlicense (http://unlicense.org/) + +""" +urllib2 caching handler +Modified from http://code.activestate.com/recipes/491261/ +""" +from __future__ import with_statement + +__author__ = "dbr/Ben" +__version__ = "1.9" + +import os +import time +import errno +from hashlib import md5 +from threading import RLock +from six import StringIO +from six.moves import urllib, http_client as httplib + +cache_lock = RLock() + +def locked_function(origfunc): + """Decorator to execute function under lock""" + def wrapped(*args, **kwargs): + cache_lock.acquire() + try: + return origfunc(*args, **kwargs) + finally: + cache_lock.release() + return wrapped + +def calculate_cache_path(cache_location, url): + """Checks if [cache_location]/[hash_of_url].headers and .body exist + """ + thumb = md5(url).hexdigest() + header = os.path.join(cache_location, thumb + ".headers") + body = os.path.join(cache_location, thumb + ".body") + return header, body + +def check_cache_time(path, max_age): + """Checks if a file has been created/modified in the [last max_age] seconds. 
+ False means the file is too old (or doesn't exist), True means it is + up-to-date and valid""" + if not os.path.isfile(path): + return False + cache_modified_time = os.stat(path).st_mtime + time_now = time.time() + if cache_modified_time < time_now - max_age: + # Cache is old + return False + else: + return True + +@locked_function +def exists_in_cache(cache_location, url, max_age): + """Returns if header AND body cache file exist (and are up-to-date)""" + hpath, bpath = calculate_cache_path(cache_location, url) + if os.path.exists(hpath) and os.path.exists(bpath): + return( + check_cache_time(hpath, max_age) + and check_cache_time(bpath, max_age) + ) + else: + # File does not exist + return False + +@locked_function +def store_in_cache(cache_location, url, response): + """Tries to store response in cache.""" + hpath, bpath = calculate_cache_path(cache_location, url) + try: + outf = open(hpath, "wb") + headers = str(response.info()) + outf.write(headers) + outf.close() + + outf = open(bpath, "wb") + outf.write(response.read()) + outf.close() + except IOError: + return True + else: + return False + +@locked_function +def delete_from_cache(cache_location, url): + """Deletes a response in cache.""" + hpath, bpath = calculate_cache_path(cache_location, url) + try: + if os.path.exists(hpath): + os.remove(hpath) + if os.path.exists(bpath): + os.remove(bpath) + except IOError: + return True + else: + return False + +class CacheHandler(urllib.request.BaseHandler): + """Stores responses in a persistant on-disk cache. 
+ + If a subsequent GET request is made for the same URL, the stored + response is returned, saving time, resources and bandwidth + """ + @locked_function + def __init__(self, cache_location, max_age = 21600): + """The location of the cache directory""" + self.max_age = max_age + self.cache_location = cache_location + if not os.path.exists(self.cache_location): + try: + os.mkdir(self.cache_location) + except OSError as e: + if e.errno == errno.EEXIST and os.path.isdir(self.cache_location): + # File exists, and it's a directory, + # another process beat us to creating this dir, that's OK. + pass + else: + # Our target dir is already a file, or different error, + # relay the error! + raise + + def default_open(self, request): + """Handles GET requests, if the response is cached it returns it + """ + if "GET" != request.get_method(): + return None # let the next handler try to handle the request + + if exists_in_cache( + self.cache_location, request.get_full_url(), self.max_age + ): + return CachedResponse( + self.cache_location, + request.get_full_url(), + set_cache_header = True + ) + else: + return None + + def http_response(self, request, response): + """Gets a HTTP response, if it was a GET request and the status code + starts with 2 (200 OK etc) it caches it and returns a CachedResponse + """ + if ("GET" == request.get_method() + and str(response.code).startswith("2")): + if 'x-local-cache' not in response.info(): + # Response is not cached + set_cache_header = store_in_cache( + self.cache_location, + request.get_full_url(), + response + ) + else: + set_cache_header = True + + return CachedResponse( + self.cache_location, + request.get_full_url(), + set_cache_header = set_cache_header + ) + else: + return response + +class CachedResponse(StringIO): + """An urllib2.response-like object for cached responses. + + To determine if a response is cached or coming directly from + the network, check the x-local-cache header rather than the object type. 
+ """ + + @locked_function + def __init__(self, cache_location, url, set_cache_header=True): + self.cache_location = cache_location + hpath, bpath = calculate_cache_path(cache_location, url) + + StringIO.__init__(self, open(bpath, "rb").read()) + + self.url = url + self.code = 200 + self.msg = "OK" + headerbuf = open(hpath, "rb").read() + if set_cache_header: + headerbuf += "x-local-cache: %s\r\n" % (bpath) + self.headers = httplib.HTTPMessage(StringIO(headerbuf)) + + def info(self): + """Returns headers + """ + return self.headers + + def geturl(self): + """Returns original URL + """ + return self.url + + @locked_function + def recache(self): + new_request = urllib.request.urlopen(self.url) + set_cache_header = store_in_cache( + self.cache_location, + new_request.url, + new_request + ) + CachedResponse.__init__(self, self.cache_location, self.url, True) + + @locked_function + def delete_cache(self): + delete_from_cache( + self.cache_location, + self.url + ) + + +if __name__ == "__main__": + def main(): + """Quick test/example of CacheHandler""" + opener = urllib.request.build_opener(CacheHandler("/tmp/")) + response = opener.open('http://google.com') + print(response.headers) + print('Response:', response.read()) + + response.recache() + print(response.headers) + print('After recache:', response.read()) + + # Test usage in threads + from threading import Thread + + class CacheThreadTest(Thread): + lastdata = None + + def run(self): + req = opener.open("http://google.com") + newdata = req.read() + if None is self.lastdata: + self.lastdata = newdata + assert self.lastdata == newdata, "Data was not consistent, uhoh" + req.recache() + threads = [CacheThreadTest() for _ in range(50)] + print('Starting threads') + [t.start() for t in threads] + print('..done') + print('Joining threads') + [t.join() for t in threads] + print('..done') + main() diff --git a/lib/api_tvdb/tvdb_exceptions.py b/lib/api_tvdb/tvdb_exceptions.py new file mode 100644 index 0000000..9a22354 --- 
/dev/null +++ b/lib/api_tvdb/tvdb_exceptions.py @@ -0,0 +1,66 @@ +# encoding:utf-8 +# author:dbr/Ben +# project:tvdb_api +# repository:http://github.com/dbr/tvdb_api +# license:unlicense (http://unlicense.org/) + +"""Custom exceptions used or raised by tvdb_api +""" + +__author__ = 'dbr/Ben' +__version__ = '1.9' + +__all__ = ['TvdbException', 'TvdbError', 'TvdbUserabort', 'TvdbShownotfound', + 'TvdbSeasonnotfound', 'TvdbEpisodenotfound', 'TvdbAttributenotfound', 'TvdbTokenexpired'] + +from lib.tvinfo_base.exceptions import * + + +class TvdbException(BaseTVinfoException): + """Any exception generated by tvdb_api + """ + pass + + +class TvdbError(BaseTVinfoError, TvdbException): + """An error with thetvdb.com (Cannot connect, for example) + """ + pass + + +class TvdbUserabort(BaseTVinfoUserabort, TvdbError): + """User aborted the interactive selection (via + the q command, ^c etc) + """ + pass + + +class TvdbShownotfound(BaseTVinfoShownotfound, TvdbError): + """Show cannot be found on thetvdb.com (non-existant show) + """ + pass + + +class TvdbSeasonnotfound(BaseTVinfoSeasonnotfound, TvdbError): + """Season cannot be found on thetvdb.com + """ + pass + + +class TvdbEpisodenotfound(BaseTVinfoEpisodenotfound, TvdbError): + """Episode cannot be found on thetvdb.com + """ + pass + + +class TvdbAttributenotfound(BaseTVinfoAttributenotfound, TvdbError): + """Raised if an episode does not have the requested + attribute (such as a episode name) + """ + pass + + +class TvdbTokenexpired(BaseTVinfoAuthenticationerror, TvdbError): + """token expired or missing thetvdb.com + """ + pass diff --git a/lib/api_tvdb/tvdb_ui.py b/lib/api_tvdb/tvdb_ui.py new file mode 100644 index 0000000..fae4383 --- /dev/null +++ b/lib/api_tvdb/tvdb_ui.py @@ -0,0 +1,156 @@ +#!/usr/bin/env python2 +# encoding:utf-8 +# author:dbr/Ben +# project:tvdb_api +# repository:http://github.com/dbr/tvdb_api +# license:unlicense (http://unlicense.org/) + +"""Contains included user interfaces for Tvdb show 
selection. + +A UI is a callback. A class, it's __init__ function takes two arguments: + +- config, which is the Tvdb config dict, setup in tvdb_api.py +- log, which is Tvdb's logger instance (which uses the logging module). You can +call log.info() log.warning() etc + +It must have a method "select_series", this is passed a list of dicts, each dict +contains the the keys "name" (human readable show name), and "sid" (the shows +ID as on thetvdb.com). For example: + +[{'name': u'Lost', 'sid': u'73739'}, + {'name': u'Lost Universe', 'sid': u'73181'}] + +The "select_series" method must return the appropriate dict, or it can raise +tvdb_userabort (if the selection is aborted), tvdb_shownotfound (if the show +cannot be found). + +A simple example callback, which returns a random series: + +# >>> import random +# >>> from tvdb_ui import BaseUI +# >>> class RandomUI(BaseUI): +# ... def select_series(self, allSeries): +# ... import random +# ... return random.choice(allSeries) + +Then to use it.. 
+ +# >>> from tvdb_api import Tvdb +# >>> t = Tvdb(custom_ui = RandomUI) +# >>> random_matching_series = t['Lost'] +# >>> type(random_matching_series) +# +""" + +__author__ = "dbr/Ben" +__version__ = "1.9" + +import logging +import warnings + +from .tvdb_exceptions import TvdbUserabort +from six import moves + + +def log(): + return logging.getLogger(__name__) + + +class BaseUI(object): + """Default non-interactive UI, which auto-selects first results + """ + def __init__(self, config, log=None): + self.config = config + if None is not log: + warnings.warn("the UI's log parameter is deprecated, instead use\n" + "use import logging; logging.getLogger('ui').info('blah')\n" + "The self.log attribute will be removed in the next version") + self.log = logging.getLogger(__name__) + + def select_series(self, all_series): + return all_series[0] + + +class ConsoleUI(BaseUI): + """Interactively allows the user to select a show from a console based UI + """ + + @staticmethod + def _displaySeries(all_series, limit=6): + """Helper function, lists series with corresponding ID + """ + if None is not limit: + toshow = all_series[:limit] + else: + toshow = all_series + + print('TVDB Search Results:') + for i, cshow in enumerate(toshow): + i_show = i + 1 # Start at more human readable number 1 (not 0) + log().debug('Showing allSeries[%s], series %s)' % (i_show, all_series[i]['seriesname'])) + if 0 == i: + extra = " (default)" + else: + extra = "" + + print ('%s -> %s [%s] # http://thetvdb.com/?tab=series&id=%s&lid=%s%s' % ( + i_show, + cshow['seriesname'].encode('UTF-8', 'ignore'), + cshow['language'].encode('UTF-8', 'ignore'), + str(cshow['id']), + cshow['lid'], + extra + )) + + def select_series(self, all_series): + self._displaySeries(all_series) + + if 1 == len(all_series): + # Single result, return it! 
+ print('Automatically selecting only result') + return all_series[0] + + if self.config['select_first'] is True: + print('Automatically returning first search result') + return all_series[0] + + while True: # return breaks this loop + try: + print('Enter choice (first number, return for default, \'all\', ? for help):') + ans = moves.input() + except KeyboardInterrupt: + raise TvdbUserabort("User aborted (^c keyboard interupt)") + except EOFError: + raise TvdbUserabort("User aborted (EOF received)") + + log().debug('Got choice of: %s' % ans) + try: + selected_id = int(ans) - 1 # The human entered 1 as first result, not zero + except ValueError: # Input was not number + if 0 == len(ans.strip()): + # Default option + log().debug('Default option, returning first series') + return all_series[0] + if "q" == ans: + log().debug('Got quit command (q)') + raise TvdbUserabort("User aborted ('q' quit command)") + elif "?" == ans: + print('## Help') + print('# Enter the number that corresponds to the correct show.') + print('# a - display all results') + print('# all - display all results') + print('# ? 
- this help') + print('# q - abort tvnamer') + print('# Press return with no input to select first result') + elif ans.lower() in ["a", "all"]: + self._displaySeries(all_series, limit=None) + else: + log().debug('Unknown keypress %s' % ans) + else: + log().debug('Trying to return ID: %d' % selected_id) + try: + return all_series[selected_id] + except IndexError: + log().debug('Invalid show number entered!') + print('Invalid number (%s) selected!') + self._displaySeries(all_series) diff --git a/lib/api_tvmaze/__init__.py b/lib/api_tvmaze/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/lib/api_tvmaze/tvmaze_api.py b/lib/api_tvmaze/tvmaze_api.py new file mode 100644 index 0000000..ad1865e --- /dev/null +++ b/lib/api_tvmaze/tvmaze_api.py @@ -0,0 +1,666 @@ +# encoding:utf-8 +# author:Prinz23 +# project:tvmaze_api + +__author__ = 'Prinz23' +__version__ = '1.0' +__api_version__ = '1.0.0' + +import datetime +import logging +import re + +import requests +from urllib3.util.retry import Retry +from requests.adapters import HTTPAdapter + +from six import integer_types, iteritems +from sg_helpers import get_url, try_int +from lib.dateutil.parser import parser +# noinspection PyProtectedMember +from lib.dateutil.tz.tz import _datetime_to_timestamp +from lib.exceptions_helper import ConnectionSkipException, ex +# from .tvmaze_exceptions import * +from lib.tvinfo_base import TVInfoBase, TVInfoImage, TVInfoImageSize, TVInfoImageType, Character, Crew, \ + crew_type_names, Person, RoleTypes, TVInfoShow, TVInfoEpisode, TVInfoIDs, TVInfoSeason, PersonGenders, \ + TVINFO_TVMAZE, TVINFO_TVDB, TVINFO_IMDB +from lib.pytvmaze import tvmaze + +# noinspection PyUnreachableCode +if False: + from typing import Any, AnyStr, Dict, List, Optional + from lib.pytvmaze.tvmaze import Episode as TVMazeEpisode, Show as TVMazeShow + +log = logging.getLogger('tvmaze.api') +log.addHandler(logging.NullHandler()) + + +# Query TVmaze free endpoints +def tvmaze_endpoint_standard_get(url): 
+ s = requests.Session() + retries = Retry(total=5, + backoff_factor=0.1, + status_forcelist=[429]) + # noinspection HttpUrlsUsage + s.mount('http://', HTTPAdapter(max_retries=retries)) + s.mount('https://', HTTPAdapter(max_retries=retries)) + # noinspection PyProtectedMember + return get_url(url, json=True, session=s, hooks={'response': tvmaze._record_hook}, raise_skip_exception=True) + + +tvmaze.TVmaze.endpoint_standard_get = staticmethod(tvmaze_endpoint_standard_get) +tvm_obj = tvmaze.TVmaze() +empty_ep = TVInfoEpisode() +empty_se = TVInfoSeason() +tz_p = parser() + +img_type_map = { + 'poster': TVInfoImageType.poster, + 'banner': TVInfoImageType.banner, + 'background': TVInfoImageType.fanart, + 'typography': TVInfoImageType.typography, +} + +img_size_map = { + 'original': TVInfoImageSize.original, + 'medium': TVInfoImageSize.medium, +} + +show_map = { + 'id': 'maze_id', + 'ids': 'externals', + # 'slug': '', + 'seriesid': 'maze_id', + 'seriesname': 'name', + 'aliases': 'akas', + # 'season': '', + 'classification': 'type', + # 'genre': '', + 'genre_list': 'genres', + # 'actors': '', + # 'cast': '', + # 'show_type': '', + # 'network': 'network', + # 'network_id': '', + # 'network_timezone': '', + # 'network_country': '', + # 'network_country_code': '', + # 'network_is_stream': '', + # 'runtime': 'runtime', + 'language': 'language', + 'official_site': 'official_site', + # 'imdb_id': '', + # 'zap2itid': '', + # 'airs_dayofweek': '', + # 'airs_time': '', + # 'time': '', + 'firstaired': 'premiered', + # 'added': '', + # 'addedby': '', + # 'siteratingcount': '', + # 'lastupdated': '', + # 'contentrating': '', + 'rating': 'rating', + 'status': 'status', + 'overview': 'summary', + # 'poster': 'image', + # 'poster_thumb': '', + # 'banner': '', + # 'banner_thumb': '', + # 'fanart': '', + # 'banners': '', + 'updated_timestamp': 'updated', +} +season_map = { + 'id': 'id', + 'number': 'season_number', + 'name': 'name', + # 'actors': '', + # 'cast': '', + # 'network': '', + # 
'network_id': '', + # 'network_timezone': '', + # 'network_country': '', + # 'network_country_code': '', + # 'network_is_stream': '', + 'ordered': '', + 'start_date': 'premiere_date', + 'end_date': 'end_date', + # 'poster': '', + 'summery': 'summary', + 'episode_order': 'episode_order', +} + + +class TvMaze(TVInfoBase): + supported_id_searches = [TVINFO_TVMAZE, TVINFO_TVDB, TVINFO_IMDB] + supported_person_id_searches = [TVINFO_TVMAZE] + + def __init__(self, *args, **kwargs): + super(TvMaze, self).__init__(*args, **kwargs) + + def _search_show(self, name=None, ids=None, **kwargs): + def _make_result_dict(s): + return {'seriesname': s.name, 'id': s.id, 'firstaired': s.premiered, + 'network': s.network and s.network.name, + 'genres': s.genres, 'overview': s.summary, + 'aliases': [a.name for a in s.akas], 'image': s.image and s.image.get('original'), + 'ids': TVInfoIDs( + tvdb=s.externals.get('thetvdb'), rage=s.externals.get('tvrage'), tvmaze=s.id, + imdb=s.externals.get('imdb') and try_int(s.externals.get('imdb').replace('tt', ''), None))} + results = [] + if ids: + for t, p in iteritems(ids): + if t in self.supported_id_searches: + cache_id_key = 's-id-%s-%s' % (t, ids[t]) + is_none, shows = self._get_cache_entry(cache_id_key) + if t == TVINFO_TVDB: + if not self.config.get('cache_search') or (None is shows and not is_none): + try: + show = tvmaze.lookup_tvdb(p) + self._set_cache_entry(cache_id_key, show, expire=self.search_cache_expire) + except (BaseException, Exception): + continue + else: + show = shows + elif t == TVINFO_IMDB: + if not self.config.get('cache_search') or (None is shows and not is_none): + try: + show = tvmaze.lookup_imdb((p, 'tt%07d' % p)[not str(p).startswith('tt')]) + self._set_cache_entry(cache_id_key, show, expire=self.search_cache_expire) + except (BaseException, Exception): + continue + else: + show = shows + elif t == TVINFO_TVMAZE: + if not self.config.get('cache_search') or (None is shows and not is_none): + try: + show = 
tvm_obj.get_show(maze_id=p) + self._set_cache_entry(cache_id_key, show, expire=self.search_cache_expire) + except (BaseException, Exception): + continue + else: + show = shows + else: + continue + if show: + try: + if show.id not in [i['id'] for i in results]: + results.append(_make_result_dict(show)) + except (BaseException, Exception) as e: + log.debug('Error creating result dict: %s' % ex(e)) + if name: + for n in ([name], name)[isinstance(name, list)]: + cache_name_key = 's-name-%s' % n + is_none, shows = self._get_cache_entry(cache_name_key) + if not self.config.get('cache_search') or (None is shows and not is_none): + try: + shows = tvmaze.show_search(n) + except (BaseException, Exception) as e: + log.debug('Error searching for show: %s' % ex(e)) + continue + results.extend([_make_result_dict(s) for s in shows or []]) + + seen = set() + results = [seen.add(r['id']) or r for r in results if r['id'] not in seen] + return results + + def _set_episode(self, sid, ep_obj): + for _k, _s in ( + ('seasonnumber', 'season_number'), ('episodenumber', 'episode_number'), + ('episodename', 'title'), ('overview', 'summary'), ('firstaired', 'airdate'), + ('airtime', 'airtime'), ('runtime', 'runtime'), + ('seriesid', 'maze_id'), ('id', 'maze_id'), ('is_special', 'special'), ('filename', 'image')): + if 'filename' == _k: + image = getattr(ep_obj, _s, {}) or {} + image = image.get('original') or image.get('medium') + self._set_item(sid, ep_obj.season_number, ep_obj.episode_number, _k, image) + else: + self._set_item(sid, ep_obj.season_number, ep_obj.episode_number, _k, + getattr(ep_obj, _s, getattr(empty_ep, _k))) + + if ep_obj.airstamp: + try: + at = _datetime_to_timestamp(tz_p.parse(ep_obj.airstamp)) + self._set_item(sid, ep_obj.season_number, ep_obj.episode_number, 'timestamp', at) + except (BaseException, Exception): + pass + + @staticmethod + def _set_network(show_obj, network, is_stream): + show_obj['network'] = network.name + show_obj['network_timezone'] = 
network.timezone + show_obj['network_country'] = network.country + show_obj['network_country_code'] = network.code + show_obj['network_id'] = network.maze_id + show_obj['network_is_stream'] = is_stream + + def _get_tvm_show(self, show_id, get_ep_info): + try: + self.show_not_found = False + return tvm_obj.get_show(maze_id=show_id, embed='cast%s' % ('', ',episodeswithspecials')[get_ep_info]) + except tvmaze.ShowNotFound: + self.show_not_found = True + except (BaseException, Exception): + log.debug('Error getting data for tvmaze show id: %s' % show_id) + + def _get_show_data(self, sid, language, get_ep_info=False, banners=False, posters=False, seasons=False, + seasonwides=False, fanart=False, actors=False, **kwargs): + log.debug('Getting all series data for %s' % sid) + + show_data = self._get_tvm_show(sid, get_ep_info) + if not show_data: + return False + + show_obj = self.shows[sid].__dict__ + for k, v in iteritems(show_obj): + if k not in ('cast', 'crew', 'images'): + show_obj[k] = getattr(show_data, show_map.get(k, k), show_obj[k]) + show_obj['runtime'] = show_data.average_runtime or show_data.runtime + p_set = False + if show_data.image: + p_set = True + show_obj['poster'] = show_data.image.get('original') + show_obj['poster_thumb'] = show_data.image.get('medium') + + if (banners or posters or fanart or + any(self.config.get('%s_enabled' % t, False) for t in ('banners', 'posters', 'fanart'))) and \ + not all(getattr(self.shows[sid], '%s_loaded' % t, False) for t in ('poster', 'banner', 'fanart')): + if show_data.images: + b_set, f_set = False, False + self.shows[sid].poster_loaded = True + self.shows[sid].banner_loaded = True + self.shows[sid].fanart_loaded = True + for img in show_data.images: + img_type = img_type_map.get(img.type, TVInfoImageType.other) + img_width, img_height = img.resolutions['original'].get('width'), \ + img.resolutions['original'].get('height') + img_ar = img_width and img_height and float(img_width) / float(img_height) + img_ar_type = 
self._which_type(img_width, img_ar) + if TVInfoImageType.poster == img_type and img_ar and img_ar_type != img_type and \ + show_obj['poster'] == img.resolutions.get('original')['url']: + p_set = False + show_obj['poster'] = None + show_obj['poster_thumb'] = None + img_type = (TVInfoImageType.other, img_type)[ + not img_ar or img_ar_type == img_type or + img_type not in (TVInfoImageType.banner, TVInfoImageType.poster, TVInfoImageType.fanart)] + img_src = {} + for res, img_url in iteritems(img.resolutions): + img_size = img_size_map.get(res) + if img_size: + img_src[img_size] = img_url.get('url') + show_obj['images'].setdefault(img_type, []).append( + TVInfoImage( + image_type=img_type, sizes=img_src, img_id=img.id, main_image=img.main, + type_str=img.type, width=img_width, height=img_height, aspect_ratio=img_ar)) + if not p_set and TVInfoImageType.poster == img_type: + p_set = True + show_obj['poster'] = img.resolutions.get('original')['url'] + show_obj['poster_thumb'] = img.resolutions.get('original')['url'] + elif not b_set and 'banner' == img.type and TVInfoImageType.banner == img_type: + b_set = True + show_obj['banner'] = img.resolutions.get('original')['url'] + show_obj['banner_thumb'] = img.resolutions.get('medium')['url'] + elif not f_set and 'background' == img.type and TVInfoImageType.fanart == img_type: + f_set = True + show_obj['fanart'] = img.resolutions.get('original')['url'] + + if show_data.schedule: + if 'time' in show_data.schedule: + show_obj['airs_time'] = show_data.schedule['time'] + try: + h, m = show_data.schedule['time'].split(':') + h, m = try_int(h, None), try_int(m, None) + if None is not h and None is not m: + show_obj['time'] = datetime.time(hour=h, minute=m) + except (BaseException, Exception): + pass + if 'days' in show_data.schedule: + show_obj['airs_dayofweek'] = ', '.join(show_data.schedule['days']) + if show_data.genres: + show_obj['genre'] = '|%s|' % '|'.join(show_data.genres) + + if (actors or self.config['actors_enabled']) and 
not getattr(self.shows.get(sid), 'actors_loaded', False): + if show_data.cast: + character_person_ids = {} + for ch in show_obj['cast'][RoleTypes.ActorMain]: + character_person_ids.setdefault(ch.id, []).extend([p.id for p in ch.person]) + for ch in show_data.cast.characters: + existing_character = next((c for c in show_obj['cast'][RoleTypes.ActorMain] if c.id == ch.id), + None) # type: Optional[Character] + person = self._convert_person(ch.person) + if existing_character: + existing_person = next((p for p in existing_character.person + if person.id == p.ids.get(TVINFO_TVMAZE)), + None) # type: Person + if existing_person: + try: + character_person_ids[ch.id].remove(existing_person.id) + except (BaseException, Exception): + print('error') + pass + (existing_person.p_id, existing_person.name, existing_person.image, existing_person.gender, + existing_person.birthdate, existing_person.deathdate, existing_person.country, + existing_person.country_code, existing_person.country_timezone, existing_person.thumb_url, + existing_person.url, existing_person.ids) = \ + (ch.person.id, ch.person.name, + ch.person.image and ch.person.image.get('original'), + PersonGenders.named.get( + ch.person.gender and ch.person.gender.lower(), PersonGenders.unknown), + person.birthdate, person.deathdate, + ch.person.country and ch.person.country.get('name'), + ch.person.country and ch.person.country.get('code'), + ch.person.country and ch.person.country.get('timezone'), + ch.person.image and ch.person.image.get('medium'), + ch.person.url, {TVINFO_TVMAZE: ch.person.id}) + else: + existing_character.person.append(person) + else: + show_obj['cast'][RoleTypes.ActorMain].append( + Character(p_id=ch.id, name=ch.name, image=ch.image and ch.image.get('original'), + person=[person], + plays_self=ch.plays_self, thumb_url=ch.image and ch.image.get('medium') + )) + + if character_person_ids: + for c, p_ids in iteritems(character_person_ids): + if p_ids: + char = next((mc for mc in 
show_obj['cast'][RoleTypes.ActorMain] if mc.id == c), + None) # type: Optional[Character] + if char: + char.person = [p for p in char.person if p.id not in p_ids] + + if show_data.cast: + show_obj['actors'] = [ + {'character': {'id': ch.id, + 'name': ch.name, + 'url': 'https://www.tvmaze.com/character/view?id=%s' % ch.id, + 'image': ch.image and ch.image.get('original'), + }, + 'person': {'id': ch.person and ch.person.id, + 'name': ch.person and ch.person.name, + 'url': ch.person and 'https://www.tvmaze.com/person/view?id=%s' % ch.person.id, + 'image': ch.person and ch.person.image and ch.person.image.get('original'), + 'birthday': None, # not sure about format + 'deathday': None, # not sure about format + 'gender': ch.person and ch.person.gender and ch.person.gender, + 'country': ch.person and ch.person.country and ch.person.country.get('name'), + }, + } for ch in show_data.cast.characters] + + if show_data.crew: + for cw in show_data.crew: + rt = crew_type_names.get(cw.type.lower(), RoleTypes.CrewOther) + show_obj['crew'][rt].append( + Crew(p_id=cw.person.id, name=cw.person.name, + image=cw.person.image and cw.person.image.get('original'), + gender=cw.person.gender, birthdate=cw.person.birthday, deathdate=cw.person.death_day, + country=cw.person.country and cw.person.country.get('name'), + country_code=cw.person.country and cw.person.country.get('code'), + country_timezone=cw.person.country and cw.person.country.get('timezone'), + crew_type_name=cw.type, + ) + ) + + if show_data.externals: + show_obj['ids'] = TVInfoIDs(tvdb=show_data.externals.get('thetvdb'), + rage=show_data.externals.get('tvrage'), + imdb=show_data.externals.get('imdb') and + try_int(show_data.externals.get('imdb').replace('tt', ''), None)) + + if show_data.network: + self._set_network(show_obj, show_data.network, False) + elif show_data.web_channel: + self._set_network(show_obj, show_data.web_channel, True) + + if get_ep_info and not getattr(self.shows.get(sid), 'ep_loaded', False): + 
log.debug('Getting all episodes of %s' % sid) + if None is show_data: + show_data = self._get_tvm_show(sid, get_ep_info) + if not show_data: + return False + + if show_data.episodes: + specials = [] + for cur_ep in show_data.episodes: + if cur_ep.is_special(): + specials.append(cur_ep) + else: + self._set_episode(sid, cur_ep) + + if specials: + specials.sort(key=lambda ep: ep.airstamp or 'Last') + for ep_n, cur_sp in enumerate(specials, start=1): + cur_sp.season_number, cur_sp.episode_number = 0, ep_n + self._set_episode(sid, cur_sp) + + if show_data.seasons: + for cur_s_k, cur_s_v in iteritems(show_data.seasons): + season_obj = None + if cur_s_v.season_number not in self.shows[sid]: + if all(_e.is_special() for _e in cur_s_v.episodes or []): + season_obj = self.shows[sid][0].__dict__ + else: + log.error('error episodes have no numbers') + season_obj = season_obj or self.shows[sid][cur_s_v.season_number].__dict__ + for k, v in iteritems(season_map): + season_obj[k] = getattr(cur_s_v, v, None) or empty_se.get(v) + if cur_s_v.network: + self._set_network(season_obj, cur_s_v.network, False) + elif cur_s_v.web_channel: + self._set_network(season_obj, cur_s_v.web_channel, True) + if cur_s_v.image: + season_obj['poster'] = cur_s_v.image.get('original') + self.shows[sid].season_images_loaded = True + + self.shows[sid].ep_loaded = True + + return True + + def get_updated_shows(self): + # type: (...) 
-> Dict[integer_types, integer_types] + return {sid: v.seconds_since_epoch for sid, v in iteritems(tvmaze.show_updates().updates)} + + @staticmethod + def _convert_person(person_obj): + # type: (tvmaze.Person) -> Person + ch = [] + for c in person_obj.castcredits or []: + show = TVInfoShow() + show.seriesname = c.show.name + show.id = c.show.id + show.firstaired = c.show.premiered + show.ids = TVInfoIDs(ids={TVINFO_TVMAZE: show.id}) + show.overview = c.show.summary + show.status = c.show.status + net = c.show.network or c.show.web_channel + show.network = net.name + show.network_id = net.maze_id + show.network_country = net.country + show.network_timezone = net.timezone + show.network_country_code = net.code + show.network_is_stream = None is not c.show.web_channel + ch.append(Character(name=c.character.name, show=show)) + try: + birthdate = person_obj.birthday and tz_p.parse(person_obj.birthday).date() + except (BaseException, Exception): + birthdate = None + try: + deathdate = person_obj.death_day and tz_p.parse(person_obj.death_day).date() + except (BaseException, Exception): + deathdate = None + return Person(p_id=person_obj.id, name=person_obj.name, + image=person_obj.image and person_obj.image.get('original'), + gender=PersonGenders.named.get(person_obj.gender and person_obj.gender.lower(), + PersonGenders.unknown), + birthdate=birthdate, deathdate=deathdate, + country=person_obj.country and person_obj.country.get('name'), + country_code=person_obj.country and person_obj.country.get('code'), + country_timezone=person_obj.country and person_obj.country.get('timezone'), + thumb_url=person_obj.image and person_obj.image.get('medium'), + url=person_obj.url, ids={TVINFO_TVMAZE: person_obj.id}, characters=ch + ) + + def _search_person(self, name=None, ids=None): + # type: (AnyStr, Dict[integer_types, integer_types]) -> List[Person] + urls, result, ids = [], [], ids or {} + for tv_src in self.supported_person_id_searches: + if tv_src in ids: + if TVINFO_TVMAZE == 
tv_src: + try: + r = self.get_person(ids[tv_src]) + except ConnectionSkipException as e: + raise e + except (BaseException, Exception): + r = None + if r: + result.append(r) + if name: + try: + r = tvmaze.people_search(name) + except ConnectionSkipException as e: + raise e + except (BaseException, Exception): + r = None + if r: + for p in r: + if not any(1 for ep in result if p.id == ep.id): + result.append(self._convert_person(p)) + return result + + def get_person(self, p_id, get_show_credits=False, get_images=False, **kwargs): + # type: (integer_types, bool, bool, Any) -> Optional[Person] + if not p_id: + return + kw = {} + to_embed = [] + if get_show_credits: + to_embed.append('castcredits') + if to_embed: + kw['embed'] = ','.join(to_embed) + try: + p = tvmaze.person_main_info(p_id, **kw) + except ConnectionSkipException as e: + raise e + except (BaseException, Exception): + p = None + if p: + return self._convert_person(p) + + def get_premieres(self, result_count=100, get_extra_images=False, **kwargs): + # type: (...) -> List[TVInfoEpisode] + return self._filtered_schedule(lambda e: all([1 == e.season_number, 1 == e.episode_number]), + get_images=get_extra_images) + + def get_returning(self, result_count=100, get_extra_images=False, **kwargs): + # type: (...) 
-> List[TVInfoEpisode] + return self._filtered_schedule(lambda e: all([1 != e.season_number, 1 == e.episode_number]), + get_images=get_extra_images) + + def _make_episode(self, episode_data, show_data=None, get_images=False): + # type: (TVMazeEpisode, TVMazeShow, bool) -> TVInfoEpisode + """ + make out of TVMazeEpisode object and optionally TVMazeShow a TVInfoEpisode + """ + ti_show = TVInfoShow() + ti_show.seriesname = show_data.name + ti_show.id = show_data.maze_id + ti_show.seriesid = ti_show.id + ti_show.language = show_data.language + ti_show.overview = show_data.summary + ti_show.firstaired = show_data.premiered + ti_show.runtime = show_data.average_runtime or show_data.runtime + ti_show.vote_average = show_data.rating and show_data.rating.get('average') + ti_show.popularity = show_data.weight + ti_show.genre_list = show_data.genres or [] + ti_show.genre = '|%s|' % '|'.join(ti_show.genre_list) + ti_show.official_site = show_data.official_site + ti_show.status = show_data.status + ti_show.show_type = show_data.type + ti_show.lastupdated = show_data.updated + ti_show.poster = show_data.image and show_data.image.get('original') + ti_show.aliases = [a.name for a in show_data.akas] + if 'days' in show_data.schedule: + ti_show.airs_dayofweek = ', '.join(show_data.schedule['days']) + network = show_data.network or show_data.web_channel + if network: + ti_show.network_is_stream = None is not show_data.web_channel + ti_show.network = network.name + ti_show.network_id = network.maze_id + ti_show.network_country = network.country + ti_show.network_country_code = network.code + ti_show.network_timezone = network.timezone + if get_images and show_data.images: + b_set, f_set, p_set = False, False, False + for cur_img in show_data.images: + img_type = img_type_map.get(cur_img.type, TVInfoImageType.other) + img_width, img_height = cur_img.resolutions['original'].get('width'), \ + cur_img.resolutions['original'].get('height') + img_ar = img_width and img_height and 
float(img_width) / float(img_height) + img_ar_type = self._which_type(img_width, img_ar) + if TVInfoImageType.poster == img_type and img_ar and img_ar_type != img_type and \ + ti_show.poster == cur_img.resolutions.get('original')['url']: + p_set = False + ti_show.poster = None + ti_show.poster_thumb = None + img_type = (TVInfoImageType.other, img_type)[ + not img_ar or img_ar_type == img_type or + img_type not in (TVInfoImageType.banner, TVInfoImageType.poster, TVInfoImageType.fanart)] + img_src = {} + for cur_res, cur_img_url in iteritems(cur_img.resolutions): + img_size = img_size_map.get(cur_res) + if img_size: + img_src[img_size] = cur_img_url.get('url') + ti_show.images.setdefault(img_type, []).append( + TVInfoImage( + image_type=img_type, sizes=img_src, img_id=cur_img.id, main_image=cur_img.main, + type_str=cur_img.type, width=img_width, height=img_height, aspect_ratio=img_ar)) + if not p_set and TVInfoImageType.poster == img_type: + p_set = True + ti_show.poster = cur_img.resolutions.get('original')['url'] + ti_show.poster_thumb = cur_img.resolutions.get('original')['url'] + elif not b_set and 'banner' == cur_img.type and TVInfoImageType.banner == img_type: + b_set = True + ti_show.banner = cur_img.resolutions.get('original')['url'] + ti_show.banner_thumb = cur_img.resolutions.get('medium')['url'] + elif not f_set and 'background' == cur_img.type and TVInfoImageType.fanart == img_type: + f_set = True + ti_show.fanart = cur_img.resolutions.get('original')['url'] + ti_show.ids = TVInfoIDs( + tvdb=show_data.externals.get('thetvdb'), rage=show_data.externals.get('tvrage'), tvmaze=show_data.id, + imdb=show_data.externals.get('imdb') and try_int(show_data.externals.get('imdb').replace('tt', ''), None)) + ti_show.imdb_id = show_data.externals.get('imdb') + if isinstance(ti_show.imdb_id, integer_types): + ti_show.imdb_id = 'tt%07d' % ti_show.imdb_id + + ti_episode = TVInfoEpisode() + ti_episode.id = episode_data.maze_id + ti_episode.seasonnumber = 
episode_data.season_number + ti_episode.episodenumber = episode_data.episode_number + ti_episode.episodename = episode_data.title + ti_episode.airtime = episode_data.airtime + ti_episode.firstaired = episode_data.airdate + if episode_data.airstamp: + try: + at = _datetime_to_timestamp(tz_p.parse(episode_data.airstamp)) + ti_episode.timestamp = at + except (BaseException, Exception): + pass + ti_episode.filename = episode_data.image and (episode_data.image.get('original') or + episode_data.image.get('medium')) + ti_episode.is_special = episode_data.is_special() + ti_episode.overview = episode_data.summary + ti_episode.runtime = episode_data.runtime + ti_episode.show = ti_show + return ti_episode + + def _filtered_schedule(self, condition, get_images=False): + try: + result = sorted([ + e for e in tvmaze.get_full_schedule() + if condition(e) and (None is e.show.language or re.search('(?i)eng|jap', e.show.language))], + key=lambda x: x.show.premiered or x.airstamp) + return [self._make_episode(r, r.show, get_images) for r in result] + except(BaseException, Exception): + return [] diff --git a/lib/api_tvmaze/tvmaze_exceptions.py b/lib/api_tvmaze/tvmaze_exceptions.py new file mode 100644 index 0000000..6302d07 --- /dev/null +++ b/lib/api_tvmaze/tvmaze_exceptions.py @@ -0,0 +1,62 @@ +# encoding:utf-8 + +"""Custom exceptions used or raised by tvmaze_api +""" + +__author__ = 'Prinz23' +__version__ = '1.0' + +__all__ = ['TvMazeException', 'TvMazeError', 'TvMazeUserabort', 'TvMazeShownotfound', + 'TvMazeSeasonnotfound', 'TvMazeEpisodenotfound', 'TvMazeAttributenotfound', 'TvMazeTokenexpired'] + +from lib.tvinfo_base.exceptions import * + + +class TvMazeException(BaseTVinfoException): + """Any exception generated by tvdb_api + """ + pass + + +class TvMazeError(BaseTVinfoError, TvMazeException): + """An error with thetvdb.com (Cannot connect, for example) + """ + pass + + +class TvMazeUserabort(BaseTVinfoUserabort, TvMazeError): + """User aborted the interactive selection (via 
+ the q command, ^c etc) + """ + pass + + +class TvMazeShownotfound(BaseTVinfoShownotfound, TvMazeError): + """Show cannot be found on thetvdb.com (non-existant show) + """ + pass + + +class TvMazeSeasonnotfound(BaseTVinfoSeasonnotfound, TvMazeError): + """Season cannot be found on thetvdb.com + """ + pass + + +class TvMazeEpisodenotfound(BaseTVinfoEpisodenotfound, TvMazeError): + """Episode cannot be found on thetvdb.com + """ + pass + + +class TvMazeAttributenotfound(BaseTVinfoAttributenotfound, TvMazeError): + """Raised if an episode does not have the requested + attribute (such as a episode name) + """ + pass + + +class TvMazeTokenexpired(BaseTVinfoAuthenticationerror, TvMazeError): + """token expired or missing thetvdb.com + """ + pass diff --git a/lib/imdb_api/__init__.py b/lib/imdb_api/__init__.py deleted file mode 100644 index e69de29..0000000 diff --git a/lib/imdb_api/imdb_api.py b/lib/imdb_api/imdb_api.py deleted file mode 100644 index 8c86514..0000000 --- a/lib/imdb_api/imdb_api.py +++ /dev/null @@ -1,217 +0,0 @@ -# encoding:utf-8 -# author:Prinz23 -# project:imdb_api - -__author__ = 'Prinz23' -__version__ = '1.0' -__api_version__ = '1.0.0' - -import logging -import re -from .imdb_exceptions import * -from exceptions_helper import ex -from six import iteritems -from bs4_parser import BS4Parser -from lib import imdbpie -from lib.tvinfo_base.exceptions import BaseTVinfoShownotfound -from lib.tvinfo_base import TVInfoBase, TVINFO_TRAKT, TVINFO_TMDB, TVINFO_TVDB, TVINFO_TVRAGE, TVINFO_IMDB, \ - Person, PersonGenders, TVINFO_TWITTER, TVINFO_FACEBOOK, TVINFO_WIKIPEDIA, TVINFO_INSTAGRAM, Character, TVInfoShow, \ - TVInfoIDs -from sg_helpers import get_url, try_int -from lib.dateutil.parser import parser - -# noinspection PyUnreachableCode -if False: - from typing import Any, AnyStr, Dict, List, Optional, Union - from six import integer_types - -tz_p = parser() -log = logging.getLogger('imdb.api') -log.addHandler(logging.NullHandler()) - - -class 
IMDbIndexer(TVInfoBase): - # supported_id_searches = [TVINFO_IMDB] - supported_person_id_searches = [TVINFO_IMDB] - supported_id_searches = [TVINFO_IMDB] - - # noinspection PyUnusedLocal - # noinspection PyDefaultArgument - def __init__(self, *args, **kwargs): - super(IMDbIndexer, self).__init__(*args, **kwargs) - - def search(self, series): - # type: (AnyStr) -> List - """This searches for the series name - and returns the result list - """ - result = [] - cache_name_key = 's-title-%s' % series - is_none, shows = self._get_cache_entry(cache_name_key) - if not self.config.get('cache_search') or (None is shows and not is_none): - try: - result = imdbpie.Imdb().search_for_title(series) - except (BaseException, Exception): - pass - self._set_cache_entry(cache_name_key, result, expire=self.search_cache_expire) - else: - result = shows - return result - - def _search_show(self, name=None, ids=None, **kwargs): - # type: (AnyStr, Dict[integer_types, integer_types], Optional[Any]) -> List[TVInfoShow] - """This searches IMDB for the series name, - """ - def _make_result_dict(s): - imdb_id = try_int(re.search(r'tt(\d+)', s.get('id') or s.get('imdb_id')).group(1), None) - tvs = TVInfoShow() - tvs.seriesname, tvs.id, tvs.firstaired, tvs.genre_list, tvs.overview, tvs.poster, tvs.ids = \ - s['title'], imdb_id, s.get('releaseDetails', {}).get('date') or s.get('year'), s.get('genres'), \ - s.get('plot', {}).get('outline', {}).get('text'), s.get('image') and s['image'].get('url'), \ - TVInfoIDs(imdb=imdb_id) - return tvs - - results = [] - if ids: - for t, p in iteritems(ids): - if t in self.supported_id_searches: - if t == TVINFO_IMDB: - cache_id_key = 's-id-%s-%s' % (TVINFO_IMDB, p) - is_none, shows = self._get_cache_entry(cache_id_key) - if not self.config.get('cache_search') or (None is shows and not is_none): - try: - show = imdbpie.Imdb().get_title_auxiliary('tt%07d' % p) - except (BaseException, Exception): - continue - self._set_cache_entry(cache_id_key, show, 
expire=self.search_cache_expire) - else: - show = shows - if show: - results.extend([_make_result_dict(show)]) - if name: - for n in ([name], name)[isinstance(name, list)]: - try: - shows = self.search(n) - results.extend([_make_result_dict(s) for s in shows]) - except (BaseException, Exception) as e: - log.debug('Error searching for show: %s' % ex(e)) - seen = set() - results = [seen.add(r.id) or r for r in results if r.id not in seen] - return results - - @staticmethod - def _convert_person(person_obj, filmography=None, bio=None): - if isinstance(person_obj, dict) and 'imdb_id' in person_obj: - imdb_id = try_int(re.search(r'(\d+)', person_obj['imdb_id']).group(1)) - return Person(p_id=imdb_id, name=person_obj['name'], ids={TVINFO_IMDB: imdb_id}) - characters = [] - for known_for in (filmography and filmography['filmography']) or []: - if known_for['titleType'] not in ('tvSeries', 'tvMiniSeries'): - continue - for character in known_for.get('characters') or []: - show = TVInfoShow() - show.id = try_int(re.search(r'(\d+)', known_for.get('id')).group(1)) - show.ids.imdb = show.id - show.seriesname = known_for.get('title') - show.firstaired = known_for.get('year') - characters.append( - Character(name=character, show=show, start_year=known_for.get('startYear'), - end_year=known_for.get('endYear')) - ) - try: - birthdate = person_obj['base']['birthDate'] and tz_p.parse(person_obj['base']['birthDate']).date() - except (BaseException, Exception): - birthdate = None - try: - deathdate = person_obj['base']['deathDate'] and tz_p.parse(person_obj['base']['deathDate']).date() - except (BaseException, Exception): - deathdate = None - imdb_id = try_int(re.search(r'(\d+)', person_obj['id']).group(1)) - return Person(p_id=imdb_id, name=person_obj['base'].get('name'), ids={TVINFO_IMDB: imdb_id}, - gender=PersonGenders.imdb_map.get(person_obj['base'].get('gender'), PersonGenders.unknown), - image=person_obj['base'].get('image', {}).get('url'), - 
birthplace=person_obj['base'].get('birthPlace'), birthdate=birthdate, deathdate=deathdate, - height=person_obj['base'].get('heightCentimeters'), characters=characters, - deathplace=person_obj['base'].get('deathPlace'), - nicknames=set((person_obj['base'].get('nicknames') and person_obj['base'].get('nicknames')) - or []), - real_name=person_obj['base'].get('realName'), - akas=set((person_obj['base'].get('akas') and person_obj['base'].get('akas')) or []), bio=bio - ) - - def _search_person(self, name=None, ids=None): - # type: (AnyStr, Dict[integer_types, integer_types]) -> List[Person] - """ - search for person by name - :param name: name to search for - :param ids: dict of ids to search - :return: list of found person's - """ - results, ids = [], ids or {} - for tv_src in self.supported_person_id_searches: - if tv_src in ids: - if TVINFO_IMDB == tv_src: - try: - p = self.get_person(ids[tv_src]) - except (BaseException, Exception): - p = None - if p: - results.append(p) - if name: - cache_name_key = 'p-name-%s' % name - is_none, ps = self._get_cache_entry(cache_name_key) - if None is ps and not is_none: - try: - ps = imdbpie.Imdb().search_for_name(name) - except (BaseException, Exception): - ps = None - self._set_cache_entry(cache_name_key, ps) - if ps: - for cp in ps: - if not any(1 for c in results if cp['imdb_id'] == 'nm%07d' % c.id): - results.append(self._convert_person(cp)) - return results - - def _get_bio(self, p_id): - try: - bio = get_url('https://www.imdb.com/name/nm%07d/bio' % p_id, headers={'Accept-Language': 'en'}) - if not bio: - return - with BS4Parser(bio) as bio_item: - bv = bio_item.find(string='Mini Bio', recursive=True).find_next('p') - for a in bv.findAll('a'): - a.replaceWithChildren() - for b in bv.findAll('br'): - b.replaceWith('\n') - return bv.get_text().strip() - except (BaseException, Exception): - return - - def get_person(self, p_id, get_show_credits=False, get_images=False, **kwargs): - # type: (integer_types, bool, bool, Any) -> 
Optional[Person] - if not p_id: - return - cache_main_key, cache_bio_key, cache_credits_key = 'p-main-%s' % p_id, 'p-bio-%s' % p_id, 'p-credits-%s' % p_id - is_none, p = self._get_cache_entry(cache_main_key) - if None is p and not is_none: - try: - p = imdbpie.Imdb().get_name(imdb_id='nm%07d' % p_id) - except (BaseException, Exception): - p = None - self._set_cache_entry(cache_main_key, p) - is_none, bio = self._get_cache_entry(cache_bio_key) - if None is bio and not is_none: - bio = self._get_bio(p_id) - self._set_cache_entry(cache_bio_key, bio) - fg = None - if get_show_credits: - is_none, fg = self._get_cache_entry(cache_credits_key) - if None is fg and not is_none: - try: - fg = imdbpie.Imdb().get_name_filmography(imdb_id='nm%07d' % p_id) - except (BaseException, Exception): - fg = None - self._set_cache_entry(cache_credits_key, fg) - if p: - return self._convert_person(p, filmography=fg, bio=bio) - diff --git a/lib/imdb_api/imdb_exceptions.py b/lib/imdb_api/imdb_exceptions.py deleted file mode 100644 index eaf267a..0000000 --- a/lib/imdb_api/imdb_exceptions.py +++ /dev/null @@ -1,62 +0,0 @@ -# encoding:utf-8 - -"""Custom exceptions used or raised by tvmaze_api -""" - -__author__ = 'Prinz23' -__version__ = '1.0' - -__all__ = ['IMDbException', 'IMDbError', 'IMDbUserabort', 'IMDbShownotfound', - 'IMDbSeasonnotfound', 'IMDbEpisodenotfound', 'IMDbAttributenotfound', 'IMDbTokenexpired'] - -from lib.tvinfo_base.exceptions import * - - -class IMDbException(BaseTVinfoException): - """Any exception generated by tvdb_api - """ - pass - - -class IMDbError(BaseTVinfoError, IMDbException): - """An error with thetvdb.com (Cannot connect, for example) - """ - pass - - -class IMDbUserabort(BaseTVinfoUserabort, IMDbError): - """User aborted the interactive selection (via - the q command, ^c etc) - """ - pass - - -class IMDbShownotfound(BaseTVinfoShownotfound, IMDbError): - """Show cannot be found on thetvdb.com (non-existant show) - """ - pass - - -class 
IMDbSeasonnotfound(BaseTVinfoSeasonnotfound, IMDbError): - """Season cannot be found on thetvdb.com - """ - pass - - -class IMDbEpisodenotfound(BaseTVinfoEpisodenotfound, IMDbError): - """Episode cannot be found on thetvdb.com - """ - pass - - -class IMDbAttributenotfound(BaseTVinfoAttributenotfound, IMDbError): - """Raised if an episode does not have the requested - attribute (such as a episode name) - """ - pass - - -class IMDbTokenexpired(BaseTVinfoAuthenticationerror, IMDbError): - """token expired or missing thetvdb.com - """ - pass diff --git a/lib/libtrakt/__init__.py b/lib/libtrakt/__init__.py deleted file mode 100644 index f3dd7b1..0000000 --- a/lib/libtrakt/__init__.py +++ /dev/null @@ -1,2 +0,0 @@ -from .trakt import TraktAPI -from .indexerapiinterface import TraktIndexer diff --git a/lib/libtrakt/exceptions.py b/lib/libtrakt/exceptions.py deleted file mode 100644 index 5adf1c2..0000000 --- a/lib/libtrakt/exceptions.py +++ /dev/null @@ -1,49 +0,0 @@ -class TraktException(Exception): - pass - - -class TraktAuthException(TraktException): - pass - - -class TraktServerBusy(TraktException): - pass - - -class TraktShowNotFound(TraktException): - pass - - -class TraktCloudFlareException(TraktException): - pass - - -class TraktMethodNotExisting(TraktException): - pass - - -class TraktTimeout(TraktException): - pass - - -class TraktValueError(TraktException): - pass - - -class TraktServerError(TraktException): - def __init__(self, *args, **kwargs): - self.error_code = kwargs.get('error_code') - kwargs = {} - if 0 < len(args): - args = tuple(['%s, Server Error: %s' % (args[0], self.error_code)]) - else: - args = tuple(['Server Error: %s' % self.error_code]) - super(TraktServerError, self).__init__(*args, **kwargs) - - -class TraktLockedUserAccount(TraktException): - pass - - -class TraktInvalidGrant(TraktException): - pass diff --git a/lib/libtrakt/indexerapiinterface.py b/lib/libtrakt/indexerapiinterface.py deleted file mode 100644 index b854131..0000000 --- 
a/lib/libtrakt/indexerapiinterface.py +++ /dev/null @@ -1,348 +0,0 @@ -import logging -import re -from .exceptions import TraktException -from exceptions_helper import ConnectionSkipException, ex -from six import iteritems -from .trakt import TraktAPI -from lib.tvinfo_base.exceptions import BaseTVinfoShownotfound -from lib.tvinfo_base import TVInfoBase, TVINFO_TRAKT, TVINFO_TMDB, TVINFO_TVDB, TVINFO_TVRAGE, TVINFO_IMDB, \ - TVINFO_SLUG, Person, TVINFO_TWITTER, TVINFO_FACEBOOK, TVINFO_WIKIPEDIA, TVINFO_INSTAGRAM, Character, TVInfoShow, \ - TVInfoIDs, TVINFO_TRAKT_SLUG -from sg_helpers import try_int -from lib.dateutil.parser import parser - -# noinspection PyUnreachableCode -if False: - from typing import Any, AnyStr, Dict, List, Optional, Union - from six import integer_types - -id_map = { - 'trakt': TVINFO_TRAKT, - 'slug': TVINFO_SLUG, - 'tvdb': TVINFO_TVDB, - 'imdb': TVINFO_IMDB, - 'tmdb': TVINFO_TMDB, - 'tvrage': TVINFO_TVRAGE -} - -id_map_reverse = {v: k for k, v in iteritems(id_map)} - -tz_p = parser() -log = logging.getLogger('libtrakt.api') -log.addHandler(logging.NullHandler()) - - -def _convert_imdb_id(src, s_id): - if TVINFO_IMDB == src: - try: - return try_int(re.search(r'(\d+)', s_id).group(1), s_id) - except (BaseException, Exception): - pass - return s_id - - -class TraktSearchTypes(object): - text = 1 - trakt_id = 'trakt' - trakt_slug = 'trakt_slug' - tvdb_id = 'tvdb' - imdb_id = 'imdb' - tmdb_id = 'tmdb' - tvrage_id = 'tvrage' - all = [text, trakt_id, tvdb_id, imdb_id, tmdb_id, tvrage_id, trakt_slug] - - def __init__(self): - pass - - -map_id_search = {TVINFO_TVDB: TraktSearchTypes.tvdb_id, TVINFO_IMDB: TraktSearchTypes.imdb_id, - TVINFO_TMDB: TraktSearchTypes.tmdb_id, TVINFO_TRAKT: TraktSearchTypes.trakt_id, - TVINFO_TRAKT_SLUG: TraktSearchTypes.trakt_slug} - - -class TraktResultTypes(object): - show = 'show' - episode = 'episode' - movie = 'movie' - person = 'person' - list = 'list' - all = [show, episode, movie, person, list] - - def 
__init__(self): - pass - - -class TraktIndexer(TVInfoBase): - supported_id_searches = [TVINFO_TVDB, TVINFO_IMDB, TVINFO_TMDB, TVINFO_TRAKT, TVINFO_TRAKT_SLUG] - supported_person_id_searches = [TVINFO_TRAKT, TVINFO_IMDB, TVINFO_TMDB] - - # noinspection PyUnusedLocal - # noinspection PyDefaultArgument - def __init__(self, custom_ui=None, sleep_retry=None, search_type=TraktSearchTypes.text, - result_types=[TraktResultTypes.show], *args, **kwargs): - super(TraktIndexer, self).__init__(*args, **kwargs) - self.config.update({ - 'apikey': '', - 'debug_enabled': False, - 'custom_ui': custom_ui, - 'proxy': None, - 'cache_enabled': False, - 'cache_location': '', - 'valid_languages': [], - 'langabbv_to_id': {}, - 'language': 'en', - 'base_url': '', - 'search_type': search_type if search_type in TraktSearchTypes.all else TraktSearchTypes.text, - 'sleep_retry': sleep_retry, - 'result_types': result_types if isinstance(result_types, list) and all( - [x in TraktResultTypes.all for x in result_types]) else [TraktResultTypes.show], - }) - - @staticmethod - def _make_result_obj(shows, results): - if shows: - try: - for s in shows: - if s['ids']['trakt'] not in [i['ids'].trakt for i in results]: - s['id'] = s['ids']['trakt'] - s['ids'] = TVInfoIDs( - trakt=s['ids']['trakt'], tvdb=s['ids']['tvdb'], tmdb=s['ids']['tmdb'], - rage=s['ids']['tvrage'], - imdb=s['ids']['imdb'] and try_int(s['ids']['imdb'].replace('tt', ''), None)) - results.append(s) - except (BaseException, Exception) as e: - log.debug('Error creating result dict: %s' % ex(e)) - - def _search_show(self, name=None, ids=None, **kwargs): - # type: (AnyStr, Dict[integer_types, integer_types], Optional[Any]) -> List[TVInfoShow] - """This searches Trakt for the series name, - If a custom_ui UI is configured, it uses this to select the correct - series. 
- """ - results = [] - if ids: - for t, p in iteritems(ids): - if t in self.supported_id_searches: - if t in (TVINFO_TVDB, TVINFO_IMDB, TVINFO_TMDB, TVINFO_TRAKT, TVINFO_TRAKT_SLUG): - cache_id_key = 's-id-%s-%s' % (t, p) - is_none, shows = self._get_cache_entry(cache_id_key) - if not self.config.get('cache_search') or (None is shows and not is_none): - try: - show = self.search(p, search_type=map_id_search[t]) - except (BaseException, Exception): - continue - self._set_cache_entry(cache_id_key, show, expire=self.search_cache_expire) - else: - show = shows - else: - continue - self._make_result_obj(show, results) - if name: - names = ([name], name)[isinstance(name, list)] - len_names = len(names) - for i, n in enumerate(names, 1): - cache_name_key = 's-name-%s' % n - is_none, shows = self._get_cache_entry(cache_name_key) - if not self.config.get('cache_search') or (None is shows and not is_none): - try: - all_series = self.search(n) - self._set_cache_entry(cache_name_key, all_series, expire=self.search_cache_expire) - except (BaseException, Exception): - all_series = [] - else: - all_series = shows - if not isinstance(all_series, list): - all_series = [all_series] - - if i == len_names and 0 == len(all_series) and not results: - log.debug('Series result returned zero') - raise BaseTVinfoShownotfound('Show-name search returned zero results (cannot find show on TVDB)') - - if all_series: - if None is not self.config['custom_ui']: - log.debug('Using custom UI %s' % self.config['custom_ui'].__name__) - custom_ui = self.config['custom_ui'] - ui = custom_ui(config=self.config) - self._make_result_obj(ui.select_series(all_series), results) - - else: - self._make_result_obj(all_series, results) - - seen = set() - results = [seen.add(r['id']) or r for r in results if r['id'] not in seen] - return results - - @staticmethod - def _dict_prevent_none(d, key, default): - v = None - if isinstance(d, dict): - v = d.get(key, default) - return (v, default)[None is v] - - def 
search(self, series, search_type=None): - # type: (AnyStr, Union[int, AnyStr]) -> List - search_type = search_type or self.config['search_type'] - if TraktSearchTypes.trakt_slug == search_type: - url = '/shows/%s?extended=full' % series - elif TraktSearchTypes.text != search_type: - url = '/search/%s/%s?type=%s&extended=full&limit=100' % (search_type, (series, 'tt%07d' % series)[ - TraktSearchTypes.imdb_id == search_type and not str(series).startswith('tt')], - ','.join(self.config['result_types'])) - else: - url = '/search/%s?query=%s&extended=full&limit=100' % (','.join(self.config['result_types']), series) - filtered = [] - kwargs = {} - if None is not self.config['sleep_retry']: - kwargs['sleep_retry'] = self.config['sleep_retry'] - try: - from sickbeard.helpers import clean_data - resp = TraktAPI().trakt_request(url, failure_monitor=False, raise_skip_exception=False, **kwargs) - if len(resp): - if isinstance(resp, dict): - resp = [{'type': 'show', 'score': 1, 'show': resp}] - for d in resp: - if isinstance(d, dict) and 'type' in d and d['type'] in self.config['result_types']: - for k, v in iteritems(d): - d[k] = clean_data(v) - if 'show' in d and TraktResultTypes.show == d['type']: - d.update(d['show']) - del d['show'] - d['seriesname'] = self._dict_prevent_none(d, 'title', '') - d['genres_list'] = d.get('genres', []) - d['genres'] = ', '.join(['%s' % v for v in d.get('genres', []) or [] if v]) - d['firstaired'] = (d.get('first_aired') and - re.sub(r'T.*$', '', str(d.get('first_aired'))) or d.get('year')) - filtered.append(d) - except (ConnectionSkipException, TraktException) as e: - log.debug('Could not connect to Trakt service: %s' % ex(e)) - - return filtered - - @staticmethod - def _convert_person_obj(person_obj): - # type: (Dict) -> Person - try: - birthdate = person_obj['birthday'] and tz_p.parse(person_obj['birthday']).date() - except (BaseException, Exception): - birthdate = None - try: - deathdate = person_obj['death'] and 
tz_p.parse(person_obj['death']).date() - except (BaseException, Exception): - deathdate = None - - return Person(p_id=person_obj['ids']['trakt'], - name=person_obj['name'], - bio=person_obj['biography'], - birthdate=birthdate, - deathdate=deathdate, - homepage=person_obj['homepage'], - birthplace=person_obj['birthplace'], - social_ids={TVINFO_TWITTER: person_obj['social_ids']['twitter'], - TVINFO_FACEBOOK: person_obj['social_ids']['facebook'], - TVINFO_INSTAGRAM: person_obj['social_ids']['instagram'], - TVINFO_WIKIPEDIA: person_obj['social_ids']['wikipedia'] - }, - ids={TVINFO_TRAKT: person_obj['ids']['trakt'], TVINFO_SLUG: person_obj['ids']['slug'], - TVINFO_IMDB: - person_obj['ids']['imdb'] and - try_int(person_obj['ids']['imdb'].replace('nm', ''), None), - TVINFO_TMDB: person_obj['ids']['tmdb'], - TVINFO_TVRAGE: person_obj['ids']['tvrage']}) - - def get_person(self, p_id, get_show_credits=False, get_images=False, **kwargs): - # type: (integer_types, bool, bool, Any) -> Optional[Person] - """ - get person's data for id or list of matching persons for name - - :param p_id: persons id - :param get_show_credits: get show credits (only for native id) - :param get_images: get images for person - :return: person object - """ - if not p_id: - return - - urls = [('/people/%s?extended=full' % p_id, False)] - if get_show_credits: - urls.append(('/people/%s/shows?extended=full' % p_id, True)) - - if not urls: - return - - result = None - - for url, show_credits in urls: - try: - cache_key_name = 'p-%s-%s' % (('main', 'credits')[show_credits], p_id) - is_none, resp = self._get_cache_entry(cache_key_name) - if None is resp and not is_none: - resp = TraktAPI().trakt_request(url, **kwargs) - self._set_cache_entry(cache_key_name, resp) - if resp: - if show_credits: - pc = [] - for c in resp.get('cast') or []: - show = TVInfoShow() - show.id = c['show']['ids'].get('trakt') - show.seriesname = c['show']['title'] - show.ids = TVInfoIDs(ids={id_map[src]: 
_convert_imdb_id(id_map[src], sid) - for src, sid in iteritems(c['show']['ids']) if src in id_map}) - show.network = c['show']['network'] - show.firstaired = c['show']['first_aired'] - show.overview = c['show']['overview'] - show.status = c['show']['status'] - show.imdb_id = c['show']['ids'].get('imdb') - show.runtime = c['show']['runtime'] - show.genre_list = c['show']['genres'] - for ch in c.get('characters') or []: - pc.append( - Character( - name=ch, regular=c.get('series_regular'), - show=show - ) - ) - result.characters = pc - else: - result = self._convert_person_obj(resp) - except ConnectionSkipException as e: - raise e - except TraktException as e: - log.debug('Could not connect to Trakt service: %s' % ex(e)) - return result - - def _search_person(self, name=None, ids=None): - # type: (AnyStr, Dict[integer_types, integer_types]) -> List[Person] - urls, result, ids = [], [], ids or {} - for tv_src in self.supported_person_id_searches: - if tv_src in ids: - if TVINFO_TRAKT == tv_src: - url = '/people/%s?extended=full' % ids.get(tv_src) - elif tv_src in (TVINFO_IMDB, TVINFO_TMDB): - url = '/search/%s/%s?type=person&extended=full&limit=100' % \ - (id_map_reverse[tv_src], (ids.get(tv_src), 'nm%07d' % ids.get(tv_src))[TVINFO_IMDB == tv_src]) - else: - continue - urls.append((tv_src, ids.get(tv_src), url)) - if name: - urls.append(('text', name, '/search/person?query=%s&extended=full&limit=100' % name)) - - for src, s_id, url in urls: - try: - cache_key_name = 'p-src-%s-%s' % (src, s_id) - is_none, resp = self._get_cache_entry(cache_key_name) - if None is resp and not is_none: - resp = TraktAPI().trakt_request(url) - self._set_cache_entry(cache_key_name, resp) - if resp: - for per in (resp, [{'person': resp, 'type': 'person'}])[url.startswith('/people')]: - if 'person' != per['type']: - continue - person = per['person'] - if not any(1 for p in result if person['ids']['trakt'] == p.id): - result.append(self._convert_person_obj(person)) - except 
ConnectionSkipException as e: - raise e - except TraktException as e: - log.debug('Could not connect to Trakt service: %s' % ex(e)) - - return result diff --git a/lib/libtrakt/trakt.py b/lib/libtrakt/trakt.py deleted file mode 100644 index 9c29e20..0000000 --- a/lib/libtrakt/trakt.py +++ /dev/null @@ -1,381 +0,0 @@ -import requests -import certifi -import json -import sickbeard -import time -import datetime -import logging -from exceptions_helper import ex, ConnectionSkipException -from sg_helpers import get_url, try_int - -from .exceptions import * - -# noinspection PyUnreachableCode -if False: - from typing import Any, AnyStr, Dict - -log = logging.getLogger('libtrakt') -log.addHandler(logging.NullHandler()) - - -class TraktAccount(object): - max_auth_fail = 9 - - def __init__(self, account_id=None, token='', refresh_token='', auth_fail=0, last_fail=None, token_valid_date=None): - self.account_id = account_id - self._name = '' - self._slug = '' - self.token = token - self.refresh_token = refresh_token - self.auth_fail = auth_fail - self.last_fail = last_fail - self.token_valid_date = token_valid_date - - def get_name_slug(self): - try: - resp = TraktAPI().trakt_request('users/settings', send_oauth=self.account_id, sleep_retry=20) - self.reset_auth_failure() - if 'user' in resp: - self._name = resp['user']['username'] - self._slug = resp['user']['ids']['slug'] - except TraktAuthException: - self.inc_auth_failure() - self._name = '' - except TraktException: - pass - - @property - def slug(self): - if self.token and self.active: - if not self._slug: - self.get_name_slug() - else: - self._slug = '' - return self._slug - - @property - def name(self): - if self.token and self.active: - if not self._name: - self.get_name_slug() - else: - self._name = '' - - return self._name - - def reset_name(self): - self._name = '' - - @property - def active(self): - return self.auth_fail < self.max_auth_fail and self.token - - @property - def needs_refresh(self): - return not 
self.token_valid_date or self.token_valid_date - datetime.datetime.now() < datetime.timedelta(days=3) - - @property - def token_expired(self): - return self.token_valid_date and self.token_valid_date < datetime.datetime.now() - - def reset_auth_failure(self): - if 0 != self.auth_fail: - self.auth_fail = 0 - self.last_fail = None - - def inc_auth_failure(self): - self.auth_fail += 1 - self.last_fail = datetime.datetime.now() - - def auth_failure(self): - if self.auth_fail < self.max_auth_fail: - if self.last_fail: - time_diff = datetime.datetime.now() - self.last_fail - if 0 == self.auth_fail % 3: - if datetime.timedelta(days=1) < time_diff: - self.inc_auth_failure() - sickbeard.save_config() - elif datetime.timedelta(minutes=15) < time_diff: - self.inc_auth_failure() - if self.auth_fail == self.max_auth_fail or datetime.timedelta(hours=6) < time_diff: - sickbeard.save_config() - else: - self.inc_auth_failure() - - -class TraktAPI(object): - max_retrys = 3 - - def __init__(self, timeout=None): - - self.session = requests.Session() - self.verify = sickbeard.TRAKT_VERIFY and certifi.where() - self.timeout = timeout or sickbeard.TRAKT_TIMEOUT - self.auth_url = sickbeard.TRAKT_BASE_URL - self.api_url = sickbeard.TRAKT_BASE_URL - self.headers = {'Content-Type': 'application/json', - 'trakt-api-version': '2', - 'trakt-api-key': sickbeard.TRAKT_CLIENT_ID} - - @staticmethod - def build_config_string(data): - return '!!!'.join('%s|%s|%s|%s|%s|%s' % ( - value.account_id, value.token, value.refresh_token, value.auth_fail, - value.last_fail.strftime('%Y%m%d%H%M') if value.last_fail else '0', - value.token_valid_date.strftime('%Y%m%d%H%M%S') if value.token_valid_date else '0') - for (key, value) in data.items()) - - @staticmethod - def read_config_string(data): - return dict((int(a.split('|')[0]), TraktAccount( - int(a.split('|')[0]), a.split('|')[1], a.split('|')[2], int(a.split('|')[3]), - datetime.datetime.strptime(a.split('|')[4], '%Y%m%d%H%M') if a.split('|')[4] != '0' else 
None, - datetime.datetime.strptime(a.split('|')[5], '%Y%m%d%H%M%S') if a.split('|')[5] != '0' else None)) - for a in data.split('!!!') if data) - - @staticmethod - def add_account(token, refresh_token, token_valid_date): - k = max(sickbeard.TRAKT_ACCOUNTS.keys() or [0]) + 1 - sickbeard.TRAKT_ACCOUNTS[k] = TraktAccount(account_id=k, token=token, refresh_token=refresh_token, - token_valid_date=token_valid_date) - sickbeard.save_config() - return k - - @staticmethod - def replace_account(account, token, refresh_token, token_valid_date, refresh): - if account in sickbeard.TRAKT_ACCOUNTS: - sickbeard.TRAKT_ACCOUNTS[account].token = token - sickbeard.TRAKT_ACCOUNTS[account].refresh_token = refresh_token - sickbeard.TRAKT_ACCOUNTS[account].token_valid_date = token_valid_date - if not refresh: - sickbeard.TRAKT_ACCOUNTS[account].reset_name() - sickbeard.TRAKT_ACCOUNTS[account].reset_auth_failure() - sickbeard.save_config() - return True - return False - - @staticmethod - def delete_account(account): - if account in sickbeard.TRAKT_ACCOUNTS: - try: - TraktAPI().trakt_request('/oauth/revoke', send_oauth=account, method='POST') - except TraktException: - log.info('Failed to remove account from trakt.tv') - sickbeard.TRAKT_ACCOUNTS.pop(account) - sickbeard.save_config() - return True - return False - - def trakt_token(self, trakt_pin=None, refresh=False, count=0, account=None): - if self.max_retrys <= count: - return False - 0 < count and time.sleep(3) - - data = { - 'client_id': sickbeard.TRAKT_CLIENT_ID, - 'client_secret': sickbeard.TRAKT_CLIENT_SECRET, - 'redirect_uri': 'urn:ietf:wg:oauth:2.0:oob' - } - - if refresh: - if None is not account and account in sickbeard.TRAKT_ACCOUNTS: - data['grant_type'] = 'refresh_token' - data['refresh_token'] = sickbeard.TRAKT_ACCOUNTS[account].refresh_token - else: - return False - else: - data['grant_type'] = 'authorization_code' - if trakt_pin: - data['code'] = trakt_pin - - headers = {'Content-Type': 'application/json'} - - try: - now 
= datetime.datetime.now() - resp = self.trakt_request('oauth/token', data=data, headers=headers, url=self.auth_url, - count=count, sleep_retry=0) - except TraktInvalidGrant: - if None is not account and account in sickbeard.TRAKT_ACCOUNTS: - sickbeard.TRAKT_ACCOUNTS[account].token = '' - sickbeard.TRAKT_ACCOUNTS[account].refresh_token = '' - sickbeard.TRAKT_ACCOUNTS[account].token_valid_date = None - sickbeard.save_config() - return False - except (TraktAuthException, TraktException): - return False - - if 'access_token' in resp and 'refresh_token' in resp and 'expires_in' in resp: - token_valid_date = now + datetime.timedelta(seconds=try_int(resp['expires_in'])) - if refresh or (not refresh and None is not account and account in sickbeard.TRAKT_ACCOUNTS): - return self.replace_account(account, resp['access_token'], resp['refresh_token'], - token_valid_date, refresh) - return self.add_account(resp['access_token'], resp['refresh_token'], token_valid_date) - - return False - - def trakt_request(self, path, data=None, headers=None, url=None, count=0, sleep_retry=60, - send_oauth=None, method=None, raise_skip_exception=True, failure_monitor=True, **kwargs): - # type: (AnyStr, Dict, Dict, AnyStr, int, int, AnyStr, AnyStr, bool, bool, Any) -> Dict - - if method not in ['GET', 'POST', 'PUT', 'DELETE', None]: - return {} - if None is method: - method = ('GET', 'POST')['data' in kwargs.keys() or None is not data] - if 'oauth/token' != path and None is send_oauth and method in ['POST', 'PUT', 'DELETE']: - return {} - - count += 1 - if count > self.max_retrys: - return {} - - # wait before retry - if 'users/settings' != path: - 1 < count and time.sleep(sleep_retry) - - headers = headers or self.headers - if None is not send_oauth and send_oauth in sickbeard.TRAKT_ACCOUNTS: - if sickbeard.TRAKT_ACCOUNTS[send_oauth].active: - if sickbeard.TRAKT_ACCOUNTS[send_oauth].needs_refresh: - self.trakt_token(refresh=True, count=0, account=send_oauth) - if 
sickbeard.TRAKT_ACCOUNTS[send_oauth].token_expired or \ - not sickbeard.TRAKT_ACCOUNTS[send_oauth].active: - return {} - headers['Authorization'] = 'Bearer %s' % sickbeard.TRAKT_ACCOUNTS[send_oauth].token - else: - return {} - - kwargs = dict(headers=headers, timeout=self.timeout, verify=self.verify) - if data: - kwargs['data'] = json.dumps(data) - - url = url or self.api_url - try: - resp = get_url('%s%s' % (url, path), session=self.session, use_method=method, return_response=True, - raise_exceptions=True, raise_status_code=True, raise_skip_exception=raise_skip_exception, - failure_monitor=failure_monitor, **kwargs) - # resp = self.session.request(method, '%s%s' % (url, path), **kwargs) - - if 'DELETE' == method: - result = None - if 204 == resp.status_code: - result = {'result': 'success'} - elif 404 == resp.status_code: - result = {'result': 'failed'} - if result and None is not send_oauth and send_oauth in sickbeard.TRAKT_ACCOUNTS: - sickbeard.TRAKT_ACCOUNTS[send_oauth].reset_auth_failure() - return result - resp.raise_for_status() - return {} - - # check for http errors and raise if any are present - resp.raise_for_status() - - # convert response to json - resp = resp.json() - - except requests.RequestException as e: - code = getattr(e.response, 'status_code', None) - if not code: - if 'timed out' in ex(e): - log.warning(u'Timeout connecting to Trakt') - if count >= self.max_retrys: - raise TraktTimeout() - return self.trakt_request(path, data, headers, url, count=count, sleep_retry=sleep_retry, - send_oauth=send_oauth, method=method) - # This is pretty much a fatal error if there is no status_code - # It means there basically was no response at all - else: - log.warning(u'Could not connect to Trakt. Error: %s' % ex(e)) - raise TraktException('Could not connect to Trakt. 
Error: %s' % ex(e)) - - elif 502 == code: - # Retry the request, Cloudflare had a proxying issue - log.warning(u'Retrying Trakt api request: %s' % path) - if count >= self.max_retrys: - raise TraktCloudFlareException() - return self.trakt_request(path, data, headers, url, count=count, sleep_retry=sleep_retry, - send_oauth=send_oauth, method=method) - - elif 401 == code and 'oauth/token' != path: - if None is not send_oauth: - if sickbeard.TRAKT_ACCOUNTS[send_oauth].needs_refresh: - if self.trakt_token(refresh=True, count=count, account=send_oauth): - return self.trakt_request(path, data, headers, url, count=count, sleep_retry=sleep_retry, - send_oauth=send_oauth, method=method) - - log.warning(u'Unauthorized. Please check your Trakt settings') - sickbeard.TRAKT_ACCOUNTS[send_oauth].auth_failure() - raise TraktAuthException() - - # sometimes the trakt server sends invalid token error even if it isn't - sickbeard.TRAKT_ACCOUNTS[send_oauth].auth_failure() - if count >= self.max_retrys: - raise TraktAuthException() - - return self.trakt_request(path, data, headers, url, count=count, sleep_retry=sleep_retry, - send_oauth=send_oauth, method=method) - - raise TraktAuthException() - elif code in (500, 501, 503, 504, 520, 521, 522): - if count >= self.max_retrys: - log.warning(u'Trakt may have some issues and it\'s unavailable. Code: %s' % code) - raise TraktServerError(error_code=code) - # http://docs.trakt.apiary.io/#introduction/status-codes - log.warning(u'Trakt may have some issues and it\'s unavailable. 
Trying again') - return self.trakt_request(path, data, headers, url, count=count, sleep_retry=sleep_retry, - send_oauth=send_oauth, method=method) - elif 404 == code: - log.warning(u'Trakt error (404) the resource does not exist: %s%s' % (url, path)) - raise TraktMethodNotExisting('Trakt error (404) the resource does not exist: %s%s' % (url, path)) - elif 429 == code: - if count >= self.max_retrys: - log.warning(u'Trakt replied with Rate-Limiting, maximum retries exceeded.') - raise TraktServerError(error_code=code) - r_headers = getattr(e.response, 'headers', None) - if None is not r_headers: - wait_seconds = min(try_int(r_headers.get('Retry-After', 60), 60), 150) - else: - wait_seconds = 60 - log.warning('Trakt replied with Rate-Limiting, waiting %s seconds.' % wait_seconds) - wait_seconds = (wait_seconds, 60)[0 > wait_seconds] - wait_seconds -= sleep_retry - if 0 < wait_seconds: - time.sleep(wait_seconds) - return self.trakt_request(path, data, headers, url, count=count, sleep_retry=sleep_retry, - send_oauth=send_oauth, method=method) - elif 423 == code: - # locked account - log.error('An application that is NOT SickGear has flooded the Trakt API and they have locked access' - ' to your account. They request you contact their support at https://support.trakt.tv/' - ' This is not a fault of SickGear because it does *not* sync data or send the type of data' - ' that triggers a Trakt access lock.' - ' SickGear may only send a notification on a media process completion if set up for it.') - raise TraktLockedUserAccount() - elif 400 == code and 'invalid_grant' in getattr(e, 'text', ''): - raise TraktInvalidGrant('Error: invalid_grant. The provided authorization grant is invalid, expired, ' - 'revoked, does not match the redirection URI used in the authorization request,' - ' or was issued to another client.') - else: - log.error(u'Could not connect to Trakt. Code error: {0}'.format(code)) - raise TraktException('Could not connect to Trakt. 
Code error: %s' % code) - except ConnectionSkipException as e: - log.error('Failure handling error') - raise e - except ValueError as e: - log.error(u'Value Error: %s' % ex(e)) - raise TraktValueError(u'Value Error: %s' % ex(e)) - except (BaseException, Exception) as e: - log.error('Exception: %s' % ex(e)) - raise TraktException('Could not connect to Trakt. Code error: %s' % ex(e)) - - # check and confirm Trakt call did not fail - if isinstance(resp, dict) and 'failure' == resp.get('status', None): - if 'message' in resp: - raise TraktException(resp['message']) - if 'error' in resp: - raise TraktException(resp['error']) - raise TraktException('Unknown Error') - - if None is not send_oauth and send_oauth in sickbeard.TRAKT_ACCOUNTS: - sickbeard.TRAKT_ACCOUNTS[send_oauth].reset_auth_failure() - return resp diff --git a/lib/tmdb_api/__init__.py b/lib/tmdb_api/__init__.py deleted file mode 100644 index e69de29..0000000 diff --git a/lib/tmdb_api/tmdb_api.py b/lib/tmdb_api/tmdb_api.py deleted file mode 100644 index ec7b78f..0000000 --- a/lib/tmdb_api/tmdb_api.py +++ /dev/null @@ -1,655 +0,0 @@ -# encoding:utf-8 -# author:Prinz23 -# project:tmdb_api - -__author__ = 'Prinz23' -__version__ = '1.0' -__api_version__ = '1.0.0' - -import json -import logging -import datetime -import re - -from six import iteritems -from sg_helpers import get_url, try_int -from lib.dateutil.parser import parser -from lib.dateutil.tz.tz import _datetime_to_timestamp -from lib.exceptions_helper import ConnectionSkipException, ex -from .tmdb_exceptions import * -from lib.tvinfo_base import TVInfoBase, TVInfoImage, TVInfoImageSize, TVInfoImageType, Character, Crew, \ - crew_type_names, Person, RoleTypes, TVInfoEpisode, TVInfoIDs, TVInfoSeason, PersonGenders, TVINFO_TVMAZE, \ - TVINFO_TVDB, TVINFO_IMDB, TVINFO_TMDB, TVINFO_TWITTER, TVINFO_INSTAGRAM, TVINFO_FACEBOOK, TVInfoShow, \ - TVInfoSocialIDs, TVInfoNetwork -from lib import tmdbsimple - -# noinspection PyUnreachableCode -if False: - from typing 
import Any, AnyStr, Dict, List, Optional, Union - from six import integer_types - -log = logging.getLogger('tmdb.api') -log.addHandler(logging.NullHandler()) -tz_p = parser() -tmdbsimple.API_KEY = 'edc5f123313769de83a71e157758030b' - -id_map = {TVINFO_IMDB: 'imdb_id', TVINFO_TVDB: 'tvdb_id', TVINFO_FACEBOOK: 'facebook_id', TVINFO_TWITTER: 'twitter_id', - TVINFO_INSTAGRAM: 'instagram_id'} - -tv_show_map = {'name': 'seriesname', 'id': 'id', 'first_air_date': 'firstaired', 'status': 'status', - 'original_language': 'language'} - - -def tmdb_GET(self, path, params=None): - url = self._get_complete_url(path) - params = self._get_params(params) - return get_url(url=url, params=params, json=True, raise_skip_exception=True) - - -def tmdb_POST(self, path, params=None, payload=None): - url = self._get_complete_url(path) - params = self._get_params(params) - data = json.dumps(payload) if payload else payload - return get_url(url=url, params=params, post_data=data, json=True, raise_skip_exception=True) - - -tmdbsimple.base.TMDB._GET = tmdb_GET -tmdbsimple.base.TMDB._POST = tmdb_POST - -_TMDB_CONSTANTS_CACHE = {'date': datetime.datetime(2000, 1, 1), 'data': {}} - - -def get_tmdb_constants(): - # type: (...) 
-> Dict - """return tmdbsimple Configuration().info() or cached copy""" - global _TMDB_CONSTANTS_CACHE - # only retrieve info data if older then 3 days - if 3 < (datetime.datetime.now() - _TMDB_CONSTANTS_CACHE['date']).days or not _TMDB_CONSTANTS_CACHE['data']: - try: - tv_genres = {g['id']: g['name'] for g in tmdbsimple.Genres().tv_list()['genres']} - response = tmdbsimple.Configuration().info() - sorted_poster_sizes = sorted((try_int(_p.replace('w', '')) for _p in response['images']['poster_sizes'] - if 'original' != _p), reverse=True) - sorted_backdrop_sizes = sorted((try_int(_p.replace('w', '')) for _p in response['images']['backdrop_sizes'] - if 'original' != _p), reverse=True) - sorted_profile_sizes = sorted((try_int(_p.replace('w', '')) for _p in response['images']['profile_sizes'] - if 'original' != _p and not _p.startswith('h')), reverse=True) - _TMDB_CONSTANTS_CACHE = { - 'date': datetime.datetime.now(), - 'data': { - 'genres': tv_genres, - 'img_base_url': response['images']['secure_base_url'], - 'img_profile_sizes': response['images']['profile_sizes'], - 'poster_sizes': response['images']['poster_sizes'], - 'backdrop_sizes': response['images']['backdrop_sizes'], - 'logo_sizes': response['images']['logo_sizes'], - 'still_sizes': response['images']['still_sizes'], - 'change_keys': response['change_keys'], - 'size_map': { - TVInfoImageType.poster: { - TVInfoImageSize.original: 'original', - TVInfoImageSize.medium: 'w%s' % next((s for s in sorted_poster_sizes if s < 400), 342), - TVInfoImageSize.small: 'w%s' % next((s for s in sorted_poster_sizes if s < 200), 185) - }, - TVInfoImageType.fanart: { - TVInfoImageSize.original: 'original', - TVInfoImageSize.medium: 'w%s' % next((s for s in sorted_backdrop_sizes if s < 1000), 780), - TVInfoImageSize.small: 'w%s' % next((s for s in sorted_backdrop_sizes if s < 500), 300) - }, - TVInfoImageType.person_poster: { - TVInfoImageSize.original: 'original', - TVInfoImageSize.medium: 'w%s' % next((s for s in 
sorted_profile_sizes if s < 400), 185), - TVInfoImageSize.small: 'w%s' % next((s for s in sorted_profile_sizes if s < 150), 45) - } - } - } - } - except (BaseException, Exception): - poster_sizes = ['w92', 'w154', 'w185', 'w342', 'w500', 'w780', 'original'] - sorted_poster_sizes = sorted((try_int(_p.replace('w', '')) for _p in poster_sizes - if 'original' != _p), reverse=True) - backdrop_sizes = ['w300', 'w780', 'w1280', 'original'] - sorted_backdrop_sizes = sorted((try_int(_p.replace('w', '')) for _p in backdrop_sizes - if 'original' != _p), reverse=True) - profile_sizes = ['w45', 'w185', 'h632', 'original'] - sorted_profile_sizes = sorted((try_int(_p.replace('w', '')) for _p in profile_sizes - if 'original' != _p and not _p.startswith('h')), reverse=True) - _TMDB_CONSTANTS_CACHE['data'] = { - 'genres': {10759: 'Action & Adventure', 16: 'Animation', 35: 'Comedy', 80: 'Crime', 99: 'Documentary', - 18: 'Drama', 10751: 'Family', 10762: 'Kids', 9648: 'Mystery', 10763: 'News', - 10764: 'Reality', 10765: 'Sci-Fi & Fantasy', 10766: 'Soap', 10767: 'Talk', - 10768: 'War & Politics', 37: 'Western'}, - 'img_base_url': r'https://image.tmdb.org/t/p/', - 'img_profile_sizes': ['w45', 'w185', 'h632', 'original'], - 'poster_sizes': poster_sizes, - 'backdrop_sizes': backdrop_sizes, - 'logo_sizes': ['w45', 'w92', 'w154', 'w185', 'w300', 'w500', 'original'], - 'still_sizes': ['w92', 'w185', 'w300', 'original'], - 'change_keys': ['adult', 'air_date', 'also_known_as', 'alternative_titles', 'biography', 'birthday', - 'budget', 'cast', 'certifications', 'character_names', 'created_by', 'crew', 'deathday', - 'episode', 'episode_number', 'episode_run_time', 'freebase_id', 'freebase_mid', - 'general', 'genres', 'guest_stars', 'homepage', 'images', 'imdb_id', 'languages', - 'name', 'network', 'origin_country', 'original_name', 'original_title', 'overview', - 'parts', 'place_of_birth', 'plot_keywords', 'production_code', 'production_companies', - 'production_countries', 'releases', 'revenue', 
'runtime', 'season', 'season_number', - 'season_regular', 'spoken_languages', 'status', 'tagline', 'title', 'translations', - 'tvdb_id', 'tvrage_id', 'type', 'video', 'videos'], - 'size_map': { - TVInfoImageType.poster: { - TVInfoImageSize.original: 'original', - TVInfoImageSize.medium: 'w%s' % next((s for s in sorted_poster_sizes if s < 400), 342), - TVInfoImageSize.small: 'w%s' % next((s for s in sorted_poster_sizes if s < 200), 185) - }, - TVInfoImageType.fanart: { - TVInfoImageSize.original: 'original', - TVInfoImageSize.medium: 'w%s' % next((s for s in sorted_backdrop_sizes if s < 1000), 780), - TVInfoImageSize.small: 'w%s' % next((s for s in sorted_backdrop_sizes if s < 500), 300) - }, - TVInfoImageType.person_poster: { - TVInfoImageSize.original: 'original', - TVInfoImageSize.medium: 'w%s' % next((s for s in sorted_profile_sizes if s < 400), 185), - TVInfoImageSize.small: 'w%s' % next((s for s in sorted_profile_sizes if s < 150), 45) - } - } - } - pass - return _TMDB_CONSTANTS_CACHE['data'] - - -class TmdbIndexer(TVInfoBase): - API_KEY = tmdbsimple.API_KEY - supported_person_id_searches = [TVINFO_TMDB, TVINFO_IMDB, TVINFO_TWITTER, TVINFO_INSTAGRAM, TVINFO_FACEBOOK] - supported_id_searches = [TVINFO_TMDB, TVINFO_IMDB, TVINFO_TVDB] - - # noinspection PyUnusedLocal - # noinspection PyDefaultArgument - def __init__(self, *args, **kwargs): - super(TmdbIndexer, self).__init__(*args, **kwargs) - response = get_tmdb_constants() - self.img_base_url = response.get('img_base_url') - self.size_map = response.get('size_map') - self.tv_genres = response.get('genres') - - def _search_show(self, name=None, ids=None, **kwargs): - # type: (AnyStr, Dict[integer_types, integer_types], Optional[Any]) -> List[TVInfoShow] - """This searches TMDB for the series name, - """ - def _make_result_dict(s): - tvs = TVInfoShow() - tvs.seriesname, tvs.id, tvs.firstaired, tvs.genre_list, tvs.overview, tvs.poster, tvs.ids = \ - s['name'], s['id'], s.get('first_air_date'), \ - 
[self.tv_genres.get(g) for g in s.get('genre_ids') or []], \ - s.get('overview'), s.get('poster_path') and '%s%s%s' % ( - self.img_base_url, self.size_map[TVInfoImageType.poster][TVInfoImageSize.original], - s.get('poster_path')), \ - TVInfoIDs(tvdb=s.get('external_ids') and s['external_ids'].get('tvdb_id'), - tmdb=s['id'], rage=s.get('external_ids') and s['external_ids'].get('tvrage_id'), - imdb=s.get('external_ids') and s['external_ids'].get('imdb_id') and - try_int(s['external_ids'].get('imdb_id', '').replace('tt', ''), None)) - return tvs - - results = [] - if ids: - for t, p in iteritems(ids): - if t in self.supported_id_searches: - if t == TVINFO_TMDB: - cache_id_key = 's-id-%s-%s' % (TVINFO_TMDB, p) - is_none, shows = self._get_cache_entry(cache_id_key) - if not self.config.get('cache_search') or (None is shows and not is_none): - try: - show = tmdbsimple.TV(id=p).info(append_to_response='external_ids') - except (BaseException, Exception): - continue - self._set_cache_entry(cache_id_key, show, expire=self.search_cache_expire) - else: - show = shows - if show: - results.extend([_make_result_dict(show)]) - elif t in (TVINFO_IMDB, TVINFO_TVDB): - cache_id_key = 's-id-%s-%s' % (t, p) - is_none, shows = self._get_cache_entry(cache_id_key) - if not self.config.get('cache_search') or (None is shows and not is_none): - try: - show = tmdbsimple.Find(id=(p, 'tt%07d' % p)[t == TVINFO_IMDB]).info( - external_source=id_map[t]) - if show.get('tv_results') and 1 == len(show['tv_results']): - show = tmdbsimple.TV(id=show['tv_results'][0]['id']).info( - append_to_response='external_ids') - except (BaseException, Exception): - continue - self._set_cache_entry(cache_id_key, show, expire=self.search_cache_expire) - else: - show = shows - if show: - results.extend([_make_result_dict(s) - for s in show.get('tv_results') or (show.get('id') and [show]) or []]) - if name: - for n in ([name], name)[isinstance(name, list)]: - cache_name_key = 's-name-%s' % n - is_none, shows = 
self._get_cache_entry(cache_name_key) - if not self.config.get('cache_search') or (None is shows and not is_none): - try: - shows = tmdbsimple.Search().tv(query=n) - self._set_cache_entry(cache_name_key, shows, expire=self.search_cache_expire) - results.extend([_make_result_dict(s) for s in shows.get('results') or []]) - except (BaseException, Exception) as e: - log.debug('Error searching for show: %s' % ex(e)) - else: - results.extend([_make_result_dict(s) for s in (shows and shows.get('results')) or []]) - seen = set() - results = [seen.add(r.id) or r for r in results if r.id not in seen] - return results - - def _convert_person_obj(self, person_obj): - gender = PersonGenders.tmdb_map.get(person_obj.get('gender'), PersonGenders.unknown) - try: - birthdate = person_obj.get('birthday') and tz_p.parse(person_obj.get('birthday')).date() - except (BaseException, Exception): - birthdate = None - try: - deathdate = person_obj.get('deathday') and tz_p.parse(person_obj.get('deathday')).date() - except (BaseException, Exception): - deathdate = None - - cast = person_obj.get('cast') or person_obj.get('tv_credits', {}).get('cast') - - characters = [] - for character in cast or []: - show = TVInfoShow() - show.id = character.get('id') - show.ids = TVInfoIDs(ids={TVINFO_TMDB: show.id}) - show.seriesname = character.get('original_name') - show.overview = character.get('overview') - show.firstaired = character.get('first_air_date') - characters.append( - Character(name=character.get('character'), show=show) - ) - - pi = person_obj.get('images') - image_url, main_image, thumb_url, main_thumb, image_list = None, None, None, None, [] - if pi: - for i in sorted(pi['profiles'], key=lambda a: a['vote_average'] or 0, reverse=True): - if not any((main_image, main_thumb)): - if 500 < i['height'] and not image_url: - image_url = '%s%s%s' % \ - (self.img_base_url, self.size_map[TVInfoImageType.person_poster][TVInfoImageSize.original], - i['file_path']) - thumb_url = '%s%s%s' % \ - 
(self.img_base_url, self.size_map[TVInfoImageType.person_poster][TVInfoImageSize.medium], - i['file_path']) - elif not thumb_url: - thumb_url = '%s%s%s' % \ - (self.img_base_url, self.size_map[TVInfoImageType.person_poster][TVInfoImageSize.original], - i['file_path']) - if image_url and thumb_url: - main_image_url, main_thumb = image_url, thumb_url - image_list.append( - TVInfoImage( - image_type=TVInfoImageType.person_poster, - sizes={_s: '%s%s%s' % (self.img_base_url, - self.size_map[TVInfoImageType.person_poster][_s], i['file_path']) - for _s in (TVInfoImageSize.original, TVInfoImageSize.medium, TVInfoImageSize.small)}, - aspect_ratio=i['aspect_ratio'], - height=i['height'], - width=i['width'], - lang=i['iso_639_1'], - rating=i['vote_average'], - votes=i['vote_count'] - ) - ) - - return Person(p_id=person_obj.get('id'), gender=gender, name=person_obj.get('name'), birthdate=birthdate, - deathdate=deathdate, bio=person_obj.get('biography'), birthplace=person_obj.get('place_of_birth'), - homepage=person_obj.get('homepage'), characters=characters, image=main_image, - thumb_url=main_thumb, images=image_list, akas=set(person_obj.get('also_known_as') or []), - ids={TVINFO_TMDB: person_obj.get('id'), - TVINFO_IMDB: - person_obj.get('imdb_id') and try_int(person_obj['imdb_id'].replace('nm', ''), None)}) - - def _search_person(self, name=None, ids=None): - # type: (AnyStr, Dict[integer_types, integer_types]) -> List[Person] - """ - search for person by name - :param name: name to search for - :param ids: dict of ids to search - :return: list of found person's - """ - results, ids = [], ids or {} - search_text_obj = tmdbsimple.Search() - for tv_src in self.supported_person_id_searches: - if tv_src in ids: - if TVINFO_TMDB == tv_src: - try: - people_obj = self.get_person(ids[tv_src]) - except ConnectionSkipException as e: - raise e - except (BaseException, Exception): - people_obj = None - if people_obj and not any(1 for r in results if r.id == people_obj.id): - 
results.append(people_obj) - elif tv_src in (TVINFO_IMDB, TVINFO_TMDB): - try: - cache_key_name = 'p-src-%s-%s' % (tv_src, ids.get(tv_src)) - is_none, result_objs = self._get_cache_entry(cache_key_name) - if None is result_objs and not is_none: - result_objs = tmdbsimple.Find(id=(ids.get(tv_src), - 'nm%07d' % ids.get(tv_src))[TVINFO_IMDB == tv_src]).info( - external_source=id_map[tv_src]).get('person_results') - self._set_cache_entry(cache_key_name, result_objs) - except ConnectionSkipException as e: - raise e - except (BaseException, Exception): - result_objs = None - if result_objs: - for person_obj in result_objs: - if not any(1 for r in results if r.id == person_obj['id']): - results.append(self._convert_person_obj(person_obj)) - else: - continue - if name: - cache_key_name = 'p-src-text-%s' % name - is_none, people_objs = self._get_cache_entry(cache_key_name) - if None is people_objs and not is_none: - try: - people_objs = search_text_obj.person(query=name, include_adult=True) - self._set_cache_entry(cache_key_name, people_objs) - except ConnectionSkipException as e: - raise e - except (BaseException, Exception): - people_objs = None - if people_objs and people_objs.get('results'): - for person_obj in people_objs['results']: - if not any(1 for r in results if r.id == person_obj['id']): - results.append(self._convert_person_obj(person_obj)) - - return results - - def get_person(self, p_id, get_show_credits=False, get_images=False, **kwargs): - # type: (integer_types, bool, bool, Any) -> Optional[Person] - kw = {} - to_append = [] - if get_show_credits: - to_append.append('tv_credits') - if get_images: - to_append.append('images') - if to_append: - kw['append_to_response'] = ','.join(to_append) - - cache_key_name = 'p-%s-%s' % (p_id, '-'.join(to_append)) - is_none, people_obj = self._get_cache_entry(cache_key_name) - if None is people_obj and not is_none: - try: - people_obj = tmdbsimple.People(id=p_id).info(**kw) - except ConnectionSkipException as e: - raise e 
- except (BaseException, Exception): - people_obj = None - self._set_cache_entry(cache_key_name, people_obj) - - if people_obj: - return self._convert_person_obj(people_obj) - - def _convert_show(self, show_dict): - # type: (Dict) -> TVInfoShow - tv_s = TVInfoShow() - if show_dict: - tv_s.seriesname = show_dict.get('name') or show_dict.get('original_name') or show_dict.get('original_title') - org_title = show_dict.get('original_name') or show_dict.get('original_title') - if org_title != tv_s.seriesname: - tv_s.aliases = [org_title] - tv_s.id = show_dict.get('id') - tv_s.seriesid = tv_s.id - tv_s.language = show_dict.get('original_language') - tv_s.overview = show_dict.get('overview') - tv_s.firstaired = show_dict.get('first_air_date') - tv_s.vote_count = show_dict.get('vote_count') - tv_s.vote_average = show_dict.get('vote_average') - tv_s.popularity = show_dict.get('popularity') - tv_s.origin_countries = show_dict.get('origin_country') or [] - tv_s.genre_list = [] - for g in show_dict.get('genre_ids') or []: - if g in self.tv_genres: - tv_s.genre_list.append(self.tv_genres.get(g)) - tv_s.genre = ', '.join(tv_s.genre_list) - image_url = show_dict.get('poster_path') and '%s%s%s' % \ - (self.img_base_url, self.size_map[TVInfoImageType.poster][TVInfoImageSize.original], - show_dict.get('poster_path')) - thumb_image_url = show_dict.get('poster_path') and '%s%s%s' % \ - (self.img_base_url, self.size_map[TVInfoImageType.poster][TVInfoImageSize.small], - show_dict.get('poster_path')) - backdrop_url = show_dict.get('backdrop_path') and '%s%s%s' % \ - (self.img_base_url, self.size_map[TVInfoImageType.fanart][TVInfoImageSize.original], - show_dict.get('backdrop_path')) - tv_s.poster = image_url - tv_s.poster_thumb = thumb_image_url - tv_s.fanart = backdrop_url - tv_s.ids = TVInfoIDs(tmdb=tv_s.id) - return tv_s - - def _get_show_list(self, src_method, result_count, **kwargs): - result = [] - try: - c_page = 1 - while len(result) < result_count: - results = 
src_method(page=c_page, **kwargs) - t_pages = results.get('total_pages') - if c_page != results.get('page') or c_page >= t_pages: - break - c_page += 1 - if results and 'results' in results: - result += [self._convert_show(t) for t in results['results']] - else: - break - except (BaseException, Exception): - pass - return result[:result_count] - - def get_trending(self, result_count=100, time_window='day', **kwargs): - """ - list of trending tv shows for day or week - :param result_count: - :param time_window: valid values: 'day', 'week' - """ - t_windows = ('day', 'week')['week' == time_window] - return self._get_show_list(tmdbsimple.Trending(media_type='tv', time_window=t_windows).info, result_count) - - def get_popular(self, result_count=100, **kwargs): - return self._get_show_list(tmdbsimple.TV().popular, result_count) - - def get_top_rated(self, result_count=100, **kwargs): - return self._get_show_list(tmdbsimple.TV().top_rated, result_count) - - def discover(self, result_count=100, **kwargs): - """ - Discover TV shows by different types of data like average rating, - number of votes, genres, the network they aired on and air dates. - - Discover also supports a nice list of sort options. See below for all - of the available options. - - Also note that a number of filters support being comma (,) or pipe (|) - separated. Comma's are treated like an AND and query while pipe's are - an OR. - - Some examples of what can be done with discover can be found at - https://www.themoviedb.org/documentation/api/discover. - - kwargs: - language: (optional) ISO 639-1 code. 
- sort_by: (optional) Available options are 'vote_average.desc', - 'vote_average.asc', 'first_air_date.desc', - 'first_air_date.asc', 'popularity.desc', 'popularity.asc' - sort_by: (optional) Allowed values: vote_average.desc, - vote_average.asc, first_air_date.desc, first_air_date.asc, - popularity.desc, popularity.asc - Default: popularity.desc - air_date.gte: (optional) Filter and only include TV shows that have - a air date (by looking at all episodes) that is greater or - equal to the specified value. - air_date.lte: (optional) Filter and only include TV shows that have - a air date (by looking at all episodes) that is less than or - equal to the specified value. - first_air_date.gte: (optional) Filter and only include TV shows - that have a original air date that is greater or equal to the - specified value. Can be used in conjunction with the - "include_null_first_air_dates" filter if you want to include - items with no air date. - first_air_date.lte: (optional) Filter and only include TV shows - that have a original air date that is less than or equal to the - specified value. Can be used in conjunction with the - "include_null_first_air_dates" filter if you want to include - items with no air date. - first_air_date_year: (optional) Filter and only include TV shows - that have a original air date year that equal to the specified - value. Can be used in conjunction with the - "include_null_first_air_dates" filter if you want to include - items with no air date. - timezone: (optional) Used in conjunction with the air_date.gte/lte - filter to calculate the proper UTC offset. Default - America/New_York. - vote_average.gte: (optional) Filter and only include movies that - have a rating that is greater or equal to the specified value. - Minimum 0. - vote_count.gte: (optional) Filter and only include movies that have - a rating that is less than or equal to the specified value. - Minimum 0. 
- with_genres: (optional) Comma separated value of genre ids that you - want to include in the results. - with_networks: (optional) Comma separated value of network ids that - you want to include in the results. - without_genres: (optional) Comma separated value of genre ids that - you want to exclude from the results. - with_runtime.gte: (optional) Filter and only include TV shows with - an episode runtime that is greater than or equal to a value. - with_runtime.lte: (optional) Filter and only include TV shows with - an episode runtime that is less than or equal to a value. - include_null_first_air_dates: (optional) Use this filter to include - TV shows that don't have an air date while using any of the - "first_air_date" filters. - with_original_language: (optional) Specify an ISO 639-1 string to - filter results by their original language value. - without_keywords: (optional) Exclude items with certain keywords. - You can comma and pipe seperate these values to create an 'AND' - or 'OR' logic. - screened_theatrically: (optional) Filter results to include items - that have been screened theatrically. - with_companies: (optional) A comma separated list of production - company ID's. Only include movies that have one of the ID's - added as a production company. - with_keywords: (optional) A comma separated list of keyword ID's. - Only includes TV shows that have one of the ID's added as a - keyword. 
- - :param result_count: - """ - return self._get_show_list(tmdbsimple.Discover().tv, result_count, **kwargs) - - def _get_show_data(self, sid, language, get_ep_info=False, banners=False, posters=False, seasons=False, - seasonwides=False, fanart=False, actors=False, **kwargs): - # type: (integer_types, AnyStr, bool, bool, bool, bool, bool, bool, bool, Optional[Any]) -> bool - # note: this is only working for images fetching currently - self.show_not_found = False - to_append = ['external_ids', 'alternative_titles', 'content_ratings'] - if any((banners, posters, seasons, seasonwides, fanart)): - to_append.append('images') - if actors: - to_append.append('aggregate_credits') - if get_ep_info: - to_append.append('episode_groups') - try: - tmdb = tmdbsimple.TV(sid) - show_data = tmdb.info(append_to_response=','.join(to_append)) - except (BaseException, Exception): - self.show_not_found = True - return False - - if not show_data: - self.show_not_found = True - return False - - self._set_show_data(sid, 'seriesid', show_data['id']) - - runtime = None - for r in sorted(show_data['episode_run_time'], reverse=True): - if 40 < r < 50: - runtime = r - break - if 20 < r < 40: - runtime = r - break - if not runtime and show_data['episode_run_time']: - runtime = max(show_data['episode_run_time'] or [0]) or None - self._set_show_data(sid, 'runtime', runtime) - - image_url = show_data.get('poster_path') and '%s%s%s' % \ - (self.img_base_url, self.size_map[TVInfoImageType.poster][TVInfoImageSize.original], - show_data.get('poster_path')) - if image_url: - self._set_show_data(sid, 'poster', image_url) - thumb_image_url = show_data.get('poster_path') and '%s%s%s' % \ - (self.img_base_url, self.size_map[TVInfoImageType.poster][TVInfoImageSize.small], - show_data.get('poster_path')) - self._set_show_data(sid, 'poster_thumb', thumb_image_url) - - backdrop_url = show_data.get('backdrop_path') and '%s%s%s' % \ - (self.img_base_url, 
self.size_map[TVInfoImageType.fanart][TVInfoImageSize.original], - show_data.get('backdrop_path')) - if backdrop_url: - self._set_show_data(sid, 'fanart', backdrop_url) - - self.shows[sid].genre_list = [] - for g in show_data.get('genre_ids') or []: - if g in self.tv_genres: - self.shows[sid].genre_list.append(self.tv_genres.get(g)) - self._set_show_data(sid, 'genre', ', '.join(self.shows[sid].genre_list)) - - self.shows[sid].networks = [ - TVInfoNetwork(name=n.get('name'), n_id=n.get('id'), country_code=n.get('origin_country')) - for n in show_data['networks'] or [] - ] - - if show_data['networks']: - self.shows[sid].network = show_data['networks'][0]['name'] - self.shows[sid].network_id = show_data['networks'][0].get('id') - self.shows[sid].network_country_code = show_data['networks'][0].get('origin_country') - - for k, v in iteritems(show_data): - if k in tv_show_map: - self._set_show_data(sid, tv_show_map.get(k, k), v) - - self._set_show_data(sid, 'ids', - TVInfoIDs( - tvdb=show_data['external_ids'].get('tvdb_id'), - tmdb=show_data['id'], - rage=show_data['external_ids'].get('tvrage_id'), - imdb=show_data['external_ids'].get('imdb_id') - and try_int(show_data['external_ids'].get('imdb_id', '').replace('tt', ''), None))) - self._set_show_data(sid, 'social_ids', - TVInfoSocialIDs(twitter=show_data['external_ids'].get('twitter_id'), - instagram=show_data['external_ids'].get('instagram_id'), - facebook=show_data['external_ids'].get('facebook_id'))) - if 'images' in show_data: - show_obj = self.shows[sid] # type: TVInfoShow - show_obj.poster_loaded = True - show_obj.banner_loaded = True - show_obj.fanart_loaded = True - for img_type, img_list in iteritems(show_data['images']): - img_type = {'backdrops': TVInfoImageType.fanart, 'posters': TVInfoImageType.poster}.get(img_type) - for img in img_list: - show_obj.images.setdefault(img_type, []).append( - TVInfoImage( - image_type=img_type, - sizes={ - t_s: '%s%s%s' % (self.img_base_url, self.size_map[img_type][t_s], 
img['file_path']) - for t_s in [TVInfoImageSize.original, TVInfoImageSize.medium, TVInfoImageSize.small] - }, - rating=img['vote_average'], - votes=img['vote_count'], - lang=img['iso_639_1'], - height=img['height'], - width=img['width'], - aspect_ratio=img['aspect_ratio'] - ) - ) - - return True diff --git a/lib/tmdb_api/tmdb_exceptions.py b/lib/tmdb_api/tmdb_exceptions.py deleted file mode 100644 index 773a8b5..0000000 --- a/lib/tmdb_api/tmdb_exceptions.py +++ /dev/null @@ -1,62 +0,0 @@ -# encoding:utf-8 - -"""Custom exceptions used or raised by tmdb_api -""" - -__author__ = 'Prinz23' -__version__ = '1.0' - -__all__ = ['TmdbException', 'TmdbError', 'TmdbUserabort', 'TmdbShownotfound', - 'TmdbSeasonnotfound', 'TmdbEpisodenotfound', 'TmdbAttributenotfound', 'TmdbTokenexpired'] - -from lib.tvinfo_base.exceptions import * - - -class TmdbException(BaseTVinfoException): - """Any exception generated by tvdb_api - """ - pass - - -class TmdbError(BaseTVinfoError, TmdbException): - """An error with thetvdb.com (Cannot connect, for example) - """ - pass - - -class TmdbUserabort(BaseTVinfoUserabort, TmdbError): - """User aborted the interactive selection (via - the q command, ^c etc) - """ - pass - - -class TmdbShownotfound(BaseTVinfoShownotfound, TmdbError): - """Show cannot be found on thetvdb.com (non-existant show) - """ - pass - - -class TmdbSeasonnotfound(BaseTVinfoSeasonnotfound, TmdbError): - """Season cannot be found on thetvdb.com - """ - pass - - -class TmdbEpisodenotfound(BaseTVinfoEpisodenotfound, TmdbError): - """Episode cannot be found on thetvdb.com - """ - pass - - -class TmdbAttributenotfound(BaseTVinfoAttributenotfound, TmdbError): - """Raised if an episode does not have the requested - attribute (such as a episode name) - """ - pass - - -class TmdbTokenexpired(BaseTVinfoAuthenticationerror, TmdbError): - """token expired or missing thetvdb.com - """ - pass diff --git a/lib/tvdb_api/UNLICENSE b/lib/tvdb_api/UNLICENSE deleted file mode 100644 index 
c4205d4..0000000 --- a/lib/tvdb_api/UNLICENSE +++ /dev/null @@ -1,26 +0,0 @@ -Copyright 2011-2012 Ben Dickson (dbr) - -This is free and unencumbered software released into the public domain. - -Anyone is free to copy, modify, publish, use, compile, sell, or -distribute this software, either in source code form or as a compiled -binary, for any purpose, commercial or non-commercial, and by any -means. - -In jurisdictions that recognize copyright laws, the author or authors -of this software dedicate any and all copyright interest in the -software to the public domain. We make this dedication for the benefit -of the public at large and to the detriment of our heirs and -successors. We intend this dedication to be an overt act of -relinquishment in perpetuity of all present and future rights to this -software under copyright law. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, -EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF -MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. -IN NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY CLAIM, DAMAGES OR -OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, -ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR -OTHER DEALINGS IN THE SOFTWARE. 
- -For more information, please refer to diff --git a/lib/tvdb_api/__init__.py b/lib/tvdb_api/__init__.py deleted file mode 100644 index 8b13789..0000000 --- a/lib/tvdb_api/__init__.py +++ /dev/null @@ -1 +0,0 @@ - diff --git a/lib/tvdb_api/tvdb_api.py b/lib/tvdb_api/tvdb_api.py deleted file mode 100644 index 3c73441..0000000 --- a/lib/tvdb_api/tvdb_api.py +++ /dev/null @@ -1,1262 +0,0 @@ -# !/usr/bin/env python2 -# encoding:utf-8 -# author:dbr/Ben -# project:tvdb_api -# repository:http://github.com/dbr/tvdb_api -# license:un license (http://unlicense.org/) - -from __future__ import absolute_import -from functools import wraps - -__author__ = 'dbr/Ben' -__version__ = '2.0' -__api_version__ = '3.0.0' - -import copy -import datetime -import getpass -import logging -import os -import random -import re -import requests -import requests.exceptions -import tempfile -import time -import warnings - -from bs4_parser import BS4Parser -from collections import OrderedDict -from sg_helpers import clean_data, get_url, try_int -from sickbeard import ENV - -from lib.cachecontrol import CacheControl, caches -from lib.dateutil.parser import parse -from lib.exceptions_helper import ConnectionSkipException -from lib.tvinfo_base import CastList, Character, CrewList, Person, RoleTypes, \ - TVINFO_TVDB, TVINFO_TVDB_SLUG, TVInfoBase, TVInfoIDs - -from .tvdb_exceptions import TvdbError, TvdbShownotfound, TvdbTokenexpired -from .tvdb_ui import BaseUI, ConsoleUI - -from _23 import filter_list, list_keys, list_values, map_list -from six import integer_types, iteritems, PY2, string_types - -# noinspection PyUnreachableCode -if False: - # noinspection PyUnresolvedReferences - from typing import Any, AnyStr, Dict, List, Optional, Union - from lib.tvinfo_base import TVInfoShow - - -THETVDB_V2_API_TOKEN = {'token': None, 'datetime': datetime.datetime.fromordinal(1)} -log = logging.getLogger('tvdb.api') -log.addHandler(logging.NullHandler()) - - -# noinspection PyUnusedLocal -def _record_hook(r, 
*args, **kwargs): - r.hook_called = True - if 301 == r.status_code and isinstance(r.headers.get('Location'), string_types) \ - and r.headers.get('Location').startswith('http://api.thetvdb.com/'): - r.headers['Location'] = r.headers['Location'].replace('http://', 'https://') - return r - - -def retry(exception_to_check, tries=4, delay=3, backoff=2): - """Retry calling the decorated function using an exponential backoff. - - http://www.saltycrane.com/blog/2009/11/trying-out-retry-decorator-python/ - original from: http://wiki.python.org/moin/PythonDecoratorLibrary#Retry - - :param exception_to_check: the exception to check. may be a tuple of - exceptions to check - :type exception_to_check: Exception or tuple - :param tries: number of times to try (not retry) before giving up - :type tries: int - :param delay: initial delay between retries in seconds - :type delay: int - :param backoff: backoff multiplier e.g. value of 2 will double the delay - each retry - :type backoff: int - """ - - def deco_retry(f): - - @wraps(f) - def f_retry(*args, **kwargs): - mtries, mdelay = tries, delay - auth_error = 0 - while 1 < mtries: - try: - return f(*args, **kwargs) - except exception_to_check as e: - msg = '%s, Retrying in %d seconds...' % (str(e), mdelay) - log.warning(msg) - time.sleep(mdelay) - if isinstance(e, TvdbTokenexpired) and not auth_error: - auth_error += 1 - else: - mtries -= 1 - mdelay *= backoff - except ConnectionSkipException as e: - raise e - try: - return f(*args, **kwargs) - except TvdbTokenexpired: - if not auth_error: - return f(*args, **kwargs) - raise TvdbTokenexpired - except ConnectionSkipException as e: - raise e - - return f_retry # true decorator - - return deco_retry - - -class Actors(list): - """Holds all Actor instances for a show - """ - pass - - -class Actor(dict): - """Represents a single actor. Should contain.. 
- - id, - image, - name, - role, - sortorder - """ - - def __repr__(self): - return '' % self.get('name') - - -class Tvdb(TVInfoBase): - """Create easy-to-use interface to name of season/episode name - >> t = Tvdb() - >> t['Scrubs'][1][24]['episodename'] - u'My Last Day' - """ - supported_id_searches = [TVINFO_TVDB, TVINFO_TVDB_SLUG] - - # noinspection PyUnusedLocal - def __init__(self, - interactive=False, - select_first=False, - debug=False, - cache=True, - banners=False, - fanart=False, - posters=False, - seasons=False, - seasonwides=False, - actors=False, - custom_ui=None, - language=None, - search_all_languages=False, - apikey=None, - dvdorder=False, - proxy=None, - *args, - **kwargs): - - """interactive (True/False): - When True, uses built-in console UI is used to select the correct show. - When False, the first search result is used. - - select_first (True/False): - Automatically selects the first series search result (rather - than showing the user a list of more than one series). - Is overridden by interactive = False, or specifying a custom_ui - - debug (True/False) DEPRECATED: - Replaced with proper use of logging module. To show debug messages: - - >> import logging - >> logging.basicConfig(level = logging.DEBUG) - - cache (True/False/str/unicode/urllib2 opener): - Retrieved XML are persisted to to disc. If true, stores in - tvdb_api folder under your systems TEMP_DIR, if set to - str/unicode instance it will use this as the cache - location. If False, disables caching. Can also be passed - an arbitrary Python object, which is used as a urllib2 - opener, which should be created by urllib2.build_opener - - banners (True/False): - Retrieves the banners for a show. These are accessed - via the banners key of a Show(), for example: - - >> Tvdb(banners=True)['scrubs']['banners'].keys() - ['fanart', 'poster', 'series', 'season'] - - actors (True/False): - Retrieves a list of the actors for a show. 
These are accessed - via the actors key of a Show(), for example: - - >> t = Tvdb(actors=True) - >> t['scrubs']['actors'][0]['name'] - u'Zach Braff' - - custom_ui (tvdb_ui.BaseUI subclass): - A callable subclass of tvdb_ui.BaseUI (overrides interactive option) - - language (2 character language abbreviation): - The language of the returned data. Is also the language search - uses. Default is "en" (English). For full list, run.. - - >> Tvdb().config['valid_languages'] #doctest: +ELLIPSIS - ['da', 'fi', 'nl', ...] - - search_all_languages (True/False): - By default, Tvdb will only search in the language specified using - the language option. When this is True, it will search for the - show in and language - - apikey (str/unicode): - Override the default thetvdb.com API key. By default it will use - tvdb_api's own key (fine for small scripts), but you can use your - own key if desired - this is recommended if you are embedding - tvdb_api in a larger application) - See http://thetvdb.com/?tab=apiregister to get your own key - - """ - - super(Tvdb, self).__init__(*args, **kwargs) - self.config = {} - - if None is not apikey: - self.config['apikey'] = apikey - else: - self.config['apikey'] = '0629B785CE550C8D' # tvdb_api's API key - - self.config['debug_enabled'] = debug # show debugging messages - - self.config['custom_ui'] = custom_ui - - self.config['interactive'] = interactive # prompt for correct series? 
- - self.config['select_first'] = select_first - - self.config['search_all_languages'] = search_all_languages - - self.config['dvdorder'] = dvdorder - - self.config['proxy'] = proxy - - if cache is True: - self.config['cache_enabled'] = True - self.config['cache_location'] = self._get_temp_dir() - elif cache is False: - self.config['cache_enabled'] = False - elif isinstance(cache, string_types): - self.config['cache_enabled'] = True - self.config['cache_location'] = cache - else: - raise ValueError('Invalid value for Cache %r (type was %s)' % (cache, type(cache))) - - self.config['banners_enabled'] = banners - self.config['posters_enabled'] = posters - self.config['seasons_enabled'] = seasons - self.config['seasonwides_enabled'] = seasonwides - self.config['fanart_enabled'] = fanart - self.config['actors_enabled'] = actors - - if self.config['debug_enabled']: - warnings.warn('The debug argument to tvdb_api.__init__ will be removed in the next version. ' + - 'To enable debug messages, use the following code before importing: ' + - 'import logging; logging.basicConfig(level=logging.DEBUG)') - logging.basicConfig(level=logging.DEBUG) - - # List of language from http://thetvdb.com/api/0629B785CE550C8D/languages.xml - # Hard-coded here as it is relatively static, and saves another HTTP request, as - # recommended on http://thetvdb.com/wiki/index.php/API:languages.xml - self.config['valid_languages'] = [ - 'cs', 'da', 'de', 'el', 'en', 'es', 'fi', 'fr', - 'he', 'hr', 'hu', 'it', 'ja', 'ko', 'nl', 'no', - 'pl', 'pt', 'ru', 'sl', 'sv', 'tr', 'zh' - ] - - # not mapped: el, sl, tr. added as guess: fin, pol. 
unknown: _1 - self.config['langabbv_23'] = { - 'cs': 'ces', 'da': 'dan', 'de': 'deu', 'en': 'eng', 'es': 'spa', 'fi': 'fin', 'fr': 'fra', - 'he': 'heb', 'hr': 'hrv', 'hu': 'hun', 'it': 'ita', 'ja': 'jpn', 'ko': 'kor', 'nb': 'nor', - 'nl': 'nld', 'no': 'nor', - 'pl': 'pol', 'pt': 'pot', 'ru': 'rus', 'sk': 'slv', 'sv': 'swe', 'zh': 'zho', '_1': 'srp', - } - self.config['valid_languages_3'] = list_values(self.config['langabbv_23']) - - # TheTvdb.com should be based around numeric language codes, - # but to link to a series like http://thetvdb.com/?tab=series&id=79349&lid=16 - # requires the language ID, thus this mapping is required (mainly - # for usage in tvdb_ui - internally tvdb_api will use the language abbreviations) - self.config['langabbv_to_id'] = { - 'cs': 28, 'da': 10, 'de': 14, 'el': 20, 'en': 7, 'es': 16, 'fi': 11, 'fr': 17, - 'he': 24, 'hr': 31, 'hu': 19, 'it': 15, 'ja': 25, 'ko': 32, 'nl': 13, 'no': 9, - 'pl': 18, 'pt': 26, 'ru': 22, 'sl': 30, 'sv': 8, 'tr': 21, 'zh': 27 - } - - if not language: - self.config['language'] = 'en' - else: - if language not in self.config['valid_languages']: - raise ValueError('Invalid language %s, options are: %s' % (language, self.config['valid_languages'])) - else: - self.config['language'] = language - - # The following url_ configs are based of the - # http://thetvdb.com/wiki/index.php/Programmers_API - self.config['base_url'] = 'https://thetvdb.com/' - self.config['api3_url'] = 'https://api.thetvdb.com/' - - self.config['url_search_series'] = '%(api3_url)ssearch/series' % self.config - self.config['params_search_series'] = {'name': ''} - - self.config['url_series_episodes_info'] = '%(api3_url)sseries/%%s/episodes?page=%%s' % self.config - - self.config['url_series_info'] = '%(api3_url)sseries/%%s' % self.config - self.config['url_episodes_info'] = '%(api3_url)sepisodes/%%s' % self.config - self.config['url_actors_info'] = '%(api3_url)sseries/%%s/actors' % self.config - - self.config['url_series_images'] = 
'%(api3_url)sseries/%%s/images/query?keyType=%%s' % self.config - self.config['url_artworks'] = 'https://artworks.thetvdb.com/banners/%s' - - self.config['url_people'] = '%(base_url)speople/%%s' % self.config - self.config['url_series_people'] = '%(base_url)sseries/%%s/people' % self.config - self.config['url_series_all'] = '%(base_url)sseries/%%s/allseasons/official' % self.config - self.config['url_series_dvd'] = '%(base_url)sseries/%%s/allseasons/dvd' % self.config - self.config['url_series_abs'] = '%(base_url)sseries/%%s/seasons/absolute/1' % self.config - - def _search_show(self, name=None, ids=None, **kwargs): - # type: (AnyStr, Dict[integer_types, integer_types], Optional[Any]) -> List[TVInfoShow] - def map_data(data): - data['poster'] = data.get('image') - data['ids'] = TVInfoIDs( - tvdb=data.get('id'), - imdb=data.get('imdb_id') and try_int(data.get('imdb_id', '').replace('tt', ''), None)) - return data - - results = [] - if ids: - if ids.get(TVINFO_TVDB): - cache_id_key = 's-id-%s-%s' % (TVINFO_TVDB, ids[TVINFO_TVDB]) - is_none, shows = self._get_cache_entry(cache_id_key) - if not self.config.get('cache_search') or (None is shows and not is_none): - try: - d_m = self._get_show_data(ids.get(TVINFO_TVDB), self.config['language'], direct_data=True) - self._set_cache_entry(cache_id_key, d_m, expire=self.search_cache_expire) - except (BaseException, Exception): - d_m = None - else: - d_m = shows - if d_m: - results = map_list(map_data, [d_m['data']]) - if ids.get(TVINFO_TVDB_SLUG): - cache_id_key = 's-id-%s-%s' % (TVINFO_TVDB, ids[TVINFO_TVDB_SLUG]) - is_none, shows = self._get_cache_entry(cache_id_key) - if not self.config.get('cache_search') or (None is shows and not is_none): - try: - d_m = self.get_series(ids.get(TVINFO_TVDB_SLUG).replace('-', ' ')) - self._set_cache_entry(cache_id_key, d_m, expire=self.search_cache_expire) - except (BaseException, Exception): - d_m = None - else: - d_m = shows - if d_m: - for r in d_m: - if ids.get(TVINFO_TVDB_SLUG) == 
r['slug']: - results = map_list(map_data, [r]) - break - if name: - for n in ([name], name)[isinstance(name, list)]: - cache_name_key = 's-name-%s' % n - is_none, shows = self._get_cache_entry(cache_name_key) - if not self.config.get('cache_search') or (None is shows and not is_none): - try: - r = self.get_series(n) - self._set_cache_entry(cache_name_key, r, expire=self.search_cache_expire) - except (BaseException, Exception): - r = None - else: - r = shows - if r: - results.extend(map_list(map_data, r)) - - seen = set() - results = [seen.add(r['id']) or r for r in results if r['id'] not in seen] - return results - - def get_new_token(self): - global THETVDB_V2_API_TOKEN - token = THETVDB_V2_API_TOKEN.get('token', None) - dt = THETVDB_V2_API_TOKEN.get('datetime', datetime.datetime.fromordinal(1)) - url = '%s%s' % (self.config['api3_url'], 'login') - params = {'apikey': self.config['apikey']} - resp = get_url(url.strip(), post_json=params, parse_json=True, raise_skip_exception=True) - if resp: - if 'token' in resp: - token = resp['token'] - dt = datetime.datetime.now() - - return {'token': token, 'datetime': dt} - - def get_token(self): - global THETVDB_V2_API_TOKEN - if None is THETVDB_V2_API_TOKEN.get( - 'token') or datetime.datetime.now() - THETVDB_V2_API_TOKEN.get( - 'datetime', datetime.datetime.fromordinal(1)) > datetime.timedelta(hours=23): - THETVDB_V2_API_TOKEN = self.get_new_token() - if not THETVDB_V2_API_TOKEN.get('token'): - raise TvdbError('Could not get Authentification Token') - return THETVDB_V2_API_TOKEN.get('token') - - @staticmethod - def _get_temp_dir(): - """Returns the [system temp dir]/tvdb_api-u501 (or - tvdb_api-myuser) - """ - if hasattr(os, 'getuid'): - uid = 'u%d' % (os.getuid()) - else: - # For Windows - try: - uid = getpass.getuser() - except ImportError: - return os.path.join(tempfile.gettempdir(), 'tvdb_api') - - return os.path.join(tempfile.gettempdir(), 'tvdb_api-%s' % uid) - - def _match_url_pattern(self, pattern, url): - if 
pattern in self.config: - try: - if PY2: - return None is not re.search('^%s$' % re.escape(self.config[pattern]).replace('\\%s', '[^/]+'), url) - else: - return None is not re.search('^%s$' % re.escape(self.config[pattern]).replace(r'%s', '[^/]+'), url) - except (BaseException, Exception): - pass - return False - - def is_apikey(self, check_url=None): - return bool(self.config['apikey']) and (None is check_url or '://api' in check_url) - - @retry((TvdbError, TvdbTokenexpired)) - def _load_url(self, url, params=None, language=None, parse_json=False, **kwargs): - log.debug('Retrieving URL %s' % url) - - parse_json = parse_json or self.is_apikey(url) - session = requests.session() - - if self.config['cache_enabled']: - session = CacheControl(session, cache=caches.FileCache(self.config['cache_location'])) - - if self.config['proxy']: - log.debug('Using proxy for URL: %s' % url) - session.proxies = {'http': self.config['proxy'], 'https': self.config['proxy']} - - headers = {'Accept-Encoding': 'gzip,deflate'} - if self.is_apikey(url): - headers.update({'Authorization': 'Bearer %s' % self.get_token(), - 'Accept': 'application/vnd.thetvdb.v%s' % __api_version__}) - - if None is not language and language in self.config['valid_languages']: - headers.update({'Accept-Language': language}) - - resp = None - is_series_info = self._match_url_pattern('url_series_info', url) - if is_series_info: - self.show_not_found = False - self.not_found = False - try: - resp = get_url(url.strip(), params=params, session=session, headers=headers, parse_json=parse_json, - raise_status_code=True, raise_exceptions=True, raise_skip_exception=True, **kwargs) - except ConnectionSkipException as e: - raise e - except requests.exceptions.HTTPError as e: - if 401 == e.response.status_code: - if self.is_apikey(url): - # token expired, get new token, raise error to retry - global THETVDB_V2_API_TOKEN - THETVDB_V2_API_TOKEN = self.get_new_token() - raise TvdbTokenexpired - elif 404 == 
e.response.status_code: - if is_series_info: - self.show_not_found = True - elif self._match_url_pattern('url_series_episodes_info', url): - resp = {'data': []} - self.not_found = True - elif 404 != e.response.status_code: - raise TvdbError - except (BaseException, Exception): - raise TvdbError - - if is_series_info and isinstance(resp, dict) and isinstance(resp.get('data'), dict) and \ - isinstance(resp['data'].get('seriesName'), string_types) and \ - re.search(r'^[*]\s*[*]\s*[*]', resp['data'].get('seriesName', ''), flags=re.I): - self.show_not_found = True - self.not_found = True - - map_show = {'airstime': 'airs_time', 'airsdayofweek': 'airs_dayofweek', 'imdbid': 'imdb_id', - 'writers': 'writer', 'siterating': 'rating'} - - def map_show_keys(data): - keep_data = {} - del_keys = [] - new_data = {} - for k, v in iteritems(data): - k_org = k - k = k.lower() - if None is not v: - if k in ['banner', 'fanart', 'poster'] and v: - v = self.config['url_artworks'] % v - elif 'genre' == k: - keep_data['genre_list'] = v - v = '|%s|' % '|'.join([clean_data(c) for c in v if isinstance(c, string_types)]) - elif 'gueststars' == k: - keep_data['gueststars_list'] = v - v = '|%s|' % '|'.join([clean_data(c) for c in v if isinstance(c, string_types)]) - elif 'writers' == k: - keep_data[k] = v - v = '|%s|' % '|'.join([clean_data(c) for c in v if isinstance(c, string_types)]) - elif 'rating' == k: - new_data['contentrating'] = v - elif 'firstaired' == k: - if v: - try: - v = parse(v, fuzzy=True).strftime('%Y-%m-%d') - except (BaseException, Exception): - v = None - else: - v = None - elif 'imdbid' == k: - if v: - if re.search(r'^(tt)?\d{1,9}$', v, flags=re.I): - v = clean_data(v) - else: - v = '' - else: - v = clean_data(v) - - if not v and 'seriesname' == k: - if isinstance(data.get('aliases'), list) and 0 < len(data.get('aliases')): - v = data['aliases'].pop(0) - # this is a invalid show, it has no Name - if not v: - return None - - if k in map_show: - k = map_show[k] - if k_org is 
not k: - del_keys.append(k_org) - new_data[k] = v - else: - data[k] = v - for d in del_keys: - del (data[d]) - if isinstance(data, dict): - data.update(new_data) - data.update(keep_data) - return data - - if resp and isinstance(resp, dict): - if isinstance(resp.get('data'), dict): - resp['data'] = map_show_keys(resp['data']) - elif isinstance(resp.get('data'), list): - data_list = [] - for idx, row in enumerate(resp['data']): - if isinstance(row, dict): - cr = map_show_keys(row) - if None is not cr: - data_list.append(cr) - resp['data'] = data_list - return resp - return dict([(u'data', (None, resp)[isinstance(resp, string_types)])]) - - def _getetsrc(self, url, params=None, language=None, parse_json=False): - """Loads a URL using caching - """ - try: - src = self._load_url(url, params=params, language=language, parse_json=parse_json) - if isinstance(src, dict): - if None is not src['data']: - data = src['data'] - else: - data = {} - # data = src['data'] or {} - if isinstance(data, list): - if 0 < len(data): - data = data[0] - # data = data[0] or {} - if None is data or (isinstance(data, dict) and 1 > len(data.keys())): - raise ValueError - return src - except (KeyError, IndexError, Exception): - pass - - @staticmethod - def clean_overview(text): - """replace newlines with period and space, remove multiple spaces""" - return ' '.join(['%s.' 
% re.sub(r'[\s][\s]+', r' ', x).strip().rstrip('.') for x in text.split('\r\n')]) - - def get_show_info(self, sid, language=None): - # type: (int, Optional[str]) -> Optional[dict] - results = self.search_tvs(sid, language=language) - for cur_result in (isinstance(results, dict) and results.get('results') or []): - result = filter_list(lambda r: 'series' == r['type'] and sid == r['id'], - cur_result.get('nbHits') and cur_result.get('hits') or []) - if 1 == len(result): - result[0]['overview'] = self.clean_overview( - result[0]['overviews'][self.config['langabbv_23'].get(language) or 'eng']) - # remap - for from_key, to_key in iteritems({ - 'name': 'seriesname', 'first_air_date': 'firstaired' - }): - result[0][to_key] = result[0][from_key] - del result[0][from_key] # delete also prevents false +ve with the following new key notifier - - # notify of new keys - if ENV.get('SG_DEV_MODE'): - new_keys = set(list_keys(result[0])).difference({ - '_highlightResult', 'aliases', 'banner', - 'fanart', 'firstaired', 'follower_count', - 'id', 'image', 'is_tvdb_searchable', 'is_tvt_searchable', - 'seriesname', 'network', - 'objectID', 'overviews', 'poster', 'release_year', - 'slug', 'status', - 'translations', 'type', - 'url', 'uuid' - }) - if new_keys: - log.warning('DEV_MODE: New get_show_info tvdb attrs for %s %r' % (sid, new_keys)) - - return result[0] - - # fallback : e.g. 
https://thetvdb.com/?tab=series&id=349309&lid=7 - response = self._load_url(self.config['base_url'], params={ - 'tab': 'series', 'id': sid, 'lid': self.config['langabbv_to_id'].get(language, 7)}) - series = {} - - def get_value(tag, contains): - try: - rc_contains = re.compile(r'(?i)%s' % contains) - parent = copy.copy(tag.find(string=rc_contains, recursive=True).find_parent(class_=re.compile('item'))) - return ', '.join(re.sub(r'(?i)(\s)([\s]+)', r'\1', i.get_text(strip=True)) - for i in parent.find_all('span')) - except(BaseException, Exception): - pass - - with BS4Parser(response.get('data', '')) as soup: - basic_info = soup.find(id='series_basic_info') - series_id = try_int(get_value(basic_info, r'series\sid'), None) - if None is not series_id: - series['id'] = series_id - series['firstaired'] = None # fill from ep listings page - series['genrelist'] = get_value(basic_info, 'genres').split(', ') # extra field - series['genre'] = '|%s|' % '|'.join(series['genrelist']) - series['language'] = language - series['seriesname'] = soup.find(id='series_title').get_text(strip=True) - series['networklist'] = get_value(basic_info, 'network').split(', ') # extra field - series['network'] = '|%s|' % '|'.join(series['networklist']) # e.g. 
'|network|network n|network 10|' - series['status'] = get_value(basic_info, 'status') - series['type'] = 'series' # extra field - - airs_at = get_value(basic_info, 'airs') - airs = airs_at and airs_at.split(', ') or [] - if 0 < len(airs): - series['airs_time'] = 'at ' in airs[-1] \ - and re.sub(r'(?i)\s+([ap]m)', r'\1', airs[-1]).split()[-1] or '' - series['airs_dayofweek'] = ', '.join(airs[0:-1]) - else: - series['airs_time'] = airs_at - series['airs_dayofweek'] = '' - - # alias list - series['aliases'] = [] - try: - lang_tag = soup.find(id='translations').select('.change_translation_text[data-language="%s"]' % ( - self.config['langabbv_23'].get(language) or 'eng'))[0] - series['aliases'] = [t.get_text(strip=True) for t in lang_tag - .find(string=re.compile('(?i)alias'), recursive=True).find_parent() - .find_next_sibling('ul').find_all('li')] - except(BaseException, Exception): - pass - - # images - series['image'] = series['poster'] = (soup.find(rel=re.compile('artwork_posters')) or {}).get('href') - series['banner'] = (soup.find(rel=re.compile('artwork_banners')) or {}).get('href') - series['fanart'] = (soup.find(rel=re.compile('artwork_backgrounds')) or {}).get('href') - - series['imdb_id'] = re.sub(r'.*(tt\d+)', r'\1', - (soup.find(href=re.compile(r'imdb\.com')) or {}).get('href', '')) - - # {lang: overview} - series.setdefault('overviews', {}) - for cur_tag in soup.find_all(class_='change_translation_text'): - try: - lang = cur_tag.attrs.get('data-language') - if None is not lang: - text = cur_tag.p.get_text(strip=True) - if text: - text = self.clean_overview(text) - series['overviews'].setdefault(lang, text) # extra field - if lang == self.config['langabbv_23'].get(language): - series['overview'] = text - except(BaseException, Exception): - pass - - runtime = get_value(basic_info, 'runtime') - runtime_often = None - if ', ' in runtime: - try: - # sort runtimes by most number of episodes (e.g. 
'25 minutes (700 episodes)') - runtime_often = sorted([re.findall(r'([^(]+)\((\d+).*', i)[0] for i in runtime.split(', ')], - key=lambda x: try_int(x[1]), reverse=True) - runtime_often = next(iter(runtime_often))[0].strip() # first item is most frequent runtime - except(BaseException, Exception): - runtime_often = None - series['runtime'] = runtime_often and re.sub('^([0-9]+).*', r'\1', runtime_often) or runtime - - series['season'] = None - try: - last_season = sorted([x.get('href') - for x in soup.find_all(href=re.compile(r'/seasons/official/(\d+)'))])[-1] - series['season'] = re.findall(r'(\d+)$', last_season)[0] - except(BaseException, Exception): - pass - - series['slug'] = series['url'] = '' - try: - rc_slug = re.compile('(?i)/series/(?P[^/]+)/(?:episode|season)') - series['slug'] = rc_slug.search(soup.find(href=rc_slug).get('href')).group('slug') - series['url'] = '%sseries/%s' % (self.config['base_url'], series['slug']) # extra field - except(BaseException, Exception): - pass - - # {lang: show title in lang} # extra field - series['translations'] = {t.attrs.get('data-language'): t.attrs.get('data-title') - for t in soup.find_all(class_='change_translation_text') - if all(t.attrs.get(a) for a in ('data-title', 'data-language'))} - - return series - - def search_tvs(self, terms, language=None): - # type: (Union[int, str], Optional[str]) -> Optional[dict] - try: - src = self._load_url( - 'https://tvshow''time-%s.algo''lia.net/1/' - 'indexes/*/queries' % random.choice([1, 2, 3, 'dsn']), - params={'x-algo''lia-agent': 'Alg''olia for vani''lla JavaScript (lite) 3.3''2.0;' - 'instant''search.js (3.5''.3);JS Helper (2.2''8.0)', - 'x-algo''lia''-app''lication-id': 'tvshow''time', - 'x-algo''lia''-ap''i-key': '3d''978dd96c457390f21cec6131ce5d''9c'[::-1]}, - post_json={'requests': [ - {'indexName': 'TVDB', - 'params': '&'.join( - ['query=%s' % terms, 'maxValuesPerFacet=10', 'page=0', - 'facetFilters=[["type:series", "type:person"]]', - 'tagFilters=', 
'analytics=false', 'advancedSyntax=true', - 'highlightPreTag=__ais-highlight__', 'highlightPostTag=__/ais-highlight__' - ]) - }]}, - language=language, parse_json=True, failure_monitor=False) - return src - except (KeyError, IndexError, Exception): - pass - - def search(self, series): - # type: (AnyStr) -> List - """This searches TheTVDB.com for the series name - and returns the result list - """ - if PY2: - series = series.encode('utf-8') - self.config['params_search_series']['name'] = series - log.debug('Searching for show %s' % series) - - try: - series_found = self._getetsrc(self.config['url_search_series'], params=self.config['params_search_series'], - language=self.config['language']) - if series_found: - return list_values(series_found)[0] - except (BaseException, Exception): - pass - - return [] - - def get_series(self, series): - """This searches TheTVDB.com for the series name, - If a custom_ui UI is configured, it uses this to select the correct - series. If not, and interactive == True, ConsoleUI is used, if not - BaseUI is used to select the first result. 
- """ - all_series = self.search(series) - if not isinstance(all_series, list): - all_series = [all_series] - - if 0 == len(all_series): - log.debug('Series result returned zero') - raise TvdbShownotfound('Show-name search returned zero results (cannot find show on TVDB)') - - if None is not self.config['custom_ui']: - log.debug('Using custom UI %s' % self.config['custom_ui'].__name__) - custom_ui = self.config['custom_ui'] - ui = custom_ui(config=self.config) - else: - if not self.config['interactive']: - log.debug('Auto-selecting first search result using BaseUI') - ui = BaseUI(config=self.config) - else: - log.debug('Interactively selecting show using ConsoleUI') - ui = ConsoleUI(config=self.config) - - return ui.select_series(all_series) - - def _parse_banners(self, sid, img_list): - banners = {} - - try: - for cur_banner in img_list: - bid = cur_banner['id'] - btype = (cur_banner['keytype'], 'banner')['series' == cur_banner['keytype']] - btype2 = (cur_banner['resolution'], try_int(cur_banner['subkey'], cur_banner['subkey']))[ - btype in ('season', 'seasonwide')] - if None is btype or None is btype2: - continue - - for k, v in iteritems(cur_banner): - if None is k or None is v: - continue - - k, v = k.lower(), v.lower() if isinstance(v, string_types) else v - if 'filename' == k: - k = 'bannerpath' - v = self.config['url_artworks'] % v - elif 'thumbnail' == k: - k = 'thumbnailpath' - v = self.config['url_artworks'] % v - elif 'keytype' == k: - k = 'bannertype' - banners.setdefault(btype, OrderedDict()).setdefault(btype2, OrderedDict()).setdefault(bid, {})[ - k] = v - - except (BaseException, Exception): - pass - - self._set_show_data(sid, '_banners', banners, add=True) - - def _parse_actors(self, sid, actor_list, actor_list_alt): - - a = [] - cast = CastList() - try: - alts = {} - if actor_list_alt: - with BS4Parser(actor_list_alt) as soup: - rc_role = re.compile(r'/series/(?P[^/]+)/people/(?P\d+)/?$') - rc_img = 
re.compile(r'/(?Pperson/(?P[0-9]+)/(?P[^/]+)\..*)') - rc_img_v3 = re.compile(r'/(?Pactors/(?P[^/]+)\..*)') - max_people = 5 - rc_clean = re.compile(r'[^a-z0-9]') - for cur_enum, cur_role in enumerate(soup.find_all('a', href=rc_role) or []): - try: - image = person_id = None - for cur_rc in (rc_img, rc_img_v3): - img_tag = cur_role.find('img', src=cur_rc) - if img_tag: - img_parsed = cur_rc.search(img_tag.get('src')) - image, person_id = [x in img_parsed.groupdict() and img_parsed.group(x) - for x in ('url', 'person_id')] - break - lines = [x.strip() for x in cur_role.get_text().split('\n') if x.strip()][0:2] - name = role = '' - if len(lines): - name = lines[0] - for line in lines[1:]: - if line.lower().startswith('as '): - role = line[3:] - break - if not person_id and max_people: - max_people -= 1 - results = self.search_tvs(name) - try: - for cur_result in (isinstance(results, dict) and results.get('results') or []): - # sorts 'banners/images/missing/' to last before filter - people = filter_list( - lambda r: 'person' == r['type'] - and rc_clean.sub(name, '') == rc_clean.sub(r['name'], ''), - cur_result.get('nbHits') - and sorted(cur_result.get('hits'), - key=lambda x: len(x['image']), reverse=True) or []) - if ENV.get('SG_DEV_MODE'): - for person in people: - new_keys = set(list_keys(person)).difference({ - '_highlightResult', 'banner', 'id', 'image', - 'is_tvdb_searchable', 'is_tvt_searchable', 'name', - 'objectID', 'people_birthdate', 'people_died', - 'poster', 'type', 'url' - }) - if new_keys: - log.warning('DEV_MODE: New _parse_actors tvdb attrs for %s %r' - % (person['id'], new_keys)) - - person_ok = False - for person in people: - if image: - people_data = self._load_url(person['url'])['data'] - person_ok = re.search(re.escape(image), people_data) - if not image or person_ok: - person_id = person['id'] - raise ValueError('value okay, id found') - except (BaseException, Exception): - pass - - rid = int(rc_role.search(cur_role.get('href')).group('role_id')) 
- alts.setdefault(rid, {'id': rid, 'person_id': person_id or None, 'name': name, 'role': role, - 'image': image, 'sortorder': cur_enum, 'lastupdated': 0}) - except(BaseException, Exception): - pass - if not self.is_apikey(): # for the future when apikey == '' - actor_list = sorted([d for _, d in iteritems(alts)], key=lambda x: x.get('sortorder')) - - unique_c_p, c_p_list, new_actor_list = set(), [], [] - for actor in sorted(actor_list, key=lambda x: x.get('lastupdated'), reverse=True): - c_p_list.append((actor['name'], actor['role'])) - if (actor['name'], actor['role']) not in unique_c_p: - unique_c_p.add((actor['name'], actor['role'])) - new_actor_list.append(actor) - for n in sorted(new_actor_list, key=lambda x: x['sortorder']): - role_image = (alts.get(n['id'], {}).get('image'), n.get('image'))[ - any([n.get('image')]) and 1 == c_p_list.count((n['name'], n['role']))] - if role_image: - role_image = self.config['url_artworks'] % role_image - character_name = n.get('role', '').strip() or alts.get(n['id'], {}).get('role', '') - person_name = n.get('name', '').strip() or alts.get(n['id'], {}).get('name', '') - try: - person_id = try_int(re.search(r'^person/(\d+)/', n.get('image', '')).group(1), None) - except (BaseException, Exception): - person_id = None - person_id = person_id or alts.get(n['id'], {}).get('person_id') - character_id = n.get('id', None) or alts.get(n['id'], {}).get('rid') - a.append({'character': {'id': character_id, - 'name': character_name, - 'url': None, # not supported by tvdb - 'image': role_image, - }, - 'person': {'id': person_id, - 'name': person_name, - 'url': person_id and (self.config['url_people'] % person_id) or None, - 'image': None, # not supported by tvdb - 'birthday': None, # not supported by tvdb - 'deathday': None, # not supported by tvdb - 'gender': None, # not supported by tvdb - 'country': None, # not supported by tvdb - }, - }) - cast[RoleTypes.ActorMain].append( - Character(p_id=character_id, name=character_name, - 
person=[Person(p_id=person_id, name=person_name)], image=role_image)) - except (BaseException, Exception): - pass - self._set_show_data(sid, 'actors', a) - self._set_show_data(sid, 'cast', cast) - self.shows[sid].actors_loaded = True - - def get_episode_data(self, epid): - # Parse episode information - data = None - log.debug('Getting all episode data for %s' % epid) - url = self.config['url_episodes_info'] % epid - episode_data = self._getetsrc(url, language=self.config['language']) - - if episode_data and 'data' in episode_data: - data = episode_data['data'] - if isinstance(data, dict): - for k, v in iteritems(data): - k = k.lower() - - if None is not v: - if 'filename' == k and v: - v = self.config['url_artworks'] % v - else: - v = clean_data(v) - data[k] = v - - return data - - def _parse_images(self, sid, language, show_data, image_type, enabled_type, type_bool): - mapped_img_types = {'banner': 'series'} - excluded_main_data = enabled_type in ['seasons_enabled', 'seasonwides_enabled'] - loaded_name = '%s_loaded' % image_type - if (type_bool or self.config[enabled_type]) and not getattr(self.shows.get(sid), loaded_name, False): - image_data = self._getetsrc(self.config['url_series_images'] % - (sid, mapped_img_types.get(image_type, image_type)), language=language) - if image_data and 0 < len(image_data.get('data', '') or ''): - image_data['data'] = sorted(image_data['data'], reverse=True, - key=lambda x: (x['ratingsinfo']['average'], x['ratingsinfo']['count'])) - if not excluded_main_data: - url_image = self.config['url_artworks'] % image_data['data'][0]['filename'] - url_thumb = self.config['url_artworks'] % image_data['data'][0]['thumbnail'] - self._set_show_data(sid, image_type, url_image) - self._set_show_data(sid, u'%s_thumb' % image_type, url_thumb) - excluded_main_data = True # artwork found so prevent fallback - self._parse_banners(sid, image_data['data']) - self.shows[sid].__dict__[loaded_name] = True - - # fallback image thumbnail for none 
excluded_main_data if artwork is not found - if not excluded_main_data and show_data['data'].get(image_type): - self._set_show_data(sid, u'%s_thumb' % image_type, - re.sub(r'\.jpg$', '_t.jpg', show_data['data'][image_type], flags=re.I)) - - def _get_show_data(self, - sid, # type: integer_types - language, # type: AnyStr - get_ep_info=False, # type: bool - banners=False, # type: bool - posters=False, # type: bool - seasons=False, # type: bool - seasonwides=False, # type: bool - fanart=False, # type: bool - actors=False, # type: bool - direct_data=False, # type: bool - **kwargs # type: Optional[Any] - ): # type: (...) -> Optional[bool, dict] - """Takes a series ID, gets the epInfo URL and parses the TVDB - XML file into the shows dict in layout: - shows[series_id][season_number][episode_number] - """ - - # Parse show information - url = self.config['url_series_info'] % sid - if direct_data or sid not in self.shows or None is self.shows[sid].id: - log.debug('Getting all series data for %s' % sid) - show_data = self._getetsrc(url, language=language) - if not show_data or not show_data.get('data'): - show_data = {'data': self.get_show_info(sid, language=language)} - if direct_data: - return show_data - - # check and make sure we have data to process and that it contains a series name - if not (show_data and 'seriesname' in show_data.get('data', {}) or {}): - return False - - for k, v in iteritems(show_data['data']): - self._set_show_data(sid, k, v) - self._set_show_data(sid, 'ids', - TVInfoIDs( - tvdb=show_data['data'].get('id'), - imdb=show_data['data'].get('imdb_id') - and try_int(show_data['data'].get('imdb_id', '').replace('tt', ''), None))) - else: - show_data = {'data': {}} - - for img_type, en_type, p_type in [(u'poster', 'posters_enabled', posters), - (u'banner', 'banners_enabled', banners), - (u'fanart', 'fanart_enabled', fanart), - (u'season', 'seasons_enabled', seasons), - (u'seasonwide', 'seasonwides_enabled', seasonwides)]: - self._parse_images(sid, 
language, show_data, img_type, en_type, p_type) - - if (actors or self.config['actors_enabled']) and not getattr(self.shows.get(sid), 'actors_loaded', False): - actor_data = self._getetsrc(self.config['url_actors_info'] % sid, language=language) - actor_data_alt = self._getetsrc(self.config['url_series_people'] % sid, language=language) - if actor_data and 0 < len(actor_data.get('data', '') or '') or actor_data_alt and actor_data_alt['data']: - self._parse_actors(sid, actor_data['data'], actor_data_alt and actor_data_alt['data']) - - if get_ep_info and not getattr(self.shows.get(sid), 'ep_loaded', False): - # Parse episode data - log.debug('Getting all episodes of %s' % sid) - - page = 1 - episodes = [] - while page <= 400: - episode_data = {} - if self.is_apikey(): - episode_data = self._getetsrc( - self.config['url_series_episodes_info'] % (sid, page), language=language) - - if not episode_data: - response = {'data': None} - items_found = False - # fallback to page 'all' if dvd is enabled and response has no items - for page_type in ('url_series_dvd', 'url_series_all'): - if 'dvd' not in page_type or self.config['dvdorder']: - response = self._load_url(self.config[page_type] % show_data.get('data').get('slug')) - with BS4Parser(response.get('data') or '') as soup: - items_found = bool(soup.find_all(class_='list-group-item')) - if items_found: - break - if not items_found: - break - - episode_data = {'data': []} - with BS4Parser(response.get('data')) as soup: - items = soup.find_all(class_='list-group-item') - rc_sxe = re.compile(r'(?i)s(?:pecial\s*)?(\d+)\s*[xe]\s*(\d+)') # Special nxn or SnnEnn - rc_episode = re.compile(r'(?i)/series/%s/episodes?/(?P\d+)' % show_data['data']['slug']) - rc_date = re.compile(r'\s\d{4}\s*$') - season_type, episode_type = ['%s%s' % (('aired', 'dvd')['dvd' in page_type], x) - for x in ('season', 'episodenumber')] - for cur_item in items: - try: - heading_tag = cur_item.find(class_='list-group-item-heading') - sxe = 
heading_tag.find(class_='episode-label').get_text(strip=True) - ep_season, ep_episode = [try_int(x) for x in rc_sxe.findall(sxe)[0]] - link_ep_tag = heading_tag.find(href=rc_episode) or {} - link_match = rc_episode.search(link_ep_tag.get('href', '')) - ep_id = link_match and try_int(link_match.group('ep_id'), None) - ep_name = link_ep_tag.get_text(strip=True) - # ep_network = None # extra field - ep_aired = None - for cur_tag in cur_item.find('ul').find_all('li'): - text = cur_tag.get_text(strip=True) - if rc_date.search(text): - ep_aired = parse(text).strftime('%Y-%m-%d') - # elif text in show_data['data']['network']: # unreliable data - # ep_network = text - ep_overview = None - item_tag = cur_item.find(class_='list-group-item-text') - if item_tag: - ep_overview = self.clean_overview(item_tag.get_text() or '') - ep_filename = None - link_ep_tag = item_tag.find(href=rc_episode) or None - if link_ep_tag: - ep_filename = (link_ep_tag.find('img') or {}).get('src', '') - - episode_data['data'].append({ - 'id': ep_id, season_type: ep_season, episode_type: ep_episode, - 'episodename': ep_name, 'firstaired': ep_aired, 'overview': ep_overview, - 'filename': ep_filename, # 'network': ep_network - }) - - if not show_data['data']['firstaired'] and ep_aired \ - and (1, 1) == (ep_season, ep_episode): - show_data['data']['firstaired'] = ep_aired - - episode_data['fallback'] = True - except (BaseException, Exception): - continue - - if None is episode_data: - raise TvdbError('Exception retrieving episodes for show') - if isinstance(episode_data, dict) and not episode_data.get('data', []): - if 1 != page: - self.not_found = False - break - if not getattr(self, 'not_found', False) and None is not episode_data.get('data'): - episodes.extend(episode_data['data']) - next_link = episode_data.get('links', {}).get('next', None) - # check if page is a valid following page - if not isinstance(next_link, integer_types) or next_link <= page: - next_link = None - if not next_link and 
isinstance(episode_data, dict) \ - and isinstance(episode_data.get('data', []), list) and \ - (100 > len(episode_data.get('data', [])) or episode_data.get('fallback')): - break - if next_link: - page = next_link - else: - page += 1 - - ep_map_keys = {'absolutenumber': u'absolute_number', 'airedepisodenumber': u'episodenumber', - 'airedseason': u'seasonnumber', 'airedseasonid': u'seasonid', - 'dvdepisodenumber': u'dvd_episodenumber', 'dvdseason': u'dvd_season'} - - for cur_ep in episodes: - if self.config['dvdorder']: - log.debug('Using DVD ordering.') - use_dvd = None is not cur_ep.get('dvdseason') and None is not cur_ep.get('dvdepisodenumber') - else: - use_dvd = False - - if use_dvd: - elem_seasnum, elem_epno = cur_ep.get('dvdseason'), cur_ep.get('dvdepisodenumber') - else: - elem_seasnum, elem_epno = cur_ep.get('airedseason'), cur_ep.get('airedepisodenumber') - - if None is elem_seasnum or None is elem_epno: - log.warning('An episode has incomplete season/episode number (season: %r, episode: %r)' % ( - elem_seasnum, elem_epno)) - continue # Skip to next episode - - # float() is because https://github.com/dbr/tvnamer/issues/95 - should probably be fixed in TVDB data - seas_no = int(float(elem_seasnum)) - ep_no = int(float(elem_epno)) - - if not cur_ep.get('network'): - cur_ep['network'] = self.shows[sid].network - for k, v in iteritems(cur_ep): - k = k.lower() - - if None is not v: - if 'filename' == k and v: - if '://' not in v: - v = self.config['url_artworks'] % v - else: - v = clean_data(v) - - if k in ep_map_keys: - k = ep_map_keys[k] - self._set_item(sid, seas_no, ep_no, k, v) - - crew = CrewList() - cast = CastList() - try: - for director in cur_ep.get('directors', []): - crew[RoleTypes.CrewDirector].append(Person(name=director)) - except (BaseException, Exception): - pass - try: - for guest in cur_ep.get('gueststars_list', []): - cast[RoleTypes.ActorGuest].append(Character(person=[Person(name=guest)])) - except (BaseException, Exception): - pass - try: - 
for writers in cur_ep.get('writers', []): - crew[RoleTypes.CrewWriter].append(Person(name=writers)) - except (BaseException, Exception): - pass - self._set_item(sid, seas_no, ep_no, 'crew', crew) - self._set_item(sid, seas_no, ep_no, 'cast', cast) - - self.shows[sid].ep_loaded = True - - return True - - def _name_to_sid(self, name): - """Takes show name, returns the correct series ID (if the show has - already been grabbed), or grabs all episodes and returns - the correct SID. - """ - if name in self.corrections: - log.debug('Correcting %s to %s' % (name, self.corrections[name])) - return self.corrections[name] - else: - log.debug('Getting show %s' % name) - selected_series = self.get_series(name) - if isinstance(selected_series, dict): - selected_series = [selected_series] - sids = [int(x['id']) for x in selected_series if - self._get_show_data(int(x['id']), self.config['language'])] - self.corrections.update(dict([(x['seriesname'], int(x['id'])) for x in selected_series])) - return sids - - -def main(): - """Simple example of using tvdb_api - it just - grabs an episode name interactively. 
- """ - import logging - - logging.basicConfig(level=logging.DEBUG) - - tvdb_instance = Tvdb(interactive=True, cache=False) - print(tvdb_instance['Lost']['seriesname']) - print(tvdb_instance['Lost'][1][4]['episodename']) - - -if '__main__' == __name__: - main() diff --git a/lib/tvdb_api/tvdb_cache.py b/lib/tvdb_api/tvdb_cache.py deleted file mode 100644 index 9edc9b9..0000000 --- a/lib/tvdb_api/tvdb_cache.py +++ /dev/null @@ -1,251 +0,0 @@ -#!/usr/bin/env python2 -#encoding:utf-8 -#author:dbr/Ben -#project:tvdb_api -#repository:http://github.com/dbr/tvdb_api -#license:unlicense (http://unlicense.org/) - -""" -urllib2 caching handler -Modified from http://code.activestate.com/recipes/491261/ -""" -from __future__ import with_statement - -__author__ = "dbr/Ben" -__version__ = "1.9" - -import os -import time -import errno -from hashlib import md5 -from threading import RLock -from six import StringIO -from six.moves import urllib, http_client as httplib - -cache_lock = RLock() - -def locked_function(origfunc): - """Decorator to execute function under lock""" - def wrapped(*args, **kwargs): - cache_lock.acquire() - try: - return origfunc(*args, **kwargs) - finally: - cache_lock.release() - return wrapped - -def calculate_cache_path(cache_location, url): - """Checks if [cache_location]/[hash_of_url].headers and .body exist - """ - thumb = md5(url).hexdigest() - header = os.path.join(cache_location, thumb + ".headers") - body = os.path.join(cache_location, thumb + ".body") - return header, body - -def check_cache_time(path, max_age): - """Checks if a file has been created/modified in the [last max_age] seconds. 
- False means the file is too old (or doesn't exist), True means it is - up-to-date and valid""" - if not os.path.isfile(path): - return False - cache_modified_time = os.stat(path).st_mtime - time_now = time.time() - if cache_modified_time < time_now - max_age: - # Cache is old - return False - else: - return True - -@locked_function -def exists_in_cache(cache_location, url, max_age): - """Returns if header AND body cache file exist (and are up-to-date)""" - hpath, bpath = calculate_cache_path(cache_location, url) - if os.path.exists(hpath) and os.path.exists(bpath): - return( - check_cache_time(hpath, max_age) - and check_cache_time(bpath, max_age) - ) - else: - # File does not exist - return False - -@locked_function -def store_in_cache(cache_location, url, response): - """Tries to store response in cache.""" - hpath, bpath = calculate_cache_path(cache_location, url) - try: - outf = open(hpath, "wb") - headers = str(response.info()) - outf.write(headers) - outf.close() - - outf = open(bpath, "wb") - outf.write(response.read()) - outf.close() - except IOError: - return True - else: - return False - -@locked_function -def delete_from_cache(cache_location, url): - """Deletes a response in cache.""" - hpath, bpath = calculate_cache_path(cache_location, url) - try: - if os.path.exists(hpath): - os.remove(hpath) - if os.path.exists(bpath): - os.remove(bpath) - except IOError: - return True - else: - return False - -class CacheHandler(urllib.request.BaseHandler): - """Stores responses in a persistant on-disk cache. 
- - If a subsequent GET request is made for the same URL, the stored - response is returned, saving time, resources and bandwidth - """ - @locked_function - def __init__(self, cache_location, max_age = 21600): - """The location of the cache directory""" - self.max_age = max_age - self.cache_location = cache_location - if not os.path.exists(self.cache_location): - try: - os.mkdir(self.cache_location) - except OSError as e: - if e.errno == errno.EEXIST and os.path.isdir(self.cache_location): - # File exists, and it's a directory, - # another process beat us to creating this dir, that's OK. - pass - else: - # Our target dir is already a file, or different error, - # relay the error! - raise - - def default_open(self, request): - """Handles GET requests, if the response is cached it returns it - """ - if "GET" != request.get_method(): - return None # let the next handler try to handle the request - - if exists_in_cache( - self.cache_location, request.get_full_url(), self.max_age - ): - return CachedResponse( - self.cache_location, - request.get_full_url(), - set_cache_header = True - ) - else: - return None - - def http_response(self, request, response): - """Gets a HTTP response, if it was a GET request and the status code - starts with 2 (200 OK etc) it caches it and returns a CachedResponse - """ - if ("GET" == request.get_method() - and str(response.code).startswith("2")): - if 'x-local-cache' not in response.info(): - # Response is not cached - set_cache_header = store_in_cache( - self.cache_location, - request.get_full_url(), - response - ) - else: - set_cache_header = True - - return CachedResponse( - self.cache_location, - request.get_full_url(), - set_cache_header = set_cache_header - ) - else: - return response - -class CachedResponse(StringIO): - """An urllib2.response-like object for cached responses. - - To determine if a response is cached or coming directly from - the network, check the x-local-cache header rather than the object type. 
- """ - - @locked_function - def __init__(self, cache_location, url, set_cache_header=True): - self.cache_location = cache_location - hpath, bpath = calculate_cache_path(cache_location, url) - - StringIO.__init__(self, open(bpath, "rb").read()) - - self.url = url - self.code = 200 - self.msg = "OK" - headerbuf = open(hpath, "rb").read() - if set_cache_header: - headerbuf += "x-local-cache: %s\r\n" % (bpath) - self.headers = httplib.HTTPMessage(StringIO(headerbuf)) - - def info(self): - """Returns headers - """ - return self.headers - - def geturl(self): - """Returns original URL - """ - return self.url - - @locked_function - def recache(self): - new_request = urllib.request.urlopen(self.url) - set_cache_header = store_in_cache( - self.cache_location, - new_request.url, - new_request - ) - CachedResponse.__init__(self, self.cache_location, self.url, True) - - @locked_function - def delete_cache(self): - delete_from_cache( - self.cache_location, - self.url - ) - - -if __name__ == "__main__": - def main(): - """Quick test/example of CacheHandler""" - opener = urllib.request.build_opener(CacheHandler("/tmp/")) - response = opener.open('http://google.com') - print(response.headers) - print('Response:', response.read()) - - response.recache() - print(response.headers) - print('After recache:', response.read()) - - # Test usage in threads - from threading import Thread - - class CacheThreadTest(Thread): - lastdata = None - - def run(self): - req = opener.open("http://google.com") - newdata = req.read() - if None is self.lastdata: - self.lastdata = newdata - assert self.lastdata == newdata, "Data was not consistent, uhoh" - req.recache() - threads = [CacheThreadTest() for _ in range(50)] - print('Starting threads') - [t.start() for t in threads] - print('..done') - print('Joining threads') - [t.join() for t in threads] - print('..done') - main() diff --git a/lib/tvdb_api/tvdb_exceptions.py b/lib/tvdb_api/tvdb_exceptions.py deleted file mode 100644 index 9a22354..0000000 
--- a/lib/tvdb_api/tvdb_exceptions.py +++ /dev/null @@ -1,66 +0,0 @@ -# encoding:utf-8 -# author:dbr/Ben -# project:tvdb_api -# repository:http://github.com/dbr/tvdb_api -# license:unlicense (http://unlicense.org/) - -"""Custom exceptions used or raised by tvdb_api -""" - -__author__ = 'dbr/Ben' -__version__ = '1.9' - -__all__ = ['TvdbException', 'TvdbError', 'TvdbUserabort', 'TvdbShownotfound', - 'TvdbSeasonnotfound', 'TvdbEpisodenotfound', 'TvdbAttributenotfound', 'TvdbTokenexpired'] - -from lib.tvinfo_base.exceptions import * - - -class TvdbException(BaseTVinfoException): - """Any exception generated by tvdb_api - """ - pass - - -class TvdbError(BaseTVinfoError, TvdbException): - """An error with thetvdb.com (Cannot connect, for example) - """ - pass - - -class TvdbUserabort(BaseTVinfoUserabort, TvdbError): - """User aborted the interactive selection (via - the q command, ^c etc) - """ - pass - - -class TvdbShownotfound(BaseTVinfoShownotfound, TvdbError): - """Show cannot be found on thetvdb.com (non-existant show) - """ - pass - - -class TvdbSeasonnotfound(BaseTVinfoSeasonnotfound, TvdbError): - """Season cannot be found on thetvdb.com - """ - pass - - -class TvdbEpisodenotfound(BaseTVinfoEpisodenotfound, TvdbError): - """Episode cannot be found on thetvdb.com - """ - pass - - -class TvdbAttributenotfound(BaseTVinfoAttributenotfound, TvdbError): - """Raised if an episode does not have the requested - attribute (such as a episode name) - """ - pass - - -class TvdbTokenexpired(BaseTVinfoAuthenticationerror, TvdbError): - """token expired or missing thetvdb.com - """ - pass diff --git a/lib/tvdb_api/tvdb_ui.py b/lib/tvdb_api/tvdb_ui.py deleted file mode 100644 index fae4383..0000000 --- a/lib/tvdb_api/tvdb_ui.py +++ /dev/null @@ -1,156 +0,0 @@ -#!/usr/bin/env python2 -# encoding:utf-8 -# author:dbr/Ben -# project:tvdb_api -# repository:http://github.com/dbr/tvdb_api -# license:unlicense (http://unlicense.org/) - -"""Contains included user interfaces for Tvdb show 
selection. - -A UI is a callback. A class, it's __init__ function takes two arguments: - -- config, which is the Tvdb config dict, setup in tvdb_api.py -- log, which is Tvdb's logger instance (which uses the logging module). You can -call log.info() log.warning() etc - -It must have a method "select_series", this is passed a list of dicts, each dict -contains the the keys "name" (human readable show name), and "sid" (the shows -ID as on thetvdb.com). For example: - -[{'name': u'Lost', 'sid': u'73739'}, - {'name': u'Lost Universe', 'sid': u'73181'}] - -The "select_series" method must return the appropriate dict, or it can raise -tvdb_userabort (if the selection is aborted), tvdb_shownotfound (if the show -cannot be found). - -A simple example callback, which returns a random series: - -# >>> import random -# >>> from tvdb_ui import BaseUI -# >>> class RandomUI(BaseUI): -# ... def select_series(self, allSeries): -# ... import random -# ... return random.choice(allSeries) - -Then to use it.. 
- -# >>> from tvdb_api import Tvdb -# >>> t = Tvdb(custom_ui = RandomUI) -# >>> random_matching_series = t['Lost'] -# >>> type(random_matching_series) -# -""" - -__author__ = "dbr/Ben" -__version__ = "1.9" - -import logging -import warnings - -from .tvdb_exceptions import TvdbUserabort -from six import moves - - -def log(): - return logging.getLogger(__name__) - - -class BaseUI(object): - """Default non-interactive UI, which auto-selects first results - """ - def __init__(self, config, log=None): - self.config = config - if None is not log: - warnings.warn("the UI's log parameter is deprecated, instead use\n" - "use import logging; logging.getLogger('ui').info('blah')\n" - "The self.log attribute will be removed in the next version") - self.log = logging.getLogger(__name__) - - def select_series(self, all_series): - return all_series[0] - - -class ConsoleUI(BaseUI): - """Interactively allows the user to select a show from a console based UI - """ - - @staticmethod - def _displaySeries(all_series, limit=6): - """Helper function, lists series with corresponding ID - """ - if None is not limit: - toshow = all_series[:limit] - else: - toshow = all_series - - print('TVDB Search Results:') - for i, cshow in enumerate(toshow): - i_show = i + 1 # Start at more human readable number 1 (not 0) - log().debug('Showing allSeries[%s], series %s)' % (i_show, all_series[i]['seriesname'])) - if 0 == i: - extra = " (default)" - else: - extra = "" - - print ('%s -> %s [%s] # http://thetvdb.com/?tab=series&id=%s&lid=%s%s' % ( - i_show, - cshow['seriesname'].encode('UTF-8', 'ignore'), - cshow['language'].encode('UTF-8', 'ignore'), - str(cshow['id']), - cshow['lid'], - extra - )) - - def select_series(self, all_series): - self._displaySeries(all_series) - - if 1 == len(all_series): - # Single result, return it! 
- print('Automatically selecting only result') - return all_series[0] - - if self.config['select_first'] is True: - print('Automatically returning first search result') - return all_series[0] - - while True: # return breaks this loop - try: - print('Enter choice (first number, return for default, \'all\', ? for help):') - ans = moves.input() - except KeyboardInterrupt: - raise TvdbUserabort("User aborted (^c keyboard interupt)") - except EOFError: - raise TvdbUserabort("User aborted (EOF received)") - - log().debug('Got choice of: %s' % ans) - try: - selected_id = int(ans) - 1 # The human entered 1 as first result, not zero - except ValueError: # Input was not number - if 0 == len(ans.strip()): - # Default option - log().debug('Default option, returning first series') - return all_series[0] - if "q" == ans: - log().debug('Got quit command (q)') - raise TvdbUserabort("User aborted ('q' quit command)") - elif "?" == ans: - print('## Help') - print('# Enter the number that corresponds to the correct show.') - print('# a - display all results') - print('# all - display all results') - print('# ? 
- this help') - print('# q - abort tvnamer') - print('# Press return with no input to select first result') - elif ans.lower() in ["a", "all"]: - self._displaySeries(all_series, limit=None) - else: - log().debug('Unknown keypress %s' % ans) - else: - log().debug('Trying to return ID: %d' % selected_id) - try: - return all_series[selected_id] - except IndexError: - log().debug('Invalid show number entered!') - print('Invalid number (%s) selected!') - self._displaySeries(all_series) diff --git a/lib/tvmaze_api/__init__.py b/lib/tvmaze_api/__init__.py deleted file mode 100644 index e69de29..0000000 diff --git a/lib/tvmaze_api/tvmaze_api.py b/lib/tvmaze_api/tvmaze_api.py deleted file mode 100644 index ad1865e..0000000 --- a/lib/tvmaze_api/tvmaze_api.py +++ /dev/null @@ -1,666 +0,0 @@ -# encoding:utf-8 -# author:Prinz23 -# project:tvmaze_api - -__author__ = 'Prinz23' -__version__ = '1.0' -__api_version__ = '1.0.0' - -import datetime -import logging -import re - -import requests -from urllib3.util.retry import Retry -from requests.adapters import HTTPAdapter - -from six import integer_types, iteritems -from sg_helpers import get_url, try_int -from lib.dateutil.parser import parser -# noinspection PyProtectedMember -from lib.dateutil.tz.tz import _datetime_to_timestamp -from lib.exceptions_helper import ConnectionSkipException, ex -# from .tvmaze_exceptions import * -from lib.tvinfo_base import TVInfoBase, TVInfoImage, TVInfoImageSize, TVInfoImageType, Character, Crew, \ - crew_type_names, Person, RoleTypes, TVInfoShow, TVInfoEpisode, TVInfoIDs, TVInfoSeason, PersonGenders, \ - TVINFO_TVMAZE, TVINFO_TVDB, TVINFO_IMDB -from lib.pytvmaze import tvmaze - -# noinspection PyUnreachableCode -if False: - from typing import Any, AnyStr, Dict, List, Optional - from lib.pytvmaze.tvmaze import Episode as TVMazeEpisode, Show as TVMazeShow - -log = logging.getLogger('tvmaze.api') -log.addHandler(logging.NullHandler()) - - -# Query TVmaze free endpoints -def 
tvmaze_endpoint_standard_get(url): - s = requests.Session() - retries = Retry(total=5, - backoff_factor=0.1, - status_forcelist=[429]) - # noinspection HttpUrlsUsage - s.mount('http://', HTTPAdapter(max_retries=retries)) - s.mount('https://', HTTPAdapter(max_retries=retries)) - # noinspection PyProtectedMember - return get_url(url, json=True, session=s, hooks={'response': tvmaze._record_hook}, raise_skip_exception=True) - - -tvmaze.TVmaze.endpoint_standard_get = staticmethod(tvmaze_endpoint_standard_get) -tvm_obj = tvmaze.TVmaze() -empty_ep = TVInfoEpisode() -empty_se = TVInfoSeason() -tz_p = parser() - -img_type_map = { - 'poster': TVInfoImageType.poster, - 'banner': TVInfoImageType.banner, - 'background': TVInfoImageType.fanart, - 'typography': TVInfoImageType.typography, -} - -img_size_map = { - 'original': TVInfoImageSize.original, - 'medium': TVInfoImageSize.medium, -} - -show_map = { - 'id': 'maze_id', - 'ids': 'externals', - # 'slug': '', - 'seriesid': 'maze_id', - 'seriesname': 'name', - 'aliases': 'akas', - # 'season': '', - 'classification': 'type', - # 'genre': '', - 'genre_list': 'genres', - # 'actors': '', - # 'cast': '', - # 'show_type': '', - # 'network': 'network', - # 'network_id': '', - # 'network_timezone': '', - # 'network_country': '', - # 'network_country_code': '', - # 'network_is_stream': '', - # 'runtime': 'runtime', - 'language': 'language', - 'official_site': 'official_site', - # 'imdb_id': '', - # 'zap2itid': '', - # 'airs_dayofweek': '', - # 'airs_time': '', - # 'time': '', - 'firstaired': 'premiered', - # 'added': '', - # 'addedby': '', - # 'siteratingcount': '', - # 'lastupdated': '', - # 'contentrating': '', - 'rating': 'rating', - 'status': 'status', - 'overview': 'summary', - # 'poster': 'image', - # 'poster_thumb': '', - # 'banner': '', - # 'banner_thumb': '', - # 'fanart': '', - # 'banners': '', - 'updated_timestamp': 'updated', -} -season_map = { - 'id': 'id', - 'number': 'season_number', - 'name': 'name', - # 'actors': '', - # 
'cast': '', - # 'network': '', - # 'network_id': '', - # 'network_timezone': '', - # 'network_country': '', - # 'network_country_code': '', - # 'network_is_stream': '', - 'ordered': '', - 'start_date': 'premiere_date', - 'end_date': 'end_date', - # 'poster': '', - 'summery': 'summary', - 'episode_order': 'episode_order', -} - - -class TvMaze(TVInfoBase): - supported_id_searches = [TVINFO_TVMAZE, TVINFO_TVDB, TVINFO_IMDB] - supported_person_id_searches = [TVINFO_TVMAZE] - - def __init__(self, *args, **kwargs): - super(TvMaze, self).__init__(*args, **kwargs) - - def _search_show(self, name=None, ids=None, **kwargs): - def _make_result_dict(s): - return {'seriesname': s.name, 'id': s.id, 'firstaired': s.premiered, - 'network': s.network and s.network.name, - 'genres': s.genres, 'overview': s.summary, - 'aliases': [a.name for a in s.akas], 'image': s.image and s.image.get('original'), - 'ids': TVInfoIDs( - tvdb=s.externals.get('thetvdb'), rage=s.externals.get('tvrage'), tvmaze=s.id, - imdb=s.externals.get('imdb') and try_int(s.externals.get('imdb').replace('tt', ''), None))} - results = [] - if ids: - for t, p in iteritems(ids): - if t in self.supported_id_searches: - cache_id_key = 's-id-%s-%s' % (t, ids[t]) - is_none, shows = self._get_cache_entry(cache_id_key) - if t == TVINFO_TVDB: - if not self.config.get('cache_search') or (None is shows and not is_none): - try: - show = tvmaze.lookup_tvdb(p) - self._set_cache_entry(cache_id_key, show, expire=self.search_cache_expire) - except (BaseException, Exception): - continue - else: - show = shows - elif t == TVINFO_IMDB: - if not self.config.get('cache_search') or (None is shows and not is_none): - try: - show = tvmaze.lookup_imdb((p, 'tt%07d' % p)[not str(p).startswith('tt')]) - self._set_cache_entry(cache_id_key, show, expire=self.search_cache_expire) - except (BaseException, Exception): - continue - else: - show = shows - elif t == TVINFO_TVMAZE: - if not self.config.get('cache_search') or (None is shows and not 
is_none): - try: - show = tvm_obj.get_show(maze_id=p) - self._set_cache_entry(cache_id_key, show, expire=self.search_cache_expire) - except (BaseException, Exception): - continue - else: - show = shows - else: - continue - if show: - try: - if show.id not in [i['id'] for i in results]: - results.append(_make_result_dict(show)) - except (BaseException, Exception) as e: - log.debug('Error creating result dict: %s' % ex(e)) - if name: - for n in ([name], name)[isinstance(name, list)]: - cache_name_key = 's-name-%s' % n - is_none, shows = self._get_cache_entry(cache_name_key) - if not self.config.get('cache_search') or (None is shows and not is_none): - try: - shows = tvmaze.show_search(n) - except (BaseException, Exception) as e: - log.debug('Error searching for show: %s' % ex(e)) - continue - results.extend([_make_result_dict(s) for s in shows or []]) - - seen = set() - results = [seen.add(r['id']) or r for r in results if r['id'] not in seen] - return results - - def _set_episode(self, sid, ep_obj): - for _k, _s in ( - ('seasonnumber', 'season_number'), ('episodenumber', 'episode_number'), - ('episodename', 'title'), ('overview', 'summary'), ('firstaired', 'airdate'), - ('airtime', 'airtime'), ('runtime', 'runtime'), - ('seriesid', 'maze_id'), ('id', 'maze_id'), ('is_special', 'special'), ('filename', 'image')): - if 'filename' == _k: - image = getattr(ep_obj, _s, {}) or {} - image = image.get('original') or image.get('medium') - self._set_item(sid, ep_obj.season_number, ep_obj.episode_number, _k, image) - else: - self._set_item(sid, ep_obj.season_number, ep_obj.episode_number, _k, - getattr(ep_obj, _s, getattr(empty_ep, _k))) - - if ep_obj.airstamp: - try: - at = _datetime_to_timestamp(tz_p.parse(ep_obj.airstamp)) - self._set_item(sid, ep_obj.season_number, ep_obj.episode_number, 'timestamp', at) - except (BaseException, Exception): - pass - - @staticmethod - def _set_network(show_obj, network, is_stream): - show_obj['network'] = network.name - 
show_obj['network_timezone'] = network.timezone - show_obj['network_country'] = network.country - show_obj['network_country_code'] = network.code - show_obj['network_id'] = network.maze_id - show_obj['network_is_stream'] = is_stream - - def _get_tvm_show(self, show_id, get_ep_info): - try: - self.show_not_found = False - return tvm_obj.get_show(maze_id=show_id, embed='cast%s' % ('', ',episodeswithspecials')[get_ep_info]) - except tvmaze.ShowNotFound: - self.show_not_found = True - except (BaseException, Exception): - log.debug('Error getting data for tvmaze show id: %s' % show_id) - - def _get_show_data(self, sid, language, get_ep_info=False, banners=False, posters=False, seasons=False, - seasonwides=False, fanart=False, actors=False, **kwargs): - log.debug('Getting all series data for %s' % sid) - - show_data = self._get_tvm_show(sid, get_ep_info) - if not show_data: - return False - - show_obj = self.shows[sid].__dict__ - for k, v in iteritems(show_obj): - if k not in ('cast', 'crew', 'images'): - show_obj[k] = getattr(show_data, show_map.get(k, k), show_obj[k]) - show_obj['runtime'] = show_data.average_runtime or show_data.runtime - p_set = False - if show_data.image: - p_set = True - show_obj['poster'] = show_data.image.get('original') - show_obj['poster_thumb'] = show_data.image.get('medium') - - if (banners or posters or fanart or - any(self.config.get('%s_enabled' % t, False) for t in ('banners', 'posters', 'fanart'))) and \ - not all(getattr(self.shows[sid], '%s_loaded' % t, False) for t in ('poster', 'banner', 'fanart')): - if show_data.images: - b_set, f_set = False, False - self.shows[sid].poster_loaded = True - self.shows[sid].banner_loaded = True - self.shows[sid].fanart_loaded = True - for img in show_data.images: - img_type = img_type_map.get(img.type, TVInfoImageType.other) - img_width, img_height = img.resolutions['original'].get('width'), \ - img.resolutions['original'].get('height') - img_ar = img_width and img_height and float(img_width) / 
float(img_height) - img_ar_type = self._which_type(img_width, img_ar) - if TVInfoImageType.poster == img_type and img_ar and img_ar_type != img_type and \ - show_obj['poster'] == img.resolutions.get('original')['url']: - p_set = False - show_obj['poster'] = None - show_obj['poster_thumb'] = None - img_type = (TVInfoImageType.other, img_type)[ - not img_ar or img_ar_type == img_type or - img_type not in (TVInfoImageType.banner, TVInfoImageType.poster, TVInfoImageType.fanart)] - img_src = {} - for res, img_url in iteritems(img.resolutions): - img_size = img_size_map.get(res) - if img_size: - img_src[img_size] = img_url.get('url') - show_obj['images'].setdefault(img_type, []).append( - TVInfoImage( - image_type=img_type, sizes=img_src, img_id=img.id, main_image=img.main, - type_str=img.type, width=img_width, height=img_height, aspect_ratio=img_ar)) - if not p_set and TVInfoImageType.poster == img_type: - p_set = True - show_obj['poster'] = img.resolutions.get('original')['url'] - show_obj['poster_thumb'] = img.resolutions.get('original')['url'] - elif not b_set and 'banner' == img.type and TVInfoImageType.banner == img_type: - b_set = True - show_obj['banner'] = img.resolutions.get('original')['url'] - show_obj['banner_thumb'] = img.resolutions.get('medium')['url'] - elif not f_set and 'background' == img.type and TVInfoImageType.fanart == img_type: - f_set = True - show_obj['fanart'] = img.resolutions.get('original')['url'] - - if show_data.schedule: - if 'time' in show_data.schedule: - show_obj['airs_time'] = show_data.schedule['time'] - try: - h, m = show_data.schedule['time'].split(':') - h, m = try_int(h, None), try_int(m, None) - if None is not h and None is not m: - show_obj['time'] = datetime.time(hour=h, minute=m) - except (BaseException, Exception): - pass - if 'days' in show_data.schedule: - show_obj['airs_dayofweek'] = ', '.join(show_data.schedule['days']) - if show_data.genres: - show_obj['genre'] = '|%s|' % '|'.join(show_data.genres) - - if (actors or 
self.config['actors_enabled']) and not getattr(self.shows.get(sid), 'actors_loaded', False): - if show_data.cast: - character_person_ids = {} - for ch in show_obj['cast'][RoleTypes.ActorMain]: - character_person_ids.setdefault(ch.id, []).extend([p.id for p in ch.person]) - for ch in show_data.cast.characters: - existing_character = next((c for c in show_obj['cast'][RoleTypes.ActorMain] if c.id == ch.id), - None) # type: Optional[Character] - person = self._convert_person(ch.person) - if existing_character: - existing_person = next((p for p in existing_character.person - if person.id == p.ids.get(TVINFO_TVMAZE)), - None) # type: Person - if existing_person: - try: - character_person_ids[ch.id].remove(existing_person.id) - except (BaseException, Exception): - print('error') - pass - (existing_person.p_id, existing_person.name, existing_person.image, existing_person.gender, - existing_person.birthdate, existing_person.deathdate, existing_person.country, - existing_person.country_code, existing_person.country_timezone, existing_person.thumb_url, - existing_person.url, existing_person.ids) = \ - (ch.person.id, ch.person.name, - ch.person.image and ch.person.image.get('original'), - PersonGenders.named.get( - ch.person.gender and ch.person.gender.lower(), PersonGenders.unknown), - person.birthdate, person.deathdate, - ch.person.country and ch.person.country.get('name'), - ch.person.country and ch.person.country.get('code'), - ch.person.country and ch.person.country.get('timezone'), - ch.person.image and ch.person.image.get('medium'), - ch.person.url, {TVINFO_TVMAZE: ch.person.id}) - else: - existing_character.person.append(person) - else: - show_obj['cast'][RoleTypes.ActorMain].append( - Character(p_id=ch.id, name=ch.name, image=ch.image and ch.image.get('original'), - person=[person], - plays_self=ch.plays_self, thumb_url=ch.image and ch.image.get('medium') - )) - - if character_person_ids: - for c, p_ids in iteritems(character_person_ids): - if p_ids: - char = next((mc 
for mc in show_obj['cast'][RoleTypes.ActorMain] if mc.id == c), - None) # type: Optional[Character] - if char: - char.person = [p for p in char.person if p.id not in p_ids] - - if show_data.cast: - show_obj['actors'] = [ - {'character': {'id': ch.id, - 'name': ch.name, - 'url': 'https://www.tvmaze.com/character/view?id=%s' % ch.id, - 'image': ch.image and ch.image.get('original'), - }, - 'person': {'id': ch.person and ch.person.id, - 'name': ch.person and ch.person.name, - 'url': ch.person and 'https://www.tvmaze.com/person/view?id=%s' % ch.person.id, - 'image': ch.person and ch.person.image and ch.person.image.get('original'), - 'birthday': None, # not sure about format - 'deathday': None, # not sure about format - 'gender': ch.person and ch.person.gender and ch.person.gender, - 'country': ch.person and ch.person.country and ch.person.country.get('name'), - }, - } for ch in show_data.cast.characters] - - if show_data.crew: - for cw in show_data.crew: - rt = crew_type_names.get(cw.type.lower(), RoleTypes.CrewOther) - show_obj['crew'][rt].append( - Crew(p_id=cw.person.id, name=cw.person.name, - image=cw.person.image and cw.person.image.get('original'), - gender=cw.person.gender, birthdate=cw.person.birthday, deathdate=cw.person.death_day, - country=cw.person.country and cw.person.country.get('name'), - country_code=cw.person.country and cw.person.country.get('code'), - country_timezone=cw.person.country and cw.person.country.get('timezone'), - crew_type_name=cw.type, - ) - ) - - if show_data.externals: - show_obj['ids'] = TVInfoIDs(tvdb=show_data.externals.get('thetvdb'), - rage=show_data.externals.get('tvrage'), - imdb=show_data.externals.get('imdb') and - try_int(show_data.externals.get('imdb').replace('tt', ''), None)) - - if show_data.network: - self._set_network(show_obj, show_data.network, False) - elif show_data.web_channel: - self._set_network(show_obj, show_data.web_channel, True) - - if get_ep_info and not getattr(self.shows.get(sid), 'ep_loaded', False): 
- log.debug('Getting all episodes of %s' % sid) - if None is show_data: - show_data = self._get_tvm_show(sid, get_ep_info) - if not show_data: - return False - - if show_data.episodes: - specials = [] - for cur_ep in show_data.episodes: - if cur_ep.is_special(): - specials.append(cur_ep) - else: - self._set_episode(sid, cur_ep) - - if specials: - specials.sort(key=lambda ep: ep.airstamp or 'Last') - for ep_n, cur_sp in enumerate(specials, start=1): - cur_sp.season_number, cur_sp.episode_number = 0, ep_n - self._set_episode(sid, cur_sp) - - if show_data.seasons: - for cur_s_k, cur_s_v in iteritems(show_data.seasons): - season_obj = None - if cur_s_v.season_number not in self.shows[sid]: - if all(_e.is_special() for _e in cur_s_v.episodes or []): - season_obj = self.shows[sid][0].__dict__ - else: - log.error('error episodes have no numbers') - season_obj = season_obj or self.shows[sid][cur_s_v.season_number].__dict__ - for k, v in iteritems(season_map): - season_obj[k] = getattr(cur_s_v, v, None) or empty_se.get(v) - if cur_s_v.network: - self._set_network(season_obj, cur_s_v.network, False) - elif cur_s_v.web_channel: - self._set_network(season_obj, cur_s_v.web_channel, True) - if cur_s_v.image: - season_obj['poster'] = cur_s_v.image.get('original') - self.shows[sid].season_images_loaded = True - - self.shows[sid].ep_loaded = True - - return True - - def get_updated_shows(self): - # type: (...) 
-> Dict[integer_types, integer_types] - return {sid: v.seconds_since_epoch for sid, v in iteritems(tvmaze.show_updates().updates)} - - @staticmethod - def _convert_person(person_obj): - # type: (tvmaze.Person) -> Person - ch = [] - for c in person_obj.castcredits or []: - show = TVInfoShow() - show.seriesname = c.show.name - show.id = c.show.id - show.firstaired = c.show.premiered - show.ids = TVInfoIDs(ids={TVINFO_TVMAZE: show.id}) - show.overview = c.show.summary - show.status = c.show.status - net = c.show.network or c.show.web_channel - show.network = net.name - show.network_id = net.maze_id - show.network_country = net.country - show.network_timezone = net.timezone - show.network_country_code = net.code - show.network_is_stream = None is not c.show.web_channel - ch.append(Character(name=c.character.name, show=show)) - try: - birthdate = person_obj.birthday and tz_p.parse(person_obj.birthday).date() - except (BaseException, Exception): - birthdate = None - try: - deathdate = person_obj.death_day and tz_p.parse(person_obj.death_day).date() - except (BaseException, Exception): - deathdate = None - return Person(p_id=person_obj.id, name=person_obj.name, - image=person_obj.image and person_obj.image.get('original'), - gender=PersonGenders.named.get(person_obj.gender and person_obj.gender.lower(), - PersonGenders.unknown), - birthdate=birthdate, deathdate=deathdate, - country=person_obj.country and person_obj.country.get('name'), - country_code=person_obj.country and person_obj.country.get('code'), - country_timezone=person_obj.country and person_obj.country.get('timezone'), - thumb_url=person_obj.image and person_obj.image.get('medium'), - url=person_obj.url, ids={TVINFO_TVMAZE: person_obj.id}, characters=ch - ) - - def _search_person(self, name=None, ids=None): - # type: (AnyStr, Dict[integer_types, integer_types]) -> List[Person] - urls, result, ids = [], [], ids or {} - for tv_src in self.supported_person_id_searches: - if tv_src in ids: - if TVINFO_TVMAZE == 
tv_src: - try: - r = self.get_person(ids[tv_src]) - except ConnectionSkipException as e: - raise e - except (BaseException, Exception): - r = None - if r: - result.append(r) - if name: - try: - r = tvmaze.people_search(name) - except ConnectionSkipException as e: - raise e - except (BaseException, Exception): - r = None - if r: - for p in r: - if not any(1 for ep in result if p.id == ep.id): - result.append(self._convert_person(p)) - return result - - def get_person(self, p_id, get_show_credits=False, get_images=False, **kwargs): - # type: (integer_types, bool, bool, Any) -> Optional[Person] - if not p_id: - return - kw = {} - to_embed = [] - if get_show_credits: - to_embed.append('castcredits') - if to_embed: - kw['embed'] = ','.join(to_embed) - try: - p = tvmaze.person_main_info(p_id, **kw) - except ConnectionSkipException as e: - raise e - except (BaseException, Exception): - p = None - if p: - return self._convert_person(p) - - def get_premieres(self, result_count=100, get_extra_images=False, **kwargs): - # type: (...) -> List[TVInfoEpisode] - return self._filtered_schedule(lambda e: all([1 == e.season_number, 1 == e.episode_number]), - get_images=get_extra_images) - - def get_returning(self, result_count=100, get_extra_images=False, **kwargs): - # type: (...) 
-> List[TVInfoEpisode] - return self._filtered_schedule(lambda e: all([1 != e.season_number, 1 == e.episode_number]), - get_images=get_extra_images) - - def _make_episode(self, episode_data, show_data=None, get_images=False): - # type: (TVMazeEpisode, TVMazeShow, bool) -> TVInfoEpisode - """ - make out of TVMazeEpisode object and optionally TVMazeShow a TVInfoEpisode - """ - ti_show = TVInfoShow() - ti_show.seriesname = show_data.name - ti_show.id = show_data.maze_id - ti_show.seriesid = ti_show.id - ti_show.language = show_data.language - ti_show.overview = show_data.summary - ti_show.firstaired = show_data.premiered - ti_show.runtime = show_data.average_runtime or show_data.runtime - ti_show.vote_average = show_data.rating and show_data.rating.get('average') - ti_show.popularity = show_data.weight - ti_show.genre_list = show_data.genres or [] - ti_show.genre = '|%s|' % '|'.join(ti_show.genre_list) - ti_show.official_site = show_data.official_site - ti_show.status = show_data.status - ti_show.show_type = show_data.type - ti_show.lastupdated = show_data.updated - ti_show.poster = show_data.image and show_data.image.get('original') - ti_show.aliases = [a.name for a in show_data.akas] - if 'days' in show_data.schedule: - ti_show.airs_dayofweek = ', '.join(show_data.schedule['days']) - network = show_data.network or show_data.web_channel - if network: - ti_show.network_is_stream = None is not show_data.web_channel - ti_show.network = network.name - ti_show.network_id = network.maze_id - ti_show.network_country = network.country - ti_show.network_country_code = network.code - ti_show.network_timezone = network.timezone - if get_images and show_data.images: - b_set, f_set, p_set = False, False, False - for cur_img in show_data.images: - img_type = img_type_map.get(cur_img.type, TVInfoImageType.other) - img_width, img_height = cur_img.resolutions['original'].get('width'), \ - cur_img.resolutions['original'].get('height') - img_ar = img_width and img_height and 
float(img_width) / float(img_height) - img_ar_type = self._which_type(img_width, img_ar) - if TVInfoImageType.poster == img_type and img_ar and img_ar_type != img_type and \ - ti_show.poster == cur_img.resolutions.get('original')['url']: - p_set = False - ti_show.poster = None - ti_show.poster_thumb = None - img_type = (TVInfoImageType.other, img_type)[ - not img_ar or img_ar_type == img_type or - img_type not in (TVInfoImageType.banner, TVInfoImageType.poster, TVInfoImageType.fanart)] - img_src = {} - for cur_res, cur_img_url in iteritems(cur_img.resolutions): - img_size = img_size_map.get(cur_res) - if img_size: - img_src[img_size] = cur_img_url.get('url') - ti_show.images.setdefault(img_type, []).append( - TVInfoImage( - image_type=img_type, sizes=img_src, img_id=cur_img.id, main_image=cur_img.main, - type_str=cur_img.type, width=img_width, height=img_height, aspect_ratio=img_ar)) - if not p_set and TVInfoImageType.poster == img_type: - p_set = True - ti_show.poster = cur_img.resolutions.get('original')['url'] - ti_show.poster_thumb = cur_img.resolutions.get('original')['url'] - elif not b_set and 'banner' == cur_img.type and TVInfoImageType.banner == img_type: - b_set = True - ti_show.banner = cur_img.resolutions.get('original')['url'] - ti_show.banner_thumb = cur_img.resolutions.get('medium')['url'] - elif not f_set and 'background' == cur_img.type and TVInfoImageType.fanart == img_type: - f_set = True - ti_show.fanart = cur_img.resolutions.get('original')['url'] - ti_show.ids = TVInfoIDs( - tvdb=show_data.externals.get('thetvdb'), rage=show_data.externals.get('tvrage'), tvmaze=show_data.id, - imdb=show_data.externals.get('imdb') and try_int(show_data.externals.get('imdb').replace('tt', ''), None)) - ti_show.imdb_id = show_data.externals.get('imdb') - if isinstance(ti_show.imdb_id, integer_types): - ti_show.imdb_id = 'tt%07d' % ti_show.imdb_id - - ti_episode = TVInfoEpisode() - ti_episode.id = episode_data.maze_id - ti_episode.seasonnumber = 
episode_data.season_number - ti_episode.episodenumber = episode_data.episode_number - ti_episode.episodename = episode_data.title - ti_episode.airtime = episode_data.airtime - ti_episode.firstaired = episode_data.airdate - if episode_data.airstamp: - try: - at = _datetime_to_timestamp(tz_p.parse(episode_data.airstamp)) - ti_episode.timestamp = at - except (BaseException, Exception): - pass - ti_episode.filename = episode_data.image and (episode_data.image.get('original') or - episode_data.image.get('medium')) - ti_episode.is_special = episode_data.is_special() - ti_episode.overview = episode_data.summary - ti_episode.runtime = episode_data.runtime - ti_episode.show = ti_show - return ti_episode - - def _filtered_schedule(self, condition, get_images=False): - try: - result = sorted([ - e for e in tvmaze.get_full_schedule() - if condition(e) and (None is e.show.language or re.search('(?i)eng|jap', e.show.language))], - key=lambda x: x.show.premiered or x.airstamp) - return [self._make_episode(r, r.show, get_images) for r in result] - except(BaseException, Exception): - return [] diff --git a/lib/tvmaze_api/tvmaze_exceptions.py b/lib/tvmaze_api/tvmaze_exceptions.py deleted file mode 100644 index 6302d07..0000000 --- a/lib/tvmaze_api/tvmaze_exceptions.py +++ /dev/null @@ -1,62 +0,0 @@ -# encoding:utf-8 - -"""Custom exceptions used or raised by tvmaze_api -""" - -__author__ = 'Prinz23' -__version__ = '1.0' - -__all__ = ['TvMazeException', 'TvMazeError', 'TvMazeUserabort', 'TvMazeShownotfound', - 'TvMazeSeasonnotfound', 'TvMazeEpisodenotfound', 'TvMazeAttributenotfound', 'TvMazeTokenexpired'] - -from lib.tvinfo_base.exceptions import * - - -class TvMazeException(BaseTVinfoException): - """Any exception generated by tvdb_api - """ - pass - - -class TvMazeError(BaseTVinfoError, TvMazeException): - """An error with thetvdb.com (Cannot connect, for example) - """ - pass - - -class TvMazeUserabort(BaseTVinfoUserabort, TvMazeError): - """User aborted the interactive selection 
(via - the q command, ^c etc) - """ - pass - - -class TvMazeShownotfound(BaseTVinfoShownotfound, TvMazeError): - """Show cannot be found on thetvdb.com (non-existant show) - """ - pass - - -class TvMazeSeasonnotfound(BaseTVinfoSeasonnotfound, TvMazeError): - """Season cannot be found on thetvdb.com - """ - pass - - -class TvMazeEpisodenotfound(BaseTVinfoEpisodenotfound, TvMazeError): - """Episode cannot be found on thetvdb.com - """ - pass - - -class TvMazeAttributenotfound(BaseTVinfoAttributenotfound, TvMazeError): - """Raised if an episode does not have the requested - attribute (such as a episode name) - """ - pass - - -class TvMazeTokenexpired(BaseTVinfoAuthenticationerror, TvMazeError): - """token expired or missing thetvdb.com - """ - pass diff --git a/sickbeard/__init__.py b/sickbeard/__init__.py index f0f20b3..d747fa2 100755 --- a/sickbeard/__init__.py +++ b/sickbeard/__init__.py @@ -57,7 +57,7 @@ from adba.aniDBerrors import AniDBError # noinspection PyProtectedMember from browser_ua import get_ua from configobj import ConfigObj -from libtrakt import TraktAPI +from api_trakt import TraktAPI from _23 import b64encodestring, decode_bytes, filter_iter, list_items, map_list, ordered_dict, scandir from six import iteritems, PY2, string_types @@ -69,7 +69,7 @@ if False: from adba import Connection from .event_queue import Events from .tv import TVShow - from lib.libtrakt.trakt import TraktAccount + from lib.api_trakt.trakt import TraktAccount PID = None ENV = {} diff --git a/sickbeard/config.py b/sickbeard/config.py index 3bca7fb..088b192 100644 --- a/sickbeard/config.py +++ b/sickbeard/config.py @@ -26,7 +26,7 @@ import encodingKludge as ek import sickbeard import sickbeard.providers from . 
import db, helpers, logger, naming -from lib.libtrakt import TraktAPI +from lib.api_trakt import TraktAPI from _23 import filter_list, urlsplit, urlunsplit from six import string_types diff --git a/sickbeard/indexers/indexer_config.py b/sickbeard/indexers/indexer_config.py index d55d83b..9577b31 100644 --- a/sickbeard/indexers/indexer_config.py +++ b/sickbeard/indexers/indexer_config.py @@ -1,8 +1,8 @@ -from lib.tvdb_api.tvdb_api import Tvdb -from lib.libtrakt.indexerapiinterface import TraktIndexer -from lib.tvmaze_api.tvmaze_api import TvMaze -from lib.tmdb_api.tmdb_api import TmdbIndexer -from lib.imdb_api.imdb_api import IMDbIndexer +from lib.api_tvdb.tvdb_api import Tvdb +from lib.api_trakt.indexerapiinterface import TraktIndexer +from lib.api_tvmaze.tvmaze_api import TvMaze +from lib.api_tmdb.tmdb_api import TmdbIndexer +from lib.api_imdb.imdb_api import IMDbIndexer # noinspection PyUnresolvedReferences from lib.tvinfo_base import ( TVINFO_FACEBOOK, TVINFO_INSTAGRAM, TVINFO_TWITTER, TVINFO_WIKIPEDIA, diff --git a/sickbeard/logger.py b/sickbeard/logger.py index 6a41d6f..4d26213 100644 --- a/sickbeard/logger.py +++ b/sickbeard/logger.py @@ -76,7 +76,7 @@ class SBRotatingLogHandler(object): self.log_lock = threading.Lock() self.log_types = ['sickbeard', 'tornado.application', 'tornado.general', 'subliminal', 'adba', 'encodingKludge', 'tvdb.api', 'TVInfo'] - self.external_loggers = ['sg.helper', 'libtrakt', 'libtrakt.api'] + self.external_loggers = ['sg.helper', 'api_trakt', 'api_trakt.api'] self.log_types_null = ['tornado.access'] def __del__(self): diff --git a/sickbeard/notifiers/__init__.py b/sickbeard/notifiers/__init__.py index 8f5f729..0c5d35e 100644 --- a/sickbeard/notifiers/__init__.py +++ b/sickbeard/notifiers/__init__.py @@ -20,7 +20,7 @@ import os import re # import pushalot -# from lib import libtrakt +# from lib import api_trakt from . 
import emby, kodi, plex, xbmc, \ boxcar2, nmj, nmjv2, pushbullet, pushover, pytivo, synoindex, synologynotifier, \ discord, emailnotify, gitter, libnotify, growl, prowl, slack, telegram, trakt diff --git a/sickbeard/notifiers/trakt.py b/sickbeard/notifiers/trakt.py index 03c1486..716162c 100644 --- a/sickbeard/notifiers/trakt.py +++ b/sickbeard/notifiers/trakt.py @@ -20,7 +20,7 @@ import os from .generic import BaseNotifier import sickbeard -from lib.libtrakt import TraktAPI, exceptions +from lib.api_trakt import TraktAPI, exceptions from exceptions_helper import ConnectionSkipException from _23 import list_keys diff --git a/sickbeard/webserve.py b/sickbeard/webserve.py index 25b737e..b459355 100644 --- a/sickbeard/webserve.py +++ b/sickbeard/webserve.py @@ -91,8 +91,8 @@ from lib.cfscrape import CloudflareScraper from lib.dateutil import tz, zoneinfo from lib.dateutil.relativedelta import relativedelta from lib.fuzzywuzzy import fuzz -from lib.libtrakt import TraktAPI -from lib.libtrakt.exceptions import TraktException, TraktAuthException +from lib.api_trakt import TraktAPI +from lib.api_trakt.exceptions import TraktException, TraktAuthException import lib.rarfile.rarfile as rarfile