#
# Copyright 2014 Facebook
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""A non-blocking TCP connection factory.
"""
from __future__ import absolute_import, division, print_function
import functools
import socket
import numbers
import datetime
from tornado_py2.concurrent import Future, future_add_done_callback
from tornado_py2.ioloop import IOLoop
from tornado_py2.iostream import IOStream
from tornado_py2 import gen
from tornado_py2.netutil import Resolver
from tornado_py2.platform.auto import set_close_exec
from tornado_py2.gen import TimeoutError
from tornado_py2.util import timedelta_to_seconds
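
# Initial delay (in seconds) before a parallel connection attempt to the
# other address family is started; see the "Happy Eyeballs" notes on
# _Connector below.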
_INITIAL_CONNECT_TIMEOUT = 0.3
class _Connector(object):
"""A stateless implementation of the "Happy Eyeballs" algorithm.
"Happy Eyeballs" is documented in RFC6555 as the recommended practice
for when both IPv4 and IPv6 addresses are available.
In this implementation, we partition the addresses by family, and
make the first connection attempt to whichever address was
returned first by ``getaddrinfo``. If that connection fails or
times out, we begin a connection in parallel to the first address
of the other family. If there are additional failures we retry
with other addresses, keeping one connection attempt per family
in flight at a time.
http://tools.ietf.org/html/rfc6555
"""
def __init__(self, addrinfo, connect):
self.io_loop = IOLoop.current()
self.connect = connect
self.future = Future()
self.timeout = None
self.connect_timeout = None
self.last_error = None
self.remaining = len(addrinfo)
self.primary_addrs, self.secondary_addrs = self.split(addrinfo)
self.streams = set()
@staticmethod
def split(addrinfo):
"""Partition the ``addrinfo`` list by address family.
Returns two lists. The first list contains the first entry from
``addrinfo`` and all others with the same family, and the
second list contains all other addresses (normally one list will
be AF_INET and the other AF_INET6, although non-standard resolvers
may return additional families).
"""
primary = []
secondary = []
primary_af = addrinfo[0][0]
for af, addr in addrinfo:
if af == primary_af:
primary.append((af, addr))
else:
secondary.append((af, addr))
return primary, secondary
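
    # For example (hypothetical addresses), with an IPv4 entry first:
    #
    #   split([(AF_INET, a1), (AF_INET6, a2), (AF_INET, a3)])
    #   -> ([(AF_INET, a1), (AF_INET, a3)], [(AF_INET6, a2)])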
def start(self, timeout=_INITIAL_CONNECT_TIMEOUT, connect_timeout=None):
self.try_connect(iter(self.primary_addrs))
self.set_timeout(timeout)
if connect_timeout is not None:
self.set_connect_timeout(connect_timeout)
return self.future
def try_connect(self, addrs):
try:
af, addr = next(addrs)
except StopIteration:
# We've reached the end of our queue, but the other queue
# might still be working. Send a final error on the future
# only when both queues are finished.
if self.remaining == 0 and not self.future.done():
self.future.set_exception(self.last_error or
IOError("connection failed"))
return
        stream, future = self.connect(af, addr)
        if stream is not None:
            # self.connect may return (None, failed_future) if stream
            # creation itself failed; only track real streams.
            self.streams.add(stream)
future_add_done_callback(
future, functools.partial(self.on_connect_done, addrs, af, addr))
def on_connect_done(self, addrs, af, addr, future):
self.remaining -= 1
try:
stream = future.result()
except Exception as e:
if self.future.done():
return
# Error: try again (but remember what happened so we have an
# error to raise in the end)
self.last_error = e
self.try_connect(addrs)
if self.timeout is not None:
# If the first attempt failed, don't wait for the
# timeout to try an address from the secondary queue.
self.io_loop.remove_timeout(self.timeout)
self.on_timeout()
return
self.clear_timeouts()
if self.future.done():
# This is a late arrival; just drop it.
stream.close()
else:
self.streams.discard(stream)
self.future.set_result((af, addr, stream))
self.close_streams()
def set_timeout(self, timeout):
self.timeout = self.io_loop.add_timeout(self.io_loop.time() + timeout,
self.on_timeout)
def on_timeout(self):
self.timeout = None
if not self.future.done():
self.try_connect(iter(self.secondary_addrs))
def clear_timeout(self):
if self.timeout is not None:
self.io_loop.remove_timeout(self.timeout)
def set_connect_timeout(self, connect_timeout):
self.connect_timeout = self.io_loop.add_timeout(
connect_timeout, self.on_connect_timeout)
def on_connect_timeout(self):
if not self.future.done():
self.future.set_exception(TimeoutError())
self.close_streams()
def clear_timeouts(self):
if self.timeout is not None:
self.io_loop.remove_timeout(self.timeout)
if self.connect_timeout is not None:
self.io_loop.remove_timeout(self.connect_timeout)
def close_streams(self):
for stream in self.streams:
stream.close()
class TCPClient(object):
"""A non-blocking TCP connection factory.
.. versionchanged:: 5.0
The ``io_loop`` argument (deprecated since version 4.1) has been removed.
"""
def __init__(self, resolver=None):
if resolver is not None:
self.resolver = resolver
self._own_resolver = False
else:
self.resolver = Resolver()
self._own_resolver = True
def close(self):
if self._own_resolver:
self.resolver.close()
@gen.coroutine
def connect(self, host, port, af=socket.AF_UNSPEC, ssl_options=None,
max_buffer_size=None, source_ip=None, source_port=None,
timeout=None):
"""Connect to the given host and port.
Asynchronously returns an `.IOStream` (or `.SSLIOStream` if
``ssl_options`` is not None).
Using the ``source_ip`` kwarg, one can specify the source
IP address to use when establishing the connection.
In case the user needs to resolve and
use a specific interface, it has to be handled outside
of Tornado as this depends very much on the platform.
Raises `TimeoutError` if the input future does not complete before
``timeout``, which may be specified in any form allowed by
`.IOLoop.add_timeout` (i.e. a `datetime.timedelta` or an absolute time
relative to `.IOLoop.time`)
Similarly, when the user requires a certain source port, it can
be specified using the ``source_port`` arg.
.. versionchanged:: 4.5
Added the ``source_ip`` and ``source_port`` arguments.
.. versionchanged:: 5.0
Added the ``timeout`` argument.
"""
if timeout is not None:
if isinstance(timeout, numbers.Real):
timeout = IOLoop.current().time() + timeout
elif isinstance(timeout, datetime.timedelta):
timeout = IOLoop.current().time() + timedelta_to_seconds(timeout)
else:
raise TypeError("Unsupported timeout %r" % timeout)
if timeout is not None:
addrinfo = yield gen.with_timeout(
timeout, self.resolver.resolve(host, port, af))
else:
addrinfo = yield self.resolver.resolve(host, port, af)
connector = _Connector(
addrinfo,
functools.partial(self._create_stream, max_buffer_size,
source_ip=source_ip, source_port=source_port)
)
af, addr, stream = yield connector.start(connect_timeout=timeout)
# TODO: For better performance we could cache the (af, addr)
# information here and re-use it on subsequent connections to
# the same host. (http://tools.ietf.org/html/rfc6555#section-4.2)
if ssl_options is not None:
if timeout is not None:
stream = yield gen.with_timeout(timeout, stream.start_tls(
False, ssl_options=ssl_options, server_hostname=host))
else:
stream = yield stream.start_tls(False, ssl_options=ssl_options,
server_hostname=host)
raise gen.Return(stream)
def _create_stream(self, max_buffer_size, af, addr, source_ip=None,
source_port=None):
# Always connect in plaintext; we'll convert to ssl if necessary
# after one connection has completed.
source_port_bind = source_port if isinstance(source_port, int) else 0
source_ip_bind = source_ip
        if source_port_bind and not source_ip:
            # The user requested a specific source port but no source IP;
            # bind to the loopback address of the requested address family:
            # ::1 for IPv6, 127.0.0.1 for IPv4.
            source_ip_bind = '::1' if af == socket.AF_INET6 else '127.0.0.1'
socket_obj = socket.socket(af)
set_close_exec(socket_obj.fileno())
        if source_port_bind or source_ip_bind:
            # The user asked to bind to a specific source IP and/or port.
try:
socket_obj.bind((source_ip_bind, source_port_bind))
except socket.error:
socket_obj.close()
# Fail loudly if unable to use the IP/port.
raise
try:
stream = IOStream(socket_obj,
max_buffer_size=max_buffer_size)
        except socket.error as e:
            # IOStream creation failed.  Return a (stream, future) pair, as
            # _Connector.try_connect unpacks the result; report the error
            # through the already-resolved future.
            fu = Future()
            fu.set_exception(e)
            return None, fu
else:
return stream, stream.connect(addr)
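

if __name__ == '__main__':
    # Minimal runnable demo (an assumption, not part of upstream Tornado):
    # requires outbound network access to example.com.
    @gen.coroutine
    def _demo():
        stream = yield TCPClient().connect('example.com', 80, timeout=30)
        yield stream.write(b'HEAD / HTTP/1.0\r\nHost: example.com\r\n\r\n')
        reply = yield stream.read_until_close()
        # Print just the status line of the response.
        print(reply.splitlines()[0].decode('latin1'))

    IOLoop.current().run_sync(_demo)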