diff --git a/libs/tornado/__init__.py b/libs/tornado/__init__.py index 0517408..eefe0f2 100755 --- a/libs/tornado/__init__.py +++ b/libs/tornado/__init__.py @@ -25,5 +25,5 @@ from __future__ import absolute_import, division, print_function, with_statement # is zero for an official release, positive for a development branch, # or negative for a release candidate or beta (after the base version # number has been incremented) -version = "3.3.dev1" -version_info = (3, 3, 0, -100) +version = "4.0.1" +version_info = (4, 0, 1, -100) diff --git a/libs/tornado/auth.py b/libs/tornado/auth.py index f15413e..7bd3fa1 100755 --- a/libs/tornado/auth.py +++ b/libs/tornado/auth.py @@ -51,7 +51,7 @@ Example usage for Google OpenID:: response_type='code', extra_params={'approval_prompt': 'auto'}) -.. versionchanged:: 3.3 +.. versionchanged:: 4.0 All of the callback interfaces in this module are now guaranteed to run their callback with an argument of ``None`` on error. Previously some functions would do this while others would simply @@ -883,9 +883,10 @@ class FriendFeedMixin(OAuthMixin): class GoogleMixin(OpenIdMixin, OAuthMixin): """Google Open ID / OAuth authentication. - *Deprecated:* New applications should use `GoogleOAuth2Mixin` - below instead of this class. As of May 19, 2014, Google has stopped - supporting registration-free authentication. + .. deprecated:: 4.0 + New applications should use `GoogleOAuth2Mixin` + below instead of this class. As of May 19, 2014, Google has stopped + supporting registration-free authentication. No application registration is necessary to use Google for authentication or to access Google resources on behalf of a user. @@ -1053,9 +1054,10 @@ class GoogleOAuth2Mixin(OAuth2Mixin): class FacebookMixin(object): """Facebook Connect authentication. - *Deprecated:* New applications should use `FacebookGraphMixin` - below instead of this class. This class does not support the - Future-based interface seen on other classes in this module. + .. deprecated:: 1.1 + New applications should use `FacebookGraphMixin` + below instead of this class. This class does not support the + Future-based interface seen on other classes in this module. To authenticate with Facebook, register your application with Facebook at http://www.facebook.com/developers/apps.php. Then diff --git a/libs/tornado/concurrent.py b/libs/tornado/concurrent.py index 63b0a8c..702aa35 100755 --- a/libs/tornado/concurrent.py +++ b/libs/tornado/concurrent.py @@ -60,7 +60,7 @@ class Future(object): This functionality was previously available in a separate class ``TracebackFuture``, which is now a deprecated alias for this class. - .. versionchanged:: 3.3 + .. versionchanged:: 4.0 `tornado.concurrent.Future` is always a thread-unsafe ``Future`` with support for the ``exc_info`` methods. Previously it would be an alias for the thread-safe `concurrent.futures.Future` @@ -152,7 +152,7 @@ class Future(object): def exc_info(self): """Returns a tuple in the same format as `sys.exc_info` or None. - .. versionadded:: 3.3 + .. versionadded:: 4.0 """ return self._exc_info @@ -161,7 +161,7 @@ class Future(object): Preserves tracebacks on Python 2. - .. versionadded:: 3.3 + .. 
versionadded:: 4.0 """ self._exc_info = exc_info self.set_exception(exc_info[1]) diff --git a/libs/tornado/curl_httpclient.py b/libs/tornado/curl_httpclient.py index fc7d7f2..3da59a4 100755 --- a/libs/tornado/curl_httpclient.py +++ b/libs/tornado/curl_httpclient.py @@ -51,18 +51,6 @@ class CurlAsyncHTTPClient(AsyncHTTPClient): self._fds = {} self._timeout = None - try: - self._socket_action = self._multi.socket_action - except AttributeError: - # socket_action is found in pycurl since 7.18.2 (it's been - # in libcurl longer than that but wasn't accessible to - # python). - gen_log.warning("socket_action method missing from pycurl; " - "falling back to socket_all. Upgrading " - "libcurl and pycurl will improve performance") - self._socket_action = \ - lambda fd, action: self._multi.socket_all() - # libcurl has bugs that sometimes cause it to not report all # relevant file descriptors and timeouts to TIMERFUNCTION/ # SOCKETFUNCTION. Mitigate the effects of such bugs by @@ -87,7 +75,6 @@ class CurlAsyncHTTPClient(AsyncHTTPClient): for curl in self._curls: curl.close() self._multi.close() - self._closed = True super(CurlAsyncHTTPClient, self).close() def fetch_impl(self, request, callback): @@ -143,7 +130,7 @@ class CurlAsyncHTTPClient(AsyncHTTPClient): action |= pycurl.CSELECT_OUT while True: try: - ret, num_handles = self._socket_action(fd, action) + ret, num_handles = self._multi.socket_action(fd, action) except pycurl.error as e: ret = e.args[0] if ret != pycurl.E_CALL_MULTI_PERFORM: @@ -156,7 +143,7 @@ class CurlAsyncHTTPClient(AsyncHTTPClient): self._timeout = None while True: try: - ret, num_handles = self._socket_action( + ret, num_handles = self._multi.socket_action( pycurl.SOCKET_TIMEOUT, 0) except pycurl.error as e: ret = e.args[0] @@ -224,11 +211,6 @@ class CurlAsyncHTTPClient(AsyncHTTPClient): "callback": callback, "curl_start_time": time.time(), } - # Disable IPv6 to mitigate the effects of this bug - # on curl versions <= 7.21.0 - # http://sourceforge.net/tracker/?func=detail&aid=3017819&group_id=976&atid=100976 - if pycurl.version_info()[2] <= 0x71500: # 7.21.0 - curl.setopt(pycurl.IPRESOLVE, pycurl.IPRESOLVE_V4) _curl_setup_request(curl, request, curl.info["buffer"], curl.info["headers"]) self._multi.add_handle(curl) @@ -350,7 +332,7 @@ def _curl_setup_request(curl, request, buffer, headers): curl.setopt(pycurl.USERAGENT, "Mozilla/5.0 (compatible; pycurl)") if request.network_interface: curl.setopt(pycurl.INTERFACE, request.network_interface) - if request.use_gzip: + if request.decompress_response: curl.setopt(pycurl.ENCODING, "gzip,deflate") else: curl.setopt(pycurl.ENCODING, "none") @@ -384,7 +366,6 @@ def _curl_setup_request(curl, request, buffer, headers): if request.allow_ipv6 is False: # Curl behaves reasonably when DNS resolution gives an ipv6 address # that we can't reach, so allow ipv6 unless the user asks to disable. 
- # (but see version check in _process_queue above) curl.setopt(pycurl.IPRESOLVE, pycurl.IPRESOLVE_V4) else: curl.setopt(pycurl.IPRESOLVE, pycurl.IPRESOLVE_WHATEVER) @@ -474,7 +455,7 @@ def _curl_header_callback(headers, header_line): try: (__, __, reason) = httputil.parse_response_start_line(header_line) header_line = "X-Http-Reason: %s" % reason - except httputil.HTTPInputException: + except httputil.HTTPInputError: return if not header_line: return diff --git a/libs/tornado/gen.py b/libs/tornado/gen.py index 4d1dc6e..06f2715 100755 --- a/libs/tornado/gen.py +++ b/libs/tornado/gen.py @@ -29,16 +29,7 @@ could be written with ``gen`` as:: Most asynchronous functions in Tornado return a `.Future`; yielding this object returns its `~.Future.result`. -For functions that do not return ``Futures``, `Task` works with any -function that takes a ``callback`` keyword argument (most Tornado functions -can be used in either style, although the ``Future`` style is preferred -since it is both shorter and provides better exception handling):: - - @gen.coroutine - def get(self): - yield gen.Task(AsyncHTTPClient().fetch, "http://example.com") - -You can also yield a list or dict of ``Futures`` and/or ``Tasks``, which will be +You can also yield a list or dict of ``Futures``, which will be started at the same time and run in parallel; a list or dict of results will be returned when they are all finished:: @@ -54,30 +45,6 @@ be returned when they are all finished:: .. versionchanged:: 3.2 Dict support added. - -For more complicated interfaces, `Task` can be split into two parts: -`Callback` and `Wait`:: - - class GenAsyncHandler2(RequestHandler): - @gen.coroutine - def get(self): - http_client = AsyncHTTPClient() - http_client.fetch("http://example.com", - callback=(yield gen.Callback("key"))) - response = yield gen.Wait("key") - do_something_with_response(response) - self.render("template.html") - -The ``key`` argument to `Callback` and `Wait` allows for multiple -asynchronous operations to be started at different times and proceed -in parallel: yield several callbacks with different keys, then wait -for them once all the async operations have started. - -The result of a `Wait` or `Task` yield expression depends on how the callback -was run. If it was called with no arguments, the result is ``None``. If -it was called with one argument, the result is that argument. If it was -called with more than one argument or any keyword arguments, the result -is an `Arguments` object, which is a named tuple ``(args, kwargs)``. """ from __future__ import absolute_import, division, print_function, with_statement @@ -252,8 +219,8 @@ class Return(Exception): class YieldPoint(object): """Base class for objects that may be yielded from the generator. - Applications do not normally need to use this class, but it may be - subclassed to provide additional yielding behavior. + .. deprecated:: 4.0 + Use `Futures <.Future>` instead. """ def start(self, runner): """Called by the runner after the generator has yielded. @@ -289,6 +256,9 @@ class Callback(YieldPoint): The callback may be called with zero or one arguments; if an argument is given it will be returned by `Wait`. + + .. deprecated:: 4.0 + Use `Futures <.Future>` instead. """ def __init__(self, key): self.key = key @@ -305,7 +275,11 @@ class Callback(YieldPoint): class Wait(YieldPoint): - """Returns the argument passed to the result of a previous `Callback`.""" + """Returns the argument passed to the result of a previous `Callback`. + + .. 
deprecated:: 4.0 + Use `Futures <.Future>` instead. + """ def __init__(self, key): self.key = key @@ -326,6 +300,9 @@ class WaitAll(YieldPoint): a list of results in the same order. `WaitAll` is equivalent to yielding a list of `Wait` objects. + + .. deprecated:: 4.0 + Use `Futures <.Future>` instead. """ def __init__(self, keys): self.keys = keys @@ -341,21 +318,13 @@ class WaitAll(YieldPoint): def Task(func, *args, **kwargs): - """Runs a single asynchronous operation. + """Adapts a callback-based asynchronous function for use in coroutines. Takes a function (and optional additional arguments) and runs it with those arguments plus a ``callback`` keyword argument. The argument passed to the callback is returned as the result of the yield expression. - A `Task` is equivalent to a `Callback`/`Wait` pair (with a unique - key generated automatically):: - - result = yield gen.Task(func, args) - - func(args, callback=(yield gen.Callback(key))) - result = yield gen.Wait(key) - - .. versionchanged:: 3.3 + .. versionchanged:: 4.0 ``gen.Task`` is now a function that returns a `.Future`, instead of a subclass of `YieldPoint`. It still behaves the same way when yielded. @@ -464,7 +433,7 @@ def multi_future(children): This function is faster than the `Multi` `YieldPoint` because it does not require the creation of a stack context. - .. versionadded:: 3.3 + .. versionadded:: 4.0 """ if isinstance(children, dict): keys = list(children.keys()) @@ -520,7 +489,7 @@ def with_timeout(timeout, future, io_loop=None): Currently only supports Futures, not other `YieldPoint` classes. - .. versionadded:: 3.3 + .. versionadded:: 4.0 """ # TODO: allow yield points in addition to futures? # Tricky to do with stack_context semantics. @@ -564,7 +533,7 @@ coroutines that are likely to yield Futures that are ready instantly. Usage: ``yield gen.moment`` -.. versionadded:: 3.3 +.. versionadded:: 4.0 """ moment.set_result(None) diff --git a/libs/tornado/http1connection.py b/libs/tornado/http1connection.py index edaa5d9..1ac24f5 100644 --- a/libs/tornado/http1connection.py +++ b/libs/tornado/http1connection.py @@ -16,11 +16,13 @@ """Client and server implementations of HTTP/1.x. -.. versionadded:: 3.3 +.. versionadded:: 4.0 """ from __future__ import absolute_import, division, print_function, with_statement +import re + from tornado.concurrent import Future from tornado.escape import native_str, utf8 from tornado import gen @@ -56,7 +58,7 @@ class HTTP1ConnectionParameters(object): """ def __init__(self, no_keep_alive=False, chunk_size=None, max_header_size=None, header_timeout=None, max_body_size=None, - body_timeout=None, use_gzip=False): + body_timeout=None, decompress=False): """ :arg bool no_keep_alive: If true, always close the connection after one request. 
@@ -65,7 +67,8 @@ class HTTP1ConnectionParameters(object): :arg float header_timeout: how long to wait for all headers (seconds) :arg int max_body_size: maximum amount of data for body :arg float body_timeout: how long to wait while reading body (seconds) - :arg bool use_gzip: if true, decode incoming ``Content-Encoding: gzip`` + :arg bool decompress: if true, decode incoming + ``Content-Encoding: gzip`` """ self.no_keep_alive = no_keep_alive self.chunk_size = chunk_size or 65536 @@ -73,7 +76,7 @@ class HTTP1ConnectionParameters(object): self.header_timeout = header_timeout self.max_body_size = max_body_size self.body_timeout = body_timeout - self.use_gzip = use_gzip + self.decompress = decompress class HTTP1Connection(httputil.HTTPConnection): @@ -141,7 +144,7 @@ class HTTP1Connection(httputil.HTTPConnection): Returns a `.Future` that resolves to None after the full response has been read. """ - if self.params.use_gzip: + if self.params.decompress: delegate = _GzipMessageDelegate(delegate, self.params.chunk_size) return self._read_message(delegate) @@ -190,8 +193,17 @@ class HTTP1Connection(httputil.HTTPConnection): skip_body = True code = start_line.code if code == 304: + # 304 responses may include the content-length header + # but do not actually have a body. + # http://tools.ietf.org/html/rfc7230#section-3.3 skip_body = True if code >= 100 and code < 200: + # 1xx responses should never indicate the presence of + # a body. + if ('Content-Length' in headers or + 'Transfer-Encoding' in headers): + raise httputil.HTTPInputError( + "Response code %d cannot have body" % code) # TODO: client delegates will get headers_received twice # in the case of a 100-continue. Document or change? yield self._read_message(delegate) @@ -200,7 +212,8 @@ class HTTP1Connection(httputil.HTTPConnection): not self._write_finished): self.stream.write(b"HTTP/1.1 100 (Continue)\r\n\r\n") if not skip_body: - body_future = self._read_body(headers, delegate) + body_future = self._read_body( + start_line.code if self.is_client else 0, headers, delegate) if body_future is not None: if self._body_timeout is None: yield body_future @@ -231,7 +244,7 @@ class HTTP1Connection(httputil.HTTPConnection): self.close() if self.stream is None: raise gen.Return(False) - except httputil.HTTPInputException as e: + except httputil.HTTPInputError as e: gen_log.info("Malformed HTTP message from %s: %s", self.context, e) self.close() @@ -258,7 +271,7 @@ class HTTP1Connection(httputil.HTTPConnection): def set_close_callback(self, callback): """Sets a callback that will be run when the connection is closed. - .. deprecated:: 3.3 + .. deprecated:: 4.0 Use `.HTTPMessageDelegate.on_connection_close` instead. """ self._close_callback = stack_context.wrap(callback) @@ -377,7 +390,7 @@ class HTTP1Connection(httputil.HTTPConnection): if self._expected_content_remaining < 0: # Close the stream now to stop further framing errors. 
self.stream.close() - raise httputil.HTTPOutputException( + raise httputil.HTTPOutputError( "Tried to write more data than Content-Length") if self._chunking_output and chunk: # Don't write out empty chunks because that means END-OF-STREAM @@ -412,7 +425,7 @@ class HTTP1Connection(httputil.HTTPConnection): self._expected_content_remaining != 0 and not self.stream.closed()): self.stream.close() - raise httputil.HTTPOutputException( + raise httputil.HTTPOutputError( "Tried to write %d bytes less than Content-Length" % self._expected_content_remaining) if self._chunking_output: @@ -477,16 +490,40 @@ class HTTP1Connection(httputil.HTTPConnection): headers = httputil.HTTPHeaders.parse(data[eol:]) except ValueError: # probably form split() if there was no ':' in the line - raise httputil.HTTPInputException("Malformed HTTP headers: %r" % - data[eol:100]) + raise httputil.HTTPInputError("Malformed HTTP headers: %r" % + data[eol:100]) return start_line, headers - def _read_body(self, headers, delegate): - content_length = headers.get("Content-Length") - if content_length: - content_length = int(content_length) + def _read_body(self, code, headers, delegate): + if "Content-Length" in headers: + if "," in headers["Content-Length"]: + # Proxies sometimes cause Content-Length headers to get + # duplicated. If all the values are identical then we can + # use them but if they differ it's an error. + pieces = re.split(r',\s*', headers["Content-Length"]) + if any(i != pieces[0] for i in pieces): + raise httputil.HTTPInputError( + "Multiple unequal Content-Lengths: %r" % + headers["Content-Length"]) + headers["Content-Length"] = pieces[0] + content_length = int(headers["Content-Length"]) + if content_length > self._max_body_size: - raise httputil.HTTPInputException("Content-Length too long") + raise httputil.HTTPInputError("Content-Length too long") + else: + content_length = None + + if code == 204: + # This response code is not allowed to have a non-empty body, + # and has an implicit length of zero instead of read-until-close. + # http://www.w3.org/Protocols/rfc2616/rfc2616-sec4.html#sec4.3 + if ("Transfer-Encoding" in headers or + content_length not in (None, 0)): + raise httputil.HTTPInputError( + "Response with code %d should not have body" % code) + content_length = 0 + + if content_length is not None: return self._read_fixed_body(content_length, delegate) if headers.get("Transfer-Encoding") == "chunked": return self._read_chunked_body(delegate) @@ -515,7 +552,7 @@ class HTTP1Connection(httputil.HTTPConnection): return total_size += chunk_len if total_size > self._max_body_size: - raise httputil.HTTPInputException("chunked body too large") + raise httputil.HTTPInputError("chunked body too large") bytes_to_read = chunk_len while bytes_to_read: chunk = yield self.stream.read_bytes( @@ -581,6 +618,9 @@ class _GzipMessageDelegate(httputil.HTTPMessageDelegate): self._delegate.data_received(tail) return self._delegate.finish() + def on_connection_close(self): + return self._delegate.on_connection_close() + class HTTP1ServerConnection(object): """An HTTP/1.x server.""" diff --git a/libs/tornado/httpclient.py b/libs/tornado/httpclient.py index 94a4593..c8ecf47 100755 --- a/libs/tornado/httpclient.py +++ b/libs/tornado/httpclient.py @@ -22,14 +22,20 @@ to switch to ``curl_httpclient`` for reasons such as the following: * ``curl_httpclient`` was the default prior to Tornado 2.0. 
-Note that if you are using ``curl_httpclient``, it is highly recommended that -you use a recent version of ``libcurl`` and ``pycurl``. Currently the minimum -supported version is 7.18.2, and the recommended version is 7.21.1 or newer. -It is highly recommended that your ``libcurl`` installation is built with -asynchronous DNS resolver (threaded or c-ares), otherwise you may encounter -various problems with request timeouts (for more information, see +Note that if you are using ``curl_httpclient``, it is highly +recommended that you use a recent version of ``libcurl`` and +``pycurl``. Currently the minimum supported version of libcurl is +7.21.1, and the minimum version of pycurl is 7.18.2. It is highly +recommended that your ``libcurl`` installation is built with +asynchronous DNS resolver (threaded or c-ares), otherwise you may +encounter various problems with request timeouts (for more +information, see http://curl.haxx.se/libcurl/c/curl_easy_setopt.html#CURLOPTCONNECTTIMEOUTMS and comments in curl_httpclient.py). + +To select ``curl_httpclient``, call `AsyncHTTPClient.configure` at startup:: + + AsyncHTTPClient.configure("tornado.curl_httpclient.CurlAsyncHTTPClient") """ from __future__ import absolute_import, division, print_function, with_statement @@ -110,10 +116,21 @@ class AsyncHTTPClient(Configurable): actually creates an instance of an implementation-specific subclass, and instances are reused as a kind of pseudo-singleton (one per `.IOLoop`). The keyword argument ``force_instance=True`` - can be used to suppress this singleton behavior. Constructor - arguments other than ``io_loop`` and ``force_instance`` are - deprecated. The implementation subclass as well as arguments to - its constructor can be set with the static method `configure()` + can be used to suppress this singleton behavior. Unless + ``force_instance=True`` is used, no arguments other than + ``io_loop`` should be passed to the `AsyncHTTPClient` constructor. + The implementation subclass as well as arguments to its + constructor can be set with the static method `configure()` + + All `AsyncHTTPClient` implementations support a ``defaults`` + keyword argument, which can be used to set default values for + `HTTPRequest` attributes. For example:: + + AsyncHTTPClient.configure( + None, defaults=dict(user_agent="MyUserAgent")) + # or with force_instance: + client = AsyncHTTPClient(force_instance=True, + defaults=dict(user_agent="MyUserAgent")) """ @classmethod def configurable_base(cls): @@ -133,12 +150,21 @@ class AsyncHTTPClient(Configurable): def __new__(cls, io_loop=None, force_instance=False, **kwargs): io_loop = io_loop or IOLoop.current() - if io_loop in cls._async_clients() and not force_instance: - return cls._async_clients()[io_loop] + if force_instance: + instance_cache = None + else: + instance_cache = cls._async_clients() + if instance_cache is not None and io_loop in instance_cache: + return instance_cache[io_loop] instance = super(AsyncHTTPClient, cls).__new__(cls, io_loop=io_loop, **kwargs) - if not force_instance: - cls._async_clients()[io_loop] = instance + # Make sure the instance knows which cache to remove itself from. + # It can't simply call _async_clients() because we may be in + # __new__(AsyncHTTPClient) but instance.__class__ may be + # SimpleAsyncHTTPClient. 
+ instance._instance_cache = instance_cache + if instance_cache is not None: + instance_cache[instance.io_loop] = instance return instance def initialize(self, io_loop, defaults=None): @@ -146,6 +172,7 @@ class AsyncHTTPClient(Configurable): self.defaults = dict(HTTPRequest._DEFAULTS) if defaults is not None: self.defaults.update(defaults) + self._closed = False def close(self): """Destroys this HTTP client, freeing any file descriptors used. @@ -160,8 +187,13 @@ class AsyncHTTPClient(Configurable): ``close()``. """ - if self._async_clients().get(self.io_loop) is self: - del self._async_clients()[self.io_loop] + if self._closed: + return + self._closed = True + if self._instance_cache is not None: + if self._instance_cache.get(self.io_loop) is not self: + raise RuntimeError("inconsistent AsyncHTTPClient cache") + del self._instance_cache[self.io_loop] def fetch(self, request, callback=None, **kwargs): """Executes a request, asynchronously returning an `HTTPResponse`. @@ -179,6 +211,8 @@ class AsyncHTTPClient(Configurable): Instead, you must check the response's ``error`` attribute or call its `~HTTPResponse.rethrow` method. """ + if self._closed: + raise RuntimeError("fetch() called on closed AsyncHTTPClient") if not isinstance(request, HTTPRequest): request = HTTPRequest(url=request, **kwargs) # We may modify this (to add Host, Accept-Encoding, etc), @@ -248,7 +282,7 @@ class HTTPRequest(object): request_timeout=20.0, follow_redirects=True, max_redirects=5, - use_gzip=True, + decompress_response=True, proxy_password='', allow_nonstandard_methods=False, validate_cert=True) @@ -265,7 +299,7 @@ class HTTPRequest(object): validate_cert=None, ca_certs=None, allow_ipv6=None, client_key=None, client_cert=None, body_producer=None, - expect_100_continue=False): + expect_100_continue=False, decompress_response=None): r"""All parameters except ``url`` are optional. :arg string url: URL to fetch @@ -284,7 +318,7 @@ class HTTPRequest(object): ``curl_httpclient``. When using ``body_producer`` it is recommended to pass a ``Content-Length`` in the headers as otherwise chunked encoding will be used, and many servers do not support chunked - encoding on requests. New in Tornado 3.3 + encoding on requests. New in Tornado 4.0 :arg string auth_username: Username for HTTP authentication :arg string auth_password: Password for HTTP authentication :arg string auth_mode: Authentication mode; default is "basic". @@ -299,7 +333,11 @@ class HTTPRequest(object): or return the 3xx response? :arg int max_redirects: Limit for ``follow_redirects`` :arg string user_agent: String to send as ``User-Agent`` header - :arg bool use_gzip: Request gzip encoding from the server + :arg bool decompress_response: Request a compressed response from + the server and decompress it after downloading. Default is True. + New in Tornado 4.0. + :arg bool use_gzip: Deprecated alias for ``decompress_response`` + since Tornado 4.0. :arg string network_interface: Network interface to use for request. ``curl_httpclient`` only; see note below. :arg callable streaming_callback: If set, ``streaming_callback`` will @@ -342,7 +380,6 @@ class HTTPRequest(object): before sending the request body. Only supported with simple_httpclient. - .. note:: When using ``curl_httpclient`` certain options may be @@ -358,7 +395,7 @@ class HTTPRequest(object): .. versionadded:: 3.1 The ``auth_mode`` argument. - .. versionadded:: 3.3 + .. versionadded:: 4.0 The ``body_producer`` and ``expect_100_continue`` arguments. 
""" # Note that some of these attributes go through property setters @@ -383,7 +420,10 @@ class HTTPRequest(object): self.follow_redirects = follow_redirects self.max_redirects = max_redirects self.user_agent = user_agent - self.use_gzip = use_gzip + if decompress_response is not None: + self.decompress_response = decompress_response + else: + self.decompress_response = use_gzip self.network_interface = network_interface self.streaming_callback = streaming_callback self.header_callback = header_callback diff --git a/libs/tornado/httpserver.py b/libs/tornado/httpserver.py index 469374e..03b5fc7 100755 --- a/libs/tornado/httpserver.py +++ b/libs/tornado/httpserver.py @@ -20,7 +20,7 @@ Typical applications have little direct interaction with the `HTTPServer` class except to start a server at the beginning of the process (and even that is often done indirectly via `tornado.web.Application.listen`). -.. versionchanged:: 3.3 +.. versionchanged:: 4.0 The ``HTTPRequest`` class that used to live in this module has been moved to `tornado.httputil.HTTPServerRequest`. The old name remains as an alias. @@ -128,14 +128,15 @@ class HTTPServer(TCPServer, httputil.HTTPServerConnectionDelegate): servers if you want to create your listening sockets in some way other than `tornado.netutil.bind_sockets`. - .. versionchanged:: 3.3 - Added ``gzip``, ``chunk_size``, ``max_header_size``, + .. versionchanged:: 4.0 + Added ``decompress_request``, ``chunk_size``, ``max_header_size``, ``idle_connection_timeout``, ``body_timeout``, ``max_body_size`` arguments. Added support for `.HTTPServerConnectionDelegate` instances as ``request_callback``. """ def __init__(self, request_callback, no_keep_alive=False, io_loop=None, - xheaders=False, ssl_options=None, protocol=None, gzip=False, + xheaders=False, ssl_options=None, protocol=None, + decompress_request=False, chunk_size=None, max_header_size=None, idle_connection_timeout=None, body_timeout=None, max_body_size=None, max_buffer_size=None): @@ -144,7 +145,7 @@ class HTTPServer(TCPServer, httputil.HTTPServerConnectionDelegate): self.xheaders = xheaders self.protocol = protocol self.conn_params = HTTP1ConnectionParameters( - use_gzip=gzip, + decompress=decompress_request, chunk_size=chunk_size, max_header_size=max_header_size, header_timeout=idle_connection_timeout or 3600, diff --git a/libs/tornado/httputil.py b/libs/tornado/httputil.py index 6e110d9..a674897 100755 --- a/libs/tornado/httputil.py +++ b/libs/tornado/httputil.py @@ -319,7 +319,7 @@ class HTTPServerRequest(object): are typically kept open in HTTP/1.1, multiple requests can be handled sequentially on a single connection. - .. versionchanged:: 3.3 + .. versionchanged:: 4.0 Moved from ``tornado.httpserver.HTTPRequest``. """ def __init__(self, method=None, uri=None, version="HTTP/1.0", headers=None, @@ -352,7 +352,7 @@ class HTTPServerRequest(object): def supports_http_1_1(self): """Returns True if this request supports HTTP/1.1 semantics. - .. deprecated:: 3.3 + .. deprecated:: 4.0 Applications are less likely to need this information with the introduction of `.HTTPConnection`. If you still need it, access the ``version`` attribute directly. @@ -375,7 +375,7 @@ class HTTPServerRequest(object): def write(self, chunk, callback=None): """Writes the given chunk to the response stream. - .. deprecated:: 3.3 + .. deprecated:: 4.0 Use ``request.connection`` and the `.HTTPConnection` methods to write the response. 
""" @@ -385,7 +385,7 @@ class HTTPServerRequest(object): def finish(self): """Finishes this HTTP request on the open connection. - .. deprecated:: 3.3 + .. deprecated:: 4.0 Use ``request.connection`` and the `.HTTPConnection` methods to write the response. """ @@ -445,19 +445,19 @@ class HTTPServerRequest(object): self.__class__.__name__, args, dict(self.headers)) -class HTTPInputException(Exception): +class HTTPInputError(Exception): """Exception class for malformed HTTP requests or responses from remote sources. - .. versionadded:: 3.3 + .. versionadded:: 4.0 """ pass -class HTTPOutputException(Exception): +class HTTPOutputError(Exception): """Exception class for errors in HTTP output. - .. versionadded:: 3.3 + .. versionadded:: 4.0 """ pass @@ -465,7 +465,7 @@ class HTTPOutputException(Exception): class HTTPServerConnectionDelegate(object): """Implement this interface to handle requests from `.HTTPServer`. - .. versionadded:: 3.3 + .. versionadded:: 4.0 """ def start_request(self, server_conn, request_conn): """This method is called by the server when a new request has started. @@ -491,7 +491,7 @@ class HTTPServerConnectionDelegate(object): class HTTPMessageDelegate(object): """Implement this interface to handle an HTTP request or response. - .. versionadded:: 3.3 + .. versionadded:: 4.0 """ def headers_received(self, start_line, headers): """Called when the HTTP headers have been received and parsed. @@ -531,7 +531,7 @@ class HTTPMessageDelegate(object): class HTTPConnection(object): """Applications use this interface to write their responses. - .. versionadded:: 3.3 + .. versionadded:: 4.0 """ def write_headers(self, start_line, headers, chunk=None, callback=None): """Write an HTTP header block. @@ -774,9 +774,9 @@ def parse_request_start_line(line): try: method, path, version = line.split(" ") except ValueError: - raise HTTPInputException("Malformed HTTP request line") + raise HTTPInputError("Malformed HTTP request line") if not version.startswith("HTTP/"): - raise HTTPInputException( + raise HTTPInputError( "Malformed HTTP version in HTTP Request-Line: %r" % version) return RequestStartLine(method, path, version) @@ -796,7 +796,7 @@ def parse_response_start_line(line): line = native_str(line) match = re.match("(HTTP/1.[01]) ([0-9]+) ([^\r]*)", line) if not match: - raise HTTPInputException("Error parsing response start line") + raise HTTPInputError("Error parsing response start line") return ResponseStartLine(match.group(1), int(match.group(2)), match.group(3)) diff --git a/libs/tornado/ioloop.py b/libs/tornado/ioloop.py index cd59bfe..e15252d 100755 --- a/libs/tornado/ioloop.py +++ b/libs/tornado/ioloop.py @@ -45,8 +45,7 @@ import traceback from tornado.concurrent import TracebackFuture, is_future from tornado.log import app_log, gen_log from tornado import stack_context -from tornado.util import Configurable -from tornado.util import errno_from_exception +from tornado.util import Configurable, errno_from_exception, timedelta_to_seconds try: import signal @@ -162,7 +161,7 @@ class IOLoop(Configurable): def clear_instance(): """Clear the global `IOLoop` instance. - .. versionadded:: 3.3 + .. versionadded:: 4.0 """ if hasattr(IOLoop, "_instance"): del IOLoop._instance @@ -267,7 +266,7 @@ class IOLoop(Configurable): When an event occurs, ``handler(fd, events)`` will be run. - .. versionchanged:: 3.3 + .. versionchanged:: 4.0 Added the ability to pass file-like objects in addition to raw file descriptors. 
""" @@ -276,7 +275,7 @@ class IOLoop(Configurable): def update_handler(self, fd, events): """Changes the events we listen for ``fd``. - .. versionchanged:: 3.3 + .. versionchanged:: 4.0 Added the ability to pass file-like objects in addition to raw file descriptors. """ @@ -285,7 +284,7 @@ class IOLoop(Configurable): def remove_handler(self, fd): """Stop listening for events on ``fd``. - .. versionchanged:: 3.3 + .. versionchanged:: 4.0 Added the ability to pass file-like objects in addition to raw file descriptors. """ @@ -433,7 +432,7 @@ class IOLoop(Configurable): """ return time.time() - def add_timeout(self, deadline, callback): + def add_timeout(self, deadline, callback, *args, **kwargs): """Runs the ``callback`` at the time ``deadline`` from the I/O loop. Returns an opaque handle that may be passed to @@ -442,13 +441,59 @@ class IOLoop(Configurable): ``deadline`` may be a number denoting a time (on the same scale as `IOLoop.time`, normally `time.time`), or a `datetime.timedelta` object for a deadline relative to the - current time. + current time. Since Tornado 4.0, `call_later` is a more + convenient alternative for the relative case since it does not + require a timedelta object. Note that it is not safe to call `add_timeout` from other threads. Instead, you must use `add_callback` to transfer control to the `IOLoop`'s thread, and then call `add_timeout` from there. + + Subclasses of IOLoop must implement either `add_timeout` or + `call_at`; the default implementations of each will call + the other. `call_at` is usually easier to implement, but + subclasses that wish to maintain compatibility with Tornado + versions prior to 4.0 must use `add_timeout` instead. + + .. versionchanged:: 4.0 + Now passes through ``*args`` and ``**kwargs`` to the callback. """ - raise NotImplementedError() + if isinstance(deadline, numbers.Real): + return self.call_at(deadline, callback, *args, **kwargs) + elif isinstance(deadline, datetime.timedelta): + return self.call_at(self.time() + timedelta_to_seconds(deadline), + callback, *args, **kwargs) + else: + raise TypeError("Unsupported deadline %r" % deadline) + + def call_later(self, delay, callback, *args, **kwargs): + """Runs the ``callback`` after ``delay`` seconds have passed. + + Returns an opaque handle that may be passed to `remove_timeout` + to cancel. Note that unlike the `asyncio` method of the same + name, the returned object does not have a ``cancel()`` method. + + See `add_timeout` for comments on thread-safety and subclassing. + + .. versionadded:: 4.0 + """ + return self.call_at(self.time() + delay, callback, *args, **kwargs) + + def call_at(self, when, callback, *args, **kwargs): + """Runs the ``callback`` at the absolute time designated by ``when``. + + ``when`` must be a number using the same reference point as + `IOLoop.time`. + + Returns an opaque handle that may be passed to `remove_timeout` + to cancel. Note that unlike the `asyncio` method of the same + name, the returned object does not have a ``cancel()`` method. + + See `add_timeout` for comments on thread-safety and subclassing. + + .. versionadded:: 4.0 + """ + return self.add_timeout(when, callback, *args, **kwargs) def remove_timeout(self, timeout): """Cancels a pending timeout. @@ -486,6 +531,19 @@ class IOLoop(Configurable): """ raise NotImplementedError() + def spawn_callback(self, callback, *args, **kwargs): + """Calls the given callback on the next IOLoop iteration. 
+ + Unlike all other callback-related methods on IOLoop, + ``spawn_callback`` does not associate the callback with its caller's + ``stack_context``, so it is suitable for fire-and-forget callbacks + that should not interfere with the caller. + + .. versionadded:: 4.0 + """ + with stack_context.NullContext(): + self.add_callback(callback, *args, **kwargs) + def add_future(self, future, callback): """Schedules a callback on the ``IOLoop`` when the given `.Future` is finished. @@ -504,7 +562,13 @@ class IOLoop(Configurable): For use in subclasses. """ try: - callback() + ret = callback() + if ret is not None and is_future(ret): + # Functions that return Futures typically swallow all + # exceptions and store them in the Future. If a Future + # makes it out to the IOLoop, ensure its exception (if any) + # gets logged too. + self.add_future(ret, lambda f: f.result()) except Exception: self.handle_callback_exception(callback) @@ -534,7 +598,7 @@ class IOLoop(Configurable): This method is provided for use by `IOLoop` subclasses and should not generally be used by application code. - .. versionadded:: 3.3 + .. versionadded:: 4.0 """ try: return fd.fileno(), fd @@ -551,7 +615,7 @@ class IOLoop(Configurable): implementations of ``IOLoop.close(all_fds=True)`` and should not generally be used by application code. - .. versionadded:: 3.3 + .. versionadded:: 4.0 """ try: try: @@ -587,7 +651,7 @@ class PollIOLoop(IOLoop): self._thread_ident = None self._blocking_signal_threshold = None self._timeout_counter = itertools.count() - + # Create a pipe that we send bogus data to when we want to wake # the I/O loop when it is idle self._waker = Waker() @@ -680,19 +744,16 @@ class PollIOLoop(IOLoop): try: while True: - poll_timeout = _POLL_TIMEOUT - # Prevent IO event starvation by delaying new callbacks # to the next iteration of the event loop. with self._callback_lock: callbacks = self._callbacks self._callbacks = [] - for callback in callbacks: - self._run_callback(callback) - # Closures may be holding on to a lot of memory, so allow - # them to be freed before we go into our poll wait. - callbacks = callback = None + # Add any timeouts that have come due to the callback list. + # Do not run anything until we have determined which ones + # are ready, so timeouts that call add_timeout cannot + # schedule anything in this iteration. if self._timeouts: now = self.time() while self._timeouts: @@ -702,11 +763,9 @@ class PollIOLoop(IOLoop): self._cancellations -= 1 elif self._timeouts[0].deadline <= now: timeout = heapq.heappop(self._timeouts) - self._run_callback(timeout.callback) + callbacks.append(timeout.callback) del timeout else: - seconds = self._timeouts[0].deadline - now - poll_timeout = min(seconds, poll_timeout) break if (self._cancellations > 512 and self._cancellations > (len(self._timeouts) >> 1)): @@ -717,10 +776,25 @@ class PollIOLoop(IOLoop): if x.callback is not None] heapq.heapify(self._timeouts) + for callback in callbacks: + self._run_callback(callback) + # Closures may be holding on to a lot of memory, so allow + # them to be freed before we go into our poll wait. + callbacks = callback = None + if self._callbacks: # If any callbacks or timeouts called add_callback, # we don't want to wait in poll() before we run them. poll_timeout = 0.0 + elif self._timeouts: + # If there are any timeouts, schedule the first one. + # Use self.time() instead of 'now' to account for time + # spent running callbacks. 
+ poll_timeout = self._timeouts[0].deadline - self.time() + poll_timeout = max(0, min(poll_timeout, _POLL_TIMEOUT)) + else: + # No timeouts and no callbacks, so use the default. + poll_timeout = _POLL_TIMEOUT if not self._running: break @@ -784,8 +858,11 @@ class PollIOLoop(IOLoop): def time(self): return self.time_func() - def add_timeout(self, deadline, callback): - timeout = _Timeout(deadline, stack_context.wrap(callback), self) + def call_at(self, deadline, callback, *args, **kwargs): + timeout = _Timeout( + deadline, + functools.partial(stack_context.wrap(callback), *args, **kwargs), + self) heapq.heappush(self._timeouts, timeout) return timeout @@ -840,24 +917,12 @@ class _Timeout(object): __slots__ = ['deadline', 'callback', 'tiebreaker'] def __init__(self, deadline, callback, io_loop): - if isinstance(deadline, numbers.Real): - self.deadline = deadline - elif isinstance(deadline, datetime.timedelta): - now = io_loop.time() - try: - self.deadline = now + deadline.total_seconds() - except AttributeError: # py2.6 - self.deadline = now + _Timeout.timedelta_to_seconds(deadline) - else: + if not isinstance(deadline, numbers.Real): raise TypeError("Unsupported deadline %r" % deadline) + self.deadline = deadline self.callback = callback self.tiebreaker = next(io_loop._timeout_counter) - @staticmethod - def timedelta_to_seconds(td): - """Equivalent to td.total_seconds() (introduced in python 2.7).""" - return (td.microseconds + (td.seconds + td.days * 24 * 3600) * 10 ** 6) / float(10 ** 6) - # Comparison methods to sort by deadline, with object id as a tiebreaker # to guarantee a consistent ordering. The heapq module uses __le__ # in python2.5, and __lt__ in 2.6+ (sort() and most other comparisons @@ -904,10 +969,11 @@ class PeriodicCallback(object): if not self._running: return try: - self.callback() + return self.callback() except Exception: self.io_loop.handle_callback_exception(self.callback) - self._schedule_next() + finally: + self._schedule_next() def _schedule_next(self): if self._running: diff --git a/libs/tornado/iostream.py b/libs/tornado/iostream.py index 3874bf7..99c681d 100755 --- a/libs/tornado/iostream.py +++ b/libs/tornado/iostream.py @@ -57,11 +57,24 @@ except ImportError: # some they differ. _ERRNO_WOULDBLOCK = (errno.EWOULDBLOCK, errno.EAGAIN) +if hasattr(errno, "WSAEWOULDBLOCK"): + _ERRNO_WOULDBLOCK += (errno.WSAEWOULDBLOCK,) + # These errnos indicate that a connection has been abruptly terminated. # They should be caught and handled less noisily than other errors. -_ERRNO_CONNRESET = (errno.ECONNRESET, errno.ECONNABORTED, errno.EPIPE) +_ERRNO_CONNRESET = (errno.ECONNRESET, errno.ECONNABORTED, errno.EPIPE, + errno.ETIMEDOUT) + +if hasattr(errno, "WSAECONNRESET"): + _ERRNO_CONNRESET += (errno.WSAECONNRESET, errno.WSAECONNABORTED, errno.WSAETIMEDOUT) + +# More non-portable errnos: +_ERRNO_INPROGRESS = (errno.EINPROGRESS,) +if hasattr(errno, "WSAEINPROGRESS"): + _ERRNO_INPROGRESS += (errno.WSAEINPROGRESS,) +####################################################### class StreamClosedError(IOError): """Exception raised by `IOStream` methods when the stream is closed. @@ -116,7 +129,7 @@ class BaseIOStream(object): :arg max_write_buffer_size: Amount of outgoing data to buffer; defaults to unlimited. - .. versionchanged:: 3.3 + .. versionchanged:: 4.0 Add the ``max_write_buffer_size`` parameter. Changed default ``read_chunk_size`` to 64KB. """ @@ -203,7 +216,7 @@ class BaseIOStream(object): if more than ``max_bytes`` bytes have been read and the regex is not satisfied. - .. 
versionchanged:: 3.3 + .. versionchanged:: 4.0 Added the ``max_bytes`` argument. The ``callback`` argument is now optional and a `.Future` will be returned if it is omitted. """ @@ -230,7 +243,7 @@ class BaseIOStream(object): if more than ``max_bytes`` bytes have been read and the delimiter is not found. - .. versionchanged:: 3.3 + .. versionchanged:: 4.0 Added the ``max_bytes`` argument. The ``callback`` argument is now optional and a `.Future` will be returned if it is omitted. """ @@ -259,7 +272,7 @@ class BaseIOStream(object): If ``partial`` is true, the callback is run as soon as we have any bytes to return (but never more than ``num_bytes``) - .. versionchanged:: 3.3 + .. versionchanged:: 4.0 Added the ``partial`` argument. The callback argument is now optional and a `.Future` will be returned if it is omitted. """ @@ -280,7 +293,7 @@ class BaseIOStream(object): If a callback is given, it will be run with the data as an argument; if not, this method returns a `.Future`. - .. versionchanged:: 3.3 + .. versionchanged:: 4.0 The callback argument is now optional and a `.Future` will be returned if it is omitted. """ @@ -308,7 +321,7 @@ class BaseIOStream(object): completed. If `write` is called again before that `.Future` has resolved, the previous future will be orphaned and will never resolve. - .. versionchanged:: 3.3 + .. versionchanged:: 4.0 Now returns a `.Future` if no callback is given. """ assert isinstance(data, bytes_type) @@ -492,7 +505,7 @@ class BaseIOStream(object): def wrapper(): self._pending_callbacks -= 1 try: - callback(*args) + return callback(*args) except Exception: app_log.error("Uncaught exception, closing connection.", exc_info=True) @@ -504,7 +517,8 @@ class BaseIOStream(object): # Re-raise the exception so that IOLoop.handle_callback_exception # can see it and log the error raise - self._maybe_add_error_listener() + finally: + self._maybe_add_error_listener() # We schedule callbacks to be run on the next IOLoop iteration # rather than running them directly for several reasons: # * Prevents unbounded stack growth when a callback calls an @@ -949,11 +963,19 @@ class IOStream(BaseIOStream): May only be called if the socket passed to the constructor was not previously connected. The address parameter is in the - same format as for `socket.connect `, - i.e. a ``(host, port)`` tuple. If ``callback`` is specified, - it will be called with no arguments when the connection is - completed; if not this method returns a `.Future` (whose result - after a successful connection will be the stream itself). + same format as for `socket.connect ` for + the type of socket passed to the IOStream constructor, + e.g. an ``(ip, port)`` tuple. Hostnames are accepted here, + but will be resolved synchronously and block the IOLoop. + If you have a hostname instead of an IP address, the `.TCPClient` + class is recommended instead of calling this method directly. + `.TCPClient` will do asynchronous DNS resolution and handle + both IPv4 and IPv6. + + If ``callback`` is specified, it will be called with no + arguments when the connection is completed; if not this method + returns a `.Future` (whose result after a successful + connection will be the stream itself). If specified, the ``server_hostname`` parameter will be used in SSL connections for certificate validation (if requested in @@ -966,8 +988,9 @@ class IOStream(BaseIOStream): is ready. Calling `IOStream` read methods before the socket is connected works on some platforms but is non-portable. - .. versionchanged:: 3.3 + .. 
versionchanged:: 4.0 If no callback is given, returns a `.Future`. + """ self._connecting = True try: @@ -980,7 +1003,7 @@ class IOStream(BaseIOStream): # returned immediately when attempting to connect to # localhost, so handle them the same way as an error # reported later in _handle_connect. - if (errno_from_exception(e) != errno.EINPROGRESS and + if (errno_from_exception(e) not in _ERRNO_INPROGRESS and errno_from_exception(e) not in _ERRNO_WOULDBLOCK): gen_log.warning("Connect error on fd %s: %s", self.socket.fileno(), e) @@ -1021,7 +1044,7 @@ class IOStream(BaseIOStream): If a close callback is defined on this stream, it will be transferred to the new stream. - .. versionadded:: 3.3 + .. versionadded:: 4.0 """ if (self._read_callback or self._read_future or self._write_callback or self._write_future or diff --git a/libs/tornado/log.py b/libs/tornado/log.py index 7066466..374071d 100755 --- a/libs/tornado/log.py +++ b/libs/tornado/log.py @@ -179,7 +179,7 @@ class LogFormatter(logging.Formatter): def enable_pretty_logging(options=None, logger=None): """Turns on formatted logging output as configured. - This is called automaticaly by `tornado.options.parse_command_line` + This is called automatically by `tornado.options.parse_command_line` and `tornado.options.parse_config_file`. """ if options is None: diff --git a/libs/tornado/netutil.py b/libs/tornado/netutil.py index a9e05d1..336c806 100755 --- a/libs/tornado/netutil.py +++ b/libs/tornado/netutil.py @@ -57,6 +57,9 @@ u('foo').encode('idna') # some they differ. _ERRNO_WOULDBLOCK = (errno.EWOULDBLOCK, errno.EAGAIN) +if hasattr(errno, "WSAEWOULDBLOCK"): + _ERRNO_WOULDBLOCK += (errno.WSAEWOULDBLOCK,) + def bind_sockets(port, address=None, family=socket.AF_UNSPEC, backlog=128, flags=None): """Creates listening sockets bound to the given port and address. diff --git a/libs/tornado/platform/asyncio.py b/libs/tornado/platform/asyncio.py index 552476b..b40f014 100644 --- a/libs/tornado/platform/asyncio.py +++ b/libs/tornado/platform/asyncio.py @@ -13,9 +13,9 @@ from __future__ import absolute_import, division, print_function, with_statement import datetime import functools -# _Timeout is used for its timedelta_to_seconds method for py26 compatibility. -from tornado.ioloop import IOLoop, _Timeout +from tornado.ioloop import IOLoop from tornado import stack_context +from tornado.util import timedelta_to_seconds try: # Import the real asyncio module for py33+ first. Older versions of the @@ -109,21 +109,13 @@ class BaseAsyncIOLoop(IOLoop): def stop(self): self.asyncio_loop.stop() - def _run_callback(self, callback, *args, **kwargs): - try: - callback(*args, **kwargs) - except Exception: - self.handle_callback_exception(callback) - - def add_timeout(self, deadline, callback): - if isinstance(deadline, (int, float)): - delay = max(deadline - self.time(), 0) - elif isinstance(deadline, datetime.timedelta): - delay = _Timeout.timedelta_to_seconds(deadline) - else: - raise TypeError("Unsupported deadline %r", deadline) - return self.asyncio_loop.call_later(delay, self._run_callback, - stack_context.wrap(callback)) + def call_at(self, when, callback, *args, **kwargs): + # asyncio.call_at supports *args but not **kwargs, so bind them here. + # We do not synchronize self.time and asyncio_loop.time, so + # convert from absolute to relative. 
+ return self.asyncio_loop.call_later( + max(0, when - self.time()), self._run_callback, + functools.partial(stack_context.wrap(callback), *args, **kwargs)) def remove_timeout(self, timeout): timeout.cancel() @@ -131,13 +123,9 @@ class BaseAsyncIOLoop(IOLoop): def add_callback(self, callback, *args, **kwargs): if self.closing: raise RuntimeError("IOLoop is closing") - if kwargs: - self.asyncio_loop.call_soon_threadsafe(functools.partial( - self._run_callback, stack_context.wrap(callback), - *args, **kwargs)) - else: - self.asyncio_loop.call_soon_threadsafe( - self._run_callback, stack_context.wrap(callback), *args) + self.asyncio_loop.call_soon_threadsafe( + self._run_callback, + functools.partial(stack_context.wrap(callback), *args, **kwargs)) add_callback_from_signal = add_callback diff --git a/libs/tornado/platform/twisted.py b/libs/tornado/platform/twisted.py index 889aa3c..b271dfc 100755 --- a/libs/tornado/platform/twisted.py +++ b/libs/tornado/platform/twisted.py @@ -68,6 +68,7 @@ from __future__ import absolute_import, division, print_function, with_statement import datetime import functools +import numbers import socket import twisted.internet.abstract @@ -90,11 +91,7 @@ from tornado.log import app_log from tornado.netutil import Resolver from tornado.stack_context import NullContext, wrap from tornado.ioloop import IOLoop - -try: - long # py2 -except NameError: - long = int # py3 +from tornado.util import timedelta_to_seconds @implementer(IDelayedCall) @@ -475,28 +472,28 @@ class TwistedIOLoop(tornado.ioloop.IOLoop): def stop(self): self.reactor.crash() - def _run_callback(self, callback, *args, **kwargs): - try: - callback(*args, **kwargs) - except Exception: - self.handle_callback_exception(callback) - - def add_timeout(self, deadline, callback): - if isinstance(deadline, (int, long, float)): + def add_timeout(self, deadline, callback, *args, **kwargs): + # This method could be simplified (since tornado 4.0) by + # overriding call_at instead of add_timeout, but we leave it + # for now as a test of backwards-compatibility. 
+ if isinstance(deadline, numbers.Real): delay = max(deadline - self.time(), 0) elif isinstance(deadline, datetime.timedelta): - delay = tornado.ioloop._Timeout.timedelta_to_seconds(deadline) + delay = timedelta_to_seconds(deadline) else: raise TypeError("Unsupported deadline %r") - return self.reactor.callLater(delay, self._run_callback, wrap(callback)) + return self.reactor.callLater( + delay, self._run_callback, + functools.partial(wrap(callback), *args, **kwargs)) def remove_timeout(self, timeout): if timeout.active(): timeout.cancel() def add_callback(self, callback, *args, **kwargs): - self.reactor.callFromThread(self._run_callback, - wrap(callback), *args, **kwargs) + self.reactor.callFromThread( + self._run_callback, + functools.partial(wrap(callback), *args, **kwargs)) def add_callback_from_signal(self, callback, *args, **kwargs): self.add_callback(callback, *args, **kwargs) diff --git a/libs/tornado/simple_httpclient.py b/libs/tornado/simple_httpclient.py index 06d7ecf..516dc20 100755 --- a/libs/tornado/simple_httpclient.py +++ b/libs/tornado/simple_httpclient.py @@ -277,7 +277,7 @@ class _HTTPConnection(httputil.HTTPMessageDelegate): stream.close() return self.stream = stream - self.stream.set_close_callback(self._on_close) + self.stream.set_close_callback(self.on_connection_close) self._remove_timeout() if self.final_callback is None: return @@ -338,7 +338,7 @@ class _HTTPConnection(httputil.HTTPMessageDelegate): if (self.request.method == "POST" and "Content-Type" not in self.request.headers): self.request.headers["Content-Type"] = "application/x-www-form-urlencoded" - if self.request.use_gzip: + if self.request.decompress_response: self.request.headers["Accept-Encoding"] = "gzip" req_path = ((self.parsed.path or '/') + (('?' + self.parsed.query) if self.parsed.query else '')) @@ -348,7 +348,7 @@ class _HTTPConnection(httputil.HTTPMessageDelegate): HTTP1ConnectionParameters( no_keep_alive=True, max_header_size=self.max_header_size, - use_gzip=self.request.use_gzip), + decompress=self.request.decompress_response), self._sockaddr) start_line = httputil.RequestStartLine(self.request.method, req_path, 'HTTP/1.1') @@ -418,12 +418,15 @@ class _HTTPConnection(httputil.HTTPMessageDelegate): # pass it along, unless it's just the stream being closed. return isinstance(value, StreamClosedError) - def _on_close(self): + def on_connection_close(self): if self.final_callback is not None: message = "Connection closed" if self.stream.error: raise self.stream.error - raise HTTPError(599, message) + try: + raise HTTPError(599, message) + except HTTPError: + self._handle_exception(*sys.exc_info()) def headers_received(self, first_line, headers): if self.request.expect_100_continue and first_line.code == 100: @@ -433,20 +436,6 @@ class _HTTPConnection(httputil.HTTPMessageDelegate): self.code = first_line.code self.reason = first_line.reason - if "Content-Length" in self.headers: - if "," in self.headers["Content-Length"]: - # Proxies sometimes cause Content-Length headers to get - # duplicated. If all the values are identical then we can - # use them but if they differ it's an error. - pieces = re.split(r',\s*', self.headers["Content-Length"]) - if any(i != pieces[0] for i in pieces): - raise ValueError("Multiple unequal Content-Lengths: %r" % - self.headers["Content-Length"]) - self.headers["Content-Length"] = pieces[0] - content_length = int(self.headers["Content-Length"]) - else: - content_length = None - if self.request.header_callback is not None: # Reassemble the start line. 
self.request.header_callback('%s %s %s\r\n' % first_line) @@ -454,14 +443,6 @@ class _HTTPConnection(httputil.HTTPMessageDelegate): self.request.header_callback("%s: %s\r\n" % (k, v)) self.request.header_callback('\r\n') - if 100 <= self.code < 200 or self.code == 204: - # These response codes never have bodies - # http://www.w3.org/Protocols/rfc2616/rfc2616-sec4.html#sec4.3 - if ("Transfer-Encoding" in self.headers or - content_length not in (None, 0)): - raise ValueError("Response with code %d should not have body" % - self.code) - def finish(self): data = b''.join(self.chunks) self._remove_timeout() diff --git a/libs/tornado/testing.py b/libs/tornado/testing.py index dc30e94..b4bfb27 100755 --- a/libs/tornado/testing.py +++ b/libs/tornado/testing.py @@ -70,8 +70,8 @@ def get_unused_port(): only that a series of get_unused_port calls in a single process return distinct ports. - **Deprecated**. Use bind_unused_port instead, which is guaranteed - to find an unused port. + .. deprecated:: + Use bind_unused_port instead, which is guaranteed to find an unused port. """ global _next_port port = _next_port @@ -459,7 +459,7 @@ def gen_test(func=None, timeout=None): The ``timeout`` argument and ``ASYNC_TEST_TIMEOUT`` environment variable. - .. versionchanged:: 3.3 + .. versionchanged:: 4.0 The wrapper now passes along ``*args, **kwargs`` so it can be used on functions with arguments. """ diff --git a/libs/tornado/util.py b/libs/tornado/util.py index 49eea2c..b6e06c6 100755 --- a/libs/tornado/util.py +++ b/libs/tornado/util.py @@ -311,6 +311,11 @@ class ArgReplacer(object): return old_value, args, kwargs +def timedelta_to_seconds(td): + """Equivalent to td.total_seconds() (introduced in python 2.7).""" + return (td.microseconds + (td.seconds + td.days * 24 * 3600) * 10 ** 6) / float(10 ** 6) + + def _websocket_mask_python(mask, data): """Websocket masking function. diff --git a/libs/tornado/web.py b/libs/tornado/web.py index dd2b5ef..25ac56e 100755 --- a/libs/tornado/web.py +++ b/libs/tornado/web.py @@ -35,8 +35,7 @@ Here is a simple "Hello, world" example app:: application.listen(8888) tornado.ioloop.IOLoop.instance().start() -See the :doc:`Tornado overview ` for more details and a good getting -started guide. +See the :doc:`guide` for additional information. Thread-safety notes ------------------- @@ -48,6 +47,7 @@ not thread-safe. In particular, methods such as you use multiple threads it is important to use `.IOLoop.add_callback` to transfer control back to the main thread before finishing the request. + """ from __future__ import absolute_import, division, print_function, with_statement @@ -820,7 +820,7 @@ class RequestHandler(object): if another flush occurs before the previous flush's callback has been run, the previous callback will be discarded. - .. versionchanged:: 3.3 + .. versionchanged:: 4.0 Now returns a `.Future` if no callback is given. """ chunk = b"".join(self._write_buffer) @@ -943,26 +943,7 @@ class RequestHandler(object): ``kwargs["exc_info"]``. Note that this exception may not be the "current" exception for purposes of methods like ``sys.exc_info()`` or ``traceback.format_exc``. - - For historical reasons, if a method ``get_error_html`` exists, - it will be used instead of the default ``write_error`` implementation. - ``get_error_html`` returned a string instead of producing output - normally, and had different semantics for exception handling. - Users of ``get_error_html`` are encouraged to convert their code - to override ``write_error`` instead. 
- """ - if hasattr(self, 'get_error_html'): - if 'exc_info' in kwargs: - exc_info = kwargs.pop('exc_info') - kwargs['exception'] = exc_info[1] - try: - # Put the traceback into sys.exc_info() - raise_exc_info(exc_info) - except Exception: - self.finish(self.get_error_html(status_code, **kwargs)) - else: - self.finish(self.get_error_html(status_code, **kwargs)) - return + """ if self.settings.get("serve_traceback") and "exc_info" in kwargs: # in debug mode, try to send a traceback self.set_header('Content-Type', 'text/plain') @@ -1147,14 +1128,15 @@ class RequestHandler(object): else: # Treat unknown versions as not present instead of failing. return None, None, None - elif len(cookie) == 32: + else: version = 1 - token = binascii.a2b_hex(utf8(cookie)) + try: + token = binascii.a2b_hex(utf8(cookie)) + except (binascii.Error, TypeError): + token = utf8(cookie) # We don't have a usable timestamp in older versions. timestamp = int(time.time()) return (version, token, timestamp) - else: - return None, None, None def check_xsrf_cookie(self): """Verifies that the ``_xsrf`` cookie matches the ``_xsrf`` argument. @@ -1242,27 +1224,6 @@ class RequestHandler(object): return base + get_url(self.settings, path, **kwargs) - def async_callback(self, callback, *args, **kwargs): - """Obsolete - catches exceptions from the wrapped function. - - This function is unnecessary since Tornado 1.1. - """ - if callback is None: - return None - if args or kwargs: - callback = functools.partial(callback, *args, **kwargs) - - def wrapper(*args, **kwargs): - try: - return callback(*args, **kwargs) - except Exception as e: - if self._headers_written: - app_log.error("Exception after headers written", - exc_info=True) - else: - self._handle_request_exception(e) - return wrapper - def require_setting(self, name, feature="this feature"): """Raises an exception if the given app setting is not defined.""" if not self.application.settings.get(name): @@ -1405,6 +1366,11 @@ class RequestHandler(object): " (" + self.request.remote_ip + ")" def _handle_request_exception(self, e): + if isinstance(e, Finish): + # Not an error; just finish the request without logging. + if not self._finished: + self.finish() + return self.log_exception(*sys.exc_info()) if self._finished: # Extra errors after the request has been finished should @@ -1662,7 +1628,7 @@ class Application(httputil.HTTPServerConnectionDelegate): **settings): if transforms is None: self.transforms = [] - if settings.get("gzip"): + if settings.get("compress_response") or settings.get("gzip"): self.transforms.append(GZipContentEncoding) else: self.transforms = transforms @@ -1959,6 +1925,9 @@ class HTTPError(Exception): `RequestHandler.send_error` since it automatically ends the current function. + To customize the response sent with an `HTTPError`, override + `RequestHandler.write_error`. + :arg int status_code: HTTP status code. Must be listed in `httplib.responses ` unless the ``reason`` keyword argument is given. @@ -1987,6 +1956,25 @@ class HTTPError(Exception): return message +class Finish(Exception): + """An exception that ends the request without producing an error response. + + When `Finish` is raised in a `RequestHandler`, the request will end + (calling `RequestHandler.finish` if it hasn't already been called), + but the outgoing response will not be modified and the error-handling + methods (including `RequestHandler.write_error`) will not be called. 
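With the ``get_error_html`` compatibility path removed above, custom error pages are produced by overriding ``write_error`` directly. A sketch of the replacement pattern (the handler and markup are illustrative)::

    from tornado import web

    class MyHandler(web.RequestHandler):
        def write_error(self, status_code, **kwargs):
            # Write the page with the normal output methods instead of
            # returning a string from ``get_error_html``.
            self.set_header("Content-Type", "text/html")
            self.finish("<html><body><h1>%d: %s</h1></body></html>"
                        % (status_code, self._reason))
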
+ + This can be a more convenient way to implement custom error pages + than overriding ``write_error`` (especially in library code):: + + if self.current_user is None: + self.set_status(401) + self.set_header('WWW-Authenticate', 'Basic realm="something"') + raise Finish() + """ + pass + + class MissingArgumentError(HTTPError): """Exception raised by `RequestHandler.get_argument`. @@ -2367,7 +2355,7 @@ class StaticFileHandler(RequestHandler): .. versionadded:: 3.1 - .. versionchanged:: 3.3 + .. versionchanged:: 4.0 This method is now always called, instead of only when partial results are requested. """ @@ -2514,9 +2502,9 @@ class FallbackHandler(RequestHandler): class OutputTransform(object): """A transform modifies the result of an HTTP request (e.g., GZip encoding) - A new transform instance is created for every request. See the - GZipContentEncoding example below if you want to implement a - new Transform. + Applications are not expected to create their own OutputTransforms + or interact with them directly; the framework chooses which transforms + (if any) to apply. """ def __init__(self, request): pass @@ -2533,7 +2521,7 @@ class GZipContentEncoding(OutputTransform): See http://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html#sec14.11 - .. versionchanged:: 3.3 + .. versionchanged:: 4.0 Now compresses all mime types beginning with ``text/``, instead of just a whitelist. (the whitelist is still used for certain non-text mime types). @@ -2767,7 +2755,7 @@ class URLSpec(object): in the regex will be passed in to the handler's get/post/etc methods as arguments. - * ``handler_class``: `RequestHandler` subclass to be invoked. + * ``handler``: `RequestHandler` subclass to be invoked. * ``kwargs`` (optional): A dictionary of additional arguments to be passed to the handler's constructor. diff --git a/libs/tornado/websocket.py b/libs/tornado/websocket.py index c0065c7..ed520d5 100755 --- a/libs/tornado/websocket.py +++ b/libs/tornado/websocket.py @@ -3,18 +3,17 @@ `WebSockets `_ allow for bidirectional communication between the browser and server. -.. warning:: - - The WebSocket protocol was recently finalized as `RFC 6455 - `_ and is not yet supported in - all browsers. Refer to http://caniuse.com/websockets for details - on compatibility. In addition, during development the protocol - went through several incompatible versions, and some browsers only - support older versions. By default this module only supports the - latest version of the protocol, but optional support for an older - version (known as "draft 76" or "hixie-76") can be enabled by - overriding `WebSocketHandler.allow_draft76` (see that method's - documentation for caveats). +WebSockets are supported in the current versions of all major browsers, +although older versions that do not support WebSockets are still in use +(refer to http://caniuse.com/websockets for details). + +This module implements the final version of the WebSocket protocol as +defined in `RFC 6455 `_. Certain +browser versions (notably Safari 5.x) implemented an earlier draft of +the protocol (known as "draft 76") and are not compatible with this module. + +.. versionchanged:: 4.0 + Removed support for the draft 76 protocol version. 
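The ``GZipContentEncoding`` transform noted above is enabled through the application settings; per the ``Application.__init__`` hunk, the setting is now spelled ``compress_response`` while the old ``gzip`` name is still honored. An illustrative configuration, assuming a ``MainHandler`` defined elsewhere::

    import tornado.web

    application = tornado.web.Application(
        [(r"/", MainHandler)],
        compress_response=True,  # formerly gzip=True
    )
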
""" from __future__ import absolute_import, division, print_function, with_statement @@ -22,11 +21,9 @@ from __future__ import absolute_import, division, print_function, with_statement import base64 import collections -import functools import hashlib import os import struct -import time import tornado.escape import tornado.web @@ -38,7 +35,7 @@ from tornado.iostream import StreamClosedError from tornado.log import gen_log, app_log from tornado import simple_httpclient from tornado.tcpclient import TCPClient -from tornado.util import bytes_type, unicode_type, _websocket_mask +from tornado.util import bytes_type, _websocket_mask try: from urllib.parse import urlparse # py2 @@ -108,6 +105,21 @@ class WebSocketHandler(tornado.web.RequestHandler): }; This script pops up an alert box that says "You said: Hello, world". + + Web browsers allow any site to open a websocket connection to any other, + instead of using the same-origin policy that governs other network + access from javascript. This can be surprising and is a potential + security hole, so since Tornado 4.0 `WebSocketHandler` requires + applications that wish to receive cross-origin websockets to opt in + by overriding the `~WebSocketHandler.check_origin` method (see that + method's docs for details). Failure to do so is the most likely + cause of 403 errors when making a websocket connection. + + When using a secure websocket connection (``wss://``) with a self-signed + certificate, the connection from a browser may fail because it wants + to show the "accept this certificate" dialog but has nowhere to show it. + You must first visit a regular HTML page using the same certificate + to accept it before the websocket connection will succeed. """ def __init__(self, application, request, **kwargs): tornado.web.RequestHandler.__init__(self, application, request, @@ -115,22 +127,17 @@ class WebSocketHandler(tornado.web.RequestHandler): self.ws_connection = None self.close_code = None self.close_reason = None + self.stream = None @tornado.web.asynchronous def get(self, *args, **kwargs): self.open_args = args self.open_kwargs = kwargs - self.stream = self.request.connection.detach() - self.stream.set_close_callback(self.on_connection_close) - # Upgrade header should be present and should be equal to WebSocket if self.request.headers.get("Upgrade", "").lower() != 'websocket': - self.stream.write(tornado.escape.utf8( - "HTTP/1.1 400 Bad Request\r\n\r\n" - "Can \"Upgrade\" only to \"WebSocket\"." - )) - self.stream.close() + self.set_status(400) + self.finish("Can \"Upgrade\" only to \"WebSocket\".") return # Connection header should be upgrade. Some proxy servers/load balancers @@ -138,11 +145,8 @@ class WebSocketHandler(tornado.web.RequestHandler): headers = self.request.headers connection = map(lambda s: s.strip().lower(), headers.get("Connection", "").split(",")) if 'upgrade' not in connection: - self.stream.write(tornado.escape.utf8( - "HTTP/1.1 400 Bad Request\r\n\r\n" - "\"Connection\" must be \"Upgrade\"." - )) - self.stream.close() + self.set_status(400) + self.finish("\"Connection\" must be \"Upgrade\".") return # Handle WebSocket Origin naming convention differences @@ -159,19 +163,16 @@ class WebSocketHandler(tornado.web.RequestHandler): # according to check_origin. When the origin is None, we assume it # did not come from a browser and that it can be passed on. 
if origin is not None and not self.check_origin(origin): - self.stream.write(tornado.escape.utf8( - "HTTP/1.1 403 Cross Origin Websockets Disabled\r\n\r\n" - )) - self.stream.close() + self.set_status(403) + self.finish("Cross origin websockets not allowed") return + self.stream = self.request.connection.detach() + self.stream.set_close_callback(self.on_connection_close) + if self.request.headers.get("Sec-WebSocket-Version") in ("7", "8", "13"): self.ws_connection = WebSocketProtocol13(self) self.ws_connection.accept_connection() - elif (self.allow_draft76() and - "Sec-WebSocket-Version" not in self.request.headers): - self.ws_connection = WebSocketProtocol76(self) - self.ws_connection.accept_connection() else: self.stream.write(tornado.escape.utf8( "HTTP/1.1 426 Upgrade Required\r\n" @@ -245,7 +246,7 @@ class WebSocketHandler(tornado.web.RequestHandler): phrase was supplied, these values will be available as the attributes ``self.close_code`` and ``self.close_reason``. - .. versionchanged:: 3.3 + .. versionchanged:: 4.0 Added ``close_code`` and ``close_reason`` attributes. """ @@ -263,10 +264,7 @@ class WebSocketHandler(tornado.web.RequestHandler): closing. These values are made available to the client, but are not otherwise interpreted by the websocket protocol. - The ``code`` and ``reason`` arguments are ignored in the "draft76" - protocol version. - - .. versionchanged:: 3.3 + .. versionchanged:: 4.0 Added the ``code`` and ``reason`` arguments. """ @@ -292,7 +290,20 @@ class WebSocketHandler(tornado.web.RequestHandler): browsers, since WebSockets are allowed to bypass the usual same-origin policies and don't use CORS headers. - .. versionadded:: 3.3 + To accept all cross-origin traffic (which was the default prior to + Tornado 4.0), simply override this method to always return true:: + + def check_origin(self, origin): + return True + + To allow connections from any subdomain of your site, you might + do something like:: + + def check_origin(self, origin): + parsed_origin = urllib.parse.urlparse(origin) + return parsed_origin.netloc.endswith(".mydomain.com") + + .. versionadded:: 4.0 """ parsed_origin = urlparse(origin) origin = parsed_origin.netloc @@ -303,21 +314,6 @@ class WebSocketHandler(tornado.web.RequestHandler): # Check to see that origin matches host directly, including ports return origin == host - def allow_draft76(self): - """Override to enable support for the older "draft76" protocol. - - The draft76 version of the websocket protocol is disabled by - default due to security concerns, but it can be enabled by - overriding this method to return True. - - Connections using the draft76 protocol do not support the - ``binary=True`` flag to `write_message`. - - Support for the draft76 protocol is deprecated and will be - removed in a future version of Tornado. - """ - return False - def set_nodelay(self, value): """Set the no-delay flag for this stream. @@ -334,29 +330,6 @@ class WebSocketHandler(tornado.web.RequestHandler): """ self.stream.set_nodelay(value) - def get_websocket_scheme(self): - """Return the url scheme used for this request, either "ws" or "wss". - - This is normally decided by HTTPServer, but applications - may wish to override this if they are using an SSL proxy - that does not provide the X-Scheme header as understood - by HTTPServer. - - Note that this is only used by the draft76 protocol. 
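Taken together, the handler-side changes above mean a server that wants cross-origin websocket clients must now opt in explicitly. A minimal echo handler under the new rules (the path and the blanket ``return True`` are illustrative; see ``check_origin`` above for stricter policies)::

    import tornado.web
    import tornado.websocket

    class EchoWebSocket(tornado.websocket.WebSocketHandler):
        def check_origin(self, origin):
            # Restore the pre-4.0 behaviour of accepting any origin.
            return True

        def on_message(self, message):
            self.write_message(u"You said: " + message)

    application = tornado.web.Application([(r"/websocket", EchoWebSocket)])
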
- """ - return "wss" if self.request.protocol == "https" else "ws" - - def async_callback(self, callback, *args, **kwargs): - """Obsolete - catches exceptions from the wrapped function. - - This function is normally unncecessary thanks to - `tornado.stack_context`. - """ - return self.ws_connection.async_callback(callback, *args, **kwargs) - - def _not_supported(self, *args, **kwargs): - raise Exception("Method not supported for Web Sockets") - def on_connection_close(self): if self.ws_connection: self.ws_connection.on_connection_close() @@ -364,9 +337,17 @@ class WebSocketHandler(tornado.web.RequestHandler): self.on_close() +def _wrap_method(method): + def _disallow_for_websocket(self, *args, **kwargs): + if self.stream is None: + method(self, *args, **kwargs) + else: + raise RuntimeError("Method not supported for Web Sockets") + return _disallow_for_websocket for method in ["write", "redirect", "set_header", "send_error", "set_cookie", "set_status", "flush", "finish"]: - setattr(WebSocketHandler, method, WebSocketHandler._not_supported) + setattr(WebSocketHandler, method, + _wrap_method(getattr(WebSocketHandler, method))) class WebSocketProtocol(object): @@ -379,23 +360,17 @@ class WebSocketProtocol(object): self.client_terminated = False self.server_terminated = False - def async_callback(self, callback, *args, **kwargs): - """Wrap callbacks with this if they are used on asynchronous requests. + def _run_callback(self, callback, *args, **kwargs): + """Runs the given callback with exception handling. - Catches exceptions properly and closes this WebSocket if an exception - is uncaught. + On error, aborts the websocket connection and returns False. """ - if args or kwargs: - callback = functools.partial(callback, *args, **kwargs) - - def wrapper(*args, **kwargs): - try: - return callback(*args, **kwargs) - except Exception: - app_log.error("Uncaught exception in %s", - self.request.path, exc_info=True) - self._abort() - return wrapper + try: + callback(*args, **kwargs) + except Exception: + app_log.error("Uncaught exception in %s", + self.request.path, exc_info=True) + self._abort() def on_connection_close(self): self._abort() @@ -408,174 +383,6 @@ class WebSocketProtocol(object): self.close() # let the subclass cleanup -class WebSocketProtocol76(WebSocketProtocol): - """Implementation of the WebSockets protocol, version hixie-76. - - This class provides basic functionality to process WebSockets requests as - specified in - http://tools.ietf.org/html/draft-hixie-thewebsocketprotocol-76 - """ - def __init__(self, handler): - WebSocketProtocol.__init__(self, handler) - self.challenge = None - self._waiting = None - - def accept_connection(self): - try: - self._handle_websocket_headers() - except ValueError: - gen_log.debug("Malformed WebSocket request received") - self._abort() - return - - scheme = self.handler.get_websocket_scheme() - - # draft76 only allows a single subprotocol - subprotocol_header = '' - subprotocol = self.request.headers.get("Sec-WebSocket-Protocol", None) - if subprotocol: - selected = self.handler.select_subprotocol([subprotocol]) - if selected: - assert selected == subprotocol - subprotocol_header = "Sec-WebSocket-Protocol: %s\r\n" % selected - - # Write the initial headers before attempting to read the challenge. - # This is necessary when using proxies (such as HAProxy), which - # need to see the Upgrade headers before passing through the - # non-HTTP traffic that follows. 
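The ``_wrap_method`` shim above replaces the old blanket ``_not_supported`` stubs: the plain ``RequestHandler`` output methods keep working until the connection is detached, after which they raise ``RuntimeError``. Roughly what a handler author will observe (illustrative)::

    import tornado.websocket

    class StrictWebSocket(tornado.websocket.WebSocketHandler):
        def open(self):
            try:
                self.write("plain HTTP output")  # stream already detached
            except RuntimeError:
                self.write_message("use write_message() on websockets")
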
- self.stream.write(tornado.escape.utf8( - "HTTP/1.1 101 WebSocket Protocol Handshake\r\n" - "Upgrade: WebSocket\r\n" - "Connection: Upgrade\r\n" - "Server: TornadoServer/%(version)s\r\n" - "Sec-WebSocket-Origin: %(origin)s\r\n" - "Sec-WebSocket-Location: %(scheme)s://%(host)s%(uri)s\r\n" - "%(subprotocol)s" - "\r\n" % (dict( - version=tornado.version, - origin=self.request.headers["Origin"], - scheme=scheme, - host=self.request.host, - uri=self.request.uri, - subprotocol=subprotocol_header)))) - self.stream.read_bytes(8, self._handle_challenge) - - def challenge_response(self, challenge): - """Generates the challenge response that's needed in the handshake - - The challenge parameter should be the raw bytes as sent from the - client. - """ - key_1 = self.request.headers.get("Sec-Websocket-Key1") - key_2 = self.request.headers.get("Sec-Websocket-Key2") - try: - part_1 = self._calculate_part(key_1) - part_2 = self._calculate_part(key_2) - except ValueError: - raise ValueError("Invalid Keys/Challenge") - return self._generate_challenge_response(part_1, part_2, challenge) - - def _handle_challenge(self, challenge): - try: - challenge_response = self.challenge_response(challenge) - except ValueError: - gen_log.debug("Malformed key data in WebSocket request") - self._abort() - return - self._write_response(challenge_response) - - def _write_response(self, challenge): - self.stream.write(challenge) - self.async_callback(self.handler.open)(*self.handler.open_args, **self.handler.open_kwargs) - self._receive_message() - - def _handle_websocket_headers(self): - """Verifies all invariant- and required headers - - If a header is missing or have an incorrect value ValueError will be - raised - """ - fields = ("Origin", "Host", "Sec-Websocket-Key1", - "Sec-Websocket-Key2") - if not all(map(lambda f: self.request.headers.get(f), fields)): - raise ValueError("Missing/Invalid WebSocket headers") - - def _calculate_part(self, key): - """Processes the key headers and calculates their key value. 
- - Raises ValueError when feed invalid key.""" - # pyflakes complains about variable reuse if both of these lines use 'c' - number = int(''.join(c for c in key if c.isdigit())) - spaces = len([c2 for c2 in key if c2.isspace()]) - try: - key_number = number // spaces - except (ValueError, ZeroDivisionError): - raise ValueError - return struct.pack(">I", key_number) - - def _generate_challenge_response(self, part_1, part_2, part_3): - m = hashlib.md5() - m.update(part_1) - m.update(part_2) - m.update(part_3) - return m.digest() - - def _receive_message(self): - self.stream.read_bytes(1, self._on_frame_type) - - def _on_frame_type(self, byte): - frame_type = ord(byte) - if frame_type == 0x00: - self.stream.read_until(b"\xff", self._on_end_delimiter) - elif frame_type == 0xff: - self.stream.read_bytes(1, self._on_length_indicator) - else: - self._abort() - - def _on_end_delimiter(self, frame): - if not self.client_terminated: - self.async_callback(self.handler.on_message)( - frame[:-1].decode("utf-8", "replace")) - if not self.client_terminated: - self._receive_message() - - def _on_length_indicator(self, byte): - if ord(byte) != 0x00: - self._abort() - return - self.client_terminated = True - self.close() - - def write_message(self, message, binary=False): - """Sends the given message to the client of this Web Socket.""" - if binary: - raise ValueError( - "Binary messages not supported by this version of websockets") - if isinstance(message, unicode_type): - message = message.encode("utf-8") - assert isinstance(message, bytes_type) - self.stream.write(b"\x00" + message + b"\xff") - - def write_ping(self, data): - """Send ping frame.""" - raise ValueError("Ping messages not supported by this version of websockets") - - def close(self, code=None, reason=None): - """Closes the WebSocket connection.""" - if not self.server_terminated: - if not self.stream.closed(): - self.stream.write("\xff\x00") - self.server_terminated = True - if self.client_terminated: - if self._waiting is not None: - self.stream.io_loop.remove_timeout(self._waiting) - self._waiting = None - self.stream.close() - elif self._waiting is None: - self._waiting = self.stream.io_loop.add_timeout( - time.time() + 5, self._abort) - - class WebSocketProtocol13(WebSocketProtocol): """Implementation of the WebSocket protocol from RFC 6455. @@ -645,7 +452,8 @@ class WebSocketProtocol13(WebSocketProtocol): "%s" "\r\n" % (self._challenge_response(), subprotocol_header))) - self.async_callback(self.handler.open)(*self.handler.open_args, **self.handler.open_kwargs) + self._run_callback(self.handler.open, *self.handler.open_args, + **self.handler.open_kwargs) self._receive_frame() def _write_frame(self, fin, opcode, data): @@ -803,10 +611,10 @@ class WebSocketProtocol13(WebSocketProtocol): except UnicodeDecodeError: self._abort() return - self.async_callback(self.handler.on_message)(decoded) + self._run_callback(self.handler.on_message, decoded) elif opcode == 0x2: # Binary data - self.async_callback(self.handler.on_message)(data) + self._run_callback(self.handler.on_message, data) elif opcode == 0x8: # Close self.client_terminated = True @@ -820,7 +628,7 @@ class WebSocketProtocol13(WebSocketProtocol): self._write_frame(True, 0xA, data) elif opcode == 0xA: # Pong - self.async_callback(self.handler.on_pong)(data) + self._run_callback(self.handler.on_pong, data) else: self._abort() @@ -885,7 +693,7 @@ class WebSocketClientConnection(simple_httpclient._HTTPConnection): .. versionadded:: 3.2 - .. versionchanged:: 3.3 + .. 
versionchanged:: 4.0 Added the ``code`` and ``reason`` arguments. """ @@ -893,10 +701,12 @@ class WebSocketClientConnection(simple_httpclient._HTTPConnection): self.protocol.close(code, reason) self.protocol = None - def _on_close(self): + def on_connection_close(self): + if not self.connect_future.done(): + self.connect_future.set_exception(StreamClosedError()) self.on_message(None) - self.resolver.close() - super(WebSocketClientConnection, self)._on_close() + self.tcp_client.close() + super(WebSocketClientConnection, self).on_connection_close() def _on_http_response(self, response): if not self.connect_future.done(): @@ -925,7 +735,12 @@ class WebSocketClientConnection(simple_httpclient._HTTPConnection): self._timeout = None self.stream = self.connection.detach() - self.stream.set_close_callback(self._on_close) + self.stream.set_close_callback(self.on_connection_close) + # Once we've taken over the connection, clear the final callback + # we set on the http request. This deactivates the error handling + # in simple_httpclient that would otherwise interfere with our + # ability to see exceptions. + self.final_callback = None self.connect_future.set_result(self) diff --git a/libs/tornado/wsgi.py b/libs/tornado/wsgi.py index 47a0590..6e115e1 100755 --- a/libs/tornado/wsgi.py +++ b/libs/tornado/wsgi.py @@ -77,7 +77,7 @@ else: class WSGIApplication(web.Application): """A WSGI equivalent of `tornado.web.Application`. - .. deprecated: 3.3:: + .. deprecated:: 4.0 Use a regular `.Application` and wrap it in `WSGIAdapter` instead. """ @@ -126,7 +126,7 @@ class _WSGIConnection(httputil.HTTPConnection): if self._expected_content_remaining is not None: self._expected_content_remaining -= len(chunk) if self._expected_content_remaining < 0: - self._error = httputil.HTTPOutputException( + self._error = httputil.HTTPOutputError( "Tried to write more data than Content-Length") raise self._error self._write_buffer.append(chunk) @@ -137,7 +137,7 @@ class _WSGIConnection(httputil.HTTPConnection): def finish(self): if (self._expected_content_remaining is not None and self._expected_content_remaining != 0): - self._error = httputil.HTTPOutputException( + self._error = httputil.HTTPOutputError( "Tried to write %d bytes less than Content-Length" % self._expected_content_remaining) raise self._error @@ -183,7 +183,7 @@ class WSGIAdapter(object): that it is not possible to use `.AsyncHTTPClient`, or the `tornado.auth` or `tornado.websocket` modules. - .. versionadded:: 3.3 + .. versionadded:: 4.0 """ def __init__(self, application): if isinstance(application, WSGIApplication):
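For reference, the intended use of the ``WSGIAdapter`` documented above is to wrap a regular ``Application`` for any WSGI server (the handler and port here are illustrative)::

    import tornado.web
    import tornado.wsgi
    import wsgiref.simple_server

    application = tornado.web.Application([(r"/", MainHandler)])
    wsgi_app = tornado.wsgi.WSGIAdapter(application)
    server = wsgiref.simple_server.make_server("", 8888, wsgi_app)
    server.serve_forever()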