
Update requests lib

pull/2670/merge
Ruud, 11 years ago
commit 91f3cda995
32 changed files (lines changed in parentheses):

libs/requests/__init__.py (8)
libs/requests/adapters.py (134)
libs/requests/auth.py (65)
libs/requests/cacert.pem (8212)
libs/requests/compat.py (6)
libs/requests/cookies.py (74)
libs/requests/exceptions.py (10)
libs/requests/models.py (201)
libs/requests/packages/charade/__init__.py (34)
libs/requests/packages/charade/__main__.py (7)
libs/requests/packages/charade/jpcntx.py (2)
libs/requests/packages/charade/latin1prober.py (2)
libs/requests/packages/charade/universaldetector.py (12)
libs/requests/packages/urllib3/__init__.py (2)
libs/requests/packages/urllib3/_collections.py (25)
libs/requests/packages/urllib3/connection.py (107)
libs/requests/packages/urllib3/connectionpool.py (360)
libs/requests/packages/urllib3/contrib/ntlmpool.py (2)
libs/requests/packages/urllib3/contrib/pyopenssl.py (203)
libs/requests/packages/urllib3/exceptions.py (30)
libs/requests/packages/urllib3/fields.py (177)
libs/requests/packages/urllib3/filepost.py (59)
libs/requests/packages/urllib3/packages/ssl_match_hostname/__init__.py (74)
libs/requests/packages/urllib3/packages/ssl_match_hostname/_implementation.py (105)
libs/requests/packages/urllib3/poolmanager.py (120)
libs/requests/packages/urllib3/request.py (2)
libs/requests/packages/urllib3/response.py (85)
libs/requests/packages/urllib3/util.py (289)
libs/requests/sessions.py (155)
libs/requests/status_codes.py (3)
libs/requests/structures.py (2)
libs/requests/utils.py (167)

libs/requests/__init__.py (8 lines changed)

@@ -23,7 +23,7 @@ usage:
>>> payload = dict(key1='value1', key2='value2')
>>> r = requests.post("http://httpbin.org/post", data=payload)
>>> print r.text
>>> print(r.text)
{
...
"form": {
@@ -42,15 +42,15 @@ is at <http://python-requests.org>.
"""
__title__ = 'requests'
__version__ = '1.2.3'
__build__ = 0x010203
__version__ = '2.1.0'
__build__ = 0x020100
__author__ = 'Kenneth Reitz'
__license__ = 'Apache 2.0'
__copyright__ = 'Copyright 2013 Kenneth Reitz'
# Attempt to enable urllib3's SNI support, if possible
try:
from requests.packages.urllib3.contrib import pyopenssl
from .packages.urllib3.contrib import pyopenssl
pyopenssl.inject_into_urllib3()
except ImportError:
pass
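A quick sanity check after this bump (a minimal sketch; it assumes the vendored copy is importable as plain `requests`):

    import requests

    assert requests.__version__ == '2.1.0'
    assert requests.__build__ == 0x020100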

libs/requests/adapters.py (134 lines changed)

@@ -11,18 +11,20 @@ and maintain connections.
import socket
from .models import Response
from .packages.urllib3.poolmanager import PoolManager, ProxyManager
from .packages.urllib3.poolmanager import PoolManager, proxy_from_url
from .packages.urllib3.response import HTTPResponse
from .packages.urllib3.util import Timeout as TimeoutSauce
from .compat import urlparse, basestring, urldefrag, unquote
from .utils import (DEFAULT_CA_BUNDLE_PATH, get_encoding_from_headers,
prepend_scheme_if_needed, get_auth_from_url)
except_on_missing_scheme, get_auth_from_url)
from .structures import CaseInsensitiveDict
from .packages.urllib3.exceptions import MaxRetryError
from .packages.urllib3.exceptions import TimeoutError
from .packages.urllib3.exceptions import SSLError as _SSLError
from .packages.urllib3.exceptions import HTTPError as _HTTPError
from .packages.urllib3.exceptions import ProxyError as _ProxyError
from .cookies import extract_cookies_to_jar
from .exceptions import ConnectionError, Timeout, SSLError
from .exceptions import ConnectionError, Timeout, SSLError, ProxyError
from .auth import _basic_auth_str
DEFAULT_POOLBLOCK = False
@@ -71,6 +73,7 @@ class HTTPAdapter(BaseAdapter):
pool_block=DEFAULT_POOLBLOCK):
self.max_retries = max_retries
self.config = {}
self.proxy_manager = {}
super(HTTPAdapter, self).__init__()
@@ -118,7 +121,7 @@ class HTTPAdapter(BaseAdapter):
:param verify: Whether we should actually verify the certificate.
:param cert: The SSL certificate to verify.
"""
if url.startswith('https') and verify:
if url.lower().startswith('https') and verify:
cert_loc = None
@@ -184,18 +187,28 @@ class HTTPAdapter(BaseAdapter):
def get_connection(self, url, proxies=None):
"""Returns a urllib3 connection for the given URL. This should not be
called from user code, and is only exposed for use when subclassing the
:class:`HTTPAdapter <reqeusts.adapters.HTTPAdapter>`.
:class:`HTTPAdapter <requests.adapters.HTTPAdapter>`.
:param url: The URL to connect to.
:param proxies: (optional) A Requests-style dictionary of proxies used on this request.
"""
proxies = proxies or {}
proxy = proxies.get(urlparse(url).scheme)
proxy = proxies.get(urlparse(url.lower()).scheme)
if proxy:
proxy = prepend_scheme_if_needed(proxy, urlparse(url).scheme)
conn = ProxyManager(self.poolmanager.connection_from_url(proxy))
except_on_missing_scheme(proxy)
proxy_headers = self.proxy_headers(proxy)
if not proxy in self.proxy_manager:
self.proxy_manager[proxy] = proxy_from_url(
proxy,
proxy_headers=proxy_headers)
conn = self.proxy_manager[proxy].connection_from_url(url)
else:
# Only scheme should be lower case
parsed = urlparse(url)
url = parsed.geturl()
conn = self.poolmanager.connection_from_url(url)
return conn
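The rewritten get_connection() keeps one proxy manager per proxy URL instead of wrapping the pool on every call. A minimal sketch of the same pattern, using the proxy_from_url helper imported above (the proxy address is hypothetical):

    from requests.packages.urllib3.poolmanager import proxy_from_url

    manager = proxy_from_url('http://10.0.0.1:3128', proxy_headers={})
    conn = manager.connection_from_url('http://httpbin.org/get')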
@@ -211,10 +224,10 @@ class HTTPAdapter(BaseAdapter):
def request_url(self, request, proxies):
"""Obtain the url to use when making the final request.
If the message is being sent through a proxy, the full URL has to be
used. Otherwise, we should only use the path portion of the URL.
If the message is being sent through a HTTP proxy, the full URL has to
be used. Otherwise, we should only use the path portion of the URL.
This shoudl not be called from user code, and is only exposed for use
This should not be called from user code, and is only exposed for use
when subclassing the
:class:`HTTPAdapter <requests.adapters.HTTPAdapter>`.
@@ -222,9 +235,10 @@ class HTTPAdapter(BaseAdapter):
:param proxies: A dictionary of schemes to proxy URLs.
"""
proxies = proxies or {}
proxy = proxies.get(urlparse(request.url).scheme)
scheme = urlparse(request.url).scheme
proxy = proxies.get(scheme)
if proxy:
if proxy and scheme != 'https':
url, _ = urldefrag(request.url)
else:
url = request.path_url
@@ -232,8 +246,9 @@ class HTTPAdapter(BaseAdapter):
return url
def add_headers(self, request, **kwargs):
"""Add any headers needed by the connection. Currently this adds a
Proxy-Authorization header.
"""Add any headers needed by the connection. As of v2.0 this does
nothing by default, but is left for overriding by users that subclass
the :class:`HTTPAdapter <requests.adapters.HTTPAdapter>`.
This should not be called from user code, and is only exposed for use
when subclassing the
@@ -242,12 +257,22 @@ class HTTPAdapter(BaseAdapter):
:param request: The :class:`PreparedRequest <PreparedRequest>` to add headers to.
:param kwargs: The keyword arguments from the call to send().
"""
proxies = kwargs.get('proxies', {})
pass
if proxies is None:
proxies = {}
def proxy_headers(self, proxy):
"""Returns a dictionary of the headers to add to any request sent
through a proxy. This works with urllib3 magic to ensure that they are
correctly sent to the proxy, rather than in a tunnelled request if
CONNECT is being used.
proxy = proxies.get(urlparse(request.url).scheme)
This should not be called from user code, and is only exposed for use
when subclassing the
:class:`HTTPAdapter <requests.adapters.HTTPAdapter>`.
:param proxies: The url of the proxy being used for this request.
:param kwargs: Optional additional keyword arguments.
"""
headers = {}
username, password = get_auth_from_url(proxy)
if username and password:
@@ -255,8 +280,10 @@ class HTTPAdapter(BaseAdapter):
# to decode them.
username = unquote(username)
password = unquote(password)
request.headers['Proxy-Authorization'] = _basic_auth_str(username,
password)
headers['Proxy-Authorization'] = _basic_auth_str(username,
password)
return headers
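The new proxy_headers() hook turns credentials embedded in the proxy URL into a Proxy-Authorization header. A small sketch (proxy URL and credentials are hypothetical):

    from requests.adapters import HTTPAdapter

    adapter = HTTPAdapter()
    adapter.proxy_headers('http://user:pass@10.0.0.1:3128')
    # -> {'Proxy-Authorization': 'Basic dXNlcjpwYXNz'}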
def send(self, request, stream=False, timeout=None, verify=True, cert=None, proxies=None):
"""Sends PreparedRequest object. Returns Response object.
@@ -265,7 +292,7 @@ class HTTPAdapter(BaseAdapter):
:param stream: (optional) Whether to stream the request content.
:param timeout: (optional) The timeout on the request.
:param verify: (optional) Whether to verify SSL certificates.
:param vert: (optional) Any user-provided SSL certificate to be trusted.
:param cert: (optional) Any user-provided SSL certificate to be trusted.
:param proxies: (optional) The proxies dictionary to apply to the request.
"""
@@ -273,10 +300,15 @@ class HTTPAdapter(BaseAdapter):
self.cert_verify(conn, request.url, verify, cert)
url = self.request_url(request, proxies)
self.add_headers(request, proxies=proxies)
self.add_headers(request)
chunked = not (request.body is None or 'Content-Length' in request.headers)
if stream:
timeout = TimeoutSauce(connect=timeout)
else:
timeout = TimeoutSauce(connect=timeout, read=timeout)
try:
if not chunked:
resp = conn.urlopen(
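The TimeoutSauce split above means stream=True bounds only the connect phase, while stream=False applies the same bound to connect and read. Restated as a sketch:

    from requests.packages.urllib3.util import Timeout as TimeoutSauce

    TimeoutSauce(connect=5)          # stream=True: reads may block indefinitely
    TimeoutSauce(connect=5, read=5)  # stream=False: both phases bounded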
@@ -298,27 +330,40 @@ class HTTPAdapter(BaseAdapter):
conn = conn.proxy_pool
low_conn = conn._get_conn(timeout=timeout)
low_conn.putrequest(request.method, url, skip_accept_encoding=True)
for header, value in request.headers.items():
low_conn.putheader(header, value)
low_conn.endheaders()
for i in request.body:
low_conn.send(hex(len(i))[2:].encode('utf-8'))
low_conn.send(b'\r\n')
low_conn.send(i)
low_conn.send(b'\r\n')
low_conn.send(b'0\r\n\r\n')
r = low_conn.getresponse()
resp = HTTPResponse.from_httplib(r,
pool=conn,
connection=low_conn,
preload_content=False,
decode_content=False
)
try:
low_conn.putrequest(request.method,
url,
skip_accept_encoding=True)
for header, value in request.headers.items():
low_conn.putheader(header, value)
low_conn.endheaders()
for i in request.body:
low_conn.send(hex(len(i))[2:].encode('utf-8'))
low_conn.send(b'\r\n')
low_conn.send(i)
low_conn.send(b'\r\n')
low_conn.send(b'0\r\n\r\n')
r = low_conn.getresponse()
resp = HTTPResponse.from_httplib(
r,
pool=conn,
connection=low_conn,
preload_content=False,
decode_content=False
)
except:
# If we hit any problems here, clean up the connection.
# Then, reraise so that we can handle the actual exception.
low_conn.close()
raise
else:
# All is well, return the connection to the pool.
conn._put_conn(low_conn)
except socket.error as sockerr:
raise ConnectionError(sockerr)
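The send loop above frames each body chunk per the chunked transfer-coding: size in hex, CRLF, the chunk, CRLF, then a zero-length terminator. A standalone sketch of the same framing:

    def chunked_frames(body):
        # body is any iterable of byte strings
        for chunk in body:
            yield hex(len(chunk))[2:].encode('utf-8') + b'\r\n' + chunk + b'\r\n'
        yield b'0\r\n\r\n'  # zero-length chunk ends the body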
@@ -326,6 +371,9 @@ class HTTPAdapter(BaseAdapter):
except MaxRetryError as e:
raise ConnectionError(e)
except _ProxyError as e:
raise ProxyError(e)
except (_SSLError, _HTTPError) as e:
if isinstance(e, _SSLError):
raise SSLError(e)

libs/requests/auth.py (65 lines changed)

@@ -16,9 +16,9 @@ import logging
from base64 import b64encode
from .compat import urlparse, str
from .cookies import extract_cookies_to_jar
from .utils import parse_dict_header
log = logging.getLogger(__name__)
CONTENT_TYPE_FORM_URLENCODED = 'application/x-www-form-urlencoded'
@@ -64,6 +64,7 @@ class HTTPDigestAuth(AuthBase):
self.last_nonce = ''
self.nonce_count = 0
self.chal = {}
self.pos = None
def build_digest_header(self, method, url):
@@ -78,7 +79,7 @@
else:
_algorithm = algorithm.upper()
# lambdas assume digest modules are imported at the top level
if _algorithm == 'MD5':
if _algorithm == 'MD5' or _algorithm == 'MD5-SESS':
def md5_utf8(x):
if isinstance(x, str):
x = x.encode('utf-8')
@@ -90,7 +91,7 @@
x = x.encode('utf-8')
return hashlib.sha1(x).hexdigest()
hash_utf8 = sha_utf8
# XXX MD5-sess
KD = lambda s, d: hash_utf8("%s:%s" % (s, d))
if hash_utf8 is None:
@@ -106,23 +107,28 @@
A1 = '%s:%s:%s' % (self.username, realm, self.password)
A2 = '%s:%s' % (method, path)
if qop == 'auth':
if nonce == self.last_nonce:
self.nonce_count += 1
else:
self.nonce_count = 1
ncvalue = '%08x' % self.nonce_count
s = str(self.nonce_count).encode('utf-8')
s += nonce.encode('utf-8')
s += time.ctime().encode('utf-8')
s += os.urandom(8)
cnonce = (hashlib.sha1(s).hexdigest()[:16])
noncebit = "%s:%s:%s:%s:%s" % (nonce, ncvalue, cnonce, qop, hash_utf8(A2))
respdig = KD(hash_utf8(A1), noncebit)
elif qop is None:
respdig = KD(hash_utf8(A1), "%s:%s" % (nonce, hash_utf8(A2)))
HA1 = hash_utf8(A1)
HA2 = hash_utf8(A2)
if nonce == self.last_nonce:
self.nonce_count += 1
else:
self.nonce_count = 1
ncvalue = '%08x' % self.nonce_count
s = str(self.nonce_count).encode('utf-8')
s += nonce.encode('utf-8')
s += time.ctime().encode('utf-8')
s += os.urandom(8)
cnonce = (hashlib.sha1(s).hexdigest()[:16])
noncebit = "%s:%s:%s:%s:%s" % (nonce, ncvalue, cnonce, qop, HA2)
if _algorithm == 'MD5-SESS':
HA1 = hash_utf8('%s:%s:%s' % (HA1, nonce, cnonce))
if qop is None:
respdig = KD(HA1, "%s:%s" % (nonce, HA2))
elif qop == 'auth' or 'auth' in qop.split(','):
respdig = KD(HA1, noncebit)
else:
# XXX handle auth-int.
return None
@@ -139,13 +145,17 @@
if entdig:
base += ', digest="%s"' % entdig
if qop:
base += ', qop=auth, nc=%s, cnonce="%s"' % (ncvalue, cnonce)
base += ', qop="auth", nc=%s, cnonce="%s"' % (ncvalue, cnonce)
return 'Digest %s' % (base)
def handle_401(self, r, **kwargs):
"""Takes the given response and tries digest-auth, if needed."""
if self.pos is not None:
# Rewind the file position indicator of the body to where
# it was to resend the request.
r.request.body.seek(self.pos)
num_401_calls = getattr(self, 'num_401_calls', 1)
s_auth = r.headers.get('www-authenticate', '')
@@ -159,10 +169,15 @@
# to allow our new request to reuse the same one.
r.content
r.raw.release_conn()
prep = r.request.copy()
extract_cookies_to_jar(prep._cookies, r.request, r.raw)
prep.prepare_cookies(prep._cookies)
r.request.headers['Authorization'] = self.build_digest_header(r.request.method, r.request.url)
_r = r.connection.send(r.request, **kwargs)
prep.headers['Authorization'] = self.build_digest_header(
prep.method, prep.url)
_r = r.connection.send(prep, **kwargs)
_r.history.append(r)
_r.request = prep
return _r
@@ -173,5 +188,9 @@
# If we have a saved nonce, skip the 401
if self.last_nonce:
r.headers['Authorization'] = self.build_digest_header(r.method, r.url)
try:
self.pos = r.body.tell()
except AttributeError:
pass
r.register_hook('response', self.handle_401)
return r
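Exercising the reworked digest flow is easiest against an endpoint that demands it; a minimal sketch using httpbin's digest-auth route (credentials are hypothetical):

    import requests
    from requests.auth import HTTPDigestAuth

    r = requests.get('http://httpbin.org/digest-auth/auth/user/passwd',
                     auth=HTTPDigestAuth('user', 'passwd'))
    r.status_code  # 200 once the 401 challenge has been answered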

libs/requests/cacert.pem (8212 lines changed)

File diff suppressed because it is too large

libs/requests/compat.py (6 lines changed)

@@ -83,13 +83,14 @@ except ImportError:
# ---------
if is_py2:
from urllib import quote, unquote, quote_plus, unquote_plus, urlencode
from urllib import quote, unquote, quote_plus, unquote_plus, urlencode, getproxies, proxy_bypass
from urlparse import urlparse, urlunparse, urljoin, urlsplit, urldefrag
from urllib2 import parse_http_list
import cookielib
from Cookie import Morsel
from StringIO import StringIO
from .packages.urllib3.packages.ordered_dict import OrderedDict
from httplib import IncompleteRead
builtin_str = str
bytes = str
@@ -100,11 +101,12 @@ if is_py2:
elif is_py3:
from urllib.parse import urlparse, urlunparse, urljoin, urlsplit, urlencode, quote, unquote, quote_plus, unquote_plus, urldefrag
from urllib.request import parse_http_list
from urllib.request import parse_http_list, getproxies, proxy_bypass
from http import cookiejar as cookielib
from http.cookies import Morsel
from io import StringIO
from collections import OrderedDict
from http.client import IncompleteRead
builtin_str = str
str = str
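The newly re-exported getproxies comes straight from the stdlib and reads proxy settings from the environment. A one-line sketch (the returned mapping is illustrative):

    from requests.compat import getproxies

    getproxies()  # e.g. {'http': 'http://10.0.0.1:3128'} from HTTP_PROXY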

libs/requests/cookies.py (74 lines changed)

@@ -6,8 +6,9 @@ Compatibility code to be able to use `cookielib.CookieJar` with requests.
requests.utils imports from here, so be careful with imports.
"""
import time
import collections
from .compat import cookielib, urlparse, Morsel
from .compat import cookielib, urlparse, urlunparse, Morsel
try:
import threading
@@ -44,7 +45,18 @@ class MockRequest(object):
return self.get_host()
def get_full_url(self):
return self._r.url
# Only return the response's URL if the user hadn't set the Host
# header
if not self._r.headers.get('Host'):
return self._r.url
# If they did set it, retrieve it and reconstruct the expected domain
host = self._r.headers['Host']
parsed = urlparse(self._r.url)
# Reconstruct the URL as we expect it
return urlunparse([
parsed.scheme, host, parsed.path, parsed.params, parsed.query,
parsed.fragment
])
def is_unverifiable(self):
return True
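The Host-header handling above rebuilds the URL with the user-supplied authority so cookie domain matching sees the intended host. The same reconstruction in isolation (URL and host are hypothetical):

    from requests.compat import urlparse, urlunparse

    parsed = urlparse('http://10.0.0.1/path?q=1')
    urlunparse([parsed.scheme, 'example.com', parsed.path,
                parsed.params, parsed.query, parsed.fragment])
    # -> 'http://example.com/path?q=1'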
@@ -73,6 +85,10 @@ class MockRequest(object):
def origin_req_host(self):
return self.get_origin_req_host()
@property
def host(self):
return self.get_host()
class MockResponse(object):
"""Wraps a `httplib.HTTPMessage` to mimic a `urllib.addinfourl`.
@@ -102,6 +118,9 @@ def extract_cookies_to_jar(jar, request, response):
:param request: our own requests.Request object
:param response: urllib3.HTTPResponse object
"""
if not (hasattr(response, '_original_response') and
response._original_response):
return
# the _original_response field is the wrapped httplib.HTTPResponse object,
req = MockRequest(request)
# pull out the HTTPMessage with the headers and put it in the mock:
@@ -258,6 +277,11 @@ class RequestsCookieJar(cookielib.CookieJar, collections.MutableMapping):
"""Deletes a cookie given a name. Wraps cookielib.CookieJar's remove_cookie_by_name()."""
remove_cookie_by_name(self, name)
def set_cookie(self, cookie, *args, **kwargs):
if hasattr(cookie.value, 'startswith') and cookie.value.startswith('"') and cookie.value.endswith('"'):
cookie.value = cookie.value.replace('\\"', '')
return super(RequestsCookieJar, self).set_cookie(cookie, *args, **kwargs)
def update(self, other):
"""Updates this jar with cookies from another CookieJar or dict-like"""
if isinstance(other, cookielib.CookieJar):
@@ -354,19 +378,23 @@ def create_cookie(name, value, **kwargs):
def morsel_to_cookie(morsel):
"""Convert a Morsel object into a Cookie containing the one k/v pair."""
expires = None
if morsel["max-age"]:
expires = time.time() + morsel["max-age"]
elif morsel['expires']:
expires = morsel['expires']
if type(expires) == type(""):
time_template = "%a, %d-%b-%Y %H:%M:%S GMT"
expires = time.mktime(time.strptime(expires, time_template))
c = create_cookie(
name=morsel.key,
value=morsel.value,
version=morsel['version'] or 0,
port=None,
port_specified=False,
domain=morsel['domain'],
domain_specified=bool(morsel['domain']),
domain_initial_dot=morsel['domain'].startswith('.'),
path=morsel['path'],
path_specified=bool(morsel['path']),
secure=bool(morsel['secure']),
expires=morsel['max-age'] or morsel['expires'],
expires=expires,
discard=False,
comment=morsel['comment'],
comment_url=bool(morsel['comment']),
@@ -375,15 +403,43 @@ def morsel_to_cookie(morsel):
return c
def cookiejar_from_dict(cookie_dict, cookiejar=None):
def cookiejar_from_dict(cookie_dict, cookiejar=None, overwrite=True):
"""Returns a CookieJar from a key/value dictionary.
:param cookie_dict: Dict of key/values to insert into CookieJar.
:param cookiejar: (optional) A cookiejar to add the cookies to.
:param overwrite: (optional) If False, will not replace cookies
already in the jar with new ones.
"""
if cookiejar is None:
cookiejar = RequestsCookieJar()
if cookie_dict is not None:
names_from_jar = [cookie.name for cookie in cookiejar]
for name in cookie_dict:
cookiejar.set_cookie(create_cookie(name, cookie_dict[name]))
if overwrite or (name not in names_from_jar):
cookiejar.set_cookie(create_cookie(name, cookie_dict[name]))
return cookiejar
def merge_cookies(cookiejar, cookies):
"""Add cookies to cookiejar and returns a merged CookieJar.
:param cookiejar: CookieJar object to add the cookies to.
:param cookies: Dictionary or CookieJar object to be added.
"""
if not isinstance(cookiejar, cookielib.CookieJar):
raise ValueError('You can only merge into CookieJar')
if isinstance(cookies, dict):
cookiejar = cookiejar_from_dict(
cookies, cookiejar=cookiejar, overwrite=False)
elif isinstance(cookies, cookielib.CookieJar):
try:
cookiejar.update(cookies)
except AttributeError:
for cookie_in_jar in cookies:
cookiejar.set_cookie(cookie_in_jar)
return cookiejar
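A short sketch of the new overwrite semantics (cookie names and values are hypothetical):

    from requests.cookies import RequestsCookieJar, merge_cookies

    jar = RequestsCookieJar()
    jar.set('session', 'abc')
    merge_cookies(jar, {'session': 'xyz', 'theme': 'dark'})
    # 'session' keeps 'abc' (overwrite=False); 'theme' is added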

libs/requests/exceptions.py (10 lines changed)

@@ -9,7 +9,7 @@ This module contains the set of Requests' exceptions.
"""
class RequestException(RuntimeError):
class RequestException(IOError):
"""There was an ambiguous exception that occurred while handling your
request."""
@@ -27,6 +27,10 @@ class ConnectionError(RequestException):
"""A Connection error occurred."""
class ProxyError(ConnectionError):
"""A proxy error occurred."""
class SSLError(ConnectionError):
"""An SSL error occurred."""
@@ -53,3 +57,7 @@ class InvalidSchema(RequestException, ValueError):
class InvalidURL(RequestException, ValueError):
""" The URL provided was somehow invalid. """
class ChunkedEncodingError(RequestException):
"""The server declared chunked encoding but sent an invalid chunk."""

libs/requests/models.py (201 lines changed)

@@ -11,22 +11,25 @@ import collections
import logging
import datetime
from io import BytesIO
from io import BytesIO, UnsupportedOperation
from .hooks import default_hooks
from .structures import CaseInsensitiveDict
from .auth import HTTPBasicAuth
from .cookies import cookiejar_from_dict, get_cookie_header
from .packages.urllib3.fields import RequestField
from .packages.urllib3.filepost import encode_multipart_formdata
from .packages.urllib3.util import parse_url
from .exceptions import HTTPError, RequestException, MissingSchema, InvalidURL
from .exceptions import (
HTTPError, RequestException, MissingSchema, InvalidURL,
ChunkedEncodingError)
from .utils import (
guess_filename, get_auth_from_url, requote_uri,
stream_decode_response_unicode, to_key_val_list, parse_header_links,
iter_slices, guess_json_utf, super_len)
iter_slices, guess_json_utf, super_len, to_native_string)
from .compat import (
cookielib, urlparse, urlunparse, urlsplit, urlencode, str, bytes, StringIO,
is_py2, chardet, json, builtin_str, basestring)
cookielib, urlunparse, urlsplit, urlencode, str, bytes, StringIO,
is_py2, chardet, json, builtin_str, basestring, IncompleteRead)
CONTENT_CHUNK_SIZE = 10 * 1024
ITER_CHUNK_SIZE = 512
@@ -88,12 +91,14 @@ class RequestEncodingMixin(object):
"""Build the body for a multipart/form-data request.
Will successfully encode files when passed as a dict or a list of
2-tuples. Order is retained if data is a list of 2-tuples but abritrary
2-tuples. Order is retained if data is a list of 2-tuples but arbitrary
if parameters are supplied as a dict.
"""
if (not files) or isinstance(data, str):
return None
if (not files):
raise ValueError("Files must be provided.")
elif isinstance(data, basestring):
raise ValueError("Data must not be a string.")
new_fields = []
fields = to_key_val_list(data or {})
@@ -104,6 +109,10 @@
val = [val]
for v in val:
if v is not None:
# Don't call str() on bytestrings: in Py3 it all goes wrong.
if not isinstance(v, bytes):
v = str(v)
new_fields.append(
(field.decode('utf-8') if isinstance(field, bytes) else field,
v.encode('utf-8') if isinstance(v, str) else v))
@@ -111,11 +120,14 @@
for (k, v) in files:
# support for explicit filename
ft = None
fh = None
if isinstance(v, (tuple, list)):
if len(v) == 2:
fn, fp = v
else:
elif len(v) == 3:
fn, fp, ft = v
else:
fn, fp, ft, fh = v
else:
fn = guess_filename(v) or k
fp = v
@@ -124,11 +136,10 @@
if isinstance(fp, bytes):
fp = BytesIO(fp)
if ft:
new_v = (fn, fp.read(), ft)
else:
new_v = (fn, fp.read())
new_fields.append((k, new_v))
rf = RequestField(name=k, data=fp.read(),
filename=fn, headers=fh)
rf.make_multipart(content_type=ft)
new_fields.append(rf)
body, content_type = encode_multipart_formdata(new_fields)
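Routing uploads through RequestField also enables per-part headers: a files value may now be a 4-tuple of (filename, fileobj, content_type, headers). A sketch (the local file and header are hypothetical):

    import requests

    with open('report.csv', 'rb') as f:
        files = {'field': ('report.csv', f, 'text/csv', {'X-Meta': 'demo'})}
        requests.post('http://httpbin.org/post', files=files)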
@@ -139,6 +150,9 @@
def register_hook(self, event, hook):
"""Properly register a hook."""
if event not in self.hooks:
raise ValueError('Unsupported event specified, with event name "%s"' % (event))
if isinstance(hook, collections.Callable):
self.hooks[event].append(hook)
elif hasattr(hook, '__iter__'):
@@ -184,8 +198,8 @@ class Request(RequestHooksMixin):
url=None,
headers=None,
files=None,
data=dict(),
params=dict(),
data=None,
params=None,
auth=None,
cookies=None,
hooks=None):
@@ -209,7 +223,6 @@
self.params = params
self.auth = auth
self.cookies = cookies
self.hooks = hooks
def __repr__(self):
return '<Request [%s]>' % (self.method)
@@ -217,19 +230,17 @@
def prepare(self):
"""Constructs a :class:`PreparedRequest <PreparedRequest>` for transmission and returns it."""
p = PreparedRequest()
p.prepare_method(self.method)
p.prepare_url(self.url, self.params)
p.prepare_headers(self.headers)
p.prepare_cookies(self.cookies)
p.prepare_body(self.data, self.files)
p.prepare_auth(self.auth, self.url)
# Note that prepare_auth must be last to enable authentication schemes
# such as OAuth to work on a fully prepared request.
# This MUST go after prepare_auth. Authenticators could add a hook
p.prepare_hooks(self.hooks)
p.prepare(
method=self.method,
url=self.url,
headers=self.headers,
files=self.files,
data=self.data,
params=self.params,
auth=self.auth,
cookies=self.cookies,
hooks=self.hooks,
)
return p
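Request.prepare() is now a thin wrapper over PreparedRequest.prepare(), which keeps the prepare-then-send flow explicit. A minimal sketch:

    import requests

    req = requests.Request('GET', 'http://httpbin.org/get', params={'q': 'demo'})
    prepped = req.prepare()        # single call into PreparedRequest.prepare()
    r = requests.Session().send(prepped)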
@@ -259,14 +270,43 @@ class PreparedRequest(RequestEncodingMixin, RequestHooksMixin):
self.url = None
#: dictionary of HTTP headers.
self.headers = None
# The `CookieJar` used to create the Cookie header will be stored here
# after prepare_cookies is called
self._cookies = None
#: request body to send to the server.
self.body = None
#: dictionary of callback hooks, for internal usage.
self.hooks = default_hooks()
def prepare(self, method=None, url=None, headers=None, files=None,
data=None, params=None, auth=None, cookies=None, hooks=None):
"""Prepares the entire request with the given parameters."""
self.prepare_method(method)
self.prepare_url(url, params)
self.prepare_headers(headers)
self.prepare_cookies(cookies)
self.prepare_body(data, files)
self.prepare_auth(auth, url)
# Note that prepare_auth must be last to enable authentication schemes
# such as OAuth to work on a fully prepared request.
# This MUST go after prepare_auth. Authenticators could add a hook
self.prepare_hooks(hooks)
def __repr__(self):
return '<PreparedRequest [%s]>' % (self.method)
def copy(self):
p = PreparedRequest()
p.method = self.method
p.url = self.url
p.headers = self.headers.copy()
p._cookies = self._cookies.copy()
p.body = self.body
p.hooks = self.hooks
return p
def prepare_method(self, method):
"""Prepares the given HTTP method."""
self.method = method
@@ -284,11 +324,17 @@ class PreparedRequest(RequestEncodingMixin, RequestHooksMixin):
except UnicodeDecodeError:
pass
# Don't do any URL preparation for oddball schemes
if ':' in url and not url.lower().startswith('http'):
self.url = url
return
# Support for unicode domain names and paths.
scheme, auth, host, port, path, query, fragment = parse_url(url)
if not scheme:
raise MissingSchema("Invalid URL %r: No schema supplied" % url)
raise MissingSchema("Invalid URL {0!r}: No schema supplied. "
"Perhaps you meant http://{0}?".format(url))
if not host:
raise InvalidURL("Invalid URL %r: No host supplied" % url)
@@ -337,8 +383,7 @@ class PreparedRequest(RequestEncodingMixin, RequestHooksMixin):
"""Prepares the given HTTP headers."""
if headers:
headers = dict((name.encode('ascii'), value) for name, value in headers.items())
self.headers = CaseInsensitiveDict(headers)
self.headers = CaseInsensitiveDict((to_native_string(name), value) for name, value in headers.items())
else:
self.headers = CaseInsensitiveDict()
@@ -352,7 +397,6 @@ class PreparedRequest(RequestEncodingMixin, RequestHooksMixin):
body = None
content_type = None
length = None
is_stream = False
is_stream = all([
hasattr(data, '__iter__'),
@@ -363,8 +407,8 @@ class PreparedRequest(RequestEncodingMixin, RequestHooksMixin):
try:
length = super_len(data)
except (TypeError, AttributeError):
length = False
except (TypeError, AttributeError, UnsupportedOperation):
length = None
if is_stream:
body = data
@@ -372,13 +416,10 @@ class PreparedRequest(RequestEncodingMixin, RequestHooksMixin):
if files:
raise NotImplementedError('Streamed bodies and files are mutually exclusive.')
if length:
self.headers['Content-Length'] = str(length)
if length is not None:
self.headers['Content-Length'] = builtin_str(length)
else:
self.headers['Transfer-Encoding'] = 'chunked'
# Check if file, fo, generator, iterator.
# If not, run through normal process.
else:
# Multi-part file uploads.
if files:
@@ -402,12 +443,12 @@ class PreparedRequest(RequestEncodingMixin, RequestHooksMixin):
def prepare_content_length(self, body):
if hasattr(body, 'seek') and hasattr(body, 'tell'):
body.seek(0, 2)
self.headers['Content-Length'] = str(body.tell())
self.headers['Content-Length'] = builtin_str(body.tell())
body.seek(0, 0)
elif body is not None:
l = super_len(body)
if l:
self.headers['Content-Length'] = str(l)
self.headers['Content-Length'] = builtin_str(l)
elif self.method not in ('GET', 'HEAD'):
self.headers['Content-Length'] = '0'
@@ -437,14 +478,13 @@ class PreparedRequest(RequestEncodingMixin, RequestHooksMixin):
"""Prepares the given HTTP cookie data."""
if isinstance(cookies, cookielib.CookieJar):
cookies = cookies
self._cookies = cookies
else:
cookies = cookiejar_from_dict(cookies)
self._cookies = cookiejar_from_dict(cookies)
if 'cookie' not in self.headers:
cookie_header = get_cookie_header(cookies, self)
if cookie_header is not None:
self.headers['Cookie'] = cookie_header
cookie_header = get_cookie_header(self._cookies, self)
if cookie_header is not None:
self.headers['Cookie'] = cookie_header
def prepare_hooks(self, hooks):
"""Prepares the given hooks."""
@@ -457,6 +497,19 @@ class Response(object):
server's response to an HTTP request.
"""
__attrs__ = [
'_content',
'status_code',
'headers',
'url',
'history',
'encoding',
'reason',
'cookies',
'elapsed',
'request',
]
def __init__(self):
super(Response, self).__init__()
@@ -496,6 +549,24 @@ class Response(object):
#: and the arrival of the response (as a timedelta)
self.elapsed = datetime.timedelta(0)
def __getstate__(self):
# Consume everything; accessing the content attribute makes
# sure the content has been fully read.
if not self._content_consumed:
self.content
return dict(
(attr, getattr(self, attr, None))
for attr in self.__attrs__
)
def __setstate__(self, state):
for name, value in state.items():
setattr(self, name, value)
# pickled objects do not have .raw
setattr(self, '_content_consumed', True)
def __repr__(self):
return '<Response [%s]>' % (self.status_code)
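__getstate__/__setstate__ make Response objects picklable; content is consumed up front because the raw socket cannot travel with the pickle. A sketch:

    import pickle
    import requests

    r = requests.get('http://httpbin.org/get')
    r2 = pickle.loads(pickle.dumps(r))  # __getstate__ forces .content first
    r2.content                          # still available; r2.raw is gone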
@@ -537,11 +608,22 @@ class Response(object):
return iter_slices(self._content, chunk_size)
def generate():
while 1:
chunk = self.raw.read(chunk_size, decode_content=True)
if not chunk:
break
yield chunk
try:
# Special case for urllib3.
try:
for chunk in self.raw.stream(chunk_size,
decode_content=True):
yield chunk
except IncompleteRead as e:
raise ChunkedEncodingError(e)
except AttributeError:
# Standard file-like object.
while True:
chunk = self.raw.read(chunk_size)
if not chunk:
break
yield chunk
self._content_consumed = True
gen = generate()
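iter_content() now prefers urllib3's raw.stream() and converts a truncated chunked body into ChunkedEncodingError rather than leaking httplib's IncompleteRead. Typical streaming use:

    import requests

    r = requests.get('http://httpbin.org/stream/3', stream=True)
    for chunk in r.iter_content(chunk_size=1024):
        pass  # process each chunk; decode_content=True is applied upstream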
@@ -605,8 +687,8 @@ class Response(object):
def text(self):
"""Content of the response, in unicode.
if Response.encoding is None and chardet module is available, encoding
will be guessed.
If Response.encoding is None, encoding will be guessed using
``charade``.
"""
# Try charset from content-type
@@ -648,7 +730,7 @@ class Response(object):
encoding = guess_json_utf(self.content)
if encoding is not None:
return json.loads(self.content.decode(encoding), **kwargs)
return json.loads(self.text or self.content, **kwargs)
return json.loads(self.text, **kwargs)
@property
def links(self):
@@ -683,4 +765,9 @@ class Response(object):
raise HTTPError(http_error_msg, response=self)
def close(self):
"""Closes the underlying file descriptor and releases the connection
back to the pool.
*Note: Should not normally need to be called explicitly.*
"""
return self.raw.release_conn()

libs/requests/packages/charade/__init__.py (34 lines changed)

@@ -30,3 +30,37 @@ def detect(aBuf):
u.feed(aBuf)
u.close()
return u.result
def _description_of(path):
"""Return a string describing the probable encoding of a file."""
from charade.universaldetector import UniversalDetector
u = UniversalDetector()
for line in open(path, 'rb'):
u.feed(line)
u.close()
result = u.result
if result['encoding']:
return '%s: %s with confidence %s' % (path,
result['encoding'],
result['confidence'])
else:
return '%s: no result' % path
def charade_cli():
"""
Script which takes one or more file paths and reports on their detected
encodings
Example::
% chardetect.py somefile someotherfile
somefile: windows-1252 with confidence 0.5
someotherfile: ascii with confidence 1.0
"""
from sys import argv
for path in argv[1:]:
print(_description_of(path))
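The same detector is available programmatically; a sketch (the file path is hypothetical, the result illustrative):

    from requests.packages import charade

    with open('somefile', 'rb') as f:
        charade.detect(f.read())
    # e.g. {'encoding': 'windows-1252', 'confidence': 0.5}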

libs/requests/packages/charade/__main__.py (7 lines changed)

@@ -0,0 +1,7 @@
'''
support ';python -m charade <file1> [file2] ...' package execution syntax (2.7+)
'''
from charade import charade_cli
charade_cli()

libs/requests/packages/charade/jpcntx.py (2 lines changed)

@@ -169,7 +169,7 @@ class JapaneseContextAnalysis:
def get_confidence(self):
# This is just one way to calculate confidence. It works well for me.
if self._mTotalRel > MINIMUM_DATA_THRESHOLD:
return (self._mTotalRel - self._mRelSample[0]) / self._mTotalRel
return float(self._mTotalRel - self._mRelSample[0]) / self._mTotalRel
else:
return DONT_KNOW

libs/requests/packages/charade/latin1prober.py (2 lines changed)

@@ -129,7 +129,7 @@ class Latin1Prober(CharSetProber):
if total < 0.01:
confidence = 0.0
else:
confidence = ((self._mFreqCounter[3] / total)
confidence = ((float(self._mFreqCounter[3]) / total)
- (self._mFreqCounter[1] * 20.0 / total))
if confidence < 0.0:
confidence = 0.0

libs/requests/packages/charade/universaldetector.py (12 lines changed)

@@ -74,12 +74,10 @@ class UniversalDetector:
if aBuf[:3] == codecs.BOM:
# EF BB BF UTF-8 with BOM
self.result = {'encoding': "UTF-8", 'confidence': 1.0}
elif aBuf[:4] == codecs.BOM_UTF32_LE:
elif aBuf[:4] in (codecs.BOM_UTF32_LE, codecs.BOM_UTF32_BE):
# FF FE 00 00 UTF-32, little-endian BOM
self.result = {'encoding': "UTF-32LE", 'confidence': 1.0}
elif aBuf[:4] == codecs.BOM_UTF32_BE:
# 00 00 FE FF UTF-32, big-endian BOM
self.result = {'encoding': "UTF-32BE", 'confidence': 1.0}
self.result = {'encoding': "UTF-32", 'confidence': 1.0}
elif aBuf[:4] == b'\xFE\xFF\x00\x00':
# FE FF 00 00 UCS-4, unusual octet order BOM (3412)
self.result = {
@@ -92,12 +90,10 @@
'encoding': "X-ISO-10646-UCS-4-2143",
'confidence': 1.0
}
elif aBuf[:2] == codecs.BOM_LE:
elif aBuf[:2] == codecs.BOM_LE or aBuf[:2] == codecs.BOM_BE:
# FF FE UTF-16, little endian BOM
self.result = {'encoding': "UTF-16LE", 'confidence': 1.0}
elif aBuf[:2] == codecs.BOM_BE:
# FE FF UTF-16, big endian BOM
self.result = {'encoding': "UTF-16BE", 'confidence': 1.0}
self.result = {'encoding': "UTF-16", 'confidence': 1.0}
self._mGotData = True
if self.result['encoding'] and (self.result['confidence'] > 0.0):
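After this change the BOM checks report endianness-neutral names ('UTF-16', 'UTF-32') and let the decoder resolve byte order from the BOM itself. A rough sketch of the effect (my reading of the detector; treat as an assumption):

    import codecs
    from requests.packages.charade.universaldetector import UniversalDetector

    u = UniversalDetector()
    u.feed(codecs.BOM_UTF16_BE + 'hi'.encode('utf-16-be'))
    u.close()
    u.result  # now {'encoding': 'UTF-16', 'confidence': 1.0}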

libs/requests/packages/urllib3/__init__.py (2 lines changed)

@@ -23,7 +23,7 @@ from . import exceptions
from .filepost import encode_multipart_formdata
from .poolmanager import PoolManager, ProxyManager, proxy_from_url
from .response import HTTPResponse
from .util import make_headers, get_host
from .util import make_headers, get_host, Timeout
# Set default logging handler to avoid "No handler found" warnings.

libs/requests/packages/urllib3/_collections.py (25 lines changed)

@@ -5,7 +5,16 @@
# the MIT License: http://www.opensource.org/licenses/mit-license.php
from collections import MutableMapping
from threading import Lock
try:
from threading import RLock
except ImportError: # Platform-specific: No threads available
class RLock:
def __enter__(self):
pass
def __exit__(self, exc_type, exc_value, traceback):
pass
try: # Python 2.7+
from collections import OrderedDict
@@ -40,18 +49,18 @@ class RecentlyUsedContainer(MutableMapping):
self.dispose_func = dispose_func
self._container = self.ContainerCls()
self._lock = Lock()
self.lock = RLock()
def __getitem__(self, key):
# Re-insert the item, moving it to the end of the eviction line.
with self._lock:
with self.lock:
item = self._container.pop(key)
self._container[key] = item
return item
def __setitem__(self, key, value):
evicted_value = _Null
with self._lock:
with self.lock:
# Possibly evict the existing value of 'key'
evicted_value = self._container.get(key, _Null)
self._container[key] = value
@@ -65,21 +74,21 @@
self.dispose_func(evicted_value)
def __delitem__(self, key):
with self._lock:
with self.lock:
value = self._container.pop(key)
if self.dispose_func:
self.dispose_func(value)
def __len__(self):
with self._lock:
with self.lock:
return len(self._container)
def __iter__(self):
raise NotImplementedError('Iteration over this class is unlikely to be threadsafe.')
def clear(self):
with self._lock:
with self.lock:
# Copy pointers to all values, then wipe the mapping
# under Python 2, this copies the list of values twice :-|
values = list(self._container.values())
@@ -90,5 +99,5 @@
self.dispose_func(value)
def keys(self):
with self._lock:
with self.lock:
return self._container.keys()
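Renaming _lock to lock (and making it reentrant) lets callers take the lock around compound operations while the container's own methods re-acquire it safely. A sketch:

    from requests.packages.urllib3._collections import RecentlyUsedContainer

    container = RecentlyUsedContainer(maxsize=10)
    with container.lock:                 # public, reentrant
        keys = list(container.keys())    # keys() re-enters the same RLock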

libs/requests/packages/urllib3/connection.py (107 lines changed)

@@ -0,0 +1,107 @@
# urllib3/connection.py
# Copyright 2008-2013 Andrey Petrov and contributors (see CONTRIBUTORS.txt)
#
# This module is part of urllib3 and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
import socket
from socket import timeout as SocketTimeout
try: # Python 3
from http.client import HTTPConnection, HTTPException
except ImportError:
from httplib import HTTPConnection, HTTPException
class DummyConnection(object):
"Used to detect a failed ConnectionCls import."
pass
try: # Compiled with SSL?
ssl = None
HTTPSConnection = DummyConnection
class BaseSSLError(BaseException):
pass
try: # Python 3
from http.client import HTTPSConnection
except ImportError:
from httplib import HTTPSConnection
import ssl
BaseSSLError = ssl.SSLError
except (ImportError, AttributeError): # Platform-specific: No SSL.
pass
from .exceptions import (
ConnectTimeoutError,
)
from .packages.ssl_match_hostname import match_hostname
from .util import (
assert_fingerprint,
resolve_cert_reqs,
resolve_ssl_version,
ssl_wrap_socket,
)
class VerifiedHTTPSConnection(HTTPSConnection):
"""
Based on httplib.HTTPSConnection but wraps the socket with
SSL certification.
"""
cert_reqs = None
ca_certs = None
ssl_version = None
def set_cert(self, key_file=None, cert_file=None,
cert_reqs=None, ca_certs=None,
assert_hostname=None, assert_fingerprint=None):
self.key_file = key_file
self.cert_file = cert_file
self.cert_reqs = cert_reqs
self.ca_certs = ca_certs
self.assert_hostname = assert_hostname
self.assert_fingerprint = assert_fingerprint
def connect(self):
# Add certificate verification
try:
sock = socket.create_connection(
address=(self.host, self.port),
timeout=self.timeout,
)
except SocketTimeout:
raise ConnectTimeoutError(
self, "Connection to %s timed out. (connect timeout=%s)" %
(self.host, self.timeout))
resolved_cert_reqs = resolve_cert_reqs(self.cert_reqs)
resolved_ssl_version = resolve_ssl_version(self.ssl_version)
if self._tunnel_host:
self.sock = sock
# Calls self._set_hostport(), so self.host is
# self._tunnel_host below.
self._tunnel()
# Wrap socket using verification with the root certs in
# trusted_root_certs
self.sock = ssl_wrap_socket(sock, self.key_file, self.cert_file,
cert_reqs=resolved_cert_reqs,
ca_certs=self.ca_certs,
server_hostname=self.host,
ssl_version=resolved_ssl_version)
if resolved_cert_reqs != ssl.CERT_NONE:
if self.assert_fingerprint:
assert_fingerprint(self.sock.getpeercert(binary_form=True),
self.assert_fingerprint)
elif self.assert_hostname is not False:
match_hostname(self.sock.getpeercert(),
self.assert_hostname or self.host)
if ssl:
HTTPSConnection = VerifiedHTTPSConnection
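A rough sketch of driving the relocated class directly (host and CA-bundle path are hypothetical; normally the pool does this for you):

    from requests.packages.urllib3.connection import VerifiedHTTPSConnection

    conn = VerifiedHTTPSConnection('example.com', 443)
    conn.set_cert(cert_reqs='CERT_REQUIRED',
                  ca_certs='/etc/ssl/certs/ca-certificates.crt')
    conn.connect()  # create_connection + ssl_wrap_socket + hostname check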

libs/requests/packages/urllib3/connectionpool.py (360 lines changed)

@@ -4,57 +4,45 @@
# This module is part of urllib3 and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
import logging
import socket
import errno
import logging
from socket import error as SocketError, timeout as SocketTimeout
from .util import resolve_cert_reqs, resolve_ssl_version, assert_fingerprint
try: # Python 3
from http.client import HTTPConnection, HTTPException
from http.client import HTTP_PORT, HTTPS_PORT
except ImportError:
from httplib import HTTPConnection, HTTPException
from httplib import HTTP_PORT, HTTPS_PORT
import socket
try: # Python 3
from queue import LifoQueue, Empty, Full
except ImportError:
from Queue import LifoQueue, Empty, Full
import Queue as _ # Platform-specific: Windows
try: # Compiled with SSL?
HTTPSConnection = object
BaseSSLError = None
ssl = None
try: # Python 3
from http.client import HTTPSConnection
except ImportError:
from httplib import HTTPSConnection
import ssl
BaseSSLError = ssl.SSLError
except (ImportError, AttributeError): # Platform-specific: No SSL.
pass
from .request import RequestMethods
from .response import HTTPResponse
from .util import get_host, is_connection_dropped, ssl_wrap_socket
from .exceptions import (
ClosedPoolError,
ConnectTimeoutError,
EmptyPoolError,
HostChangedError,
MaxRetryError,
SSLError,
TimeoutError,
ReadTimeoutError,
ProxyError,
)
from .packages.ssl_match_hostname import match_hostname, CertificateError
from .packages.ssl_match_hostname import CertificateError
from .packages import six
from .connection import (
DummyConnection,
HTTPConnection, HTTPSConnection, VerifiedHTTPSConnection,
HTTPException, BaseSSLError,
)
from .request import RequestMethods
from .response import HTTPResponse
from .util import (
assert_fingerprint,
get_host,
is_connection_dropped,
Timeout,
)
xrange = six.moves.xrange
@@ -64,56 +52,11 @@ log = logging.getLogger(__name__)
_Default = object()
port_by_scheme = {
'http': HTTP_PORT,
'https': HTTPS_PORT,
'http': 80,
'https': 443,
}
## Connection objects (extension of httplib)
class VerifiedHTTPSConnection(HTTPSConnection):
"""
Based on httplib.HTTPSConnection but wraps the socket with
SSL certification.
"""
cert_reqs = None
ca_certs = None
ssl_version = None
def set_cert(self, key_file=None, cert_file=None,
cert_reqs=None, ca_certs=None,
assert_hostname=None, assert_fingerprint=None):
self.key_file = key_file
self.cert_file = cert_file
self.cert_reqs = cert_reqs
self.ca_certs = ca_certs
self.assert_hostname = assert_hostname
self.assert_fingerprint = assert_fingerprint
def connect(self):
# Add certificate verification
sock = socket.create_connection((self.host, self.port), self.timeout)
resolved_cert_reqs = resolve_cert_reqs(self.cert_reqs)
resolved_ssl_version = resolve_ssl_version(self.ssl_version)
# Wrap socket using verification with the root certs in
# trusted_root_certs
self.sock = ssl_wrap_socket(sock, self.key_file, self.cert_file,
cert_reqs=resolved_cert_reqs,
ca_certs=self.ca_certs,
server_hostname=self.host,
ssl_version=resolved_ssl_version)
if resolved_cert_reqs != ssl.CERT_NONE:
if self.assert_fingerprint:
assert_fingerprint(self.sock.getpeercert(binary_form=True),
self.assert_fingerprint)
else:
match_hostname(self.sock.getpeercert(),
self.assert_hostname or self.host)
## Pool objects
class ConnectionPool(object):
@@ -126,6 +69,9 @@ class ConnectionPool(object):
QueueCls = LifoQueue
def __init__(self, host, port=None):
# httplib doesn't like it when we include brackets in ipv6 addresses
host = host.strip('[]')
self.host = host
self.port = port
@@ -133,6 +79,8 @@
return '%s(host=%r, port=%r)' % (type(self).__name__,
self.host, self.port)
# This is taken from http://hg.python.org/cpython/file/7aaba721ebc0/Lib/socket.py#l252
_blocking_errnos = set([errno.EAGAIN, errno.EWOULDBLOCK])
class HTTPConnectionPool(ConnectionPool, RequestMethods):
"""
@@ -151,9 +99,15 @@ class HTTPConnectionPool(ConnectionPool, RequestMethods):
as a valid HTTP/1.0 or 1.1 status line, passed into
:class:`httplib.HTTPConnection`.
.. note::
Only works in Python 2. This parameter is ignored in Python 3.
:param timeout:
Socket timeout for each individual connection, can be a float. None
disables timeout.
Socket timeout in seconds for each individual connection. This can
be a float or integer, which sets the timeout for the HTTP request,
or an instance of :class:`urllib3.util.Timeout` which gives you more
fine-grained control over request timeouts. After the constructor has
been parsed, this is always a `urllib3.util.Timeout` object.
:param maxsize:
Number of connections to save that can be reused. More than 1 is useful
@@ -171,20 +125,40 @@ class HTTPConnectionPool(ConnectionPool, RequestMethods):
:param headers:
Headers to include with all requests, unless other headers are given
explicitly.
:param _proxy:
Parsed proxy URL, should not be used directly, instead, see
:class:`urllib3.connectionpool.ProxyManager`"
:param _proxy_headers:
A dictionary with proxy headers, should not be used directly,
instead, see :class:`urllib3.connectionpool.ProxyManager`"
"""
scheme = 'http'
ConnectionCls = HTTPConnection
def __init__(self, host, port=None, strict=False, timeout=None, maxsize=1,
block=False, headers=None):
def __init__(self, host, port=None, strict=False,
timeout=Timeout.DEFAULT_TIMEOUT, maxsize=1, block=False,
headers=None, _proxy=None, _proxy_headers=None):
ConnectionPool.__init__(self, host, port)
RequestMethods.__init__(self, headers)
self.strict = strict
# This is for backwards compatibility and can be removed once a timeout
# can only be set to a Timeout object
if not isinstance(timeout, Timeout):
timeout = Timeout.from_float(timeout)
self.timeout = timeout
self.pool = self.QueueCls(maxsize)
self.block = block
self.proxy = _proxy
self.proxy_headers = _proxy_headers or {}
# Fill the queue up so that doing get() on it will block properly
for _ in xrange(maxsize):
self.pool.put(None)
@@ -200,9 +174,14 @@ class HTTPConnectionPool(ConnectionPool, RequestMethods):
self.num_connections += 1
log.info("Starting new HTTP connection (%d): %s" %
(self.num_connections, self.host))
return HTTPConnection(host=self.host,
port=self.port,
strict=self.strict)
extra_params = {}
if not six.PY3: # Python 2
extra_params['strict'] = self.strict
return self.ConnectionCls(host=self.host, port=self.port,
timeout=self.timeout.connect_timeout,
**extra_params)
def _get_conn(self, timeout=None):
"""
@@ -263,31 +242,100 @@ class HTTPConnectionPool(ConnectionPool, RequestMethods):
% self.host)
# Connection never got put back into the pool, close it.
conn.close()
if conn:
conn.close()
def _get_timeout(self, timeout):
""" Helper that always returns a :class:`urllib3.util.Timeout` """
if timeout is _Default:
return self.timeout.clone()
if isinstance(timeout, Timeout):
return timeout.clone()
else:
# User passed us an int/float. This is for backwards compatibility,
# can be removed later
return Timeout.from_float(timeout)
def _make_request(self, conn, method, url, timeout=_Default,
**httplib_request_kw):
"""
Perform a request on a given httplib connection object taken from our
pool.
:param conn:
a connection from one of our connection pools
:param timeout:
Socket timeout in seconds for the request. This can be a
float or integer, which will set the same timeout value for
the socket connect and the socket read, or an instance of
:class:`urllib3.util.Timeout`, which gives you more fine-grained
control over your timeouts.
"""
self.num_requests += 1
if timeout is _Default:
timeout = self.timeout
timeout_obj = self._get_timeout(timeout)
conn.timeout = timeout # This only does anything in Py26+
conn.request(method, url, **httplib_request_kw)
try:
timeout_obj.start_connect()
conn.timeout = timeout_obj.connect_timeout
# conn.request() calls httplib.*.request, not the method in
# urllib3.request. It also calls makefile (recv) on the socket.
conn.request(method, url, **httplib_request_kw)
except SocketTimeout:
raise ConnectTimeoutError(
self, "Connection to %s timed out. (connect timeout=%s)" %
(self.host, timeout_obj.connect_timeout))
# Reset the timeout for the recv() on the socket
read_timeout = timeout_obj.read_timeout
# App Engine doesn't have a sock attr
if hasattr(conn, 'sock'):
# In Python 3 socket.py will catch EAGAIN and return None when you
# try and read into the file pointer created by http.client, which
# instead raises a BadStatusLine exception. Instead of catching
# the exception and assuming all BadStatusLine exceptions are read
# timeouts, check for a zero timeout before making the request.
if read_timeout == 0:
raise ReadTimeoutError(
self, url,
"Read timed out. (read timeout=%s)" % read_timeout)
if read_timeout is Timeout.DEFAULT_TIMEOUT:
conn.sock.settimeout(socket.getdefaulttimeout())
else: # None or a value
conn.sock.settimeout(read_timeout)
# Receive the response from the server
try:
try: # Python 2.7+, use buffering of HTTP responses
httplib_response = conn.getresponse(buffering=True)
except TypeError: # Python 2.6 and older
httplib_response = conn.getresponse()
except SocketTimeout:
raise ReadTimeoutError(
self, url, "Read timed out. (read timeout=%s)" % read_timeout)
# Set timeout
sock = getattr(conn, 'sock', False) # AppEngine doesn't have sock attr.
if sock:
sock.settimeout(timeout)
except BaseSSLError as e:
# Catch possible read timeouts thrown as SSL errors. If not the
# case, rethrow the original. We need to do this because of:
# http://bugs.python.org/issue10272
if 'timed out' in str(e) or \
'did not complete (read)' in str(e): # Python 2.6
raise ReadTimeoutError(self, url, "Read timed out.")
try: # Python 2.7+, use buffering of HTTP responses
httplib_response = conn.getresponse(buffering=True)
except TypeError: # Python 2.6 and older
httplib_response = conn.getresponse()
raise
except SocketError as e: # Platform-specific: Python 2
# See the above comment about EAGAIN in Python 3. In Python 2 we
# have to specifically catch it and throw the timeout error
if e.errno in _blocking_errnos:
raise ReadTimeoutError(
self, url,
"Read timed out. (read timeout=%s)" % read_timeout)
raise
# AppEngine doesn't have a version attr.
http_version = getattr(conn, '_http_vsn_str', 'HTTP/?')
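The per-phase handling above distinguishes connect timeouts from read timeouts. A sketch of triggering the read path (endpoint choice is illustrative):

    from requests.packages.urllib3 import HTTPConnectionPool, Timeout
    from requests.packages.urllib3.exceptions import ReadTimeoutError

    pool = HTTPConnectionPool('httpbin.org',
                              timeout=Timeout(connect=2.0, read=0.1))
    try:
        pool.request('GET', '/delay/3')
    except ReadTimeoutError:
        pass  # read phase exceeded its 0.1s budget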
@@ -367,7 +415,7 @@ class HTTPConnectionPool(ConnectionPool, RequestMethods):
:param redirect:
If True, automatically handle redirects (status codes 301, 302,
303, 307). Each redirect counts as a retry.
303, 307, 308). Each redirect counts as a retry.
:param assert_same_host:
If ``True``, will make sure that the host of the pool requests is
@@ -375,7 +423,9 @@ class HTTPConnectionPool(ConnectionPool, RequestMethods):
use the pool on an HTTP proxy and request foreign hosts.
:param timeout:
If specified, overrides the default timeout for this one request.
If specified, overrides the default timeout for this one
request. It may be a float (in seconds) or an instance of
:class:`urllib3.util.Timeout`.
:param pool_timeout:
If set and the pool is set to block=True, then this method will
@@ -402,22 +452,22 @@ class HTTPConnectionPool(ConnectionPool, RequestMethods):
if retries < 0:
raise MaxRetryError(self, url)
if timeout is _Default:
timeout = self.timeout
if release_conn is None:
release_conn = response_kw.get('preload_content', True)
# Check host
if assert_same_host and not self.is_same_host(url):
host = "%s://%s" % (self.scheme, self.host)
if self.port:
host = "%s:%d" % (host, self.port)
raise HostChangedError(self, url, retries - 1)
conn = None
# Merge the proxy headers. Only do this in HTTP. We have to copy the
# headers dict so we can safely change it without those changes being
# reflected in anyone else's copy.
if self.scheme == 'http':
headers = headers.copy()
headers.update(self.proxy_headers)
try:
# Request a connection from the queue
conn = self._get_conn(timeout=pool_timeout)
@@ -444,27 +494,31 @@ class HTTPConnectionPool(ConnectionPool, RequestMethods):
# ``response.release_conn()`` is called (implicitly by
# ``response.read()``)
except Empty as e:
except Empty:
# Timed out by queue
raise TimeoutError(self, url,
"Request timed out. (pool_timeout=%s)" %
pool_timeout)
except SocketTimeout as e:
# Timed out by socket
raise TimeoutError(self, url,
"Request timed out. (timeout=%s)" %
timeout)
raise EmptyPoolError(self, "No pool connections are available.")
except BaseSSLError as e:
# SSL certificate error
raise SSLError(e)
except CertificateError as e:
# Name mismatch
raise SSLError(e)
except TimeoutError as e:
# Connection broken, discard.
conn = None
# Save the error off for retry logic.
err = e
if retries == 0:
raise
except (HTTPException, SocketError) as e:
if isinstance(e, SocketError) and self.proxy is not None:
raise ProxyError('Cannot connect to proxy. '
'Socket error: %s.' % e)
# Connection broken, discard. It will be replaced next _get_conn().
conn = None
# This is necessary so we can access e below
@@ -513,6 +567,7 @@ class HTTPSConnectionPool(HTTPConnectionPool):
:class:`.VerifiedHTTPSConnection` uses one of ``assert_fingerprint``,
``assert_hostname`` and ``host`` in this order to verify connections.
If ``assert_hostname`` is False, no verification is done.
The ``key_file``, ``cert_file``, ``cert_reqs``, ``ca_certs`` and
``ssl_version`` are only used if :mod:`ssl` is available and are fed into
@@ -521,17 +576,18 @@ class HTTPSConnectionPool(HTTPConnectionPool):
"""
scheme = 'https'
ConnectionCls = HTTPSConnection
def __init__(self, host, port=None,
strict=False, timeout=None, maxsize=1,
block=False, headers=None,
_proxy=None, _proxy_headers=None,
key_file=None, cert_file=None, cert_reqs=None,
ca_certs=None, ssl_version=None,
assert_hostname=None, assert_fingerprint=None):
HTTPConnectionPool.__init__(self, host, port,
strict, timeout, maxsize,
block, headers)
HTTPConnectionPool.__init__(self, host, port, strict, timeout, maxsize,
block, headers, _proxy, _proxy_headers)
self.key_file = key_file
self.cert_file = cert_file
self.cert_reqs = cert_reqs
@@ -540,6 +596,34 @@ class HTTPSConnectionPool(HTTPConnectionPool):
self.assert_hostname = assert_hostname
self.assert_fingerprint = assert_fingerprint
def _prepare_conn(self, conn):
"""
Prepare the ``connection`` for :meth:`urllib3.util.ssl_wrap_socket`
and establish the tunnel if proxy is used.
"""
if isinstance(conn, VerifiedHTTPSConnection):
conn.set_cert(key_file=self.key_file,
cert_file=self.cert_file,
cert_reqs=self.cert_reqs,
ca_certs=self.ca_certs,
assert_hostname=self.assert_hostname,
assert_fingerprint=self.assert_fingerprint)
conn.ssl_version = self.ssl_version
if self.proxy is not None:
# Python 2.7+
try:
set_tunnel = conn.set_tunnel
except AttributeError: # Platform-specific: Python 2.6
set_tunnel = conn._set_tunnel
set_tunnel(self.host, self.port, self.proxy_headers)
# Establish tunnel connection early, because otherwise httplib
# would improperly set Host: header to proxy's IP:port.
conn.connect()
return conn
def _new_conn(self):
"""
Return a fresh :class:`httplib.HTTPSConnection`.
@@ -548,26 +632,26 @@ class HTTPSConnectionPool(HTTPConnectionPool):
log.info("Starting new HTTPS connection (%d): %s"
% (self.num_connections, self.host))
if not ssl: # Platform-specific: Python compiled without +ssl
if not HTTPSConnection or HTTPSConnection is object:
raise SSLError("Can't connect to HTTPS URL because the SSL "
"module is not available.")
if not self.ConnectionCls or self.ConnectionCls is DummyConnection:
# Platform-specific: Python without ssl
raise SSLError("Can't connect to HTTPS URL because the SSL "
"module is not available.")
return HTTPSConnection(host=self.host,
port=self.port,
strict=self.strict)
actual_host = self.host
actual_port = self.port
if self.proxy is not None:
actual_host = self.proxy.host
actual_port = self.proxy.port
connection = VerifiedHTTPSConnection(host=self.host,
port=self.port,
strict=self.strict)
connection.set_cert(key_file=self.key_file, cert_file=self.cert_file,
cert_reqs=self.cert_reqs, ca_certs=self.ca_certs,
assert_hostname=self.assert_hostname,
assert_fingerprint=self.assert_fingerprint)
extra_params = {}
if not six.PY3: # Python 2
extra_params['strict'] = self.strict
connection.ssl_version = self.ssl_version
conn = self.ConnectionCls(host=actual_host, port=actual_port,
timeout=self.timeout.connect_timeout,
**extra_params)
return connection
return self._prepare_conn(conn)
def connection_from_url(url, **kw):

libs/requests/packages/urllib3/contrib/ntlmpool.py (2 lines changed)

@@ -33,7 +33,7 @@ class NTLMConnectionPool(HTTPSConnectionPool):
def __init__(self, user, pw, authurl, *args, **kwargs):
"""
authurl is a random URL on the server that is protected by NTLM.
user is the Windows user, probably in the DOMAIN\username format.
user is the Windows user, probably in the DOMAIN\\username format.
pw is the password for the user.
"""
super(NTLMConnectionPool, self).__init__(*args, **kwargs)

203
libs/requests/packages/urllib3/contrib/pyopenssl.py

@@ -20,15 +20,16 @@ Now you can use :mod:`urllib3` as you normally would, and it will support SNI
when the required modules are installed.
'''
from ndg.httpsclient.ssl_peer_verification import (ServerSSLCertVerification,
SUBJ_ALT_NAME_SUPPORT)
from ndg.httpsclient.ssl_peer_verification import SUBJ_ALT_NAME_SUPPORT
from ndg.httpsclient.subj_alt_name import SubjectAltName
import OpenSSL.SSL
from pyasn1.codec.der import decoder as der_decoder
from socket import _fileobject
import ssl
import select
from cStringIO import StringIO
from .. import connectionpool
from .. import connection
from .. import util
__all__ = ['inject_into_urllib3', 'extract_from_urllib3']
@ -51,20 +52,20 @@ _openssl_verify = {
orig_util_HAS_SNI = util.HAS_SNI
orig_connectionpool_ssl_wrap_socket = connectionpool.ssl_wrap_socket
orig_connection_ssl_wrap_socket = connection.ssl_wrap_socket
def inject_into_urllib3():
'Monkey-patch urllib3 with PyOpenSSL-backed SSL-support.'
connectionpool.ssl_wrap_socket = ssl_wrap_socket
connection.ssl_wrap_socket = ssl_wrap_socket
util.HAS_SNI = HAS_SNI
def extract_from_urllib3():
'Undo monkey-patching by :func:`inject_into_urllib3`.'
connectionpool.ssl_wrap_socket = orig_connectionpool_ssl_wrap_socket
connection.ssl_wrap_socket = orig_connection_ssl_wrap_socket
util.HAS_SNI = orig_util_HAS_SNI
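A short sketch of toggling the two hooks above at runtime (assuming pyOpenSSL, ndg-httpsclient and pyasn1 are installed, since this module imports them at the top):
from requests.packages.urllib3.contrib import pyopenssl
pyopenssl.inject_into_urllib3()      # SSL now goes through PyOpenSSL (SNI works)
try:
    pass  # make HTTPS requests that need SNI on Python 2 here
finally:
    pyopenssl.extract_from_urllib3() # restore the stdlib ssl implementation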
@ -99,6 +100,172 @@ def get_subj_alt_name(peer_cert):
return dns_name
class fileobject(_fileobject):
def read(self, size=-1):
# Use max, disallow tiny reads in a loop as they are very inefficient.
# We never leave read() with any leftover data from a new recv() call
# in our internal buffer.
rbufsize = max(self._rbufsize, self.default_bufsize)
# Our use of StringIO rather than lists of string objects returned by
# recv() minimizes memory usage and fragmentation that occurs when
# rbufsize is large compared to the typical return value of recv().
buf = self._rbuf
buf.seek(0, 2) # seek end
if size < 0:
# Read until EOF
self._rbuf = StringIO() # reset _rbuf. we consume it via buf.
while True:
try:
data = self._sock.recv(rbufsize)
except OpenSSL.SSL.WantReadError:
continue
if not data:
break
buf.write(data)
return buf.getvalue()
else:
# Read until size bytes or EOF seen, whichever comes first
buf_len = buf.tell()
if buf_len >= size:
# Already have size bytes in our buffer? Extract and return.
buf.seek(0)
rv = buf.read(size)
self._rbuf = StringIO()
self._rbuf.write(buf.read())
return rv
self._rbuf = StringIO() # reset _rbuf. we consume it via buf.
while True:
left = size - buf_len
# recv() will malloc the amount of memory given as its
# parameter even though it often returns much less data
# than that. The returned data string is short lived
# as we copy it into a StringIO and free it. This avoids
# fragmentation issues on many platforms.
try:
data = self._sock.recv(left)
except OpenSSL.SSL.WantReadError:
continue
if not data:
break
n = len(data)
if n == size and not buf_len:
# Shortcut. Avoid buffer data copies when:
# - We have no data in our buffer.
# AND
# - Our call to recv returned exactly the
# number of bytes we were asked to read.
return data
if n == left:
buf.write(data)
del data # explicit free
break
assert n <= left, "recv(%d) returned %d bytes" % (left, n)
buf.write(data)
buf_len += n
del data # explicit free
#assert buf_len == buf.tell()
return buf.getvalue()
def readline(self, size=-1):
buf = self._rbuf
buf.seek(0, 2) # seek end
if buf.tell() > 0:
# check if we already have it in our buffer
buf.seek(0)
bline = buf.readline(size)
if bline.endswith('\n') or len(bline) == size:
self._rbuf = StringIO()
self._rbuf.write(buf.read())
return bline
del bline
if size < 0:
# Read until \n or EOF, whichever comes first
if self._rbufsize <= 1:
# Speed up unbuffered case
buf.seek(0)
buffers = [buf.read()]
self._rbuf = StringIO() # reset _rbuf. we consume it via buf.
data = None
recv = self._sock.recv
while True:
try:
while data != "\n":
data = recv(1)
if not data:
break
buffers.append(data)
except OpenSSL.SSL.WantReadError:
continue
break
return "".join(buffers)
buf.seek(0, 2) # seek end
self._rbuf = StringIO() # reset _rbuf. we consume it via buf.
while True:
try:
data = self._sock.recv(self._rbufsize)
except OpenSSL.SSL.WantReadError:
continue
if not data:
break
nl = data.find('\n')
if nl >= 0:
nl += 1
buf.write(data[:nl])
self._rbuf.write(data[nl:])
del data
break
buf.write(data)
return buf.getvalue()
else:
# Read until size bytes or \n or EOF seen, whichever comes first
buf.seek(0, 2) # seek end
buf_len = buf.tell()
if buf_len >= size:
buf.seek(0)
rv = buf.read(size)
self._rbuf = StringIO()
self._rbuf.write(buf.read())
return rv
self._rbuf = StringIO() # reset _rbuf. we consume it via buf.
while True:
try:
data = self._sock.recv(self._rbufsize)
except OpenSSL.SSL.WantReadError:
continue
if not data:
break
left = size - buf_len
# did we just receive a newline?
nl = data.find('\n', 0, left)
if nl >= 0:
nl += 1
# save the excess data to _rbuf
self._rbuf.write(data[nl:])
if buf_len:
buf.write(data[:nl])
break
else:
# Shortcut. Avoid data copy through buf when returning
# a substring of our first recv().
return data[:nl]
n = len(data)
if n == size and not buf_len:
# Shortcut. Avoid data copy through buf when
# returning exactly all of our first recv().
return data
if n >= left:
buf.write(data[:left])
self._rbuf.write(data[left:])
break
buf.write(data)
buf_len += n
#assert buf_len == buf.tell()
return buf.getvalue()
class WrappedSocket(object):
'''API-compatibility wrapper for Python OpenSSL's Connection-class.'''
@ -106,8 +273,11 @@ class WrappedSocket(object):
self.connection = connection
self.socket = socket
def fileno(self):
return self.socket.fileno()
def makefile(self, mode, bufsize=-1):
return _fileobject(self.connection, mode, bufsize)
return fileobject(self.connection, mode, bufsize)
def settimeout(self, timeout):
return self.socket.settimeout(timeout)
@ -115,10 +285,14 @@ class WrappedSocket(object):
def sendall(self, data):
return self.connection.sendall(data)
def close(self):
return self.connection.shutdown()
def getpeercert(self, binary_form=False):
x509 = self.connection.get_peer_certificate()
if not x509:
raise ssl.SSLError('')
return x509
if binary_form:
return OpenSSL.crypto.dump_certificate(
@ -159,9 +333,14 @@ def ssl_wrap_socket(sock, keyfile=None, certfile=None, cert_reqs=None,
cnx = OpenSSL.SSL.Connection(ctx, sock)
cnx.set_tlsext_host_name(server_hostname)
cnx.set_connect_state()
try:
cnx.do_handshake()
except OpenSSL.SSL.Error as e:
raise ssl.SSLError('bad handshake', e)
while True:
try:
cnx.do_handshake()
except OpenSSL.SSL.WantReadError:
select.select([sock], [], [])
continue
except OpenSSL.SSL.Error as e:
raise ssl.SSLError('bad handshake', e)
break
return WrappedSocket(cnx, sock)

30
libs/requests/packages/urllib3/exceptions.py

@ -39,6 +39,11 @@ class SSLError(HTTPError):
pass
class ProxyError(HTTPError):
"Raised when the connection to a proxy fails."
pass
class DecodeError(HTTPError):
"Raised when automatic decoding based on Content-Type fails."
pass
@ -70,8 +75,29 @@ class HostChangedError(RequestError):
self.retries = retries
class TimeoutError(RequestError):
"Raised when a socket timeout occurs."
class TimeoutStateError(HTTPError):
""" Raised when passing an invalid state to a timeout """
pass
class TimeoutError(HTTPError):
""" Raised when a socket timeout error occurs.
Catching this error will catch both :exc:`ReadTimeoutErrors
<ReadTimeoutError>` and :exc:`ConnectTimeoutErrors <ConnectTimeoutError>`.
"""
pass
class ReadTimeoutError(TimeoutError, RequestError):
"Raised when a socket timeout occurs while receiving data from a server"
pass
# This timeout error does not have a URL attached and needs to inherit from the
# base HTTPError
class ConnectTimeoutError(TimeoutError):
"Raised when a socket timeout occurs while connecting to a server"
pass
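A minimal sketch of the resulting catch semantics: the umbrella TimeoutError covers both concrete timeout classes added here.
from requests.packages.urllib3.exceptions import (
    ConnectTimeoutError, ReadTimeoutError, TimeoutError)
assert issubclass(ConnectTimeoutError, TimeoutError)
assert issubclass(ReadTimeoutError, TimeoutError)
try:
    raise ConnectTimeoutError("connect timed out")
except TimeoutError as e:   # catches read timeouts the same way
    print("caught: %s" % (e,))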

177
libs/requests/packages/urllib3/fields.py

@ -0,0 +1,177 @@
# urllib3/fields.py
# Copyright 2008-2013 Andrey Petrov and contributors (see CONTRIBUTORS.txt)
#
# This module is part of urllib3 and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
import email.utils
import mimetypes
from .packages import six
def guess_content_type(filename, default='application/octet-stream'):
"""
Guess the "Content-Type" of a file.
:param filename:
The filename to guess the "Content-Type" of using :mod:`mimetypes`.
:param default:
If no "Content-Type" can be guessed, default to `default`.
"""
if filename:
return mimetypes.guess_type(filename)[0] or default
return default
def format_header_param(name, value):
"""
Helper function to format and quote a single header parameter.
Particularly useful for header parameters which might contain
non-ASCII values, like file names. This follows RFC 2231, as
suggested by RFC 2388 Section 4.4.
:param name:
The name of the parameter, a string expected to be ASCII only.
:param value:
The value of the parameter, provided as a unicode string.
"""
if not any(ch in value for ch in '"\\\r\n'):
result = '%s="%s"' % (name, value)
try:
result.encode('ascii')
except UnicodeEncodeError:
pass
else:
return result
if not six.PY3: # Python 2:
value = value.encode('utf-8')
value = email.utils.encode_rfc2231(value, 'utf-8')
value = '%s*=%s' % (name, value)
return value
class RequestField(object):
"""
A data container for request body parameters.
:param name:
The name of this request field.
:param data:
The data/value body.
:param filename:
An optional filename of the request field.
:param headers:
An optional dict-like object of headers to initially use for the field.
"""
def __init__(self, name, data, filename=None, headers=None):
self._name = name
self._filename = filename
self.data = data
self.headers = {}
if headers:
self.headers = dict(headers)
@classmethod
def from_tuples(cls, fieldname, value):
"""
A :class:`~urllib3.fields.RequestField` factory from old-style tuple parameters.
Supports constructing :class:`~urllib3.fields.RequestField` from parameters
of key/value strings AND key/filetuple. A filetuple is a (filename, data, MIME type)
tuple where the MIME type is optional. For example: ::
'foo': 'bar',
'fakefile': ('foofile.txt', 'contents of foofile'),
'realfile': ('barfile.txt', open('realfile').read()),
'typedfile': ('bazfile.bin', open('bazfile').read(), 'image/jpeg'),
'nonamefile': 'contents of nonamefile field',
Field names and filenames must be unicode.
"""
if isinstance(value, tuple):
if len(value) == 3:
filename, data, content_type = value
else:
filename, data = value
content_type = guess_content_type(filename)
else:
filename = None
content_type = None
data = value
request_param = cls(fieldname, data, filename=filename)
request_param.make_multipart(content_type=content_type)
return request_param
def _render_part(self, name, value):
"""
Overridable helper function to format a single header parameter.
:param name:
The name of the parameter, a string expected to be ASCII only.
:param value:
The value of the parameter, provided as a unicode string.
"""
return format_header_param(name, value)
def _render_parts(self, header_parts):
"""
Helper function to format and quote a single header.
Useful for single headers that are composed of multiple items. E.g.,
'Content-Disposition' fields.
:param header_parts:
A sequence of (k, v) tuples or a :class:`dict` of (k, v) to format as
`k1="v1"; k2="v2"; ...`.
"""
parts = []
iterable = header_parts
if isinstance(header_parts, dict):
iterable = header_parts.items()
for name, value in iterable:
if value:
parts.append(self._render_part(name, value))
return '; '.join(parts)
def render_headers(self):
"""
Renders the headers for this request field.
"""
lines = []
sort_keys = ['Content-Disposition', 'Content-Type', 'Content-Location']
for sort_key in sort_keys:
if self.headers.get(sort_key, False):
lines.append('%s: %s' % (sort_key, self.headers[sort_key]))
for header_name, header_value in self.headers.items():
if header_name not in sort_keys:
if header_value:
lines.append('%s: %s' % (header_name, header_value))
lines.append('\r\n')
return '\r\n'.join(lines)
def make_multipart(self, content_disposition=None, content_type=None, content_location=None):
"""
Makes this request field into a multipart request field.
This method sets the "Content-Disposition", "Content-Type" and
"Content-Location" headers on the request parameter.
:param content_type:
The 'Content-Type' of the request body.
:param content_location:
The 'Content-Location' of the request body.
"""
self.headers['Content-Disposition'] = content_disposition or 'form-data'
self.headers['Content-Disposition'] += '; '.join(['', self._render_parts((('name', self._name), ('filename', self._filename)))])
self.headers['Content-Type'] = content_type
self.headers['Content-Location'] = content_location
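For illustration, a minimal sketch of the new field API (the field name, payload and filename are made up):
from requests.packages.urllib3.fields import RequestField
field = RequestField('file', b'contents', filename='report.txt')
field.make_multipart(content_type='text/plain')
print(field.render_headers())
# Content-Disposition: form-data; name="file"; filename="report.txt"
# Content-Type: text/plain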

59
libs/requests/packages/urllib3/filepost.py

@ -1,5 +1,5 @@
# urllib3/filepost.py
# Copyright 2008-2012 Andrey Petrov and contributors (see CONTRIBUTORS.txt)
# Copyright 2008-2013 Andrey Petrov and contributors (see CONTRIBUTORS.txt)
#
# This module is part of urllib3 and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
@ -12,6 +12,7 @@ from io import BytesIO
from .packages import six
from .packages.six import b
from .fields import RequestField
writer = codecs.lookup('utf-8')[3]
@ -23,15 +24,38 @@ def choose_boundary():
return uuid4().hex
def get_content_type(filename):
return mimetypes.guess_type(filename)[0] or 'application/octet-stream'
def iter_field_objects(fields):
"""
Iterate over fields.
Supports list of (k, v) tuples and dicts, and lists of
:class:`~urllib3.fields.RequestField`.
"""
if isinstance(fields, dict):
i = six.iteritems(fields)
else:
i = iter(fields)
for field in i:
if isinstance(field, RequestField):
yield field
else:
yield RequestField.from_tuples(*field)
def iter_fields(fields):
"""
Iterate over fields.
.. deprecated ::
The addition of `~urllib3.fields.RequestField` makes this function
obsolete. Instead, use :func:`iter_field_objects`, which returns
`~urllib3.fields.RequestField` objects.
Supports list of (k, v) tuples and dicts.
"""
if isinstance(fields, dict):
return ((k, v) for k, v in six.iteritems(fields))
@ -44,15 +68,7 @@ def encode_multipart_formdata(fields, boundary=None):
Encode a dictionary of ``fields`` using the multipart/form-data MIME format.
:param fields:
Dictionary of fields or list of (key, value) or (key, value, MIME type)
field tuples. The key is treated as the field name, and the value as
the body of the form-data bytes. If the value is a tuple of two
elements, then the first element is treated as the filename of the
form-data section and a suitable MIME type is guessed based on the
filename. If the value is a tuple of three elements, then the third
element is treated as an explicit MIME type of the form-data section.
Field names and filenames must be unicode.
Dictionary of fields or list of (key, :class:`~urllib3.fields.RequestField`).
:param boundary:
If not specified, then a random boundary will be generated using
@ -62,24 +78,11 @@ def encode_multipart_formdata(fields, boundary=None):
if boundary is None:
boundary = choose_boundary()
for fieldname, value in iter_fields(fields):
for field in iter_field_objects(fields):
body.write(b('--%s\r\n' % (boundary)))
if isinstance(value, tuple):
if len(value) == 3:
filename, data, content_type = value
else:
filename, data = value
content_type = get_content_type(filename)
writer(body).write('Content-Disposition: form-data; name="%s"; '
'filename="%s"\r\n' % (fieldname, filename))
body.write(b('Content-Type: %s\r\n\r\n' %
(content_type,)))
else:
data = value
writer(body).write('Content-Disposition: form-data; name="%s"\r\n'
% (fieldname))
body.write(b'\r\n')
writer(body).write(field.render_headers())
data = field.data
if isinstance(data, int):
data = str(data) # Backwards compatibility
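A usage sketch of the rewritten encoder, which now accepts plain key/value pairs, filetuples, or ready-made RequestField objects (values here are illustrative):
from requests.packages.urllib3.filepost import encode_multipart_formdata
body, content_type = encode_multipart_formdata({
    'token': 'abc123',                                       # key/value
    'upload': ('notes.txt', 'file contents', 'text/plain'),  # filetuple
})
print(content_type)   # multipart/form-data; boundary=<random hex>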

74
libs/requests/packages/urllib3/packages/ssl_match_hostname/__init__.py

@ -1,61 +1,13 @@
"""The match_hostname() function from Python 3.2, essential when using SSL."""
import re
__version__ = '3.2.2'
class CertificateError(ValueError):
pass
def _dnsname_to_pat(dn):
pats = []
for frag in dn.split(r'.'):
if frag == '*':
# When '*' is a fragment by itself, it matches a non-empty dotless
# fragment.
pats.append('[^.]+')
else:
# Otherwise, '*' matches any dotless fragment.
frag = re.escape(frag)
pats.append(frag.replace(r'\*', '[^.]*'))
return re.compile(r'\A' + r'\.'.join(pats) + r'\Z', re.IGNORECASE)
def match_hostname(cert, hostname):
"""Verify that *cert* (in decoded format as returned by
SSLSocket.getpeercert()) matches the *hostname*. RFC 2818 rules
are mostly followed, but IP addresses are not accepted for *hostname*.
CertificateError is raised on failure. On success, the function
returns nothing.
"""
if not cert:
raise ValueError("empty or no certificate")
dnsnames = []
san = cert.get('subjectAltName', ())
for key, value in san:
if key == 'DNS':
if _dnsname_to_pat(value).match(hostname):
return
dnsnames.append(value)
if not dnsnames:
# The subject is only checked when there is no dNSName entry
# in subjectAltName
for sub in cert.get('subject', ()):
for key, value in sub:
# XXX according to RFC 2818, the most specific Common Name
# must be used.
if key == 'commonName':
if _dnsname_to_pat(value).match(hostname):
return
dnsnames.append(value)
if len(dnsnames) > 1:
raise CertificateError("hostname %r "
"doesn't match either of %s"
% (hostname, ', '.join(map(repr, dnsnames))))
elif len(dnsnames) == 1:
raise CertificateError("hostname %r "
"doesn't match %r"
% (hostname, dnsnames[0]))
else:
raise CertificateError("no appropriate commonName or "
"subjectAltName fields were found")
try:
# Python 3.2+
from ssl import CertificateError, match_hostname
except ImportError:
try:
# Backport of the function from a pypi module
from backports.ssl_match_hostname import CertificateError, match_hostname
except ImportError:
# Our vendored copy
from _implementation import CertificateError, match_hostname
# Not needed, but documenting what we provide.
__all__ = ('CertificateError', 'match_hostname')

105
libs/requests/packages/urllib3/packages/ssl_match_hostname/_implementation.py

@ -0,0 +1,105 @@
"""The match_hostname() function from Python 3.3.3, essential when using SSL."""
# Note: This file is under the PSF license as the code comes from the python
# stdlib. http://docs.python.org/3/license.html
import re
__version__ = '3.4.0.2'
class CertificateError(ValueError):
pass
def _dnsname_match(dn, hostname, max_wildcards=1):
"""Matching according to RFC 6125, section 6.4.3
http://tools.ietf.org/html/rfc6125#section-6.4.3
"""
pats = []
if not dn:
return False
# Ported from python3-syntax:
# leftmost, *remainder = dn.split(r'.')
parts = dn.split(r'.')
leftmost = parts[0]
remainder = parts[1:]
wildcards = leftmost.count('*')
if wildcards > max_wildcards:
# Issue #17980: avoid denials of service by refusing more
# than one wildcard per fragment. A survey of established
# policy among SSL implementations showed it to be a
# reasonable choice.
raise CertificateError(
"too many wildcards in certificate DNS name: " + repr(dn))
# speed up common case w/o wildcards
if not wildcards:
return dn.lower() == hostname.lower()
# RFC 6125, section 6.4.3, subitem 1.
# The client SHOULD NOT attempt to match a presented identifier in which
# the wildcard character comprises a label other than the left-most label.
if leftmost == '*':
# When '*' is a fragment by itself, it matches a non-empty dotless
# fragment.
pats.append('[^.]+')
elif leftmost.startswith('xn--') or hostname.startswith('xn--'):
# RFC 6125, section 6.4.3, subitem 3.
# The client SHOULD NOT attempt to match a presented identifier
# where the wildcard character is embedded within an A-label or
# U-label of an internationalized domain name.
pats.append(re.escape(leftmost))
else:
# Otherwise, '*' matches any dotless string, e.g. www*
pats.append(re.escape(leftmost).replace(r'\*', '[^.]*'))
# add the remaining fragments, ignore any wildcards
for frag in remainder:
pats.append(re.escape(frag))
pat = re.compile(r'\A' + r'\.'.join(pats) + r'\Z', re.IGNORECASE)
return pat.match(hostname)
def match_hostname(cert, hostname):
"""Verify that *cert* (in decoded format as returned by
SSLSocket.getpeercert()) matches the *hostname*. RFC 2818 and RFC 6125
rules are followed, but IP addresses are not accepted for *hostname*.
CertificateError is raised on failure. On success, the function
returns nothing.
"""
if not cert:
raise ValueError("empty or no certificate")
dnsnames = []
san = cert.get('subjectAltName', ())
for key, value in san:
if key == 'DNS':
if _dnsname_match(value, hostname):
return
dnsnames.append(value)
if not dnsnames:
# The subject is only checked when there is no dNSName entry
# in subjectAltName
for sub in cert.get('subject', ()):
for key, value in sub:
# XXX according to RFC 2818, the most specific Common Name
# must be used.
if key == 'commonName':
if _dnsname_match(value, hostname):
return
dnsnames.append(value)
if len(dnsnames) > 1:
raise CertificateError("hostname %r "
"doesn't match either of %s"
% (hostname, ', '.join(map(repr, dnsnames))))
elif len(dnsnames) == 1:
raise CertificateError("hostname %r "
"doesn't match %r"
% (hostname, dnsnames[0]))
else:
raise CertificateError("no appropriate commonName or "
"subjectAltName fields were found")

120
libs/requests/packages/urllib3/poolmanager.py

@ -6,9 +6,14 @@
import logging
try: # Python 3
from urllib.parse import urljoin
except ImportError:
from urlparse import urljoin
from ._collections import RecentlyUsedContainer
from .connectionpool import HTTPConnectionPool, HTTPSConnectionPool
from .connectionpool import connection_from_url, port_by_scheme
from .connectionpool import port_by_scheme
from .request import RequestMethods
from .util import parse_url
@ -55,6 +60,8 @@ class PoolManager(RequestMethods):
"""
proxy = None
def __init__(self, num_pools=10, headers=None, **connection_pool_kw):
RequestMethods.__init__(self, headers)
self.connection_pool_kw = connection_pool_kw
@ -94,20 +101,23 @@ class PoolManager(RequestMethods):
If ``port`` isn't given, it will be derived from the ``scheme`` using
``urllib3.connectionpool.port_by_scheme``.
"""
scheme = scheme or 'http'
port = port or port_by_scheme.get(scheme, 80)
pool_key = (scheme, host, port)
# If the scheme, host, or port doesn't match existing open connections,
# open a new ConnectionPool.
pool = self.pools.get(pool_key)
if pool:
return pool
with self.pools.lock:
# If the scheme, host, or port doesn't match existing open
# connections, open a new ConnectionPool.
pool = self.pools.get(pool_key)
if pool:
return pool
# Make a fresh ConnectionPool of the desired type
pool = self._new_pool(scheme, host, port)
self.pools[pool_key] = pool
# Make a fresh ConnectionPool of the desired type
pool = self._new_pool(scheme, host, port)
self.pools[pool_key] = pool
return pool
def connection_from_url(self, url):
@ -139,12 +149,19 @@ class PoolManager(RequestMethods):
if 'headers' not in kw:
kw['headers'] = self.headers
response = conn.urlopen(method, u.request_uri, **kw)
if self.proxy is not None and u.scheme == "http":
response = conn.urlopen(method, url, **kw)
else:
response = conn.urlopen(method, u.request_uri, **kw)
redirect_location = redirect and response.get_redirect_location()
if not redirect_location:
return response
# Support relative URLs for redirecting.
redirect_location = urljoin(url, redirect_location)
# RFC 2616, Section 10.3.4
if response.status == 303:
method = 'GET'
@ -154,15 +171,59 @@ class PoolManager(RequestMethods):
return self.urlopen(method, redirect_location, **kw)
class ProxyManager(RequestMethods):
class ProxyManager(PoolManager):
"""
Given a ConnectionPool to a proxy, the ProxyManager's ``urlopen`` method
will make requests to any url through the defined proxy. The ProxyManager
class will automatically set the 'Host' header if it is not provided.
Behaves just like :class:`PoolManager`, but sends all requests through
the defined proxy, using the CONNECT method for HTTPS URLs.
:param proxy_url:
The URL of the proxy to be used.
:param proxy_headers:
A dictionary containing headers that will be sent to the proxy. In the
HTTP case they are sent with each request, while in the HTTPS/CONNECT
case they are sent only once. They can be used for proxy
authentication.
Example:
>>> proxy = urllib3.ProxyManager('http://localhost:3128/')
>>> r1 = proxy.request('GET', 'http://google.com/')
>>> r2 = proxy.request('GET', 'http://httpbin.org/')
>>> len(proxy.pools)
1
>>> r3 = proxy.request('GET', 'https://httpbin.org/')
>>> r4 = proxy.request('GET', 'https://twitter.com/')
>>> len(proxy.pools)
3
"""
def __init__(self, proxy_pool):
self.proxy_pool = proxy_pool
def __init__(self, proxy_url, num_pools=10, headers=None,
proxy_headers=None, **connection_pool_kw):
if isinstance(proxy_url, HTTPConnectionPool):
proxy_url = '%s://%s:%i' % (proxy_url.scheme, proxy_url.host,
proxy_url.port)
proxy = parse_url(proxy_url)
if not proxy.port:
port = port_by_scheme.get(proxy.scheme, 80)
proxy = proxy._replace(port=port)
self.proxy = proxy
self.proxy_headers = proxy_headers or {}
assert self.proxy.scheme in ("http", "https"), \
'Not supported proxy scheme %s' % self.proxy.scheme
connection_pool_kw['_proxy'] = self.proxy
connection_pool_kw['_proxy_headers'] = self.proxy_headers
super(ProxyManager, self).__init__(
num_pools, headers, **connection_pool_kw)
def connection_from_host(self, host, port=None, scheme='http'):
if scheme == "https":
return super(ProxyManager, self).connection_from_host(
host, port, scheme)
return super(ProxyManager, self).connection_from_host(
self.proxy.host, self.proxy.port, self.proxy.scheme)
def _set_proxy_headers(self, url, headers=None):
"""
@ -171,22 +232,27 @@ class ProxyManager(RequestMethods):
"""
headers_ = {'Accept': '*/*'}
host = parse_url(url).host
if host:
headers_['Host'] = host
netloc = parse_url(url).netloc
if netloc:
headers_['Host'] = netloc
if headers:
headers_.update(headers)
return headers_
def urlopen(self, method, url, **kw):
def urlopen(self, method, url, redirect=True, **kw):
"Same as HTTP(S)ConnectionPool.urlopen, ``url`` must be absolute."
kw['assert_same_host'] = False
kw['headers'] = self._set_proxy_headers(url, headers=kw.get('headers'))
return self.proxy_pool.urlopen(method, url, **kw)
u = parse_url(url)
if u.scheme == "http":
# For proxied HTTPS requests, httplib sets the necessary headers
# on the CONNECT to the proxy. For HTTP, we'll definitely
# need to set 'Host' at the very least.
kw['headers'] = self._set_proxy_headers(url, kw.get('headers',
self.headers))
return super(ProxyManager, self).urlopen(method, url, redirect, **kw)
def proxy_from_url(url, **pool_kw):
proxy_pool = connection_from_url(url, **pool_kw)
return ProxyManager(proxy_pool)
def proxy_from_url(url, **kw):
return ProxyManager(proxy_url=url, **kw)
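Together with the proxy_basic_auth support added to make_headers below, a minimal sketch of the new entry point (the proxy address and credentials are hypothetical):
from requests.packages.urllib3.poolmanager import proxy_from_url
from requests.packages.urllib3.util import make_headers
proxy = proxy_from_url(
    'http://localhost:3128/',
    proxy_headers=make_headers(proxy_basic_auth='user:passwd'))
r = proxy.request('GET', 'http://httpbin.org/ip')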

2
libs/requests/packages/urllib3/request.py

@ -30,7 +30,7 @@ class RequestMethods(object):
in the URL (such as GET, HEAD, DELETE).
:meth:`.request_encode_body` is for sending requests whose fields are
encoded in the *body* of the request using multipart or www-orm-urlencoded
encoded in the *body* of the request using multipart or www-form-urlencoded
(such as for POST, PUT, PATCH).
:meth:`.request` is for making any kind of request, it will look up the

85
libs/requests/packages/urllib3/response.py

@ -1,5 +1,5 @@
# urllib3/response.py
# Copyright 2008-2012 Andrey Petrov and contributors (see CONTRIBUTORS.txt)
# Copyright 2008-2013 Andrey Petrov and contributors (see CONTRIBUTORS.txt)
#
# This module is part of urllib3 and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
@ -7,9 +7,11 @@
import logging
import zlib
import io
from .exceptions import DecodeError
from .packages.six import string_types as basestring, binary_type
from .util import is_fp_closed
log = logging.getLogger(__name__)
@ -48,7 +50,7 @@ def _get_decoder(mode):
return DeflateDecoder()
class HTTPResponse(object):
class HTTPResponse(io.IOBase):
"""
HTTP Response container.
@ -72,6 +74,7 @@ class HTTPResponse(object):
"""
CONTENT_DECODERS = ['gzip', 'deflate']
REDIRECT_STATUSES = [301, 302, 303, 307, 308]
def __init__(self, body='', headers=None, status=0, version=0, reason=None,
strict=0, preload_content=True, decode_content=True,
@ -87,6 +90,7 @@ class HTTPResponse(object):
self._body = body if body and isinstance(body, basestring) else None
self._fp = None
self._original_response = original_response
self._fp_bytes_read = 0
self._pool = pool
self._connection = connection
@ -105,7 +109,7 @@ class HTTPResponse(object):
code and valid location. ``None`` if redirect status and no
location. ``False`` if not a redirect status code.
"""
if self.status in [301, 302, 303, 307]:
if self.status in self.REDIRECT_STATUSES:
return self.headers.get('location')
return False
@ -126,6 +130,14 @@ class HTTPResponse(object):
if self._fp:
return self.read(cache_content=True)
def tell(self):
"""
Obtain the number of bytes pulled over the wire so far. May differ from
the amount of content returned by :meth:`HTTPResponse.read` if bytes
are encoded on the wire (e.g., compressed).
"""
return self._fp_bytes_read
def read(self, amt=None, decode_content=None, cache_content=False):
"""
Similar to :meth:`httplib.HTTPResponse.read`, but with two additional
@ -180,14 +192,18 @@ class HTTPResponse(object):
self._fp.close()
flush_decoder = True
self._fp_bytes_read += len(data)
try:
if decode_content and self._decoder:
data = self._decoder.decompress(data)
except (IOError, zlib.error):
raise DecodeError("Received response with content-encoding: %s, but "
"failed to decode it." % content_encoding)
except (IOError, zlib.error) as e:
raise DecodeError(
"Received response with content-encoding: %s, but "
"failed to decode it." % content_encoding,
e)
if flush_decoder and self._decoder:
if flush_decoder and decode_content and self._decoder:
buf = self._decoder.decompress(binary_type())
data += buf + self._decoder.flush()
@ -200,6 +216,29 @@ class HTTPResponse(object):
if self._original_response and self._original_response.isclosed():
self.release_conn()
def stream(self, amt=2**16, decode_content=None):
"""
A generator wrapper for the read() method. A call will block until
``amt`` bytes have been read from the connection or until the
connection is closed.
:param amt:
How much of the content to read. The generator will return up to
this much data per iteration, but may return less. This is particularly
likely when using compressed data. However, the empty string will
never be returned.
:param decode_content:
If True, will attempt to decode the body based on the
'content-encoding' header.
"""
while not is_fp_closed(self._fp):
data = self.read(amt=amt, decode_content=decode_content)
if data:
yield data
@classmethod
def from_httplib(ResponseCls, r, **response_kw):
"""
@ -239,3 +278,35 @@ class HTTPResponse(object):
def getheader(self, name, default=None):
return self.headers.get(name, default)
# Overrides from io.IOBase
def close(self):
if not self.closed:
self._fp.close()
@property
def closed(self):
if self._fp is None:
return True
elif hasattr(self._fp, 'closed'):
return self._fp.closed
elif hasattr(self._fp, 'isclosed'): # Python 2
return self._fp.isclosed()
else:
return True
def fileno(self):
if self._fp is None:
raise IOError("HTTPResponse has no file to get a fileno from")
elif hasattr(self._fp, "fileno"):
return self._fp.fileno()
else:
raise IOError("The file-like object this HTTPResponse is wrapped "
"around has no file descriptor")
def flush(self):
if self._fp is not None and hasattr(self._fp, 'flush'):
return self._fp.flush()
def readable(self):
return True
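A usage sketch of the new generator and byte counter through requests' raw response object (the URL is illustrative):
import requests
r = requests.get('http://httpbin.org/bytes/1024', stream=True)
for chunk in r.raw.stream(256, decode_content=True):
    # tell() counts bytes pulled over the wire, before any decoding
    print("%d bytes (wire total: %d)" % (len(chunk), r.raw.tell()))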

289
libs/requests/packages/urllib3/util.py

@ -6,10 +6,11 @@
from base64 import b64encode
from binascii import hexlify, unhexlify
from collections import namedtuple
from socket import error as SocketError
from hashlib import md5, sha1
from binascii import hexlify, unhexlify
from socket import error as SocketError, _GLOBAL_DEFAULT_TIMEOUT
import time
try:
from select import poll, POLLIN
@ -31,9 +32,238 @@ try: # Test for SSL features
except ImportError:
pass
from .packages import six
from .exceptions import LocationParseError, SSLError
from .exceptions import LocationParseError, SSLError, TimeoutStateError
_Default = object()
# The default timeout to use for socket connections. This is the attribute used
# by httplib to define the default timeout
def current_time():
"""
Retrieve the current time; this function is mocked out in unit testing.
"""
return time.time()
class Timeout(object):
"""
Utility object for storing timeout values.
Example usage:
.. code-block:: python
timeout = urllib3.util.Timeout(connect=2.0, read=7.0)
pool = HTTPConnectionPool('www.google.com', 80, timeout=timeout)
pool.request(...) # Etc, etc
:param connect:
The maximum amount of time to wait for a connection attempt to a server
to succeed. Omitting the parameter will default the connect timeout to
the system default, probably `the global default timeout in socket.py
<http://hg.python.org/cpython/file/603b4d593758/Lib/socket.py#l535>`_.
None will set an infinite timeout for connection attempts.
:type connect: integer, float, or None
:param read:
The maximum amount of time to wait between consecutive
read operations for a response from the server. Omitting
the parameter will default the read timeout to the system
default, probably `the global default timeout in socket.py
<http://hg.python.org/cpython/file/603b4d593758/Lib/socket.py#l535>`_.
None will set an infinite timeout.
:type read: integer, float, or None
:param total:
This combines the connect and read timeouts into one; the read timeout
will be set to the time leftover from the connect attempt. In the
event that both a connect timeout and a total are specified, or a read
timeout and a total are specified, the shorter timeout will be applied.
Defaults to None.
:type total: integer, float, or None
.. note::
Many factors can affect the total amount of time for urllib3 to return
an HTTP response. Specifically, Python's DNS resolver does not obey the
timeout specified on the socket. Other factors that can affect total
request time include high CPU load, high swap, the program running at a
low priority level, or other behaviors. The observed running time for
urllib3 to return a response may be greater than the value passed to
`total`.
In addition, the read and total timeouts only measure the time between
read operations on the socket connecting the client and the server,
not the total amount of time for the request to return a complete
response. For most requests, the timeout is raised because the server
has not sent the first byte in the specified time. This is not always
the case; if a server streams one byte every fifteen seconds, a timeout
of 20 seconds will not ever trigger, even though the request will
take several minutes to complete.
If your goal is to cut off any request after a set amount of wall clock
time, consider having a second "watcher" thread to cut off a slow
request.
"""
#: A sentinel object representing the default timeout value
DEFAULT_TIMEOUT = _GLOBAL_DEFAULT_TIMEOUT
def __init__(self, total=None, connect=_Default, read=_Default):
self._connect = self._validate_timeout(connect, 'connect')
self._read = self._validate_timeout(read, 'read')
self.total = self._validate_timeout(total, 'total')
self._start_connect = None
def __str__(self):
return '%s(connect=%r, read=%r, total=%r)' % (
type(self).__name__, self._connect, self._read, self.total)
@classmethod
def _validate_timeout(cls, value, name):
""" Check that a timeout attribute is valid
:param value: The timeout value to validate
:param name: The name of the timeout attribute to validate. This is used
for clear error messages
:return: the value
:raises ValueError: if the type is not an integer or a float, or if it
is a numeric value less than zero
"""
if value is _Default:
return cls.DEFAULT_TIMEOUT
if value is None or value is cls.DEFAULT_TIMEOUT:
return value
try:
float(value)
except (TypeError, ValueError):
raise ValueError("Timeout value %s was %s, but it must be an "
"int or float." % (name, value))
try:
if value < 0:
raise ValueError("Attempted to set %s timeout to %s, but the "
"timeout cannot be set to a value less "
"than 0." % (name, value))
except TypeError: # Python 3
raise ValueError("Timeout value %s was %s, but it must be an "
"int or float." % (name, value))
return value
@classmethod
def from_float(cls, timeout):
""" Create a new Timeout from a legacy timeout value.
The timeout value used by httplib.py sets the same timeout on the
connect(), and recv() socket requests. This creates a :class:`Timeout`
object that sets the individual timeouts to the ``timeout`` value passed
to this function.
:param timeout: The legacy timeout value
:type timeout: integer, float, sentinel default object, or None
:return: a Timeout object
:rtype: :class:`Timeout`
"""
return Timeout(read=timeout, connect=timeout)
def clone(self):
""" Create a copy of the timeout object
Timeout properties are stored per-pool but each request needs a fresh
Timeout object to ensure each one has its own start/stop configured.
:return: a copy of the timeout object
:rtype: :class:`Timeout`
"""
# We can't use copy.deepcopy because that will also create a new object
# for _GLOBAL_DEFAULT_TIMEOUT, which socket.py uses as a sentinel to
# detect the user default.
return Timeout(connect=self._connect, read=self._read,
total=self.total)
def start_connect(self):
""" Start the timeout clock, used during a connect() attempt
:raises urllib3.exceptions.TimeoutStateError: if you attempt
to start a timer that has been started already.
"""
if self._start_connect is not None:
raise TimeoutStateError("Timeout timer has already been started.")
self._start_connect = current_time()
return self._start_connect
def get_connect_duration(self):
""" Gets the time elapsed since the call to :meth:`start_connect`.
:return: the elapsed time
:rtype: float
:raises urllib3.exceptions.TimeoutStateError: if you attempt
to get duration for a timer that hasn't been started.
"""
if self._start_connect is None:
raise TimeoutStateError("Can't get connect duration for timer "
"that has not started.")
return current_time() - self._start_connect
@property
def connect_timeout(self):
""" Get the value to use when setting a connection timeout.
This will be a positive float or integer, the value None
(never timeout), or the default system timeout.
:return: the connect timeout
:rtype: int, float, :attr:`Timeout.DEFAULT_TIMEOUT` or None
"""
if self.total is None:
return self._connect
if self._connect is None or self._connect is self.DEFAULT_TIMEOUT:
return self.total
return min(self._connect, self.total)
@property
def read_timeout(self):
""" Get the value for the read timeout.
This assumes some time has elapsed in the connection timeout and
computes the read timeout appropriately.
If self.total is set, the read timeout is dependent on the amount of
time taken by the connect timeout. If the connection time has not been
established, a :exc:`~urllib3.exceptions.TimeoutStateError` will be
raised.
:return: the value to use for the read timeout
:rtype: int, float, :attr:`Timeout.DEFAULT_TIMEOUT` or None
:raises urllib3.exceptions.TimeoutStateError: If :meth:`start_connect`
has not yet been called on this object.
"""
if (self.total is not None and
self.total is not self.DEFAULT_TIMEOUT and
self._read is not None and
self._read is not self.DEFAULT_TIMEOUT):
# in case the connect timeout has not yet been established.
if self._start_connect is None:
return self._read
return max(0, min(self.total - self.get_connect_duration(),
self._read))
elif self.total is not None and self.total is not self.DEFAULT_TIMEOUT:
return max(0, self.total - self.get_connect_duration())
else:
return self._read
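A minimal sketch of the interplay between the three values (the numbers are arbitrary):
from requests.packages.urllib3.util import Timeout
t = Timeout(total=5.0, connect=2.0, read=7.0)
print(t.connect_timeout)   # min(connect, total) -> 2.0
t.start_connect()          # must be called before read_timeout is computed
print(t.read_timeout)      # min(total - elapsed connect time, read)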
class Url(namedtuple('Url', ['scheme', 'auth', 'host', 'port', 'path', 'query', 'fragment'])):
@ -61,6 +291,13 @@ class Url(namedtuple('Url', ['scheme', 'auth', 'host', 'port', 'path', 'query',
return uri
@property
def netloc(self):
"""Network location including host and port"""
if self.port:
return '%s:%d' % (self.host, self.port)
return self.host
def split_first(s, delims):
"""
@ -114,7 +351,7 @@ def parse_url(url):
# While this code has overlap with stdlib's urlparse, it is much
# simplified for our needs and less annoying.
# Additionally, this imeplementations does silly things to be optimal
# Additionally, this implementation does silly things to be optimal
# on CPython.
scheme = None
@ -139,11 +376,13 @@ def parse_url(url):
# Auth
if '@' in url:
auth, url = url.split('@', 1)
# Last '@' denotes end of auth part
auth, url = url.rsplit('@', 1)
# IPv6
if url and url[0] == '[':
host, url = url[1:].split(']', 1)
host, url = url.split(']', 1)
host += ']'
# Port
if ':' in url:
@ -152,10 +391,14 @@ def parse_url(url):
if not host:
host = _host
if not port.isdigit():
raise LocationParseError("Failed to parse: %s" % url)
port = int(port)
if port:
# If given, ports must be integers.
if not port.isdigit():
raise LocationParseError("Failed to parse: %s" % url)
port = int(port)
else:
# Blank ports are cool, too. (rfc3986#section-3.2.3)
port = None
elif not host and url:
host = url
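A few illustrative calls showing the behaviour the fixes above enable (last '@' as the auth terminator, bracketed IPv6 hosts, blank ports):
from requests.packages.urllib3.util import parse_url
print(parse_url('http://user:p@ss@example.com/'))     # auth='user:p@ss'
print(parse_url('http://[2001:db8::1]:8080/').host)   # '[2001:db8::1]'
print(parse_url('http://example.com:/').port)         # blank port -> None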
@ -183,7 +426,7 @@ def get_host(url):
def make_headers(keep_alive=None, accept_encoding=None, user_agent=None,
basic_auth=None):
basic_auth=None, proxy_basic_auth=None):
"""
Shortcuts for generating request headers.
@ -204,6 +447,10 @@ def make_headers(keep_alive=None, accept_encoding=None, user_agent=None,
Colon-separated username:password string for 'authorization: basic ...'
auth header.
:param proxy_basic_auth:
Colon-separated username:password string for 'proxy-authorization: basic ...'
auth header.
Example: ::
>>> make_headers(keep_alive=True, user_agent="Batman/1.0")
@ -231,6 +478,10 @@ def make_headers(keep_alive=None, accept_encoding=None, user_agent=None,
headers['authorization'] = 'Basic ' + \
b64encode(six.b(basic_auth)).decode('utf-8')
if proxy_basic_auth:
headers['proxy-authorization'] = 'Basic ' + \
b64encode(six.b(proxy_basic_auth)).decode('utf-8')
return headers
@ -341,6 +592,20 @@ def assert_fingerprint(cert, fingerprint):
.format(hexlify(fingerprint_bytes),
hexlify(cert_digest)))
def is_fp_closed(obj):
"""
Checks whether a given file-like object is closed.
:param obj:
The file-like object to check.
"""
if hasattr(obj, 'fp'):
# Object is a container for another file-like object that gets released
# on exhaustion (e.g. HTTPResponse)
return obj.fp is None
return obj.closed
if SSLContext is not None: # Python 3.2+
def ssl_wrap_socket(sock, keyfile=None, certfile=None, cert_reqs=None,

155
libs/requests/sessions.py

@ -12,8 +12,9 @@ import os
from collections import Mapping
from datetime import datetime
from .compat import cookielib, OrderedDict, urljoin, urlparse
from .cookies import cookiejar_from_dict, extract_cookies_to_jar, RequestsCookieJar
from .compat import cookielib, OrderedDict, urljoin, urlparse, builtin_str
from .cookies import (
cookiejar_from_dict, extract_cookies_to_jar, RequestsCookieJar, merge_cookies)
from .models import Request, PreparedRequest
from .hooks import default_hooks, dispatch_hook
from .utils import to_key_val_list, default_headers
@ -65,21 +66,32 @@ def merge_setting(request_setting, session_setting, dict_class=OrderedDict):
return merged_setting
def merge_hooks(request_hooks, session_hooks, dict_class=OrderedDict):
"""
Properly merges both requests and session hooks.
This is necessary because when request_hooks == {'response': []}, the
merge breaks Session hooks entirely.
"""
if session_hooks is None or session_hooks.get('response') == []:
return request_hooks
if request_hooks is None or request_hooks.get('response') == []:
return session_hooks
return merge_setting(request_hooks, session_hooks, dict_class)
class SessionRedirectMixin(object):
def resolve_redirects(self, resp, req, stream=False, timeout=None,
verify=True, cert=None, proxies=None):
"""Receives a Response. Returns a generator of Responses."""
i = 0
prepared_request = PreparedRequest()
prepared_request.body = req.body
prepared_request.headers = req.headers.copy()
prepared_request.hooks = req.hooks
prepared_request.method = req.method
prepared_request.url = req.url
# ((resp.status_code is codes.see_other))
while (('location' in resp.headers and resp.status_code in REDIRECT_STATI)):
while ('location' in resp.headers and resp.status_code in REDIRECT_STATI):
prepared_request = req.copy()
resp.content # Consume socket so it can be released
@ -90,13 +102,17 @@ class SessionRedirectMixin(object):
resp.close()
url = resp.headers['location']
method = prepared_request.method
method = req.method
# Handle redirection without scheme (see: RFC 1808 Section 4)
if url.startswith('//'):
parsed_rurl = urlparse(resp.url)
url = '%s:%s' % (parsed_rurl.scheme, url)
# The scheme should be lower case...
parsed = urlparse(url)
url = parsed.geturl()
# Facilitate non-RFC2616-compliant 'location' headers
# (e.g. '/path/to/resource' instead of 'http://domain.tld/path/to/resource')
# Compliant with RFC3986, we percent encode the url.
@ -109,12 +125,17 @@ class SessionRedirectMixin(object):
# http://www.w3.org/Protocols/rfc2616/rfc2616-sec10.html#sec10.3.4
if (resp.status_code == codes.see_other and
prepared_request.method != 'HEAD'):
method != 'HEAD'):
method = 'GET'
# Do what the browsers do, despite standards...
if (resp.status_code in (codes.moved, codes.found) and
prepared_request.method not in ('GET', 'HEAD')):
# First, turn 302s into GETs.
if resp.status_code == codes.found and method != 'HEAD':
method = 'GET'
# Second, if a POST is responded to with a 301, turn it into a GET.
# This bizarre behaviour is explained in Issue 1704.
if resp.status_code == codes.moved and method == 'POST':
method = 'GET'
prepared_request.method = method
@ -132,7 +153,9 @@ class SessionRedirectMixin(object):
except KeyError:
pass
prepared_request.prepare_cookies(self.cookies)
extract_cookies_to_jar(prepared_request._cookies,
prepared_request, resp.raw)
prepared_request.prepare_cookies(prepared_request._cookies)
resp = self.send(
prepared_request,
@ -153,7 +176,7 @@ class SessionRedirectMixin(object):
class Session(SessionRedirectMixin):
"""A Requests session.
Provides cookie persistience, connection-pooling, and configuration.
Provides cookie persistence, connection-pooling, and configuration.
Basic Usage::
@ -208,7 +231,10 @@ class Session(SessionRedirectMixin):
#: Should we trust the environment?
self.trust_env = True
# Set up a CookieJar to be used by default
#: A CookieJar containing all currently outstanding cookies set on this
#: session. By default it is a
#: :class:`RequestsCookieJar <requests.cookies.RequestsCookieJar>`, but
#: may be any other ``cookielib.CookieJar`` compatible object.
self.cookies = cookiejar_from_dict({})
# Default connection adapters.
@ -222,6 +248,45 @@ class Session(SessionRedirectMixin):
def __exit__(self, *args):
self.close()
def prepare_request(self, request):
"""Constructs a :class:`PreparedRequest <PreparedRequest>` for
transmission and returns it. The :class:`PreparedRequest` has settings
merged from the :class:`Request <Request>` instance and those of the
:class:`Session`.
:param request: :class:`Request` instance to prepare with this
session's settings.
"""
cookies = request.cookies or {}
# Bootstrap CookieJar.
if not isinstance(cookies, cookielib.CookieJar):
cookies = cookiejar_from_dict(cookies)
# Merge with session cookies
merged_cookies = merge_cookies(
merge_cookies(RequestsCookieJar(), self.cookies), cookies)
# Set environment's basic authentication if not explicitly set.
auth = request.auth
if self.trust_env and not auth and not self.auth:
auth = get_netrc_auth(request.url)
p = PreparedRequest()
p.prepare(
method=request.method.upper(),
url=request.url,
files=request.files,
data=request.data,
headers=merge_setting(request.headers, self.headers, dict_class=CaseInsensitiveDict),
params=merge_setting(request.params, self.params),
auth=merge_setting(auth, self.auth),
cookies=merged_cookies,
hooks=merge_hooks(request.hooks, self.hooks),
)
return p
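For illustration, the typical flow this new method enables (the header and URL are made up):
import requests
s = requests.Session()
s.headers['X-Env'] = 'staging'            # session-level setting
req = requests.Request('GET', 'http://httpbin.org/get', params={'q': '1'})
prep = s.prepare_request(req)             # merges session and request settings
resp = s.send(prep)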
def request(self, method, url,
params=None,
data=None,
@ -266,18 +331,23 @@ class Session(SessionRedirectMixin):
If Tuple, ('cert', 'key') pair.
"""
cookies = cookies or {}
proxies = proxies or {}
method = builtin_str(method)
# Bootstrap CookieJar.
if not isinstance(cookies, cookielib.CookieJar):
cookies = cookiejar_from_dict(cookies)
# Create the Request.
req = Request(
method = method.upper(),
url = url,
headers = headers,
files = files,
data = data or {},
params = params or {},
auth = auth,
cookies = cookies,
hooks = hooks,
)
prep = self.prepare_request(req)
# Merge with session cookies
merged_cookies = RequestsCookieJar()
merged_cookies.update(self.cookies)
merged_cookies.update(cookies)
cookies = merged_cookies
proxies = proxies or {}
# Gather clues from the surrounding environment.
if self.trust_env:
@ -286,10 +356,6 @@ class Session(SessionRedirectMixin):
for (k, v) in env_proxies.items():
proxies.setdefault(k, v)
# Set environment's basic authentication.
if not auth:
auth = get_netrc_auth(url)
# Look for configuration.
if not verify and verify is not False:
verify = os.environ.get('REQUESTS_CA_BUNDLE')
@ -299,30 +365,11 @@ class Session(SessionRedirectMixin):
verify = os.environ.get('CURL_CA_BUNDLE')
# Merge all the kwargs.
params = merge_setting(params, self.params)
headers = merge_setting(headers, self.headers, dict_class=CaseInsensitiveDict)
auth = merge_setting(auth, self.auth)
proxies = merge_setting(proxies, self.proxies)
hooks = merge_setting(hooks, self.hooks)
stream = merge_setting(stream, self.stream)
verify = merge_setting(verify, self.verify)
cert = merge_setting(cert, self.cert)
# Create the Request.
req = Request()
req.method = method.upper()
req.url = url
req.headers = headers
req.files = files
req.data = data
req.params = params
req.auth = auth
req.cookies = cookies
req.hooks = hooks
# Prepare the Request.
prep = req.prepare()
# Send the request.
send_kwargs = {
'stream': stream,
@ -416,7 +463,7 @@ class Session(SessionRedirectMixin):
# It's possible that users might accidentally send a Request object.
# Guard against that specific failure case.
if getattr(request, 'prepare', None):
if not isinstance(request, PreparedRequest):
raise ValueError('You can only send PreparedRequests.')
# Set up variables needed for resolve_redirects and dispatching of
@ -443,6 +490,10 @@ class Session(SessionRedirectMixin):
r = dispatch_hook('response', hooks, r, **kwargs)
# Persist cookies
if r.history:
# If the hooks create history then we want those cookies too
for resp in r.history:
extract_cookies_to_jar(self.cookies, resp.request, resp.raw)
extract_cookies_to_jar(self.cookies, request, r.raw)
# Redirect resolving generator.
@ -467,7 +518,7 @@ class Session(SessionRedirectMixin):
"""Returns the appropriate connnection adapter for the given URL."""
for (prefix, adapter) in self.adapters.items():
if url.startswith(prefix):
if url.lower().startswith(prefix):
return adapter
# Nothing matches :-/
@ -475,7 +526,7 @@ class Session(SessionRedirectMixin):
def close(self):
"""Closes all adapters and as such the session"""
for _, v in self.adapters.items():
for v in self.adapters.values():
v.close()
def mount(self, prefix, adapter):

3
libs/requests/status_codes.py

@ -18,7 +18,8 @@ _codes = {
205: ('reset_content', 'reset'),
206: ('partial_content', 'partial'),
207: ('multi_status', 'multiple_status', 'multi_stati', 'multiple_stati'),
208: ('im_used',),
208: ('already_reported',),
226: ('im_used',),
# Redirection.
300: ('multiple_choices',),

2
libs/requests/structures.py

@ -103,7 +103,7 @@ class CaseInsensitiveDict(collections.MutableMapping):
# Copy is required
def copy(self):
return CaseInsensitiveDict(self._store.values())
return CaseInsensitiveDict(self._store.values())
def __repr__(self):
return '%s(%r)' % (self.__class__.__name__, dict(self.items()))

167
libs/requests/utils.py

@ -12,18 +12,22 @@ that are also useful for external consumption.
import cgi
import codecs
import collections
import io
import os
import platform
import re
import sys
from netrc import netrc, NetrcParseError
import socket
import struct
from . import __version__
from . import certs
from .compat import parse_http_list as _parse_list_header
from .compat import quote, urlparse, bytes, str, OrderedDict, urlunparse
from .compat import (quote, urlparse, bytes, str, OrderedDict, unquote, is_py2,
builtin_str, getproxies, proxy_bypass)
from .cookies import RequestsCookieJar, cookiejar_from_dict
from .structures import CaseInsensitiveDict
from .exceptions import MissingSchema, InvalidURL
_hush_pyflakes = (RequestsCookieJar,)
@ -44,16 +48,28 @@ def dict_to_sequence(d):
def super_len(o):
if hasattr(o, '__len__'):
return len(o)
if hasattr(o, 'len'):
return o.len
if hasattr(o, 'fileno'):
return os.fstat(o.fileno()).st_size
try:
fileno = o.fileno()
except io.UnsupportedOperation:
pass
else:
return os.fstat(fileno).st_size
if hasattr(o, 'getvalue'):
# e.g. BytesIO, cStringIO.StringI
return len(o.getvalue())
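A quick sketch of the fallbacks (BytesIO has a fileno() that raises io.UnsupportedOperation, so the new guard matters):
from io import BytesIO
from requests.utils import super_len
print(super_len(b'abcd'))            # __len__             -> 4
print(super_len(BytesIO(b'abcd')))   # getvalue() fallback -> 4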
def get_netrc_auth(url):
"""Returns the Requests tuple auth for a given url from netrc."""
try:
from netrc import netrc, NetrcParseError
locations = (os.path.expanduser('~/{0}'.format(f)) for f in NETRC_FILES)
netrc_path = None
@ -264,8 +280,12 @@ def get_encodings_from_content(content):
"""
charset_re = re.compile(r'<meta.*?charset=["\']*(.+?)["\'>]', flags=re.I)
pragma_re = re.compile(r'<meta.*?content=["\']*;?charset=(.+?)["\'>]', flags=re.I)
xml_re = re.compile(r'^<\?xml.*?encoding=["\']*(.+?)["\'>]')
return charset_re.findall(content)
return (charset_re.findall(content) +
pragma_re.findall(content) +
xml_re.findall(content))
def get_encoding_from_headers(headers):
@ -301,7 +321,7 @@ def stream_decode_response_unicode(iterator, r):
rv = decoder.decode(chunk)
if rv:
yield rv
rv = decoder.decode('', final=True)
rv = decoder.decode(b'', final=True)
if rv:
yield rv
@ -361,7 +381,11 @@ def unquote_unreserved(uri):
for i in range(1, len(parts)):
h = parts[i][0:2]
if len(h) == 2 and h.isalnum():
c = chr(int(h, 16))
try:
c = chr(int(h, 16))
except ValueError:
raise InvalidURL("Invalid percent-escape sequence: '%s'" % h)
if c in UNRESERVED_SET:
parts[i] = c + parts[i][2:]
else:
@ -383,42 +407,96 @@ def requote_uri(uri):
return quote(unquote_unreserved(uri), safe="!#$%&'()*+,/:;=?@[]~")
def address_in_network(ip, net):
"""
This function allows you to check if an IP belongs to a network subnet
Example: returns True if ip = 192.168.1.1 and net = 192.168.1.0/24
returns False if ip = 192.168.1.1 and net = 192.168.100.0/24
"""
ipaddr = struct.unpack('=L', socket.inet_aton(ip))[0]
netaddr, bits = net.split('/')
netmask = struct.unpack('=L', socket.inet_aton(dotted_netmask(int(bits))))[0]
network = struct.unpack('=L', socket.inet_aton(netaddr))[0] & netmask
return (ipaddr & netmask) == (network & netmask)
def dotted_netmask(mask):
"""
Converts mask from /xx format to xxx.xxx.xxx.xxx
Example: if mask is 24 function returns 255.255.255.0
"""
bits = 0xffffffff ^ (1 << 32 - mask) - 1
return socket.inet_ntoa(struct.pack('>I', bits))
def is_ipv4_address(string_ip):
try:
socket.inet_aton(string_ip)
except socket.error:
return False
return True
def is_valid_cidr(string_network):
"""Very simple check of the cidr format in no_proxy variable"""
if string_network.count('/') == 1:
try:
mask = int(string_network.split('/')[1])
except ValueError:
return False
if mask < 1 or mask > 32:
return False
try:
socket.inet_aton(string_network.split('/')[0])
except socket.error:
return False
else:
return False
return True
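A few illustrative calls against the new helpers:
from requests.utils import address_in_network, dotted_netmask, is_valid_cidr
print(dotted_netmask(24))                                    # 255.255.255.0
print(is_valid_cidr('192.168.1.0/24'))                       # True
print(address_in_network('192.168.1.1', '192.168.1.0/24'))   # True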
def get_environ_proxies(url):
"""Return a dict of environment proxies."""
proxy_keys = [
'all',
'http',
'https',
'ftp',
'socks'
]
get_proxy = lambda k: os.environ.get(k) or os.environ.get(k.upper())
# First check whether no_proxy is defined. If it is, check that the URL
# we're getting isn't in the no_proxy list.
no_proxy = get_proxy('no_proxy')
netloc = urlparse(url).netloc
if no_proxy:
# We need to check whether we match here. We need to see if we match
# the end of the netloc, both with and without the port.
no_proxy = no_proxy.split(',')
netloc = urlparse(url).netloc
no_proxy = no_proxy.replace(' ', '').split(',')
ip = netloc.split(':')[0]
if is_ipv4_address(ip):
for proxy_ip in no_proxy:
if is_valid_cidr(proxy_ip):
if address_in_network(ip, proxy_ip):
return {}
else:
for host in no_proxy:
if netloc.endswith(host) or netloc.split(':')[0].endswith(host):
# The URL does match something in no_proxy, so we don't want
# to apply the proxies on this URL.
return {}
for host in no_proxy:
if netloc.endswith(host) or netloc.split(':')[0].endswith(host):
# The URL does match something in no_proxy, so we don't want
# to apply the proxies on this URL.
return {}
# If the system proxy settings indicate that this URL should be bypassed,
# don't proxy.
if proxy_bypass(netloc):
return {}
# If we get here, we either didn't have no_proxy set or we're not going
# anywhere that no_proxy applies to.
proxies = [(key, get_proxy(key + '_proxy')) for key in proxy_keys]
return dict([(key, val) for (key, val) in proxies if val])
# anywhere that no_proxy applies to, and the system settings don't require
# bypassing the proxy for the current URL.
return getproxies()
def default_user_agent():
def default_user_agent(name="python-requests"):
"""Return a string representing the default user agent."""
_implementation = platform.python_implementation()
@ -444,7 +522,7 @@ def default_user_agent():
p_system = 'Unknown'
p_release = 'Unknown'
return " ".join(['python-requests/%s' % __version__,
return " ".join(['%s/%s' % (name, __version__),
'%s/%s' % (_implementation, _implementation_version),
'%s/%s' % (p_system, p_release)])
@ -526,25 +604,40 @@ def guess_json_utf(data):
return None
def prepend_scheme_if_needed(url, new_scheme):
'''Given a URL that may or may not have a scheme, prepend the given scheme.
Does not replace a present scheme with the one provided as an argument.'''
scheme, netloc, path, params, query, fragment = urlparse(url, new_scheme)
# urlparse is a finicky beast, and sometimes decides that there isn't a
# netloc present. Assume that it's being over-cautious, and switch netloc
# and path if urlparse decided there was no netloc.
if not netloc:
netloc, path = path, netloc
def except_on_missing_scheme(url):
"""Given a URL, raise a MissingSchema exception if the scheme is missing.
"""
scheme, netloc, path, params, query, fragment = urlparse(url)
return urlunparse((scheme, netloc, path, params, query, fragment))
if not scheme:
raise MissingSchema('Proxy URLs must have explicit schemes.')
def get_auth_from_url(url):
"""Given a url with authentication components, extract them into a tuple of
username,password."""
if url:
url = unquote(url)
parsed = urlparse(url)
return (parsed.username, parsed.password)
else:
return ('', '')
def to_native_string(string, encoding='ascii'):
"""
Given a string object, regardless of type, returns a representation of that
string in the native string type, encoding and decoding where necessary.
This assumes ASCII unless told otherwise.
"""
out = None
if isinstance(string, builtin_str):
out = string
else:
if is_py2:
out = string.encode(encoding)
else:
out = string.decode(encoding)
return out
