Merge branch 'refs/heads/develop' into tv

Conflicts:
	couchpotato/core/media/movie/_base/main.py
	couchpotato/core/providers/torrent/bitsoup/main.py
	couchpotato/core/providers/torrent/iptorrents/main.py
	couchpotato/core/providers/torrent/sceneaccess/main.py
	couchpotato/core/providers/torrent/torrentleech/main.py
	couchpotato/core/providers/torrent/torrentshack/main.py
Branch: old/tv
Author: Ruud, 11 years ago
Commit: 2a2fe448e7
95 changed files:

  1. couchpotato/core/_base/_core/main.py (4 changes)
  2. couchpotato/core/downloaders/base.py (4 changes)
  3. couchpotato/core/downloaders/blackhole/main.py (1 change)
  4. couchpotato/core/downloaders/deluge/main.py (11 changes)
  5. couchpotato/core/downloaders/nzbget/__init__.py (7 changes)
  6. couchpotato/core/downloaders/nzbget/main.py (12 changes)
  7. couchpotato/core/downloaders/nzbvortex/main.py (8 changes)
  8. couchpotato/core/downloaders/pneumatic/main.py (1 change)
  9. couchpotato/core/downloaders/rtorrent/main.py (4 changes)
  10. couchpotato/core/downloaders/sabnzbd/main.py (6 changes)
  11. couchpotato/core/downloaders/synology/main.py (2 changes)
  12. couchpotato/core/downloaders/transmission/main.py (4 changes)
  13. couchpotato/core/downloaders/utorrent/main.py (73 changes)
  14. couchpotato/core/helpers/encoding.py (8 changes)
  15. couchpotato/core/helpers/variable.py (6 changes)
  16. couchpotato/core/media/movie/_base/main.py (1 change)
  17. couchpotato/core/media/movie/searcher/main.py (4 changes)
  18. couchpotato/core/notifications/boxcar/main.py (4 changes)
  19. couchpotato/core/notifications/prowl/main.py (2 changes)
  20. couchpotato/core/notifications/pushalot/main.py (2 changes)
  21. couchpotato/core/notifications/pushbullet/main.py (4 changes)
  22. couchpotato/core/notifications/trakt/main.py (2 changes)
  23. couchpotato/core/notifications/xbmc/main.py (23 changes)
  24. couchpotato/core/plugins/base.py (79 changes)
  25. couchpotato/core/plugins/file/main.py (2 changes)
  26. couchpotato/core/plugins/quality/main.py (1 change)
  27. couchpotato/core/plugins/release/main.py (9 changes)
  28. couchpotato/core/plugins/renamer/main.py (38 changes)
  29. couchpotato/core/plugins/scanner/main.py (3 changes)
  30. couchpotato/core/providers/automation/imdb/__init__.py (2 changes)
  31. couchpotato/core/providers/base.py (30 changes)
  32. couchpotato/core/providers/info/_modifier/main.py (7 changes)
  33. couchpotato/core/providers/info/couchpotatoapi/main.py (4 changes)
  34. couchpotato/core/providers/info/omdbapi/main.py (8 changes)
  35. couchpotato/core/providers/info/themoviedb/main.py (18 changes)
  36. couchpotato/core/providers/metadata/xbmc/main.py (4 changes)
  37. couchpotato/core/providers/nzb/binsearch/main.py (4 changes)
  38. couchpotato/core/providers/nzb/newznab/main.py (1 change)
  39. couchpotato/core/providers/torrent/bithdtv/main.py (6 changes)
  40. couchpotato/core/providers/torrent/hdbits/main.py (11 changes)
  41. couchpotato/core/providers/torrent/ilovetorrents/main.py (8 changes)
  42. couchpotato/core/providers/torrent/passthepopcorn/main.py (6 changes)
  43. couchpotato/core/providers/torrent/torrentbytes/main.py (6 changes)
  44. couchpotato/core/providers/torrent/torrentday/main.py (11 changes)
  45. couchpotato/core/providers/userscript/base.py (4 changes)
  46. couchpotato/core/providers/userscript/imdb/main.py (2 changes)
  47. couchpotato/core/providers/userscript/tmdb/main.py (2 changes)
  48. couchpotato/runner.py (2 changes)
  49. libs/requests/__init__.py (8 changes)
  50. libs/requests/adapters.py (96 changes)
  51. libs/requests/auth.py (43 changes)
  52. libs/requests/cacert.pem (8212 changes)
  53. libs/requests/compat.py (6 changes)
  54. libs/requests/cookies.py (70 changes)
  55. libs/requests/exceptions.py (10 changes)
  56. libs/requests/models.py (191 changes)
  57. libs/requests/packages/charade/__init__.py (34 changes)
  58. libs/requests/packages/charade/__main__.py (7 changes)
  59. libs/requests/packages/charade/jpcntx.py (2 changes)
  60. libs/requests/packages/charade/latin1prober.py (2 changes)
  61. libs/requests/packages/charade/universaldetector.py (12 changes)
  62. libs/requests/packages/urllib3/__init__.py (2 changes)
  63. libs/requests/packages/urllib3/_collections.py (25 changes)
  64. libs/requests/packages/urllib3/connection.py (107 changes)
  65. libs/requests/packages/urllib3/connectionpool.py (348 changes)
  66. libs/requests/packages/urllib3/contrib/ntlmpool.py (2 changes)
  67. libs/requests/packages/urllib3/contrib/pyopenssl.py (195 changes)
  68. libs/requests/packages/urllib3/exceptions.py (30 changes)
  69. libs/requests/packages/urllib3/fields.py (177 changes)
  70. libs/requests/packages/urllib3/filepost.py (59 changes)
  71. libs/requests/packages/urllib3/packages/ssl_match_hostname/__init__.py (74 changes)
  72. libs/requests/packages/urllib3/packages/ssl_match_hostname/_implementation.py (105 changes)
  73. libs/requests/packages/urllib3/poolmanager.py (106 changes)
  74. libs/requests/packages/urllib3/request.py (2 changes)
  75. libs/requests/packages/urllib3/response.py (85 changes)
  76. libs/requests/packages/urllib3/util.py (283 changes)
  77. libs/requests/sessions.py (155 changes)
  78. libs/requests/status_codes.py (3 changes)
  79. libs/requests/utils.py (155 changes)
  80. libs/tornado/__init__.py (4 changes)
  81. libs/tornado/concurrent.py (7 changes)
  82. libs/tornado/curl_httpclient.py (6 changes)
  83. libs/tornado/escape.py (12 changes)
  84. libs/tornado/gen.py (12 changes)
  85. libs/tornado/httpclient.py (59 changes)
  86. libs/tornado/ioloop.py (3 changes)
  87. libs/tornado/iostream.py (1 change)
  88. libs/tornado/locale.py (2 changes)
  89. libs/tornado/log.py (24 changes)
  90. libs/tornado/netutil.py (10 changes)
  91. libs/tornado/platform/asyncio.py (2 changes)
  92. libs/tornado/platform/twisted.py (6 changes)
  93. libs/tornado/template.py (15 changes)
  94. libs/tornado/web.py (4 changes)
  95. libs/tornado/websocket.py (1 change)

couchpotato/core/_base/_core/main.py (4 changes)

@@ -55,6 +55,10 @@ class Core(Plugin):
         if not Env.get('desktop'):
             self.signalHandler()
 
+        # Set default urlopen timeout
+        import socket
+        socket.setdefaulttimeout(30)
+
     def md5Password(self, value):
         return md5(value) if value else ''

couchpotato/core/downloaders/base.py (4 changes)

@@ -13,6 +13,7 @@ class Downloader(Provider):
     protocol = []
     http_time_between_calls = 0
+    status_support = True
 
     torrent_sources = [
         'http://torrage.com/torrent/%s.torrent',
@@ -69,7 +70,7 @@ class Downloader(Provider):
         return
 
     def getAllDownloadStatus(self, ids):
-        return
+        return []
 
     def _removeFailed(self, release_download):
         if self.isDisabled(manual = True, data = {}):
@@ -133,6 +134,7 @@ class Downloader(Provider):
     def downloadReturnId(self, download_id):
         return {
             'downloader': self.getName(),
+            'status_support': self.status_support,
             'id': download_id
         }
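
Every downloader reports a snatch through downloadReturnId; with this change the result also advertises whether the downloader can be polled for progress later. A hypothetical return value (downloader name and id made up for illustration):

    result = {
        'downloader': 'Blackhole',   # self.getName()
        'status_support': False,     # black-hole folders cannot report progress
        'id': 'a1b2c3d4'             # downloader-specific download id
    }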

couchpotato/core/downloaders/blackhole/main.py (1 change)

@@ -11,6 +11,7 @@ log = CPLog(__name__)
 class Blackhole(Downloader):
 
     protocol = ['nzb', 'torrent', 'torrent_magnet']
+    status_support = False
 
     def download(self, data = None, media = None, filedata = None):
         if not media: media = {}

couchpotato/core/downloaders/deluge/main.py (11 changes)

@@ -91,19 +91,18 @@ class Deluge(Downloader):
         log.debug('Checking Deluge download status.')
 
         if not self.connect():
-            return False
+            return []
 
         release_downloads = ReleaseDownloadList(self)
 
-        queue = self.drpc.get_alltorrents()
+        queue = self.drpc.get_alltorrents(ids)
 
         if not queue:
             log.debug('Nothing in queue or error')
-            return False
+            return []
 
         for torrent_id in queue:
             torrent = queue[torrent_id]
 
-            if torrent['hash'] in ids:
-
-                log.debug('name=%s / id=%s / save_path=%s / move_completed_path=%s / hash=%s / progress=%s / state=%s / eta=%s / ratio=%s / stop_ratio=%s / is_seed=%s / is_finished=%s / paused=%s', (torrent['name'], torrent['hash'], torrent['save_path'], torrent['move_completed_path'], torrent['hash'], torrent['progress'], torrent['state'], torrent['eta'], torrent['ratio'], torrent['stop_ratio'], torrent['is_seed'], torrent['is_finished'], torrent['paused']))
+            log.debug('name=%s / id=%s / save_path=%s / move_completed_path=%s / hash=%s / progress=%s / state=%s / eta=%s / ratio=%s / stop_ratio=%s / is_seed=%s / is_finished=%s / paused=%s', (torrent['name'], torrent['hash'], torrent['save_path'], torrent['move_completed_path'], torrent['hash'], torrent['progress'], torrent['state'], torrent['eta'], torrent['ratio'], torrent['stop_ratio'], torrent['is_seed'], torrent['is_finished'], torrent['paused']))
 
             # Deluge has no easy way to work out if a torrent is stalled or failing.
@@ -209,11 +208,11 @@ class DelugeRPC(object):
         return torrent_id
 
-    def get_alltorrents(self):
+    def get_alltorrents(self, ids):
         ret = False
         try:
             self.connect()
-            ret = self.client.core.get_torrents_status({}, {}).get()
+            ret = self.client.core.get_torrents_status({'id': ids}, {}).get()
         except Exception, err:
             log.error('Failed to get all torrents: %s %s', (err, traceback.format_exc()))
         finally:

couchpotato/core/downloaders/nzbget/__init__.py (7 changes)

@@ -26,6 +26,13 @@ config = [{
             'description': 'Hostname with port. Usually <strong>localhost:6789</strong>',
         },
         {
+            'name': 'ssl',
+            'default': 0,
+            'type': 'bool',
+            'advanced': True,
+            'description': 'Use HyperText Transfer Protocol Secure, or <strong>https</strong>',
+        },
+        {
             'name': 'username',
             'default': 'nzbget',
             'advanced': True,

couchpotato/core/downloaders/nzbget/main.py (12 changes)

@@ -17,7 +17,7 @@ class NZBGet(Downloader):
 
     protocol = ['nzb']
 
-    url = 'http://%(username)s:%(password)s@%(host)s/xmlrpc'
+    url = '%(protocol)s://%(username)s:%(password)s@%(host)s/xmlrpc'
 
     def download(self, data = None, media = None, filedata = None):
         if not media: media = {}
@@ -29,7 +29,7 @@ class NZBGet(Downloader):
 
         log.info('Sending "%s" to NZBGet.', data.get('name'))
 
-        url = self.url % {'host': self.conf('host'), 'username': self.conf('username'), 'password': self.conf('password')}
+        url = self.url % {'protocol': 'https' if self.conf('ssl') else 'http', 'host': self.conf('host'), 'username': self.conf('username'), 'password': self.conf('password')}
         nzb_name = ss('%s.nzb' % self.createNzbName(data, media))
 
         rpc = xmlrpclib.ServerProxy(url)
@@ -71,7 +71,7 @@ class NZBGet(Downloader):
 
         log.debug('Checking NZBGet download status.')
 
-        url = self.url % {'host': self.conf('host'), 'username': self.conf('username'), 'password': self.conf('password')}
+        url = self.url % {'protocol': 'https' if self.conf('ssl') else 'http', 'host': self.conf('host'), 'username': self.conf('username'), 'password': self.conf('password')}
         rpc = xmlrpclib.ServerProxy(url)
 
         try:
@@ -81,13 +81,13 @@ class NZBGet(Downloader):
                 log.info('Successfully connected to NZBGet, but unable to send a message')
         except socket.error:
             log.error('NZBGet is not responding. Please ensure that NZBGet is running and host setting is correct.')
-            return False
+            return []
         except xmlrpclib.ProtocolError, e:
             if e.errcode == 401:
                 log.error('Password is incorrect.')
             else:
                 log.error('Protocol Error: %s', e)
-            return False
+            return []
 
         # Get NZBGet data
         try:
@@ -97,7 +97,7 @@ class NZBGet(Downloader):
             history = rpc.history()
         except:
             log.error('Failed getting data: %s', traceback.format_exc(1))
-            return False
+            return []
 
         release_downloads = ReleaseDownloadList(self)
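
With the new ssl toggle, both download() and getAllDownloadStatus() build the XML-RPC endpoint the same way; a standalone sketch of the interpolation (config values are hypothetical):

    conf = {'ssl': 1, 'host': 'localhost:6789', 'username': 'nzbget', 'password': 'secret'}

    url = '%(protocol)s://%(username)s:%(password)s@%(host)s/xmlrpc' % {
        'protocol': 'https' if conf['ssl'] else 'http',
        'host': conf['host'],
        'username': conf['username'],
        'password': conf['password'],
    }
    print url  # https://nzbget:secret@localhost:6789/xmlrpc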

couchpotato/core/downloaders/nzbvortex/main.py (8 changes)

@@ -32,7 +32,7 @@ class NZBVortex(Downloader):
         # Send the nzb
         try:
             nzb_filename = self.createFileName(data, filedata, media)
-            self.call('nzb/add', params = {'file': (nzb_filename, filedata)}, multipart = True)
+            self.call('nzb/add', files = {'file': (nzb_filename, filedata)})
 
             time.sleep(10)
             raw_statuses = self.call('nzb')
@@ -117,10 +117,9 @@ class NZBVortex(Downloader):
 
         params = tryUrlencode(parameters)
         url = cleanHost(self.conf('host')) + 'api/' + call
-        url_opener = urllib2.build_opener(HTTPSHandler())
 
         try:
-            data = self.urlopen('%s?%s' % (url, params), opener = url_opener, *args, **kwargs)
+            data = self.urlopen('%s?%s' % (url, params), *args, **kwargs)
             if data:
                 return json.loads(data)
@@ -142,10 +141,9 @@ class NZBVortex(Downloader):
         if not self.api_level:
 
             url = cleanHost(self.conf('host')) + 'api/app/apilevel'
-            url_opener = urllib2.build_opener(HTTPSHandler())
 
             try:
-                data = self.urlopen(url, opener = url_opener, show_error = False)
+                data = self.urlopen(url, show_error = False)
                 self.api_level = float(json.loads(data).get('apilevel'))
             except URLError, e:
                 if hasattr(e, 'code') and e.code == 403:

couchpotato/core/downloaders/pneumatic/main.py (1 change)

@@ -11,6 +11,7 @@ class Pneumatic(Downloader):
 
     protocol = ['nzb']
     strm_syntax = 'plugin://plugin.program.pneumatic/?mode=strm&type=add_file&nzb=%s&nzbname=%s'
+    status_support = False
 
     def download(self, data = None, media = None, filedata = None):
         if not media: media = {}

couchpotato/core/downloaders/rtorrent/main.py (4 changes)

@@ -147,7 +147,7 @@ class rTorrent(Downloader):
         log.debug('Checking rTorrent download status.')
 
         if not self.connect():
-            return False
+            return []
 
         try:
             torrents = self.rt.get_torrents()
@@ -182,7 +182,7 @@ class rTorrent(Downloader):
 
         except Exception, err:
             log.error('Failed to get status from rTorrent: %s', err)
-            return False
+            return []
 
     def pause(self, release_download, pause = True):
         if not self.connect():

couchpotato/core/downloaders/sabnzbd/main.py (6 changes)

@@ -43,7 +43,7 @@ class Sabnzbd(Downloader):
 
         try:
             if nzb_filename and req_params.get('mode') is 'addfile':
-                sab_data = self.call(req_params, params = {'nzbfile': (ss(nzb_filename), filedata)}, multipart = True)
+                sab_data = self.call(req_params, files = {'nzbfile': (ss(nzb_filename), filedata)})
             else:
                 sab_data = self.call(req_params)
         except URLError:
@@ -75,7 +75,7 @@ class Sabnzbd(Downloader):
             })
         except:
             log.error('Failed getting queue: %s', traceback.format_exc(1))
-            return False
+            return []
 
         # Go through history items
         try:
@@ -85,7 +85,7 @@ class Sabnzbd(Downloader):
             })
         except:
             log.error('Failed getting history json: %s', traceback.format_exc(1))
-            return False
+            return []
 
         release_downloads = ReleaseDownloadList(self)

couchpotato/core/downloaders/synology/main.py (2 changes)

@@ -11,7 +11,7 @@ log = CPLog(__name__)
 class Synology(Downloader):
 
     protocol = ['nzb', 'torrent', 'torrent_magnet']
-    log = CPLog(__name__)
+    status_support = False
 
     def download(self, data = None, media = None, filedata = None):
         if not media: media = {}

couchpotato/core/downloaders/transmission/main.py (4 changes)

@@ -88,7 +88,7 @@ class Transmission(Downloader):
         log.debug('Checking Transmission download status.')
 
         if not self.connect():
-            return False
+            return []
 
         release_downloads = ReleaseDownloadList(self)
 
@@ -99,7 +99,7 @@ class Transmission(Downloader):
         queue = self.trpc.get_alltorrents(return_params)
 
         if not (queue and queue.get('torrents')):
             log.debug('Nothing in queue or error')
-            return False
+            return []
 
         for torrent in queue['torrents']:
             if torrent['hashString'] in ids:

couchpotato/core/downloaders/utorrent/main.py (73 changes)

@@ -24,6 +24,16 @@ class uTorrent(Downloader):
 
     protocol = ['torrent', 'torrent_magnet']
     utorrent_api = None
+    status_flags = {
+        'STARTED' : 1,
+        'CHECKING' : 2,
+        'CHECK-START' : 4,
+        'CHECKED' : 8,
+        'ERROR' : 16,
+        'PAUSED' : 32,
+        'QUEUED' : 64,
+        'LOADED' : 128
+    }
 
     def connect(self):
         # Load host from config and split out port.
@@ -40,7 +50,7 @@ class uTorrent(Downloader):
         if not media: media = {}
         if not data: data = {}
 
-        log.debug('Sending "%s" (%s) to uTorrent.', (data.get('name'), data.get('protocol')))
+        log.debug("Sending '%s' (%s) to uTorrent.", (data.get('name'), data.get('protocol')))
 
         if not self.connect():
             return False
@@ -75,7 +85,7 @@ class uTorrent(Downloader):
             torrent_hash = re.findall('urn:btih:([\w]{32,40})', data.get('url'))[0].upper()
             torrent_params['trackers'] = '%0D%0A%0D%0A'.join(self.torrent_trackers)
         else:
-            info = bdecode(filedata)["info"]
+            info = bdecode(filedata)['info']
             torrent_hash = sha1(benc(info)).hexdigest().upper()
             torrent_filename = self.createFileName(data, filedata, media)
@@ -110,23 +120,23 @@ class uTorrent(Downloader):
         log.debug('Checking uTorrent download status.')
 
         if not self.connect():
-            return False
+            return []
 
         release_downloads = ReleaseDownloadList(self)
 
         data = self.utorrent_api.get_status()
         if not data:
             log.error('Error getting data from uTorrent')
-            return False
+            return []
 
         queue = json.loads(data)
         if queue.get('error'):
             log.error('Error getting data from uTorrent: %s', queue.get('error'))
-            return False
+            return []
 
         if not queue.get('torrents'):
             log.debug('Nothing in queue')
-            return False
+            return []
 
         # Get torrents
         for torrent in queue['torrents']:
@@ -140,21 +150,10 @@ class uTorrent(Downloader):
             except:
                 log.debug('Failed getting files from torrent: %s', torrent[2])
 
-            status_flags = {
-                "STARTED" : 1,
-                "CHECKING" : 2,
-                "CHECK-START" : 4,
-                "CHECKED" : 8,
-                "ERROR" : 16,
-                "PAUSED" : 32,
-                "QUEUED" : 64,
-                "LOADED" : 128
-            }
-
             status = 'busy'
-            if (torrent[1] & status_flags["STARTED"] or torrent[1] & status_flags["QUEUED"]) and torrent[4] == 1000:
+            if (torrent[1] & self.status_flags['STARTED'] or torrent[1] & self.status_flags['QUEUED']) and torrent[4] == 1000:
                 status = 'seeding'
-            elif (torrent[1] & status_flags["ERROR"]):
+            elif (torrent[1] & self.status_flags['ERROR']):
                 status = 'failed'
             elif torrent[4] == 1000:
                 status = 'completed'
@@ -224,7 +223,7 @@ class uTorrentAPI(object):
             if time.time() > self.last_time + 1800:
                 self.last_time = time.time()
                 self.token = self.get_token()
-        request = urllib2.Request(self.url + "?token=" + self.token + "&" + action, data)
+        request = urllib2.Request(self.url + '?token=' + self.token + '&' + action, data)
 
         try:
             open_request = self.opener.open(request)
             response = open_request.read()
@@ -244,52 +243,52 @@ class uTorrentAPI(object):
         return False
 
     def get_token(self):
-        request = self.opener.open(self.url + "token.html")
-        token = re.findall("<div.*?>(.*?)</", request.read())[0]
+        request = self.opener.open(self.url + 'token.html')
+        token = re.findall('<div.*?>(.*?)</', request.read())[0]
         return token
 
     def add_torrent_uri(self, filename, torrent, add_folder = False):
-        action = "action=add-url&s=%s" % urllib.quote(torrent)
+        action = 'action=add-url&s=%s' % urllib.quote(torrent)
         if add_folder:
-            action += "&path=%s" % urllib.quote(filename)
+            action += '&path=%s' % urllib.quote(filename)
        return self._request(action)
 
     def add_torrent_file(self, filename, filedata, add_folder = False):
-        action = "action=add-file"
+        action = 'action=add-file'
         if add_folder:
-            action += "&path=%s" % urllib.quote(filename)
+            action += '&path=%s' % urllib.quote(filename)
-        return self._request(action, {"torrent_file": (ss(filename), filedata)})
+        return self._request(action, {'torrent_file': (ss(filename), filedata)})
 
     def set_torrent(self, hash, params):
-        action = "action=setprops&hash=%s" % hash
+        action = 'action=setprops&hash=%s' % hash
         for k, v in params.iteritems():
-            action += "&s=%s&v=%s" % (k, v)
+            action += '&s=%s&v=%s' % (k, v)
         return self._request(action)
 
     def pause_torrent(self, hash, pause = True):
         if pause:
-            action = "action=pause&hash=%s" % hash
+            action = 'action=pause&hash=%s' % hash
         else:
-            action = "action=unpause&hash=%s" % hash
+            action = 'action=unpause&hash=%s' % hash
         return self._request(action)
 
     def stop_torrent(self, hash):
-        action = "action=stop&hash=%s" % hash
+        action = 'action=stop&hash=%s' % hash
         return self._request(action)
 
     def remove_torrent(self, hash, remove_data = False):
         if remove_data:
-            action = "action=removedata&hash=%s" % hash
+            action = 'action=removedata&hash=%s' % hash
         else:
-            action = "action=remove&hash=%s" % hash
+            action = 'action=remove&hash=%s' % hash
         return self._request(action)
 
     def get_status(self):
-        action = "list=1"
+        action = 'list=1'
         return self._request(action)
 
     def get_settings(self):
-        action = "action=getsettings"
+        action = 'action=getsettings'
         settings_dict = {}
         try:
             utorrent_settings = json.loads(self._request(action))
@@ -321,5 +320,5 @@ class uTorrentAPI(object):
         return self._request(action)
 
     def get_files(self, hash):
-        action = "action=getfiles&hash=%s" % hash
+        action = 'action=getfiles&hash=%s' % hash
         return self._request(action)
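
The status value uTorrent returns (torrent[1] in the web API's list payload) is a bitmask, which is why the flag table is now a class attribute instead of being rebuilt for every torrent. A standalone sketch of the same decoding, with made-up inputs (torrent[4] is progress in per-mille):

    status_flags = {
        'STARTED' : 1, 'CHECKING' : 2, 'CHECK-START' : 4, 'CHECKED' : 8,
        'ERROR' : 16, 'PAUSED' : 32, 'QUEUED' : 64, 'LOADED' : 128
    }

    def decode_status(flags, progress):
        status = 'busy'
        if (flags & status_flags['STARTED'] or flags & status_flags['QUEUED']) and progress == 1000:
            status = 'seeding'
        elif flags & status_flags['ERROR']:
            status = 'failed'
        elif progress == 1000:
            status = 'completed'
        return status

    print decode_status(status_flags['STARTED'] | status_flags['CHECKED'], 1000)  # seeding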

couchpotato/core/helpers/encoding.py (8 changes)

@@ -60,9 +60,17 @@ def sp(path, *args):
 
     path = os.path.normcase(os.path.normpath(ss(path, *args)))
 
+    # Remove any trailing path separators
     if path != os.path.sep:
         path = path.rstrip(os.path.sep)
 
+    # Add a trailing separator in case it is a root folder on windows (crashes guessit)
+    if len(path) == 2 and path[1] == ':':
+        path = path + os.path.sep
+
+    # Replace *NIX ambiguous '//' at the beginning of a path with '/' (crashes guessit)
+    path = re.sub('^//', '/', path)
+
     return path
 
 def ek(original, *args):
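
Both new branches guard against path shapes that crash guessit; a simplified standalone copy of sp() (without the ss() encoding step) shows the behavior:

    import os
    import re

    def sp(path):
        path = os.path.normcase(os.path.normpath(path))
        # Strip trailing separators, but never reduce '/' to ''
        if path != os.path.sep:
            path = path.rstrip(os.path.sep)
        # Re-add the separator for a bare windows drive root: 'c:' -> 'c:\'
        if len(path) == 2 and path[1] == ':':
            path = path + os.path.sep
        # Collapse the POSIX-ambiguous leading '//' to '/'
        path = re.sub('^//', '/', path)
        return path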

couchpotato/core/helpers/variable.py (6 changes)

@@ -2,7 +2,7 @@ from couchpotato.core.helpers.encoding import simplifyString, toSafeString, ss
 from couchpotato.core.logger import CPLog
 import collections
 import hashlib
-import os.path
+import os
 import platform
 import random
 import re
@@ -219,3 +219,7 @@ def splitString(str, split_on = ',', clean = True):
 
 def dictIsSubset(a, b):
     return all([k in b and b[k] == v for k, v in a.items()])
+
+def isSubFolder(sub_folder, base_folder):
+    # Returns True if sub_folder is the same as or in base_folder
+    return base_folder.rstrip(os.path.sep) + os.path.sep in sub_folder.rstrip(os.path.sep) + os.path.sep
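
Padding both sides with a separator makes the containment test boundary-safe, unlike the plain substring checks it replaces in the renamer; hypothetical paths:

    import os

    def isSubFolder(sub_folder, base_folder):
        # Returns True if sub_folder is the same as or inside base_folder
        return base_folder.rstrip(os.path.sep) + os.path.sep in sub_folder.rstrip(os.path.sep) + os.path.sep

    print isSubFolder('/downloads/movie', '/downloads')      # True
    print isSubFolder('/downloads', '/downloads')            # True, same folder
    print isSubFolder('/downloads-tmp/movie', '/downloads')  # False, only a prefix match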

couchpotato/core/media/movie/_base/main.py (1 change)

@@ -35,6 +35,7 @@ class MovieBase(MovieTypeBase):
             'params': {
                 'id': {'desc': 'Movie ID(s) you want to edit.', 'type': 'int (comma separated)'},
                 'profile_id': {'desc': 'ID of quality profile you want the edit the movie to.'},
+                'category_id': {'desc': 'ID of category you want the add the movie in. If empty will use no category.'},
                 'default_title': {'desc': 'Movie title to use for searches. Has to be one of the titles returned by movie.search.'},
             }
         })

couchpotato/core/media/movie/searcher/main.py (4 changes)

@@ -283,6 +283,10 @@ class MovieSearcher(SearcherBase, MovieTypeBase):
                 return True
         else:
 
+            # Don't allow movies with years too far in the future
+            if year is not None and year > now_year + 1:
+                return False
+
             # For movies before 1972
             if not dates or dates.get('theater', 0) < 0 or dates.get('dvd', 0) < 0:
                 return True
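
The guard allows a release to be dated at most one year ahead of the current year; a minimal standalone sketch (now_year comes from the surrounding method, here recomputed):

    from datetime import date

    now_year = date.today().year

    def too_far_in_future(year):
        return year is not None and year > now_year + 1

    print too_far_in_future(now_year + 2)  # True, rejected
    print too_far_in_future(now_year + 1)  # False, still allowed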

couchpotato/core/notifications/boxcar/main.py (4 changes)

@@ -16,14 +16,14 @@ class Boxcar(Notification):
         try:
             message = message.strip()
 
-            params = {
+            data = {
                 'email': self.conf('email'),
                 'notification[from_screen_name]': self.default_title,
                 'notification[message]': toUnicode(message),
                 'notification[from_remote_service_id]': int(time.time()),
             }
 
-            self.urlopen(self.url, params = params)
+            self.urlopen(self.url, data = data)
         except:
             log.error('Check your email and added services on boxcar.io')
             return False

couchpotato/core/notifications/prowl/main.py (2 changes)

@@ -26,7 +26,7 @@ class Prowl(Notification):
         }
 
         try:
-            self.urlopen(self.urls['api'], headers = headers, params = data, multipart = True, show_error = False)
+            self.urlopen(self.urls['api'], headers = headers, data = data, show_error = False)
             log.info('Prowl notifications sent.')
             return True
         except:

couchpotato/core/notifications/pushalot/main.py (2 changes)

@@ -29,7 +29,7 @@ class Pushalot(Notification):
         }
 
         try:
-            self.urlopen(self.urls['api'], headers = headers, params = data, multipart = True, show_error = False)
+            self.urlopen(self.urls['api'], headers = headers, data = data, show_error = False)
             return True
         except:
             log.error('PushAlot failed: %s', traceback.format_exc())

couchpotato/core/notifications/pushbullet/main.py (4 changes)

@@ -74,9 +74,9 @@ class Pushbullet(Notification):
             }
 
             if cache:
-                return self.getJsonData(self.url % method, headers = headers, params = kwargs)
+                return self.getJsonData(self.url % method, headers = headers, data = kwargs)
             else:
-                data = self.urlopen(self.url % method, headers = headers, params = kwargs)
+                data = self.urlopen(self.url % method, headers = headers, data = kwargs)
                 return json.loads(data)
 
         except Exception, ex:

couchpotato/core/notifications/trakt/main.py (2 changes)

@@ -35,7 +35,7 @@ class Trakt(Notification):
 
     def call(self, method_url, post_data):
         try:
-            response = self.getJsonData(self.urls['base'] % method_url, params = post_data, cache_timeout = 1)
+            response = self.getJsonData(self.urls['base'] % method_url, data = post_data, cache_timeout = 1)
             if response:
                 if response.get('status') == "success":
                     log.info('Successfully called Trakt')

couchpotato/core/notifications/xbmc/main.py (23 changes)

@@ -7,6 +7,7 @@ import json
 import socket
 import traceback
 import urllib
+import requests
 
 log = CPLog(__name__)
 
@@ -167,22 +168,18 @@ class XBMC(Notification):
 
                 # manually fake expected response array
                 return [{'result': 'Error'}]
-        except URLError, e:
-            if isinstance(e.reason, socket.timeout):
-                log.info('Couldn\'t send request to XBMC, assuming it\'s turned off')
-                return [{'result': 'Error'}]
-            else:
-                log.error('Failed sending non-JSON-type request to XBMC: %s', traceback.format_exc())
-                return [{'result': 'Error'}]
+        except requests.exceptions.Timeout:
+            log.info2('Couldn\'t send request to XBMC, assuming it\'s turned off')
+            return [{'result': 'Error'}]
         except:
             log.error('Failed sending non-JSON-type request to XBMC: %s', traceback.format_exc())
             return [{'result': 'Error'}]
 
-    def request(self, host, requests):
+    def request(self, host, do_requests):
         server = 'http://%s/jsonrpc' % host
 
         data = []
-        for req in requests:
+        for req in do_requests:
             method, kwargs = req
             data.append({
                 'method': method,
@@ -202,16 +199,12 @@ class XBMC(Notification):
 
         try:
             log.debug('Sending request to %s: %s', (host, data))
-            response = self.getJsonData(server, headers = headers, params = data, timeout = 3, show_error = False)
+            response = self.getJsonData(server, headers = headers, data = data, timeout = 3, show_error = False)
             log.debug('Returned from request %s: %s', (host, response))
 
             return response
-        except URLError, e:
-            if isinstance(e.reason, socket.timeout):
-                log.info('Couldn\'t send request to XBMC, assuming it\'s turned off')
-                return []
-            else:
-                log.error('Failed sending request to XBMC: %s', traceback.format_exc())
-                return []
+        except requests.exceptions.Timeout:
+            log.info2('Couldn\'t send request to XBMC, assuming it\'s turned off')
+            return []
         except:
             log.error('Failed sending request to XBMC: %s', traceback.format_exc())
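
With the move to the shared requests-based urlopen, a powered-off XBMC host now surfaces as requests.exceptions.Timeout rather than a URLError wrapping socket.timeout. A minimal sketch of the distinction (the host address is hypothetical):

    import requests

    try:
        requests.get('http://192.168.1.50:8080/jsonrpc', timeout = 3)
    except requests.exceptions.Timeout:
        # Expected when the HTPC is simply turned off
        print 'XBMC not responding, assuming it is turned off'
    except requests.exceptions.RequestException:
        print 'Some other transport problem'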

couchpotato/core/plugins/base.py (79 changes)

@@ -1,19 +1,15 @@
-from StringIO import StringIO
 from couchpotato.core.event import fireEvent, addEvent
-from couchpotato.core.helpers.encoding import tryUrlencode, ss, toSafeString, \
+from couchpotato.core.helpers.encoding import ss, toSafeString, \
     toUnicode, sp
 from couchpotato.core.helpers.variable import getExt, md5, isLocalIP
 from couchpotato.core.logger import CPLog
 from couchpotato.environment import Env
-from multipartpost import MultipartPostHandler
+import requests
 from tornado import template
 from tornado.web import StaticFileHandler
 from urlparse import urlparse
-import cookielib
 import glob
-import gzip
 import inspect
-import math
 import os.path
 import re
 import time
@@ -39,6 +35,7 @@ class Plugin(object):
     http_time_between_calls = 0
     http_failed_request = {}
     http_failed_disabled = {}
+    http_opener = requests.Session()
 
     def __new__(typ, *args, **kwargs):
         new_plugin = super(Plugin, typ).__new__(typ)
@@ -106,7 +103,9 @@ class Plugin(object):
             f.close()
             os.chmod(path, Env.getPermission('file'))
         except Exception, e:
-            log.error('Unable writing to file "%s": %s', (path, e))
+            log.error('Unable writing to file "%s": %s', (path, traceback.format_exc()))
+            if os.path.isfile(path):
+                os.remove(path)
 
     def makeDir(self, path):
         path = ss(path)
@@ -120,11 +119,11 @@ class Plugin(object):
         return False
 
     # http request
-    def urlopen(self, url, timeout = 30, params = None, headers = None, opener = None, multipart = False, show_error = True):
+    def urlopen(self, url, timeout = 30, data = None, headers = None, files = None, show_error = True, return_raw = False):
         url = urllib2.quote(ss(url), safe = "%/:=&?~#+!$,;'@()*[]")
 
         if not headers: headers = {}
-        if not params: params = {}
+        if not data: data = {}
 
         # Fill in some headers
         parsed_url = urlparse(url)
@@ -137,6 +136,8 @@ class Plugin(object):
         headers['Connection'] = headers.get('Connection', 'keep-alive')
         headers['Cache-Control'] = headers.get('Cache-Control', 'max-age=0')
 
+        r = self.http_opener
+
         # Don't try for failed requests
         if self.http_failed_disabled.get(host, 0) > 0:
             if self.http_failed_disabled[host] > (time.time() - 900):
@@ -152,45 +153,18 @@ class Plugin(object):
         self.wait(host)
         try:
 
-            # Make sure opener has the correct headers
-            if opener:
-                opener.add_headers = headers
-
-            if multipart:
-                log.info('Opening multipart url: %s, params: %s', (url, [x for x in params.iterkeys()] if isinstance(params, dict) else 'with data'))
-                request = urllib2.Request(url, params, headers)
-
-                if opener:
-                    opener.add_handler(MultipartPostHandler())
-                else:
-                    cookies = cookielib.CookieJar()
-                    opener = urllib2.build_opener(urllib2.HTTPCookieProcessor(cookies), MultipartPostHandler)
-
-                response = opener.open(request, timeout = timeout)
-            else:
-                log.info('Opening url: %s, params: %s', (url, [x for x in params.iterkeys()] if isinstance(params, dict) else 'with data'))
-
-                if isinstance(params, (str, unicode)) and len(params) > 0:
-                    data = params
-                else:
-                    data = tryUrlencode(params) if len(params) > 0 else None
-
-                request = urllib2.Request(url, data, headers)
-
-                if opener:
-                    response = opener.open(request, timeout = timeout)
-                else:
-                    response = urllib2.urlopen(request, timeout = timeout)
-
-            # unzip if needed
-            if response.info().get('Content-Encoding') == 'gzip':
-                buf = StringIO(response.read())
-                f = gzip.GzipFile(fileobj = buf)
-                data = f.read()
-                f.close()
-            else:
-                data = response.read()
-            response.close()
+            kwargs = {
+                'headers': headers,
+                'data': data if len(data) > 0 else None,
+                'timeout': timeout,
+                'files': files,
+            }
+            method = 'post' if len(data) > 0 or files else 'get'
+
+            log.info('Opening url: %s %s, data: %s', (method, url, [x for x in data.iterkeys()] if isinstance(data, dict) else 'with data'))
+            response = r.request(method, url, verify = False, **kwargs)
+
+            data = response.content if return_raw else response.text
 
             self.http_failed_request[host] = 0
         except IOError:
@@ -218,15 +192,19 @@ class Plugin(object):
         return data
 
     def wait(self, host = ''):
+        if self.http_time_between_calls == 0:
+            return
+
         now = time.time()
 
         last_use = self.http_last_use.get(host, 0)
-        if last_use > 0:
 
-            wait = math.ceil(last_use - now + self.http_time_between_calls)
+        wait = (last_use - now) + self.http_time_between_calls
 
-            if wait > 0:
-                log.debug('Waiting for %s, %d seconds', (self.getName(), wait))
-                time.sleep(last_use - now + self.http_time_between_calls)
+        if wait > 0:
+            log.debug('Waiting for %s, %d seconds', (self.getName(), wait))
+            time.sleep(wait)
 
     def beforeCall(self, handler):
         self.isRunning('%s.%s' % (self.getName(), handler.__name__))
@@ -269,18 +247,19 @@ class Plugin(object):
         try:
 
             cache_timeout = 300
-            if kwargs.get('cache_timeout'):
+            if kwargs.has_key('cache_timeout'):
                 cache_timeout = kwargs.get('cache_timeout')
                 del kwargs['cache_timeout']
 
             data = self.urlopen(url, **kwargs)
-            if data:
+            if data and cache_timeout > 0:
                 self.setCache(cache_key, data, timeout = cache_timeout)
             return data
         except:
             if not kwargs.get('show_error', True):
                 raise
 
+            log.error('Failed getting cache: %s', (traceback.format_exc()))
+
             return ''
 
     def setCache(self, cache_key, value, timeout = 300):
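
The rewrite funnels every HTTP call through one module-level requests.Session, so cookies and pooled connections persist across calls and the gzip/multipart/cookielib plumbing disappears. A reduced standalone sketch of the same dispatch logic (failure bookkeeping and rate limiting left out):

    import requests

    session = requests.Session()

    def urlopen(url, timeout = 30, data = None, headers = None, files = None, return_raw = False):
        data = data or {}
        # POST whenever there is a body or a file upload, GET otherwise
        method = 'post' if len(data) > 0 or files else 'get'
        response = session.request(method, url,
            headers = headers,
            data = data if len(data) > 0 else None,
            files = files,
            timeout = timeout,
            verify = False)
        return response.content if return_raw else response.text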

couchpotato/core/plugins/file/main.py (2 changes)

@@ -93,7 +93,7 @@ class FileManager(Plugin):
             return dest
 
         try:
-            filedata = self.urlopen(url, **urlopen_kwargs)
+            filedata = self.urlopen(url, return_raw = True, **urlopen_kwargs)
         except:
             log.error('Failed downloading file %s: %s', (url, traceback.format_exc()))
             return False

couchpotato/core/plugins/quality/main.py (1 change)

@@ -314,6 +314,7 @@ class QualityPlugin(Plugin):
             'Movie.Name.1999.DVDRip-Group': 'dvdrip',
             'Movie.Name.1999.DVD-Rip-Group': 'dvdrip',
             'Movie.Name.1999.DVD-R-Group': 'dvdr',
+            'Movie.Name.Camelie.1999.720p.BluRay.x264-Group': '720p',
         }
 
         correct = 0

couchpotato/core/plugins/release/main.py (9 changes)

@@ -247,13 +247,13 @@ class Release(Plugin):
                 'files': {}
             }), manual = True)
 
-            if success:
+            if success == True:
                 db.expunge_all()
                 rel = db.query(Relea).filter_by(id = id).first() # Get release again @RuudBurger why do we need to get it again??
 
                 fireEvent('notify.frontend', type = 'release.manual_download', data = True, message = 'Successfully snatched "%s"' % item['name'])
 
             return {
-                'success': success
+                'success': success == True
             }
 
     def download(self, data, media, manual = False):
@@ -266,20 +266,21 @@ class Release(Plugin):
 
         # Test to see if any downloaders are enabled for this type
         downloader_enabled = fireEvent('download.enabled', manual, data, single = True)
        if not downloader_enabled:
-            log.info('Tried to download, but none of the "%s" downloaders are enabled or gave an error', (data.get('protocol')))
+            log.info('Tried to download, but none of the "%s" downloaders are enabled or gave an error', data.get('protocol'))
             return False
 
         # Download NZB or torrent file
         filedata = None
         if data.get('download') and (ismethod(data.get('download')) or isfunction(data.get('download'))):
             filedata = data.get('download')(url = data.get('url'), nzb_id = data.get('id'))
             if filedata == 'try_next':
+                log.info('Tried to download, but the "%s" provider gave an error', data.get('protocol'))
                 return filedata
 
         # Send NZB or torrent file to downloader
         download_result = fireEvent('download', data = data, media = media, manual = manual, filedata = filedata, single = True)
         if not download_result:
-            log.info('Tried to download, but the "%s" downloader gave an error', (data.get('protocol')))
+            log.info('Tried to download, but the "%s" downloader gave an error', data.get('protocol'))
             return False
         log.debug('Downloader result: %s', download_result)

couchpotato/core/plugins/renamer/main.py (38 changes)

@@ -3,7 +3,7 @@ from couchpotato.api import addApiView
 from couchpotato.core.event import addEvent, fireEvent, fireEventAsync
 from couchpotato.core.helpers.encoding import toUnicode, ss, sp
 from couchpotato.core.helpers.variable import getExt, mergeDicts, getTitle, \
-    getImdb, link, symlink, tryInt, splitString, fnEscape
+    getImdb, link, symlink, tryInt, splitString, fnEscape, isSubFolder
 from couchpotato.core.logger import CPLog
 from couchpotato.core.plugins.base import Plugin
 from couchpotato.core.settings.model import Library, File, Profile, Release, \
@@ -125,7 +125,7 @@ class Renamer(Plugin):
                 return
         else:
             for item in no_process:
-                if '%s%s' % (base_folder, os.path.sep) in item:
+                if isSubFolder(item, base_folder):
                     log.error('To protect your data, the media libraries can\'t be inside of or the same as the "from" folder.')
                     return
@@ -157,13 +157,13 @@ class Renamer(Plugin):
         if media_folder:
             for item in no_process:
-                if '%s%s' % (media_folder, os.path.sep) in item:
+                if isSubFolder(item, media_folder):
                     log.error('To protect your data, the media libraries can\'t be inside of or the same as the provided media folder.')
                     return
 
         # Make sure a checkSnatched marked all downloads/seeds as such
         if not release_download and self.conf('run_every') > 0:
-            fireEvent('renamer.check_snatched')
+            self.checkSnatched(fire_scan = False)
 
         self.renaming_started = True
@@ -508,7 +508,10 @@ class Renamer(Plugin):
                         os.remove(src)
 
                         parent_dir = os.path.dirname(src)
-                        if delete_folders.count(parent_dir) == 0 and os.path.isdir(parent_dir) and not '%s%s' % (parent_dir, os.path.sep) in [destination, media_folder] and not '%s%s' % (base_folder, os.path.sep) in parent_dir:
+                        if delete_folders.count(parent_dir) == 0 and os.path.isdir(parent_dir) and \
+                                not isSubFolder(destination, parent_dir) and not isSubFolder(media_folder, parent_dir) and \
+                                not isSubFolder(parent_dir, base_folder):
                             delete_folders.append(parent_dir)
 
                 except:
@@ -806,7 +809,7 @@ Remove it if you want it to be renamed (again, or at least let it try again)
             except:
                 loge('Couldn\'t remove empty directory %s: %s', (folder, traceback.format_exc()))
 
-    def checkSnatched(self):
+    def checkSnatched(self, fire_scan = True):
 
         if self.checking_snatched:
             log.debug('Already checking snatched')
@@ -829,20 +832,29 @@ Remove it if you want it to be renamed (again, or at least let it try again)
 
         # Collect all download information with the download IDs from the releases
         download_ids = []
+        no_status_support = []
         try:
             for rel in rels:
                 rel_dict = rel.to_dict({'info': {}})
                 if rel_dict['info'].get('download_id') and rel_dict['info'].get('download_downloader'):
                     download_ids.append({'id': rel_dict['info']['download_id'], 'downloader': rel_dict['info']['download_downloader']})
+
+                    ds = rel_dict['info'].get('download_status_support')
+                    if ds == False or ds == 'False':
+                        no_status_support.append(ss(rel_dict['info'].get('download_downloader')))
         except:
             log.error('Error getting download IDs from database')
             self.checking_snatched = False
             return False
 
-        release_downloads = fireEvent('download.status', download_ids, merge = True)
+        release_downloads = fireEvent('download.status', download_ids, merge = True) if download_ids else []
+
+        if len(no_status_support) > 0:
+            log.debug('Download status functionality is not implemented for one of the active downloaders: %s', no_status_support)
 
         if not release_downloads:
-            log.debug('Download status functionality is not implemented for any active downloaders.')
-            fireEvent('renamer.scan')
+            if fire_scan:
+                self.scan()
 
             self.checking_snatched = False
             return True
@@ -982,7 +994,7 @@ Remove it if you want it to be renamed (again, or at least let it try again)
                 if release_download['scan']:
                     if release_download['pause'] and self.conf('file_action') == 'link':
                         fireEvent('download.pause', release_download = release_download, pause = True, single = True)
-                    fireEvent('renamer.scan', release_download = release_download)
+                    self.scan(release_download = release_download)
                    if release_download['pause'] and self.conf('file_action') == 'link':
                         fireEvent('download.pause', release_download = release_download, pause = False, single = True)
                 if release_download['process_complete']:
@@ -993,8 +1005,8 @@ Remove it if you want it to be renamed (again, or at least let it try again)
                     # Ask the downloader to process the item
                     fireEvent('download.process_complete', release_download = release_download, single = True)
 
-        if scan_required:
-            fireEvent('renamer.scan')
+        if fire_scan and (scan_required or len(no_status_support) > 0):
+            self.scan()
 
         self.checking_snatched = False
         return True
@@ -1044,7 +1056,7 @@ Remove it if you want it to be renamed (again, or at least let it try again)
         return release_download['id'] and release_download['downloader'] and release_download['folder']
 
     def movieInFromFolder(self, media_folder):
-        return media_folder and '%s%s' % (sp(self.conf('from')), os.path.sep) in sp(media_folder) or not media_folder
+        return media_folder and isSubFolder(media_folder, sp(self.conf('from'))) or not media_folder
 
     def extractFiles(self, folder = None, media_folder = None, files = None, cleanup = False):
         if not files: files = []

couchpotato/core/plugins/scanner/main.py (3 changes)

@@ -80,7 +80,8 @@ class Scanner(Plugin):
         'hdtv': ['hdtv']
     }
 
-    clean = '[ _\,\.\(\)\[\]\-](extended.cut|directors.cut|french|swedisch|danish|dutch|swesub|spanish|german|ac3|dts|custom|dc|divx|divx5|dsr|dsrip|dutch|dvd|dvdr|dvdrip|dvdscr|dvdscreener|screener|dvdivx|cam|fragment|fs|hdtv|hdrip|hdtvrip|internal|limited|multisubs|ntsc|ogg|ogm|pal|pdtv|proper|repack|rerip|retail|r3|r5|bd5|se|svcd|swedish|german|read.nfo|nfofix|unrated|ws|telesync|ts|telecine|tc|brrip|bdrip|video_ts|audio_ts|480p|480i|576p|576i|720p|720i|1080p|1080i|hrhd|hrhdtv|hddvd|bluray|x264|h264|xvid|xvidvd|xxx|www.www|cd[1-9]|\[.*\])([ _\,\.\(\)\[\]\-]|$)'
+    clean = '[ _\,\.\(\)\[\]\-]?(extended.cut|directors.cut|french|swedisch|danish|dutch|swesub|spanish|german|ac3|dts|custom|dc|divx|divx5|dsr|dsrip|dutch|dvd|dvdr|dvdrip|dvdscr|dvdscreener|screener|dvdivx|cam|fragment|fs|hdtv|hdrip' \
+            '|hdtvrip|internal|limited|multisubs|ntsc|ogg|ogm|pal|pdtv|proper|repack|rerip|retail|r3|r5|bd5|se|svcd|swedish|german|read.nfo|nfofix|unrated|ws|telesync|ts|telecine|tc|brrip|bdrip|video_ts|audio_ts|480p|480i|576p|576i|720p|720i|1080p|1080i|hrhd|hrhdtv|hddvd|bluray|x264|h264|xvid|xvidvd|xxx|www.www|cd[1-9]|\[.*\])([ _\,\.\(\)\[\]\-]|$)'
 
     multipart_regex = [
         '[ _\.-]+cd[ _\.-]*([0-9a-d]+)', #*cd1
         '[ _\.-]+dvd[ _\.-]*([0-9a-d]+)', #*dvd1

couchpotato/core/providers/automation/imdb/__init__.py (2 changes)

@@ -59,7 +59,7 @@ config = [{
             {
                 'name': 'automation_charts_boxoffice',
                 'type': 'bool',
-                'label': 'Box offce TOP 10',
+                'label': 'Box office TOP 10',
                 'description': 'IMDB Box office <a href="http://www.imdb.com/chart/">TOP 10</a> chart',
                 'default': True,
             },

couchpotato/core/providers/base.py (30 changes)

@@ -5,12 +5,10 @@ from couchpotato.core.logger import CPLog
 from couchpotato.core.plugins.base import Plugin
 from couchpotato.environment import Env
 from urlparse import urlparse
-import cookielib
 import json
 import re
 import time
 import traceback
-import urllib2
 import xml.etree.ElementTree as XMLTree
 
 log = CPLog(__name__)
@@ -95,7 +93,7 @@ class Provider(Plugin):
 
     def getHTMLData(self, url, **kwargs):
 
-        cache_key = '%s%s' % (md5(url), md5('%s' % kwargs.get('params', {})))
+        cache_key = '%s%s' % (md5(url), md5('%s' % kwargs.get('data', {})))
 
         return self.getCache(cache_key, url, **kwargs)
 
@@ -111,8 +109,7 @@ class YarrProvider(Provider):
     sizeMb = ['mb', 'mib']
     sizeKb = ['kb', 'kib']
 
-    login_opener = None
-    last_login_check = 0
+    last_login_check = None
 
     def __init__(self):
         addEvent('provider.enabled_protocols', self.getEnabledProtocol)
@@ -131,35 +128,30 @@ class YarrProvider(Provider):
 
         # Check if we are still logged in every hour
         now = time.time()
-        if self.login_opener and self.last_login_check < (now - 3600):
+        if self.last_login_check and self.last_login_check < (now - 3600):
             try:
-                output = self.urlopen(self.urls['login_check'], opener = self.login_opener)
+                output = self.urlopen(self.urls['login_check'])
                 if self.loginCheckSuccess(output):
                     self.last_login_check = now
                     return True
-                else:
-                    self.login_opener = None
-            except:
-                self.login_opener = None
+            except: pass
+            self.last_login_check = None
 
-        if self.login_opener:
+        if self.last_login_check:
             return True
 
         try:
-            cookiejar = cookielib.CookieJar()
-            opener = urllib2.build_opener(urllib2.HTTPCookieProcessor(cookiejar))
-            output = self.urlopen(self.urls['login'], params = self.getLoginParams(), opener = opener)
+            output = self.urlopen(self.urls['login'], data = self.getLoginParams())
 
             if self.loginSuccess(output):
                 self.last_login_check = now
-                self.login_opener = opener
                 return True
 
             error = 'unknown'
         except:
             error = traceback.format_exc()
 
-        self.login_opener = None
+        self.last_login_check = None
         log.error('Failed to login %s: %s', (self.getName(), error))
         return False
@@ -173,12 +165,12 @@ class YarrProvider(Provider):
         try:
             if not self.login():
                 log.error('Failed downloading from %s', self.getName())
-            return self.urlopen(url, opener = self.login_opener)
+            return self.urlopen(url)
         except:
             log.error('Failed downloading from %s: %s', (self.getName(), traceback.format_exc()))
 
     def getLoginParams(self):
-        return ''
+        return {}
 
     def download(self, url = '', nzb_id = ''):
         try:
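
Since all providers now route through the shared session behind urlopen, logging in is a single POST whose cookies stick to the session; no login_opener has to be threaded through later calls. A minimal sketch with a hypothetical tracker and credentials:

    import requests

    session = requests.Session()

    # The login response sets a session cookie on the Session object
    session.post('https://tracker.example.com/login.php',
        data = {'username': 'user', 'password': 'pass'})

    # Subsequent requests send that cookie automatically
    response = session.get('https://tracker.example.com/browse.php')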

couchpotato/core/providers/info/_modifier/main.py (7 changes)

@@ -32,14 +32,17 @@ class Movie(ModifierBase):
             'poster': [],
             'backdrop': [],
             'poster_original': [],
-            'backdrop_original': []
+            'backdrop_original': [],
+            'actors': {}
         },
         'runtime': 0,
         'plot': '',
         'tagline': '',
         'imdb': '',
         'genres': [],
-        'mpaa': None
+        'mpaa': None,
+        'actors': [],
+        'actor_roles': {}
     }
 
     def __init__(self):

4
couchpotato/core/providers/info/couchpotatoapi/main.py

@@ -74,7 +74,7 @@ class CouchPotatoApi(MovieProvider):

         return True

-    def getInfo(self, identifier = None):
+    def getInfo(self, identifier = None, **kwargs):

         if not identifier:
             return

@@ -97,7 +97,7 @@ class CouchPotatoApi(MovieProvider):
         if not ignore: ignore = []
         if not movies: movies = []

-        suggestions = self.getJsonData(self.urls['suggest'], params = {
+        suggestions = self.getJsonData(self.urls['suggest'], data = {
             'movies': ','.join(movies),
             'ignore': ','.join(ignore),
         }, headers = self.getRequestHeaders())

8
couchpotato/core/providers/info/omdbapi/main.py

@@ -46,7 +46,7 @@ class OMDBAPI(MovieProvider):

         return []

-    def getInfo(self, identifier = None):
+    def getInfo(self, identifier = None, **kwargs):

         if not identifier:
             return {}

@@ -88,10 +88,6 @@ class OMDBAPI(MovieProvider):

             year = tryInt(movie.get('Year', ''))

-            actors = {}
-            for actor in splitString(movie.get('Actors', '')):
-                actors[actor] = '' #omdb does not return actor roles
-
             movie_data = {
                 'type': 'movie',
                 'via_imdb': True,

@@ -113,7 +109,7 @@ class OMDBAPI(MovieProvider):
                 'genres': splitString(movie.get('Genre', '')),
                 'directors': splitString(movie.get('Director', '')),
                 'writers': splitString(movie.get('Writer', '')),
-                'actor_roles': actors,
+                'actors': splitString(movie.get('Actors', '')),
             }

             movie_data = dict((k, v) for k, v in movie_data.iteritems() if v)
         except:

18
couchpotato/core/providers/info/themoviedb/main.py

@@ -11,8 +11,8 @@ log = CPLog(__name__)
 class TheMovieDb(MovieProvider):

     def __init__(self):
-        addEvent('info.search', self.search, priority = 2)
-        addEvent('movie.search', self.search, priority = 2)
+        #addEvent('info.search', self.search, priority = 2)
+        #addEvent('movie.search', self.search, priority = 2)
         addEvent('movie.info', self.getInfo, priority = 2)
         addEvent('movie.info_by_tmdb', self.getInfo)

@@ -45,7 +45,7 @@ class TheMovieDb(MovieProvider):
             nr = 0
             for movie in raw:
-                results.append(self.parseMovie(movie, with_titles = False))
+                results.append(self.parseMovie(movie, extended = False))

                 nr += 1
                 if nr == limit:

@@ -61,7 +61,7 @@ class TheMovieDb(MovieProvider):
         return results

-    def getInfo(self, identifier = None):
+    def getInfo(self, identifier = None, extended = True):

         if not identifier:
             return {}

@@ -73,14 +73,14 @@ class TheMovieDb(MovieProvider):
         try:
             log.debug('Getting info: %s', cache_key)
             movie = tmdb3.Movie(identifier)
-            result = self.parseMovie(movie)
+            result = self.parseMovie(movie, extended = extended)
             self.setCache(cache_key, result)
         except:
             pass

         return result

-    def parseMovie(self, movie, with_titles = True):
+    def parseMovie(self, movie, extended = True):

         cache_key = 'tmdb.cache.%s' % movie.id
         movie_data = self.getCache(cache_key)

@@ -97,6 +97,7 @@ class TheMovieDb(MovieProvider):
                 #'backdrop': [backdrop] if backdrop else [],
                 'poster_original': [poster_original] if poster_original else [],
                 'backdrop_original': [backdrop_original] if backdrop_original else [],
+                'actors': {}
             }

             # Genres

@@ -112,10 +113,11 @@ class TheMovieDb(MovieProvider):
             # Gather actors data
             actors = {}
+            if extended:
                 for cast_item in movie.cast:
                     try:
                         actors[toUnicode(cast_item.name)] = toUnicode(cast_item.character)
-                        images['actor %s' % toUnicode(cast_item.name)] = self.getImage(cast_item, type = 'profile', size = 'original')
+                        images['actors'][toUnicode(cast_item.name)] = self.getImage(cast_item, type = 'profile', size = 'original')
                     except:
                         log.debug('Error getting cast info for %s: %s', (cast_item, traceback.format_exc()))

@@ -139,7 +141,7 @@ class TheMovieDb(MovieProvider):
             movie_data = dict((k, v) for k, v in movie_data.iteritems() if v)

             # Add alternative names
-            if with_titles:
+            if extended:
                 movie_data['titles'].append(movie.originaltitle)
                 for alt in movie.alternate_titles:
                     alt_name = alt.title
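The renamed `extended` flag now gates both the cast walk and the alternative-title lookup, which is why the userscript providers further down pass `extended = False`: they only need the basic fields. A rough, self-contained illustration of the gating (stub objects stand in for tmdb3's Movie; this is not the provider code itself):

    from collections import namedtuple

    CastItem = namedtuple('CastItem', 'name character')
    StubMovie = namedtuple('StubMovie', 'title originaltitle cast')

    def parse_movie(movie, extended = True):
        # Basic fields come back either way
        movie_data = {'titles': [movie.title], 'actor_roles': {}}

        if extended:
            # Cast and alternative titles cost extra TMDb round-trips,
            # so they are only gathered when the caller asks
            for cast_item in movie.cast:
                movie_data['actor_roles'][cast_item.name] = cast_item.character
            movie_data['titles'].append(movie.originaltitle)

        return movie_data

    m = StubMovie('Brazil', 'Brazil', [CastItem('Jonathan Pryce', 'Sam Lowry')])
    print(parse_movie(m, extended = False))  # {'titles': ['Brazil'], 'actor_roles': {}}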

4
couchpotato/core/providers/metadata/xbmc/main.py

@@ -98,9 +98,9 @@ class XBMC(MetaDataBase):
                 if role_name:
                     role = SubElement(actor, 'role')
                     role.text = toUnicode(role_name)
-                if movie_info['images'].get('actor %s' % actor_name, ''):
+                if movie_info['images']['actors'].get(actor_name):
                     thumb = SubElement(actor, 'thumb')
-                    thumb.text = toUnicode(movie_info['images'].get('actor %s' % actor_name))
+                    thumb.text = toUnicode(movie_info['images']['actors'].get(actor_name))

             # Directors
             for director_name in movie_info.get('directors', []):
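Since actor thumbnails are now a plain name-to-URL dict under `images['actors']` instead of synthetic `'actor <name>'` keys, the NFO writer can look them up directly. A self-contained sketch of the resulting `<actor>` elements, with made-up sample data:

    from xml.etree.ElementTree import Element, SubElement, tostring

    movie_info = {
        'actor_roles': {'Jonathan Pryce': 'Sam Lowry'},
        'images': {'actors': {'Jonathan Pryce': 'http://example.com/pryce.jpg'}},
    }

    nfo = Element('movie')
    for actor_name, role_name in movie_info['actor_roles'].items():
        actor = SubElement(nfo, 'actor')
        SubElement(actor, 'name').text = actor_name
        if role_name:
            SubElement(actor, 'role').text = role_name
        # Thumb lookup is now a straight dict access keyed by actor name
        thumb_url = movie_info['images']['actors'].get(actor_name)
        if thumb_url:
            SubElement(actor, 'thumb').text = thumb_url

    print(tostring(nfo))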

4
couchpotato/core/providers/nzb/binsearch/main.py

@@ -85,13 +85,13 @@ class Base(NZBProvider):

     def download(self, url = '', nzb_id = ''):

-        params = {
+        data = {
             'action': 'nzb',
             nzb_id: 'on'
         }

         try:
-            return self.urlopen(url, params = params, show_error = False)
+            return self.urlopen(url, data = data, show_error = False)
         except:
             log.error('Failed getting nzb from %s: %s', (self.getName(), traceback.format_exc()))

1
couchpotato/core/providers/nzb/newznab/main.py

@@ -165,6 +165,7 @@ class Base(NZBProvider, RSS):
             # Get final redirected url
             log.debug('Checking %s for redirects.', url)
             req = urllib2.Request(url)
+            req.add_header('User-Agent', self.user_agent)
             res = urllib2.urlopen(req)
             finalurl = res.geturl()
             if finalurl != url:

6
couchpotato/core/providers/torrent/bithdtv/main.py

@@ -34,7 +34,7 @@ class Base(TorrentProvider):

         url = "%s&%s" % (self.urls['search'], query)

-        data = self.getHTMLData(url, opener = self.login_opener)
+        data = self.getHTMLData(url)

         if data:
             # Remove BiT-HDTV's output garbage so outdated BS4 versions successfully parse the HTML

@@ -71,10 +71,10 @@ class Base(TorrentProvider):
             log.error('Failed getting results from %s: %s', (self.getName(), traceback.format_exc()))

     def getLoginParams(self):
-        return tryUrlencode({
+        return {
             'username': self.conf('username'),
             'password': self.conf('password'),
-        })
+        }

     def getMoreInfo(self, item):
         full_description = self.getCache('bithdtv.%s' % item['id'], item['detail_url'], cache_timeout = 25920000)
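The `getLoginParams()` change repeats across every torrent provider in this commit: the method now returns a plain dict, because the requests-backed `urlopen` urlencodes form data itself, where the old urllib2 path needed a pre-encoded string. Roughly, with a hypothetical tracker URL:

    import requests

    params = {'username': 'user', 'password': 'secret'}

    # Old style: the caller had to urlencode the body by hand
    #   urllib2.urlopen(url, urllib.urlencode(params))
    # New style: hand requests the dict; it encodes the body and sets
    # the application/x-www-form-urlencoded content type itself
    response = requests.post('https://tracker.example/takelogin.php', data = params)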

11
couchpotato/core/providers/torrent/hdbits/main.py

@@ -1,5 +1,4 @@
 from bs4 import BeautifulSoup
-from couchpotato.core.helpers.encoding import tryUrlencode
 from couchpotato.core.helpers.variable import tryInt
 from couchpotato.core.logger import CPLog
 from couchpotato.core.providers.torrent.base import TorrentProvider

@@ -23,7 +22,7 @@ class HDBits(TorrentProvider):

     def _search(self, movie, quality, results):

-        data = self.getJsonData(self.urls['search'] % movie['library']['identifier'], opener = self.login_opener)
+        data = self.getJsonData(self.urls['search'] % movie['library']['identifier'])

         if data:
             try:

@@ -42,15 +41,17 @@ class HDBits(TorrentProvider):
             log.error('Failed getting results from %s: %s', (self.getName(), traceback.format_exc()))

     def getLoginParams(self):

-        data = self.getHTMLData('https://hdbits.org/login')
+        data = self.getHTMLData('https://hdbits.org/login', cache_timeout = 0)
         bs = BeautifulSoup(data)
         secret = bs.find('input', attrs = {'name': 'lol'})['value']

-        return tryUrlencode({
+        return {
             'uname': self.conf('username'),
             'password': self.conf('password'),
+            'returnto': '/',
             'lol': secret
-        })
+        }

     def loginSuccess(self, output):
         return '/logout.php' in output.lower()
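HDBits is the one provider here whose login form carries a one-time token (the hidden 'lol' input), which is why its `getLoginParams()` now fetches the login page with `cache_timeout = 0`: a cached page would hand back a stale token. A standalone sketch of the scrape-then-post dance (the post target below is assumed for illustration, not taken from the tracker):

    import requests
    from bs4 import BeautifulSoup

    session = requests.Session()

    # Fetch the form fresh every time; the token is single-use
    page = session.get('https://hdbits.org/login').text
    secret = BeautifulSoup(page).find('input', attrs = {'name': 'lol'})['value']

    session.post('https://hdbits.org/login', data = {  # action URL assumed
        'uname': 'user',
        'password': 'secret',
        'returnto': '/',
        'lol': secret,
    })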

8
couchpotato/core/providers/torrent/ilovetorrents/main.py

@@ -42,7 +42,7 @@ class ILoveTorrents(TorrentProvider):
             search_url = self.urls['search'] % (movieTitle, page, cats[0])
             page += 1

-            data = self.getHTMLData(search_url, opener = self.login_opener)
+            data = self.getHTMLData(search_url)
             if data:
                 try:
                     soup = BeautifulSoup(data)

@@ -96,11 +96,11 @@ class ILoveTorrents(TorrentProvider):
                     log.error('Failed getting results from %s: %s', (self.getName(), traceback.format_exc()))

     def getLoginParams(self):
-        return tryUrlencode({
+        return {
             'username': self.conf('username'),
             'password': self.conf('password'),
             'submit': 'Welcome to ILT',
-        })
+        }

     def getMoreInfo(self, item):
         cache_key = 'ilt.%s' % item['id']

@@ -109,7 +109,7 @@ class ILoveTorrents(TorrentProvider):
         if not description:
             try:
-                full_description = self.getHTMLData(item['detail_url'], opener = self.login_opener)
+                full_description = self.getHTMLData(item['detail_url'])
                 html = BeautifulSoup(full_description)
                 nfo_pre = html.find('td', attrs = {'class':'main'}).findAll('table')[1]
                 description = toUnicode(nfo_pre.text) if nfo_pre else ''

6
couchpotato/core/providers/torrent/passthepopcorn/main.py

@@ -44,7 +44,7 @@ class Base(TorrentProvider):
         })

         url = '%s?json=noredirect&%s' % (self.urls['torrent'], tryUrlencode(params))
-        res = self.getJsonData(url, opener = self.login_opener)
+        res = self.getJsonData(url)

         try:
             if not 'Movies' in res:

@@ -167,13 +167,13 @@ class Base(TorrentProvider):
         return self.unicodeToASCII(self.htmlToUnicode(text))

     def getLoginParams(self):
-        return tryUrlencode({
+        return {
             'username': self.conf('username'),
             'password': self.conf('password'),
             'passkey': self.conf('passkey'),
             'keeplogged': '1',
             'login': 'Login'
-        })
+        }

     def loginSuccess(self, output):
         try:

6
couchpotato/core/providers/torrent/torrentbytes/main.py

@@ -35,7 +35,7 @@ class TorrentBytes(TorrentProvider):

     def _searchOnTitle(self, title, movie, quality, results):

         url = self.urls['search'] % (tryUrlencode('%s %s' % (title.replace(':', ''), movie['library']['year'])), self.getCatId(quality['identifier'])[0])
-        data = self.getHTMLData(url, opener = self.login_opener)
+        data = self.getHTMLData(url)

         if data:
             html = BeautifulSoup(data)

@@ -69,11 +69,11 @@ class TorrentBytes(TorrentProvider):
                 log.error('Failed to parsing %s: %s', (self.getName(), traceback.format_exc()))

     def getLoginParams(self):
-        return tryUrlencode({
+        return {
             'username': self.conf('username'),
             'password': self.conf('password'),
             'login': 'submit',
-        })
+        }

     def loginSuccess(self, output):
         return 'logout.php' in output.lower() or 'Welcome' in output.lower()

11
couchpotato/core/providers/torrent/torrentday/main.py

@@ -1,4 +1,3 @@
-from couchpotato.core.helpers.encoding import tryUrlencode
 from couchpotato.core.helpers.variable import tryInt
 from couchpotato.core.logger import CPLog
 from couchpotato.core.event import fireEvent

@@ -31,7 +30,7 @@ class Base(TorrentProvider):

         query = self.buildUrl(media)

-        params = {
+        data = {
             '/browse.php?': None,
             'cata': 'yes',
             'jxt': 8,

@@ -39,7 +38,7 @@ class Base(TorrentProvider):
             'search': query,
         }

-        data = self.getJsonData(self.urls['search'], params = params, opener = self.login_opener)
+        data = self.getJsonData(self.urls['search'], data = data)
         try: torrents = data.get('Fs', [])[0].get('Cn', {}).get('torrents', [])
         except: return

@@ -55,11 +54,13 @@ class Base(TorrentProvider):
         })

     def getLoginParams(self):
-        return tryUrlencode({
+        return {
             'username': self.conf('username'),
             'password': self.conf('password'),
+            'submit.x': 18,
+            'submit.y': 11,
             'submit': 'submit',
-        })
+        }

     def loginSuccess(self, output):
         return 'Password not correct' not in output

4
couchpotato/core/providers/userscript/base.py

@@ -25,7 +25,7 @@ class UserscriptBase(Plugin):

         result = fireEvent('movie.search', q = '%s %s' % (name, year), limit = 1, merge = True)

         if len(result) > 0:
-            movie = fireEvent('movie.info', identifier = result[0].get('imdb'), merge = True)
+            movie = fireEvent('movie.info', identifier = result[0].get('imdb'), extended = False, merge = True)
             return movie
         else:
             return None

@@ -54,7 +54,7 @@ class UserscriptBase(Plugin):
         return self.getInfo(getImdb(data))

     def getInfo(self, identifier):
-        return fireEvent('movie.info', identifier = identifier, merge = True)
+        return fireEvent('movie.info', identifier = identifier, extended = False, merge = True)

     def getInclude(self):
         return self.includes

2
couchpotato/core/providers/userscript/imdb/main.py

@@ -8,4 +8,4 @@ class IMDB(UserscriptBase):
     includes = ['*://*.imdb.com/title/tt*', '*://imdb.com/title/tt*']

     def getMovie(self, url):
-        return fireEvent('movie.info', identifier = getImdb(url), merge = True)
+        return self.getInfo(getImdb(url))

2
couchpotato/core/providers/userscript/tmdb/main.py

@@ -9,7 +9,7 @@ class TMDB(UserscriptBase):

     def getMovie(self, url):
         match = re.search('(?P<id>\d+)', url)
-        movie = fireEvent('movie.info_by_tmdb', identifier = match.group('id'), merge = True)
+        movie = fireEvent('movie.info_by_tmdb', identifier = match.group('id'), extended = False, merge = True)

         if movie['imdb']:
             return self.getInfo(movie['imdb'])

2
couchpotato/runner.py

@@ -147,7 +147,7 @@ def runCouchPotato(options, base_path, args, data_dir = None, log_dir = None, En
     Env.set('dev', development)

     # Disable logging for some modules
-    for logger_name in ['enzyme', 'guessit', 'subliminal', 'apscheduler', 'tornado']:
+    for logger_name in ['enzyme', 'guessit', 'subliminal', 'apscheduler', 'tornado', 'requests']:
         logging.getLogger(logger_name).setLevel(logging.ERROR)

     for logger_name in ['gntp', 'migrate']:

8
libs/requests/__init__.py

@@ -23,7 +23,7 @@ usage:

    >>> payload = dict(key1='value1', key2='value2')
    >>> r = requests.post("http://httpbin.org/post", data=payload)
-   >>> print r.text
+   >>> print(r.text)
    {
      ...
      "form": {

@@ -42,15 +42,15 @@ is at <http://python-requests.org>.
 """

 __title__ = 'requests'
-__version__ = '1.2.3'
-__build__ = 0x010203
+__version__ = '2.1.0'
+__build__ = 0x020100
 __author__ = 'Kenneth Reitz'
 __license__ = 'Apache 2.0'
 __copyright__ = 'Copyright 2013 Kenneth Reitz'

 # Attempt to enable urllib3's SNI support, if possible
 try:
-    from requests.packages.urllib3.contrib import pyopenssl
+    from .packages.urllib3.contrib import pyopenssl
     pyopenssl.inject_into_urllib3()
 except ImportError:
     pass

96
libs/requests/adapters.py

@@ -11,18 +11,20 @@ and maintain connections.
 import socket

 from .models import Response
-from .packages.urllib3.poolmanager import PoolManager, ProxyManager
+from .packages.urllib3.poolmanager import PoolManager, proxy_from_url
 from .packages.urllib3.response import HTTPResponse
+from .packages.urllib3.util import Timeout as TimeoutSauce
 from .compat import urlparse, basestring, urldefrag, unquote
 from .utils import (DEFAULT_CA_BUNDLE_PATH, get_encoding_from_headers,
-                    prepend_scheme_if_needed, get_auth_from_url)
+                    except_on_missing_scheme, get_auth_from_url)
 from .structures import CaseInsensitiveDict
 from .packages.urllib3.exceptions import MaxRetryError
 from .packages.urllib3.exceptions import TimeoutError
 from .packages.urllib3.exceptions import SSLError as _SSLError
 from .packages.urllib3.exceptions import HTTPError as _HTTPError
+from .packages.urllib3.exceptions import ProxyError as _ProxyError
 from .cookies import extract_cookies_to_jar
-from .exceptions import ConnectionError, Timeout, SSLError
+from .exceptions import ConnectionError, Timeout, SSLError, ProxyError
 from .auth import _basic_auth_str

 DEFAULT_POOLBLOCK = False

@@ -71,6 +73,7 @@ class HTTPAdapter(BaseAdapter):
                  pool_block=DEFAULT_POOLBLOCK):
         self.max_retries = max_retries
         self.config = {}
+        self.proxy_manager = {}

         super(HTTPAdapter, self).__init__()

@@ -118,7 +121,7 @@ class HTTPAdapter(BaseAdapter):
         :param verify: Whether we should actually verify the certificate.
         :param cert: The SSL certificate to verify.
         """
-        if url.startswith('https') and verify:
+        if url.lower().startswith('https') and verify:

             cert_loc = None

@@ -184,18 +187,28 @@ class HTTPAdapter(BaseAdapter):
     def get_connection(self, url, proxies=None):
         """Returns a urllib3 connection for the given URL. This should not be
         called from user code, and is only exposed for use when subclassing the
-        :class:`HTTPAdapter <reqeusts.adapters.HTTPAdapter>`.
+        :class:`HTTPAdapter <requests.adapters.HTTPAdapter>`.

         :param url: The URL to connect to.
         :param proxies: (optional) A Requests-style dictionary of proxies used on this request.
         """
         proxies = proxies or {}
-        proxy = proxies.get(urlparse(url).scheme)
+        proxy = proxies.get(urlparse(url.lower()).scheme)

         if proxy:
-            proxy = prepend_scheme_if_needed(proxy, urlparse(url).scheme)
-            conn = ProxyManager(self.poolmanager.connection_from_url(proxy))
+            except_on_missing_scheme(proxy)
+            proxy_headers = self.proxy_headers(proxy)
+
+            if not proxy in self.proxy_manager:
+                self.proxy_manager[proxy] = proxy_from_url(
+                                                proxy,
+                                                proxy_headers=proxy_headers)
+
+            conn = self.proxy_manager[proxy].connection_from_url(url)
         else:
+            # Only scheme should be lower case
+            parsed = urlparse(url)
+            url = parsed.geturl()
             conn = self.poolmanager.connection_from_url(url)

         return conn

@@ -211,10 +224,10 @@ class HTTPAdapter(BaseAdapter):
     def request_url(self, request, proxies):
         """Obtain the url to use when making the final request.

-        If the message is being sent through a proxy, the full URL has to be
-        used. Otherwise, we should only use the path portion of the URL.
+        If the message is being sent through a HTTP proxy, the full URL has to
+        be used. Otherwise, we should only use the path portion of the URL.

-        This shoudl not be called from user code, and is only exposed for use
+        This should not be called from user code, and is only exposed for use
         when subclassing the
         :class:`HTTPAdapter <requests.adapters.HTTPAdapter>`.

@@ -222,9 +235,10 @@ class HTTPAdapter(BaseAdapter):
         :param proxies: A dictionary of schemes to proxy URLs.
         """
         proxies = proxies or {}
-        proxy = proxies.get(urlparse(request.url).scheme)
+        scheme = urlparse(request.url).scheme
+        proxy = proxies.get(scheme)

-        if proxy:
+        if proxy and scheme != 'https':
             url, _ = urldefrag(request.url)
         else:
             url = request.path_url

@@ -232,8 +246,9 @@ class HTTPAdapter(BaseAdapter):
         return url

     def add_headers(self, request, **kwargs):
-        """Add any headers needed by the connection. Currently this adds a
-        Proxy-Authorization header.
+        """Add any headers needed by the connection. As of v2.0 this does
+        nothing by default, but is left for overriding by users that subclass
+        the :class:`HTTPAdapter <requests.adapters.HTTPAdapter>`.

         This should not be called from user code, and is only exposed for use
         when subclassing the

@@ -242,12 +257,22 @@ class HTTPAdapter(BaseAdapter):
         :param request: The :class:`PreparedRequest <PreparedRequest>` to add headers to.
         :param kwargs: The keyword arguments from the call to send().
         """
-        proxies = kwargs.get('proxies', {})
+        pass

-        if proxies is None:
-            proxies = {}
+    def proxy_headers(self, proxy):
+        """Returns a dictionary of the headers to add to any request sent
+        through a proxy. This works with urllib3 magic to ensure that they are
+        correctly sent to the proxy, rather than in a tunnelled request if
+        CONNECT is being used.

-        proxy = proxies.get(urlparse(request.url).scheme)
+        This should not be called from user code, and is only exposed for use
+        when subclassing the
+        :class:`HTTPAdapter <requests.adapters.HTTPAdapter>`.

+        :param proxies: The url of the proxy being used for this request.
+        :param kwargs: Optional additional keyword arguments.
+        """
+        headers = {}
         username, password = get_auth_from_url(proxy)

         if username and password:

@@ -255,9 +280,11 @@ class HTTPAdapter(BaseAdapter):
             # to decode them.
             username = unquote(username)
             password = unquote(password)
-            request.headers['Proxy-Authorization'] = _basic_auth_str(username,
-                                                                     password)
+            headers['Proxy-Authorization'] = _basic_auth_str(username,
+                                                             password)
+
+        return headers

     def send(self, request, stream=False, timeout=None, verify=True, cert=None, proxies=None):
         """Sends PreparedRequest object. Returns Response object.

@@ -265,7 +292,7 @@ class HTTPAdapter(BaseAdapter):
         :param stream: (optional) Whether to stream the request content.
         :param timeout: (optional) The timeout on the request.
         :param verify: (optional) Whether to verify SSL certificates.
-        :param vert: (optional) Any user-provided SSL certificate to be trusted.
+        :param cert: (optional) Any user-provided SSL certificate to be trusted.
         :param proxies: (optional) The proxies dictionary to apply to the request.
         """

@@ -273,10 +300,15 @@ class HTTPAdapter(BaseAdapter):
         self.cert_verify(conn, request.url, verify, cert)

         url = self.request_url(request, proxies)
-        self.add_headers(request, proxies=proxies)
+        self.add_headers(request)

         chunked = not (request.body is None or 'Content-Length' in request.headers)

+        if stream:
+            timeout = TimeoutSauce(connect=timeout)
+        else:
+            timeout = TimeoutSauce(connect=timeout, read=timeout)
+
         try:
             if not chunked:
                 resp = conn.urlopen(

@@ -298,7 +330,11 @@ class HTTPAdapter(BaseAdapter):
                     conn = conn.proxy_pool

                 low_conn = conn._get_conn(timeout=timeout)
-                low_conn.putrequest(request.method, url, skip_accept_encoding=True)
+
+                try:
+                    low_conn.putrequest(request.method,
+                                        url,
+                                        skip_accept_encoding=True)

                 for header, value in request.headers.items():
                     low_conn.putheader(header, value)

@@ -313,12 +349,21 @@ class HTTPAdapter(BaseAdapter):
                 low_conn.send(b'0\r\n\r\n')

                 r = low_conn.getresponse()
-                resp = HTTPResponse.from_httplib(r,
+                resp = HTTPResponse.from_httplib(
+                    r,
                     pool=conn,
                     connection=low_conn,
                     preload_content=False,
                     decode_content=False
                 )
+                except:
+                    # If we hit any problems here, clean up the connection.
+                    # Then, reraise so that we can handle the actual exception.
+                    low_conn.close()
+                    raise
+                else:
+                    # All is well, return the connection to the pool.
+                    conn._put_conn(low_conn)

         except socket.error as sockerr:
             raise ConnectionError(sockerr)

@@ -326,6 +371,9 @@ class HTTPAdapter(BaseAdapter):
         except MaxRetryError as e:
             raise ConnectionError(e)

+        except _ProxyError as e:
+            raise ProxyError(e)
+
         except (_SSLError, _HTTPError) as e:
             if isinstance(e, _SSLError):
                 raise SSLError(e)
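In this requests release `add_headers()` becomes an intentional no-op and proxy credentials move into the new `proxy_headers()` hook, whose result urllib3 delivers to the proxy itself rather than inside a CONNECT tunnel; pool managers are also cached per proxy URL in `self.proxy_manager`. A sketch of the intended extension point, subclassing the adapter (the extra header name is invented for the example):

    import requests
    from requests.adapters import HTTPAdapter

    class TaggingAdapter(HTTPAdapter):

        def proxy_headers(self, proxy):
            # Default behaviour builds Proxy-Authorization from the proxy
            # URL's user:pass part; extend it with a proxy-only header
            headers = super(TaggingAdapter, self).proxy_headers(proxy)
            headers['X-Example-Tag'] = 'couchpotato'  # invented header
            return headers

    session = requests.Session()
    session.mount('http://', TaggingAdapter())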

43
libs/requests/auth.py

@@ -16,9 +16,9 @@ import logging

 from base64 import b64encode

 from .compat import urlparse, str
+from .cookies import extract_cookies_to_jar
 from .utils import parse_dict_header

 log = logging.getLogger(__name__)

 CONTENT_TYPE_FORM_URLENCODED = 'application/x-www-form-urlencoded'

@@ -64,6 +64,7 @@ class HTTPDigestAuth(AuthBase):
         self.last_nonce = ''
         self.nonce_count = 0
         self.chal = {}
+        self.pos = None

     def build_digest_header(self, method, url):

@@ -78,7 +79,7 @@ class HTTPDigestAuth(AuthBase):
         else:
             _algorithm = algorithm.upper()
         # lambdas assume digest modules are imported at the top level
-        if _algorithm == 'MD5':
+        if _algorithm == 'MD5' or _algorithm == 'MD5-SESS':
             def md5_utf8(x):
                 if isinstance(x, str):
                     x = x.encode('utf-8')

@@ -90,7 +91,7 @@ class HTTPDigestAuth(AuthBase):
                     x = x.encode('utf-8')
                 return hashlib.sha1(x).hexdigest()
             hash_utf8 = sha_utf8
-        # XXX MD5-sess

         KD = lambda s, d: hash_utf8("%s:%s" % (s, d))

         if hash_utf8 is None:

@@ -106,12 +107,13 @@ class HTTPDigestAuth(AuthBase):
         A1 = '%s:%s:%s' % (self.username, realm, self.password)
         A2 = '%s:%s' % (method, path)

-        if qop == 'auth':
+        HA1 = hash_utf8(A1)
+        HA2 = hash_utf8(A2)
+
         if nonce == self.last_nonce:
             self.nonce_count += 1
         else:
             self.nonce_count = 1
         ncvalue = '%08x' % self.nonce_count
         s = str(self.nonce_count).encode('utf-8')
         s += nonce.encode('utf-8')

@@ -119,10 +121,14 @@ class HTTPDigestAuth(AuthBase):
         s += os.urandom(8)

         cnonce = (hashlib.sha1(s).hexdigest()[:16])
-            noncebit = "%s:%s:%s:%s:%s" % (nonce, ncvalue, cnonce, qop, hash_utf8(A2))
-            respdig = KD(hash_utf8(A1), noncebit)
-        elif qop is None:
-            respdig = KD(hash_utf8(A1), "%s:%s" % (nonce, hash_utf8(A2)))
+        noncebit = "%s:%s:%s:%s:%s" % (nonce, ncvalue, cnonce, qop, HA2)
+        if _algorithm == 'MD5-SESS':
+            HA1 = hash_utf8('%s:%s:%s' % (HA1, nonce, cnonce))
+
+        if qop is None:
+            respdig = KD(HA1, "%s:%s" % (nonce, HA2))
+        elif qop == 'auth' or 'auth' in qop.split(','):
+            respdig = KD(HA1, noncebit)
         else:
             # XXX handle auth-int.
             return None

@@ -139,13 +145,17 @@ class HTTPDigestAuth(AuthBase):
         if entdig:
             base += ', digest="%s"' % entdig
         if qop:
-            base += ', qop=auth, nc=%s, cnonce="%s"' % (ncvalue, cnonce)
+            base += ', qop="auth", nc=%s, cnonce="%s"' % (ncvalue, cnonce)

         return 'Digest %s' % (base)

     def handle_401(self, r, **kwargs):
         """Takes the given response and tries digest-auth, if needed."""

+        if self.pos is not None:
+            # Rewind the file position indicator of the body to where
+            # it was to resend the request.
+            r.request.body.seek(self.pos)
         num_401_calls = getattr(self, 'num_401_calls', 1)
         s_auth = r.headers.get('www-authenticate', '')

@@ -159,10 +169,15 @@ class HTTPDigestAuth(AuthBase):
             # to allow our new request to reuse the same one.
             r.content
             r.raw.release_conn()
+            prep = r.request.copy()
+            extract_cookies_to_jar(prep._cookies, r.request, r.raw)
+            prep.prepare_cookies(prep._cookies)

-            r.request.headers['Authorization'] = self.build_digest_header(r.request.method, r.request.url)
-            _r = r.connection.send(r.request, **kwargs)
+            prep.headers['Authorization'] = self.build_digest_header(
+                prep.method, prep.url)
+            _r = r.connection.send(prep, **kwargs)
             _r.history.append(r)
+            _r.request = prep

             return _r

@@ -173,5 +188,9 @@ class HTTPDigestAuth(AuthBase):
         # If we have a saved nonce, skip the 401
         if self.last_nonce:
             r.headers['Authorization'] = self.build_digest_header(r.method, r.url)
+        try:
+            self.pos = r.body.tell()
+        except AttributeError:
+            pass
         r.register_hook('response', self.handle_401)

         return r
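The digest refactor hoists HA1/HA2 out of the qop branches and adds MD5-SESS support, where HA1 is re-hashed with the server nonce and client cnonce (RFC 2617). A standalone walk through the arithmetic, reusing the RFC's sample challenge values:

    import hashlib

    def md5_utf8(x):
        if isinstance(x, str):
            x = x.encode('utf-8')
        return hashlib.md5(x).hexdigest()

    KD = lambda s, d: md5_utf8("%s:%s" % (s, d))

    # Sample values from RFC 2617's worked example
    username, realm, password = 'Mufasa', 'testrealm@host.com', 'Circle Of Life'
    method, path = 'GET', '/dir/index.html'
    nonce = 'dcd98b7102dd2f0e8b11d0f600bfb0c093'
    cnonce, ncvalue, qop = '0a4f113b', '00000001', 'auth'

    HA1 = md5_utf8('%s:%s:%s' % (username, realm, password))
    HA2 = md5_utf8('%s:%s' % (method, path))

    # MD5-SESS folds the nonces into HA1 before computing the response
    HA1_sess = md5_utf8('%s:%s:%s' % (HA1, nonce, cnonce))

    noncebit = "%s:%s:%s:%s:%s" % (nonce, ncvalue, cnonce, qop, HA2)
    print(KD(HA1, noncebit))       # response digest for algorithm=MD5
    print(KD(HA1_sess, noncebit))  # response digest for algorithm=MD5-SESS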

8212
libs/requests/cacert.pem

File diff suppressed because it is too large

6
libs/requests/compat.py

@@ -83,13 +83,14 @@ except ImportError:
 # ---------

 if is_py2:
-    from urllib import quote, unquote, quote_plus, unquote_plus, urlencode
+    from urllib import quote, unquote, quote_plus, unquote_plus, urlencode, getproxies, proxy_bypass
     from urlparse import urlparse, urlunparse, urljoin, urlsplit, urldefrag
     from urllib2 import parse_http_list
     import cookielib
     from Cookie import Morsel
     from StringIO import StringIO
     from .packages.urllib3.packages.ordered_dict import OrderedDict
+    from httplib import IncompleteRead

     builtin_str = str
     bytes = str

@@ -100,11 +101,12 @@ if is_py2:
 elif is_py3:
     from urllib.parse import urlparse, urlunparse, urljoin, urlsplit, urlencode, quote, unquote, quote_plus, unquote_plus, urldefrag
-    from urllib.request import parse_http_list
+    from urllib.request import parse_http_list, getproxies, proxy_bypass
     from http import cookiejar as cookielib
     from http.cookies import Morsel
     from io import StringIO
     from collections import OrderedDict
+    from http.client import IncompleteRead

     builtin_str = str
     str = str

70
libs/requests/cookies.py

@@ -6,8 +6,9 @@ Compatibility code to be able to use `cookielib.CookieJar` with requests.
 requests.utils imports from here, so be careful with imports.
 """

+import time
 import collections
-from .compat import cookielib, urlparse, Morsel
+from .compat import cookielib, urlparse, urlunparse, Morsel

 try:
     import threading

@@ -44,7 +45,18 @@ class MockRequest(object):
         return self.get_host()

     def get_full_url(self):
+        # Only return the response's URL if the user hadn't set the Host
+        # header
+        if not self._r.headers.get('Host'):
             return self._r.url
+        # If they did set it, retrieve it and reconstruct the expected domain
+        host = self._r.headers['Host']
+        parsed = urlparse(self._r.url)
+        # Reconstruct the URL as we expect it
+        return urlunparse([
+            parsed.scheme, host, parsed.path, parsed.params, parsed.query,
+            parsed.fragment
+        ])

     def is_unverifiable(self):
         return True

@@ -73,6 +85,10 @@ class MockRequest(object):
     def origin_req_host(self):
         return self.get_origin_req_host()

+    @property
+    def host(self):
+        return self.get_host()
+

 class MockResponse(object):
     """Wraps a `httplib.HTTPMessage` to mimic a `urllib.addinfourl`.

@@ -102,6 +118,9 @@ def extract_cookies_to_jar(jar, request, response):
     :param request: our own requests.Request object
     :param response: urllib3.HTTPResponse object
     """
+    if not (hasattr(response, '_original_response') and
+            response._original_response):
+        return
     # the _original_response field is the wrapped httplib.HTTPResponse object,
     req = MockRequest(request)
     # pull out the HTTPMessage with the headers and put it in the mock:

@@ -258,6 +277,11 @@ class RequestsCookieJar(cookielib.CookieJar, collections.MutableMapping):
         """Deletes a cookie given a name. Wraps cookielib.CookieJar's remove_cookie_by_name()."""
         remove_cookie_by_name(self, name)

+    def set_cookie(self, cookie, *args, **kwargs):
+        if hasattr(cookie.value, 'startswith') and cookie.value.startswith('"') and cookie.value.endswith('"'):
+            cookie.value = cookie.value.replace('\\"', '')
+        return super(RequestsCookieJar, self).set_cookie(cookie, *args, **kwargs)
+
     def update(self, other):
         """Updates this jar with cookies from another CookieJar or dict-like"""
         if isinstance(other, cookielib.CookieJar):

@@ -354,19 +378,23 @@ def create_cookie(name, value, **kwargs):

 def morsel_to_cookie(morsel):
     """Convert a Morsel object into a Cookie containing the one k/v pair."""
+    expires = None
+    if morsel["max-age"]:
+        expires = time.time() + morsel["max-age"]
+    elif morsel['expires']:
+        expires = morsel['expires']
+        if type(expires) == type(""):
+            time_template = "%a, %d-%b-%Y %H:%M:%S GMT"
+            expires = time.mktime(time.strptime(expires, time_template))
     c = create_cookie(
         name=morsel.key,
         value=morsel.value,
         version=morsel['version'] or 0,
         port=None,
-        port_specified=False,
         domain=morsel['domain'],
-        domain_specified=bool(morsel['domain']),
-        domain_initial_dot=morsel['domain'].startswith('.'),
         path=morsel['path'],
-        path_specified=bool(morsel['path']),
         secure=bool(morsel['secure']),
-        expires=morsel['max-age'] or morsel['expires'],
+        expires=expires,
         discard=False,
         comment=morsel['comment'],
         comment_url=bool(morsel['comment']),

@@ -375,15 +403,43 @@ def morsel_to_cookie(morsel):
     return c

-def cookiejar_from_dict(cookie_dict, cookiejar=None):
+def cookiejar_from_dict(cookie_dict, cookiejar=None, overwrite=True):
     """Returns a CookieJar from a key/value dictionary.

     :param cookie_dict: Dict of key/values to insert into CookieJar.
+    :param cookiejar: (optional) A cookiejar to add the cookies to.
+    :param overwrite: (optional) If False, will not replace cookies
+        already in the jar with new ones.
     """
     if cookiejar is None:
         cookiejar = RequestsCookieJar()

     if cookie_dict is not None:
+        names_from_jar = [cookie.name for cookie in cookiejar]
         for name in cookie_dict:
+            if overwrite or (name not in names_from_jar):
                 cookiejar.set_cookie(create_cookie(name, cookie_dict[name]))

     return cookiejar
+
+
+def merge_cookies(cookiejar, cookies):
+    """Add cookies to cookiejar and returns a merged CookieJar.
+
+    :param cookiejar: CookieJar object to add the cookies to.
+    :param cookies: Dictionary or CookieJar object to be added.
+    """
+    if not isinstance(cookiejar, cookielib.CookieJar):
+        raise ValueError('You can only merge into CookieJar')
+
+    if isinstance(cookies, dict):
+        cookiejar = cookiejar_from_dict(
+            cookies, cookiejar=cookiejar, overwrite=False)
+    elif isinstance(cookies, cookielib.CookieJar):
+        try:
+            cookiejar.update(cookies)
+        except AttributeError:
+            for cookie_in_jar in cookies:
+                cookiejar.set_cookie(cookie_in_jar)
+
+    return cookiejar
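Two behavioural notes on the cookies hunk: `morsel_to_cookie()` now turns a Max-Age into an absolute expiry (and parses string Expires dates), and the new `merge_cookies()` folds a dict or jar into an existing jar, deliberately not overwriting cookies already present when given a dict. A usage sketch against this vendored module:

    from requests.cookies import RequestsCookieJar, cookiejar_from_dict, merge_cookies

    jar = cookiejar_from_dict({'session': 'abc', 'lang': 'en'},
                              cookiejar = RequestsCookieJar())

    # Dict merges go through cookiejar_from_dict(..., overwrite=False),
    # so a colliding name keeps the value already in the jar
    jar = merge_cookies(jar, {'session': 'ignored', 'theme': 'dark'})

    print(jar['session'])  # 'abc'
    print(jar['theme'])    # 'dark'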

10
libs/requests/exceptions.py

@@ -9,7 +9,7 @@ This module contains the set of Requests' exceptions.
 """


-class RequestException(RuntimeError):
+class RequestException(IOError):
     """There was an ambiguous exception that occurred while handling your
     request."""

@@ -27,6 +27,10 @@ class ConnectionError(RequestException):
     """A Connection error occurred."""


+class ProxyError(ConnectionError):
+    """A proxy error occurred."""
+
+
 class SSLError(ConnectionError):
     """An SSL error occurred."""

@@ -53,3 +57,7 @@ class InvalidSchema(RequestException, ValueError):

 class InvalidURL(RequestException, ValueError):
     """ The URL provided was somehow invalid. """
+
+
+class ChunkedEncodingError(RequestException):
+    """The server declared chunked encoding but sent an invalid chunk."""

191
libs/requests/models.py

@ -11,22 +11,25 @@ import collections
import logging import logging
import datetime import datetime
from io import BytesIO from io import BytesIO, UnsupportedOperation
from .hooks import default_hooks from .hooks import default_hooks
from .structures import CaseInsensitiveDict from .structures import CaseInsensitiveDict
from .auth import HTTPBasicAuth from .auth import HTTPBasicAuth
from .cookies import cookiejar_from_dict, get_cookie_header from .cookies import cookiejar_from_dict, get_cookie_header
from .packages.urllib3.fields import RequestField
from .packages.urllib3.filepost import encode_multipart_formdata from .packages.urllib3.filepost import encode_multipart_formdata
from .packages.urllib3.util import parse_url from .packages.urllib3.util import parse_url
from .exceptions import HTTPError, RequestException, MissingSchema, InvalidURL from .exceptions import (
HTTPError, RequestException, MissingSchema, InvalidURL,
ChunkedEncodingError)
from .utils import ( from .utils import (
guess_filename, get_auth_from_url, requote_uri, guess_filename, get_auth_from_url, requote_uri,
stream_decode_response_unicode, to_key_val_list, parse_header_links, stream_decode_response_unicode, to_key_val_list, parse_header_links,
iter_slices, guess_json_utf, super_len) iter_slices, guess_json_utf, super_len, to_native_string)
from .compat import ( from .compat import (
cookielib, urlparse, urlunparse, urlsplit, urlencode, str, bytes, StringIO, cookielib, urlunparse, urlsplit, urlencode, str, bytes, StringIO,
is_py2, chardet, json, builtin_str, basestring) is_py2, chardet, json, builtin_str, basestring, IncompleteRead)
CONTENT_CHUNK_SIZE = 10 * 1024 CONTENT_CHUNK_SIZE = 10 * 1024
ITER_CHUNK_SIZE = 512 ITER_CHUNK_SIZE = 512
@ -88,12 +91,14 @@ class RequestEncodingMixin(object):
"""Build the body for a multipart/form-data request. """Build the body for a multipart/form-data request.
Will successfully encode files when passed as a dict or a list of Will successfully encode files when passed as a dict or a list of
2-tuples. Order is retained if data is a list of 2-tuples but abritrary 2-tuples. Order is retained if data is a list of 2-tuples but arbitrary
if parameters are supplied as a dict. if parameters are supplied as a dict.
""" """
if (not files) or isinstance(data, str): if (not files):
return None raise ValueError("Files must be provided.")
elif isinstance(data, basestring):
raise ValueError("Data must not be a string.")
new_fields = [] new_fields = []
fields = to_key_val_list(data or {}) fields = to_key_val_list(data or {})
@ -104,6 +109,10 @@ class RequestEncodingMixin(object):
val = [val] val = [val]
for v in val: for v in val:
if v is not None: if v is not None:
# Don't call str() on bytestrings: in Py3 it all goes wrong.
if not isinstance(v, bytes):
v = str(v)
new_fields.append( new_fields.append(
(field.decode('utf-8') if isinstance(field, bytes) else field, (field.decode('utf-8') if isinstance(field, bytes) else field,
v.encode('utf-8') if isinstance(v, str) else v)) v.encode('utf-8') if isinstance(v, str) else v))
@ -111,12 +120,15 @@ class RequestEncodingMixin(object):
for (k, v) in files: for (k, v) in files:
# support for explicit filename # support for explicit filename
ft = None ft = None
fh = None
if isinstance(v, (tuple, list)): if isinstance(v, (tuple, list)):
if len(v) == 2: if len(v) == 2:
fn, fp = v fn, fp = v
else: elif len(v) == 3:
fn, fp, ft = v fn, fp, ft = v
else: else:
fn, fp, ft, fh = v
else:
fn = guess_filename(v) or k fn = guess_filename(v) or k
fp = v fp = v
if isinstance(fp, str): if isinstance(fp, str):
@ -124,11 +136,10 @@ class RequestEncodingMixin(object):
if isinstance(fp, bytes): if isinstance(fp, bytes):
fp = BytesIO(fp) fp = BytesIO(fp)
if ft: rf = RequestField(name=k, data=fp.read(),
new_v = (fn, fp.read(), ft) filename=fn, headers=fh)
else: rf.make_multipart(content_type=ft)
new_v = (fn, fp.read()) new_fields.append(rf)
new_fields.append((k, new_v))
body, content_type = encode_multipart_formdata(new_fields) body, content_type = encode_multipart_formdata(new_fields)
@ -139,6 +150,9 @@ class RequestHooksMixin(object):
def register_hook(self, event, hook): def register_hook(self, event, hook):
"""Properly register a hook.""" """Properly register a hook."""
if event not in self.hooks:
raise ValueError('Unsupported event specified, with event name "%s"' % (event))
if isinstance(hook, collections.Callable): if isinstance(hook, collections.Callable):
self.hooks[event].append(hook) self.hooks[event].append(hook)
elif hasattr(hook, '__iter__'): elif hasattr(hook, '__iter__'):
@ -184,8 +198,8 @@ class Request(RequestHooksMixin):
url=None, url=None,
headers=None, headers=None,
files=None, files=None,
data=dict(), data=None,
params=dict(), params=None,
auth=None, auth=None,
cookies=None, cookies=None,
hooks=None): hooks=None):
@ -209,7 +223,6 @@ class Request(RequestHooksMixin):
self.params = params self.params = params
self.auth = auth self.auth = auth
self.cookies = cookies self.cookies = cookies
self.hooks = hooks
def __repr__(self): def __repr__(self):
return '<Request [%s]>' % (self.method) return '<Request [%s]>' % (self.method)
@ -217,19 +230,17 @@ class Request(RequestHooksMixin):
def prepare(self): def prepare(self):
"""Constructs a :class:`PreparedRequest <PreparedRequest>` for transmission and returns it.""" """Constructs a :class:`PreparedRequest <PreparedRequest>` for transmission and returns it."""
p = PreparedRequest() p = PreparedRequest()
p.prepare(
p.prepare_method(self.method) method=self.method,
p.prepare_url(self.url, self.params) url=self.url,
p.prepare_headers(self.headers) headers=self.headers,
p.prepare_cookies(self.cookies) files=self.files,
p.prepare_body(self.data, self.files) data=self.data,
p.prepare_auth(self.auth, self.url) params=self.params,
# Note that prepare_auth must be last to enable authentication schemes auth=self.auth,
# such as OAuth to work on a fully prepared request. cookies=self.cookies,
hooks=self.hooks,
# This MUST go after prepare_auth. Authenticators could add a hook )
p.prepare_hooks(self.hooks)
return p return p
@ -259,14 +270,43 @@ class PreparedRequest(RequestEncodingMixin, RequestHooksMixin):
self.url = None self.url = None
#: dictionary of HTTP headers. #: dictionary of HTTP headers.
self.headers = None self.headers = None
# The `CookieJar` used to create the Cookie header will be stored here
# after prepare_cookies is called
self._cookies = None
#: request body to send to the server. #: request body to send to the server.
self.body = None self.body = None
#: dictionary of callback hooks, for internal usage. #: dictionary of callback hooks, for internal usage.
self.hooks = default_hooks() self.hooks = default_hooks()
def prepare(self, method=None, url=None, headers=None, files=None,
data=None, params=None, auth=None, cookies=None, hooks=None):
"""Prepares the entire request with the given parameters."""
self.prepare_method(method)
self.prepare_url(url, params)
self.prepare_headers(headers)
self.prepare_cookies(cookies)
self.prepare_body(data, files)
self.prepare_auth(auth, url)
# Note that prepare_auth must be last to enable authentication schemes
# such as OAuth to work on a fully prepared request.
# This MUST go after prepare_auth. Authenticators could add a hook
self.prepare_hooks(hooks)
def __repr__(self): def __repr__(self):
return '<PreparedRequest [%s]>' % (self.method) return '<PreparedRequest [%s]>' % (self.method)
def copy(self):
p = PreparedRequest()
p.method = self.method
p.url = self.url
p.headers = self.headers.copy()
p._cookies = self._cookies.copy()
p.body = self.body
p.hooks = self.hooks
return p
def prepare_method(self, method): def prepare_method(self, method):
"""Prepares the given HTTP method.""" """Prepares the given HTTP method."""
self.method = method self.method = method
@ -284,11 +324,17 @@ class PreparedRequest(RequestEncodingMixin, RequestHooksMixin):
except UnicodeDecodeError: except UnicodeDecodeError:
pass pass
# Don't do any URL preparation for oddball schemes
if ':' in url and not url.lower().startswith('http'):
self.url = url
return
# Support for unicode domain names and paths. # Support for unicode domain names and paths.
scheme, auth, host, port, path, query, fragment = parse_url(url) scheme, auth, host, port, path, query, fragment = parse_url(url)
if not scheme: if not scheme:
raise MissingSchema("Invalid URL %r: No schema supplied" % url) raise MissingSchema("Invalid URL {0!r}: No schema supplied. "
"Perhaps you meant http://{0}?".format(url))
if not host: if not host:
raise InvalidURL("Invalid URL %r: No host supplied" % url) raise InvalidURL("Invalid URL %r: No host supplied" % url)
@ -337,8 +383,7 @@ class PreparedRequest(RequestEncodingMixin, RequestHooksMixin):
"""Prepares the given HTTP headers.""" """Prepares the given HTTP headers."""
if headers: if headers:
headers = dict((name.encode('ascii'), value) for name, value in headers.items()) self.headers = CaseInsensitiveDict((to_native_string(name), value) for name, value in headers.items())
self.headers = CaseInsensitiveDict(headers)
else: else:
self.headers = CaseInsensitiveDict() self.headers = CaseInsensitiveDict()
@ -352,7 +397,6 @@ class PreparedRequest(RequestEncodingMixin, RequestHooksMixin):
body = None body = None
content_type = None content_type = None
length = None length = None
is_stream = False
is_stream = all([ is_stream = all([
hasattr(data, '__iter__'), hasattr(data, '__iter__'),
@ -363,8 +407,8 @@ class PreparedRequest(RequestEncodingMixin, RequestHooksMixin):
try: try:
length = super_len(data) length = super_len(data)
except (TypeError, AttributeError): except (TypeError, AttributeError, UnsupportedOperation):
length = False length = None
if is_stream: if is_stream:
body = data body = data
@ -372,13 +416,10 @@ class PreparedRequest(RequestEncodingMixin, RequestHooksMixin):
if files: if files:
raise NotImplementedError('Streamed bodies and files are mutually exclusive.') raise NotImplementedError('Streamed bodies and files are mutually exclusive.')
if length: if length is not None:
self.headers['Content-Length'] = str(length) self.headers['Content-Length'] = builtin_str(length)
else: else:
self.headers['Transfer-Encoding'] = 'chunked' self.headers['Transfer-Encoding'] = 'chunked'
# Check if file, fo, generator, iterator.
# If not, run through normal process.
else: else:
# Multi-part file uploads. # Multi-part file uploads.
if files: if files:
@ -402,12 +443,12 @@ class PreparedRequest(RequestEncodingMixin, RequestHooksMixin):
def prepare_content_length(self, body): def prepare_content_length(self, body):
if hasattr(body, 'seek') and hasattr(body, 'tell'): if hasattr(body, 'seek') and hasattr(body, 'tell'):
body.seek(0, 2) body.seek(0, 2)
self.headers['Content-Length'] = str(body.tell()) self.headers['Content-Length'] = builtin_str(body.tell())
body.seek(0, 0) body.seek(0, 0)
elif body is not None: elif body is not None:
l = super_len(body) l = super_len(body)
if l: if l:
self.headers['Content-Length'] = str(l) self.headers['Content-Length'] = builtin_str(l)
elif self.method not in ('GET', 'HEAD'): elif self.method not in ('GET', 'HEAD'):
self.headers['Content-Length'] = '0' self.headers['Content-Length'] = '0'
@ -437,12 +478,11 @@ class PreparedRequest(RequestEncodingMixin, RequestHooksMixin):
"""Prepares the given HTTP cookie data.""" """Prepares the given HTTP cookie data."""
if isinstance(cookies, cookielib.CookieJar): if isinstance(cookies, cookielib.CookieJar):
cookies = cookies self._cookies = cookies
else: else:
cookies = cookiejar_from_dict(cookies) self._cookies = cookiejar_from_dict(cookies)
if 'cookie' not in self.headers: cookie_header = get_cookie_header(self._cookies, self)
cookie_header = get_cookie_header(cookies, self)
if cookie_header is not None: if cookie_header is not None:
self.headers['Cookie'] = cookie_header self.headers['Cookie'] = cookie_header
@ -457,6 +497,19 @@ class Response(object):
server's response to an HTTP request. server's response to an HTTP request.
""" """
__attrs__ = [
'_content',
'status_code',
'headers',
'url',
'history',
'encoding',
'reason',
'cookies',
'elapsed',
'request',
]
     def __init__(self):
         super(Response, self).__init__()
@@ -496,6 +549,24 @@ class Response(object):
         #: and the arrival of the response (as a timedelta)
         self.elapsed = datetime.timedelta(0)

+    def __getstate__(self):
+        # Consume everything; accessing the content attribute makes
+        # sure the content has been fully read.
+        if not self._content_consumed:
+            self.content
+
+        return dict(
+            (attr, getattr(self, attr, None))
+            for attr in self.__attrs__
+        )
+
+    def __setstate__(self, state):
+        for name, value in state.items():
+            setattr(self, name, value)
+
+        # pickled objects do not have .raw
+        setattr(self, '_content_consumed', True)
+
     def __repr__(self):
         return '<Response [%s]>' % (self.status_code)
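`__attrs__` drives the two pickling hooks above: `__getstate__` forces the body into `_content` before serializing (the unpicklable urllib3 `raw` object is simply dropped), and `__setstate__` marks the content consumed so a revived response never touches `.raw`. A quick round-trip, URL illustrative:

    import pickle
    import requests

    r = requests.get('http://httpbin.org/get')
    r2 = pickle.loads(pickle.dumps(r))

    assert r2.status_code == r.status_code
    assert r2.content == r.content   # served from _content; r2.raw is gone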
@@ -537,11 +608,22 @@ class Response(object):
             return iter_slices(self._content, chunk_size)

         def generate():
-            while 1:
-                chunk = self.raw.read(chunk_size, decode_content=True)
-                if not chunk:
-                    break
-                yield chunk
+            try:
+                # Special case for urllib3.
+                try:
+                    for chunk in self.raw.stream(chunk_size,
+                                                 decode_content=True):
+                        yield chunk
+                except IncompleteRead as e:
+                    raise ChunkedEncodingError(e)
+            except AttributeError:
+                # Standard file-like object.
+                while True:
+                    chunk = self.raw.read(chunk_size)
+                    if not chunk:
+                        break
+                    yield chunk
+
             self._content_consumed = True

         gen = generate()
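The rewritten generator prefers urllib3's `stream()`, which applies `decode_content` and converts a truncated chunked body into `ChunkedEncodingError`; the `AttributeError` fallback keeps plain file-like `raw` objects working. Typical streaming usage, URL illustrative:

    import requests

    r = requests.get('http://httpbin.org/bytes/4096', stream=True)
    with open('payload.bin', 'wb') as fh:
        for chunk in r.iter_content(chunk_size=512):
            if chunk:  # skip keep-alive chunks
                fh.write(chunk)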
@@ -605,8 +687,8 @@ class Response(object):
     def text(self):
         """Content of the response, in unicode.

-        if Response.encoding is None and chardet module is available, encoding
-        will be guessed.
+        If Response.encoding is None, encoding will be guessed using
+        ``charade``.
         """

         # Try charset from content-type
@@ -648,7 +730,7 @@ class Response(object):
             encoding = guess_json_utf(self.content)
             if encoding is not None:
                 return json.loads(self.content.decode(encoding), **kwargs)
-        return json.loads(self.text or self.content, **kwargs)
+        return json.loads(self.text, **kwargs)
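Dropping the `or self.content` fallback means `json()` always works on decoded text. The `guess_json_utf` helper relies on RFC 4627: the first two characters of a JSON text are always ASCII, so the pattern of leading null bytes reveals the UTF flavour. A sketch, assuming `guess_json_utf` is importable from `requests.utils` as in this release:

    from requests.utils import guess_json_utf

    assert guess_json_utf(u'{"a": 1}'.encode('utf-8')) == 'utf-8'
    assert guess_json_utf(u'{"a": 1}'.encode('utf-16')) == 'utf-16'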
     @property
     def links(self):
@@ -683,4 +765,9 @@ class Response(object):
             raise HTTPError(http_error_msg, response=self)

     def close(self):
+        """Closes the underlying file descriptor and releases the connection
+        back to the pool.
+
+        *Note: Should not normally need to be called explicitly.*
+        """
         return self.raw.release_conn()

34
libs/requests/packages/charade/__init__.py

@@ -30,3 +30,37 @@ def detect(aBuf):
     u.feed(aBuf)
     u.close()
     return u.result
+
+
+def _description_of(path):
+    """Return a string describing the probable encoding of a file."""
+    from charade.universaldetector import UniversalDetector
+
+    u = UniversalDetector()
+    for line in open(path, 'rb'):
+        u.feed(line)
+    u.close()
+    result = u.result
+    if result['encoding']:
+        return '%s: %s with confidence %s' % (path,
+                                              result['encoding'],
+                                              result['confidence'])
+    else:
+        return '%s: no result' % path
+
+
+def charade_cli():
+    """
+    Script which takes one or more file paths and reports on their detected
+    encodings
+
+    Example::
+
+        % chardetect.py somefile someotherfile
+        somefile: windows-1252 with confidence 0.5
+        someotherfile: ascii with confidence 1.0
+
+    """
+    from sys import argv
+    for path in argv[1:]:
+        print(_description_of(path))
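The CLI is a thin wrapper around the detector; the same result dict is available programmatically:

    import charade

    result = charade.detect(u'\u041f\u0440\u0438\u0432\u0435\u0442'.encode('utf-8'))
    print(result['encoding'], result['confidence'])
    # e.g. utf-8 0.99 -- the exact confidence value varies with the input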

7
libs/requests/packages/charade/__main__.py

@@ -0,0 +1,7 @@
'''
support 'python -m charade <file1> [file2] ...' package execution syntax (2.7+)
'''
from charade import charade_cli
charade_cli()

2
libs/requests/packages/charade/jpcntx.py

@@ -169,7 +169,7 @@ class JapaneseContextAnalysis:
     def get_confidence(self):
         # This is just one way to calculate confidence. It works well for me.
         if self._mTotalRel > MINIMUM_DATA_THRESHOLD:
-            return (self._mTotalRel - self._mRelSample[0]) / self._mTotalRel
+            return float(self._mTotalRel - self._mRelSample[0]) / self._mTotalRel
         else:
             return DONT_KNOW

2
libs/requests/packages/charade/latin1prober.py

@@ -129,7 +129,7 @@ class Latin1Prober(CharSetProber):
         if total < 0.01:
             confidence = 0.0
         else:
-            confidence = ((self._mFreqCounter[3] / total)
+            confidence = ((float(self._mFreqCounter[3]) / total)
                           - (self._mFreqCounter[1] * 20.0 / total))
         if confidence < 0.0:
             confidence = 0.0
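This fix, like the jpcntx.py one above it, guards against Python 2 integer division, where `/` between two ints floors the result and collapses any confidence below 1.0 to 0:

    # Python 2 semantics both patches work around:
    print(288 / 300)          # 0 under Python 2 (int / int floors)
    print(float(288) / 300)   # 0.96, a usable confidence score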

12
libs/requests/packages/charade/universaldetector.py

@@ -74,12 +74,10 @@ class UniversalDetector:
             if aBuf[:3] == codecs.BOM:
                 # EF BB BF  UTF-8 with BOM
                 self.result = {'encoding': "UTF-8", 'confidence': 1.0}
-            elif aBuf[:4] == codecs.BOM_UTF32_LE:
+            elif aBuf[:4] in (codecs.BOM_UTF32_LE, codecs.BOM_UTF32_BE):
                 # FF FE 00 00  UTF-32, little-endian BOM
-                self.result = {'encoding': "UTF-32LE", 'confidence': 1.0}
-            elif aBuf[:4] == codecs.BOM_UTF32_BE:
                 # 00 00 FE FF  UTF-32, big-endian BOM
-                self.result = {'encoding': "UTF-32BE", 'confidence': 1.0}
+                self.result = {'encoding': "UTF-32", 'confidence': 1.0}
             elif aBuf[:4] == b'\xFE\xFF\x00\x00':
                 # FE FF 00 00  UCS-4, unusual octet order BOM (3412)
                 self.result = {
@@ -92,12 +90,10 @@ class UniversalDetector:
                     'encoding': "X-ISO-10646-UCS-4-2143",
                     'confidence': 1.0
                 }
-            elif aBuf[:2] == codecs.BOM_LE:
+            elif aBuf[:2] == codecs.BOM_LE or aBuf[:2] == codecs.BOM_BE:
                 # FF FE  UTF-16, little endian BOM
-                self.result = {'encoding': "UTF-16LE", 'confidence': 1.0}
-            elif aBuf[:2] == codecs.BOM_BE:
                 # FE FF  UTF-16, big endian BOM
-                self.result = {'encoding': "UTF-16BE", 'confidence': 1.0}
+                self.result = {'encoding': "UTF-16", 'confidence': 1.0}

         self._mGotData = True
         if self.result['encoding'] and (self.result['confidence'] > 0.0):
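Folding the LE/BE branches also switches to the endian-neutral names "UTF-16"/"UTF-32"; Python's generic codecs read the BOM and pick the byte order themselves. Sketch:

    import codecs
    from charade.universaldetector import UniversalDetector

    u = UniversalDetector()
    u.feed(codecs.BOM_UTF16_LE + u'hello'.encode('utf-16-le'))
    u.close()
    assert u.result == {'encoding': 'UTF-16', 'confidence': 1.0}

    # The generic codec consumes the BOM during decoding:
    (codecs.BOM_UTF16_LE + u'hello'.encode('utf-16-le')).decode('utf-16')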

2
libs/requests/packages/urllib3/__init__.py

@@ -23,7 +23,7 @@ from . import exceptions
 from .filepost import encode_multipart_formdata
 from .poolmanager import PoolManager, ProxyManager, proxy_from_url
 from .response import HTTPResponse
-from .util import make_headers, get_host
+from .util import make_headers, get_host, Timeout

 # Set default logging handler to avoid "No handler found" warnings.

25
libs/requests/packages/urllib3/_collections.py

@@ -5,7 +5,16 @@
 # the MIT License: http://www.opensource.org/licenses/mit-license.php

 from collections import MutableMapping
-from threading import Lock
+
+try:
+    from threading import RLock
+except ImportError:  # Platform-specific: No threads available
+    class RLock:
+        def __enter__(self):
+            pass
+
+        def __exit__(self, exc_type, exc_value, traceback):
+            pass
+

 try:  # Python 2.7+
     from collections import OrderedDict
@@ -40,18 +49,18 @@ class RecentlyUsedContainer(MutableMapping):
         self.dispose_func = dispose_func

         self._container = self.ContainerCls()
-        self._lock = Lock()
+        self.lock = RLock()

     def __getitem__(self, key):
         # Re-insert the item, moving it to the end of the eviction line.
-        with self._lock:
+        with self.lock:
             item = self._container.pop(key)
             self._container[key] = item
             return item

     def __setitem__(self, key, value):
         evicted_value = _Null
-        with self._lock:
+        with self.lock:
             # Possibly evict the existing value of 'key'
             evicted_value = self._container.get(key, _Null)
             self._container[key] = value
@@ -65,21 +74,21 @@ class RecentlyUsedContainer(MutableMapping):
                 self.dispose_func(evicted_value)

     def __delitem__(self, key):
-        with self._lock:
+        with self.lock:
             value = self._container.pop(key)

         if self.dispose_func:
             self.dispose_func(value)

     def __len__(self):
-        with self._lock:
+        with self.lock:
             return len(self._container)

     def __iter__(self):
         raise NotImplementedError('Iteration over this class is unlikely to be threadsafe.')

     def clear(self):
-        with self._lock:
+        with self.lock:
             # Copy pointers to all values, then wipe the mapping
             # under Python 2, this copies the list of values twice :-|
             values = list(self._container.values())
@@ -90,5 +99,5 @@ class RecentlyUsedContainer(MutableMapping):
                 self.dispose_func(value)

     def keys(self):
-        with self._lock:
+        with self.lock:
             return self._container.keys()
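Renaming `_lock` to a public, reentrant `lock` lets callers hold it around a get-or-create sequence (PoolManager.connection_from_host below does exactly this) while the container's own methods re-acquire it safely. Usage sketch, using the standalone urllib3 import path:

    from urllib3._collections import RecentlyUsedContainer

    def dispose(value):
        print('evicted %r' % (value,))

    cache = RecentlyUsedContainer(maxsize=2, dispose_func=dispose)
    cache['a'] = 1
    cache['b'] = 2
    cache['c'] = 3        # 'a' is least recently used and gets disposed

    with cache.lock:      # outer hold...
        if 'b' in cache:  # ...__getitem__ re-acquires the same RLock
            value = cache['b']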

107
libs/requests/packages/urllib3/connection.py

@@ -0,0 +1,107 @@
# urllib3/connection.py
# Copyright 2008-2013 Andrey Petrov and contributors (see CONTRIBUTORS.txt)
#
# This module is part of urllib3 and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
import socket
from socket import timeout as SocketTimeout
try: # Python 3
from http.client import HTTPConnection, HTTPException
except ImportError:
from httplib import HTTPConnection, HTTPException
class DummyConnection(object):
"Used to detect a failed ConnectionCls import."
pass
try: # Compiled with SSL?
ssl = None
HTTPSConnection = DummyConnection
class BaseSSLError(BaseException):
pass
try: # Python 3
from http.client import HTTPSConnection
except ImportError:
from httplib import HTTPSConnection
import ssl
BaseSSLError = ssl.SSLError
except (ImportError, AttributeError): # Platform-specific: No SSL.
pass
from .exceptions import (
ConnectTimeoutError,
)
from .packages.ssl_match_hostname import match_hostname
from .util import (
assert_fingerprint,
resolve_cert_reqs,
resolve_ssl_version,
ssl_wrap_socket,
)
class VerifiedHTTPSConnection(HTTPSConnection):
"""
Based on httplib.HTTPSConnection but wraps the socket with
SSL certification.
"""
cert_reqs = None
ca_certs = None
ssl_version = None
def set_cert(self, key_file=None, cert_file=None,
cert_reqs=None, ca_certs=None,
assert_hostname=None, assert_fingerprint=None):
self.key_file = key_file
self.cert_file = cert_file
self.cert_reqs = cert_reqs
self.ca_certs = ca_certs
self.assert_hostname = assert_hostname
self.assert_fingerprint = assert_fingerprint
def connect(self):
# Add certificate verification
try:
sock = socket.create_connection(
address=(self.host, self.port),
timeout=self.timeout,
)
except SocketTimeout:
raise ConnectTimeoutError(
self, "Connection to %s timed out. (connect timeout=%s)" %
(self.host, self.timeout))
resolved_cert_reqs = resolve_cert_reqs(self.cert_reqs)
resolved_ssl_version = resolve_ssl_version(self.ssl_version)
if self._tunnel_host:
self.sock = sock
# Calls self._set_hostport(), so self.host is
# self._tunnel_host below.
self._tunnel()
# Wrap socket using verification with the root certs in
# trusted_root_certs
self.sock = ssl_wrap_socket(sock, self.key_file, self.cert_file,
cert_reqs=resolved_cert_reqs,
ca_certs=self.ca_certs,
server_hostname=self.host,
ssl_version=resolved_ssl_version)
if resolved_cert_reqs != ssl.CERT_NONE:
if self.assert_fingerprint:
assert_fingerprint(self.sock.getpeercert(binary_form=True),
self.assert_fingerprint)
elif self.assert_hostname is not False:
match_hostname(self.sock.getpeercert(),
self.assert_hostname or self.host)
if ssl:
HTTPSConnection = VerifiedHTTPSConnection

348
libs/requests/packages/urllib3/connectionpool.py

@@ -4,57 +4,45 @@
 # This module is part of urllib3 and is released under
 # the MIT License: http://www.opensource.org/licenses/mit-license.php

-import logging
-import socket
 import errno
+import logging

 from socket import error as SocketError, timeout as SocketTimeout
-from .util import resolve_cert_reqs, resolve_ssl_version, assert_fingerprint
-
-try:    # Python 3
-    from http.client import HTTPConnection, HTTPException
-    from http.client import HTTP_PORT, HTTPS_PORT
-except ImportError:
-    from httplib import HTTPConnection, HTTPException
-    from httplib import HTTP_PORT, HTTPS_PORT
+import socket

 try:    # Python 3
     from queue import LifoQueue, Empty, Full
 except ImportError:
     from Queue import LifoQueue, Empty, Full
+    import Queue as _  # Platform-specific: Windows

-try:    # Compiled with SSL?
-    HTTPSConnection = object
-    BaseSSLError = None
-    ssl = None
-
-    try:    # Python 3
-        from http.client import HTTPSConnection
-    except ImportError:
-        from httplib import HTTPSConnection
-
-    import ssl
-    BaseSSLError = ssl.SSLError
-
-except (ImportError, AttributeError):  # Platform-specific: No SSL.
-    pass
-
-from .request import RequestMethods
-from .response import HTTPResponse
-from .util import get_host, is_connection_dropped, ssl_wrap_socket

 from .exceptions import (
     ClosedPoolError,
+    ConnectTimeoutError,
     EmptyPoolError,
     HostChangedError,
     MaxRetryError,
     SSLError,
     TimeoutError,
+    ReadTimeoutError,
+    ProxyError,
 )
-from .packages.ssl_match_hostname import match_hostname, CertificateError
+from .packages.ssl_match_hostname import CertificateError
 from .packages import six
+from .connection import (
+    DummyConnection,
+    HTTPConnection, HTTPSConnection, VerifiedHTTPSConnection,
+    HTTPException, BaseSSLError,
+)
+from .request import RequestMethods
+from .response import HTTPResponse
+from .util import (
+    assert_fingerprint,
+    get_host,
+    is_connection_dropped,
+    Timeout,
+)

 xrange = six.moves.xrange
@@ -64,56 +52,11 @@ log = logging.getLogger(__name__)
 _Default = object()

 port_by_scheme = {
-    'http': HTTP_PORT,
-    'https': HTTPS_PORT,
+    'http': 80,
+    'https': 443,
 }
-## Connection objects (extension of httplib)
-
-class VerifiedHTTPSConnection(HTTPSConnection):
-    """
-    Based on httplib.HTTPSConnection but wraps the socket with
-    SSL certification.
-    """
-    cert_reqs = None
-    ca_certs = None
-    ssl_version = None
-
-    def set_cert(self, key_file=None, cert_file=None,
-                 cert_reqs=None, ca_certs=None,
-                 assert_hostname=None, assert_fingerprint=None):
-
-        self.key_file = key_file
-        self.cert_file = cert_file
-        self.cert_reqs = cert_reqs
-        self.ca_certs = ca_certs
-        self.assert_hostname = assert_hostname
-        self.assert_fingerprint = assert_fingerprint
-
-    def connect(self):
-        # Add certificate verification
-        sock = socket.create_connection((self.host, self.port), self.timeout)
-
-        resolved_cert_reqs = resolve_cert_reqs(self.cert_reqs)
-        resolved_ssl_version = resolve_ssl_version(self.ssl_version)
-
-        # Wrap socket using verification with the root certs in
-        # trusted_root_certs
-        self.sock = ssl_wrap_socket(sock, self.key_file, self.cert_file,
-                                    cert_reqs=resolved_cert_reqs,
-                                    ca_certs=self.ca_certs,
-                                    server_hostname=self.host,
-                                    ssl_version=resolved_ssl_version)
-
-        if resolved_cert_reqs != ssl.CERT_NONE:
-            if self.assert_fingerprint:
-                assert_fingerprint(self.sock.getpeercert(binary_form=True),
-                                   self.assert_fingerprint)
-            else:
-                match_hostname(self.sock.getpeercert(),
-                               self.assert_hostname or self.host)
-

 ## Pool objects

 class ConnectionPool(object):
@@ -126,6 +69,9 @@ class ConnectionPool(object):
     QueueCls = LifoQueue

     def __init__(self, host, port=None):
+        # httplib doesn't like it when we include brackets in ipv6 addresses
+        host = host.strip('[]')
+
         self.host = host
         self.port = port
@@ -133,6 +79,8 @@ class ConnectionPool(object):
         return '%s(host=%r, port=%r)' % (type(self).__name__,
                                          self.host, self.port)

+# This is taken from http://hg.python.org/cpython/file/7aaba721ebc0/Lib/socket.py#l252
+_blocking_errnos = set([errno.EAGAIN, errno.EWOULDBLOCK])
+

 class HTTPConnectionPool(ConnectionPool, RequestMethods):
""" """
@ -151,9 +99,15 @@ class HTTPConnectionPool(ConnectionPool, RequestMethods):
as a valid HTTP/1.0 or 1.1 status line, passed into as a valid HTTP/1.0 or 1.1 status line, passed into
:class:`httplib.HTTPConnection`. :class:`httplib.HTTPConnection`.
.. note::
Only works in Python 2. This parameter is ignored in Python 3.
:param timeout: :param timeout:
Socket timeout for each individual connection, can be a float. None Socket timeout in seconds for each individual connection. This can
disables timeout. be a float or integer, which sets the timeout for the HTTP request,
or an instance of :class:`urllib3.util.Timeout` which gives you more
fine-grained control over request timeouts. After the constructor has
been parsed, this is always a `urllib3.util.Timeout` object.
:param maxsize: :param maxsize:
Number of connections to save that can be reused. More than 1 is useful Number of connections to save that can be reused. More than 1 is useful
@ -171,20 +125,40 @@ class HTTPConnectionPool(ConnectionPool, RequestMethods):
:param headers: :param headers:
Headers to include with all requests, unless other headers are given Headers to include with all requests, unless other headers are given
explicitly. explicitly.
:param _proxy:
Parsed proxy URL, should not be used directly, instead, see
:class:`urllib3.connectionpool.ProxyManager`"
:param _proxy_headers:
A dictionary with proxy headers, should not be used directly,
instead, see :class:`urllib3.connectionpool.ProxyManager`"
""" """
scheme = 'http' scheme = 'http'
ConnectionCls = HTTPConnection
def __init__(self, host, port=None, strict=False, timeout=None, maxsize=1, def __init__(self, host, port=None, strict=False,
block=False, headers=None): timeout=Timeout.DEFAULT_TIMEOUT, maxsize=1, block=False,
headers=None, _proxy=None, _proxy_headers=None):
ConnectionPool.__init__(self, host, port) ConnectionPool.__init__(self, host, port)
RequestMethods.__init__(self, headers) RequestMethods.__init__(self, headers)
self.strict = strict self.strict = strict
# This is for backwards compatibility and can be removed once a timeout
# can only be set to a Timeout object
if not isinstance(timeout, Timeout):
timeout = Timeout.from_float(timeout)
self.timeout = timeout self.timeout = timeout
self.pool = self.QueueCls(maxsize) self.pool = self.QueueCls(maxsize)
self.block = block self.block = block
self.proxy = _proxy
self.proxy_headers = _proxy_headers or {}
# Fill the queue up so that doing get() on it will block properly # Fill the queue up so that doing get() on it will block properly
for _ in xrange(maxsize): for _ in xrange(maxsize):
self.pool.put(None) self.pool.put(None)
@@ -200,9 +174,14 @@ class HTTPConnectionPool(ConnectionPool, RequestMethods):
         self.num_connections += 1
         log.info("Starting new HTTP connection (%d): %s" %
                  (self.num_connections, self.host))
-        return HTTPConnection(host=self.host,
-                              port=self.port,
-                              strict=self.strict)
+
+        extra_params = {}
+        if not six.PY3:  # Python 2
+            extra_params['strict'] = self.strict
+
+        return self.ConnectionCls(host=self.host, port=self.port,
+                                  timeout=self.timeout.connect_timeout,
+                                  **extra_params)

     def _get_conn(self, timeout=None):
         """
@@ -263,31 +242,100 @@ class HTTPConnectionPool(ConnectionPool, RequestMethods):
                 % self.host)

         # Connection never got put back into the pool, close it.
-        conn.close()
+        if conn:
+            conn.close()
+
+    def _get_timeout(self, timeout):
+        """ Helper that always returns a :class:`urllib3.util.Timeout` """
+        if timeout is _Default:
+            return self.timeout.clone()
+
+        if isinstance(timeout, Timeout):
+            return timeout.clone()
+        else:
+            # User passed us an int/float. This is for backwards compatibility,
+            # can be removed later
+            return Timeout.from_float(timeout)
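`_get_timeout` is the compatibility shim: plain numbers keep working, but the pool now thinks in `Timeout` objects that budget the connect and read phases separately. Sketch using the standalone urllib3 import path; host is illustrative:

    from urllib3 import HTTPConnectionPool
    from urllib3.util import Timeout

    # 2s to establish the TCP connection, 7s for each socket read.
    pool = HTTPConnectionPool('httpbin.org',
                              timeout=Timeout(connect=2.0, read=7.0))
    r = pool.request('GET', '/get')

    # Legacy form: one float, applied to both phases via Timeout.from_float().
    legacy = HTTPConnectionPool('httpbin.org', timeout=3.0)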
     def _make_request(self, conn, method, url, timeout=_Default,
                       **httplib_request_kw):
         """
         Perform a request on a given httplib connection object taken from our
         pool.
+
+        :param conn:
+            a connection from one of our connection pools
+
+        :param timeout:
+            Socket timeout in seconds for the request. This can be a
+            float or integer, which will set the same timeout value for
+            the socket connect and the socket read, or an instance of
+            :class:`urllib3.util.Timeout`, which gives you more fine-grained
+            control over your timeouts.
         """
         self.num_requests += 1

-        if timeout is _Default:
-            timeout = self.timeout
-
-        conn.timeout = timeout # This only does anything in Py26+
-        conn.request(method, url, **httplib_request_kw)
-
-        # Set timeout
-        sock = getattr(conn, 'sock', False) # AppEngine doesn't have sock attr.
-        if sock:
-            sock.settimeout(timeout)
+        timeout_obj = self._get_timeout(timeout)
+
+        try:
+            timeout_obj.start_connect()
+            conn.timeout = timeout_obj.connect_timeout
+            # conn.request() calls httplib.*.request, not the method in
+            # urllib3.request. It also calls makefile (recv) on the socket.
+            conn.request(method, url, **httplib_request_kw)
+        except SocketTimeout:
+            raise ConnectTimeoutError(
+                self, "Connection to %s timed out. (connect timeout=%s)" %
+                (self.host, timeout_obj.connect_timeout))
+
+        # Reset the timeout for the recv() on the socket
+        read_timeout = timeout_obj.read_timeout
+
+        # App Engine doesn't have a sock attr
+        if hasattr(conn, 'sock'):
+            # In Python 3 socket.py will catch EAGAIN and return None when you
+            # try and read into the file pointer created by http.client, which
+            # instead raises a BadStatusLine exception. Instead of catching
+            # the exception and assuming all BadStatusLine exceptions are read
+            # timeouts, check for a zero timeout before making the request.
+            if read_timeout == 0:
+                raise ReadTimeoutError(
+                    self, url,
+                    "Read timed out. (read timeout=%s)" % read_timeout)
+            if read_timeout is Timeout.DEFAULT_TIMEOUT:
+                conn.sock.settimeout(socket.getdefaulttimeout())
+            else:  # None or a value
+                conn.sock.settimeout(read_timeout)

         # Receive the response from the server
-        try: # Python 2.7+, use buffering of HTTP responses
-            httplib_response = conn.getresponse(buffering=True)
-        except TypeError: # Python 2.6 and older
-            httplib_response = conn.getresponse()
+        try:
+            try:  # Python 2.7+, use buffering of HTTP responses
+                httplib_response = conn.getresponse(buffering=True)
+            except TypeError:  # Python 2.6 and older
+                httplib_response = conn.getresponse()
+        except SocketTimeout:
+            raise ReadTimeoutError(
+                self, url, "Read timed out. (read timeout=%s)" % read_timeout)
+
+        except BaseSSLError as e:
+            # Catch possible read timeouts thrown as SSL errors. If not the
+            # case, rethrow the original. We need to do this because of:
+            # http://bugs.python.org/issue10272
+            if 'timed out' in str(e) or \
+               'did not complete (read)' in str(e):  # Python 2.6
+                raise ReadTimeoutError(self, url, "Read timed out.")
+
+            raise
+
+        except SocketError as e:  # Platform-specific: Python 2
+            # See the above comment about EAGAIN in Python 3. In Python 2 we
+            # have to specifically catch it and throw the timeout error
+            if e.errno in _blocking_errnos:
+                raise ReadTimeoutError(
+                    self, url,
+                    "Read timed out. (read timeout=%s)" % read_timeout)
+
+            raise

         # AppEngine doesn't have a version attr.
         http_version = getattr(conn, '_http_vsn_str', 'HTTP/?')
@@ -367,7 +415,7 @@ class HTTPConnectionPool(ConnectionPool, RequestMethods):
         :param redirect:
             If True, automatically handle redirects (status codes 301, 302,
-            303, 307). Each redirect counts as a retry.
+            303, 307, 308). Each redirect counts as a retry.

         :param assert_same_host:
             If ``True``, will make sure that the host of the pool requests is
@@ -375,7 +423,9 @@ class HTTPConnectionPool(ConnectionPool, RequestMethods):
             use the pool on an HTTP proxy and request foreign hosts.

         :param timeout:
-            If specified, overrides the default timeout for this one request.
+            If specified, overrides the default timeout for this one
+            request. It may be a float (in seconds) or an instance of
+            :class:`urllib3.util.Timeout`.

         :param pool_timeout:
             If set and the pool is set to block=True, then this method will
@@ -402,22 +452,22 @@ class HTTPConnectionPool(ConnectionPool, RequestMethods):
         if retries < 0:
             raise MaxRetryError(self, url)

-        if timeout is _Default:
-            timeout = self.timeout
-
         if release_conn is None:
             release_conn = response_kw.get('preload_content', True)

         # Check host
         if assert_same_host and not self.is_same_host(url):
-            host = "%s://%s" % (self.scheme, self.host)
-            if self.port:
-                host = "%s:%d" % (host, self.port)
-
             raise HostChangedError(self, url, retries - 1)

         conn = None

+        # Merge the proxy headers. Only do this in HTTP. We have to copy the
+        # headers dict so we can safely change it without those changes being
+        # reflected in anyone else's copy.
+        if self.scheme == 'http':
+            headers = headers.copy()
+            headers.update(self.proxy_headers)
+
         try:
             # Request a connection from the queue
             conn = self._get_conn(timeout=pool_timeout)
@@ -444,27 +494,31 @@ class HTTPConnectionPool(ConnectionPool, RequestMethods):
             # ``response.release_conn()`` is called (implicitly by
             # ``response.read()``)

-        except Empty as e:
+        except Empty:
             # Timed out by queue
-            raise TimeoutError(self, url,
-                               "Request timed out. (pool_timeout=%s)" %
-                               pool_timeout)
-
-        except SocketTimeout as e:
-            # Timed out by socket
-            raise TimeoutError(self, url,
-                               "Request timed out. (timeout=%s)" %
-                               timeout)
+            raise EmptyPoolError(self, "No pool connections are available.")

         except BaseSSLError as e:
-            # SSL certificate error
             raise SSLError(e)

         except CertificateError as e:
             # Name mismatch
             raise SSLError(e)

+        except TimeoutError as e:
+            # Connection broken, discard.
+            conn = None
+            # Save the error off for retry logic.
+            err = e
+
+            if retries == 0:
+                raise
+
         except (HTTPException, SocketError) as e:
+            if isinstance(e, SocketError) and self.proxy is not None:
+                raise ProxyError('Cannot connect to proxy. '
+                                 'Socket error: %s.' % e)
+
             # Connection broken, discard. It will be replaced next _get_conn().
             conn = None
             # This is necessary so we can access e below
@@ -513,6 +567,7 @@ class HTTPSConnectionPool(HTTPConnectionPool):
     :class:`.VerifiedHTTPSConnection` uses one of ``assert_fingerprint``,
     ``assert_hostname`` and ``host`` in this order to verify connections.
+    If ``assert_hostname`` is False, no verification is done.

     The ``key_file``, ``cert_file``, ``cert_reqs``, ``ca_certs`` and
     ``ssl_version`` are only used if :mod:`ssl` is available and are fed into
@@ -521,17 +576,18 @@ class HTTPSConnectionPool(HTTPConnectionPool):
     """

     scheme = 'https'
+    ConnectionCls = HTTPSConnection

     def __init__(self, host, port=None,
                  strict=False, timeout=None, maxsize=1,
                  block=False, headers=None,
+                 _proxy=None, _proxy_headers=None,
                  key_file=None, cert_file=None, cert_reqs=None,
                  ca_certs=None, ssl_version=None,
                  assert_hostname=None, assert_fingerprint=None):

-        HTTPConnectionPool.__init__(self, host, port,
-                                    strict, timeout, maxsize,
-                                    block, headers)
+        HTTPConnectionPool.__init__(self, host, port, strict, timeout, maxsize,
+                                    block, headers, _proxy, _proxy_headers)
         self.key_file = key_file
         self.cert_file = cert_file
         self.cert_reqs = cert_reqs
@@ -540,6 +596,34 @@ class HTTPSConnectionPool(HTTPConnectionPool):
         self.assert_hostname = assert_hostname
         self.assert_fingerprint = assert_fingerprint

+    def _prepare_conn(self, conn):
+        """
+        Prepare the ``connection`` for :meth:`urllib3.util.ssl_wrap_socket`
+        and establish the tunnel if proxy is used.
+        """
+
+        if isinstance(conn, VerifiedHTTPSConnection):
+            conn.set_cert(key_file=self.key_file,
+                          cert_file=self.cert_file,
+                          cert_reqs=self.cert_reqs,
+                          ca_certs=self.ca_certs,
+                          assert_hostname=self.assert_hostname,
+                          assert_fingerprint=self.assert_fingerprint)
+            conn.ssl_version = self.ssl_version
+
+        if self.proxy is not None:
+            # Python 2.7+
+            try:
+                set_tunnel = conn.set_tunnel
+            except AttributeError:  # Platform-specific: Python 2.6
+                set_tunnel = conn._set_tunnel
+            set_tunnel(self.host, self.port, self.proxy_headers)
+
+            # Establish tunnel connection early, because otherwise httplib
+            # would improperly set Host: header to proxy's IP:port.
+            conn.connect()
+
+        return conn
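`_prepare_conn` is also where an HTTPS-through-proxy connection gets its CONNECT tunnel established early, so httplib sets the Host header to the origin server instead of the proxy. From the caller's side this is reached through `proxy_from_url`; the proxy address below is hypothetical:

    from urllib3 import proxy_from_url

    proxy = proxy_from_url('http://localhost:3128/')
    # HTTP requests go to the proxy with absolute URLs and proxy headers;
    # HTTPS requests are tunnelled with CONNECT and verified end-to-end.
    r = proxy.request('GET', 'https://httpbin.org/get')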
     def _new_conn(self):
         """
         Return a fresh :class:`httplib.HTTPSConnection`.
@@ -548,26 +632,26 @@ class HTTPSConnectionPool(HTTPConnectionPool):
         log.info("Starting new HTTPS connection (%d): %s"
                  % (self.num_connections, self.host))

-        if not ssl: # Platform-specific: Python compiled without +ssl
-            if not HTTPSConnection or HTTPSConnection is object:
-                raise SSLError("Can't connect to HTTPS URL because the SSL "
-                               "module is not available.")
-            return HTTPSConnection(host=self.host,
-                                   port=self.port,
-                                   strict=self.strict)
-
-        connection = VerifiedHTTPSConnection(host=self.host,
-                                             port=self.port,
-                                             strict=self.strict)
-        connection.set_cert(key_file=self.key_file, cert_file=self.cert_file,
-                            cert_reqs=self.cert_reqs, ca_certs=self.ca_certs,
-                            assert_hostname=self.assert_hostname,
-                            assert_fingerprint=self.assert_fingerprint)
-
-        connection.ssl_version = self.ssl_version
-
-        return connection
+        if not self.ConnectionCls or self.ConnectionCls is DummyConnection:
+            # Platform-specific: Python without ssl
+            raise SSLError("Can't connect to HTTPS URL because the SSL "
+                           "module is not available.")
+
+        actual_host = self.host
+        actual_port = self.port
+        if self.proxy is not None:
+            actual_host = self.proxy.host
+            actual_port = self.proxy.port
+
+        extra_params = {}
+        if not six.PY3:  # Python 2
+            extra_params['strict'] = self.strict
+
+        conn = self.ConnectionCls(host=actual_host, port=actual_port,
+                                  timeout=self.timeout.connect_timeout,
+                                  **extra_params)

+        return self._prepare_conn(conn)

 def connection_from_url(url, **kw):

2
libs/requests/packages/urllib3/contrib/ntlmpool.py

@@ -33,7 +33,7 @@ class NTLMConnectionPool(HTTPSConnectionPool):
     def __init__(self, user, pw, authurl, *args, **kwargs):
         """
         authurl is a random URL on the server that is protected by NTLM.
-        user is the Windows user, probably in the DOMAIN\username format.
+        user is the Windows user, probably in the DOMAIN\\username format.
         pw is the password for the user.
         """
         super(NTLMConnectionPool, self).__init__(*args, **kwargs)

195
libs/requests/packages/urllib3/contrib/pyopenssl.py

@@ -20,15 +20,16 @@ Now you can use :mod:`urllib3` as you normally would, and it will support SNI
 when the required modules are installed.
 '''

-from ndg.httpsclient.ssl_peer_verification import (ServerSSLCertVerification,
-                                                   SUBJ_ALT_NAME_SUPPORT)
+from ndg.httpsclient.ssl_peer_verification import SUBJ_ALT_NAME_SUPPORT
 from ndg.httpsclient.subj_alt_name import SubjectAltName
 import OpenSSL.SSL
 from pyasn1.codec.der import decoder as der_decoder
 from socket import _fileobject
 import ssl
+import select
+from cStringIO import StringIO

-from .. import connectionpool
+from .. import connection
 from .. import util

 __all__ = ['inject_into_urllib3', 'extract_from_urllib3']
@@ -51,20 +52,20 @@ _openssl_verify = {

 orig_util_HAS_SNI = util.HAS_SNI
-orig_connectionpool_ssl_wrap_socket = connectionpool.ssl_wrap_socket
+orig_connection_ssl_wrap_socket = connection.ssl_wrap_socket


 def inject_into_urllib3():
     'Monkey-patch urllib3 with PyOpenSSL-backed SSL-support.'

-    connectionpool.ssl_wrap_socket = ssl_wrap_socket
+    connection.ssl_wrap_socket = ssl_wrap_socket
     util.HAS_SNI = HAS_SNI


 def extract_from_urllib3():
     'Undo monkey-patching by :func:`inject_into_urllib3`.'

-    connectionpool.ssl_wrap_socket = orig_connectionpool_ssl_wrap_socket
+    connection.ssl_wrap_socket = orig_connection_ssl_wrap_socket
     util.HAS_SNI = orig_util_HAS_SNI
@@ -99,6 +100,172 @@ def get_subj_alt_name(peer_cert):
     return dns_name
class fileobject(_fileobject):
def read(self, size=-1):
# Use max, disallow tiny reads in a loop as they are very inefficient.
# We never leave read() with any leftover data from a new recv() call
# in our internal buffer.
rbufsize = max(self._rbufsize, self.default_bufsize)
# Our use of StringIO rather than lists of string objects returned by
# recv() minimizes memory usage and fragmentation that occurs when
# rbufsize is large compared to the typical return value of recv().
buf = self._rbuf
buf.seek(0, 2) # seek end
if size < 0:
# Read until EOF
self._rbuf = StringIO() # reset _rbuf. we consume it via buf.
while True:
try:
data = self._sock.recv(rbufsize)
except OpenSSL.SSL.WantReadError:
continue
if not data:
break
buf.write(data)
return buf.getvalue()
else:
# Read until size bytes or EOF seen, whichever comes first
buf_len = buf.tell()
if buf_len >= size:
# Already have size bytes in our buffer? Extract and return.
buf.seek(0)
rv = buf.read(size)
self._rbuf = StringIO()
self._rbuf.write(buf.read())
return rv
self._rbuf = StringIO() # reset _rbuf. we consume it via buf.
while True:
left = size - buf_len
# recv() will malloc the amount of memory given as its
# parameter even though it often returns much less data
# than that. The returned data string is short lived
# as we copy it into a StringIO and free it. This avoids
# fragmentation issues on many platforms.
try:
data = self._sock.recv(left)
except OpenSSL.SSL.WantReadError:
continue
if not data:
break
n = len(data)
if n == size and not buf_len:
# Shortcut. Avoid buffer data copies when:
# - We have no data in our buffer.
# AND
# - Our call to recv returned exactly the
# number of bytes we were asked to read.
return data
if n == left:
buf.write(data)
del data # explicit free
break
assert n <= left, "recv(%d) returned %d bytes" % (left, n)
buf.write(data)
buf_len += n
del data # explicit free
#assert buf_len == buf.tell()
return buf.getvalue()
def readline(self, size=-1):
buf = self._rbuf
buf.seek(0, 2) # seek end
if buf.tell() > 0:
# check if we already have it in our buffer
buf.seek(0)
bline = buf.readline(size)
if bline.endswith('\n') or len(bline) == size:
self._rbuf = StringIO()
self._rbuf.write(buf.read())
return bline
del bline
if size < 0:
# Read until \n or EOF, whichever comes first
if self._rbufsize <= 1:
# Speed up unbuffered case
buf.seek(0)
buffers = [buf.read()]
self._rbuf = StringIO() # reset _rbuf. we consume it via buf.
data = None
recv = self._sock.recv
while True:
try:
while data != "\n":
data = recv(1)
if not data:
break
buffers.append(data)
except OpenSSL.SSL.WantReadError:
continue
break
return "".join(buffers)
buf.seek(0, 2) # seek end
self._rbuf = StringIO() # reset _rbuf. we consume it via buf.
while True:
try:
data = self._sock.recv(self._rbufsize)
except OpenSSL.SSL.WantReadError:
continue
if not data:
break
nl = data.find('\n')
if nl >= 0:
nl += 1
buf.write(data[:nl])
self._rbuf.write(data[nl:])
del data
break
buf.write(data)
return buf.getvalue()
else:
# Read until size bytes or \n or EOF seen, whichever comes first
buf.seek(0, 2) # seek end
buf_len = buf.tell()
if buf_len >= size:
buf.seek(0)
rv = buf.read(size)
self._rbuf = StringIO()
self._rbuf.write(buf.read())
return rv
self._rbuf = StringIO() # reset _rbuf. we consume it via buf.
while True:
try:
data = self._sock.recv(self._rbufsize)
except OpenSSL.SSL.WantReadError:
continue
if not data:
break
left = size - buf_len
# did we just receive a newline?
nl = data.find('\n', 0, left)
if nl >= 0:
nl += 1
# save the excess data to _rbuf
self._rbuf.write(data[nl:])
if buf_len:
buf.write(data[:nl])
break
else:
# Shortcut. Avoid data copy through buf when returning
# a substring of our first recv().
return data[:nl]
n = len(data)
if n == size and not buf_len:
# Shortcut. Avoid data copy through buf when
# returning exactly all of our first recv().
return data
if n >= left:
buf.write(data[:left])
self._rbuf.write(data[left:])
break
buf.write(data)
buf_len += n
#assert buf_len == buf.tell()
return buf.getvalue()
 class WrappedSocket(object):
     '''API-compatibility wrapper for Python OpenSSL's Connection-class.'''
@@ -106,8 +273,11 @@ class WrappedSocket(object):
         self.connection = connection
         self.socket = socket

+    def fileno(self):
+        return self.socket.fileno()
+
     def makefile(self, mode, bufsize=-1):
-        return _fileobject(self.connection, mode, bufsize)
+        return fileobject(self.connection, mode, bufsize)

     def settimeout(self, timeout):
         return self.socket.settimeout(timeout)
@@ -115,10 +285,14 @@ class WrappedSocket(object):
     def sendall(self, data):
         return self.connection.sendall(data)

+    def close(self):
+        return self.connection.shutdown()
+
     def getpeercert(self, binary_form=False):
         x509 = self.connection.get_peer_certificate()
         if not x509:
-            raise ssl.SSLError('')
+            return x509

         if binary_form:
             return OpenSSL.crypto.dump_certificate(
@@ -159,9 +333,14 @@ def ssl_wrap_socket(sock, keyfile=None, certfile=None, cert_reqs=None,
     cnx = OpenSSL.SSL.Connection(ctx, sock)
     cnx.set_tlsext_host_name(server_hostname)
     cnx.set_connect_state()
-    try:
-        cnx.do_handshake()
-    except OpenSSL.SSL.Error as e:
-        raise ssl.SSLError('bad handshake', e)
+    while True:
+        try:
+            cnx.do_handshake()
+        except OpenSSL.SSL.WantReadError:
+            select.select([sock], [], [])
+            continue
+        except OpenSSL.SSL.Error as e:
+            raise ssl.SSLError('bad handshake', e)
+        break

     return WrappedSocket(cnx, sock)
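The handshake loop services `WantReadError` by blocking in select() on the raw socket, so non-blocking sockets no longer abort the handshake. The whole backend stays opt-in via monkey-patching and needs pyOpenSSL, ndg-httpsclient and pyasn1 installed:

    from requests.packages.urllib3.contrib import pyopenssl

    pyopenssl.inject_into_urllib3()    # SNI-capable TLS on Python 2

    import requests
    r = requests.get('https://httpbin.org/get')   # illustrative URL

    pyopenssl.extract_from_urllib3()   # undo the patch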

30
libs/requests/packages/urllib3/exceptions.py

@@ -39,6 +39,11 @@ class SSLError(HTTPError):
     pass


+class ProxyError(HTTPError):
+    "Raised when the connection to a proxy fails."
+    pass
+
+
 class DecodeError(HTTPError):
     "Raised when automatic decoding based on Content-Type fails."
     pass
@@ -70,8 +75,29 @@ class HostChangedError(RequestError):
         self.retries = retries


-class TimeoutError(RequestError):
-    "Raised when a socket timeout occurs."
+class TimeoutStateError(HTTPError):
+    """ Raised when passing an invalid state to a timeout """
+    pass
+
+
+class TimeoutError(HTTPError):
+    """ Raised when a socket timeout error occurs.
+
+    Catching this error will catch both :exc:`ReadTimeoutErrors
+    <ReadTimeoutError>` and :exc:`ConnectTimeoutErrors <ConnectTimeoutError>`.
+    """
+    pass
+
+
+class ReadTimeoutError(TimeoutError, RequestError):
+    "Raised when a socket timeout occurs while receiving data from a server"
+    pass
+
+
+# This timeout error does not have a URL attached and needs to inherit from the
+# base HTTPError
+class ConnectTimeoutError(TimeoutError):
+    "Raised when a socket timeout occurs while connecting to a server"
     pass
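The split hierarchy lets callers tell a connection that never came up apart from one that went quiet, while `except TimeoutError` still catches both. Sketch (host and timeout values illustrative):

    from urllib3 import HTTPConnectionPool
    from urllib3.exceptions import ConnectTimeoutError, ReadTimeoutError

    pool = HTTPConnectionPool('httpbin.org', timeout=0.001)
    try:
        pool.request('GET', '/delay/5', retries=0)
    except ConnectTimeoutError:
        print('TCP connect exceeded its budget')
    except ReadTimeoutError:
        print('connected, but the response timed out')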

177
libs/requests/packages/urllib3/fields.py

@@ -0,0 +1,177 @@
# urllib3/fields.py
# Copyright 2008-2013 Andrey Petrov and contributors (see CONTRIBUTORS.txt)
#
# This module is part of urllib3 and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
import email.utils
import mimetypes
from .packages import six
def guess_content_type(filename, default='application/octet-stream'):
"""
Guess the "Content-Type" of a file.
:param filename:
The filename to guess the "Content-Type" of using :mod:`mimetimes`.
:param default:
If no "Content-Type" can be guessed, default to `default`.
"""
if filename:
return mimetypes.guess_type(filename)[0] or default
return default
def format_header_param(name, value):
"""
Helper function to format and quote a single header parameter.
Particularly useful for header parameters which might contain
non-ASCII values, like file names. This follows RFC 2231, as
suggested by RFC 2388 Section 4.4.
:param name:
The name of the parameter, a string expected to be ASCII only.
:param value:
The value of the parameter, provided as a unicode string.
"""
if not any(ch in value for ch in '"\\\r\n'):
result = '%s="%s"' % (name, value)
try:
result.encode('ascii')
except UnicodeEncodeError:
pass
else:
return result
if not six.PY3: # Python 2:
value = value.encode('utf-8')
value = email.utils.encode_rfc2231(value, 'utf-8')
value = '%s*=%s' % (name, value)
return value
class RequestField(object):
"""
A data container for request body parameters.
:param name:
The name of this request field.
:param data:
The data/value body.
:param filename:
An optional filename of the request field.
:param headers:
An optional dict-like object of headers to initially use for the field.
"""
def __init__(self, name, data, filename=None, headers=None):
self._name = name
self._filename = filename
self.data = data
self.headers = {}
if headers:
self.headers = dict(headers)
@classmethod
def from_tuples(cls, fieldname, value):
"""
A :class:`~urllib3.fields.RequestField` factory from old-style tuple parameters.
Supports constructing :class:`~urllib3.fields.RequestField` from parameter
of key/value strings AND key/filetuple. A filetuple is a (filename, data, MIME type)
tuple where the MIME type is optional. For example: ::
'foo': 'bar',
'fakefile': ('foofile.txt', 'contents of foofile'),
'realfile': ('barfile.txt', open('realfile').read()),
'typedfile': ('bazfile.bin', open('bazfile').read(), 'image/jpeg'),
'nonamefile': 'contents of nonamefile field',
Field names and filenames must be unicode.
"""
if isinstance(value, tuple):
if len(value) == 3:
filename, data, content_type = value
else:
filename, data = value
content_type = guess_content_type(filename)
else:
filename = None
content_type = None
data = value
request_param = cls(fieldname, data, filename=filename)
request_param.make_multipart(content_type=content_type)
return request_param
def _render_part(self, name, value):
"""
Overridable helper function to format a single header parameter.
:param name:
The name of the parameter, a string expected to be ASCII only.
:param value:
The value of the parameter, provided as a unicode string.
"""
return format_header_param(name, value)
def _render_parts(self, header_parts):
"""
Helper function to format and quote a single header.
Useful for single headers that are composed of multiple items. E.g.,
'Content-Disposition' fields.
:param header_parts:
A sequence of (k, v) tuples or a :class:`dict` of (k, v) to format as
`k1="v1"; k2="v2"; ...`.
"""
parts = []
iterable = header_parts
if isinstance(header_parts, dict):
iterable = header_parts.items()
for name, value in iterable:
if value:
parts.append(self._render_part(name, value))
return '; '.join(parts)
def render_headers(self):
"""
Renders the headers for this request field.
"""
lines = []
sort_keys = ['Content-Disposition', 'Content-Type', 'Content-Location']
for sort_key in sort_keys:
if self.headers.get(sort_key, False):
lines.append('%s: %s' % (sort_key, self.headers[sort_key]))
for header_name, header_value in self.headers.items():
if header_name not in sort_keys:
if header_value:
lines.append('%s: %s' % (header_name, header_value))
lines.append('\r\n')
return '\r\n'.join(lines)
def make_multipart(self, content_disposition=None, content_type=None, content_location=None):
"""
Makes this request field into a multipart request field.
This method overrides "Content-Disposition", "Content-Type" and
"Content-Location" headers to the request parameter.
:param content_type:
The 'Content-Type' of the request body.
:param content_location:
The 'Content-Location' of the request body.
"""
self.headers['Content-Disposition'] = content_disposition or 'form-data'
self.headers['Content-Disposition'] += '; '.join(['', self._render_parts((('name', self._name), ('filename', self._filename)))])
self.headers['Content-Type'] = content_type
self.headers['Content-Location'] = content_location
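RequestField packages the old ad-hoc tuple handling into an object: `from_tuples` accepts the legacy (filename, data[, MIME type]) shapes and `render_headers` emits the part's multipart headers. Sketch:

    from urllib3.fields import RequestField

    field = RequestField.from_tuples(
        'upload', ('report.txt', 'hello world', 'text/plain'))
    print(field.render_headers())
    # Content-Disposition: form-data; name="upload"; filename="report.txt"
    # Content-Type: text/plain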

59
libs/requests/packages/urllib3/filepost.py

@@ -1,5 +1,5 @@
 # urllib3/filepost.py
-# Copyright 2008-2012 Andrey Petrov and contributors (see CONTRIBUTORS.txt)
+# Copyright 2008-2013 Andrey Petrov and contributors (see CONTRIBUTORS.txt)
 #
 # This module is part of urllib3 and is released under
 # the MIT License: http://www.opensource.org/licenses/mit-license.php
@@ -12,6 +12,7 @@ from io import BytesIO

 from .packages import six
 from .packages.six import b
+from .fields import RequestField

 writer = codecs.lookup('utf-8')[3]
@@ -23,15 +24,38 @@ def choose_boundary():
     return uuid4().hex


-def get_content_type(filename):
-    return mimetypes.guess_type(filename)[0] or 'application/octet-stream'
+def iter_field_objects(fields):
+    """
+    Iterate over fields.
+
+    Supports list of (k, v) tuples and dicts, and lists of
+    :class:`~urllib3.fields.RequestField`.
+
+    """
+    if isinstance(fields, dict):
+        i = six.iteritems(fields)
+    else:
+        i = iter(fields)
+
+    for field in i:
+        if isinstance(field, RequestField):
+            yield field
+        else:
+            yield RequestField.from_tuples(*field)


 def iter_fields(fields):
     """
     Iterate over fields.

+    .. deprecated ::
+        The addition of `~urllib3.fields.RequestField` makes this function
+        obsolete. Instead, use :func:`iter_field_objects`, which returns
+        `~urllib3.fields.RequestField` objects, instead.
+
     Supports list of (k, v) tuples and dicts.
+
     """
     if isinstance(fields, dict):
         return ((k, v) for k, v in six.iteritems(fields))
@@ -44,15 +68,7 @@ def encode_multipart_formdata(fields, boundary=None):
     Encode a dictionary of ``fields`` using the multipart/form-data MIME format.

     :param fields:
-        Dictionary of fields or list of (key, value) or (key, value, MIME type)
-        field tuples.  The key is treated as the field name, and the value as
-        the body of the form-data bytes. If the value is a tuple of two
-        elements, then the first element is treated as the filename of the
-        form-data section and a suitable MIME type is guessed based on the
-        filename. If the value is a tuple of three elements, then the third
-        element is treated as an explicit MIME type of the form-data section.
-        Field names and filenames must be unicode.
+        Dictionary of fields or list of (key, :class:`~urllib3.fields.RequestField`).

     :param boundary:
         If not specified, then a random boundary will be generated using
@@ -62,24 +78,11 @@ def encode_multipart_formdata(fields, boundary=None):
     if boundary is None:
         boundary = choose_boundary()

-    for fieldname, value in iter_fields(fields):
+    for field in iter_field_objects(fields):
         body.write(b('--%s\r\n' % (boundary)))

-        if isinstance(value, tuple):
-            if len(value) == 3:
-                filename, data, content_type = value
-            else:
-                filename, data = value
-                content_type = get_content_type(filename)
-            writer(body).write('Content-Disposition: form-data; name="%s"; '
-                               'filename="%s"\r\n' % (fieldname, filename))
-            body.write(b('Content-Type: %s\r\n\r\n' %
-                       (content_type,)))
-        else:
-            data = value
-            writer(body).write('Content-Disposition: form-data; name="%s"\r\n'
-                               % (fieldname))
-            body.write(b'\r\n')
+        writer(body).write(field.render_headers())
+        data = field.data

         if isinstance(data, int):
             data = str(data) # Backwards compatibility
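Callers are unaffected: the legacy dict and tuple shapes still encode, they are simply converted to RequestField objects first. Sketch:

    from urllib3.filepost import encode_multipart_formdata

    body, content_type = encode_multipart_formdata({
        'name': 'value',
        'file': ('hello.txt', 'contents of the file', 'text/plain'),
    })
    print(content_type)   # multipart/form-data; boundary=<random hex>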

74
libs/requests/packages/urllib3/packages/ssl_match_hostname/__init__.py

@@ -1,61 +1,13 @@
-"""The match_hostname() function from Python 3.2, essential when using SSL."""
-
-import re
-
-__version__ = '3.2.2'
-
-class CertificateError(ValueError):
-    pass
-
-def _dnsname_to_pat(dn):
-    pats = []
-    for frag in dn.split(r'.'):
-        if frag == '*':
-            # When '*' is a fragment by itself, it matches a non-empty dotless
-            # fragment.
-            pats.append('[^.]+')
-        else:
-            # Otherwise, '*' matches any dotless fragment.
-            frag = re.escape(frag)
-            pats.append(frag.replace(r'\*', '[^.]*'))
-    return re.compile(r'\A' + r'\.'.join(pats) + r'\Z', re.IGNORECASE)
-
-def match_hostname(cert, hostname):
-    """Verify that *cert* (in decoded format as returned by
-    SSLSocket.getpeercert()) matches the *hostname*.  RFC 2818 rules
-    are mostly followed, but IP addresses are not accepted for *hostname*.
-
-    CertificateError is raised on failure. On success, the function
-    returns nothing.
-    """
-    if not cert:
-        raise ValueError("empty or no certificate")
-    dnsnames = []
-    san = cert.get('subjectAltName', ())
-    for key, value in san:
-        if key == 'DNS':
-            if _dnsname_to_pat(value).match(hostname):
-                return
-            dnsnames.append(value)
-    if not dnsnames:
-        # The subject is only checked when there is no dNSName entry
-        # in subjectAltName
-        for sub in cert.get('subject', ()):
-            for key, value in sub:
-                # XXX according to RFC 2818, the most specific Common Name
-                # must be used.
-                if key == 'commonName':
-                    if _dnsname_to_pat(value).match(hostname):
-                        return
-                    dnsnames.append(value)
-    if len(dnsnames) > 1:
-        raise CertificateError("hostname %r "
-            "doesn't match either of %s"
-            % (hostname, ', '.join(map(repr, dnsnames))))
-    elif len(dnsnames) == 1:
-        raise CertificateError("hostname %r "
-            "doesn't match %r"
-            % (hostname, dnsnames[0]))
-    else:
-        raise CertificateError("no appropriate commonName or "
-            "subjectAltName fields were found")
+try:
+    # Python 3.2+
+    from ssl import CertificateError, match_hostname
+except ImportError:
+    try:
+        # Backport of the function from a pypi module
+        from backports.ssl_match_hostname import CertificateError, match_hostname
+    except ImportError:
+        # Our vendored copy
+        from _implementation import CertificateError, match_hostname
+
+# Not needed, but documenting what we provide.
+__all__ = ('CertificateError', 'match_hostname')
105
libs/requests/packages/urllib3/packages/ssl_match_hostname/_implementation.py

@@ -0,0 +1,105 @@
"""The match_hostname() function from Python 3.3.3, essential when using SSL."""
# Note: This file is under the PSF license as the code comes from the python
# stdlib. http://docs.python.org/3/license.html
import re
__version__ = '3.4.0.2'
class CertificateError(ValueError):
pass
def _dnsname_match(dn, hostname, max_wildcards=1):
"""Matching according to RFC 6125, section 6.4.3
http://tools.ietf.org/html/rfc6125#section-6.4.3
"""
pats = []
if not dn:
return False
# Ported from python3-syntax:
# leftmost, *remainder = dn.split(r'.')
parts = dn.split(r'.')
leftmost = parts[0]
remainder = parts[1:]
wildcards = leftmost.count('*')
if wildcards > max_wildcards:
# Issue #17980: avoid denials of service by refusing more
# than one wildcard per fragment. A survey of established
# policy among SSL implementations showed it to be a
# reasonable choice.
raise CertificateError(
"too many wildcards in certificate DNS name: " + repr(dn))
# speed up common case w/o wildcards
if not wildcards:
return dn.lower() == hostname.lower()
# RFC 6125, section 6.4.3, subitem 1.
# The client SHOULD NOT attempt to match a presented identifier in which
# the wildcard character comprises a label other than the left-most label.
if leftmost == '*':
# When '*' is a fragment by itself, it matches a non-empty dotless
# fragment.
pats.append('[^.]+')
elif leftmost.startswith('xn--') or hostname.startswith('xn--'):
# RFC 6125, section 6.4.3, subitem 3.
# The client SHOULD NOT attempt to match a presented identifier
# where the wildcard character is embedded within an A-label or
# U-label of an internationalized domain name.
pats.append(re.escape(leftmost))
else:
# Otherwise, '*' matches any dotless string, e.g. www*
pats.append(re.escape(leftmost).replace(r'\*', '[^.]*'))
# add the remaining fragments, ignore any wildcards
for frag in remainder:
pats.append(re.escape(frag))
pat = re.compile(r'\A' + r'\.'.join(pats) + r'\Z', re.IGNORECASE)
return pat.match(hostname)
def match_hostname(cert, hostname):
"""Verify that *cert* (in decoded format as returned by
SSLSocket.getpeercert()) matches the *hostname*. RFC 2818 and RFC 6125
rules are followed, but IP addresses are not accepted for *hostname*.
CertificateError is raised on failure. On success, the function
returns nothing.
"""
if not cert:
raise ValueError("empty or no certificate")
dnsnames = []
san = cert.get('subjectAltName', ())
for key, value in san:
if key == 'DNS':
if _dnsname_match(value, hostname):
return
dnsnames.append(value)
if not dnsnames:
# The subject is only checked when there is no dNSName entry
# in subjectAltName
for sub in cert.get('subject', ()):
for key, value in sub:
# XXX according to RFC 2818, the most specific Common Name
# must be used.
if key == 'commonName':
if _dnsname_match(value, hostname):
return
dnsnames.append(value)
if len(dnsnames) > 1:
raise CertificateError("hostname %r "
"doesn't match either of %s"
% (hostname, ', '.join(map(repr, dnsnames))))
elif len(dnsnames) == 1:
raise CertificateError("hostname %r "
"doesn't match %r"
% (hostname, dnsnames[0]))
else:
raise CertificateError("no appropriate commonName or "
"subjectAltName fields were found")

106
libs/requests/packages/urllib3/poolmanager.py

@@ -6,9 +6,14 @@

 import logging

+try:  # Python 3
+    from urllib.parse import urljoin
+except ImportError:
+    from urlparse import urljoin
+
 from ._collections import RecentlyUsedContainer
 from .connectionpool import HTTPConnectionPool, HTTPSConnectionPool
-from .connectionpool import connection_from_url, port_by_scheme
+from .connectionpool import port_by_scheme
 from .request import RequestMethods
 from .util import parse_url
@@ -55,6 +60,8 @@ class PoolManager(RequestMethods):
     """

+    proxy = None
+
     def __init__(self, num_pools=10, headers=None, **connection_pool_kw):
         RequestMethods.__init__(self, headers)
         self.connection_pool_kw = connection_pool_kw
@ -94,13 +101,16 @@ class PoolManager(RequestMethods):
If ``port`` isn't given, it will be derived from the ``scheme`` using If ``port`` isn't given, it will be derived from the ``scheme`` using
``urllib3.connectionpool.port_by_scheme``. ``urllib3.connectionpool.port_by_scheme``.
""" """
scheme = scheme or 'http' scheme = scheme or 'http'
port = port or port_by_scheme.get(scheme, 80) port = port or port_by_scheme.get(scheme, 80)
pool_key = (scheme, host, port) pool_key = (scheme, host, port)
# If the scheme, host, or port doesn't match existing open connections, with self.pools.lock:
# open a new ConnectionPool. # If the scheme, host, or port doesn't match existing open
# connections, open a new ConnectionPool.
pool = self.pools.get(pool_key) pool = self.pools.get(pool_key)
if pool: if pool:
return pool return pool
@ -139,12 +149,19 @@ class PoolManager(RequestMethods):
if 'headers' not in kw: if 'headers' not in kw:
kw['headers'] = self.headers kw['headers'] = self.headers
if self.proxy is not None and u.scheme == "http":
response = conn.urlopen(method, url, **kw)
else:
response = conn.urlopen(method, u.request_uri, **kw) response = conn.urlopen(method, u.request_uri, **kw)
redirect_location = redirect and response.get_redirect_location() redirect_location = redirect and response.get_redirect_location()
if not redirect_location: if not redirect_location:
return response return response
# Support relative URLs for redirecting.
redirect_location = urljoin(url, redirect_location)
# RFC 2616, Section 10.3.4
if response.status == 303: if response.status == 303:
method = 'GET' method = 'GET'
@ -154,15 +171,59 @@ class PoolManager(RequestMethods):
return self.urlopen(method, redirect_location, **kw) return self.urlopen(method, redirect_location, **kw)
class ProxyManager(RequestMethods): class ProxyManager(PoolManager):
""" """
Given a ConnectionPool to a proxy, the ProxyManager's ``urlopen`` method Behaves just like :class:`PoolManager`, but sends all requests through
will make requests to any url through the defined proxy. The ProxyManager the defined proxy, using the CONNECT method for HTTPS URLs.
class will automatically set the 'Host' header if it is not provided.
:param poxy_url:
The URL of the proxy to be used.
:param proxy_headers:
A dictionary contaning headers that will be sent to the proxy. In case
of HTTP they are being sent with each request, while in the
HTTPS/CONNECT case they are sent only once. Could be used for proxy
authentication.
Example:
>>> proxy = urllib3.ProxyManager('http://localhost:3128/')
>>> r1 = proxy.request('GET', 'http://google.com/')
>>> r2 = proxy.request('GET', 'http://httpbin.org/')
>>> len(proxy.pools)
1
>>> r3 = proxy.request('GET', 'https://httpbin.org/')
>>> r4 = proxy.request('GET', 'https://twitter.com/')
>>> len(proxy.pools)
3
""" """
def __init__(self, proxy_pool): def __init__(self, proxy_url, num_pools=10, headers=None,
self.proxy_pool = proxy_pool proxy_headers=None, **connection_pool_kw):
if isinstance(proxy_url, HTTPConnectionPool):
proxy_url = '%s://%s:%i' % (proxy_url.scheme, proxy_url.host,
proxy_url.port)
proxy = parse_url(proxy_url)
if not proxy.port:
port = port_by_scheme.get(proxy.scheme, 80)
proxy = proxy._replace(port=port)
self.proxy = proxy
self.proxy_headers = proxy_headers or {}
assert self.proxy.scheme in ("http", "https"), \
'Not supported proxy scheme %s' % self.proxy.scheme
connection_pool_kw['_proxy'] = self.proxy
connection_pool_kw['_proxy_headers'] = self.proxy_headers
super(ProxyManager, self).__init__(
num_pools, headers, **connection_pool_kw)
def connection_from_host(self, host, port=None, scheme='http'):
if scheme == "https":
return super(ProxyManager, self).connection_from_host(
host, port, scheme)
return super(ProxyManager, self).connection_from_host(
self.proxy.host, self.proxy.port, self.proxy.scheme)
def _set_proxy_headers(self, url, headers=None): def _set_proxy_headers(self, url, headers=None):
""" """
@ -171,22 +232,27 @@ class ProxyManager(RequestMethods):
""" """
headers_ = {'Accept': '*/*'} headers_ = {'Accept': '*/*'}
host = parse_url(url).host netloc = parse_url(url).netloc
if host: if netloc:
headers_['Host'] = host headers_['Host'] = netloc
if headers: if headers:
headers_.update(headers) headers_.update(headers)
return headers_ return headers_
def urlopen(self, method, url, **kw): def urlopen(self, method, url, redirect=True, **kw):
"Same as HTTP(S)ConnectionPool.urlopen, ``url`` must be absolute." "Same as HTTP(S)ConnectionPool.urlopen, ``url`` must be absolute."
kw['assert_same_host'] = False u = parse_url(url)
kw['headers'] = self._set_proxy_headers(url, headers=kw.get('headers'))
return self.proxy_pool.urlopen(method, url, **kw) if u.scheme == "http":
# For proxied HTTPS requests, httplib sets the necessary headers
# on the CONNECT to the proxy. For HTTP, we'll definitely
# need to set 'Host' at the very least.
kw['headers'] = self._set_proxy_headers(url, kw.get('headers',
self.headers))
return super(ProxyManager, self).urlopen(method, url, redirect, **kw)
def proxy_from_url(url, **pool_kw): def proxy_from_url(url, **kw):
proxy_pool = connection_from_url(url, **pool_kw) return ProxyManager(proxy_url=url, **kw)
return ProxyManager(proxy_pool)
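With this rewrite, ProxyManager is a PoolManager subclass and proxy_from_url is a thin constructor wrapper. A plausible minimal use, assuming a proxy is actually listening on localhost:3128:

    from urllib3 import proxy_from_url

    proxy = proxy_from_url('http://localhost:3128/')
    # HTTP requests go through the proxy pool; HTTPS URLs tunnel via CONNECT.
    r = proxy.request('GET', 'http://httpbin.org/ip')
    print(r.status)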

2
libs/requests/packages/urllib3/request.py

@@ -30,7 +30,7 @@ class RequestMethods(object):
     in the URL (such as GET, HEAD, DELETE).
 
     :meth:`.request_encode_body` is for sending requests whose fields are
-    encoded in the *body* of the request using multipart or www-orm-urlencoded
+    encoded in the *body* of the request using multipart or www-form-urlencoded
     (such as for POST, PUT, PATCH).
 
     :meth:`.request` is for making any kind of request, it will look up the

85
libs/requests/packages/urllib3/response.py

@@ -1,5 +1,5 @@
 # urllib3/response.py
-# Copyright 2008-2012 Andrey Petrov and contributors (see CONTRIBUTORS.txt)
+# Copyright 2008-2013 Andrey Petrov and contributors (see CONTRIBUTORS.txt)
 #
 # This module is part of urllib3 and is released under
 # the MIT License: http://www.opensource.org/licenses/mit-license.php
@@ -7,9 +7,11 @@
 import logging
 import zlib
+import io
 
 from .exceptions import DecodeError
 from .packages.six import string_types as basestring, binary_type
+from .util import is_fp_closed
 
 log = logging.getLogger(__name__)
@@ -48,7 +50,7 @@ def _get_decoder(mode):
     return DeflateDecoder()
 
-class HTTPResponse(object):
+class HTTPResponse(io.IOBase):
     """
     HTTP Response container.
@@ -72,6 +74,7 @@ class HTTPResponse(object):
     """
 
     CONTENT_DECODERS = ['gzip', 'deflate']
+    REDIRECT_STATUSES = [301, 302, 303, 307, 308]
 
     def __init__(self, body='', headers=None, status=0, version=0, reason=None,
                  strict=0, preload_content=True, decode_content=True,
@@ -87,6 +90,7 @@ class HTTPResponse(object):
         self._body = body if body and isinstance(body, basestring) else None
         self._fp = None
         self._original_response = original_response
+        self._fp_bytes_read = 0
 
         self._pool = pool
         self._connection = connection
@@ -105,7 +109,7 @@ class HTTPResponse(object):
             code and valid location. ``None`` if redirect status and no
             location. ``False`` if not a redirect status code.
         """
-        if self.status in [301, 302, 303, 307]:
+        if self.status in self.REDIRECT_STATUSES:
             return self.headers.get('location')
 
         return False
@@ -126,6 +130,14 @@ class HTTPResponse(object):
         if self._fp:
             return self.read(cache_content=True)
 
+    def tell(self):
+        """
+        Obtain the number of bytes pulled over the wire so far. May differ from
+        the amount of content returned by :meth:``HTTPResponse.read`` if bytes
+        are encoded on the wire (e.g, compressed).
+        """
+        return self._fp_bytes_read
+
     def read(self, amt=None, decode_content=None, cache_content=False):
         """
         Similar to :meth:`httplib.HTTPResponse.read`, but with two additional
@@ -180,14 +192,18 @@ class HTTPResponse(object):
                     self._fp.close()
                     flush_decoder = True
 
+            self._fp_bytes_read += len(data)
+
             try:
                 if decode_content and self._decoder:
                     data = self._decoder.decompress(data)
-            except (IOError, zlib.error):
-                raise DecodeError("Received response with content-encoding: %s, but "
-                                  "failed to decode it." % content_encoding)
+            except (IOError, zlib.error) as e:
+                raise DecodeError(
+                    "Received response with content-encoding: %s, but "
+                    "failed to decode it." % content_encoding,
+                    e)
 
-            if flush_decoder and self._decoder:
+            if flush_decoder and decode_content and self._decoder:
                 buf = self._decoder.decompress(binary_type())
                 data += buf + self._decoder.flush()
@@ -200,6 +216,29 @@ class HTTPResponse(object):
                 if self._original_response and self._original_response.isclosed():
                     self.release_conn()
 
+    def stream(self, amt=2**16, decode_content=None):
+        """
+        A generator wrapper for the read() method. A call will block until
+        ``amt`` bytes have been read from the connection or until the
+        connection is closed.
+
+        :param amt:
+            How much of the content to read. The generator will return up to
+            much data per iteration, but may return less. This is particularly
+            likely when using compressed data. However, the empty string will
+            never be returned.
+
+        :param decode_content:
+            If True, will attempt to decode the body based on the
+            'content-encoding' header.
+        """
+        while not is_fp_closed(self._fp):
+            data = self.read(amt=amt, decode_content=decode_content)
+
+            if data:
+                yield data
+
     @classmethod
     def from_httplib(ResponseCls, r, **response_kw):
         """
@@ -239,3 +278,35 @@ class HTTPResponse(object):
 
     def getheader(self, name, default=None):
         return self.headers.get(name, default)
+
+    # Overrides from io.IOBase
+    def close(self):
+        if not self.closed:
+            self._fp.close()
+
+    @property
+    def closed(self):
+        if self._fp is None:
+            return True
+        elif hasattr(self._fp, 'closed'):
+            return self._fp.closed
+        elif hasattr(self._fp, 'isclosed'):  # Python 2
+            return self._fp.isclosed()
+        else:
+            return True
+
+    def fileno(self):
+        if self._fp is None:
+            raise IOError("HTTPResponse has no file to get a fileno from")
+        elif hasattr(self._fp, "fileno"):
+            return self._fp.fileno()
+        else:
+            raise IOError("The file-like object this HTTPResponse is wrapped "
+                          "around has no file descriptor")
+
+    def flush(self):
+        if self._fp is not None and hasattr(self._fp, 'flush'):
+            return self._fp.flush()
+
+    def readable(self):
+        return True
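The new stream()/tell() pair composes like this; a small sketch, assuming the patched urllib3 is importable and the URL is reachable:

    import urllib3

    http = urllib3.PoolManager()
    r = http.request('GET', 'http://httpbin.org/bytes/1024',
                     preload_content=False)

    # stream() yields chunks until is_fp_closed(self._fp) turns True;
    # tell() reports raw bytes read off the wire (before any decoding).
    for chunk in r.stream(amt=256):
        print(len(chunk), r.tell())

    r.release_conn()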

283
libs/requests/packages/urllib3/util.py

@@ -6,10 +6,11 @@
 from base64 import b64encode
+from binascii import hexlify, unhexlify
 from collections import namedtuple
-from socket import error as SocketError
 from hashlib import md5, sha1
-from binascii import hexlify, unhexlify
+from socket import error as SocketError, _GLOBAL_DEFAULT_TIMEOUT
+import time
 
 try:
     from select import poll, POLLIN
@@ -31,9 +32,238 @@ try:  # Test for SSL features
 except ImportError:
     pass
 
 from .packages import six
-from .exceptions import LocationParseError, SSLError
+from .exceptions import LocationParseError, SSLError, TimeoutStateError
+
+
+_Default = object()
+# The default timeout to use for socket connections. This is the attribute used
+# by httplib to define the default timeout
+
+
+def current_time():
+    """
+    Retrieve the current time, this function is mocked out in unit testing.
+    """
+    return time.time()
+
+
+class Timeout(object):
+    """
+    Utility object for storing timeout values.
+
+    Example usage:
+
+    .. code-block:: python
+
+        timeout = urllib3.util.Timeout(connect=2.0, read=7.0)
+        pool = HTTPConnectionPool('www.google.com', 80, timeout=timeout)
+        pool.request(...) # Etc, etc
+
+    :param connect:
+        The maximum amount of time to wait for a connection attempt to a server
+        to succeed. Omitting the parameter will default the connect timeout to
+        the system default, probably `the global default timeout in socket.py
+        <http://hg.python.org/cpython/file/603b4d593758/Lib/socket.py#l535>`_.
+        None will set an infinite timeout for connection attempts.
+
+    :type connect: integer, float, or None
+
+    :param read:
+        The maximum amount of time to wait between consecutive
+        read operations for a response from the server. Omitting
+        the parameter will default the read timeout to the system
+        default, probably `the global default timeout in socket.py
+        <http://hg.python.org/cpython/file/603b4d593758/Lib/socket.py#l535>`_.
+        None will set an infinite timeout.
+
+    :type read: integer, float, or None
+
+    :param total:
+        This combines the connect and read timeouts into one; the read timeout
+        will be set to the time leftover from the connect attempt. In the
+        event that both a connect timeout and a total are specified, or a read
+        timeout and a total are specified, the shorter timeout will be applied.
+
+        Defaults to None.
+
+    :type total: integer, float, or None
+
+    .. note::
+
+        Many factors can affect the total amount of time for urllib3 to return
+        an HTTP response. Specifically, Python's DNS resolver does not obey the
+        timeout specified on the socket. Other factors that can affect total
+        request time include high CPU load, high swap, the program running at a
+        low priority level, or other behaviors. The observed running time for
+        urllib3 to return a response may be greater than the value passed to
+        `total`.
+
+        In addition, the read and total timeouts only measure the time between
+        read operations on the socket connecting the client and the server,
+        not the total amount of time for the request to return a complete
+        response. For most requests, the timeout is raised because the server
+        has not sent the first byte in the specified time. This is not always
+        the case; if a server streams one byte every fifteen seconds, a timeout
+        of 20 seconds will not ever trigger, even though the request will
+        take several minutes to complete.
+
+        If your goal is to cut off any request after a set amount of wall clock
+        time, consider having a second "watcher" thread to cut off a slow
+        request.
+    """
+
+    #: A sentinel object representing the default timeout value
+    DEFAULT_TIMEOUT = _GLOBAL_DEFAULT_TIMEOUT
+
+    def __init__(self, total=None, connect=_Default, read=_Default):
+        self._connect = self._validate_timeout(connect, 'connect')
+        self._read = self._validate_timeout(read, 'read')
+        self.total = self._validate_timeout(total, 'total')
+        self._start_connect = None
+
+    def __str__(self):
+        return '%s(connect=%r, read=%r, total=%r)' % (
+            type(self).__name__, self._connect, self._read, self.total)
+
+    @classmethod
+    def _validate_timeout(cls, value, name):
+        """ Check that a timeout attribute is valid
+
+        :param value: The timeout value to validate
+        :param name: The name of the timeout attribute to validate. This is used
+            for clear error messages
+        :return: the value
+        :raises ValueError: if the type is not an integer or a float, or if it
+            is a numeric value less than zero
+        """
+        if value is _Default:
+            return cls.DEFAULT_TIMEOUT
+
+        if value is None or value is cls.DEFAULT_TIMEOUT:
+            return value
+
+        try:
+            float(value)
+        except (TypeError, ValueError):
+            raise ValueError("Timeout value %s was %s, but it must be an "
+                             "int or float." % (name, value))
+
+        try:
+            if value < 0:
+                raise ValueError("Attempted to set %s timeout to %s, but the "
+                                 "timeout cannot be set to a value less "
+                                 "than 0." % (name, value))
+        except TypeError:  # Python 3
+            raise ValueError("Timeout value %s was %s, but it must be an "
+                             "int or float." % (name, value))
+
+        return value
+
+    @classmethod
+    def from_float(cls, timeout):
+        """ Create a new Timeout from a legacy timeout value.
+
+        The timeout value used by httplib.py sets the same timeout on the
+        connect(), and recv() socket requests. This creates a :class:`Timeout`
+        object that sets the individual timeouts to the ``timeout`` value passed
+        to this function.
+
+        :param timeout: The legacy timeout value
+        :type timeout: integer, float, sentinel default object, or None
+        :return: a Timeout object
+        :rtype: :class:`Timeout`
+        """
+        return Timeout(read=timeout, connect=timeout)
+
+    def clone(self):
+        """ Create a copy of the timeout object
+
+        Timeout properties are stored per-pool but each request needs a fresh
+        Timeout object to ensure each one has its own start/stop configured.
+
+        :return: a copy of the timeout object
+        :rtype: :class:`Timeout`
+        """
+        # We can't use copy.deepcopy because that will also create a new object
+        # for _GLOBAL_DEFAULT_TIMEOUT, which socket.py uses as a sentinel to
+        # detect the user default.
+        return Timeout(connect=self._connect, read=self._read,
+                       total=self.total)
+
+    def start_connect(self):
+        """ Start the timeout clock, used during a connect() attempt
+
+        :raises urllib3.exceptions.TimeoutStateError: if you attempt
+            to start a timer that has been started already.
+        """
+        if self._start_connect is not None:
+            raise TimeoutStateError("Timeout timer has already been started.")
+        self._start_connect = current_time()
+        return self._start_connect
+
+    def get_connect_duration(self):
+        """ Gets the time elapsed since the call to :meth:`start_connect`.
+
+        :return: the elapsed time
+        :rtype: float
+        :raises urllib3.exceptions.TimeoutStateError: if you attempt
+            to get duration for a timer that hasn't been started.
+        """
+        if self._start_connect is None:
+            raise TimeoutStateError("Can't get connect duration for timer "
+                                    "that has not started.")
+        return current_time() - self._start_connect
+
+    @property
+    def connect_timeout(self):
+        """ Get the value to use when setting a connection timeout.
+
+        This will be a positive float or integer, the value None
+        (never timeout), or the default system timeout.
+
+        :return: the connect timeout
+        :rtype: int, float, :attr:`Timeout.DEFAULT_TIMEOUT` or None
+        """
+        if self.total is None:
+            return self._connect
+
+        if self._connect is None or self._connect is self.DEFAULT_TIMEOUT:
+            return self.total
+
+        return min(self._connect, self.total)
+
+    @property
+    def read_timeout(self):
+        """ Get the value for the read timeout.
+
+        This assumes some time has elapsed in the connection timeout and
+        computes the read timeout appropriately.
+
+        If self.total is set, the read timeout is dependent on the amount of
+        time taken by the connect timeout. If the connection time has not been
+        established, a :exc:`~urllib3.exceptions.TimeoutStateError` will be
+        raised.
+
+        :return: the value to use for the read timeout
+        :rtype: int, float, :attr:`Timeout.DEFAULT_TIMEOUT` or None
+        :raises urllib3.exceptions.TimeoutStateError: If :meth:`start_connect`
+            has not yet been called on this object.
+        """
+        if (self.total is not None and
+            self.total is not self.DEFAULT_TIMEOUT and
+            self._read is not None and
+            self._read is not self.DEFAULT_TIMEOUT):
+            # in case the connect timeout has not yet been established.
+            if self._start_connect is None:
+                return self._read
+            return max(0, min(self.total - self.get_connect_duration(),
+                              self._read))
+        elif self.total is not None and self.total is not self.DEFAULT_TIMEOUT:
+            return max(0, self.total - self.get_connect_duration())
+        else:
+            return self._read
 
 class Url(namedtuple('Url', ['scheme', 'auth', 'host', 'port', 'path', 'query', 'fragment'])):
@@ -61,6 +291,13 @@ class Url(namedtuple('Url', ['scheme', 'auth', 'host', 'port', 'path', 'query',
         return uri
 
+    @property
+    def netloc(self):
+        """Network location including host and port"""
+        if self.port:
+            return '%s:%d' % (self.host, self.port)
+        return self.host
+
 def split_first(s, delims):
     """
@@ -114,7 +351,7 @@ def parse_url(url):
     # While this code has overlap with stdlib's urlparse, it is much
     # simplified for our needs and less annoying.
-    # Additionally, this imeplementations does silly things to be optimal
+    # Additionally, this implementations does silly things to be optimal
     # on CPython.
 
     scheme = None
@@ -139,11 +376,13 @@ def parse_url(url):
     # Auth
     if '@' in url:
-        auth, url = url.split('@', 1)
+        # Last '@' denotes end of auth part
+        auth, url = url.rsplit('@', 1)
 
     # IPv6
     if url and url[0] == '[':
-        host, url = url[1:].split(']', 1)
+        host, url = url.split(']', 1)
+        host += ']'
 
     # Port
     if ':' in url:
@@ -152,10 +391,14 @@ def parse_url(url):
         if not host:
             host = _host
 
-        if not port.isdigit():
-            raise LocationParseError("Failed to parse: %s" % url)
-        port = int(port)
+        if port:
+            # If given, ports must be integers.
+            if not port.isdigit():
+                raise LocationParseError("Failed to parse: %s" % url)
+            port = int(port)
+        else:
+            # Blank ports are cool, too. (rfc3986#section-3.2.3)
+            port = None
 
     elif not host and url:
         host = url
@@ -183,7 +426,7 @@ def get_host(url):
 def make_headers(keep_alive=None, accept_encoding=None, user_agent=None,
-                 basic_auth=None):
+                 basic_auth=None, proxy_basic_auth=None):
     """
     Shortcuts for generating request headers.
@@ -204,6 +447,10 @@ def make_headers(keep_alive=None, accept_encoding=None, user_agent=None,
         Colon-separated username:password string for 'authorization: basic ...'
         auth header.
 
+    :param proxy_basic_auth:
+        Colon-separated username:password string for 'proxy-authorization: basic ...'
+        auth header.
+
     Example: ::
 
         >>> make_headers(keep_alive=True, user_agent="Batman/1.0")
@@ -231,6 +478,10 @@ def make_headers(keep_alive=None, accept_encoding=None, user_agent=None,
         headers['authorization'] = 'Basic ' + \
             b64encode(six.b(basic_auth)).decode('utf-8')
 
+    if proxy_basic_auth:
+        headers['proxy-authorization'] = 'Basic ' + \
+            b64encode(six.b(proxy_basic_auth)).decode('utf-8')
+
     return headers
@@ -341,6 +592,20 @@ def assert_fingerprint(cert, fingerprint):
                    .format(hexlify(fingerprint_bytes),
                            hexlify(cert_digest)))
 
+
+def is_fp_closed(obj):
+    """
+    Checks whether a given file-like object is closed.
+
+    :param obj:
+        The file-like object to check.
+    """
+    if hasattr(obj, 'fp'):
+        # Object is a container for another file-like object that gets released
+        # on exhaustion (e.g. HTTPResponse)
+        return obj.fp is None
+
+    return obj.closed
+
+
 if SSLContext is not None:  # Python 3.2+
     def ssl_wrap_socket(sock, keyfile=None, certfile=None, cert_reqs=None,
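A minimal sketch of the new Timeout object in use (host and values illustrative):

    from urllib3 import HTTPConnectionPool
    from urllib3.util import Timeout

    # Separate connect/read budgets; total caps their combined wall-clock time.
    timeout = Timeout(connect=2.0, read=7.0, total=8.0)
    pool = HTTPConnectionPool('httpbin.org', 80, timeout=timeout)
    r = pool.request('GET', '/delay/1')
    print(r.status)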

155
libs/requests/sessions.py

@@ -12,8 +12,9 @@ import os
 from collections import Mapping
 from datetime import datetime
 
-from .compat import cookielib, OrderedDict, urljoin, urlparse
-from .cookies import cookiejar_from_dict, extract_cookies_to_jar, RequestsCookieJar
+from .compat import cookielib, OrderedDict, urljoin, urlparse, builtin_str
+from .cookies import (
+    cookiejar_from_dict, extract_cookies_to_jar, RequestsCookieJar, merge_cookies)
 from .models import Request, PreparedRequest
 from .hooks import default_hooks, dispatch_hook
 from .utils import to_key_val_list, default_headers
@@ -65,21 +66,32 @@ def merge_setting(request_setting, session_setting, dict_class=OrderedDict):
     return merged_setting
 
 
+def merge_hooks(request_hooks, session_hooks, dict_class=OrderedDict):
+    """
+    Properly merges both requests and session hooks.
+
+    This is necessary because when request_hooks == {'response': []}, the
+    merge breaks Session hooks entirely.
+    """
+    if session_hooks is None or session_hooks.get('response') == []:
+        return request_hooks
+
+    if request_hooks is None or request_hooks.get('response') == []:
+        return session_hooks
+
+    return merge_setting(request_hooks, session_hooks, dict_class)
+
+
 class SessionRedirectMixin(object):
     def resolve_redirects(self, resp, req, stream=False, timeout=None,
                           verify=True, cert=None, proxies=None):
         """Receives a Response. Returns a generator of Responses."""
 
         i = 0
-        prepared_request = PreparedRequest()
-        prepared_request.body = req.body
-        prepared_request.headers = req.headers.copy()
-        prepared_request.hooks = req.hooks
-        prepared_request.method = req.method
-        prepared_request.url = req.url
 
         # ((resp.status_code is codes.see_other))
-        while (('location' in resp.headers and resp.status_code in REDIRECT_STATI)):
+        while ('location' in resp.headers and resp.status_code in REDIRECT_STATI):
+            prepared_request = req.copy()
 
             resp.content  # Consume socket so it can be released
@@ -90,13 +102,17 @@ class SessionRedirectMixin(object):
                 resp.close()
 
             url = resp.headers['location']
-            method = prepared_request.method
+            method = req.method
 
             # Handle redirection without scheme (see: RFC 1808 Section 4)
             if url.startswith('//'):
                 parsed_rurl = urlparse(resp.url)
                 url = '%s:%s' % (parsed_rurl.scheme, url)
 
+            # The scheme should be lower case...
+            parsed = urlparse(url)
+            url = parsed.geturl()
+
             # Facilitate non-RFC2616-compliant 'location' headers
             # (e.g. '/path/to/resource' instead of 'http://domain.tld/path/to/resource')
             # Compliant with RFC3986, we percent encode the url.
@@ -109,12 +125,17 @@ class SessionRedirectMixin(object):
             # http://www.w3.org/Protocols/rfc2616/rfc2616-sec10.html#sec10.3.4
             if (resp.status_code == codes.see_other and
-                    prepared_request.method != 'HEAD'):
+                    method != 'HEAD'):
                 method = 'GET'
 
             # Do what the browsers do, despite standards...
-            if (resp.status_code in (codes.moved, codes.found) and
-                    prepared_request.method not in ('GET', 'HEAD')):
+            # First, turn 302s into GETs.
+            if resp.status_code == codes.found and method != 'HEAD':
+                method = 'GET'
+
+            # Second, if a POST is responded to with a 301, turn it into a GET.
+            # This bizarre behaviour is explained in Issue 1704.
+            if resp.status_code == codes.moved and method == 'POST':
                 method = 'GET'
 
             prepared_request.method = method
@@ -132,7 +153,9 @@ class SessionRedirectMixin(object):
             except KeyError:
                 pass
 
-            prepared_request.prepare_cookies(self.cookies)
+            extract_cookies_to_jar(prepared_request._cookies,
+                                   prepared_request, resp.raw)
+            prepared_request.prepare_cookies(prepared_request._cookies)
 
             resp = self.send(
                 prepared_request,
@@ -153,7 +176,7 @@ class SessionRedirectMixin(object):
 class Session(SessionRedirectMixin):
     """A Requests session.
 
-    Provides cookie persistience, connection-pooling, and configuration.
+    Provides cookie persistence, connection-pooling, and configuration.
 
     Basic Usage::
@@ -208,7 +231,10 @@ class Session(SessionRedirectMixin):
         #: Should we trust the environment?
         self.trust_env = True
 
-        # Set up a CookieJar to be used by default
+        #: A CookieJar containing all currently outstanding cookies set on this
+        #: session. By default it is a
+        #: :class:`RequestsCookieJar <requests.cookies.RequestsCookieJar>`, but
+        #: may be any other ``cookielib.CookieJar`` compatible object.
         self.cookies = cookiejar_from_dict({})
 
         # Default connection adapters.
@@ -222,6 +248,45 @@ class Session(SessionRedirectMixin):
     def __exit__(self, *args):
         self.close()
 
+    def prepare_request(self, request):
+        """Constructs a :class:`PreparedRequest <PreparedRequest>` for
+        transmission and returns it. The :class:`PreparedRequest` has settings
+        merged from the :class:`Request <Request>` instance and those of the
+        :class:`Session`.
+
+        :param request: :class:`Request` instance to prepare with this
+        session's settings.
+        """
+        cookies = request.cookies or {}
+
+        # Bootstrap CookieJar.
+        if not isinstance(cookies, cookielib.CookieJar):
+            cookies = cookiejar_from_dict(cookies)
+
+        # Merge with session cookies
+        merged_cookies = merge_cookies(
+            merge_cookies(RequestsCookieJar(), self.cookies), cookies)
+
+        # Set environment's basic authentication if not explicitly set.
+        auth = request.auth
+        if self.trust_env and not auth and not self.auth:
+            auth = get_netrc_auth(request.url)
+
+        p = PreparedRequest()
+        p.prepare(
+            method=request.method.upper(),
+            url=request.url,
+            files=request.files,
+            data=request.data,
+            headers=merge_setting(request.headers, self.headers, dict_class=CaseInsensitiveDict),
+            params=merge_setting(request.params, self.params),
+            auth=merge_setting(auth, self.auth),
+            cookies=merged_cookies,
+            hooks=merge_hooks(request.hooks, self.hooks),
+        )
+        return p
+
     def request(self, method, url,
         params=None,
         data=None,
@@ -266,18 +331,23 @@ class Session(SessionRedirectMixin):
             If Tuple, ('cert', 'key') pair.
         """
 
-        cookies = cookies or {}
-        proxies = proxies or {}
-
-        # Bootstrap CookieJar.
-        if not isinstance(cookies, cookielib.CookieJar):
-            cookies = cookiejar_from_dict(cookies)
+        method = builtin_str(method)
+
+        # Create the Request.
+        req = Request(
+            method = method.upper(),
+            url = url,
+            headers = headers,
+            files = files,
+            data = data or {},
+            params = params or {},
+            auth = auth,
+            cookies = cookies,
+            hooks = hooks,
+        )
+        prep = self.prepare_request(req)
 
-        # Merge with session cookies
-        merged_cookies = RequestsCookieJar()
-        merged_cookies.update(self.cookies)
-        merged_cookies.update(cookies)
-        cookies = merged_cookies
+        proxies = proxies or {}
 
         # Gather clues from the surrounding environment.
         if self.trust_env:
@@ -286,10 +356,6 @@ class Session(SessionRedirectMixin):
             for (k, v) in env_proxies.items():
                 proxies.setdefault(k, v)
 
-        # Set environment's basic authentication.
-        if not auth:
-            auth = get_netrc_auth(url)
-
         # Look for configuration.
         if not verify and verify is not False:
             verify = os.environ.get('REQUESTS_CA_BUNDLE')
@@ -299,30 +365,11 @@ class Session(SessionRedirectMixin):
                 verify = os.environ.get('CURL_CA_BUNDLE')
 
         # Merge all the kwargs.
-        params = merge_setting(params, self.params)
-        headers = merge_setting(headers, self.headers, dict_class=CaseInsensitiveDict)
-        auth = merge_setting(auth, self.auth)
         proxies = merge_setting(proxies, self.proxies)
-        hooks = merge_setting(hooks, self.hooks)
         stream = merge_setting(stream, self.stream)
         verify = merge_setting(verify, self.verify)
         cert = merge_setting(cert, self.cert)
 
-        # Create the Request.
-        req = Request()
-        req.method = method.upper()
-        req.url = url
-        req.headers = headers
-        req.files = files
-        req.data = data
-        req.params = params
-        req.auth = auth
-        req.cookies = cookies
-        req.hooks = hooks
-
-        # Prepare the Request.
-        prep = req.prepare()
-
         # Send the request.
         send_kwargs = {
             'stream': stream,
@@ -416,7 +463,7 @@ class Session(SessionRedirectMixin):
 
         # It's possible that users might accidentally send a Request object.
         # Guard against that specific failure case.
-        if getattr(request, 'prepare', None):
+        if not isinstance(request, PreparedRequest):
             raise ValueError('You can only send PreparedRequests.')
 
         # Set up variables needed for resolve_redirects and dispatching of
@@ -443,6 +490,10 @@ class Session(SessionRedirectMixin):
         r = dispatch_hook('response', hooks, r, **kwargs)
 
         # Persist cookies
+        if r.history:
+            # If the hooks create history then we want those cookies too
+            for resp in r.history:
+                extract_cookies_to_jar(self.cookies, resp.request, resp.raw)
         extract_cookies_to_jar(self.cookies, request, r.raw)
 
         # Redirect resolving generator.
@@ -467,7 +518,7 @@ class Session(SessionRedirectMixin):
         """Returns the appropriate connnection adapter for the given URL."""
         for (prefix, adapter) in self.adapters.items():
 
-            if url.startswith(prefix):
+            if url.lower().startswith(prefix):
                 return adapter
 
         # Nothing matches :-/
@@ -475,7 +526,7 @@ class Session(SessionRedirectMixin):
     def close(self):
         """Closes all adapters and as such the session"""
-        for _, v in self.adapters.items():
+        for v in self.adapters.values():
             v.close()
 
     def mount(self, prefix, adapter):
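The extracted prepare_request makes the merge step reusable on its own; a plausible minimal use (URL illustrative):

    import requests

    s = requests.Session()
    req = requests.Request('GET', 'http://httpbin.org/get', params={'q': 'test'})

    prep = s.prepare_request(req)   # merges session headers, params, cookies, auth
    prep.headers['X-Debug'] = '1'   # tweak the prepared request before sending
    resp = s.send(prep)
    print(resp.status_code)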

3
libs/requests/status_codes.py

@@ -18,7 +18,8 @@ _codes = {
     205: ('reset_content', 'reset'),
     206: ('partial_content', 'partial'),
     207: ('multi_status', 'multiple_status', 'multi_stati', 'multiple_stati'),
-    208: ('im_used',),
+    208: ('already_reported',),
+    226: ('im_used',),
 
     # Redirection.
     300: ('multiple_choices',),

155
libs/requests/utils.py

@@ -12,18 +12,22 @@ that are also useful for external consumption.
 import cgi
 import codecs
 import collections
+import io
 import os
 import platform
 import re
 import sys
-from netrc import netrc, NetrcParseError
+import socket
+import struct
 
 from . import __version__
 from . import certs
 from .compat import parse_http_list as _parse_list_header
-from .compat import quote, urlparse, bytes, str, OrderedDict, urlunparse
+from .compat import (quote, urlparse, bytes, str, OrderedDict, unquote, is_py2,
+                     builtin_str, getproxies, proxy_bypass)
 from .cookies import RequestsCookieJar, cookiejar_from_dict
 from .structures import CaseInsensitiveDict
+from .exceptions import MissingSchema, InvalidURL
 
 _hush_pyflakes = (RequestsCookieJar,)
@@ -44,16 +48,28 @@ def dict_to_sequence(d):
 def super_len(o):
     if hasattr(o, '__len__'):
         return len(o)
 
     if hasattr(o, 'len'):
         return o.len
 
     if hasattr(o, 'fileno'):
-        return os.fstat(o.fileno()).st_size
+        try:
+            fileno = o.fileno()
+        except io.UnsupportedOperation:
+            pass
+        else:
+            return os.fstat(fileno).st_size
+
+    if hasattr(o, 'getvalue'):
+        # e.g. BytesIO, cStringIO.StringI
+        return len(o.getvalue())
 
 def get_netrc_auth(url):
     """Returns the Requests tuple auth for a given url from netrc."""
 
     try:
+        from netrc import netrc, NetrcParseError
+
         locations = (os.path.expanduser('~/{0}'.format(f)) for f in NETRC_FILES)
         netrc_path = None
@@ -264,8 +280,12 @@ def get_encodings_from_content(content):
     """
 
     charset_re = re.compile(r'<meta.*?charset=["\']*(.+?)["\'>]', flags=re.I)
+    pragma_re = re.compile(r'<meta.*?content=["\']*;?charset=(.+?)["\'>]', flags=re.I)
+    xml_re = re.compile(r'^<\?xml.*?encoding=["\']*(.+?)["\'>]')
 
-    return charset_re.findall(content)
+    return (charset_re.findall(content) +
+            pragma_re.findall(content) +
+            xml_re.findall(content))
 
 def get_encoding_from_headers(headers):
@@ -301,7 +321,7 @@ def stream_decode_response_unicode(iterator, r):
         rv = decoder.decode(chunk)
         if rv:
             yield rv
-    rv = decoder.decode('', final=True)
+    rv = decoder.decode(b'', final=True)
     if rv:
         yield rv
@@ -361,7 +381,11 @@ def unquote_unreserved(uri):
     for i in range(1, len(parts)):
         h = parts[i][0:2]
         if len(h) == 2 and h.isalnum():
-            c = chr(int(h, 16))
+            try:
+                c = chr(int(h, 16))
+            except ValueError:
+                raise InvalidURL("Invalid percent-escape sequence: '%s'" % h)
+
             if c in UNRESERVED_SET:
                 parts[i] = c + parts[i][2:]
             else:
@@ -383,42 +407,96 @@ def requote_uri(uri):
     return quote(unquote_unreserved(uri), safe="!#$%&'()*+,/:;=?@[]~")
 
+def address_in_network(ip, net):
+    """
+    This function allows you to check if on IP belongs to a network subnet
+    Example: returns True if ip = 192.168.1.1 and net = 192.168.1.0/24
+             returns False if ip = 192.168.1.1 and net = 192.168.100.0/24
+    """
+    ipaddr = struct.unpack('=L', socket.inet_aton(ip))[0]
+    netaddr, bits = net.split('/')
+    netmask = struct.unpack('=L', socket.inet_aton(dotted_netmask(int(bits))))[0]
+    network = struct.unpack('=L', socket.inet_aton(netaddr))[0] & netmask
+    return (ipaddr & netmask) == (network & netmask)
+
+
+def dotted_netmask(mask):
+    """
+    Converts mask from /xx format to xxx.xxx.xxx.xxx
+    Example: if mask is 24 function returns 255.255.255.0
+    """
+    bits = 0xffffffff ^ (1 << 32 - mask) - 1
+    return socket.inet_ntoa(struct.pack('>I', bits))
+
+
+def is_ipv4_address(string_ip):
+    try:
+        socket.inet_aton(string_ip)
+    except socket.error:
+        return False
+    return True
+
+
+def is_valid_cidr(string_network):
+    """Very simple check of the cidr format in no_proxy variable"""
+    if string_network.count('/') == 1:
+        try:
+            mask = int(string_network.split('/')[1])
+        except ValueError:
+            return False
+
+        if mask < 1 or mask > 32:
+            return False
+
+        try:
+            socket.inet_aton(string_network.split('/')[0])
+        except socket.error:
+            return False
+    else:
+        return False
+    return True
+
+
 def get_environ_proxies(url):
     """Return a dict of environment proxies."""
 
-    proxy_keys = [
-        'all',
-        'http',
-        'https',
-        'ftp',
-        'socks'
-    ]
-
     get_proxy = lambda k: os.environ.get(k) or os.environ.get(k.upper())
 
     # First check whether no_proxy is defined. If it is, check that the URL
     # we're getting isn't in the no_proxy list.
     no_proxy = get_proxy('no_proxy')
+    netloc = urlparse(url).netloc
 
     if no_proxy:
         # We need to check whether we match here. We need to see if we match
         # the end of the netloc, both with and without the port.
-        no_proxy = no_proxy.split(',')
-        netloc = urlparse(url).netloc
+        no_proxy = no_proxy.replace(' ', '').split(',')
 
+        ip = netloc.split(':')[0]
+        if is_ipv4_address(ip):
+            for proxy_ip in no_proxy:
+                if is_valid_cidr(proxy_ip):
+                    if address_in_network(ip, proxy_ip):
+                        return {}
+        else:
            for host in no_proxy:
                if netloc.endswith(host) or netloc.split(':')[0].endswith(host):
                    # The URL does match something in no_proxy, so we don't want
                    # to apply the proxies on this URL.
                    return {}
 
+    # If the system proxy settings indicate that this URL should be bypassed,
+    # don't proxy.
+    if proxy_bypass(netloc):
+        return {}
+
     # If we get here, we either didn't have no_proxy set or we're not going
-    # anywhere that no_proxy applies to.
-    proxies = [(key, get_proxy(key + '_proxy')) for key in proxy_keys]
-    return dict([(key, val) for (key, val) in proxies if val])
+    # anywhere that no_proxy applies to, and the system settings don't require
+    # bypassing the proxy for the current URL.
+    return getproxies()
 
-def default_user_agent():
+def default_user_agent(name="python-requests"):
     """Return a string representing the default user agent."""
     _implementation = platform.python_implementation()
@@ -444,7 +522,7 @@ def default_user_agent():
         p_system = 'Unknown'
         p_release = 'Unknown'
 
-    return " ".join(['python-requests/%s' % __version__,
+    return " ".join(['%s/%s' % (name, __version__),
                      '%s/%s' % (_implementation, _implementation_version),
                      '%s/%s' % (p_system, p_release)])
@@ -526,25 +604,40 @@ def guess_json_utf(data):
     return None
 
-def prepend_scheme_if_needed(url, new_scheme):
-    '''Given a URL that may or may not have a scheme, prepend the given scheme.
-    Does not replace a present scheme with the one provided as an argument.'''
-    scheme, netloc, path, params, query, fragment = urlparse(url, new_scheme)
-
-    # urlparse is a finicky beast, and sometimes decides that there isn't a
-    # netloc present. Assume that it's being over-cautious, and switch netloc
-    # and path if urlparse decided there was no netloc.
-    if not netloc:
-        netloc, path = path, netloc
-
-    return urlunparse((scheme, netloc, path, params, query, fragment))
+def except_on_missing_scheme(url):
+    """Given a URL, raise a MissingSchema exception if the scheme is missing.
+    """
+    scheme, netloc, path, params, query, fragment = urlparse(url)
+
+    if not scheme:
+        raise MissingSchema('Proxy URLs must have explicit schemes.')
 
 def get_auth_from_url(url):
     """Given a url with authentication components, extract them into a tuple of
     username,password."""
     if url:
+        url = unquote(url)
         parsed = urlparse(url)
         return (parsed.username, parsed.password)
     else:
         return ('', '')
+
+
+def to_native_string(string, encoding='ascii'):
+    """
+    Given a string object, regardless of type, returns a representation of that
+    string in the native string type, encoding and decoding where necessary.
+    This assumes ASCII unless told otherwise.
+    """
+    out = None
+
+    if isinstance(string, builtin_str):
+        out = string
+    else:
+        if is_py2:
+            out = string.encode(encoding)
+        else:
+            out = string.decode(encoding)
+
+    return out
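The new CIDR helpers compose as follows (a quick sketch; addresses illustrative):

    from requests.utils import (address_in_network, dotted_netmask,
                                is_valid_cidr)

    print(dotted_netmask(24))                                     # 255.255.255.0
    print(is_valid_cidr('192.168.1.0/24'))                        # True
    print(address_in_network('192.168.1.1', '192.168.1.0/24'))    # True
    print(address_in_network('192.168.1.1', '192.168.100.0/24'))  # False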

4
libs/tornado/__init__.py

@@ -25,5 +25,5 @@ from __future__ import absolute_import, division, print_function, with_statement
 # is zero for an official release, positive for a development branch,
 # or negative for a release candidate or beta (after the base version
 # number has been incremented)
-version = "3.2.dev2"
-version_info = (3, 2, 0, -99)
+version = "3.2b1"
+version_info = (3, 2, 0, -98)

7
libs/tornado/concurrent.py

@@ -124,11 +124,11 @@ class TracebackFuture(Future):
         self.__exc_info = exc_info
         self.set_exception(exc_info[1])
 
-    def result(self):
+    def result(self, timeout=None):
         if self.__exc_info is not None:
             raise_exc_info(self.__exc_info)
         else:
-            return super(TracebackFuture, self).result()
+            return super(TracebackFuture, self).result(timeout=timeout)
 
 class DummyExecutor(object):
@@ -151,6 +151,9 @@ def run_on_executor(fn):
     The decorated method may be called with a ``callback`` keyword
     argument and returns a future.
 
+    This decorator should be used only on methods of objects with attributes
+    ``executor`` and ``io_loop``.
     """
     @functools.wraps(fn)
     def wrapper(self, *args, **kwargs):
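A short sketch of the documented contract (class name and pool size are illustrative):

    from concurrent.futures import ThreadPoolExecutor
    from tornado.concurrent import run_on_executor
    from tornado.ioloop import IOLoop

    class Worker(object):
        # run_on_executor requires exactly these two attributes.
        executor = ThreadPoolExecutor(max_workers=4)

        def __init__(self):
            self.io_loop = IOLoop.current()

        @run_on_executor
        def compute(self, x):
            return x * x  # runs on the pool; callers get a Future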

6
libs/tornado/curl_httpclient.py

@@ -318,10 +318,12 @@ def _curl_setup_request(curl, request, buffer, headers):
         [native_str("%s: %s" % i) for i in request.headers.items()])
 
     if request.header_callback:
-        curl.setopt(pycurl.HEADERFUNCTION, request.header_callback)
+        curl.setopt(pycurl.HEADERFUNCTION,
+                    lambda line: request.header_callback(native_str(line)))
     else:
         curl.setopt(pycurl.HEADERFUNCTION,
-                    lambda line: _curl_header_callback(headers, line))
+                    lambda line: _curl_header_callback(headers,
+                                                       native_str(line)))
     if request.streaming_callback:
         write_function = request.streaming_callback
     else:

12
libs/tornado/escape.py

@@ -189,8 +189,10 @@ def utf8(value):
     """
     if isinstance(value, _UTF8_TYPES):
         return value
-    assert isinstance(value, unicode_type), \
-        "Expected bytes, unicode, or None; got %r" % type(value)
+    if not isinstance(value, unicode_type):
+        raise TypeError(
+            "Expected bytes, unicode, or None; got %r" % type(value)
+        )
     return value.encode("utf-8")
 
 _TO_UNICODE_TYPES = (unicode_type, type(None))
@@ -204,8 +206,10 @@ def to_unicode(value):
     """
     if isinstance(value, _TO_UNICODE_TYPES):
         return value
-    assert isinstance(value, bytes_type), \
-        "Expected bytes, unicode, or None; got %r" % type(value)
+    if not isinstance(value, bytes_type):
+        raise TypeError(
+            "Expected bytes, unicode, or None; got %r" % type(value)
+        )
     return value.decode("utf-8")
 
 # to_unicode was previously named _unicode not because it was private,
@@ -233,8 +237,10 @@ def to_basestring(value):
     """
     if isinstance(value, _BASESTRING_TYPES):
         return value
-    assert isinstance(value, bytes_type), \
-        "Expected bytes, unicode, or None; got %r" % type(value)
+    if not isinstance(value, bytes_type):
+        raise TypeError(
+            "Expected bytes, unicode, or None; got %r" % type(value)
+        )
     return value.decode("utf-8")
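The visible behavioural change: a wrong input type now raises TypeError rather than AssertionError, so the check also survives running under python -O. A minimal check:

    from tornado.escape import utf8

    try:
        utf8(12345)  # neither bytes nor unicode
    except TypeError as err:
        print(err)  # "Expected bytes, unicode, or None; got ..."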

12
libs/tornado/gen.py

@@ -64,7 +64,7 @@ For more complicated interfaces, `Task` can be split into two parts:
         def get(self):
             http_client = AsyncHTTPClient()
             http_client.fetch("http://example.com",
-                              callback=(yield gen.Callback("key"))
+                              callback=(yield gen.Callback("key")))
             response = yield gen.Wait("key")
             do_something_with_response(response)
             self.render("template.html")
@@ -390,16 +390,26 @@ class YieldFuture(YieldPoint):
         self.io_loop = io_loop or IOLoop.current()
 
     def start(self, runner):
-        self.runner = runner
-        self.key = object()
-        runner.register_callback(self.key)
-        self.io_loop.add_future(self.future, runner.result_callback(self.key))
+        if not self.future.done():
+            self.runner = runner
+            self.key = object()
+            runner.register_callback(self.key)
+            self.io_loop.add_future(self.future, runner.result_callback(self.key))
+        else:
+            self.runner = None
+            self.result = self.future.result()
 
     def is_ready(self):
-        return self.runner.is_ready(self.key)
+        if self.runner is not None:
+            return self.runner.is_ready(self.key)
+        else:
+            return True
 
     def get_result(self):
-        return self.runner.pop_result(self.key).result()
+        if self.runner is not None:
+            return self.runner.pop_result(self.key).result()
+        else:
+            return self.result
 
 class Multi(YieldPoint):
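With this change a coroutine may yield a Future that is already resolved; YieldFuture then short-circuits instead of registering a runner callback. A sketch:

    from tornado import gen
    from tornado.concurrent import Future
    from tornado.ioloop import IOLoop

    @gen.coroutine
    def main():
        f = Future()
        f.set_result(42)      # resolved before it is yielded
        value = yield f       # handled by the done() branch above
        raise gen.Return(value)

    print(IOLoop.current().run_sync(main))  # 42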

59
libs/tornado/httpclient.py

@@ -335,10 +335,11 @@ class HTTPRequest(object):
         .. versionadded:: 3.1
            The ``auth_mode`` argument.
         """
-        if headers is None:
-            headers = httputil.HTTPHeaders()
+        # Note that some of these attributes go through property setters
+        # defined below.
+        self.headers = headers
         if if_modified_since:
-            headers["If-Modified-Since"] = httputil.format_timestamp(
+            self.headers["If-Modified-Since"] = httputil.format_timestamp(
                 if_modified_since)
         self.proxy_host = proxy_host
         self.proxy_port = proxy_port
@@ -346,8 +347,7 @@ class HTTPRequest(object):
         self.proxy_password = proxy_password
         self.url = url
         self.method = method
-        self.headers = headers
-        self.body = utf8(body)
+        self.body = body
         self.auth_username = auth_username
         self.auth_password = auth_password
         self.auth_mode = auth_mode
@@ -358,9 +358,9 @@ class HTTPRequest(object):
         self.user_agent = user_agent
         self.use_gzip = use_gzip
         self.network_interface = network_interface
-        self.streaming_callback = stack_context.wrap(streaming_callback)
-        self.header_callback = stack_context.wrap(header_callback)
-        self.prepare_curl_callback = stack_context.wrap(prepare_curl_callback)
+        self.streaming_callback = streaming_callback
+        self.header_callback = header_callback
+        self.prepare_curl_callback = prepare_curl_callback
         self.allow_nonstandard_methods = allow_nonstandard_methods
         self.validate_cert = validate_cert
         self.ca_certs = ca_certs
@@ -369,6 +369,49 @@ class HTTPRequest(object):
         self.client_cert = client_cert
         self.start_time = time.time()
 
+    @property
+    def headers(self):
+        return self._headers
+
+    @headers.setter
+    def headers(self, value):
+        if value is None:
+            self._headers = httputil.HTTPHeaders()
+        else:
+            self._headers = value
+
+    @property
+    def body(self):
+        return self._body
+
+    @body.setter
+    def body(self, value):
+        self._body = utf8(value)
+
+    @property
+    def streaming_callback(self):
+        return self._streaming_callback
+
+    @streaming_callback.setter
+    def streaming_callback(self, value):
+        self._streaming_callback = stack_context.wrap(value)
+
+    @property
+    def header_callback(self):
+        return self._header_callback
+
+    @header_callback.setter
+    def header_callback(self, value):
+        self._header_callback = stack_context.wrap(value)
+
+    @property
+    def prepare_curl_callback(self):
+        return self._prepare_curl_callback
+
+    @prepare_curl_callback.setter
+    def prepare_curl_callback(self, value):
+        self._prepare_curl_callback = stack_context.wrap(value)
+
 class HTTPResponse(object):
     """HTTP Response object.

3
libs/tornado/ioloop.py

@@ -598,6 +598,7 @@ class PollIOLoop(IOLoop):
             except ValueError:  # non-main thread
                 pass
 
+        try:
             while True:
                 poll_timeout = _POLL_TIMEOUT
@@ -685,6 +686,8 @@ class PollIOLoop(IOLoop):
                         self.handle_callback_exception(self._handlers.get(fd))
                 except Exception:
                     self.handle_callback_exception(self._handlers.get(fd))
+        finally:
             # reset the stopped flag so another start/stop pair can be issued
             self._stopped = False
             if self._blocking_signal_threshold is not None:

1
libs/tornado/iostream.py

@@ -55,6 +55,7 @@ _ERRNO_WOULDBLOCK = (errno.EWOULDBLOCK, errno.EAGAIN)
 # They should be caught and handled less noisily than other errors.
 _ERRNO_CONNRESET = (errno.ECONNRESET, errno.ECONNABORTED, errno.EPIPE)
 
+
 class StreamClosedError(IOError):
     """Exception raised by `IOStream` methods when the stream is closed.

2
libs/tornado/locale.py

@@ -286,8 +286,6 @@ class Locale(object):
         This method is primarily intended for dates in the past.
         For dates in the future, we fall back to full format.
         """
-        if self.code.startswith("ru"):
-            relative = False
         if isinstance(date, numbers.Real):
             date = datetime.datetime.utcfromtimestamp(date)
         now = datetime.datetime.utcnow()

24
libs/tornado/log.py

@ -33,7 +33,6 @@ from __future__ import absolute_import, division, print_function, with_statement
import logging import logging
import logging.handlers import logging.handlers
import sys import sys
import time
from tornado.escape import _unicode from tornado.escape import _unicode
from tornado.util import unicode_type, basestring_type from tornado.util import unicode_type, basestring_type
@@ -74,8 +73,21 @@ class LogFormatter(logging.Formatter):
     `tornado.options.parse_command_line` (unless ``--logging=none`` is
     used).
     """
-    def __init__(self, color=True, *args, **kwargs):
-        logging.Formatter.__init__(self, *args, **kwargs)
+    DEFAULT_PREFIX_FORMAT = '[%(levelname)1.1s %(asctime)s %(module)s:%(lineno)d]'
+    DEFAULT_DATE_FORMAT = '%y%m%d %H:%M:%S'
+
+    def __init__(self, color=True, prefix_fmt=None, datefmt=None):
+        r"""
+        :arg bool color: Enables color support
+        :arg string prefix_fmt: Log message prefix format.
+            Prefix is a part of the log message, directly preceding the actual
+            message text.
+        :arg string datefmt: Datetime format.
+            Used for formatting ``(asctime)`` placeholder in ``prefix_fmt``.
+        """
+        self.__prefix_fmt = prefix_fmt if prefix_fmt is not None else self.DEFAULT_PREFIX_FORMAT
+        datefmt = datefmt if datefmt is not None else self.DEFAULT_DATE_FORMAT
+        logging.Formatter.__init__(self, datefmt=datefmt)
         self._color = color and _stderr_supports_color()
         if self._color:
             # The curses module has some str/bytes confusion in
@@ -107,10 +119,8 @@ class LogFormatter(logging.Formatter):
         except Exception as e:
             record.message = "Bad message (%r): %r" % (e, record.__dict__)
         assert isinstance(record.message, basestring_type)  # guaranteed by logging
-        record.asctime = time.strftime(
-            "%y%m%d %H:%M:%S", self.converter(record.created))
-        prefix = '[%(levelname)1.1s %(asctime)s %(module)s:%(lineno)d]' % \
-            record.__dict__
+        record.asctime = self.formatTime(record, self.datefmt)
+        prefix = self.__prefix_fmt % record.__dict__
         if self._color:
             prefix = (self._colors.get(record.levelno, self._normal) +
                       prefix + self._normal)
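With the prefix and date formats promoted to constructor arguments, callers can restyle the log prefix without subclassing. A short sketch using the signature introduced above:

    import logging
    from tornado.log import LogFormatter

    handler = logging.StreamHandler()
    # Both keyword arguments default to the DEFAULT_* class attributes above.
    handler.setFormatter(LogFormatter(color=False,
                                      prefix_fmt='[%(levelname)s %(asctime)s]',
                                      datefmt='%H:%M:%S'))
    logging.getLogger().addHandler(handler)
    logging.getLogger().warning("disk space low")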

10
libs/tornado/netutil.py

@@ -27,7 +27,7 @@ import stat
 from tornado.concurrent import dummy_executor, run_on_executor
 from tornado.ioloop import IOLoop
 from tornado.platform.auto import set_close_exec
-from tornado.util import Configurable
+from tornado.util import u, Configurable

 if hasattr(ssl, 'match_hostname') and hasattr(ssl, 'CertificateError'):  # python 3.2+
     ssl_match_hostname = ssl.match_hostname
@@ -37,6 +37,14 @@ else:
     ssl_match_hostname = backports.ssl_match_hostname.match_hostname
     SSLCertificateError = backports.ssl_match_hostname.CertificateError

+# ThreadedResolver runs getaddrinfo on a thread. If the hostname is unicode,
+# getaddrinfo attempts to import encodings.idna. If this is done at
+# module-import time, the import lock is already held by the main thread,
+# leading to deadlock. Avoid it by caching the idna encoder on the main
+# thread now.
+u('foo').encode('idna')
+

 def bind_sockets(port, address=None, family=socket.AF_UNSPEC, backlog=128, flags=None):
     """Creates listening sockets bound to the given port and address.

2
libs/tornado/platform/asyncio.py

@@ -16,6 +16,7 @@ import os
 from tornado.ioloop import IOLoop
 from tornado import stack_context


 class BaseAsyncIOLoop(IOLoop):
     def initialize(self, asyncio_loop, close_loop=False):
         self.asyncio_loop = asyncio_loop
@@ -128,6 +129,7 @@ class AsyncIOMainLoop(BaseAsyncIOLoop):
         super(AsyncIOMainLoop, self).initialize(asyncio.get_event_loop(),
                                                 close_loop=False)


 class AsyncIOLoop(BaseAsyncIOLoop):
     def initialize(self):
         super(AsyncIOLoop, self).initialize(asyncio.new_event_loop(),

6
libs/tornado/platform/twisted.py

@@ -527,8 +527,10 @@ class TwistedResolver(Resolver):
             resolved_family = socket.AF_INET6
         else:
             deferred = self.resolver.getHostByName(utf8(host))
-            resolved = yield gen.Task(deferred.addCallback)
-            if twisted.internet.abstract.isIPAddress(resolved):
+            resolved = yield gen.Task(deferred.addBoth)
+            if isinstance(resolved, failure.Failure):
+                resolved.raiseException()
+            elif twisted.internet.abstract.isIPAddress(resolved):
                 resolved_family = socket.AF_INET
             elif twisted.internet.abstract.isIPv6Address(resolved):
                 resolved_family = socket.AF_INET6
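addCallback only fired on success, so a failed lookup never reached the gen.Task; addBoth delivers both outcomes, and the Failure check re-raises errors inside the coroutine. The same unwrapping step in isolation (an illustrative helper, not part of this diff):

    from twisted.python import failure

    def unwrap(result):
        """Illustrative: re-raise an error delivered through addBoth."""
        if isinstance(result, failure.Failure):
            result.raiseException()  # restores the original exception
        return result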

15
libs/tornado/template.py

@@ -21,7 +21,7 @@ Basic usage looks like::
     t = template.Template("<html>{{ myvalue }}</html>")
     print t.generate(myvalue="XXX")

-Loader is a class that loads templates from a root directory and caches
+`Loader` is a class that loads templates from a root directory and caches
 the compiled templates::

     loader = template.Loader("/home/btaylor")
@@ -56,16 +56,17 @@ interesting. Syntax for the templates::
     {% end %}

 Unlike most other template systems, we do not put any restrictions on the
-expressions you can include in your statements. if and for blocks get
-translated exactly into Python, you can do complex expressions like::
+expressions you can include in your statements. ``if`` and ``for`` blocks get
+translated exactly into Python, so you can do complex expressions like::

     {% for student in [p for p in people if p.student and p.age > 23] %}
       <li>{{ escape(student.name) }}</li>
     {% end %}

 Translating directly to Python means you can apply functions to expressions
-easily, like the escape() function in the examples above. You can pass
-functions in to your template just like any other variable::
+easily, like the ``escape()`` function in the examples above. You can pass
+functions in to your template just like any other variable
+(In a `.RequestHandler`, override `.RequestHandler.get_template_namespace`)::

     ### Python code
     def add(x, y):
@@ -75,8 +76,8 @@ functions in to your template just like any other variable::
     ### The template
     {{ add(1, 2) }}

-We provide the functions escape(), url_escape(), json_encode(), and squeeze()
-to all templates by default.
+We provide the functions `escape() <.xhtml_escape>`, `.url_escape()`,
+`.json_encode()`, and `.squeeze()` to all templates by default.

 Typical applications do not create `Template` or `Loader` instances by
 hand, but instead use the `~.RequestHandler.render` and
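The docstring's add() example, assembled into a runnable snippet:

    from tornado import template

    def add(x, y):
        return x + y

    t = template.Template("{{ add(1, 2) }}")
    # Functions are passed to generate() like any other variable.
    print(t.generate(add=add))  # b'3' (generate returns utf-8 bytes)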

4
libs/tornado/web.py

@@ -447,7 +447,11 @@ class RequestHandler(object):
         The name of the argument is provided if known, but may be None
         (e.g. for unnamed groups in the url regex).
         """
-        return _unicode(value)
+        try:
+            return _unicode(value)
+        except UnicodeDecodeError:
+            raise HTTPError(400, "Invalid unicode in %s: %r" %
+                            (name or "url", value[:40]))

     @property
     def cookies(self):
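Malformed percent-encoded bytes in a URL now produce a 400 for the client instead of an unhandled UnicodeDecodeError (a 500). Handlers wanting laxer behavior can override decode_argument; an illustrative override:

    from tornado import web

    class LenientHandler(web.RequestHandler):
        """Illustrative: fall back to latin1 instead of replying 400."""

        def decode_argument(self, value, name=None):
            try:
                return super(LenientHandler, self).decode_argument(value, name=name)
            except web.HTTPError:
                # latin1 maps every byte, so this decode never raises.
                return value.decode('latin1')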

1
libs/tornado/websocket.py

@@ -891,6 +891,7 @@ def websocket_connect(url, io_loop=None, callback=None, connect_timeout=None):
         io_loop.add_future(conn.connect_future, callback)
     return conn.connect_future


 def _websocket_mask_python(mask, data):
     """Websocket masking function.
