
Merge branch 'refs/heads/develop'

pull/1185/merge
Ruud, 13 years ago
commit 7e6234298d
34 changed files (changed lines per file in parentheses):
  1. couchpotato/core/event.py (4)
  2. couchpotato/core/plugins/searcher/main.py (2)
  3. couchpotato/core/providers/base.py (137)
  4. couchpotato/core/providers/nzb/binsearch/__init__.py (22)
  5. couchpotato/core/providers/nzb/binsearch/main.py (99)
  6. couchpotato/core/providers/nzb/ftdworld/main.py (50)
  7. couchpotato/core/providers/nzb/newznab/main.py (145)
  8. couchpotato/core/providers/nzb/nzbclub/main.py (110)
  9. couchpotato/core/providers/nzb/nzbindex/main.py (108)
  10. couchpotato/core/providers/nzb/nzbsrus/main.py (76)
  11. couchpotato/core/providers/nzb/nzbx/main.py (88)
  12. couchpotato/core/providers/nzb/omgwtfnzbs/main.py (75)
  13. couchpotato/core/providers/torrent/base.py (6)
  14. couchpotato/core/providers/torrent/kickasstorrents/main.py (100)
  15. couchpotato/core/providers/torrent/passthepopcorn/main.py (40)
  16. couchpotato/core/providers/torrent/publichd/main.py (41)
  17. couchpotato/core/providers/torrent/sceneaccess/main.py (44)
  18. couchpotato/core/providers/torrent/scenehd/main.py (46)
  19. couchpotato/core/providers/torrent/thepiratebay/main.py (100)
  20. couchpotato/core/providers/torrent/torrentday/__init__.py (32)
  21. couchpotato/core/providers/torrent/torrentday/main.py (61)
  22. couchpotato/core/providers/torrent/torrentleech/main.py (52)
  23. couchpotato/runner.py (2)
  24. libs/dateutil/__init__.py (9)
  25. libs/dateutil/easter.py (9)
  26. libs/dateutil/parser.py (207)
  27. libs/dateutil/relativedelta.py (122)
  28. libs/dateutil/rrule.py (131)
  29. libs/dateutil/tz.py (129)
  30. libs/dateutil/tzwin.py (31)
  31. libs/dateutil/zoneinfo/__init__.py (15)
  32. libs/dateutil/zoneinfo/zoneinfo--latest.tar.gz (BIN)
  33. libs/dateutil/zoneinfo/zoneinfo-2010g.tar.gz (BIN)
  34. libs/six.py (366)

couchpotato/core/event.py (4 changed lines)

@ -105,14 +105,14 @@ def fireEvent(name, *args, **kwargs):
# Merge
if options['merge'] and len(results) > 0:
# Dict
if type(results[0]) == dict:
if isinstance(results[0], dict):
merged = {}
for result in results:
merged = mergeDicts(merged, result)
results = merged
# Lists
elif type(results[0]) == list:
elif isinstance(results[0], list):
merged = []
for result in results:
merged += result
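
The merge logic switches from exact type comparison to isinstance(), so results that are dict or list subclasses are still merged. A standalone illustration (OrderedDict is just an example of such a subclass, not a value the codebase uses here):

from collections import OrderedDict

result = OrderedDict(title='Some Movie')

# Exact type comparison rejects dict subclasses, so such a result
# would silently skip the merge branch
print(type(result) == dict)        # False

# isinstance() accepts dict and any subclass of it
print(isinstance(result, dict))    # True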

couchpotato/core/plugins/searcher/main.py (2 changed lines)

@ -363,7 +363,7 @@ class Searcher(Plugin):
return True
# Check if nzb contains imdb link
if self.checkIMDB([nzb['description']], movie['library']['identifier']):
if self.checkIMDB([nzb.get('description', '')], movie['library']['identifier']):
return True
for raw_title in movie['library']['titles']:
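
The description lookup becomes a dict.get() with an empty-string fallback, a defensive change so a result that lacks the key no longer raises KeyError. A one-line illustration with a hypothetical result dict:

nzb = {'name': 'Some.Movie.2012.720p.BluRay'}   # hypothetical result, no 'description' key

# nzb['description'] would raise KeyError here; .get() degrades to ''
description = nzb.get('description', '')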

couchpotato/core/providers/base.py (137 changed lines)

@ -1,15 +1,17 @@
from couchpotato.core.event import addEvent
from couchpotato.core.helpers.encoding import simplifyString
from couchpotato.core.helpers.variable import tryFloat, getTitle
from couchpotato.core.event import addEvent, fireEvent
from couchpotato.core.helpers.variable import tryFloat, mergeDicts, md5, \
possibleTitles, getTitle
from couchpotato.core.logger import CPLog
from couchpotato.core.plugins.base import Plugin
from couchpotato.environment import Env
from urlparse import urlparse
import cookielib
import json
import re
import time
import traceback
import urllib2
import xml.etree.ElementTree as XMLTree
log = CPLog(__name__)
@ -55,12 +57,8 @@ class YarrProvider(Provider):
def __init__(self):
addEvent('provider.belongs_to', self.belongsTo)
addEvent('%s.search' % self.type, self.search)
addEvent('yarr.search', self.search)
addEvent('nzb.feed', self.feed)
def login(self):
try:
@ -97,11 +95,29 @@ class YarrProvider(Provider):
return 'try_next'
def feed(self):
return []
def search(self, movie, quality):
return []
if self.isDisabled():
return []
# Login if needed
if self.urls.get('login') and (not self.login_opener and not self.login()):
log.error('Failed to login to: %s', self.getName())
return []
# Create result container
imdb_result = hasattr(self, '_search')
results = ResultList(self, movie, quality, imdb_result = imdb_result)
# Do search based on imdb id
if imdb_result:
self._search(movie, quality, results)
# Search possible titles
else:
for title in possibleTitles(getTitle(movie['library'])):
self._searchOnTitle(title, movie, quality, results)
return results
def belongsTo(self, url, provider = None, host = None):
try:
@ -149,22 +165,93 @@ class YarrProvider(Provider):
return [self.cat_backup_id]
def found(self, new):
if not new.get('provider_extra'):
new['provider_extra'] = ''
else:
new['provider_extra'] = ', %s' % new['provider_extra']
def getJsonData(self, url, **kwargs):
data = self.getCache(md5(url), url, **kwargs)
if data:
try:
return json.loads(data)
except:
log.error('Failed to parsing %s: %s', (self.getName(), traceback.format_exc()))
return []
def getRSSData(self, url, **kwargs):
data = self.getCache(md5(url), url, **kwargs)
log.info('Found: score(%(score)s) on %(provider)s%(provider_extra)s: %(name)s', new)
if data:
try:
data = XMLTree.fromstring(data)
return self.getElements(data, 'channel/item')
except:
log.error('Failed to parsing %s: %s', (self.getName(), traceback.format_exc()))
return []
def getHTMLData(self, url, **kwargs):
return self.getCache(md5(url), url, **kwargs)
class ResultList(list):
result_ids = None
provider = None
movie = None
quality = None
def __init__(self, provider, movie, quality, **kwargs):
self.result_ids = []
self.provider = provider
self.movie = movie
self.quality = quality
self.kwargs = kwargs
def removeDuplicateResults(self, results):
super(ResultList, self).__init__()
result_ids = []
new_results = []
def extend(self, results):
for r in results:
self.append(r)
for result in results:
if result['id'] not in result_ids:
new_results.append(result)
result_ids.append(result['id'])
def append(self, result):
new_result = self.fillResult(result)
is_correct_movie = fireEvent('searcher.correct_movie',
nzb = new_result, movie = self.movie, quality = self.quality,
imdb_results = self.kwargs.get('imdb_results', False), single = True)
if is_correct_movie and new_result['id'] not in self.result_ids:
new_result['score'] += fireEvent('score.calculate', new_result, self.movie, single = True)
self.found(new_result)
self.result_ids.append(result['id'])
super(ResultList, self).append(new_result)
def fillResult(self, result):
defaults = {
'id': 0,
'type': self.provider.type,
'provider': self.provider.getName(),
'download': self.provider.download,
'url': '',
'name': '',
'age': 0,
'size': 0,
'description': '',
'score': 0
}
return mergeDicts(defaults, result)
def found(self, new_result):
if not new_result.get('provider_extra'):
new_result['provider_extra'] = ''
else:
new_result['provider_extra'] = ', %s' % new_result['provider_extra']
return new_results
log.info('Found: score(%(score)s) on %(provider)s%(provider_extra)s: %(name)s', new_result)
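
The net effect of this refactor: YarrProvider.search() now owns the shared boilerplate (the isDisabled() check, cookie login when a login URL is configured, building the ResultList, iterating possibleTitles()), and delegates to _search(movie, quality, results) for IMDB-aware providers or _searchOnTitle(title, movie, quality, results) otherwise. Individual providers just append plain dicts; ResultList.fillResult() supplies the missing keys and ResultList.append() handles the searcher.correct_movie check, scoring and de-duplication. A minimal sketch of a provider written against that contract; the class name, URL and values below are placeholders, not part of the codebase:

from couchpotato.core.providers.nzb.base import NZBProvider


class ExampleNZB(NZBProvider):

    urls = {'search': 'https://example.invalid/api?q=%s'}  # placeholder host

    def _searchOnTitle(self, title, movie, quality, results):
        # 'results' is the ResultList built by YarrProvider.search();
        # appending a bare dict is enough: fillResult() adds the missing
        # keys (type, provider, download, score, ...) and append() runs
        # the searcher.correct_movie check, scoring and de-duplication.
        results.append({
            'id': 123,
            'name': '%s %s' % (title, movie['library']['year']),
            'url': self.urls['search'] % 'some-release',
        })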

couchpotato/core/providers/nzb/binsearch/__init__.py (22 changed lines)

@ -0,0 +1,22 @@
from .main import BinSearch
def start():
return BinSearch()
config = [{
'name': 'binsearch',
'groups': [
{
'tab': 'searcher',
'subtab': 'nzb_providers',
'name': 'binsearch',
'description': 'Free provider, less accurate. See <a href="https://www.binsearch.info/">BinSearch</a>',
'options': [
{
'name': 'enabled',
'type': 'enabler',
},
],
},
],
}]

couchpotato/core/providers/nzb/binsearch/main.py (99 changed lines)

@ -0,0 +1,99 @@
from bs4 import BeautifulSoup
from couchpotato.core.helpers.encoding import tryUrlencode
from couchpotato.core.helpers.variable import tryInt
from couchpotato.core.logger import CPLog
from couchpotato.core.providers.nzb.base import NZBProvider
from couchpotato.environment import Env
import re
import traceback
log = CPLog(__name__)
class BinSearch(NZBProvider):
urls = {
'download': 'https://www.binsearch.info/fcgi/nzb.fcgi?q=%s',
'detail': 'https://www.binsearch.info%s',
'search': 'https://www.binsearch.info/index.php?%s',
}
http_time_between_calls = 4 # Seconds
def _search(self, movie, quality, results):
q = '%s %s' % (movie['library']['identifier'], quality.get('identifier'))
arguments = tryUrlencode({
'q': q,
'm': 'n',
'max': 250,
'adv_age': Env.setting('retention', 'nzb'),
'adv_sort': 'date',
'adv_col': 'on',
'adv_nfo': 'on',
'minsize': quality.get('size_min'),
'maxsize': quality.get('size_max'),
})
data = self.getHTMLData(self.urls['search'] % arguments)
if data:
try:
html = BeautifulSoup(data)
main_table = html.find('table', attrs = {'id':'r2'})
if not main_table:
return
items = main_table.find_all('tr')
for row in items:
title = row.find('span', attrs = {'class':'s'})
if not title: continue
nzb_id = row.find('input', attrs = {'type':'checkbox'})['name']
info = row.find('span', attrs = {'class':'d'})
size_match = re.search('size:.(?P<size>[0-9\.]+.[GMB]+)', info.text)
def extra_check(item):
parts = re.search('available:.(?P<parts>\d+)./.(?P<total>\d+)', info.text)
total = tryInt(parts.group('total'))
parts = tryInt(parts.group('parts'))
if (total / parts) < 0.95 or ((total / parts) >= 0.95 and not 'par2' in info.text.lower()):
log.info2('Wrong: \'%s\', not complete: %s out of %s', (item['name'], parts, total))
return False
if 'requires password' in info.text.lower():
log.info2('Wrong: \'%s\', passworded', (item['name']))
return False
return True
results.append({
'id': nzb_id,
'name': title.text,
'age': tryInt(re.search('(?P<size>\d+d)', row.find_all('td')[-1:][0].text).group('size')[:-1]),
'size': self.parseSize(size_match.group('size')),
'url': self.urls['download'] % nzb_id,
'detail_url': self.urls['detail'] % info.find('a')['href'],
'extra_check': extra_check
})
except:
log.error('Failed to parse HTML response from BinSearch: %s', traceback.format_exc())
def download(self, url = '', nzb_id = ''):
params = {'action': 'nzb'}
params[nzb_id] = 'on'
try:
return self.urlopen(url, params = params, show_error = False)
except:
log.error('Failed getting nzb from %s: %s', (self.getName(), traceback.format_exc()))
return 'try_next'
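
Note the extra_check closure attached to every BinSearch result: it captures the post's info block so completeness and password checks can run later against the selected release. The same pattern in isolation; names and thresholds below are illustrative, not the provider's exact rules:

def make_result(name, parts_available, parts_total, passworded):
    # Capture the raw post details in a closure, the way the provider
    # attaches extra_check to each result dict
    def extra_check(item):
        if float(parts_available) / parts_total < 0.95:
            return False                     # too incomplete to repair
        if passworded:
            return False                     # passworded posts are useless
        return True
    return {'name': name, 'extra_check': extra_check}

result = make_result('Some.Movie.2012.720p', 96, 100, passworded=False)
print(result['extra_check'](result))         # True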

couchpotato/core/providers/nzb/ftdworld/main.py (50 changed lines)

@ -1,7 +1,6 @@
from bs4 import BeautifulSoup
from couchpotato.core.event import fireEvent
from couchpotato.core.helpers.encoding import toUnicode, tryUrlencode
from couchpotato.core.helpers.variable import tryInt, possibleTitles, getTitle
from couchpotato.core.helpers.variable import tryInt
from couchpotato.core.logger import CPLog
from couchpotato.core.providers.nzb.base import NZBProvider
from couchpotato.environment import Env
@ -30,23 +29,11 @@ class FTDWorld(NZBProvider):
]
cat_backup_id = 1
def search(self, movie, quality):
if self.isDisabled():
return []
results = []
for title in possibleTitles(getTitle(movie['library'])):
results.extend(self._search(title, movie, quality))
return self.removeDuplicateResults(results)
def _search(self, title, movie, quality):
results = []
def _searchOnTitle(self, title, movie, quality, results):
q = '"%s" %s' % (title, movie['library']['year'])
params = {
params = tryUrlencode({
'ctitle': q,
'customQuery': 'usr',
'cage': Env.setting('retention', 'nzb'),
@ -54,10 +41,9 @@ class FTDWorld(NZBProvider):
'csizemax': quality.get('size_max'),
'ccategory': 14,
'ctype': ','.join([str(x) for x in self.getCatId(quality['identifier'])]),
}
})
cache_key = 'ftdworld.%s.%s' % (movie['library']['identifier'], q)
data = self.getCache(cache_key, self.urls['search'] % tryUrlencode(params), opener = self.login_opener)
data = self.getHTMLData(self.urls['search'] % params, opener = self.login_opener)
if data:
try:
@ -66,7 +52,7 @@ class FTDWorld(NZBProvider):
main_table = html.find('table', attrs = {'id':'ftdresult'})
if not main_table:
return results
return
items = main_table.find_all('tr', attrs = {'class': re.compile('tcontent')})
@ -77,34 +63,18 @@ class FTDWorld(NZBProvider):
up = item.find('img', attrs = {'src': re.compile('up.png')})
down = item.find('img', attrs = {'src': re.compile('down.png')})
new = {
results.append({
'id': nzb_id,
'type': 'nzb',
'provider': self.getName(),
'name': toUnicode(item.find('a', attrs = {'href': re.compile('./spotinfo')}).text.strip()),
'age': self.calculateAge(int(time.mktime(parse(tds[2].text).timetuple()))),
'size': 0,
'url': self.urls['download'] % nzb_id,
'download': self.loginDownload,
'detail_url': self.urls['detail'] % nzb_id,
'description': '',
'score': (tryInt(up.attrs['title'].split(' ')[0]) * 3) - (tryInt(down.attrs['title'].split(' ')[0]) * 3) if up else 0,
}
is_correct_movie = fireEvent('searcher.correct_movie',
nzb = new, movie = movie, quality = quality,
imdb_results = False, single = True)
if is_correct_movie:
new['score'] += fireEvent('score.calculate', new, movie, single = True)
results.append(new)
self.found(new)
return results
except SyntaxError:
log.error('Failed to parse XML response from NZBClub')
})
return results
except:
log.error('Failed to parse HTML response from FTDWorld')
def getLoginParams(self):
return tryUrlencode({

couchpotato/core/providers/nzb/newznab/main.py (145 changed lines)

@ -1,8 +1,8 @@
from couchpotato.core.event import fireEvent
from couchpotato.core.helpers.encoding import tryUrlencode
from couchpotato.core.helpers.rss import RSS
from couchpotato.core.helpers.variable import cleanHost, splitString
from couchpotato.core.logger import CPLog
from couchpotato.core.providers.base import ResultList
from couchpotato.core.providers.nzb.base import NZBProvider
from couchpotato.environment import Env
from dateutil.parser import parse
@ -10,7 +10,6 @@ from urllib2 import HTTPError
from urlparse import urlparse
import time
import traceback
import xml.etree.ElementTree as XMLTree
log = CPLog(__name__)
@ -25,137 +24,59 @@ class Newznab(NZBProvider, RSS):
limits_reached = {}
cat_backup_id = '2000'
http_time_between_calls = 1 # Seconds
def feed(self):
hosts = self.getHosts()
results = []
for host in hosts:
result = self.singleFeed(host)
if result:
results.extend(result)
return results
def singleFeed(self, host):
results = []
if self.isDisabled(host):
return results
arguments = tryUrlencode({
't': self.cat_backup_id,
'r': host['api_key'],
'i': 58,
})
url = '%s?%s' % (cleanHost(host['host']) + 'rss', arguments)
cache_key = 'newznab.%s.feed.%s' % (host['host'], arguments)
results = self.createItems(url, cache_key, host, for_feed = True)
return results
def search(self, movie, quality):
hosts = self.getHosts()
results = []
results = ResultList(self, movie, quality, imdb_result = True)
for host in hosts:
result = self.singleSearch(host, movie, quality)
if self.isDisabled(host):
continue
if result:
results.extend(result)
self._searchOnHost(host, movie, quality, results)
return results
def singleSearch(self, host, movie, quality):
results = []
if self.isDisabled(host):
return results
def _searchOnHost(self, host, movie, quality, results):
cat_id = self.getCatId(quality['identifier'])
arguments = tryUrlencode({
'imdbid': movie['library']['identifier'].replace('tt', ''),
'cat': ','.join(cat_id),
'apikey': host['api_key'],
'extended': 1
})
url = '%s&%s' % (self.getUrl(host['host'], self.urls['search']), arguments)
cache_key = 'newznab.%s.%s.%s' % (host['host'], movie['library']['identifier'], cat_id)
nzbs = self.getRSSData(url, cache_timeout = 1800, headers = {'User-Agent': Env.getIdentifier()})
results = self.createItems(url, cache_key, host, movie = movie, quality = quality)
for nzb in nzbs:
return results
date = None
for item in nzb:
if item.attrib.get('name') == 'usenetdate':
date = item.attrib.get('value')
break
if not date:
date = self.getTextElement(nzb, 'pubDate')
def createItems(self, url, cache_key, host, movie = None, quality = None, for_feed = False):
results = []
data = self.getCache(cache_key, url, cache_timeout = 1800, headers = {'User-Agent': Env.getIdentifier()})
if data:
try:
try:
data = XMLTree.fromstring(data)
nzbs = self.getElements(data, 'channel/item')
except Exception, e:
log.debug('%s, %s', (self.getName(), e))
return results
results = []
for nzb in nzbs:
date = None
for item in nzb:
if item.attrib.get('name') == 'usenetdate':
date = item.attrib.get('value')
break
if not date:
date = self.getTextElement(nzb, 'pubDate')
nzb_id = self.getTextElement(nzb, 'guid').split('/')[-1:].pop()
name = self.getTextElement(nzb, 'title')
if not name:
continue
new = {
'id': nzb_id,
'provider': self.getName(),
'provider_extra': host['host'],
'type': 'nzb',
'name': self.getTextElement(nzb, 'title'),
'age': self.calculateAge(int(time.mktime(parse(date).timetuple()))),
'size': int(self.getElement(nzb, 'enclosure').attrib['length']) / 1024 / 1024,
'url': (self.getUrl(host['host'], self.urls['download']) % nzb_id) + self.getApiExt(host),
'download': self.download,
'detail_url': '%sdetails/%s' % (cleanHost(host['host']), nzb_id),
'content': self.getTextElement(nzb, 'description'),
}
if not for_feed:
is_correct_movie = fireEvent('searcher.correct_movie',
nzb = new, movie = movie, quality = quality,
imdb_results = True, single = True)
if is_correct_movie:
new['score'] = fireEvent('score.calculate', new, movie, single = True)
results.append(new)
self.found(new)
else:
results.append(new)
return results
except SyntaxError:
log.error('Failed to parse XML response from Newznab: %s', host)
return results
nzb_id = self.getTextElement(nzb, 'guid').split('/')[-1:].pop()
name = self.getTextElement(nzb, 'title')
if not name:
continue
results.append({
'id': nzb_id,
'provider_extra': host['host'],
'name': self.getTextElement(nzb, 'title'),
'age': self.calculateAge(int(time.mktime(parse(date).timetuple()))),
'size': int(self.getElement(nzb, 'enclosure').attrib['length']) / 1024 / 1024,
'url': (self.getUrl(host['host'], self.urls['download']) % nzb_id) + self.getApiExt(host),
'detail_url': '%sdetails/%s' % (cleanHost(host['host']), nzb_id),
'content': self.getTextElement(nzb, 'description'),
})
def getHosts(self):

couchpotato/core/providers/nzb/nzbclub/main.py (110 changed lines)

@ -1,13 +1,11 @@
from bs4 import BeautifulSoup
from couchpotato.core.event import fireEvent
from couchpotato.core.helpers.encoding import toUnicode, tryUrlencode
from couchpotato.core.helpers.rss import RSS
from couchpotato.core.helpers.variable import tryInt, getTitle, possibleTitles
from couchpotato.core.helpers.variable import tryInt
from couchpotato.core.logger import CPLog
from couchpotato.core.providers.nzb.base import NZBProvider
from dateutil.parser import parse
import time
import xml.etree.ElementTree as XMLTree
log = CPLog(__name__)
@ -20,88 +18,48 @@ class NZBClub(NZBProvider, RSS):
http_time_between_calls = 4 #seconds
def search(self, movie, quality):
if self.isDisabled():
return []
results = []
for title in possibleTitles(getTitle(movie['library'])):
results.extend(self._search(title, movie, quality))
return self.removeDuplicateResults(results)
def _search(self, title, movie, quality):
results = []
def _searchOnTitle(self, title, movie, quality, results):
q = '"%s %s" %s' % (title, movie['library']['year'], quality.get('identifier'))
params = {
params = tryUrlencode({
'q': q,
'ig': '1',
'rpp': 200,
'st': 1,
'sp': 1,
'ns': 1,
}
cache_key = 'nzbclub.%s.%s.%s' % (movie['library']['identifier'], quality.get('identifier'), q)
data = self.getCache(cache_key, self.urls['search'] % tryUrlencode(params))
if data:
try:
try:
data = XMLTree.fromstring(data)
nzbs = self.getElements(data, 'channel/item')
except Exception, e:
log.debug('%s, %s', (self.getName(), e))
return results
for nzb in nzbs:
nzbclub_id = tryInt(self.getTextElement(nzb, "link").split('/nzb_view/')[1].split('/')[0])
enclosure = self.getElement(nzb, "enclosure").attrib
size = enclosure['length']
date = self.getTextElement(nzb, "pubDate")
def extra_check(item):
full_description = self.getCache('nzbclub.%s' % nzbclub_id, item['detail_url'], cache_timeout = 25920000)
for ignored in ['ARCHIVE inside ARCHIVE', 'Incomplete', 'repair impossible']:
if ignored in full_description:
log.info('Wrong: Seems to be passworded or corrupted files: %s', new['name'])
return False
return True
new = {
'id': nzbclub_id,
'type': 'nzb',
'provider': self.getName(),
'name': toUnicode(self.getTextElement(nzb, "title")),
'age': self.calculateAge(int(time.mktime(parse(date).timetuple()))),
'size': tryInt(size) / 1024 / 1024,
'url': enclosure['url'].replace(' ', '_'),
'download': self.download,
'detail_url': self.getTextElement(nzb, "link"),
'description': '',
'get_more_info': self.getMoreInfo,
'extra_check': extra_check
}
is_correct_movie = fireEvent('searcher.correct_movie',
nzb = new, movie = movie, quality = quality,
imdb_results = False, single = True)
if is_correct_movie:
new['score'] = fireEvent('score.calculate', new, movie, single = True)
results.append(new)
self.found(new)
return results
except SyntaxError:
log.error('Failed to parse XML response from NZBClub')
return results
})
nzbs = self.getRSSData(self.urls['search'] % params)
for nzb in nzbs:
nzbclub_id = tryInt(self.getTextElement(nzb, "link").split('/nzb_view/')[1].split('/')[0])
enclosure = self.getElement(nzb, "enclosure").attrib
size = enclosure['length']
date = self.getTextElement(nzb, "pubDate")
def extra_check(item):
full_description = self.getCache('nzbclub.%s' % nzbclub_id, item['detail_url'], cache_timeout = 25920000)
for ignored in ['ARCHIVE inside ARCHIVE', 'Incomplete', 'repair impossible']:
if ignored in full_description:
log.info('Wrong: Seems to be passworded or corrupted files: %s', item['name'])
return False
return True
results.append({
'id': nzbclub_id,
'name': toUnicode(self.getTextElement(nzb, "title")),
'age': self.calculateAge(int(time.mktime(parse(date).timetuple()))),
'size': tryInt(size) / 1024 / 1024,
'url': enclosure['url'].replace(' ', '_'),
'detail_url': self.getTextElement(nzb, "link"),
'get_more_info': self.getMoreInfo,
'extra_check': extra_check
})
def getMoreInfo(self, item):
full_description = self.getCache('nzbclub.%s' % item['id'], item['detail_url'], cache_timeout = 25920000)

couchpotato/core/providers/nzb/nzbindex/main.py (108 changed lines)

@ -1,16 +1,13 @@
from bs4 import BeautifulSoup
from couchpotato.core.event import fireEvent
from couchpotato.core.helpers.encoding import toUnicode, tryUrlencode
from couchpotato.core.helpers.rss import RSS
from couchpotato.core.helpers.variable import tryInt, getTitle, possibleTitles
from couchpotato.core.helpers.variable import tryInt
from couchpotato.core.logger import CPLog
from couchpotato.core.providers.nzb.base import NZBProvider
from couchpotato.environment import Env
from dateutil.parser import parse
import re
import time
import traceback
import xml.etree.ElementTree as XMLTree
log = CPLog(__name__)
@ -19,24 +16,12 @@ class NzbIndex(NZBProvider, RSS):
urls = {
'download': 'https://www.nzbindex.com/download/',
'api': 'https://www.nzbindex.com/rss/',
'search': 'https://www.nzbindex.com/rss/?%s',
}
http_time_between_calls = 1 # Seconds
def search(self, movie, quality):
if self.isDisabled():
return []
results = []
for title in possibleTitles(getTitle(movie['library'])):
results.extend(self._search(title, movie, quality))
return self.removeDuplicateResults(results)
def _search(self, title, movie, quality):
results = []
def _searchOnTitle(self, title, movie, quality, results):
q = '"%s" %s %s' % (title, movie['library']['year'], quality.get('identifier'))
arguments = tryUrlencode({
@ -50,68 +35,37 @@ class NzbIndex(NZBProvider, RSS):
'more': 1,
'complete': 1,
})
url = "%s?%s" % (self.urls['api'], arguments)
cache_key = 'nzbindex.%s.%s' % (movie['library']['identifier'], q)
data = self.getCache(cache_key, url)
nzbs = self.getRSSData(self.urls['search'] % arguments)
for nzb in nzbs:
enclosure = self.getElement(nzb, 'enclosure').attrib
nzbindex_id = int(self.getTextElement(nzb, "link").split('/')[4])
if data:
try:
try:
data = XMLTree.fromstring(data)
nzbs = self.getElements(data, 'channel/item')
except Exception, e:
log.debug('%s, %s', (self.getName(), e))
return results
for nzb in nzbs:
enclosure = self.getElement(nzb, 'enclosure').attrib
nzbindex_id = int(self.getTextElement(nzb, "link").split('/')[4])
try:
description = self.getTextElement(nzb, "description")
except:
description = ''
def extra_check(new):
if '#c20000' in new['description'].lower():
log.info('Wrong: Seems to be passworded: %s', new['name'])
return False
return True
new = {
'id': nzbindex_id,
'type': 'nzb',
'provider': self.getName(),
'download': self.download,
'name': self.getTextElement(nzb, "title"),
'age': self.calculateAge(int(time.mktime(parse(self.getTextElement(nzb, "pubDate")).timetuple()))),
'size': tryInt(enclosure['length']) / 1024 / 1024,
'url': enclosure['url'],
'detail_url': enclosure['url'].replace('/download/', '/release/'),
'description': description,
'get_more_info': self.getMoreInfo,
'extra_check': extra_check,
'check_nzb': True,
}
is_correct_movie = fireEvent('searcher.correct_movie',
nzb = new, movie = movie, quality = quality,
imdb_results = False, single = True)
if is_correct_movie:
new['score'] = fireEvent('score.calculate', new, movie, single = True)
results.append(new)
self.found(new)
return results
description = self.getTextElement(nzb, "description")
except:
log.error('Failed to parsing %s: %s', (self.getName(), traceback.format_exc()))
return results
description = ''
def extra_check(item):
if '#c20000' in item['description'].lower():
log.info('Wrong: Seems to be passworded: %s', item['name'])
return False
return True
results.append({
'id': nzbindex_id,
'name': self.getTextElement(nzb, "title"),
'age': self.calculateAge(int(time.mktime(parse(self.getTextElement(nzb, "pubDate")).timetuple()))),
'size': tryInt(enclosure['length']) / 1024 / 1024,
'url': enclosure['url'],
'detail_url': enclosure['url'].replace('/download/', '/release/'),
'description': description,
'get_more_info': self.getMoreInfo,
'extra_check': extra_check,
})
def getMoreInfo(self, item):
try:
@ -123,5 +77,3 @@ class NzbIndex(NZBProvider, RSS):
except:
pass
def isEnabled(self):
return NZBProvider.isEnabled(self) and self.conf('enabled')

couchpotato/core/providers/nzb/nzbsrus/main.py (76 changed lines)

@ -1,11 +1,9 @@
from couchpotato.core.event import fireEvent
from couchpotato.core.helpers.encoding import tryUrlencode
from couchpotato.core.helpers.rss import RSS
from couchpotato.core.logger import CPLog
from couchpotato.core.providers.nzb.base import NZBProvider
from couchpotato.environment import Env
import time
import xml.etree.ElementTree as XMLTree
log = CPLog(__name__)
@ -23,15 +21,9 @@ class Nzbsrus(NZBProvider, RSS):
]
cat_backup_id = 240
def search(self, movie, quality):
results = []
if self.isDisabled():
return results
def _search(self, movie, quality, results):
cat_id_string = '&'.join(['c%s=1' % x for x in self.getCatId(quality.get('identifier'))])
arguments = tryUrlencode({
'searchtext': 'imdb:' + movie['library']['identifier'][2:],
'uid': self.conf('userid'),
@ -42,59 +34,29 @@ class Nzbsrus(NZBProvider, RSS):
# check for english_only
if self.conf('english_only'):
arguments += "&lang0=1&lang3=1&lang1=1"
url = "%s&%s&%s" % (self.urls['search'], arguments , cat_id_string)
cache_key = 'nzbsrus.%s.%s' % (movie['library'].get('identifier'), cat_id_string)
data = self.getCache(cache_key, url, cache_timeout = 1800, headers = {'User-Agent': Env.getIdentifier()})
if data:
try:
try:
data = XMLTree.fromstring(data)
nzbs = self.getElements(data, 'results/result')
except Exception, e:
log.debug('%s, %s', (self.getName(), e))
return results
for nzb in nzbs:
title = self.getTextElement(nzb, "name")
if 'error' in title.lower(): continue
id = self.getTextElement(nzb, "id")
size = int(round(int(self.getTextElement(nzb, "size")) / 1048576))
age = int(round((time.time() - int(self.getTextElement(nzb, "postdate"))) / 86400))
arguments += '&lang0=1&lang3=1&lang1=1'
new = {
'id': id,
'type': 'nzb',
'provider': self.getName(),
'name': title,
'age': age,
'size': size,
'url': self.urls['download'] % id + self.getApiExt() + self.getTextElement(nzb, "key"),
'download': self.download,
'detail_url': self.urls['detail'] % id,
'description': self.getTextElement(nzb, "addtext"),
'check_nzb': True,
}
url = '%s&%s&%s' % (self.urls['search'], arguments , cat_id_string)
nzbs = self.getRSSData(url, cache_timeout = 1800, headers = {'User-Agent': Env.getIdentifier()})
is_correct_movie = fireEvent('searcher.correct_movie',
nzb = new, movie = movie, quality = quality,
imdb_results = True, single = True)
for nzb in nzbs:
if is_correct_movie:
new['score'] = fireEvent('score.calculate', new, movie, single = True)
results.append(new)
self.found(new)
title = self.getTextElement(nzb, 'name')
if 'error' in title.lower(): continue
return results
except SyntaxError:
log.error('Failed to parse XML response from Nzbsrus.com')
nzb_id = self.getTextElement(nzb, 'id')
size = int(round(int(self.getTextElement(nzb, 'size')) / 1048576))
age = int(round((time.time() - int(self.getTextElement(nzb, 'postdate'))) / 86400))
return results
results.append({
'id': nzb_id,
'name': title,
'age': age,
'size': size,
'url': self.urls['download'] % id + self.getApiExt() + self.getTextElement(nzb, 'key'),
'detail_url': self.urls['detail'] % nzb_id,
'description': self.getTextElement(nzb, 'addtext'),
})
def getApiExt(self):
return '/%s/' % (self.conf('userid'))

couchpotato/core/providers/nzb/nzbx/main.py (88 changed lines)

@ -1,91 +1,37 @@
from couchpotato.core.event import fireEvent
from couchpotato.core.helpers.encoding import tryUrlencode
from couchpotato.core.helpers.rss import RSS
from couchpotato.core.helpers.variable import tryInt
from couchpotato.core.logger import CPLog
from couchpotato.core.providers.nzb.base import NZBProvider
import json
import traceback
log = CPLog(__name__)
class Nzbx(NZBProvider, RSS):
endpoint = 'https://nzbx.co/api/'
class Nzbx(NZBProvider):
urls = {
'search': 'https://nzbx.co/api/search',
'search': 'https://nzbx.co/api/search?%s',
'details': 'https://nzbx.co/api/details?guid=%s',
'comments': 'https://nzbx.co/api/get-comments?guid=%s',
'ratings': 'https://nzbx.co/api/get-votes?guid=%s',
'downloads': 'https://nzbx.co/api/get-downloads-count?guid=%s',
'categories': 'https://nzbx.co/api/categories',
'groups': 'https://nzbx.co/api/groups',
}
http_time_between_calls = 1 # Seconds
def search(self, movie, quality):
results = []
if self.isDisabled():
return results
def _search(self, movie, quality, results):
# Get nbzs
arguments = tryUrlencode({
'q': movie['library']['identifier'].replace('tt', ''),
'sf': quality.get('size_min'),
})
url = "%s?%s" % (self.urls['search'], arguments)
cache_key = 'nzbx.%s.%s' % (movie['library']['identifier'], quality.get('identifier'))
data = self.getCache(cache_key, url)
if data:
try:
try:
nzbs = json.loads(data)
except Exception, e:
log.debug('%s, %s', (self.getName(), e))
return results
for nzb in nzbs:
nzbx_guid = nzb['guid']
def extra_score(item):
score = 0
if item['votes']['upvotes'] > item['votes']['downvotes']:
score += 5
return score
new = {
'guid': nzbx_guid,
'type': 'nzb',
'provider': self.getName(),
'download': self.download,
'url': nzb['nzb'],
'name': nzb['name'],
'age': self.calculateAge(int(nzb['postdate'])),
'size': tryInt(nzb['size']) / 1024 / 1024,
'description': '',
'extra_score': extra_score,
'votes': nzb['votes'],
'check_nzb': True,
}
is_correct_movie = fireEvent('searcher.correct_movie',
nzb = new, movie = movie, quality = quality,
imdb_results = False, single = True)
if is_correct_movie:
new['score'] = fireEvent('score.calculate', new, movie, single = True)
results.append(new)
self.found(new)
return results
except:
log.error('Failed to parsing %s: %s', (self.getName(), traceback.format_exc()))
return results
nzbs = self.getJsonData(self.urls['search'] % arguments)
for nzb in nzbs:
results.append({
'id': nzb['guid'],
'url': nzb['nzb'],
'detail_url': self.urls['details'] % nzb['guid'],
'name': nzb['name'],
'age': self.calculateAge(int(nzb['postdate'])),
'size': tryInt(nzb['size']) / 1024 / 1024,
'score': 5 if nzb['votes']['upvotes'] > nzb['votes']['downvotes'] else 0
})
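
nzbx now goes through the new getJsonData() helper from providers/base.py, which fetches through the md5-keyed cache and falls back to an empty list when parsing fails. A standalone sketch of the same fetch-then-parse-with-fallback idea, using only the standard library; the function names here are illustrative, not the project's API:

import json
import logging

log = logging.getLogger(__name__)

def get_json_data(fetch, url):
    # fetch() stands in for the project's cached HTTP call (getCache)
    data = fetch(url)
    if data:
        try:
            return json.loads(data)
        except ValueError:
            log.error('Failed parsing response from %s', url)
    return []

print(get_json_data(lambda u: '[{"guid": "abc"}]', 'https://example.invalid/api'))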

couchpotato/core/providers/nzb/omgwtfnzbs/main.py (75 changed lines)

@ -1,14 +1,12 @@
from bs4 import BeautifulSoup
from couchpotato.core.event import fireEvent
from couchpotato.core.helpers.encoding import toUnicode, tryUrlencode
from couchpotato.core.helpers.rss import RSS
from couchpotato.core.helpers.variable import tryInt, getTitle, possibleTitles
from couchpotato.core.helpers.variable import tryInt
from couchpotato.core.logger import CPLog
from couchpotato.core.providers.nzb.base import NZBProvider
from dateutil.parser import parse
from urlparse import urlparse, parse_qs
import time
import xml.etree.ElementTree as XMLTree
log = CPLog(__name__)
@ -31,70 +29,33 @@ class OMGWTFNZBs(NZBProvider, RSS):
def search(self, movie, quality):
pre_releases = fireEvent('quality.pre_releases', single = True)
if self.isDisabled() or quality['identifier'] in pre_releases:
if quality['identifier'] in fireEvent('quality.pre_releases', single = True):
return []
results = []
for title in possibleTitles(getTitle(movie['library'])):
results.extend(self._search(title, movie, quality))
return super(OMGWTFNZBs, self).search(movie, quality)
return self.removeDuplicateResults(results)
def _search(self, title, movie, quality):
results = []
def _searchOnTitle(self, title, movie, quality, results):
q = '%s %s' % (title, movie['library']['year'])
params = {
params = tryUrlencode({
'search': q,
'catid': ','.join([str(x) for x in self.getCatId(quality['identifier'])]),
'user': self.conf('username', default = ''),
'api': self.conf('api_key', default = ''),
}
cache_key = 'omgwtfnzbs.%s.%s' % (movie['library']['identifier'], q)
data = self.getCache(cache_key, self.urls['search'] % tryUrlencode(params))
if data:
try:
try:
data = XMLTree.fromstring(data)
nzbs = self.getElements(data, 'channel/item')
except Exception, e:
log.debug('%s, %s', (self.getName(), e))
return results
for nzb in nzbs:
nzb_id = parse_qs(urlparse(self.getTextElement(nzb, "link")).query).get('id')[0]
enclosure = self.getElement(nzb, "enclosure").attrib
size = enclosure['length']
date = self.getTextElement(nzb, "pubDate")
new = {
'id': nzb_id,
'type': 'nzb',
'provider': self.getName(),
'name': toUnicode(self.getTextElement(nzb, "title")),
'age': self.calculateAge(int(time.mktime(parse(date).timetuple()))),
'size': tryInt(size) / 1024 / 1024,
'url': enclosure['url'],
'download': self.download,
'detail_url': self.getTextElement(nzb, "link"),
'description': self.getTextElement(nzb, 'description')
}
})
is_correct_movie = fireEvent('searcher.correct_movie',
nzb = new, movie = movie, quality = quality,
imdb_results = False, single = True)
nzbs = self.getRSSData(self.urls['search'] % params)
if is_correct_movie:
new['score'] = fireEvent('score.calculate', new, movie, single = True)
results.append(new)
self.found(new)
for nzb in nzbs:
return results
except SyntaxError:
log.error('Failed to parse XML response from omgwtfnzbs')
enclosure = self.getElement(nzb, 'enclosure').attrib
return results
results.append({
'id': parse_qs(urlparse(self.getTextElement(nzb, 'link')).query).get('id')[0],
'name': toUnicode(self.getTextElement(nzb, 'title')),
'age': self.calculateAge(int(time.mktime(parse(self.getTextElement(nzb, 'pubDate')).timetuple()))),
'size': tryInt(enclosure['length']) / 1024 / 1024,
'url': enclosure['url'],
'detail_url': self.getTextElement(nzb, 'link'),
'description': self.getTextElement(nzb, 'description')
})

couchpotato/core/providers/torrent/base.py (6 changed lines)

@ -24,3 +24,9 @@ class TorrentProvider(YarrProvider):
return getImdb(data) == imdbId
return False
class TorrentMagnetProvider(TorrentProvider):
type = 'torrent_magnet'
download = None
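
The new TorrentMagnetProvider base only sets type = 'torrent_magnet' and clears the download handler, so the magnet-based trackers below (KickAssTorrents, PublicHD, ThePirateBay) inherit the right result type instead of tagging every result themselves. A minimal sketch of such a subclass; the class name, URL and info-hash are placeholders:

from couchpotato.core.providers.torrent.base import TorrentMagnetProvider


class ExampleMagnet(TorrentMagnetProvider):

    urls = {'search': 'https://example.invalid/search?q=%s'}  # placeholder

    def _searchOnTitle(self, title, movie, quality, results):
        # Every appended result inherits type 'torrent_magnet';
        # 'url' carries the magnet link instead of a .torrent download.
        results.append({
            'id': 1,
            'name': title,
            'url': 'magnet:?xt=urn:btih:' + '0' * 40,  # placeholder info-hash
        })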

couchpotato/core/providers/torrent/kickasstorrents/main.py (100 changed lines)

@ -1,16 +1,14 @@
from bs4 import BeautifulSoup
from couchpotato.core.event import fireEvent
from couchpotato.core.helpers.encoding import simplifyString
from couchpotato.core.helpers.variable import tryInt, getTitle
from couchpotato.core.helpers.variable import tryInt
from couchpotato.core.logger import CPLog
from couchpotato.core.providers.torrent.base import TorrentProvider
from couchpotato.core.providers.torrent.base import TorrentMagnetProvider
import re
import traceback
log = CPLog(__name__)
class KickAssTorrents(TorrentProvider):
class KickAssTorrents(TorrentMagnetProvider):
urls = {
'test': 'https://kat.ph/',
@ -30,16 +28,10 @@ class KickAssTorrents(TorrentProvider):
http_time_between_calls = 1 #seconds
cat_backup_id = None
def search(self, movie, quality):
def _search(self, movie, quality, results):
results = []
if self.isDisabled():
return results
data = self.getHTMLData(self.urls['search'] % ('m', movie['library']['identifier'].replace('tt', '')))
title = simplifyString(getTitle(movie['library'])).replace(' ', '-')
cache_key = 'kickasstorrents.%s.%s' % (movie['library']['identifier'], quality.get('identifier'))
data = self.getCache(cache_key, self.urls['search'] % (title, movie['library']['identifier'].replace('tt', '')))
if data:
cat_ids = self.getCatId(quality['identifier'])
@ -53,62 +45,42 @@ class KickAssTorrents(TorrentProvider):
continue
try:
try:
for temp in result.find_all('tr'):
if temp['class'] is 'firstr' or not temp.get('id'):
continue
new = {
'type': 'torrent_magnet',
'check_nzb': False,
'description': '',
'provider': self.getName(),
'score': 0,
}
nr = 0
for td in temp.find_all('td'):
column_name = table_order[nr]
if column_name:
if column_name is 'name':
link = td.find('div', {'class': 'torrentname'}).find_all('a')[1]
new['id'] = temp.get('id')[-8:]
new['name'] = link.text
new['url'] = td.find('a', 'imagnet')['href']
new['detail_url'] = self.urls['detail'] % link['href'][1:]
new['score'] = 20 if td.find('a', 'iverif') else 0
elif column_name is 'size':
new['size'] = self.parseSize(td.text)
elif column_name is 'age':
new['age'] = self.ageToDays(td.text)
elif column_name is 'seeds':
new['seeders'] = tryInt(td.text)
elif column_name is 'leechers':
new['leechers'] = tryInt(td.text)
nr += 1
new['score'] += fireEvent('score.calculate', new, movie, single = True)
is_correct_movie = fireEvent('searcher.correct_movie',
nzb = new, movie = movie, quality = quality,
imdb_results = True, single = True)
if is_correct_movie:
results.append(new)
self.found(new)
except:
log.error('Failed parsing KickAssTorrents: %s', traceback.format_exc())
for temp in result.find_all('tr'):
if temp['class'] is 'firstr' or not temp.get('id'):
continue
new = {}
nr = 0
for td in temp.find_all('td'):
column_name = table_order[nr]
if column_name:
if column_name is 'name':
link = td.find('div', {'class': 'torrentname'}).find_all('a')[1]
new['id'] = temp.get('id')[-8:]
new['name'] = link.text
new['url'] = td.find('a', 'imagnet')['href']
new['detail_url'] = self.urls['detail'] % link['href'][1:]
new['score'] = 20 if td.find('a', 'iverif') else 0
elif column_name is 'size':
new['size'] = self.parseSize(td.text)
elif column_name is 'age':
new['age'] = self.ageToDays(td.text)
elif column_name is 'seeds':
new['seeders'] = tryInt(td.text)
elif column_name is 'leechers':
new['leechers'] = tryInt(td.text)
nr += 1
results.append(new)
except:
pass
log.error('Failed parsing KickAssTorrents: %s', traceback.format_exc())
return results
except AttributeError:
log.debug('No search results found.')
return results
def ageToDays(self, age_str):
age = 0
age_str = age_str.replace('&nbsp;', ' ')

couchpotato/core/providers/torrent/passthepopcorn/main.py (40 changed lines)

@ -1,4 +1,3 @@
from couchpotato.core.event import fireEvent
from couchpotato.core.helpers.encoding import tryUrlencode
from couchpotato.core.helpers.variable import getTitle, tryInt, mergeDicts
from couchpotato.core.logger import CPLog
@ -65,18 +64,11 @@ class PassThePopcorn(TorrentProvider):
else:
raise PassThePopcorn.NotLoggedInHTTPError(req.get_full_url(), code, msg, headers, fp)
def search(self, movie, quality):
results = []
if self.isDisabled():
return results
def _search(self, movie, quality, results):
movie_title = getTitle(movie['library'])
quality_id = quality['identifier']
log.info('Searching for %s at quality %s' % (movie_title, quality_id))
params = mergeDicts(self.quality_search_params[quality_id].copy(), {
'order_by': 'relevance',
'order_way': 'descending',
@ -85,7 +77,7 @@ class PassThePopcorn(TorrentProvider):
# Do login for the cookies
if not self.login_opener and not self.login():
return results
return
try:
url = '%s?json=noredirect&%s' % (self.urls['torrent'], tryUrlencode(params))
@ -93,12 +85,11 @@ class PassThePopcorn(TorrentProvider):
res = json.loads(txt)
except:
log.error('Search on PassThePopcorn.me (%s) failed (could not decode JSON)' % params)
return []
return
try:
if not 'Movies' in res:
log.info("PTP search returned nothing for '%s' at quality '%s' with search parameters %s" % (movie_title, quality_id, params))
return []
return
authkey = res['AuthKey']
passkey = res['PassKey']
@ -118,7 +109,6 @@ class PassThePopcorn(TorrentProvider):
if 'Scene' in torrent and torrent['Scene']:
torrentdesc += ' Scene'
if 'RemasterTitle' in torrent and torrent['RemasterTitle']:
# eliminate odd characters...
torrentdesc += self.htmlToASCII(' %s' % torrent['RemasterTitle'])
torrentdesc += ' (%s)' % quality_id
@ -127,39 +117,23 @@ class PassThePopcorn(TorrentProvider):
def extra_check(item):
return self.torrentMeetsQualitySpec(item, type)
def extra_score(item):
return 50 if torrent['GoldenPopcorn'] else 0
new = {
results.append({
'id': torrent_id,
'type': 'torrent',
'provider': self.getName(),
'name': torrent_name,
'description': '',
'url': '%s?action=download&id=%d&authkey=%s&torrent_pass=%s' % (self.urls['torrent'], torrent_id, authkey, passkey),
'detail_url': self.urls['detail'] % torrent_id,
'date': tryInt(time.mktime(parse(torrent['UploadTime']).timetuple())),
'size': tryInt(torrent['Size']) / (1024 * 1024),
'provider': self.getName(),
'seeders': tryInt(torrent['Seeders']),
'leechers': tryInt(torrent['Leechers']),
'extra_score': extra_score,
'score': 50 if torrent['GoldenPopcorn'] else 0,
'extra_check': extra_check,
'download': self.loginDownload,
}
})
new['score'] = fireEvent('score.calculate', new, movie, single = True)
if fireEvent('searcher.correct_movie', nzb = new, movie = movie, quality = quality):
results.append(new)
self.found(new)
return results
except:
log.error('Failed getting results from %s: %s', (self.getName(), traceback.format_exc()))
return []
def login(self):
cookieprocessor = urllib2.HTTPCookieProcessor(cookielib.CookieJar())

couchpotato/core/providers/torrent/publichd/main.py (41 changed lines)

@ -1,9 +1,8 @@
from bs4 import BeautifulSoup
from couchpotato.core.event import fireEvent
from couchpotato.core.helpers.encoding import tryUrlencode, toUnicode
from couchpotato.core.helpers.variable import getTitle, tryInt
from couchpotato.core.helpers.variable import tryInt
from couchpotato.core.logger import CPLog
from couchpotato.core.providers.torrent.base import TorrentProvider
from couchpotato.core.providers.torrent.base import TorrentMagnetProvider
from urlparse import parse_qs
import re
import traceback
@ -11,7 +10,7 @@ import traceback
log = CPLog(__name__)
class PublicHD(TorrentProvider):
class PublicHD(TorrentMagnetProvider):
urls = {
'test': 'https://publichd.se',
@ -22,20 +21,20 @@ class PublicHD(TorrentProvider):
def search(self, movie, quality):
results = []
if not quality.get('hd', False):
return []
if self.isDisabled() or not quality.get('hd', False):
return results
return super(PublicHD, self).search(movie, quality)
def _searchOnTitle(self, title, movie, quality, results):
params = tryUrlencode({
'page':'torrents',
'search': '%s %s' % (getTitle(movie['library']), movie['library']['year']),
'search': '%s %s' % (title, movie['library']['year']),
'active': 1,
})
url = '%s?%s' % (self.urls['search'], params)
cache_key = 'publichd.%s.%s' % (movie['library']['identifier'], quality.get('identifier'))
data = self.getCache(cache_key, url)
data = self.getHTMLData('%s?%s' % (self.urls['search'], params))
if data:
@ -53,36 +52,20 @@ class PublicHD(TorrentProvider):
url = parse_qs(info_url['href'])
new = {
results.append({
'id': url['id'][0],
'name': info_url.string,
'type': 'torrent_magnet',
'check_nzb': False,
'description': '',
'provider': self.getName(),
'url': download['href'],
'detail_url': self.urls['detail'] % url['id'][0],
'size': self.parseSize(result.find_all('td')[7].string),
'seeders': tryInt(result.find_all('td')[4].string),
'leechers': tryInt(result.find_all('td')[5].string),
'get_more_info': self.getMoreInfo
}
new['score'] = fireEvent('score.calculate', new, movie, single = True)
is_correct_movie = fireEvent('searcher.correct_movie', nzb = new, movie = movie, quality = quality,
imdb_results = False, single = True)
if is_correct_movie:
results.append(new)
self.found(new)
return results
})
except:
log.error('Failed getting results from %s: %s', (self.getName(), traceback.format_exc()))
return []
def getMoreInfo(self, item):
full_description = self.getCache('publichd.%s' % item['id'], item['detail_url'], cache_timeout = 25920000)
html = BeautifulSoup(full_description)

couchpotato/core/providers/torrent/sceneaccess/main.py (44 changed lines)

@ -1,5 +1,4 @@
from bs4 import BeautifulSoup
from couchpotato.core.event import fireEvent
from couchpotato.core.helpers.encoding import tryUrlencode, toUnicode
from couchpotato.core.helpers.variable import tryInt
from couchpotato.core.logger import CPLog
@ -27,30 +26,24 @@ class SceneAccess(TorrentProvider):
http_time_between_calls = 1 #seconds
def search(self, movie, quality):
results = []
if self.isDisabled():
return results
def _search(self, movie, quality, results):
url = self.urls['search'] % (
self.getCatId(quality['identifier'])[0],
self.getCatId(quality['identifier'])[0]
)
q = '%s %s' % (movie['library']['identifier'], quality.get('identifier'))
arguments = tryUrlencode({
'search': q,
'search': movie['library']['identifier'],
'method': 1,
})
url = "%s&%s" % (url, arguments)
# Do login for the cookies
if not self.login_opener and not self.login():
return results
return
cache_key = 'sceneaccess.%s.%s' % (movie['library']['identifier'], quality.get('identifier'))
data = self.getCache(cache_key, url, opener = self.login_opener)
data = self.getHTMLData(url, opener = self.login_opener)
if data:
html = BeautifulSoup(data)
@ -58,7 +51,7 @@ class SceneAccess(TorrentProvider):
try:
resultsTable = html.find('table', attrs = {'id' : 'torrents-table'})
if resultsTable is None:
return results
return
entries = resultsTable.find_all('tr', attrs = {'class' : 'tt_row'})
for result in entries:
@ -66,38 +59,23 @@ class SceneAccess(TorrentProvider):
link = result.find('td', attrs = {'class' : 'ttr_name'}).find('a')
url = result.find('td', attrs = {'class' : 'td_dl'}).find('a')
leechers = result.find('td', attrs = {'class' : 'ttr_leechers'}).find('a')
id = link['href'].replace('details?id=', '')
new = {
'id': id,
'type': 'torrent',
'check_nzb': False,
'description': '',
'provider': self.getName(),
torrent_id = link['href'].replace('details?id=', '')
results.append({
'id': torrent_id,
'name': link['title'],
'url': self.urls['download'] % url['href'],
'detail_url': self.urls['detail'] % id,
'detail_url': self.urls['detail'] % torrent_id,
'size': self.parseSize(result.find('td', attrs = {'class' : 'ttr_size'}).contents[0]),
'seeders': tryInt(result.find('td', attrs = {'class' : 'ttr_seeders'}).find('a').string),
'leechers': tryInt(leechers.string) if leechers else 0,
'download': self.loginDownload,
'get_more_info': self.getMoreInfo,
}
new['score'] = fireEvent('score.calculate', new, movie, single = True)
is_correct_movie = fireEvent('searcher.correct_movie', nzb = new, movie = movie, quality = quality,
imdb_results = False, single = True)
})
if is_correct_movie:
results.append(new)
self.found(new)
return results
except:
log.error('Failed getting results from %s: %s', (self.getName(), traceback.format_exc()))
return []
def getLoginParams(self):
return tryUrlencode({
'username': self.conf('username'),

couchpotato/core/providers/torrent/scenehd/main.py (46 changed lines)

@ -1,7 +1,6 @@
from bs4 import BeautifulSoup
from couchpotato.core.event import fireEvent
from couchpotato.core.helpers.encoding import simplifyString, tryUrlencode
from couchpotato.core.helpers.variable import getTitle, tryInt
from couchpotato.core.helpers.variable import tryInt
from couchpotato.core.logger import CPLog
from couchpotato.core.providers.torrent.base import TorrentProvider
import traceback
@ -21,13 +20,9 @@ class SceneHD(TorrentProvider):
http_time_between_calls = 1 #seconds
def search(self, movie, quality):
def _searchOnTitle(self, title, movie, quality, results):
results = []
if self.isDisabled():
return results
q = '"%s %s" %s' % (simplifyString(getTitle(movie['library'])), movie['library']['year'], quality.get('identifier'))
q = '"%s %s" %s' % (simplifyString(title), movie['library']['year'], quality.get('identifier'))
arguments = tryUrlencode({
'search': q,
})
@ -35,10 +30,9 @@ class SceneHD(TorrentProvider):
# Cookie login
if not self.login_opener and not self.login():
return results
return
cache_key = 'scenehd.%s.%s' % (movie['library']['identifier'], quality.get('identifier'))
data = self.getCache(cache_key, url, opener = self.login_opener)
data = self.getHTMLData(url, opener = self.login_opener)
if data:
html = BeautifulSoup(data)
@ -52,7 +46,7 @@ class SceneHD(TorrentProvider):
detail_link = all_cells[2].find('a')
details = detail_link['href']
id = details.replace('details.php?id=', '')
torrent_id = details.replace('details.php?id=', '')
leechers = all_cells[11].find('a')
if leechers:
@ -60,38 +54,20 @@ class SceneHD(TorrentProvider):
else:
leechers = all_cells[11].string
new = {
'id': id,
results.append({
'id': torrent_id,
'name': detail_link['title'],
'type': 'torrent',
'check_nzb': False,
'description': '',
'provider': self.getName(),
'size': self.parseSize(all_cells[7].string),
'seeders': tryInt(all_cells[10].find('a').string),
'leechers': tryInt(leechers),
'url': self.urls['download'] % id,
'url': self.urls['download'] % torrent_id,
'download': self.loginDownload,
}
imdb_link = all_cells[1].find('a')
imdb_results = self.imdbMatch(imdb_link['href'], movie['library']['identifier']) if imdb_link else False
new['score'] = fireEvent('score.calculate', new, movie, single = True)
is_correct_movie = fireEvent('searcher.correct_movie', nzb = new, movie = movie, quality = quality,
imdb_results = imdb_results, single = True)
if is_correct_movie:
results.append(new)
self.found(new)
return results
'description': all_cells[1].find('a')['href'],
})
except:
log.error('Failed getting results from %s: %s', (self.getName(), traceback.format_exc()))
return []
def getLoginParams(self, params):
return tryUrlencode({

couchpotato/core/providers/torrent/thepiratebay/main.py (100 changed lines)

@ -1,11 +1,9 @@
from bs4 import BeautifulSoup
from couchpotato.core.event import fireEvent
from couchpotato.core.helpers.encoding import toUnicode
from couchpotato.core.helpers.variable import getTitle, tryInt, cleanHost
from couchpotato.core.helpers.encoding import toUnicode, tryUrlencode
from couchpotato.core.helpers.variable import tryInt, cleanHost
from couchpotato.core.logger import CPLog
from couchpotato.core.providers.torrent.base import TorrentProvider
from couchpotato.core.providers.torrent.base import TorrentMagnetProvider
from couchpotato.environment import Env
from urllib import quote_plus
import re
import time
import traceback
@ -13,7 +11,7 @@ import traceback
log = CPLog(__name__)
class ThePirateBay(TorrentProvider):
class ThePirateBay(TorrentMagnetProvider):
urls = {
'detail': '%s/torrent/%s',
@ -45,44 +43,11 @@ class ThePirateBay(TorrentProvider):
self.domain = self.conf('domain')
super(ThePirateBay, self).__init__()
def getDomain(self, url = ''):
if not self.domain:
for proxy in self.proxy_list:
prop_name = 'tpb_proxy.%s' % proxy
last_check = float(Env.prop(prop_name, default = 0))
if last_check > time.time() - 1209600:
continue
data = ''
try:
data = self.urlopen(proxy, timeout = 3, show_error = False)
except:
log.debug('Failed tpb proxy %s', proxy)
def _searchOnTitle(self, title, movie, quality, results):
if 'title="Pirate Search"' in data:
log.debug('Using proxy: %s', proxy)
self.domain = proxy
break
search_url = self.urls['search'] % (self.getDomain(), tryUrlencode(title + ' ' + quality['identifier']), self.getCatId(quality['identifier'])[0])
Env.prop(prop_name, time.time())
if not self.domain:
log.error('No TPB proxies left, please add one in settings, or let us know which one to add on the forum.')
return None
return cleanHost(self.domain).rstrip('/') + url
def search(self, movie, quality):
results = []
if self.isDisabled() or not self.getDomain():
return results
cache_key = 'thepiratebay.%s.%s' % (movie['library']['identifier'], quality.get('identifier'))
search_url = self.urls['search'] % (self.getDomain(), quote_plus(getTitle(movie['library']) + ' ' + quality['identifier']), self.getCatId(quality['identifier'])[0])
data = self.getCache(cache_key, search_url)
data = self.getHTMLData(search_url)
if data:
try:
@ -90,7 +55,7 @@ class ThePirateBay(TorrentProvider):
results_table = soup.find('table', attrs = {'id': 'searchResult'})
if not results_table:
return results
return
entries = results_table.find_all('tr')
for result in entries[2:]:
@ -112,13 +77,9 @@ class ThePirateBay(TorrentProvider):
return confirmed + trusted + vip + moderated
new = {
results.append({
'id': re.search('/(?P<id>\d+)/', link['href']).group('id'),
'type': 'torrent_magnet',
'name': link.string,
'check_nzb': False,
'description': '',
'provider': self.getName(),
'url': download['href'],
'detail_url': self.getDomain(link['href']),
'size': self.parseSize(size),
@ -126,21 +87,42 @@ class ThePirateBay(TorrentProvider):
'leechers': tryInt(result.find_all('td')[3].string),
'extra_score': extra_score,
'get_more_info': self.getMoreInfo
}
new['score'] = fireEvent('score.calculate', new, movie, single = True)
is_correct_movie = fireEvent('searcher.correct_movie', nzb = new, movie = movie, quality = quality,
imdb_results = False, single = True)
})
if is_correct_movie:
results.append(new)
self.found(new)
return results
except:
log.error('Failed getting results from %s: %s', (self.getName(), traceback.format_exc()))
return []
def isEnabled(self):
return super(ThePirateBay, self).isEnabled() and self.getDomain()
def getDomain(self, url = ''):
if not self.domain:
for proxy in self.proxy_list:
prop_name = 'tpb_proxy.%s' % proxy
last_check = float(Env.prop(prop_name, default = 0))
if last_check > time.time() - 1209600:
continue
data = ''
try:
data = self.urlopen(proxy, timeout = 3, show_error = False)
except:
log.debug('Failed tpb proxy %s', proxy)
if 'title="Pirate Search"' in data:
log.debug('Using proxy: %s', proxy)
self.domain = proxy
break
Env.prop(prop_name, time.time())
if not self.domain:
log.error('No TPB proxies left, please add one in settings, or let us know which one to add on the forum.')
return None
return cleanHost(self.domain).rstrip('/') + url
def getMoreInfo(self, item):
full_description = self.getCache('tpb.%s' % item['id'], item['detail_url'], cache_timeout = 25920000)

couchpotato/core/providers/torrent/torrentday/__init__.py (32 changed lines)

@ -0,0 +1,32 @@
from .main import TorrentDay
def start():
return TorrentDay()
config = [{
'name': 'torrentday',
'groups': [
{
'tab': 'searcher',
'subtab': 'torrent_providers',
'name': 'TorrentDay',
'description': 'See <a href="http://www.td.af/">TorrentDay</a>',
'options': [
{
'name': 'enabled',
'type': 'enabler',
'default': False,
},
{
'name': 'username',
'default': '',
},
{
'name': 'password',
'default': '',
'type': 'password',
},
],
},
],
}]

couchpotato/core/providers/torrent/torrentday/main.py (61 changed lines)

@ -0,0 +1,61 @@
from couchpotato.core.helpers.encoding import tryUrlencode
from couchpotato.core.helpers.variable import tryInt
from couchpotato.core.logger import CPLog
from couchpotato.core.providers.torrent.base import TorrentProvider
log = CPLog(__name__)
class TorrentDay(TorrentProvider):
urls = {
'test': 'http://www.td.af/',
'login' : 'http://www.td.af/torrents/',
'detail': 'http://www.td.af/details.php?id=%s',
'search': 'http://www.td.af/V3/API/API.php',
'download': 'http://www.td.af/download.php/%s/%s',
}
cat_ids = [
([11], ['720p', '1080p']),
([1, 21, 25], ['cam', 'ts', 'dvdrip', 'tc', 'r5', 'scr', 'brrip']),
([3], ['dvdr']),
([5], ['bd50']),
]
http_time_between_calls = 1 #seconds
def _searchOnTitle(self, title, movie, quality, results):
q = '"%s %s"' % (title, movie['library']['year'])
params = {
'/browse.php?': None,
'cata': 'yes',
'jxt': 8,
'jxw': 'b',
'search': q,
}
data = self.getJsonData(self.urls['search'], params = params, opener = self.login_opener)
try: torrents = data.get('Fs', [])[0].get('Cn', {}).get('torrents', [])
except: return
for torrent in torrents:
results.append({
'id': torrent['id'],
'name': torrent['name'],
'url': self.urls['download'] % (torrent['id'], torrent['fname']),
'detail_url': self.urls['detail'] % torrent['id'],
'size': self.parseSize(torrent.get('size')),
'seeders': tryInt(torrent.get('seed')),
'leechers': tryInt(torrent.get('leech')),
'download': self.loginDownload,
})
def getLoginParams(self):
return tryUrlencode({
'username': self.conf('username'),
'password': self.conf('password'),
'submit': 'submit',
})
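For reference, each entry appended to results in _searchOnTitle() above follows the dict shape shared by the torrent providers in this commit; a sketch with invented values:

# Hypothetical entry, values invented for illustration only:
example_result = {
    'id': 123456,
    'name': 'Some.Movie.2012.720p.BluRay.x264',
    'url': 'http://www.td.af/download.php/123456/Some.Movie.torrent',
    'detail_url': 'http://www.td.af/details.php?id=123456',
    'size': 4480,            # whatever parseSize() returns for the reported size
    'seeders': 40,
    'leechers': 5,
    'download': None,        # bound to self.loginDownload by the provider
}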

52
couchpotato/core/providers/torrent/torrentleech/main.py

@ -1,10 +1,8 @@
from bs4 import BeautifulSoup
from couchpotato.core.event import fireEvent
from couchpotato.core.helpers.encoding import tryUrlencode
from couchpotato.core.helpers.variable import getTitle, tryInt
from couchpotato.core.helpers.variable import tryInt
from couchpotato.core.logger import CPLog
from couchpotato.core.providers.torrent.base import TorrentProvider
from urllib import quote_plus
import traceback
@ -14,11 +12,11 @@ log = CPLog(__name__)
class TorrentLeech(TorrentProvider):
urls = {
'test' : 'http://torrentleech.org/',
'login' : 'http://torrentleech.org/user/account/login/',
'detail' : 'http://torrentleech.org/torrent/%s',
'search' : 'http://torrentleech.org/torrents/browse/index/query/%s/categories/%d',
'download' : 'http://torrentleech.org%s',
'test' : 'http://www.torrentleech.org/',
'login' : 'http://www.torrentleech.org/user/account/login/',
'detail' : 'http://www.torrentleech.org/torrent/%s',
'search' : 'http://www.torrentleech.org/torrents/browse/index/query/%s/categories/%d',
'download' : 'http://www.torrentleech.org%s',
}
cat_ids = [
@ -34,19 +32,10 @@ class TorrentLeech(TorrentProvider):
http_time_between_calls = 1 #seconds
cat_backup_id = None
def search(self, movie, quality):
def _searchOnTitle(self, title, movie, quality, results):
results = []
if self.isDisabled():
return results
# Cookie login
if not self.login_opener and not self.login():
return results
cache_key = 'torrentleech.%s.%s' % (movie['library']['identifier'], quality.get('identifier'))
url = self.urls['search'] % (quote_plus(getTitle(movie['library']).replace(':', '') + ' ' + quality['identifier']), self.getCatId(quality['identifier'])[0])
data = self.getCache(cache_key, url, opener = self.login_opener)
url = self.urls['search'] % (tryUrlencode(title.replace(':', '') + ' ' + quality['identifier']), self.getCatId(quality['identifier'])[0])
data = self.getHTMLData(url, opener = self.login_opener)
if data:
html = BeautifulSoup(data)
@ -54,7 +43,7 @@ class TorrentLeech(TorrentProvider):
try:
result_table = html.find('table', attrs = {'id' : 'torrenttable'})
if not result_table:
return results
return
entries = result_table.find_all('tr')
@ -64,37 +53,20 @@ class TorrentLeech(TorrentProvider):
url = result.find('td', attrs = {'class' : 'quickdownload'}).find('a')
details = result.find('td', attrs = {'class' : 'name'}).find('a')
new = {
results.append({
'id': link['href'].replace('/torrent/', ''),
'name': link.string,
'type': 'torrent',
'check_nzb': False,
'description': '',
'provider': self.getName(),
'url': self.urls['download'] % url['href'],
'detail_url': self.urls['download'] % details['href'],
'download': self.loginDownload,
'size': self.parseSize(result.find_all('td')[4].string),
'seeders': tryInt(result.find('td', attrs = {'class' : 'seeders'}).string),
'leechers': tryInt(result.find('td', attrs = {'class' : 'leechers'}).string),
}
imdb_results = self.imdbMatch(self.urls['detail'] % new['id'], movie['library']['identifier'])
})
new['score'] = fireEvent('score.calculate', new, movie, single = True)
is_correct_movie = fireEvent('searcher.correct_movie', nzb = new, movie = movie, quality = quality,
imdb_results = imdb_results, single = True)
if is_correct_movie:
results.append(new)
self.found(new)
return results
except:
log.error('Failed parsing %s: %s', (self.getName(), traceback.format_exc()))
return []
def getLoginParams(self):
return tryUrlencode({
'username': self.conf('username'),

2
couchpotato/runner.py

@ -189,7 +189,7 @@ def runCouchPotato(options, base_path, args, data_dir = None, log_dir = None, En
version_control(db, repo, version = latest_db_version)
current_db_version = db_version(db, repo)
if current_db_version < latest_db_version and not debug:
if current_db_version < latest_db_version and not development:
log.info('Doing database upgrade. From %d to %d', (current_db_version, latest_db_version))
upgrade(db, repo)

9
libs/dateutil/__init__.py

@ -1,9 +1,10 @@
# -*- coding: utf-8 -*-
"""
Copyright (c) 2003-2010 Gustavo Niemeyer <gustavo@niemeyer.net>
This module offers extensions to the standard python 2.3+
This module offers extensions to the standard Python
datetime module.
"""
__author__ = "Gustavo Niemeyer <gustavo@niemeyer.net>"
__license__ = "PSF License"
__version__ = "1.5"
__author__ = "Tomi Pieviläinen <tomi.pievilainen@iki.fi>"
__license__ = "Simplified BSD"
__version__ = "2.1"

9
libs/dateutil/easter.py

@ -1,11 +1,10 @@
"""
Copyright (c) 2003-2007 Gustavo Niemeyer <gustavo@niemeyer.net>
This module offers extensions to the standard python 2.3+
This module offers extensions to the standard Python
datetime module.
"""
__author__ = "Gustavo Niemeyer <gustavo@niemeyer.net>"
__license__ = "PSF License"
__license__ = "Simplified BSD"
import datetime
@ -52,7 +51,7 @@ def easter(year, method=EASTER_WESTERN):
"""
if not (1 <= method <= 3):
raise ValueError, "invalid method"
raise ValueError("invalid method")
# g - Golden year - 1
# c - Century
@ -88,5 +87,5 @@ def easter(year, method=EASTER_WESTERN):
p = i-j+e
d = 1+(p+27+(p+6)//40)%31
m = 3+(p+26)//30
return datetime.date(int(y),int(m),int(d))
return datetime.date(int(y), int(m), int(d))

207
libs/dateutil/parser.py

@ -2,25 +2,27 @@
"""
Copyright (c) 2003-2007 Gustavo Niemeyer <gustavo@niemeyer.net>
This module offers extensions to the standard python 2.3+
This module offers extensions to the standard Python
datetime module.
"""
__author__ = "Gustavo Niemeyer <gustavo@niemeyer.net>"
__license__ = "PSF License"
from __future__ import unicode_literals
__license__ = "Simplified BSD"
import datetime
import string
import time
import sys
import os
import collections
try:
from cStringIO import StringIO
from io import StringIO
except ImportError:
from StringIO import StringIO
from io import StringIO
from six import text_type, binary_type, integer_types
import relativedelta
import tz
from . import relativedelta
from . import tz
__all__ = ["parse", "parserinfo"]
@ -39,7 +41,7 @@ __all__ = ["parse", "parserinfo"]
class _timelex(object):
def __init__(self, instream):
if isinstance(instream, basestring):
if isinstance(instream, text_type):
instream = StringIO(instream)
self.instream = instream
self.wordchars = ('abcdfeghijklmnopqrstuvwxyz'
@ -133,12 +135,15 @@ class _timelex(object):
def __iter__(self):
return self
def next(self):
def __next__(self):
token = self.get_token()
if token is None:
raise StopIteration
return token
def next(self):
return self.__next__() # Python 2.x support
def split(cls, s):
return list(cls(s))
split = classmethod(split)
@ -155,7 +160,7 @@ class _resultbase(object):
for attr in self.__slots__:
value = getattr(self, attr)
if value is not None:
l.append("%s=%s" % (attr, `value`))
l.append("%s=%s" % (attr, repr(value)))
return "%s(%s)" % (classname, ", ".join(l))
def __repr__(self):
@ -167,7 +172,7 @@ class parserinfo(object):
# m from a.m/p.m, t from ISO T separator
JUMP = [" ", ".", ",", ";", "-", "/", "'",
"at", "on", "and", "ad", "m", "t", "of",
"st", "nd", "rd", "th"]
"st", "nd", "rd", "th"]
WEEKDAYS = [("Mon", "Monday"),
("Tue", "Tuesday"),
@ -176,7 +181,7 @@ class parserinfo(object):
("Fri", "Friday"),
("Sat", "Saturday"),
("Sun", "Sunday")]
MONTHS = [("Jan", "January"),
MONTHS = [("Jan", "January"),
("Feb", "February"),
("Mar", "March"),
("Apr", "April"),
@ -184,7 +189,7 @@ class parserinfo(object):
("Jun", "June"),
("Jul", "July"),
("Aug", "August"),
("Sep", "September"),
("Sep", "Sept", "September"),
("Oct", "October"),
("Nov", "November"),
("Dec", "December")]
@ -197,7 +202,7 @@ class parserinfo(object):
PERTAIN = ["of"]
TZOFFSET = {}
def __init__(self, dayfirst=False, yearfirst=False):
def __init__(self, dayfirst = False, yearfirst = False):
self._jump = self._convert(self.JUMP)
self._weekdays = self._convert(self.WEEKDAYS)
self._months = self._convert(self.MONTHS)
@ -210,7 +215,7 @@ class parserinfo(object):
self.yearfirst = yearfirst
self._year = time.localtime().tm_year
self._century = self._year//100*100
self._century = self._year // 100 * 100
def _convert(self, lst):
dct = {}
@ -237,7 +242,7 @@ class parserinfo(object):
def month(self, name):
if len(name) >= 3:
try:
return self._months[name.lower()]+1
return self._months[name.lower()] + 1
except KeyError:
pass
return None
@ -268,7 +273,7 @@ class parserinfo(object):
def convertyear(self, year):
if year < 100:
year += self._century
if abs(year-self._year) >= 50:
if abs(year - self._year) >= 50:
if year < self._year:
year += 100
else:
@ -289,18 +294,18 @@ class parserinfo(object):
class parser(object):
def __init__(self, info=None):
def __init__(self, info = None):
self.info = info or parserinfo()
def parse(self, timestr, default=None,
ignoretz=False, tzinfos=None,
def parse(self, timestr, default = None,
ignoretz = False, tzinfos = None,
**kwargs):
if not default:
default = datetime.datetime.now().replace(hour=0, minute=0,
second=0, microsecond=0)
default = datetime.datetime.now().replace(hour = 0, minute = 0,
second = 0, microsecond = 0)
res = self._parse(timestr, **kwargs)
if res is None:
raise ValueError, "unknown string format"
raise ValueError("unknown string format")
repl = {}
for attr in ["year", "month", "day", "hour",
"minute", "second", "microsecond"]:
@ -309,29 +314,29 @@ class parser(object):
repl[attr] = value
ret = default.replace(**repl)
if res.weekday is not None and not res.day:
ret = ret+relativedelta.relativedelta(weekday=res.weekday)
ret = ret + relativedelta.relativedelta(weekday = res.weekday)
if not ignoretz:
if callable(tzinfos) or tzinfos and res.tzname in tzinfos:
if callable(tzinfos):
if isinstance(tzinfos, collections.Callable) or tzinfos and res.tzname in tzinfos:
if isinstance(tzinfos, collections.Callable):
tzdata = tzinfos(res.tzname, res.tzoffset)
else:
tzdata = tzinfos.get(res.tzname)
if isinstance(tzdata, datetime.tzinfo):
tzinfo = tzdata
elif isinstance(tzdata, basestring):
elif isinstance(tzdata, text_type):
tzinfo = tz.tzstr(tzdata)
elif isinstance(tzdata, int):
elif isinstance(tzdata, integer_types):
tzinfo = tz.tzoffset(res.tzname, tzdata)
else:
raise ValueError, "offset must be tzinfo subclass, " \
"tz string, or int offset"
ret = ret.replace(tzinfo=tzinfo)
raise ValueError("offset must be tzinfo subclass, " \
"tz string, or int offset")
ret = ret.replace(tzinfo = tzinfo)
elif res.tzname and res.tzname in time.tzname:
ret = ret.replace(tzinfo=tz.tzlocal())
ret = ret.replace(tzinfo = tz.tzlocal())
elif res.tzoffset == 0:
ret = ret.replace(tzinfo=tz.tzutc())
ret = ret.replace(tzinfo = tz.tzutc())
elif res.tzoffset:
ret = ret.replace(tzinfo=tz.tzoffset(res.tzname, res.tzoffset))
ret = ret.replace(tzinfo = tz.tzoffset(res.tzname, res.tzoffset))
return ret
class _result(_resultbase):
@ -339,7 +344,7 @@ class parser(object):
"hour", "minute", "second", "microsecond",
"tzname", "tzoffset"]
def _parse(self, timestr, dayfirst=None, yearfirst=None, fuzzy=False):
def _parse(self, timestr, dayfirst = None, yearfirst = None, fuzzy = False):
info = self.info
if dayfirst is None:
dayfirst = info.dayfirst
@ -374,14 +379,14 @@ class parser(object):
and (i >= len_l or (l[i] != ':' and
info.hms(l[i]) is None))):
# 19990101T23[59]
s = l[i-1]
s = l[i - 1]
res.hour = int(s[:2])
if len_li == 4:
res.minute = int(s[2:])
elif len_li == 6 or (len_li > 6 and l[i-1].find('.') == 6):
elif len_li == 6 or (len_li > 6 and l[i - 1].find('.') == 6):
# YYMMDD or HHMMSS[.ss]
s = l[i-1]
if not ymd and l[i-1].find('.') == -1:
s = l[i - 1]
if not ymd and l[i - 1].find('.') == -1:
ymd.append(info.convertyear(int(s[:2])))
ymd.append(int(s[2:4]))
ymd.append(int(s[4:]))
@ -392,13 +397,13 @@ class parser(object):
res.second, res.microsecond = _parsems(s[4:])
elif len_li == 8:
# YYYYMMDD
s = l[i-1]
s = l[i - 1]
ymd.append(int(s[:4]))
ymd.append(int(s[4:6]))
ymd.append(int(s[6:]))
elif len_li in (12, 14):
# YYYYMMDDhhmm[ss]
s = l[i-1]
s = l[i - 1]
ymd.append(int(s[:4]))
ymd.append(int(s[4:6]))
ymd.append(int(s[6:8]))
@ -407,8 +412,8 @@ class parser(object):
if len_li == 14:
res.second = int(s[12:])
elif ((i < len_l and info.hms(l[i]) is not None) or
(i+1 < len_l and l[i] == ' ' and
info.hms(l[i+1]) is not None)):
(i + 1 < len_l and l[i] == ' ' and
info.hms(l[i + 1]) is not None)):
# HH[ ]h or MM[ ]m or SS[.ss][ ]s
if l[i] == ' ':
i += 1
@ -416,12 +421,12 @@ class parser(object):
while True:
if idx == 0:
res.hour = int(value)
if value%1:
res.minute = int(60*(value%1))
if value % 1:
res.minute = int(60 * (value % 1))
elif idx == 1:
res.minute = int(value)
if value%1:
res.second = int(60*(value%1))
if value % 1:
res.second = int(60 * (value % 1))
elif idx == 2:
res.second, res.microsecond = \
_parsems(value_repr)
@ -441,17 +446,28 @@ class parser(object):
newidx = info.hms(l[i])
if newidx is not None:
idx = newidx
elif i+1 < len_l and l[i] == ':':
elif i == len_l and l[i - 2] == ' ' and info.hms(l[i - 3]) is not None:
# X h MM or X m SS
idx = info.hms(l[i - 3]) + 1
if idx == 1:
res.minute = int(value)
if value % 1:
res.second = int(60 * (value % 1))
elif idx == 2:
res.second, res.microsecond = \
_parsems(value_repr)
i += 1
elif i + 1 < len_l and l[i] == ':':
# HH:MM[:SS[.ss]]
res.hour = int(value)
i += 1
value = float(l[i])
res.minute = int(value)
if value%1:
res.second = int(60*(value%1))
if value % 1:
res.second = int(60 * (value % 1))
i += 1
if i < len_l and l[i] == ':':
res.second, res.microsecond = _parsems(l[i+1])
res.second, res.microsecond = _parsems(l[i + 1])
i += 2
elif i < len_l and l[i] in ('-', '/', '.'):
sep = l[i]
@ -467,7 +483,7 @@ class parser(object):
if value is not None:
ymd.append(value)
assert mstridx == -1
mstridx = len(ymd)-1
mstridx = len(ymd) - 1
else:
return None
i += 1
@ -477,18 +493,18 @@ class parser(object):
value = info.month(l[i])
if value is not None:
ymd.append(value)
mstridx = len(ymd)-1
mstridx = len(ymd) - 1
assert mstridx == -1
else:
ymd.append(int(l[i]))
i += 1
elif i >= len_l or info.jump(l[i]):
if i+1 < len_l and info.ampm(l[i+1]) is not None:
if i + 1 < len_l and info.ampm(l[i + 1]) is not None:
# 12 am
res.hour = int(value)
if res.hour < 12 and info.ampm(l[i+1]) == 1:
if res.hour < 12 and info.ampm(l[i + 1]) == 1:
res.hour += 12
elif res.hour == 12 and info.ampm(l[i+1]) == 0:
elif res.hour == 12 and info.ampm(l[i + 1]) == 0:
res.hour = 0
i += 1
else:
@ -521,7 +537,7 @@ class parser(object):
if value is not None:
ymd.append(value)
assert mstridx == -1
mstridx = len(ymd)-1
mstridx = len(ymd) - 1
i += 1
if i < len_l:
if l[i] in ('-', '/'):
@ -535,12 +551,12 @@ class parser(object):
i += 1
ymd.append(int(l[i]))
i += 1
elif (i+3 < len_l and l[i] == l[i+2] == ' '
and info.pertain(l[i+1])):
elif (i + 3 < len_l and l[i] == l[i + 2] == ' '
and info.pertain(l[i + 1])):
# Jan of 01
# In this case, 01 is clearly year
try:
value = int(l[i+3])
value = int(l[i + 3])
except ValueError:
# Wrong guess
pass
@ -585,32 +601,32 @@ class parser(object):
# Check for a numbered timezone
if res.hour is not None and l[i] in ('+', '-'):
signal = (-1,1)[l[i] == '+']
signal = (-1, 1)[l[i] == '+']
i += 1
len_li = len(l[i])
if len_li == 4:
# -0300
res.tzoffset = int(l[i][:2])*3600+int(l[i][2:])*60
elif i+1 < len_l and l[i+1] == ':':
res.tzoffset = int(l[i][:2]) * 3600 + int(l[i][2:]) * 60
elif i + 1 < len_l and l[i + 1] == ':':
# -03:00
res.tzoffset = int(l[i])*3600+int(l[i+2])*60
res.tzoffset = int(l[i]) * 3600 + int(l[i + 2]) * 60
i += 2
elif len_li <= 2:
# -[0]3
res.tzoffset = int(l[i][:2])*3600
res.tzoffset = int(l[i][:2]) * 3600
else:
return None
i += 1
res.tzoffset *= signal
# Look for a timezone name between parenthesis
if (i+3 < len_l and
info.jump(l[i]) and l[i+1] == '(' and l[i+3] == ')' and
3 <= len(l[i+2]) <= 5 and
not [x for x in l[i+2]
if (i + 3 < len_l and
info.jump(l[i]) and l[i + 1] == '(' and l[i + 3] == ')' and
3 <= len(l[i + 2]) <= 5 and
not [x for x in l[i + 2]
if x not in string.ascii_uppercase]):
# -0300 (BRST)
res.tzname = l[i+2]
res.tzname = l[i + 2]
i += 4
continue
@ -690,7 +706,12 @@ class parser(object):
return res
DEFAULTPARSER = parser()
def parse(timestr, parserinfo=None, **kwargs):
def parse(timestr, parserinfo = None, **kwargs):
# Python 2.x support: datetimes return their string presentation as
# bytes in 2.x and unicode in 3.x, so it's reasonable to expect that
# the parser will get both kinds. Internally we use unicode only.
if isinstance(timestr, binary_type):
timestr = timestr.decode()
if parserinfo:
return parser(parserinfo).parse(timestr, **kwargs)
else:
@ -743,7 +764,7 @@ class _tzparser(object):
if l[i] in ('+', '-'):
# Yes, that's right. See the TZ variable
# documentation.
signal = (1,-1)[l[i] == '+']
signal = (1, -1)[l[i] == '+']
i += 1
else:
signal = -1
@ -751,16 +772,16 @@ class _tzparser(object):
if len_li == 4:
# -0300
setattr(res, offattr,
(int(l[i][:2])*3600+int(l[i][2:])*60)*signal)
elif i+1 < len_l and l[i+1] == ':':
(int(l[i][:2]) * 3600 + int(l[i][2:]) * 60) * signal)
elif i + 1 < len_l and l[i + 1] == ':':
# -03:00
setattr(res, offattr,
(int(l[i])*3600+int(l[i+2])*60)*signal)
(int(l[i]) * 3600 + int(l[i + 2]) * 60) * signal)
i += 2
elif len_li <= 2:
# -[0]3
setattr(res, offattr,
int(l[i][:2])*3600*signal)
int(l[i][:2]) * 3600 * signal)
else:
return None
i += 1
@ -787,29 +808,29 @@ class _tzparser(object):
x.month = int(l[i])
i += 2
if l[i] == '-':
value = int(l[i+1])*-1
value = int(l[i + 1]) * -1
i += 1
else:
value = int(l[i])
i += 2
if value:
x.week = value
x.weekday = (int(l[i])-1)%7
x.weekday = (int(l[i]) - 1) % 7
else:
x.day = int(l[i])
i += 2
x.time = int(l[i])
i += 2
if i < len_l:
if l[i] in ('-','+'):
signal = (-1,1)[l[i] == "+"]
if l[i] in ('-', '+'):
signal = (-1, 1)[l[i] == "+"]
i += 1
else:
signal = 1
res.dstoffset = (res.stdoffset+int(l[i]))*signal
res.dstoffset = (res.stdoffset + int(l[i])) * signal
elif (l.count(',') == 2 and l[i:].count('/') <= 2 and
not [y for x in l[i:] if x not in (',','/','J','M',
'.','-',':')
not [y for x in l[i:] if x not in (',', '/', 'J', 'M',
'.', '-', ':')
for y in x if y not in "0123456789"]):
for x in (res.start, res.end):
if l[i] == 'J':
@ -829,10 +850,10 @@ class _tzparser(object):
i += 1
assert l[i] in ('-', '.')
i += 1
x.weekday = (int(l[i])-1)%7
x.weekday = (int(l[i]) - 1) % 7
else:
# year day (zero based)
x.yday = int(l[i])+1
x.yday = int(l[i]) + 1
i += 1
@ -842,17 +863,17 @@ class _tzparser(object):
len_li = len(l[i])
if len_li == 4:
# -0300
x.time = (int(l[i][:2])*3600+int(l[i][2:])*60)
elif i+1 < len_l and l[i+1] == ':':
x.time = (int(l[i][:2]) * 3600 + int(l[i][2:]) * 60)
elif i + 1 < len_l and l[i + 1] == ':':
# -03:00
x.time = int(l[i])*3600+int(l[i+2])*60
x.time = int(l[i]) * 3600 + int(l[i + 2]) * 60
i += 2
if i+1 < len_l and l[i+1] == ':':
if i + 1 < len_l and l[i + 1] == ':':
i += 2
x.time += int(l[i])
elif len_li <= 2:
# -[0]3
x.time = (int(l[i][:2])*3600)
x.time = (int(l[i][:2]) * 3600)
else:
return None
i += 1
@ -865,7 +886,7 @@ class _tzparser(object):
except (IndexError, ValueError, AssertionError):
return None
return res
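Per the Python 2.x support note added to parse() above, the parser now accepts both text and byte strings and decodes bytes before tokenizing. A minimal usage sketch, not part of the commit:

from dateutil import parser

parser.parse("2012-01-19 17:21:00")     # datetime.datetime(2012, 1, 19, 17, 21)
parser.parse(b"2012-01-19 17:21:00")    # byte input, decoded internally to the same result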

122
libs/dateutil/relativedelta.py

@ -1,15 +1,16 @@
"""
Copyright (c) 2003-2010 Gustavo Niemeyer <gustavo@niemeyer.net>
This module offers extensions to the standard python 2.3+
This module offers extensions to the standard Python
datetime module.
"""
__author__ = "Gustavo Niemeyer <gustavo@niemeyer.net>"
__license__ = "PSF License"
__license__ = "Simplified BSD"
import datetime
import calendar
from six import integer_types
__all__ = ["relativedelta", "MO", "TU", "WE", "TH", "FR", "SA", "SU"]
class weekday(object):
@ -42,7 +43,7 @@ class weekday(object):
MO, TU, WE, TH, FR, SA, SU = weekdays = tuple([weekday(x) for x in range(7)])
class relativedelta:
class relativedelta(object):
"""
The relativedelta type is based on the specification of the excellent
work done by M.-A. Lemburg in his mx.DateTime extension. However,
@ -113,10 +114,9 @@ Here is the behavior of operations with relativedelta:
yearday=None, nlyearday=None,
hour=None, minute=None, second=None, microsecond=None):
if dt1 and dt2:
if not isinstance(dt1, datetime.date) or \
not isinstance(dt2, datetime.date):
raise TypeError, "relativedelta only diffs datetime/date"
if type(dt1) is not type(dt2):
if (not isinstance(dt1, datetime.date)) or (not isinstance(dt2, datetime.date)):
raise TypeError("relativedelta only diffs datetime/date")
if not type(dt1) == type(dt2): #isinstance(dt1, type(dt2)):
if not isinstance(dt1, datetime.datetime):
dt1 = datetime.datetime.fromordinal(dt1.toordinal())
elif not isinstance(dt2, datetime.datetime):
@ -172,7 +172,7 @@ Here is the behavior of operations with relativedelta:
self.second = second
self.microsecond = microsecond
if type(weekday) is int:
if isinstance(weekday, integer_types):
self.weekday = weekdays[weekday]
else:
self.weekday = weekday
@ -185,7 +185,7 @@ Here is the behavior of operations with relativedelta:
if yearday > 59:
self.leapdays = -1
if yday:
ydayidx = [31,59,90,120,151,181,212,243,273,304,334,366]
ydayidx = [31, 59, 90, 120, 151, 181, 212, 243, 273, 304, 334, 366]
for idx, ydays in enumerate(ydayidx):
if yday <= ydays:
self.month = idx+1
@ -195,7 +195,7 @@ Here is the behavior of operations with relativedelta:
self.day = yday-ydayidx[idx-1]
break
else:
raise ValueError, "invalid year day (%d)" % yday
raise ValueError("invalid year day (%d)" % yday)
self._fix()
@ -242,9 +242,26 @@ Here is the behavior of operations with relativedelta:
else:
self.years = 0
def __radd__(self, other):
def __add__(self, other):
if isinstance(other, relativedelta):
return relativedelta(years=other.years+self.years,
months=other.months+self.months,
days=other.days+self.days,
hours=other.hours+self.hours,
minutes=other.minutes+self.minutes,
seconds=other.seconds+self.seconds,
microseconds=other.microseconds+self.microseconds,
leapdays=other.leapdays or self.leapdays,
year=other.year or self.year,
month=other.month or self.month,
day=other.day or self.day,
weekday=other.weekday or self.weekday,
hour=other.hour or self.hour,
minute=other.minute or self.minute,
second=other.second or self.second,
microsecond=other.microsecond or self.microsecond)
if not isinstance(other, datetime.date):
raise TypeError, "unsupported type for add operation"
raise TypeError("unsupported type for add operation")
elif self._has_time and not isinstance(other, datetime.datetime):
other = datetime.datetime.fromordinal(other.toordinal())
year = (self.year or other.year)+self.years
@ -285,48 +302,31 @@ Here is the behavior of operations with relativedelta:
ret += datetime.timedelta(days=jumpdays)
return ret
def __radd__(self, other):
return self.__add__(other)
def __rsub__(self, other):
return self.__neg__().__radd__(other)
def __add__(self, other):
if not isinstance(other, relativedelta):
raise TypeError, "unsupported type for add operation"
return relativedelta(years=other.years+self.years,
months=other.months+self.months,
days=other.days+self.days,
hours=other.hours+self.hours,
minutes=other.minutes+self.minutes,
seconds=other.seconds+self.seconds,
microseconds=other.microseconds+self.microseconds,
leapdays=other.leapdays or self.leapdays,
year=other.year or self.year,
month=other.month or self.month,
day=other.day or self.day,
weekday=other.weekday or self.weekday,
hour=other.hour or self.hour,
minute=other.minute or self.minute,
second=other.second or self.second,
microsecond=other.second or self.microsecond)
def __sub__(self, other):
if not isinstance(other, relativedelta):
raise TypeError, "unsupported type for sub operation"
return relativedelta(years=other.years-self.years,
months=other.months-self.months,
days=other.days-self.days,
hours=other.hours-self.hours,
minutes=other.minutes-self.minutes,
seconds=other.seconds-self.seconds,
microseconds=other.microseconds-self.microseconds,
leapdays=other.leapdays or self.leapdays,
year=other.year or self.year,
month=other.month or self.month,
day=other.day or self.day,
weekday=other.weekday or self.weekday,
hour=other.hour or self.hour,
minute=other.minute or self.minute,
second=other.second or self.second,
microsecond=other.second or self.microsecond)
raise TypeError("unsupported type for sub operation")
return relativedelta(years=self.years-other.years,
months=self.months-other.months,
days=self.days-other.days,
hours=self.hours-other.hours,
minutes=self.minutes-other.minutes,
seconds=self.seconds-other.seconds,
microseconds=self.microseconds-other.microseconds,
leapdays=self.leapdays or other.leapdays,
year=self.year or other.year,
month=self.month or other.month,
day=self.day or other.day,
weekday=self.weekday or other.weekday,
hour=self.hour or other.hour,
minute=self.minute or other.minute,
second=self.second or other.second,
microsecond=self.microsecond or other.microsecond)
def __neg__(self):
return relativedelta(years=-self.years,
@ -346,7 +346,7 @@ Here is the behavior of operations with relativedelta:
second=self.second,
microsecond=self.microsecond)
def __nonzero__(self):
def __bool__(self):
return not (not self.years and
not self.months and
not self.days and
@ -366,13 +366,13 @@ Here is the behavior of operations with relativedelta:
def __mul__(self, other):
f = float(other)
return relativedelta(years=self.years*f,
months=self.months*f,
days=self.days*f,
hours=self.hours*f,
minutes=self.minutes*f,
seconds=self.seconds*f,
microseconds=self.microseconds*f,
return relativedelta(years=int(self.years*f),
months=int(self.months*f),
days=int(self.days*f),
hours=int(self.hours*f),
minutes=int(self.minutes*f),
seconds=int(self.seconds*f),
microseconds=int(self.microseconds*f),
leapdays=self.leapdays,
year=self.year,
month=self.month,
@ -383,6 +383,8 @@ Here is the behavior of operations with relativedelta:
second=self.second,
microsecond=self.microsecond)
__rmul__ = __mul__
def __eq__(self, other):
if not isinstance(other, relativedelta):
return False
@ -415,6 +417,8 @@ Here is the behavior of operations with relativedelta:
def __div__(self, other):
return self.__mul__(1/float(other))
__truediv__ = __div__
def __repr__(self):
l = []
for attr in ["years", "months", "days", "leapdays",
@ -426,7 +430,7 @@ Here is the behavior of operations with relativedelta:
"hour", "minute", "second", "microsecond"]:
value = getattr(self, attr)
if value is not None:
l.append("%s=%s" % (attr, `value`))
l.append("%s=%s" % (attr, repr(value)))
return "%s(%s)" % (self.__class__.__name__, ", ".join(l))
# vim:ts=4:sw=4:et
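The relocated __add__ above also combines two relativedelta instances field by field, in addition to the existing date/datetime arithmetic. A short usage sketch, illustration only:

from dateutil.relativedelta import relativedelta

combined = relativedelta(months=1) + relativedelta(days=3)
# combined == relativedelta(months=+1, days=+3)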

131
libs/dateutil/rrule.py

@ -1,18 +1,22 @@
"""
Copyright (c) 2003-2010 Gustavo Niemeyer <gustavo@niemeyer.net>
This module offers extensions to the standard python 2.3+
This module offers extensions to the standard Python
datetime module.
"""
__author__ = "Gustavo Niemeyer <gustavo@niemeyer.net>"
__license__ = "PSF License"
__license__ = "Simplified BSD"
import itertools
import datetime
import calendar
import thread
try:
import _thread
except ImportError:
import thread as _thread
import sys
from six import advance_iterator, integer_types
__all__ = ["rrule", "rruleset", "rrulestr",
"YEARLY", "MONTHLY", "WEEKLY", "DAILY",
"HOURLY", "MINUTELY", "SECONDLY",
@ -22,15 +26,15 @@ __all__ = ["rrule", "rruleset", "rrulestr",
M366MASK = tuple([1]*31+[2]*29+[3]*31+[4]*30+[5]*31+[6]*30+
[7]*31+[8]*31+[9]*30+[10]*31+[11]*30+[12]*31+[1]*7)
M365MASK = list(M366MASK)
M29, M30, M31 = range(1,30), range(1,31), range(1,32)
M29, M30, M31 = list(range(1, 30)), list(range(1, 31)), list(range(1, 32))
MDAY366MASK = tuple(M31+M29+M31+M30+M31+M30+M31+M31+M30+M31+M30+M31+M31[:7])
MDAY365MASK = list(MDAY366MASK)
M29, M30, M31 = range(-29,0), range(-30,0), range(-31,0)
M29, M30, M31 = list(range(-29, 0)), list(range(-30, 0)), list(range(-31, 0))
NMDAY366MASK = tuple(M31+M29+M31+M30+M31+M30+M31+M31+M30+M31+M30+M31+M31[:7])
NMDAY365MASK = list(NMDAY366MASK)
M366RANGE = (0,31,60,91,121,152,182,213,244,274,305,335,366)
M365RANGE = (0,31,59,90,120,151,181,212,243,273,304,334,365)
WDAYMASK = [0,1,2,3,4,5,6]*55
M366RANGE = (0, 31, 60, 91, 121, 152, 182, 213, 244, 274, 305, 335, 366)
M365RANGE = (0, 31, 59, 90, 120, 151, 181, 212, 243, 273, 304, 334, 365)
WDAYMASK = [0, 1, 2, 3, 4, 5, 6]*55
del M29, M30, M31, M365MASK[59], MDAY365MASK[59], NMDAY365MASK[31]
MDAY365MASK = tuple(MDAY365MASK)
M365MASK = tuple(M365MASK)
@ -41,7 +45,7 @@ M365MASK = tuple(M365MASK)
DAILY,
HOURLY,
MINUTELY,
SECONDLY) = range(7)
SECONDLY) = list(range(7))
# Imported on demand.
easter = None
@ -52,7 +56,7 @@ class weekday(object):
def __init__(self, weekday, n=None):
if n == 0:
raise ValueError, "Can't create weekday with n == 0"
raise ValueError("Can't create weekday with n == 0")
self.weekday = weekday
self.n = n
@ -79,11 +83,11 @@ class weekday(object):
MO, TU, WE, TH, FR, SA, SU = weekdays = tuple([weekday(x) for x in range(7)])
class rrulebase:
class rrulebase(object):
def __init__(self, cache=False):
if cache:
self._cache = []
self._cache_lock = thread.allocate_lock()
self._cache_lock = _thread.allocate_lock()
self._cache_gen = self._iter()
self._cache_complete = False
else:
@ -112,7 +116,7 @@ class rrulebase:
break
try:
for j in range(10):
cache.append(gen.next())
cache.append(advance_iterator(gen))
except StopIteration:
self._cache_gen = gen = None
self._cache_complete = True
@ -133,13 +137,13 @@ class rrulebase:
else:
return list(itertools.islice(self,
item.start or 0,
item.stop or sys.maxint,
item.stop or sys.maxsize,
item.step or 1))
elif item >= 0:
gen = iter(self)
try:
for i in range(item+1):
res = gen.next()
res = advance_iterator(gen)
except StopIteration:
raise IndexError
return res
@ -232,7 +236,7 @@ class rrule(rrulebase):
byweekno=None, byweekday=None,
byhour=None, byminute=None, bysecond=None,
cache=False):
rrulebase.__init__(self, cache)
super(rrule, self).__init__(cache)
global easter
if not dtstart:
dtstart = datetime.datetime.now().replace(microsecond=0)
@ -250,13 +254,13 @@ class rrule(rrulebase):
self._until = until
if wkst is None:
self._wkst = calendar.firstweekday()
elif type(wkst) is int:
elif isinstance(wkst, integer_types):
self._wkst = wkst
else:
self._wkst = wkst.weekday
if bysetpos is None:
self._bysetpos = None
elif type(bysetpos) is int:
elif isinstance(bysetpos, integer_types):
if bysetpos == 0 or not (-366 <= bysetpos <= 366):
raise ValueError("bysetpos must be between 1 and 366, "
"or between -366 and -1")
@ -280,14 +284,14 @@ class rrule(rrulebase):
# bymonth
if not bymonth:
self._bymonth = None
elif type(bymonth) is int:
elif isinstance(bymonth, integer_types):
self._bymonth = (bymonth,)
else:
self._bymonth = tuple(bymonth)
# byyearday
if not byyearday:
self._byyearday = None
elif type(byyearday) is int:
elif isinstance(byyearday, integer_types):
self._byyearday = (byyearday,)
else:
self._byyearday = tuple(byyearday)
@ -295,7 +299,7 @@ class rrule(rrulebase):
if byeaster is not None:
if not easter:
from dateutil import easter
if type(byeaster) is int:
if isinstance(byeaster, integer_types):
self._byeaster = (byeaster,)
else:
self._byeaster = tuple(byeaster)
@ -305,7 +309,7 @@ class rrule(rrulebase):
if not bymonthday:
self._bymonthday = ()
self._bynmonthday = ()
elif type(bymonthday) is int:
elif isinstance(bymonthday, integer_types):
if bymonthday < 0:
self._bynmonthday = (bymonthday,)
self._bymonthday = ()
@ -318,7 +322,7 @@ class rrule(rrulebase):
# byweekno
if byweekno is None:
self._byweekno = None
elif type(byweekno) is int:
elif isinstance(byweekno, integer_types):
self._byweekno = (byweekno,)
else:
self._byweekno = tuple(byweekno)
@ -326,7 +330,7 @@ class rrule(rrulebase):
if byweekday is None:
self._byweekday = None
self._bynweekday = None
elif type(byweekday) is int:
elif isinstance(byweekday, integer_types):
self._byweekday = (byweekday,)
self._bynweekday = None
elif hasattr(byweekday, "n"):
@ -340,7 +344,7 @@ class rrule(rrulebase):
self._byweekday = []
self._bynweekday = []
for wday in byweekday:
if type(wday) is int:
if isinstance(wday, integer_types):
self._byweekday.append(wday)
elif not wday.n or freq > MONTHLY:
self._byweekday.append(wday.weekday)
@ -358,7 +362,7 @@ class rrule(rrulebase):
self._byhour = (dtstart.hour,)
else:
self._byhour = None
elif type(byhour) is int:
elif isinstance(byhour, integer_types):
self._byhour = (byhour,)
else:
self._byhour = tuple(byhour)
@ -368,7 +372,7 @@ class rrule(rrulebase):
self._byminute = (dtstart.minute,)
else:
self._byminute = None
elif type(byminute) is int:
elif isinstance(byminute, integer_types):
self._byminute = (byminute,)
else:
self._byminute = tuple(byminute)
@ -378,7 +382,7 @@ class rrule(rrulebase):
self._bysecond = (dtstart.second,)
else:
self._bysecond = None
elif type(bysecond) is int:
elif isinstance(bysecond, integer_types):
self._bysecond = (bysecond,)
else:
self._bysecond = tuple(bysecond)
@ -716,7 +720,7 @@ class _iterinfo(object):
# days from last year's last week number in
# this year.
if -1 not in rr._byweekno:
lyearweekday = datetime.date(year-1,1,1).weekday()
lyearweekday = datetime.date(year-1, 1, 1).weekday()
lno1wkst = (7-lyearweekday+rr._wkst)%7
lyearlen = 365+calendar.isleap(year-1)
if lno1wkst >= 4:
@ -768,7 +772,7 @@ class _iterinfo(object):
self.lastmonth = month
def ydayset(self, year, month, day):
return range(self.yearlen), 0, self.yearlen
return list(range(self.yearlen)), 0, self.yearlen
def mdayset(self, year, month, day):
set = [None]*self.yearlen
@ -823,27 +827,38 @@ class _iterinfo(object):
class rruleset(rrulebase):
class _genitem:
class _genitem(object):
def __init__(self, genlist, gen):
try:
self.dt = gen()
self.dt = advance_iterator(gen)
genlist.append(self)
except StopIteration:
pass
self.genlist = genlist
self.gen = gen
def next(self):
def __next__(self):
try:
self.dt = self.gen()
self.dt = advance_iterator(self.gen)
except StopIteration:
self.genlist.remove(self)
def __cmp__(self, other):
return cmp(self.dt, other.dt)
next = __next__
def __lt__(self, other):
return self.dt < other.dt
def __gt__(self, other):
return self.dt > other.dt
def __eq__(self, other):
return self.dt == other.dt
def __ne__(self, other):
return self.dt != other.dt
def __init__(self, cache=False):
rrulebase.__init__(self, cache)
super(rruleset, self).__init__(cache)
self._rrule = []
self._rdate = []
self._exrule = []
@ -851,7 +866,7 @@ class rruleset(rrulebase):
def rrule(self, rrule):
self._rrule.append(rrule)
def rdate(self, rdate):
self._rdate.append(rdate)
@ -864,14 +879,14 @@ class rruleset(rrulebase):
def _iter(self):
rlist = []
self._rdate.sort()
self._genitem(rlist, iter(self._rdate).next)
for gen in [iter(x).next for x in self._rrule]:
self._genitem(rlist, iter(self._rdate))
for gen in [iter(x) for x in self._rrule]:
self._genitem(rlist, gen)
rlist.sort()
exlist = []
self._exdate.sort()
self._genitem(exlist, iter(self._exdate).next)
for gen in [iter(x).next for x in self._exrule]:
self._genitem(exlist, iter(self._exdate))
for gen in [iter(x) for x in self._exrule]:
self._genitem(exlist, gen)
exlist.sort()
lastdt = None
@ -880,17 +895,17 @@ class rruleset(rrulebase):
ritem = rlist[0]
if not lastdt or lastdt != ritem.dt:
while exlist and exlist[0] < ritem:
exlist[0].next()
advance_iterator(exlist[0])
exlist.sort()
if not exlist or ritem != exlist[0]:
total += 1
yield ritem.dt
lastdt = ritem.dt
ritem.next()
advance_iterator(ritem)
rlist.sort()
self._len = total
class _rrulestr:
class _rrulestr(object):
_freq_map = {"YEARLY": YEARLY,
"MONTHLY": MONTHLY,
@ -932,7 +947,7 @@ class _rrulestr:
ignoretz=kwargs.get("ignoretz"),
tzinfos=kwargs.get("tzinfos"))
except ValueError:
raise ValueError, "invalid until date"
raise ValueError("invalid until date")
def _handle_WKST(self, rrkwargs, name, value, **kwargs):
rrkwargs["wkst"] = self._weekday_map[value]
@ -959,7 +974,7 @@ class _rrulestr:
if line.find(':') != -1:
name, value = line.split(':')
if name != "RRULE":
raise ValueError, "unknown parameter name"
raise ValueError("unknown parameter name")
else:
value = line
rrkwargs = {}
@ -972,9 +987,9 @@ class _rrulestr:
ignoretz=ignoretz,
tzinfos=tzinfos)
except AttributeError:
raise ValueError, "unknown parameter '%s'" % name
raise ValueError("unknown parameter '%s'" % name)
except (KeyError, ValueError):
raise ValueError, "invalid '%s': %s" % (name, value)
raise ValueError("invalid '%s': %s" % (name, value))
return rrule(dtstart=dtstart, cache=cache, **rrkwargs)
def _parse_rfc(self, s,
@ -991,7 +1006,7 @@ class _rrulestr:
unfold = True
s = s.upper()
if not s.strip():
raise ValueError, "empty string"
raise ValueError("empty string")
if unfold:
lines = s.splitlines()
i = 0
@ -1026,36 +1041,36 @@ class _rrulestr:
name, value = line.split(':', 1)
parms = name.split(';')
if not parms:
raise ValueError, "empty property name"
raise ValueError("empty property name")
name = parms[0]
parms = parms[1:]
if name == "RRULE":
for parm in parms:
raise ValueError, "unsupported RRULE parm: "+parm
raise ValueError("unsupported RRULE parm: "+parm)
rrulevals.append(value)
elif name == "RDATE":
for parm in parms:
if parm != "VALUE=DATE-TIME":
raise ValueError, "unsupported RDATE parm: "+parm
raise ValueError("unsupported RDATE parm: "+parm)
rdatevals.append(value)
elif name == "EXRULE":
for parm in parms:
raise ValueError, "unsupported EXRULE parm: "+parm
raise ValueError("unsupported EXRULE parm: "+parm)
exrulevals.append(value)
elif name == "EXDATE":
for parm in parms:
if parm != "VALUE=DATE-TIME":
raise ValueError, "unsupported RDATE parm: "+parm
raise ValueError("unsupported RDATE parm: "+parm)
exdatevals.append(value)
elif name == "DTSTART":
for parm in parms:
raise ValueError, "unsupported DTSTART parm: "+parm
raise ValueError("unsupported DTSTART parm: "+parm)
if not parser:
from dateutil import parser
dtstart = parser.parse(value, ignoretz=ignoretz,
tzinfos=tzinfos)
else:
raise ValueError, "unsupported property: "+name
raise ValueError("unsupported property: "+name)
if (forceset or len(rrulevals) > 1 or
rdatevals or exrulevals or exdatevals):
if not parser and (rdatevals or exdatevals):

129
libs/dateutil/tz.py

@ -1,11 +1,12 @@
"""
Copyright (c) 2003-2007 Gustavo Niemeyer <gustavo@niemeyer.net>
This module offers extensions to the standard python 2.3+
This module offers extensions to the standard Python
datetime module.
"""
__author__ = "Gustavo Niemeyer <gustavo@niemeyer.net>"
__license__ = "PSF License"
__license__ = "Simplified BSD"
from six import string_types, PY3
import datetime
import struct
@ -25,6 +26,19 @@ try:
except (ImportError, OSError):
tzwin, tzwinlocal = None, None
def tzname_in_python2(myfunc):
"""Change unicode output into bytestrings in Python 2
tzname() API changed in Python 3. It used to return bytes, but was changed
to unicode strings
"""
def inner_func(*args, **kwargs):
if PY3:
return myfunc(*args, **kwargs)
else:
return myfunc(*args, **kwargs).encode()
return inner_func
ZERO = datetime.timedelta(0)
EPOCHORDINAL = datetime.datetime.utcfromtimestamp(0).toordinal()
@ -36,6 +50,7 @@ class tzutc(datetime.tzinfo):
def dst(self, dt):
return ZERO
@tzname_in_python2
def tzname(self, dt):
return "UTC"
@ -63,6 +78,7 @@ class tzoffset(datetime.tzinfo):
def dst(self, dt):
return ZERO
@tzname_in_python2
def tzname(self, dt):
return self._name
@ -75,7 +91,7 @@ class tzoffset(datetime.tzinfo):
def __repr__(self):
return "%s(%s, %s)" % (self.__class__.__name__,
`self._name`,
repr(self._name),
self._offset.days*86400+self._offset.seconds)
__reduce__ = object.__reduce__
@ -100,6 +116,7 @@ class tzlocal(datetime.tzinfo):
else:
return ZERO
@tzname_in_python2
def tzname(self, dt):
return time.tzname[self._isdst(dt)]
@ -161,7 +178,7 @@ class _ttinfo(object):
for attr in self.__slots__:
value = getattr(self, attr)
if value is not None:
l.append("%s=%s" % (attr, `value`))
l.append("%s=%s" % (attr, repr(value)))
return "%s(%s)" % (self.__class__.__name__, ", ".join(l))
def __eq__(self, other):
@ -191,16 +208,16 @@ class _ttinfo(object):
class tzfile(datetime.tzinfo):
# http://www.twinsun.com/tz/tz-link.htm
# ftp://elsie.nci.nih.gov/pub/tz*.tar.gz
# ftp://ftp.iana.org/tz/tz*.tar.gz
def __init__(self, fileobj):
if isinstance(fileobj, basestring):
if isinstance(fileobj, string_types):
self._filename = fileobj
fileobj = open(fileobj)
fileobj = open(fileobj, 'rb')
elif hasattr(fileobj, "name"):
self._filename = fileobj.name
else:
self._filename = `fileobj`
self._filename = repr(fileobj)
# From tzfile(5):
#
@ -212,8 +229,8 @@ class tzfile(datetime.tzinfo):
# ``standard'' byte order (the high-order byte
# of the value is written first).
if fileobj.read(4) != "TZif":
raise ValueError, "magic not found"
if fileobj.read(4).decode() != "TZif":
raise ValueError("magic not found")
fileobj.read(16)
@ -284,7 +301,7 @@ class tzfile(datetime.tzinfo):
for i in range(typecnt):
ttinfo.append(struct.unpack(">lbb", fileobj.read(6)))
abbr = fileobj.read(charcnt)
abbr = fileobj.read(charcnt).decode()
# Then there are tzh_leapcnt pairs of four-byte
# values, written in standard byte order; the
@ -360,7 +377,7 @@ class tzfile(datetime.tzinfo):
if not self._trans_list:
self._ttinfo_std = self._ttinfo_first = self._ttinfo_list[0]
else:
for i in range(timecnt-1,-1,-1):
for i in range(timecnt-1, -1, -1):
tti = self._trans_idx[i]
if not self._ttinfo_std and not tti.isdst:
self._ttinfo_std = tti
@ -448,6 +465,7 @@ class tzfile(datetime.tzinfo):
# dst offset, so I believe that this wouldn't be the right
# way to implement this.
@tzname_in_python2
def tzname(self, dt):
if not self._ttinfo_std:
return None
@ -465,11 +483,11 @@ class tzfile(datetime.tzinfo):
def __repr__(self):
return "%s(%s)" % (self.__class__.__name__, `self._filename`)
return "%s(%s)" % (self.__class__.__name__, repr(self._filename))
def __reduce__(self):
if not os.path.isfile(self._filename):
raise ValueError, "Unpickable %s class" % self.__class__.__name__
raise ValueError("Unpickable %s class" % self.__class__.__name__)
return (self.__class__, (self._filename,))
class tzrange(datetime.tzinfo):
@ -515,6 +533,7 @@ class tzrange(datetime.tzinfo):
else:
return ZERO
@tzname_in_python2
def tzname(self, dt):
if self._isdst(dt):
return self._dst_abbr
@ -524,7 +543,7 @@ class tzrange(datetime.tzinfo):
def _isdst(self, dt):
if not self._start_delta:
return False
year = datetime.datetime(dt.year,1,1)
year = datetime.datetime(dt.year, 1, 1)
start = year+self._start_delta
end = year+self._end_delta
dt = dt.replace(tzinfo=None)
@ -561,7 +580,7 @@ class tzstr(tzrange):
res = parser._parsetz(s)
if res is None:
raise ValueError, "unknown string format"
raise ValueError("unknown string format")
# Here we break the compatibility with the TZ variable handling.
# GMT-3 actually *means* the timezone -3.
@ -624,9 +643,9 @@ class tzstr(tzrange):
return relativedelta.relativedelta(**kwargs)
def __repr__(self):
return "%s(%s)" % (self.__class__.__name__, `self._s`)
return "%s(%s)" % (self.__class__.__name__, repr(self._s))
class _tzicalvtzcomp:
class _tzicalvtzcomp(object):
def __init__(self, tzoffsetfrom, tzoffsetto, isdst,
tzname=None, rrule=None):
self.tzoffsetfrom = datetime.timedelta(seconds=tzoffsetfrom)
@ -690,51 +709,52 @@ class _tzicalvtz(datetime.tzinfo):
else:
return ZERO
@tzname_in_python2
def tzname(self, dt):
return self._find_comp(dt).tzname
def __repr__(self):
return "<tzicalvtz %s>" % `self._tzid`
return "<tzicalvtz %s>" % repr(self._tzid)
__reduce__ = object.__reduce__
class tzical:
class tzical(object):
def __init__(self, fileobj):
global rrule
if not rrule:
from dateutil import rrule
if isinstance(fileobj, basestring):
if isinstance(fileobj, string_types):
self._s = fileobj
fileobj = open(fileobj)
fileobj = open(fileobj, 'r') # ical should be encoded in UTF-8 with CRLF
elif hasattr(fileobj, "name"):
self._s = fileobj.name
else:
self._s = `fileobj`
self._s = repr(fileobj)
self._vtz = {}
self._parse_rfc(fileobj.read())
def keys(self):
return self._vtz.keys()
return list(self._vtz.keys())
def get(self, tzid=None):
if tzid is None:
keys = self._vtz.keys()
keys = list(self._vtz.keys())
if len(keys) == 0:
raise ValueError, "no timezones defined"
raise ValueError("no timezones defined")
elif len(keys) > 1:
raise ValueError, "more than one timezone available"
raise ValueError("more than one timezone available")
tzid = keys[0]
return self._vtz.get(tzid)
def _parse_offset(self, s):
s = s.strip()
if not s:
raise ValueError, "empty offset"
raise ValueError("empty offset")
if s[0] in ('+', '-'):
signal = (-1,+1)[s[0]=='+']
signal = (-1, +1)[s[0]=='+']
s = s[1:]
else:
signal = +1
@ -743,12 +763,12 @@ class tzical:
elif len(s) == 6:
return (int(s[:2])*3600+int(s[2:4])*60+int(s[4:]))*signal
else:
raise ValueError, "invalid offset: "+s
raise ValueError("invalid offset: "+s)
def _parse_rfc(self, s):
lines = s.splitlines()
if not lines:
raise ValueError, "empty string"
raise ValueError("empty string")
# Unfold
i = 0
@ -772,7 +792,7 @@ class tzical:
name, value = line.split(':', 1)
parms = name.split(';')
if not parms:
raise ValueError, "empty property name"
raise ValueError("empty property name")
name = parms[0].upper()
parms = parms[1:]
if invtz:
@ -781,7 +801,7 @@ class tzical:
# Process component
pass
else:
raise ValueError, "unknown component: "+value
raise ValueError("unknown component: "+value)
comptype = value
founddtstart = False
tzoffsetfrom = None
@ -791,27 +811,21 @@ class tzical:
elif name == "END":
if value == "VTIMEZONE":
if comptype:
raise ValueError, \
"component not closed: "+comptype
raise ValueError("component not closed: "+comptype)
if not tzid:
raise ValueError, \
"mandatory TZID not found"
raise ValueError("mandatory TZID not found")
if not comps:
raise ValueError, \
"at least one component is needed"
raise ValueError("at least one component is needed")
# Process vtimezone
self._vtz[tzid] = _tzicalvtz(tzid, comps)
invtz = False
elif value == comptype:
if not founddtstart:
raise ValueError, \
"mandatory DTSTART not found"
raise ValueError("mandatory DTSTART not found")
if tzoffsetfrom is None:
raise ValueError, \
"mandatory TZOFFSETFROM not found"
raise ValueError("mandatory TZOFFSETFROM not found")
if tzoffsetto is None:
raise ValueError, \
"mandatory TZOFFSETFROM not found"
raise ValueError("mandatory TZOFFSETFROM not found")
# Process component
rr = None
if rrulelines:
@ -825,8 +839,7 @@ class tzical:
comps.append(comp)
comptype = None
else:
raise ValueError, \
"invalid component end: "+value
raise ValueError("invalid component end: "+value)
elif comptype:
if name == "DTSTART":
rrulelines.append(line)
@ -835,40 +848,36 @@ class tzical:
rrulelines.append(line)
elif name == "TZOFFSETFROM":
if parms:
raise ValueError, \
"unsupported %s parm: %s "%(name, parms[0])
raise ValueError("unsupported %s parm: %s "%(name, parms[0]))
tzoffsetfrom = self._parse_offset(value)
elif name == "TZOFFSETTO":
if parms:
raise ValueError, \
"unsupported TZOFFSETTO parm: "+parms[0]
raise ValueError("unsupported TZOFFSETTO parm: "+parms[0])
tzoffsetto = self._parse_offset(value)
elif name == "TZNAME":
if parms:
raise ValueError, \
"unsupported TZNAME parm: "+parms[0]
raise ValueError("unsupported TZNAME parm: "+parms[0])
tzname = value
elif name == "COMMENT":
pass
else:
raise ValueError, "unsupported property: "+name
raise ValueError("unsupported property: "+name)
else:
if name == "TZID":
if parms:
raise ValueError, \
"unsupported TZID parm: "+parms[0]
raise ValueError("unsupported TZID parm: "+parms[0])
tzid = value
elif name in ("TZURL", "LAST-MODIFIED", "COMMENT"):
pass
else:
raise ValueError, "unsupported property: "+name
raise ValueError("unsupported property: "+name)
elif name == "BEGIN" and value == "VTIMEZONE":
tzid = None
comps = []
invtz = True
def __repr__(self):
return "%s(%s)" % (self.__class__.__name__, `self._s`)
return "%s(%s)" % (self.__class__.__name__, repr(self._s))
if sys.platform != "win32":
TZFILES = ["/etc/localtime", "localtime"]
@ -914,7 +923,7 @@ def gettz(name=None):
for path in TZPATHS:
filepath = os.path.join(path, name)
if not os.path.isfile(filepath):
filepath = filepath.replace(' ','_')
filepath = filepath.replace(' ', '_')
if not os.path.isfile(filepath):
continue
try:

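With the tzname_in_python2 decorator added in tz.py above, tzname() keeps returning native bytestrings on Python 2 while Python 3 callers get unicode, so both see the same literal. A minimal usage sketch, not part of the commit:

from dateutil.tz import tzutc, tzoffset

tzutc().tzname(None)                     # 'UTC'
tzoffset("BRST", -10800).tzname(None)    # 'BRST'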
31
libs/dateutil/tzwin.py

@ -1,9 +1,8 @@
# This code was originally contributed by Jeffrey Harris.
import datetime
import struct
import _winreg
import winreg
__author__ = "Jeffrey Harris & Gustavo Niemeyer <gustavo@niemeyer.net>"
__all__ = ["tzwin", "tzwinlocal"]
@ -15,9 +14,9 @@ TZLOCALKEYNAME = r"SYSTEM\CurrentControlSet\Control\TimeZoneInformation"
def _settzkeyname():
global TZKEYNAME
handle = _winreg.ConnectRegistry(None, _winreg.HKEY_LOCAL_MACHINE)
handle = winreg.ConnectRegistry(None, winreg.HKEY_LOCAL_MACHINE)
try:
_winreg.OpenKey(handle, TZKEYNAMENT).Close()
winreg.OpenKey(handle, TZKEYNAMENT).Close()
TZKEYNAME = TZKEYNAMENT
except WindowsError:
TZKEYNAME = TZKEYNAME9X
@ -49,10 +48,10 @@ class tzwinbase(datetime.tzinfo):
def list():
"""Return a list of all time zones known to the system."""
handle = _winreg.ConnectRegistry(None, _winreg.HKEY_LOCAL_MACHINE)
tzkey = _winreg.OpenKey(handle, TZKEYNAME)
result = [_winreg.EnumKey(tzkey, i)
for i in range(_winreg.QueryInfoKey(tzkey)[0])]
handle = winreg.ConnectRegistry(None, winreg.HKEY_LOCAL_MACHINE)
tzkey = winreg.OpenKey(handle, TZKEYNAME)
result = [winreg.EnumKey(tzkey, i)
for i in range(winreg.QueryInfoKey(tzkey)[0])]
tzkey.Close()
handle.Close()
return result
@ -79,8 +78,8 @@ class tzwin(tzwinbase):
def __init__(self, name):
self._name = name
handle = _winreg.ConnectRegistry(None, _winreg.HKEY_LOCAL_MACHINE)
tzkey = _winreg.OpenKey(handle, "%s\%s" % (TZKEYNAME, name))
handle = winreg.ConnectRegistry(None, winreg.HKEY_LOCAL_MACHINE)
tzkey = winreg.OpenKey(handle, "%s\%s" % (TZKEYNAME, name))
keydict = valuestodict(tzkey)
tzkey.Close()
handle.Close()
@ -118,9 +117,9 @@ class tzwinlocal(tzwinbase):
def __init__(self):
handle = _winreg.ConnectRegistry(None, _winreg.HKEY_LOCAL_MACHINE)
handle = winreg.ConnectRegistry(None, winreg.HKEY_LOCAL_MACHINE)
tzlocalkey = _winreg.OpenKey(handle, TZLOCALKEYNAME)
tzlocalkey = winreg.OpenKey(handle, TZLOCALKEYNAME)
keydict = valuestodict(tzlocalkey)
tzlocalkey.Close()
@ -128,7 +127,7 @@ class tzwinlocal(tzwinbase):
self._dstname = keydict["DaylightName"].encode("iso-8859-1")
try:
tzkey = _winreg.OpenKey(handle, "%s\%s"%(TZKEYNAME, self._stdname))
tzkey = winreg.OpenKey(handle, "%s\%s"%(TZKEYNAME, self._stdname))
_keydict = valuestodict(tzkey)
self._display = _keydict["Display"]
tzkey.Close()
@ -165,7 +164,7 @@ def picknthweekday(year, month, dayofweek, hour, minute, whichweek):
"""dayofweek == 0 means Sunday, whichweek 5 means last instance"""
first = datetime.datetime(year, month, 1, hour, minute)
weekdayone = first.replace(day=((dayofweek-first.isoweekday())%7+1))
for n in xrange(whichweek):
for n in range(whichweek):
dt = weekdayone+(whichweek-n)*ONEWEEK
if dt.month == month:
return dt
@ -173,8 +172,8 @@ def picknthweekday(year, month, dayofweek, hour, minute, whichweek):
def valuestodict(key):
"""Convert a registry key's values to a dictionary."""
dict = {}
size = _winreg.QueryInfoKey(key)[1]
size = winreg.QueryInfoKey(key)[1]
for i in range(size):
data = _winreg.EnumValue(key, i)
data = winreg.EnumValue(key, i)
dict[data[0]] = data[1]
return dict

15
libs/dateutil/zoneinfo/__init__.py

@ -1,15 +1,16 @@
# -*- coding: utf-8 -*-
"""
Copyright (c) 2003-2005 Gustavo Niemeyer <gustavo@niemeyer.net>
This module offers extensions to the standard python 2.3+
This module offers extensions to the standard Python
datetime module.
"""
from dateutil.tz import tzfile
from tarfile import TarFile
import os
__author__ = "Gustavo Niemeyer <gustavo@niemeyer.net>"
__license__ = "PSF License"
__author__ = "Tomi Pieviläinen <tomi.pievilainen@iki.fi>"
__license__ = "Simplified BSD"
__all__ = ["setcachesize", "gettz", "rebuild"]
@ -21,8 +22,7 @@ class tzfile(tzfile):
return (gettz, (self._filename,))
def getzoneinfofile():
filenames = os.listdir(os.path.join(os.path.dirname(__file__)))
filenames.sort()
filenames = sorted(os.listdir(os.path.join(os.path.dirname(__file__))))
filenames.reverse()
for entry in filenames:
if entry.startswith("zoneinfo") and ".tar." in entry:
@ -66,7 +66,10 @@ def rebuild(filename, tag=None, format="gz"):
targetname = "zoneinfo%s.tar.%s" % (tag, format)
try:
tf = TarFile.open(filename)
for name in tf.getnames():
# The "backwards" zone file contains links to other files, so must be
# processed as last
for name in sorted(tf.getnames(),
key=lambda k: k != "backward" and k or "z"):
if not (name.endswith(".sh") or
name.endswith(".tab") or
name == "leapseconds"):
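The sort key in rebuild() above maps the "backward" zone file to "z" so that it sorts after the regular zone files and its links are processed last. An illustration of that key, not part of the commit:

names = ["africa", "backward", "europe", "northamerica"]
sorted(names, key=lambda k: k != "backward" and k or "z")
# -> ['africa', 'europe', 'northamerica', 'backward']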

BIN
libs/dateutil/zoneinfo/zoneinfo--latest.tar.gz

Binary file not shown.

BIN
libs/dateutil/zoneinfo/zoneinfo-2010g.tar.gz

Binary file not shown.

366
libs/six.py

@ -0,0 +1,366 @@
"""Utilities for writing code that runs on Python 2 and 3"""
import operator
import sys
import types
__author__ = "Benjamin Peterson <benjamin@python.org>"
__version__ = "1.2.0"
# True if we are running on Python 3.
PY3 = sys.version_info[0] == 3
if PY3:
string_types = str,
integer_types = int,
class_types = type,
text_type = str
binary_type = bytes
MAXSIZE = sys.maxsize
else:
string_types = basestring,
integer_types = (int, long)
class_types = (type, types.ClassType)
text_type = unicode
binary_type = str
if sys.platform == "java":
# Jython always uses 32 bits.
MAXSIZE = int((1 << 31) - 1)
else:
# It's possible to have sizeof(long) != sizeof(Py_ssize_t).
class X(object):
def __len__(self):
return 1 << 31
try:
len(X())
except OverflowError:
# 32-bit
MAXSIZE = int((1 << 31) - 1)
else:
# 64-bit
MAXSIZE = int((1 << 63) - 1)
del X
def _add_doc(func, doc):
"""Add documentation to a function."""
func.__doc__ = doc
def _import_module(name):
"""Import module, returning the module after the last dot."""
__import__(name)
return sys.modules[name]
class _LazyDescr(object):
def __init__(self, name):
self.name = name
def __get__(self, obj, tp):
result = self._resolve()
setattr(obj, self.name, result)
# This is a bit ugly, but it avoids running this again.
delattr(tp, self.name)
return result
class MovedModule(_LazyDescr):
def __init__(self, name, old, new=None):
super(MovedModule, self).__init__(name)
if PY3:
if new is None:
new = name
self.mod = new
else:
self.mod = old
def _resolve(self):
return _import_module(self.mod)
class MovedAttribute(_LazyDescr):
def __init__(self, name, old_mod, new_mod, old_attr=None, new_attr=None):
super(MovedAttribute, self).__init__(name)
if PY3:
if new_mod is None:
new_mod = name
self.mod = new_mod
if new_attr is None:
if old_attr is None:
new_attr = name
else:
new_attr = old_attr
self.attr = new_attr
else:
self.mod = old_mod
if old_attr is None:
old_attr = name
self.attr = old_attr
def _resolve(self):
module = _import_module(self.mod)
return getattr(module, self.attr)
class _MovedItems(types.ModuleType):
"""Lazy loading of moved objects"""
_moved_attributes = [
MovedAttribute("cStringIO", "cStringIO", "io", "StringIO"),
MovedAttribute("filter", "itertools", "builtins", "ifilter", "filter"),
MovedAttribute("input", "__builtin__", "builtins", "raw_input", "input"),
MovedAttribute("map", "itertools", "builtins", "imap", "map"),
MovedAttribute("reload_module", "__builtin__", "imp", "reload"),
MovedAttribute("reduce", "__builtin__", "functools"),
MovedAttribute("StringIO", "StringIO", "io"),
MovedAttribute("xrange", "__builtin__", "builtins", "xrange", "range"),
MovedAttribute("zip", "itertools", "builtins", "izip", "zip"),
MovedModule("builtins", "__builtin__"),
MovedModule("configparser", "ConfigParser"),
MovedModule("copyreg", "copy_reg"),
MovedModule("http_cookiejar", "cookielib", "http.cookiejar"),
MovedModule("http_cookies", "Cookie", "http.cookies"),
MovedModule("html_entities", "htmlentitydefs", "html.entities"),
MovedModule("html_parser", "HTMLParser", "html.parser"),
MovedModule("http_client", "httplib", "http.client"),
MovedModule("BaseHTTPServer", "BaseHTTPServer", "http.server"),
MovedModule("CGIHTTPServer", "CGIHTTPServer", "http.server"),
MovedModule("SimpleHTTPServer", "SimpleHTTPServer", "http.server"),
MovedModule("cPickle", "cPickle", "pickle"),
MovedModule("queue", "Queue"),
MovedModule("reprlib", "repr"),
MovedModule("socketserver", "SocketServer"),
MovedModule("tkinter", "Tkinter"),
MovedModule("tkinter_dialog", "Dialog", "tkinter.dialog"),
MovedModule("tkinter_filedialog", "FileDialog", "tkinter.filedialog"),
MovedModule("tkinter_scrolledtext", "ScrolledText", "tkinter.scrolledtext"),
MovedModule("tkinter_simpledialog", "SimpleDialog", "tkinter.simpledialog"),
MovedModule("tkinter_tix", "Tix", "tkinter.tix"),
MovedModule("tkinter_constants", "Tkconstants", "tkinter.constants"),
MovedModule("tkinter_dnd", "Tkdnd", "tkinter.dnd"),
MovedModule("tkinter_colorchooser", "tkColorChooser",
"tkinter.colorchooser"),
MovedModule("tkinter_commondialog", "tkCommonDialog",
"tkinter.commondialog"),
MovedModule("tkinter_tkfiledialog", "tkFileDialog", "tkinter.filedialog"),
MovedModule("tkinter_font", "tkFont", "tkinter.font"),
MovedModule("tkinter_messagebox", "tkMessageBox", "tkinter.messagebox"),
MovedModule("tkinter_tksimpledialog", "tkSimpleDialog",
"tkinter.simpledialog"),
MovedModule("urllib_robotparser", "robotparser", "urllib.robotparser"),
MovedModule("winreg", "_winreg"),
]
for attr in _moved_attributes:
setattr(_MovedItems, attr.name, attr)
del attr
moves = sys.modules["six.moves"] = _MovedItems("moves")
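# Example usage of the registered pseudo-module (the name resolves to httplib
# on Python 2 and http.client on Python 3):
#   from six.moves import http_client
#   conn = http_client.HTTPConnection("example.com")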
def add_move(move):
"""Add an item to six.moves."""
setattr(_MovedItems, move.name, move)
def remove_move(name):
"""Remove item from six.moves."""
try:
delattr(_MovedItems, name)
except AttributeError:
try:
del moves.__dict__[name]
except KeyError:
raise AttributeError("no such move, %r" % (name,))
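# Sketch of extending the moves table with a custom entry; "UserDict" is only
# an illustration (Python 2 keeps it in the UserDict module, Python 3 in
# collections):
#   add_move(MovedAttribute("UserDict", "UserDict", "collections"))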
if PY3:
_meth_func = "__func__"
_meth_self = "__self__"
_func_code = "__code__"
_func_defaults = "__defaults__"
_iterkeys = "keys"
_itervalues = "values"
_iteritems = "items"
else:
_meth_func = "im_func"
_meth_self = "im_self"
_func_code = "func_code"
_func_defaults = "func_defaults"
_iterkeys = "iterkeys"
_itervalues = "itervalues"
_iteritems = "iteritems"
try:
advance_iterator = next
except NameError:
def advance_iterator(it):
return it.next()
next = advance_iterator
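# advance_iterator(iter([1, 2])) returns 1 on both Python 2 and Python 3,
# whether or not the interpreter ships a next() builtin.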
if PY3:
def get_unbound_function(unbound):
return unbound
Iterator = object
def callable(obj):
return any("__call__" in klass.__dict__ for klass in type(obj).__mro__)
else:
def get_unbound_function(unbound):
return unbound.im_func
class Iterator(object):
def next(self):
return type(self).__next__(self)
callable = callable
_add_doc(get_unbound_function,
"""Get the function out of a possibly unbound function""")
get_method_function = operator.attrgetter(_meth_func)
get_method_self = operator.attrgetter(_meth_self)
get_function_code = operator.attrgetter(_func_code)
get_function_defaults = operator.attrgetter(_func_defaults)
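# These accessors give version-neutral introspection, e.g.
# get_method_function(obj.method) returns the plain function behind a bound
# method and get_function_code(func) returns its code object on 2.x and 3.x.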
def iterkeys(d):
"""Return an iterator over the keys of a dictionary."""
return iter(getattr(d, _iterkeys)())
def itervalues(d):
"""Return an iterator over the values of a dictionary."""
return iter(getattr(d, _itervalues)())
def iteritems(d):
"""Return an iterator over the (key, value) pairs of a dictionary."""
return iter(getattr(d, _iteritems)())
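# Typical iteration that avoids building intermediate lists on Python 2:
#   for key, value in iteritems({"a": 1, "b": 2}):
#       handle(key, value)   # handle() is just a placeholder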
if PY3:
def b(s):
return s.encode("latin-1")
def u(s):
return s
if sys.version_info[1] <= 1:
def int2byte(i):
return bytes((i,))
else:
# This is about 2x faster than the implementation above on 3.2+
int2byte = operator.methodcaller("to_bytes", 1, "big")
import io
StringIO = io.StringIO
BytesIO = io.BytesIO
else:
def b(s):
return s
def u(s):
return unicode(s, "unicode_escape")
int2byte = chr
import StringIO
StringIO = BytesIO = StringIO.StringIO
_add_doc(b, """Byte literal""")
_add_doc(u, """Text literal""")
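# b("raw") always yields a byte string and u("text") always yields a text
# (unicode) string, so literals can be written once for both interpreters.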
if PY3:
import builtins
exec_ = getattr(builtins, "exec")
def reraise(tp, value, tb=None):
if value.__traceback__ is not tb:
raise value.with_traceback(tb)
raise value
print_ = getattr(builtins, "print")
del builtins
else:
def exec_(code, globs=None, locs=None):
"""Execute code in a namespace."""
if globs is None:
frame = sys._getframe(1)
globs = frame.f_globals
if locs is None:
locs = frame.f_locals
del frame
elif locs is None:
locs = globs
exec("""exec code in globs, locs""")
exec_("""def reraise(tp, value, tb=None):
raise tp, value, tb
""")
def print_(*args, **kwargs):
"""The new-style print function."""
fp = kwargs.pop("file", sys.stdout)
if fp is None:
return
def write(data):
if not isinstance(data, basestring):
data = str(data)
fp.write(data)
want_unicode = False
sep = kwargs.pop("sep", None)
if sep is not None:
if isinstance(sep, unicode):
want_unicode = True
elif not isinstance(sep, str):
raise TypeError("sep must be None or a string")
end = kwargs.pop("end", None)
if end is not None:
if isinstance(end, unicode):
want_unicode = True
elif not isinstance(end, str):
raise TypeError("end must be None or a string")
if kwargs:
raise TypeError("invalid keyword arguments to print()")
if not want_unicode:
for arg in args:
if isinstance(arg, unicode):
want_unicode = True
break
if want_unicode:
newline = unicode("\n")
space = unicode(" ")
else:
newline = "\n"
space = " "
if sep is None:
sep = space
if end is None:
end = newline
for i, arg in enumerate(args):
if i:
write(sep)
write(arg)
write(end)
_add_doc(reraise, """Reraise an exception.""")
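# Inside an except block, reraise(*sys.exc_info()) re-raises the active
# exception with its original traceback on both major versions, and
# print_("msg", file=sys.stderr) behaves like the Python 3 print function.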
def with_metaclass(meta, base=object):
"""Create a base class with a metaclass."""
return meta("NewBase", (base,), {})
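# Example of declaring a class with an explicit metaclass portably; Meta and
# Base stand in for whatever metaclass and base class the caller supplies:
#   class MyClass(with_metaclass(Meta, Base)):
#       pass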