Replaced urllib2 with requests for both TVDB and TVRage. Updated cache cleanup code to process both TVDB and TVRage cache folders.

Tag: release_0.1.0
42 changed files with 1821 additions and 363 deletions
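The cache cleanup change mentioned in the commit message lands in application files not reproduced below. A minimal sketch of what per-indexer cleanup can look like; the folder paths and helper name are hypothetical, not taken from the actual SickBeard code:

import os
import time

# hypothetical locations; the real folder names come from the app's config
CACHE_DIRS = ['cache/tvdb', 'cache/tvrage']

def clean_indexer_caches(max_age_days=7):
    """Remove cache files older than max_age_days from each indexer folder."""
    cutoff = time.time() - max_age_days * 24 * 3600
    for cache_dir in CACHE_DIRS:
        if not os.path.isdir(cache_dir):
            continue
        for name in os.listdir(cache_dir):
            path = os.path.join(cache_dir, name)
            if os.path.isfile(path) and os.path.getmtime(path) < cutoff:
                os.remove(path)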
@@ -0,0 +1,13 @@
"""CacheControl import Interface.

Make it easy to import from cachecontrol without long namespaces.
"""

# patch our requests.models.Response to make them pickleable in older
# versions of requests.

import cachecontrol.patch_requests

from cachecontrol.wrapper import CacheControl
from cachecontrol.adapter import CacheControlAdapter
from cachecontrol.controller import CacheController
@@ -0,0 +1,70 @@
from requests.adapters import HTTPAdapter

from cachecontrol.controller import CacheController
from cachecontrol.cache import DictCache


class CacheControlAdapter(HTTPAdapter):
    invalidating_methods = set(['PUT', 'DELETE'])

    def __init__(self, cache=None, cache_etags=True, *args, **kw):
        super(CacheControlAdapter, self).__init__(*args, **kw)
        self.cache = cache or DictCache()
        self.controller = CacheController(self.cache, cache_etags=cache_etags)

    def send(self, request, **kw):
        """Send a request. Use the request information to see if it
        exists in the cache.
        """
        if request.method == 'GET':
            cached_response = self.controller.cached_request(
                request.url, request.headers
            )
            if cached_response:
                # Cached responses should not have a raw field since
                # they *cannot* be created from some stream.
                cached_response.raw = None
                return cached_response

            # check for etags and add headers if appropriate
            headers = self.controller.add_headers(request.url)
            request.headers.update(headers)

        resp = super(CacheControlAdapter, self).send(request, **kw)
        return resp

    def build_response(self, request, response):
        """Build a response by making a request or using the cache.

        This will end up calling send and returning a potentially
        cached response
        """
        resp = super(CacheControlAdapter, self).build_response(
            request, response
        )

        # See if we should invalidate the cache.
        if request.method in self.invalidating_methods and resp.ok:
            cache_url = self.controller.cache_url(request.url)
            self.cache.delete(cache_url)

        # Try to store the response if it is a GET
        elif request.method == 'GET':
            if response.status == 304:
                # We must have sent an ETag request. This could mean
                # that we've been expired already or that we simply
                # have an etag. In either case, we want to try and
                # update the cache if that is the case.
                resp = self.controller.update_cached_response(
                    request, response
                )
            else:
                # try to cache the response
                self.controller.cache_response(request, resp)

        # Give the request a from_cache attr to let people use it
        # rather than testing for hasattr.
        if not hasattr(resp, 'from_cache'):
            resp.from_cache = False

        return resp
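A short usage sketch for the adapter above: mounting it on a requests Session routes matching traffic through the cache (the URL is taken from the indexer config elsewhere in this commit; any cacheable URL works):

import requests
from cachecontrol.adapter import CacheControlAdapter

sess = requests.Session()
sess.mount('http://', CacheControlAdapter())

# the first call hits the network; a repeat call within the response's
# freshness lifetime is answered from the cache
resp = sess.get('http://thetvdb.com/api/')
print(resp.from_cache)  # False on the first, uncached fetch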
@@ -0,0 +1,36 @@
"""
The cache object API for implementing caches. The default is just a
dictionary, which in turn means it is not threadsafe for writing.
"""
from threading import Lock


class BaseCache(object):

    def get(self, key):
        raise NotImplementedError()

    def set(self, key, value):
        raise NotImplementedError()

    def delete(self, key):
        raise NotImplementedError()


class DictCache(BaseCache):

    def __init__(self, init_dict=None):
        self.lock = Lock()
        self.data = init_dict or {}

    def get(self, key):
        return self.data.get(key, None)

    def set(self, key, value):
        with self.lock:
            self.data.update({key: value})

    def delete(self, key):
        with self.lock:
            if key in self.data:
                self.data.pop(key)
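Every cache backend in this commit implements the same three-method get/set/delete API, so they are interchangeable wherever CacheController expects a cache. For example:

from cachecontrol.cache import DictCache

cache = DictCache()
cache.set('http://example.com/feed', 'pickled-response-bytes')
assert cache.get('http://example.com/feed') == 'pickled-response-bytes'
cache.delete('http://example.com/feed')
assert cache.get('http://example.com/feed') is None  # missing keys yield None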
@@ -0,0 +1,18 @@
from textwrap import dedent

try:
    from cachecontrol.caches.file_cache import FileCache
except ImportError:
    notice = dedent('''
    NOTE: In order to use the FileCache you must have
    lockfile installed. You can install it via pip:
      pip install lockfile
    ''')
    print(notice)


try:
    import redis
    from cachecontrol.caches.redis_cache import RedisCache
except ImportError:
    pass
@@ -0,0 +1,43 @@
import os
import codecs

from hashlib import md5

try:
    from pickle import load, dump
except ImportError:
    from cPickle import load, dump

from lib.lockfile import FileLock


class FileCache(object):

    def __init__(self, directory, forever=False):
        self.directory = directory
        self.forever = forever

        if not os.path.isdir(self.directory):
            os.mkdir(self.directory)

    def encode(self, x):
        return md5(x.encode()).hexdigest()

    def _fn(self, name):
        return os.path.join(self.directory, self.encode(name))

    def get(self, key):
        name = self._fn(key)
        if os.path.exists(name):
            # use a context manager so the file handle is closed promptly
            with codecs.open(name, 'rb') as fh:
                return load(fh)

    def set(self, key, value):
        name = self._fn(key)
        lock = FileLock(name)
        with lock:
            with codecs.open(lock.path, 'w+b') as fh:
                dump(value, fh)

    def delete(self, key):
        if not self.forever:
            os.remove(self._fn(key))
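FileCache maps each key to an md5-named file directly under its directory, so the on-disk layout is flat and the key itself never appears on disk. A quick sketch (the directory name is arbitrary):

from cachecontrol.caches.file_cache import FileCache

cache = FileCache('.webcache')
cache.set('http://example.com/', {'body': 'cached'})
print(cache.get('http://example.com/'))   # {'body': 'cached'}
print(cache._fn('http://example.com/'))   # .webcache/<32 hex md5 digest>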
@@ -0,0 +1,46 @@
from __future__ import division

from datetime import datetime

try:
    from cPickle import loads, dumps
except ImportError:  # Python 3.x
    from pickle import loads, dumps


def total_seconds(td):
    """Python 2.6 compatibility"""
    if hasattr(td, 'total_seconds'):
        return td.total_seconds()

    ms = td.microseconds
    secs = (td.seconds + td.days * 24 * 3600)
    return (ms + secs * 10**6) / 10**6


class RedisCache(object):

    def __init__(self, conn):
        self.conn = conn

    def get(self, key):
        val = self.conn.get(key)
        if val:
            return loads(val)
        return None

    def set(self, key, value, expires=None):
        if not expires:
            self.conn.set(key, dumps(value))
        else:
            expires = expires - datetime.now()
            # pickle here too, since get() always unpickles
            self.conn.setex(key, total_seconds(expires), dumps(value))

    def delete(self, key):
        self.conn.delete(key)

    def clear(self):
        """Helper for clearing all the keys in a database. Use with
        caution!"""
        for key in self.conn.keys():
            self.conn.delete(key)
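Note that set() takes an absolute datetime for expiry and converts it to the relative TTL Redis SETEX expects. A sketch, assuming a Redis server on localhost:

from datetime import datetime, timedelta

import redis
from cachecontrol.caches.redis_cache import RedisCache

cache = RedisCache(redis.Redis(host='localhost', port=6379))
cache.set('key', {'value': 1},
          expires=datetime.now() + timedelta(minutes=5))
print(cache.get('key'))  # {'value': 1} until the 5-minute TTL lapses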
@@ -0,0 +1,12 @@
try:
    from urllib.parse import urljoin
except ImportError:
    from urlparse import urljoin


try:
    import email.utils
    parsedate_tz = email.utils.parsedate_tz
except ImportError:
    import email.Utils
    parsedate_tz = email.Utils.parsedate_tz
@@ -0,0 +1,247 @@
"""
The httplib2 algorithms ported for use with requests.
"""
import re
import calendar
import time

from cachecontrol.cache import DictCache
from cachecontrol.compat import parsedate_tz


URI = re.compile(r"^(([^:/?#]+):)?(//([^/?#]*))?([^?#]*)(\?([^#]*))?(#(.*))?")


def parse_uri(uri):
    """Parses a URI using the regex given in Appendix B of RFC 3986.

        (scheme, authority, path, query, fragment) = parse_uri(uri)
    """
    groups = URI.match(uri).groups()
    return (groups[1], groups[3], groups[4], groups[6], groups[8])


class CacheController(object):
    """An interface to see if a request should be cached or not.
    """
    def __init__(self, cache=None, cache_etags=True):
        self.cache = cache or DictCache()
        self.cache_etags = cache_etags

    def _urlnorm(self, uri):
        """Normalize the URL to create a safe key for the cache"""
        (scheme, authority, path, query, fragment) = parse_uri(uri)
        if not scheme or not authority:
            raise Exception("Only absolute URIs are allowed. uri = %s" % uri)
        authority = authority.lower()
        scheme = scheme.lower()
        if not path:
            path = "/"

        # Could do syntax based normalization of the URI before
        # computing the digest. See Section 6.2.2 of Std 66.
        request_uri = query and "?".join([path, query]) or path
        defrag_uri = scheme + "://" + authority + request_uri

        return defrag_uri

    def cache_url(self, uri):
        return self._urlnorm(uri)

    def parse_cache_control(self, headers):
        """
        Parse the cache control headers returning a dictionary with values
        for the different directives.
        """
        retval = {}

        cc_header = 'cache-control'
        if 'Cache-Control' in headers:
            cc_header = 'Cache-Control'

        if cc_header in headers:
            parts = headers[cc_header].split(',')
            parts_with_args = [
                tuple([x.strip().lower() for x in part.split("=", 1)])
                for part in parts if -1 != part.find("=")]
            parts_wo_args = [(name.strip().lower(), 1)
                             for name in parts if -1 == name.find("=")]
            retval = dict(parts_with_args + parts_wo_args)
        return retval

    def cached_request(self, url, headers):
        cache_url = self.cache_url(url)
        cc = self.parse_cache_control(headers)

        # non-caching states
        no_cache = True if 'no-cache' in cc else False
        # parse_cache_control returns string values, so compare to '0'
        if 'max-age' in cc and cc['max-age'] == '0':
            no_cache = True

        # see if it is in the cache anyways
        in_cache = self.cache.get(cache_url)
        if no_cache or not in_cache:
            return False

        # It is in the cache, so lets see if it is going to be
        # fresh enough
        resp = self.cache.get(cache_url)

        # Check our Vary header to make sure our request headers match
        # up. We don't delete it from the cache though, we just don't
        # return our cached value.
        #
        # NOTE: Because httplib2 stores raw content, it denotes
        #       headers that were sent in the original response by
        #       adding -varied-$name. We don't have to do that b/c we
        #       are storing the object which has a reference to the
        #       original request. If that changes, then I'd propose
        #       using the varied headers in the cache key to avoid the
        #       situation all together.
        if 'vary' in resp.headers:
            varied_headers = resp.headers['vary'].replace(' ', '').split(',')
            original_headers = resp.request.headers
            for header in varied_headers:
                # If our headers don't match for the headers listed in
                # the vary header, then don't use the cached response
                if headers.get(header, None) != original_headers.get(header):
                    return False

        now = time.time()
        date = calendar.timegm(
            parsedate_tz(resp.headers['date'])
        )
        current_age = max(0, now - date)

        # TODO: There is an assumption that the result will be a
        #       requests response object. This may not be best since we
        #       could probably avoid instantiating or constructing the
        #       response until we know we need it.
        resp_cc = self.parse_cache_control(resp.headers)

        # determine freshness
        freshness_lifetime = 0
        if 'max-age' in resp_cc and resp_cc['max-age'].isdigit():
            freshness_lifetime = int(resp_cc['max-age'])
        elif 'expires' in resp.headers:
            expires = parsedate_tz(resp.headers['expires'])
            if expires is not None:
                expire_time = calendar.timegm(expires) - date
                freshness_lifetime = max(0, expire_time)

        # determine if we are setting freshness limit in the req
        if 'max-age' in cc:
            try:
                freshness_lifetime = int(cc['max-age'])
            except ValueError:
                freshness_lifetime = 0

        if 'min-fresh' in cc:
            try:
                min_fresh = int(cc['min-fresh'])
            except ValueError:
                min_fresh = 0
            # adjust our current age by our min fresh
            current_age += min_fresh

        # see how fresh we actually are
        fresh = (freshness_lifetime > current_age)

        if fresh:
            # make sure we set the from_cache to true
            resp.from_cache = True
            return resp

        # we're not fresh. If we don't have an Etag, clear it out
        if 'etag' not in resp.headers:
            self.cache.delete(cache_url)

        if 'etag' in resp.headers:
            headers['If-None-Match'] = resp.headers['ETag']

        if 'last-modified' in resp.headers:
            headers['If-Modified-Since'] = resp.headers['Last-Modified']

        # return the original handler
        return False

    def add_headers(self, url):
        resp = self.cache.get(url)
        if resp and 'etag' in resp.headers:
            return {'If-None-Match': resp.headers['etag']}
        return {}

    def cache_response(self, request, resp):
        """
        Algorithm for caching requests.

        This assumes a requests Response object.
        """
        # From httplib2: Don't cache 206's since we aren't going to
        # handle byte range requests
        if resp.status_code not in [200, 203]:
            return

        cc_req = self.parse_cache_control(request.headers)
        cc = self.parse_cache_control(resp.headers)

        cache_url = self.cache_url(request.url)

        # Delete it from the cache if we happen to have it stored there
        no_store = cc.get('no-store') or cc_req.get('no-store')
        if no_store and self.cache.get(cache_url):
            self.cache.delete(cache_url)

        # If we've been given an etag, then keep the response
        if self.cache_etags and 'etag' in resp.headers:
            self.cache.set(cache_url, resp)

        # Add to the cache if the response headers demand it. If there
        # is no date header then we can't do anything about expiring
        # the cache.
        elif 'date' in resp.headers:
            # cache when there is a max-age > 0
            if cc and cc.get('max-age'):
                if int(cc['max-age']) > 0:
                    self.cache.set(cache_url, resp)

            # If the request can expire, it means we should cache it
            # in the meantime.
            elif 'expires' in resp.headers:
                if resp.headers['expires']:
                    self.cache.set(cache_url, resp)

    def update_cached_response(self, request, response):
        """On a 304 we will get a new set of headers that we want to
        update our cached value with, assuming we have one.

        This should only ever be called when we've sent an ETag and
        gotten a 304 as the response.
        """
        cache_url = self.cache_url(request.url)

        resp = self.cache.get(cache_url)

        if not resp:
            # we didn't have a cached response
            return response

        # did so lets update our headers with the 304's headers
        resp.headers.update(response.headers)

        # we want a 200 b/c we have content via the cache
        resp.status_code = 200

        # update the request as it has the if-none-match header + any
        # other headers that the server might have updated (ie Date,
        # Cache-Control, Expires, etc.)
        resp.request = request

        # update our cache
        self.cache.set(cache_url, resp)

        # Let everyone know this was from the cache.
        resp.from_cache = True

        return resp
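The freshness check in cached_request reduces to one comparison: a response is served from cache while freshness_lifetime > current_age. The same arithmetic, worked standalone (the headers are made up for illustration):

import calendar
import time
from email.utils import parsedate_tz

# suppose the cached response carried these headers
headers = {'date': 'Mon, 10 Feb 2014 12:00:00 GMT',
           'cache-control': 'max-age=300'}

date = calendar.timegm(parsedate_tz(headers['date']))
current_age = max(0, time.time() - date)   # seconds since the server's Date
freshness_lifetime = 300                   # from max-age

# served from cache only while less than 300 seconds old
fresh = freshness_lifetime > current_age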
@@ -0,0 +1,56 @@
import requests

from requests import models
from requests.packages.urllib3.response import HTTPResponse

__attrs__ = [
    '_content',
    'status_code',
    'headers',
    'url',
    'history',
    'encoding',
    'reason',
    'cookies',
    'elapsed',
]


def response_getstate(self):
    # consume everything
    if not self._content_consumed:
        self.content

    state = dict(
        (attr, getattr(self, attr, None))
        for attr in __attrs__
    )

    # deal with our raw content b/c we need it for our cookie jar
    state['raw_original_response'] = self.raw._original_response
    return state


def response_setstate(self, state):
    for name, value in state.items():
        if name != 'raw_original_response':
            setattr(self, name, value)

    setattr(self, 'raw', HTTPResponse())
    self.raw._original_response = state['raw_original_response']


def make_responses_pickleable():
    try:
        version_parts = [int(part) for part in requests.__version__.split('.')]

        # must be >= 2.2.x
        if not version_parts[0] >= 2 or not version_parts[1] >= 2:
            models.Response.__getstate__ = response_getstate
            models.Response.__setstate__ = response_setstate
    except Exception:
        # unparsable version string; leave Response un-patched
        pass


make_responses_pickleable()
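With the patch applied (requests older than 2.2), a Response should survive a pickle round trip, which is what lets FileCache and RedisCache store it. A quick sanity check, assuming the patched code path works for the response at hand:

import pickle
import requests

resp = requests.get('http://thetvdb.com/api/')
restored = pickle.loads(pickle.dumps(resp))
assert restored.status_code == resp.status_code
assert restored.content == resp.content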
@@ -0,0 +1,10 @@
from cachecontrol.adapter import CacheControlAdapter
from cachecontrol.cache import DictCache


def CacheControl(sess, cache=None, cache_etags=True):
    cache = cache or DictCache()
    adapter = CacheControlAdapter(cache, cache_etags=cache_etags)
    sess.mount('http://', adapter)

    return sess
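The wrapper is the one-line entry point for callers. Note it only mounts the adapter for http://, so https:// traffic is not cached by this version. A usage sketch with the file backend:

import requests
from cachecontrol import CacheControl
from cachecontrol.caches.file_cache import FileCache

# wrap an ordinary session; cached entries persist across runs in .webcache
sess = CacheControl(requests.Session(), cache=FileCache('.webcache'))
resp = sess.get('http://thetvdb.com/api/')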
Binary file not shown.
@@ -0,0 +1,317 @@
"""
lockfile.py - Platform-independent advisory file locks.

Requires Python 2.5 unless you apply 2.4.diff
Locking is done on a per-thread basis instead of a per-process basis.

Usage:

>>> lock = LockFile('somefile')
>>> try:
...     lock.acquire()
... except AlreadyLocked:
...     print 'somefile', 'is locked already.'
... except LockFailed:
...     print 'somefile', 'can\\'t be locked.'
... else:
...     print 'got lock'
got lock
>>> print lock.is_locked()
True
>>> lock.release()

>>> lock = LockFile('somefile')
>>> print lock.is_locked()
False
>>> with lock:
...    print lock.is_locked()
True
>>> print lock.is_locked()
False

>>> lock = LockFile('somefile')
>>> # It is okay to lock twice from the same thread...
>>> with lock:
...     lock.acquire()
...
>>> # Though no counter is kept, so you can't unlock multiple times...
>>> print lock.is_locked()
False

Exceptions:

    Error - base class for other exceptions
        LockError - base class for all locking exceptions
            AlreadyLocked - Another thread or process already holds the lock
            LockFailed - Lock failed for some other reason
        UnlockError - base class for all unlocking exceptions
            AlreadyUnlocked - File was not locked.
            NotMyLock - File was locked but not by the current thread/process
"""

from __future__ import absolute_import

import sys
import socket
import os
import threading
import time
import urllib
import warnings
import functools

# Work with PEP8 and non-PEP8 versions of threading module.
if not hasattr(threading, "current_thread"):
    threading.current_thread = threading.currentThread
if not hasattr(threading.Thread, "get_name"):
    threading.Thread.get_name = threading.Thread.getName

__all__ = ['Error', 'LockError', 'LockTimeout', 'AlreadyLocked',
           'LockFailed', 'UnlockError', 'NotLocked', 'NotMyLock',
           'LinkLockFile', 'MkdirLockFile', 'SQLiteLockFile',
           'LockBase', 'locked']


class Error(Exception):
    """
    Base class for other exceptions.

    >>> try:
    ...   raise Error
    ... except Exception:
    ...   pass
    """
    pass


class LockError(Error):
    """
    Base class for error arising from attempts to acquire the lock.

    >>> try:
    ...   raise LockError
    ... except Error:
    ...   pass
    """
    pass


class LockTimeout(LockError):
    """Raised when lock creation fails within a user-defined period of time.

    >>> try:
    ...   raise LockTimeout
    ... except LockError:
    ...   pass
    """
    pass


class AlreadyLocked(LockError):
    """Some other thread/process is locking the file.

    >>> try:
    ...   raise AlreadyLocked
    ... except LockError:
    ...   pass
    """
    pass


class LockFailed(LockError):
    """Lock file creation failed for some other reason.

    >>> try:
    ...   raise LockFailed
    ... except LockError:
    ...   pass
    """
    pass


class UnlockError(Error):
    """
    Base class for errors arising from attempts to release the lock.

    >>> try:
    ...   raise UnlockError
    ... except Error:
    ...   pass
    """
    pass


class NotLocked(UnlockError):
    """Raised when an attempt is made to unlock an unlocked file.

    >>> try:
    ...   raise NotLocked
    ... except UnlockError:
    ...   pass
    """
    pass


class NotMyLock(UnlockError):
    """Raised when an attempt is made to unlock a file someone else locked.

    >>> try:
    ...   raise NotMyLock
    ... except UnlockError:
    ...   pass
    """
    pass


class LockBase:
    """Base class for platform-specific lock classes."""
    def __init__(self, path, threaded=True, timeout=None):
        """
        >>> lock = LockBase('somefile')
        >>> lock = LockBase('somefile', threaded=False)
        """
        self.path = path
        self.lock_file = os.path.abspath(path) + ".lock"
        self.hostname = socket.gethostname()
        self.pid = os.getpid()
        if threaded:
            t = threading.current_thread()
            # Thread objects in Python 2.4 and earlier do not have ident
            # attrs.  Worm around that.
            ident = getattr(t, "ident", hash(t))
            self.tname = "-%x" % (ident & 0xffffffff)
        else:
            self.tname = ""
        dirname = os.path.dirname(self.lock_file)
        self.unique_name = os.path.join(dirname,
                                        "%s%s.%s" % (self.hostname,
                                                     self.tname,
                                                     self.pid))
        self.timeout = timeout

    def acquire(self, timeout=None):
        """
        Acquire the lock.

        * If timeout is omitted (or None), wait forever trying to lock the
          file.

        * If timeout > 0, try to acquire the lock for that many seconds.  If
          the lock period expires and the file is still locked, raise
          LockTimeout.

        * If timeout <= 0, raise AlreadyLocked immediately if the file is
          already locked.
        """
        raise NotImplementedError("implement in subclass")

    def release(self):
        """
        Release the lock.

        If the file is not locked, raise NotLocked.
        """
        raise NotImplementedError("implement in subclass")

    def is_locked(self):
        """
        Tell whether or not the file is locked.
        """
        raise NotImplementedError("implement in subclass")

    def i_am_locking(self):
        """
        Return True if this object is locking the file.
        """
        raise NotImplementedError("implement in subclass")

    def break_lock(self):
        """
        Remove a lock.  Useful if a locking thread failed to unlock.
        """
        raise NotImplementedError("implement in subclass")

    def __enter__(self):
        """
        Context manager support.
        """
        self.acquire()
        return self

    def __exit__(self, *_exc):
        """
        Context manager support.
        """
        self.release()

    def __repr__(self):
        return "<%s: %r -- %r>" % (self.__class__.__name__, self.unique_name,
                                   self.path)


def _fl_helper(cls, mod, *args, **kwds):
    warnings.warn("Import from %s module instead of lockfile package" % mod,
                  DeprecationWarning, stacklevel=2)
    # This is a bit funky, but it's only for awhile.  The way the unit tests
    # are constructed this function winds up as an unbound method, so it
    # actually takes three args, not two.  We want to toss out self.
    if not isinstance(args[0], str):
        # We are testing, avoid the first arg
        args = args[1:]
    if len(args) == 1 and not kwds:
        kwds["threaded"] = True
    return cls(*args, **kwds)


def LinkFileLock(*args, **kwds):
    """Factory function provided for backwards compatibility.

    Do not use in new code.  Instead, import LinkLockFile from the
    lockfile.linklockfile module.
    """
    from . import linklockfile
    return _fl_helper(linklockfile.LinkLockFile, "lockfile.linklockfile",
                      *args, **kwds)


def MkdirFileLock(*args, **kwds):
    """Factory function provided for backwards compatibility.

    Do not use in new code.  Instead, import MkdirLockFile from the
    lockfile.mkdirlockfile module.
    """
    from . import mkdirlockfile
    return _fl_helper(mkdirlockfile.MkdirLockFile, "lockfile.mkdirlockfile",
                      *args, **kwds)


def SQLiteFileLock(*args, **kwds):
    """Factory function provided for backwards compatibility.

    Do not use in new code.  Instead, import SQLiteLockFile from the
    lockfile.sqlitelockfile module.
    """
    from . import sqlitelockfile
    return _fl_helper(sqlitelockfile.SQLiteLockFile, "lockfile.sqlitelockfile",
                      *args, **kwds)


def locked(path, timeout=None):
    """Decorator which enables locks for decorated function.

    Arguments:
     - path: path for lockfile.
     - timeout (optional): Timeout for acquiring lock.

    Usage:
        @locked('/var/run/myname', timeout=0)
        def myname(...):
            ...
    """
    def decor(func):
        @functools.wraps(func)
        def wrapper(*args, **kwargs):
            lock = FileLock(path, timeout=timeout)
            lock.acquire()
            try:
                return func(*args, **kwargs)
            finally:
                lock.release()
        return wrapper
    return decor


if hasattr(os, "link"):
    from . import linklockfile as _llf
    LockFile = _llf.LinkLockFile
else:
    from . import mkdirlockfile as _mlf
    LockFile = _mlf.MkdirLockFile

FileLock = LockFile
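A sketch of the FileLock alias defined above guarding a write, which is how file_cache.py uses it earlier in this commit (the file name follows the module docstring's 'somefile' convention):

from lib.lockfile import FileLock

lock = FileLock('somefile')          # creates somefile.lock alongside
with lock:                           # blocks until the lock is acquired
    with open('somefile', 'wb') as fh:
        fh.write(b'payload')
# the lock is released when the with block exits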
@@ -0,0 +1,73 @@
from __future__ import absolute_import

import time
import os

from . import (LockBase, LockFailed, NotLocked, NotMyLock, LockTimeout,
               AlreadyLocked)


class LinkLockFile(LockBase):
    """Lock access to a file using atomic property of link(2).

    >>> lock = LinkLockFile('somefile')
    >>> lock = LinkLockFile('somefile', threaded=False)
    """

    def acquire(self, timeout=None):
        try:
            open(self.unique_name, "wb").close()
        except IOError:
            raise LockFailed("failed to create %s" % self.unique_name)

        timeout = timeout is not None and timeout or self.timeout
        end_time = time.time()
        if timeout is not None and timeout > 0:
            end_time += timeout

        while True:
            # Try and create a hard link to it.
            try:
                os.link(self.unique_name, self.lock_file)
            except OSError:
                # Link creation failed.  Maybe we've double-locked?
                nlinks = os.stat(self.unique_name).st_nlink
                if nlinks == 2:
                    # The original link plus the one I created == 2.  We're
                    # good to go.
                    return
                else:
                    # Otherwise the lock creation failed.
                    if timeout is not None and time.time() > end_time:
                        os.unlink(self.unique_name)
                        if timeout > 0:
                            raise LockTimeout("Timeout waiting to acquire"
                                              " lock for %s" %
                                              self.path)
                        else:
                            raise AlreadyLocked("%s is already locked" %
                                                self.path)
                    time.sleep(timeout is not None and timeout/10 or 0.1)
            else:
                # Link creation succeeded.  We're good to go.
                return

    def release(self):
        if not self.is_locked():
            raise NotLocked("%s is not locked" % self.path)
        elif not os.path.exists(self.unique_name):
            raise NotMyLock("%s is locked, but not by me" % self.path)
        os.unlink(self.unique_name)
        os.unlink(self.lock_file)

    def is_locked(self):
        return os.path.exists(self.lock_file)

    def i_am_locking(self):
        return (self.is_locked() and
                os.path.exists(self.unique_name) and
                os.stat(self.unique_name).st_nlink == 2)

    def break_lock(self):
        if os.path.exists(self.lock_file):
            os.unlink(self.lock_file)
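The nlinks == 2 test above works because hard-linking the unique file to the lock name makes both directory entries point at one inode. A minimal standalone demo of that invariant (POSIX-style filesystems; the temp names are arbitrary):

import os
import tempfile

fd, unique = tempfile.mkstemp()
os.close(fd)
lock = unique + '.lock'

os.link(unique, lock)                 # second name for the same inode
assert os.stat(unique).st_nlink == 2  # the invariant LinkLockFile checks

os.unlink(lock)
os.unlink(unique)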
@@ -0,0 +1,83 @@
from __future__ import absolute_import, division

import time
import os
import sys
import errno

from . import (LockBase, LockFailed, NotLocked, NotMyLock, LockTimeout,
               AlreadyLocked)


class MkdirLockFile(LockBase):
    """Lock file by creating a directory."""
    def __init__(self, path, threaded=True, timeout=None):
        """
        >>> lock = MkdirLockFile('somefile')
        >>> lock = MkdirLockFile('somefile', threaded=False)
        """
        LockBase.__init__(self, path, threaded, timeout)
        # Lock file itself is a directory.  Place the unique file name into
        # it.
        self.unique_name = os.path.join(self.lock_file,
                                        "%s.%s%s" % (self.hostname,
                                                     self.tname,
                                                     self.pid))

    def acquire(self, timeout=None):
        timeout = timeout is not None and timeout or self.timeout
        end_time = time.time()
        if timeout is not None and timeout > 0:
            end_time += timeout

        if timeout is None:
            wait = 0.1
        else:
            wait = max(0, timeout / 10)

        while True:
            try:
                os.mkdir(self.lock_file)
            except OSError:
                err = sys.exc_info()[1]
                if err.errno == errno.EEXIST:
                    # Already locked.
                    if os.path.exists(self.unique_name):
                        # Already locked by me.
                        return
                    if timeout is not None and time.time() > end_time:
                        if timeout > 0:
                            raise LockTimeout("Timeout waiting to acquire"
                                              " lock for %s" %
                                              self.path)
                        else:
                            # Someone else has the lock.
                            raise AlreadyLocked("%s is already locked" %
                                                self.path)
                    time.sleep(wait)
                else:
                    # Couldn't create the lock for some other reason
                    raise LockFailed("failed to create %s" % self.lock_file)
            else:
                open(self.unique_name, "wb").close()
                return

    def release(self):
        if not self.is_locked():
            raise NotLocked("%s is not locked" % self.path)
        elif not os.path.exists(self.unique_name):
            raise NotMyLock("%s is locked, but not by me" % self.path)
        os.unlink(self.unique_name)
        os.rmdir(self.lock_file)

    def is_locked(self):
        return os.path.exists(self.lock_file)

    def i_am_locking(self):
        return (self.is_locked() and
                os.path.exists(self.unique_name))

    def break_lock(self):
        if os.path.exists(self.lock_file):
            for name in os.listdir(self.lock_file):
                os.unlink(os.path.join(self.lock_file, name))
            os.rmdir(self.lock_file)
|||||
|
# -*- coding: utf-8 -*- |
||||
|
|
||||
|
# pidlockfile.py |
||||
|
# |
||||
|
# Copyright © 2008–2009 Ben Finney <ben+python@benfinney.id.au> |
||||
|
# |
||||
|
# This is free software: you may copy, modify, and/or distribute this work |
||||
|
# under the terms of the Python Software Foundation License, version 2 or |
||||
|
# later as published by the Python Software Foundation. |
||||
|
# No warranty expressed or implied. See the file LICENSE.PSF-2 for details. |
||||
|
|
||||
|
""" Lockfile behaviour implemented via Unix PID files. |
||||
|
""" |
||||
|
|
||||
|
from __future__ import absolute_import |
||||
|
|
||||
|
import os |
||||
|
import sys |
||||
|
import errno |
||||
|
import time |
||||
|
|
||||
|
from . import (LockBase, AlreadyLocked, LockFailed, NotLocked, NotMyLock, |
||||
|
LockTimeout) |
||||
|
|
||||
|
|
||||
|
class PIDLockFile(LockBase): |
||||
|
""" Lockfile implemented as a Unix PID file. |
||||
|
|
||||
|
The lock file is a normal file named by the attribute `path`. |
||||
|
A lock's PID file contains a single line of text, containing |
||||
|
the process ID (PID) of the process that acquired the lock. |
||||
|
|
||||
|
>>> lock = PIDLockFile('somefile') |
||||
|
>>> lock = PIDLockFile('somefile') |
||||
|
""" |
||||
|
|
||||
|
def __init__(self, path, threaded=False, timeout=None): |
||||
|
# pid lockfiles don't support threaded operation, so always force |
||||
|
# False as the threaded arg. |
||||
|
LockBase.__init__(self, path, False, timeout) |
||||
|
dirname = os.path.dirname(self.lock_file) |
||||
|
basename = os.path.split(self.path)[-1] |
||||
|
self.unique_name = self.path |
||||
|
|
||||
|
def read_pid(self): |
||||
|
""" Get the PID from the lock file. |
||||
|
""" |
||||
|
return read_pid_from_pidfile(self.path) |
||||
|
|
||||
|
def is_locked(self): |
||||
|
""" Test if the lock is currently held. |
||||
|
|
||||
|
The lock is held if the PID file for this lock exists. |
||||
|
|
||||
|
""" |
||||
|
return os.path.exists(self.path) |
||||
|
|
||||
|
def i_am_locking(self): |
||||
|
""" Test if the lock is held by the current process. |
||||
|
|
||||
|
Returns ``True`` if the current process ID matches the |
||||
|
number stored in the PID file. |
||||
|
""" |
||||
|
return self.is_locked() and os.getpid() == self.read_pid() |
||||
|
|
||||
|
def acquire(self, timeout=None): |
||||
|
""" Acquire the lock. |
||||
|
|
||||
|
Creates the PID file for this lock, or raises an error if |
||||
|
the lock could not be acquired. |
||||
|
""" |
||||
|
|
||||
|
timeout = timeout is not None and timeout or self.timeout |
||||
|
end_time = time.time() |
||||
|
if timeout is not None and timeout > 0: |
||||
|
end_time += timeout |
||||
|
|
||||
|
while True: |
||||
|
try: |
||||
|
write_pid_to_pidfile(self.path) |
||||
|
except OSError as exc: |
||||
|
if exc.errno == errno.EEXIST: |
||||
|
# The lock creation failed. Maybe sleep a bit. |
||||
|
if timeout is not None and time.time() > end_time: |
||||
|
if timeout > 0: |
||||
|
raise LockTimeout("Timeout waiting to acquire" |
||||
|
" lock for %s" % |
||||
|
self.path) |
||||
|
else: |
||||
|
raise AlreadyLocked("%s is already locked" % |
||||
|
self.path) |
||||
|
time.sleep(timeout is not None and timeout/10 or 0.1) |
||||
|
else: |
||||
|
raise LockFailed("failed to create %s" % self.path) |
||||
|
else: |
||||
|
return |
||||
|
|
||||
|
def release(self): |
||||
|
""" Release the lock. |
||||
|
|
||||
|
Removes the PID file to release the lock, or raises an |
||||
|
error if the current process does not hold the lock. |
||||
|
|
||||
|
""" |
||||
|
if not self.is_locked(): |
||||
|
raise NotLocked("%s is not locked" % self.path) |
||||
|
if not self.i_am_locking(): |
||||
|
raise NotMyLock("%s is locked, but not by me" % self.path) |
||||
|
remove_existing_pidfile(self.path) |
||||
|
|
||||
|
def break_lock(self): |
||||
|
""" Break an existing lock. |
||||
|
|
||||
|
Removes the PID file if it already exists, otherwise does |
||||
|
nothing. |
||||
|
|
||||
|
""" |
||||
|
remove_existing_pidfile(self.path) |
||||
|
|
||||
|
def read_pid_from_pidfile(pidfile_path): |
||||
|
""" Read the PID recorded in the named PID file. |
||||
|
|
||||
|
Read and return the numeric PID recorded as text in the named |
||||
|
PID file. If the PID file cannot be read, or if the content is |
||||
|
not a valid PID, return ``None``. |
||||
|
|
||||
|
""" |
||||
|
pid = None |
||||
|
try: |
||||
|
pidfile = open(pidfile_path, 'r') |
||||
|
except IOError: |
||||
|
pass |
||||
|
else: |
||||
|
# According to the FHS 2.3 section on PID files in /var/run: |
||||
|
# |
||||
|
# The file must consist of the process identifier in |
||||
|
# ASCII-encoded decimal, followed by a newline character. |
||||
|
# |
||||
|
# Programs that read PID files should be somewhat flexible |
||||
|
# in what they accept; i.e., they should ignore extra |
||||
|
# whitespace, leading zeroes, absence of the trailing |
||||
|
# newline, or additional lines in the PID file. |
||||
|
|
||||
|
line = pidfile.readline().strip() |
||||
|
try: |
||||
|
pid = int(line) |
||||
|
except ValueError: |
||||
|
pass |
||||
|
pidfile.close() |
||||
|
|
||||
|
return pid |
||||
|
|
||||
|
|
||||
|
def write_pid_to_pidfile(pidfile_path): |
||||
|
""" Write the PID in the named PID file. |
||||
|
|
||||
|
Get the numeric process ID (“PID”) of the current process |
||||
|
and write it to the named file as a line of text. |
||||
|
|
||||
|
""" |
||||
|
open_flags = (os.O_CREAT | os.O_EXCL | os.O_WRONLY) |
||||
|
open_mode = 0o644 |
||||
|
pidfile_fd = os.open(pidfile_path, open_flags, open_mode) |
||||
|
pidfile = os.fdopen(pidfile_fd, 'w') |
||||
|
|
||||
|
# According to the FHS 2.3 section on PID files in /var/run: |
||||
|
# |
||||
|
# The file must consist of the process identifier in |
||||
|
# ASCII-encoded decimal, followed by a newline character. For |
||||
|
# example, if crond was process number 25, /var/run/crond.pid |
||||
|
# would contain three characters: two, five, and newline. |
||||
|
|
||||
|
pid = os.getpid() |
||||
|
line = "%(pid)d\n" % vars() |
||||
|
pidfile.write(line) |
||||
|
pidfile.close() |
||||
|
|
||||
|
|
||||
|
def remove_existing_pidfile(pidfile_path): |
||||
|
""" Remove the named PID file if it exists. |
||||
|
|
||||
|
Removing a PID file that doesn't already exist puts us in the |
||||
|
desired state, so we ignore the condition if the file does not |
||||
|
exist. |
||||
|
|
||||
|
""" |
||||
|
try: |
||||
|
os.remove(pidfile_path) |
||||
|
except OSError as exc: |
||||
|
if exc.errno == errno.ENOENT: |
||||
|
pass |
||||
|
else: |
||||
|
raise |
@ -0,0 +1,155 @@ |
|||||
|
from __future__ import absolute_import, division |
||||
|
|
||||
|
import time |
||||
|
import os |
||||
|
|
||||
|
try: |
||||
|
unicode |
||||
|
except NameError: |
||||
|
unicode = str |
||||
|
|
||||
|
from . import LockBase, NotLocked, NotMyLock, LockTimeout, AlreadyLocked |
||||
|
|
||||
|
class SQLiteLockFile(LockBase): |
||||
|
"Demonstrate SQL-based locking." |
||||
|
|
||||
|
testdb = None |
||||
|
|
||||
|
def __init__(self, path, threaded=True, timeout=None): |
||||
|
""" |
||||
|
>>> lock = SQLiteLockFile('somefile') |
||||
|
>>> lock = SQLiteLockFile('somefile', threaded=False) |
||||
|
""" |
||||
|
LockBase.__init__(self, path, threaded, timeout) |
||||
|
self.lock_file = unicode(self.lock_file) |
||||
|
self.unique_name = unicode(self.unique_name) |
||||
|
|
||||
|
if SQLiteLockFile.testdb is None: |
||||
|
import tempfile |
||||
|
_fd, testdb = tempfile.mkstemp() |
||||
|
os.close(_fd) |
||||
|
os.unlink(testdb) |
||||
|
del _fd, tempfile |
||||
|
SQLiteLockFile.testdb = testdb |
||||
|
|
||||
|
import sqlite3 |
||||
|
self.connection = sqlite3.connect(SQLiteLockFile.testdb) |
||||
|
|
||||
|
c = self.connection.cursor() |
||||
|
try: |
||||
|
c.execute("create table locks" |
||||
|
"(" |
||||
|
" lock_file varchar(32)," |
||||
|
" unique_name varchar(32)" |
||||
|
")") |
||||
|
except sqlite3.OperationalError: |
||||
|
pass |
||||
|
else: |
||||
|
self.connection.commit() |
||||
|
import atexit |
||||
|
atexit.register(os.unlink, SQLiteLockFile.testdb) |
||||
|
|
||||
|
def acquire(self, timeout=None): |
||||
|
timeout = timeout is not None and timeout or self.timeout |
||||
|
end_time = time.time() |
||||
|
if timeout is not None and timeout > 0: |
||||
|
end_time += timeout |
||||
|
|
||||
|
if timeout is None: |
||||
|
wait = 0.1 |
||||
|
elif timeout <= 0: |
||||
|
wait = 0 |
||||
|
else: |
||||
|
wait = timeout / 10 |
||||
|
|
||||
|
cursor = self.connection.cursor() |
||||
|
|
||||
|
while True: |
||||
|
if not self.is_locked(): |
||||
|
# Not locked. Try to lock it. |
||||
|
cursor.execute("insert into locks" |
||||
|
" (lock_file, unique_name)" |
||||
|
" values" |
||||
|
" (?, ?)", |
||||
|
(self.lock_file, self.unique_name)) |
||||
|
self.connection.commit() |
||||
|
|
||||
|
# Check to see if we are the only lock holder. |
||||
|
cursor.execute("select * from locks" |
||||
|
" where unique_name = ?", |
||||
|
(self.unique_name,)) |
||||
|
rows = cursor.fetchall() |
||||
|
if len(rows) > 1: |
||||
|
# Nope. Someone else got there. Remove our lock. |
||||
|
cursor.execute("delete from locks" |
||||
|
" where unique_name = ?", |
||||
|
(self.unique_name,)) |
||||
|
self.connection.commit() |
||||
|
else: |
||||
|
# Yup. We're done, so go home. |
||||
|
return |
||||
|
else: |
||||
|
# Check to see if we are the only lock holder. |
||||
|
cursor.execute("select * from locks" |
||||
|
" where unique_name = ?", |
||||
|
(self.unique_name,)) |
||||
|
rows = cursor.fetchall() |
||||
|
if len(rows) == 1: |
||||
|
# We're the locker, so go home. |
||||
|
return |
||||
|
|
||||
|
# Maybe we should wait a bit longer. |
||||
|
if timeout is not None and time.time() > end_time: |
||||
|
if timeout > 0: |
||||
|
# No more waiting. |
||||
|
raise LockTimeout("Timeout waiting to acquire" |
||||
|
" lock for %s" % |
||||
|
self.path) |
||||
|
else: |
||||
|
# Someone else has the lock and we are impatient.. |
||||
|
raise AlreadyLocked("%s is already locked" % self.path) |
||||
|
|
||||
|
# Well, okay. We'll give it a bit longer. |
||||
|
time.sleep(wait) |
||||
|
|
||||
|
def release(self): |
||||
|
if not self.is_locked(): |
||||
|
raise NotLocked("%s is not locked" % self.path) |
||||
|
if not self.i_am_locking(): |
||||
|
raise NotMyLock("%s is locked, but not by me (by %s)" % |
||||
|
(self.unique_name, self._who_is_locking())) |
||||
|
cursor = self.connection.cursor() |
||||
|
cursor.execute("delete from locks" |
||||
|
" where unique_name = ?", |
||||
|
(self.unique_name,)) |
||||
|
self.connection.commit() |
||||
|
|
||||
|
def _who_is_locking(self): |
||||
|
cursor = self.connection.cursor() |
||||
|
cursor.execute("select unique_name from locks" |
||||
|
" where lock_file = ?", |
||||
|
(self.lock_file,)) |
||||
|
return cursor.fetchone()[0] |
||||
|
|
||||
|
def is_locked(self): |
||||
|
cursor = self.connection.cursor() |
||||
|
cursor.execute("select * from locks" |
||||
|
" where lock_file = ?", |
||||
|
(self.lock_file,)) |
||||
|
rows = cursor.fetchall() |
||||
|
return not not rows |
||||
|
|
||||
|
def i_am_locking(self): |
||||
|
cursor = self.connection.cursor() |
||||
|
cursor.execute("select * from locks" |
||||
|
" where lock_file = ?" |
||||
|
" and unique_name = ?", |
||||
|
(self.lock_file, self.unique_name)) |
||||
|
return not not cursor.fetchall() |
||||
|
|
||||
|
def break_lock(self): |
||||
|
cursor = self.connection.cursor() |
||||
|
cursor.execute("delete from locks" |
||||
|
" where lock_file = ?", |
||||
|
(self.lock_file,)) |
||||
|
self.connection.commit() |
@ -0,0 +1,69 @@ |
|||||
|
from __future__ import absolute_import |
||||
|
|
||||
|
import time |
||||
|
import os |
||||
|
|
||||
|
from . import (LockBase, LockFailed, NotLocked, NotMyLock, LockTimeout, |
||||
|
AlreadyLocked) |
||||
|
|
||||
|
class SymlinkLockFile(LockBase): |
||||
|
"""Lock access to a file using symlink(2).""" |
||||
|
|
||||
|
def __init__(self, path, threaded=True, timeout=None): |
||||
|
# super(SymlinkLockFile).__init(...) |
||||
|
LockBase.__init__(self, path, threaded, timeout) |
||||
|
# split it back! |
||||
|
self.unique_name = os.path.split(self.unique_name)[1] |
||||
|
|
||||
|
def acquire(self, timeout=None): |
||||
|
# Hopefully unnecessary for symlink. |
||||
|
#try: |
||||
|
# open(self.unique_name, "wb").close() |
||||
|
#except IOError: |
||||
|
# raise LockFailed("failed to create %s" % self.unique_name) |
||||
|
timeout = timeout is not None and timeout or self.timeout |
||||
|
end_time = time.time() |
||||
|
if timeout is not None and timeout > 0: |
||||
|
end_time += timeout |
||||
|
|
||||
|
while True: |
||||
|
# Try and create a symbolic link to it. |
||||
|
try: |
||||
|
os.symlink(self.unique_name, self.lock_file) |
||||
|
except OSError: |
||||
|
# Link creation failed. Maybe we've double-locked? |
||||
|
if self.i_am_locking(): |
||||
|
# Linked to out unique name. Proceed. |
||||
|
return |
||||
|
else: |
||||
|
# Otherwise the lock creation failed. |
||||
|
if timeout is not None and time.time() > end_time: |
||||
|
if timeout > 0: |
||||
|
raise LockTimeout("Timeout waiting to acquire" |
||||
|
" lock for %s" % |
||||
|
self.path) |
||||
|
else: |
||||
|
raise AlreadyLocked("%s is already locked" % |
||||
|
self.path) |
||||
|
time.sleep(timeout/10 if timeout is not None else 0.1) |
||||
|
else: |
||||
|
# Link creation succeeded. We're good to go. |
||||
|
return |
||||
|
|
||||
|
def release(self): |
||||
|
if not self.is_locked(): |
||||
|
raise NotLocked("%s is not locked" % self.path) |
||||
|
elif not self.i_am_locking(): |
||||
|
raise NotMyLock("%s is locked, but not by me" % self.path) |
||||
|
os.unlink(self.lock_file) |
||||
|
|
||||
|
def is_locked(self): |
||||
|
return os.path.islink(self.lock_file) |
||||
|
|
||||
|
def i_am_locking(self): |
||||
|
return os.path.islink(self.lock_file) and \ |
||||
|
os.readlink(self.lock_file) == self.unique_name |
||||
|
|
||||
|
def break_lock(self): |
||||
|
if os.path.islink(self.lock_file): # exists && link |
||||
|
os.unlink(self.lock_file) |
@@ -1,29 +0,0 @@
INDEXER_TVDB = 'Tvdb'
INDEXER_TVRAGE = 'TVRage'

INDEXER_API_KEY = {}
INDEXER_API_KEY[INDEXER_TVDB] = '9DAF49C96CBF8DAC'
INDEXER_API_KEY[INDEXER_TVRAGE] = 'Uhewg1Rr0o62fvZvUIZt'

INDEXER_BASEURL = {}
INDEXER_BASEURL[INDEXER_TVDB] = 'http://thetvdb.com/api/' + INDEXER_API_KEY[INDEXER_TVDB]
INDEXER_BASEURL[INDEXER_TVRAGE] = 'http://tvrage.com/feeds/' + INDEXER_API_KEY[INDEXER_TVRAGE]

INDEXER_API_PARMS = {}
INDEXER_API_PARMS[INDEXER_TVDB] = {'apikey': INDEXER_API_KEY[INDEXER_TVDB],
                                   'language': 'en',
                                   'useZip': True}

INDEXER_API_PARMS[INDEXER_TVRAGE] = {'apikey': INDEXER_API_KEY[INDEXER_TVRAGE],
                                     'language': 'en'}


INDEXER_CONFIG = {}
INDEXER_CONFIG['valid_languages'] = [
    "da", "fi", "nl", "de", "it", "es", "fr", "pl", "hu", "el", "tr",
    "ru", "he", "ja", "pt", "zh", "cs", "sl", "hr", "ko", "en", "sv", "no"]

INDEXER_CONFIG['langabbv_to_id'] = {'el': 20, 'en': 7, 'zh': 27,
                                    'it': 15, 'cs': 28, 'es': 16, 'ru': 22, 'nl': 13, 'pt': 26, 'no': 9,
                                    'tr': 21, 'pl': 18, 'fr': 17, 'hr': 31, 'de': 14, 'da': 10, 'fi': 11,
                                    'hu': 19, 'ja': 25, 'he': 24, 'ko': 32, 'sv': 8, 'sl': 30}