From 0ded4f701a71c341be3f411706da307b0150acf2 Mon Sep 17 00:00:00 2001
From: Ruud
Date: Sat, 14 Jul 2012 16:39:09 +0200
Subject: [PATCH 01/24] Prevent tvshows from imdbapi.

fix #577
---
 couchpotato/core/providers/automation/bluray/main.py | 2 +-
 couchpotato/core/providers/movie/imdbapi/main.py     | 9 ++++-----
 2 files changed, 5 insertions(+), 6 deletions(-)

diff --git a/couchpotato/core/providers/automation/bluray/main.py b/couchpotato/core/providers/automation/bluray/main.py
index cd7c0c9..c6d82eb 100644
--- a/couchpotato/core/providers/automation/bluray/main.py
+++ b/couchpotato/core/providers/automation/bluray/main.py
@@ -26,7 +26,7 @@ class Bluray(Automation, RSS):
         rss_data = self.getCache(cache_key, self.rss_url)
         data = XMLTree.fromstring(rss_data)
 
-        if data:
+        if data is not None:
             rss_movies = self.getElements(data, 'channel/item')
 
             for movie in rss_movies:
diff --git a/couchpotato/core/providers/movie/imdbapi/main.py b/couchpotato/core/providers/movie/imdbapi/main.py
index 535e6b2..c81a9ab 100644
--- a/couchpotato/core/providers/movie/imdbapi/main.py
+++ b/couchpotato/core/providers/movie/imdbapi/main.py
@@ -13,15 +13,14 @@ log = CPLog(__name__)
 class IMDBAPI(MovieProvider):
 
     urls = {
-        'search': 'http://www.imdbapi.com/?%s',
-        'info': 'http://www.imdbapi.com/?i=%s',
+        'search': 'http://www.imdbapi.com/?tomatoes=true&%s',
+        'info': 'http://www.imdbapi.com/?tomatoes=true&i=%s',
     }
 
     http_time_between_calls = 0
 
     def __init__(self):
         addEvent('movie.search', self.search)
-        addEvent('movie.searchimdb', self.search)
         addEvent('movie.info', self.getInfo)
 
     def search(self, q, limit = 12):
@@ -72,7 +71,7 @@ class IMDBAPI(MovieProvider):
             log.info('No proper json to decode')
             return movie_data
 
-        if movie.get('Response') == 'Parse Error':
+        if movie.get('Response') == 'Parse Error' or movie.get('Response') == 'False':
             return movie_data
 
         tmp_movie = movie.copy()
@@ -91,7 +90,7 @@ class IMDBAPI(MovieProvider):
             },
             'rating': {
                 'imdb': (tryFloat(movie.get('imdbRating', 0)), tryInt(movie.get('imdbVotes', '').replace(',', ''))),
-                #'rotten': (tryFloat(movie.get('tomatoRating', 0)), tryInt(movie.get('tomatoReviews', 0))),
+                'rotten': (tryFloat(movie.get('tomatoRating', 0)), tryInt(movie.get('tomatoReviews', '').replace(',', ''))),
             },
             'imdb': str(movie.get('imdbID', '')),
             'runtime': self.runtimeToMinutes(movie.get('Runtime', '')),

From 6577a59ce209578ccc12ac0285ccc9baf5be7c79 Mon Sep 17 00:00:00 2001
From: Ruud
Date: Sat, 14 Jul 2012 16:44:21 +0200
Subject: [PATCH 02/24] Remove ipv6 flag in tornado.
fix #578
---
 libs/tornado/netutil.py | 6 ------
 1 file changed, 6 deletions(-)

diff --git a/libs/tornado/netutil.py b/libs/tornado/netutil.py
index ba0b27d..1f3f2e5 100755
--- a/libs/tornado/netutil.py
+++ b/libs/tornado/netutil.py
@@ -258,12 +258,6 @@ def bind_sockets(port, address=None, family=socket.AF_UNSPEC, backlog=128):
     if address == "":
         address = None
     flags = socket.AI_PASSIVE
-    if hasattr(socket, "AI_ADDRCONFIG"):
-        # AI_ADDRCONFIG ensures that we only try to bind on ipv6
-        # if the system is configured for it, but the flag doesn't
-        # exist on some platforms (specifically WinXP, although
-        # newer versions of windows have it)
-        flags |= socket.AI_ADDRCONFIG
     for res in set(socket.getaddrinfo(address, port, family, socket.SOCK_STREAM,
                                       0, flags)):
         af, socktype, proto, canonname, sockaddr = res

From 7c687d4d6b323d19c825d5dd98f5965113e22832 Mon Sep 17 00:00:00 2001
From: Ruud
Date: Sat, 14 Jul 2012 17:31:29 +0200
Subject: [PATCH 03/24] Fedora wrong pidfile arg

---
 init/fedora | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/init/fedora b/init/fedora
index 488027c..0adefd0 100644
--- a/init/fedora
+++ b/init/fedora
@@ -28,7 +28,7 @@ pidfile=${CP_PIDFILE-/var/run/couchpotato/couchpotato.pid}
 ##
 
 pidpath=`dirname ${pidfile}`
-options=" --daemon --pidfile=${pidfile} --datadir=${datadir}"
+options=" --daemon --pid_file=${pidfile} --datadir=${datadir}"
 
 # create PID directory if not exist and ensure the couchpotato user can write to it
 if [ ! -d $pidpath ]; then

From b3e709c6c556c0165e4ebc20b975e11e179c7ce7 Mon Sep 17 00:00:00 2001
From: Ruud
Date: Sat, 14 Jul 2012 23:23:45 +0200
Subject: [PATCH 04/24] Properly encode before md5.

fix #580
---
 couchpotato/core/_base/_core/main.py | 2 +-
 couchpotato/core/auth.py             | 5 +++--
 couchpotato/core/helpers/request.py  | 4 ++--
 3 files changed, 6 insertions(+), 5 deletions(-)

diff --git a/couchpotato/core/_base/_core/main.py b/couchpotato/core/_base/_core/main.py
index 58bd1d1..0e178de 100644
--- a/couchpotato/core/_base/_core/main.py
+++ b/couchpotato/core/_base/_core/main.py
@@ -53,7 +53,7 @@ class Core(Plugin):
 
     def md5Password(self, value):
-        return md5(value) if value else ''
+        return md5(value.encode(Env.get('encoding'))) if value else ''
 
     def checkApikey(self, value):
         return value if value and len(value) > 3 else uuid4().hex
 
diff --git a/couchpotato/core/auth.py b/couchpotato/core/auth.py
index 032bdf2..0111b9a 100644
--- a/couchpotato/core/auth.py
+++ b/couchpotato/core/auth.py
@@ -17,8 +17,9 @@ def requires_auth(f):
     @wraps(f)
     def decorated(*args, **kwargs):
         auth = getattr(request, 'authorization')
-        if Env.setting('username') and Env.setting('password') and (not auth or not check_auth(auth.username, md5(auth.password))):
-            return authenticate()
+        if Env.setting('username') and Env.setting('password'):
+            if (not auth or not check_auth(auth.username.decode('latin1'), md5(auth.password.decode('latin1').encode(Env.get('encoding'))))):
+                return authenticate()
 
         return f(*args, **kwargs)
 
diff --git a/couchpotato/core/helpers/request.py b/couchpotato/core/helpers/request.py
index 07aa18e..3c6558b 100644
--- a/couchpotato/core/helpers/request.py
+++ b/couchpotato/core/helpers/request.py
@@ -2,8 +2,8 @@ from couchpotato.core.helpers.encoding import toUnicode
 from couchpotato.core.helpers.variable import natcmp
 from flask.globals import current_app
 from flask.helpers import json, make_response
-from libs.werkzeug.urls import url_decode
 from urllib import unquote
+from werkzeug.urls import url_decode
 import flask
 import re
 
@@ -57,7 +57,7 @@ def dictToList(params):
 
 def getParam(attr, default = None):
     try:
-        return toUnicode(unquote(getattr(flask.request, 'args').get(attr, default)))
+        return getParams().get(attr, default)
     except:
         return default
 

From e726a314057deca604b88e23987b88ad4879eec5 Mon Sep 17 00:00:00 2001
From: Ruud
Date: Sat, 14 Jul 2012 23:54:19 +0200
Subject: [PATCH 05/24] Support Plex Media Server only.

fix #583
---
 couchpotato/core/notifications/plex/main.py | 22 +++++++++++++++++++++-
 1 file changed, 21 insertions(+), 1 deletion(-)

diff --git a/couchpotato/core/notifications/plex/main.py b/couchpotato/core/notifications/plex/main.py
index 23fd39d..fed9f5e 100644
--- a/couchpotato/core/notifications/plex/main.py
+++ b/couchpotato/core/notifications/plex/main.py
@@ -1,8 +1,10 @@
 from couchpotato.core.event import addEvent
 from couchpotato.core.helpers.encoding import tryUrlencode
+from couchpotato.core.helpers.request import jsonified
 from couchpotato.core.helpers.variable import cleanHost
 from couchpotato.core.logger import CPLog
 from couchpotato.core.notifications.base import Notification
+from urllib2 import URLError
 from xml.dom import minidom
 import traceback
 
@@ -38,7 +40,7 @@ class Plex(Notification):
                 x = self.urlopen(url)
 
             except:
-                log.error('Plex library update failed for %s: %s', (host, traceback.format_exc()))
+                log.error('Plex library update failed for %s, Media Server not running: %s', (host, traceback.format_exc(1)))
                 return False
 
         return True
@@ -62,9 +64,27 @@ class Plex(Notification):
 
         try:
             self.urlopen(url, headers = headers, show_error = False)
+        except URLError:
+            log.error("Couldn't sent command to Plex, probably just running Media Server")
+            return False
         except:
             log.error("Couldn't sent command to Plex: %s", traceback.format_exc())
             return False
 
         log.info('Plex notification to %s successful.', host)
         return True
+
+    def test(self):
+
+        test_type = self.testNotifyName()
+
+        log.info('Sending test to %s', test_type)
+
+        success = self.notify(
+            message = self.test_message,
+            data = {},
+            listener = 'test'
+        )
+        success2 = self.addToLibrary()
+
+        return jsonified({'success': success or success2})

From 8e720c9da67d50bfa4dbf576713e3fe43b933b59 Mon Sep 17 00:00:00 2001
From: Ruud
Date: Sat, 14 Jul 2012 23:58:40 +0200
Subject: [PATCH 06/24] Allow no quality on release.

fix #565
---
 couchpotato/core/plugins/movie/static/movie.js | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/couchpotato/core/plugins/movie/static/movie.js b/couchpotato/core/plugins/movie/static/movie.js
index 1ce0af7..92a66ef 100644
--- a/couchpotato/core/plugins/movie/static/movie.js
+++ b/couchpotato/core/plugins/movie/static/movie.js
@@ -370,7 +370,7 @@ var ReleaseAction = new Class({
                 }).adopt(
                     new Element('span.name', {'text': self.get(release, 'name'), 'title': self.get(release, 'name')}),
                     new Element('span.status', {'text': status.identifier, 'class': 'release_status '+status.identifier}),
-                    new Element('span.quality', {'text': quality.get('label')}),
+                    new Element('span.quality', {'text': quality.label || 'n/a'}),
                     new Element('span.size', {'text': (self.get(release, 'size'))}),
                     new Element('span.age', {'text': self.get(release, 'age')}),
                     new Element('span.score', {'text': self.get(release, 'score')}),

From a8c82a40ae425fa196977167e876546f7130e757 Mon Sep 17 00:00:00 2001
From: Ruud
Date: Sun, 15 Jul 2012 10:50:23 +0200
Subject: [PATCH 07/24] Implement filelocking in logging rotation handler.
fix #587 --- couchpotato/runner.py | 4 +- libs/cloghandler/__init__.py | 0 libs/cloghandler/cloghandler.py | 326 ++++++++++++++++++++++++++++++++++++++++ libs/cloghandler/portalocker.py | 158 +++++++++++++++++++ 4 files changed, 486 insertions(+), 2 deletions(-) create mode 100644 libs/cloghandler/__init__.py create mode 100644 libs/cloghandler/cloghandler.py create mode 100644 libs/cloghandler/portalocker.py diff --git a/couchpotato/runner.py b/couchpotato/runner.py index 31a8ce3..47c6659 100644 --- a/couchpotato/runner.py +++ b/couchpotato/runner.py @@ -1,9 +1,9 @@ from argparse import ArgumentParser +from cloghandler.cloghandler import ConcurrentRotatingFileHandler from couchpotato import web from couchpotato.api import api, NonBlockHandler from couchpotato.core.event import fireEventAsync, fireEvent from couchpotato.core.helpers.variable import getDataDir, tryInt -from logging import handlers from tornado import autoreload from tornado.httpserver import HTTPServer from tornado.ioloop import IOLoop @@ -156,7 +156,7 @@ def runCouchPotato(options, base_path, args, data_dir = None, log_dir = None, En logger.addHandler(hdlr) # To file - hdlr2 = handlers.RotatingFileHandler(Env.get('log_path'), 'a', 500000, 10) + hdlr2 = ConcurrentRotatingFileHandler(Env.get('log_path'), 'a', 500000, 10) hdlr2.setFormatter(formatter) logger.addHandler(hdlr2) diff --git a/libs/cloghandler/__init__.py b/libs/cloghandler/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/libs/cloghandler/cloghandler.py b/libs/cloghandler/cloghandler.py new file mode 100644 index 0000000..50debe3 --- /dev/null +++ b/libs/cloghandler/cloghandler.py @@ -0,0 +1,326 @@ +# Copyright 2008 Lowell Alleman +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may not +# use this file except in compliance with the License. You may obtain a copy +# of the License at http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. +""" cloghandler.py: A smart replacement for the standard RotatingFileHandler + +ConcurrentRotatingFileHandler: This class is a log handler which is a drop-in +replacement for the python standard log handler 'RotateFileHandler', the primary +difference being that this handler will continue to write to the same file if +the file cannot be rotated for some reason, whereas the RotatingFileHandler will +strictly adhere to the maximum file size. Unfortunately, if you are using the +RotatingFileHandler on Windows, you will find that once an attempted rotation +fails, all subsequent log messages are dropped. The other major advantage of +this module is that multiple processes can safely write to a single log file. + +To put it another way: This module's top priority is preserving your log +records, whereas the standard library attempts to limit disk usage, which can +potentially drop log messages. If you are trying to determine which module to +use, there are number of considerations: What is most important: strict disk +space usage or preservation of log messages? What OSes are you supporting? Can +you afford to have processes blocked by file locks? + +Concurrent access is handled by using file locks, which should ensure that log +messages are not dropped or clobbered. 
This means that a file lock is acquired +and released for every log message that is written to disk. (On Windows, you may +also run into a temporary situation where the log file must be opened and closed +for each log message.) This can have potentially performance implications. In my +testing, performance was more than adequate, but if you need a high-volume or +low-latency solution, I suggest you look elsewhere. + +This module currently only support the 'nt' and 'posix' platforms due to the +usage of the portalocker module. I do not have access to any other platforms +for testing, patches are welcome. + +See the README file for an example usage of this module. + +""" + + +__version__ = "$Id: cloghandler.py 6175 2009-11-02 18:40:35Z lowell $" +__author__ = "Lowell Alleman" +__all__ = [ + "ConcurrentRotatingFileHandler", +] + + +import os +import sys +from random import randint +from logging import Handler +from logging.handlers import BaseRotatingHandler + +try: + import codecs +except ImportError: + codecs = None + + + +# Question/TODO: Should we have a fallback mode if we can't load portalocker / +# we should still be better off than with the standard RotattingFileHandler +# class, right? We do some rename checking... that should prevent some file +# clobbering that the builtin class allows. + +# sibling module than handles all the ugly platform-specific details of file locking +from portalocker import lock, unlock, LOCK_EX, LOCK_NB, LockException + + +# A client can set this to true to automatically convert relative paths to +# absolute paths (which will also hide the absolute path warnings) +FORCE_ABSOLUTE_PATH = False + + +class ConcurrentRotatingFileHandler(BaseRotatingHandler): + """ + Handler for logging to a set of files, which switches from one file to the + next when the current file reaches a certain size. Multiple processes can + write to the log file concurrently, but this may mean that the file will + exceed the given size. + """ + def __init__(self, filename, mode='a', maxBytes=0, backupCount=0, + encoding=None, debug=True, supress_abs_warn=False): + """ + Open the specified file and use it as the stream for logging. + + By default, the file grows indefinitely. You can specify particular + values of maxBytes and backupCount to allow the file to rollover at + a predetermined size. + + Rollover occurs whenever the current log file is nearly maxBytes in + length. If backupCount is >= 1, the system will successively create + new files with the same pathname as the base file, but with extensions + ".1", ".2" etc. appended to it. For example, with a backupCount of 5 + and a base file name of "app.log", you would get "app.log", + "app.log.1", "app.log.2", ... through to "app.log.5". The file being + written to is always "app.log" - when it gets filled up, it is closed + and renamed to "app.log.1", and if files "app.log.1", "app.log.2" etc. + exist, then they are renamed to "app.log.2", "app.log.3" etc. + respectively. + + If maxBytes is zero, rollover never occurs. + + On Windows, it is not possible to rename a file that is currently opened + by another process. This means that it is not possible to rotate the + log files if multiple processes is using the same log file. In this + case, the current log file will continue to grow until the rotation can + be completed successfully. In order for rotation to be possible, all of + the other processes need to close the file first. A mechanism, called + "degraded" mode, has been created for this scenario. 
In degraded mode, + the log file is closed after each log message is written. So once all + processes have entered degraded mode, the next rotate log attempt should + be successful and then normal logging can be resumed. + + This log handler assumes that all concurrent processes logging to a + single file will are using only this class, and that the exact same + parameters are provided to each instance of this class. If, for + example, two different processes are using this class, but with + different values for 'maxBytes' or 'backupCount', then odd behavior is + expected. The same is true if this class is used by one application, but + the RotatingFileHandler is used by another. + + NOTE: You should always provide 'filename' as an absolute path, since + this class will need to re-open the file during rotation. If your + application call os.chdir() then subsequent log files could be created + in the wrong directory. + """ + # The question of absolute paths: I'm not sure what the 'right thing' is + # to do here. RotatingFileHander simply ignores this possibility. I was + # going call os.path.abspath(), but that potentially limits uses. For + # example, on Linux (any posix system?) you can rename a directory of a + # running app, and the app wouldn't notice as long as it only opens new + # files using relative paths. But since that's not a "normal" thing to + # do, and having an app call os.chdir() is a much more likely scenario + # that should be supported. For the moment, we are just going to warn + # the user if they provide a relative path and do some other voodoo + # logic that you'll just have to review for yourself. + + # if the given filename contains no path, we make an absolute path + if not os.path.isabs(filename): + if FORCE_ABSOLUTE_PATH or \ + not os.path.split(filename)[0]: + filename = os.path.abspath(filename) + elif not supress_abs_warn: + from warnings import warn + warn("The given 'filename' should be an absolute path. If your " + "application calls os.chdir(), your logs may get messed up. " + "Use 'supress_abs_warn=True' to hide this message.") + try: + BaseRotatingHandler.__init__(self, filename, mode, encoding) + except TypeError: # Due to a different logging release without encoding support (Python 2.4.1 and earlier?) + BaseRotatingHandler.__init__(self, filename, mode) + self.encoding = encoding + + self._rotateFailed = False + self.maxBytes = maxBytes + self.backupCount = backupCount + # Prevent multiple extensions on the lock file (Only handles the normal "*.log" case.) + if filename.endswith(".log"): + lock_file = filename[:-4] + else: + lock_file = filename + self.stream_lock = open(lock_file + ".lock", "w") + + # For debug mode, swap out the "_degrade()" method with a more a verbose one. + if debug: + self._degrade = self._degrade_debug + + def _openFile(self, mode): + if self.encoding: + self.stream = codecs.open(self.baseFilename, mode, self.encoding) + else: + self.stream = open(self.baseFilename, mode) + + def acquire(self): + """ Acquire thread and file locks. Also re-opening log file when running + in 'degraded' mode. """ + # handle thread lock + Handler.acquire(self) + lock(self.stream_lock, LOCK_EX) + if self.stream.closed: + self._openFile(self.mode) + + def release(self): + """ Release file and thread locks. Flush stream and take care of closing + stream in 'degraded' mode. 
""" + try: + self.stream.flush() + if self._rotateFailed: + self.stream.close() + finally: + try: + unlock(self.stream_lock) + finally: + # release thread lock + Handler.release(self) + + def close(self): + """ + Closes the stream. + """ + if not self.stream.closed: + self.stream.flush() + self.stream.close() + Handler.close(self) + + def flush(self): + """ flush(): Do nothing. + + Since a flush is issued in release(), we don't do it here. To do a flush + here, it would be necessary to re-lock everything, and it is just easier + and cleaner to do it all in release(), rather than requiring two lock + ops per handle() call. + + Doing a flush() here would also introduces a window of opportunity for + another process to write to the log file in between calling + stream.write() and stream.flush(), which seems like a bad thing. """ + pass + + def _degrade(self, degrade, msg, *args): + """ Set degrade mode or not. Ignore msg. """ + self._rotateFailed = degrade + del msg, args # avoid pychecker warnings + + def _degrade_debug(self, degrade, msg, *args): + """ A more colorful version of _degade(). (This is enabled by passing + "debug=True" at initialization). + """ + if degrade: + if not self._rotateFailed: + sys.stderr.write("Degrade mode - ENTERING - (pid=%d) %s\n" % + (os.getpid(), msg % args)) + self._rotateFailed = True + else: + if self._rotateFailed: + sys.stderr.write("Degrade mode - EXITING - (pid=%d) %s\n" % + (os.getpid(), msg % args)) + self._rotateFailed = False + + def doRollover(self): + """ + Do a rollover, as described in __init__(). + """ + if self.backupCount <= 0: + # Don't keep any backups, just overwrite the existing backup file + # Locking doesn't much matter here; since we are overwriting it anyway + self.stream.close() + self._openFile("w") + return + self.stream.close() + try: + # Attempt to rename logfile to tempname: There is a slight race-condition here, but it seems unavoidable + tmpname = None + while not tmpname or os.path.exists(tmpname): + tmpname = "%s.rotate.%08d" % (self.baseFilename, randint(0,99999999)) + try: + # Do a rename test to determine if we can successfully rename the log file + os.rename(self.baseFilename, tmpname) + except (IOError, OSError): + exc_value = sys.exc_info()[1] + self._degrade(True, "rename failed. File in use? " + "exception=%s", exc_value) + return + + # Q: Is there some way to protect this code from a KeboardInterupt? + # This isn't necessarily a data loss issue, but it certainly would + # break the rotation process during my stress testing. + + # There is currently no mechanism in place to handle the situation + # where one of these log files cannot be renamed. (Example, user + # opens "logfile.3" in notepad) + for i in range(self.backupCount - 1, 0, -1): + sfn = "%s.%d" % (self.baseFilename, i) + dfn = "%s.%d" % (self.baseFilename, i + 1) + if os.path.exists(sfn): + #print "%s -> %s" % (sfn, dfn) + if os.path.exists(dfn): + os.remove(dfn) + os.rename(sfn, dfn) + dfn = self.baseFilename + ".1" + if os.path.exists(dfn): + os.remove(dfn) + os.rename(tmpname, dfn) + #print "%s -> %s" % (self.baseFilename, dfn) + self._degrade(False, "Rotation completed") + finally: + self._openFile(self.mode) + + def shouldRollover(self, record): + """ + Determine if rollover should occur. + + For those that are keeping track. This differs from the standard + library's RotatingLogHandler class. Because there is no promise to keep + the file size under maxBytes we ignore the length of the current record. 
+ """ + del record # avoid pychecker warnings + if self._shouldRollover(): + # if some other process already did the rollover we might + # checked log.1, so we reopen the stream and check again on + # the right log file + self.stream.close() + self._openFile(self.mode) + return self._shouldRollover() + return False + + def _shouldRollover(self): + if self.maxBytes > 0: # are we rolling over? + self.stream.seek(0, 2) #due to non-posix-compliant Windows feature + if self.stream.tell() >= self.maxBytes: + return True + else: + self._degrade(False, "Rotation done or not needed at this time") + return False + + +# Publish this class to the "logging.handlers" module so that it can be use +# from a logging config file via logging.config.fileConfig(). +import logging.handlers +logging.handlers.ConcurrentRotatingFileHandler = ConcurrentRotatingFileHandler diff --git a/libs/cloghandler/portalocker.py b/libs/cloghandler/portalocker.py new file mode 100644 index 0000000..67a01b6 --- /dev/null +++ b/libs/cloghandler/portalocker.py @@ -0,0 +1,158 @@ +# portalocker.py - Cross-platform (posix/nt) API for flock-style file locking. +# Requires python 1.5.2 or better. +"""Cross-platform (posix/nt) API for flock-style file locking. + +Synopsis: + + import portalocker + file = open("somefile", "r+") + portalocker.lock(file, portalocker.LOCK_EX) + file.seek(12) + file.write("foo") + file.close() + +If you know what you're doing, you may choose to + + portalocker.unlock(file) + +before closing the file, but why? + +Methods: + + lock( file, flags ) + unlock( file ) + +Constants: + + LOCK_EX + LOCK_SH + LOCK_NB + +Exceptions: + + LockException + +Notes: + +For the 'nt' platform, this module requires the Python Extensions for Windows. +Be aware that this may not work as expected on Windows 95/98/ME. + +History: + +I learned the win32 technique for locking files from sample code +provided by John Nielsen in the documentation +that accompanies the win32 modules. + +Author: Jonathan Feinberg , + Lowell Alleman +Version: $Id: portalocker.py 5488 2008-05-21 20:49:38Z lowell $ + + +http://aspn.activestate.com/ASPN/Cookbook/Python/Recipe/65203 +""" + + +__all__ = [ + "lock", + "unlock", + "LOCK_EX", + "LOCK_SH", + "LOCK_NB", + "LockException", +] + +import os +from types import IntType + +class LockException(Exception): + # Error codes: + LOCK_FAILED = 1 + +if os.name == 'nt': + import win32con + import win32file + import pywintypes + LOCK_EX = win32con.LOCKFILE_EXCLUSIVE_LOCK + LOCK_SH = 0 # the default + LOCK_NB = win32con.LOCKFILE_FAIL_IMMEDIATELY + # is there any reason not to reuse the following structure? + __overlapped = pywintypes.OVERLAPPED() +elif os.name == 'posix': + import fcntl + LOCK_EX = fcntl.LOCK_EX + LOCK_SH = fcntl.LOCK_SH + LOCK_NB = fcntl.LOCK_NB +else: + raise RuntimeError, "PortaLocker only defined for nt and posix platforms" + + + +def _getfd(file): + """ Get a file-descriptor from a file object or file-descriptor. """ + if hasattr(file, "fileno"): + return file.fileno() + elif type(file) == IntType: + return file + else: + raise TypeError("File object or file descriptor required, but %s " + "was provided." 
% type(file)) + + +if os.name == 'nt': + def lock(file, flags): + hfile = win32file._get_osfhandle(_getfd(file)) + try: + win32file.LockFileEx(hfile, flags, 0, -0x10000, __overlapped) + except pywintypes.error, exc_value: + # error: (33, 'LockFileEx', 'The process cannot access the file because another process has locked a portion of the file.') + if exc_value[0] == 33: + raise LockException(LockException.LOCK_FAILED, exc_value[2]) + else: + # Q: Are there exceptions/codes we should be dealing with here? + raise + + def unlock(file): + hfile = win32file._get_osfhandle(_getfd(file)) + try: + win32file.UnlockFileEx(hfile, 0, -0x10000, __overlapped) + except pywintypes.error, exc_value: + if exc_value[0] == 158: + # error: (158, 'UnlockFileEx', 'The segment is already unlocked.') + # To match the 'posix' implementation, silently ignore this error + pass + else: + # Q: Are there exceptions/codes we should be dealing with here? + raise + +elif os.name == 'posix': + def lock(file, flags): + try: + fcntl.flock(_getfd(file), flags) + except IOError, exc_value: + # IOError: [Errno 11] Resource temporarily unavailable + if exc_value[0] == 11: + raise LockException(LockException.LOCK_FAILED, exc_value[1]) + else: + raise + + def unlock(file): + fcntl.flock(_getfd(file), fcntl.LOCK_UN) + + + +if __name__ == '__main__': + from time import time, strftime, localtime + import sys + import portalocker + + log = open('log.txt', "a+") + portalocker.lock(log, portalocker.LOCK_EX) + + timestamp = strftime("%m/%d/%Y %H:%M:%S\n", localtime(time())) + log.write( timestamp ) + + print "Wrote lines. Hit enter to release lock." + dummy = sys.stdin.readline() + + log.close() + From 31cd0f05ba8e80dc79c8531b498db0bd904c359d Mon Sep 17 00:00:00 2001 From: Ruud Date: Sun, 15 Jul 2012 14:34:41 +0200 Subject: [PATCH 08/24] Revert "Implement filelocking in logging rotation handler. fix #587" This reverts commit a8c82a40ae425fa196977167e876546f7130e757. 
--- couchpotato/runner.py | 4 +- libs/cloghandler/__init__.py | 0 libs/cloghandler/cloghandler.py | 326 ---------------------------------------- libs/cloghandler/portalocker.py | 158 ------------------- 4 files changed, 2 insertions(+), 486 deletions(-) delete mode 100644 libs/cloghandler/__init__.py delete mode 100644 libs/cloghandler/cloghandler.py delete mode 100644 libs/cloghandler/portalocker.py diff --git a/couchpotato/runner.py b/couchpotato/runner.py index 47c6659..31a8ce3 100644 --- a/couchpotato/runner.py +++ b/couchpotato/runner.py @@ -1,9 +1,9 @@ from argparse import ArgumentParser -from cloghandler.cloghandler import ConcurrentRotatingFileHandler from couchpotato import web from couchpotato.api import api, NonBlockHandler from couchpotato.core.event import fireEventAsync, fireEvent from couchpotato.core.helpers.variable import getDataDir, tryInt +from logging import handlers from tornado import autoreload from tornado.httpserver import HTTPServer from tornado.ioloop import IOLoop @@ -156,7 +156,7 @@ def runCouchPotato(options, base_path, args, data_dir = None, log_dir = None, En logger.addHandler(hdlr) # To file - hdlr2 = ConcurrentRotatingFileHandler(Env.get('log_path'), 'a', 500000, 10) + hdlr2 = handlers.RotatingFileHandler(Env.get('log_path'), 'a', 500000, 10) hdlr2.setFormatter(formatter) logger.addHandler(hdlr2) diff --git a/libs/cloghandler/__init__.py b/libs/cloghandler/__init__.py deleted file mode 100644 index e69de29..0000000 diff --git a/libs/cloghandler/cloghandler.py b/libs/cloghandler/cloghandler.py deleted file mode 100644 index 50debe3..0000000 --- a/libs/cloghandler/cloghandler.py +++ /dev/null @@ -1,326 +0,0 @@ -# Copyright 2008 Lowell Alleman -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may not -# use this file except in compliance with the License. You may obtain a copy -# of the License at http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. -""" cloghandler.py: A smart replacement for the standard RotatingFileHandler - -ConcurrentRotatingFileHandler: This class is a log handler which is a drop-in -replacement for the python standard log handler 'RotateFileHandler', the primary -difference being that this handler will continue to write to the same file if -the file cannot be rotated for some reason, whereas the RotatingFileHandler will -strictly adhere to the maximum file size. Unfortunately, if you are using the -RotatingFileHandler on Windows, you will find that once an attempted rotation -fails, all subsequent log messages are dropped. The other major advantage of -this module is that multiple processes can safely write to a single log file. - -To put it another way: This module's top priority is preserving your log -records, whereas the standard library attempts to limit disk usage, which can -potentially drop log messages. If you are trying to determine which module to -use, there are number of considerations: What is most important: strict disk -space usage or preservation of log messages? What OSes are you supporting? Can -you afford to have processes blocked by file locks? - -Concurrent access is handled by using file locks, which should ensure that log -messages are not dropped or clobbered. 
This means that a file lock is acquired -and released for every log message that is written to disk. (On Windows, you may -also run into a temporary situation where the log file must be opened and closed -for each log message.) This can have potentially performance implications. In my -testing, performance was more than adequate, but if you need a high-volume or -low-latency solution, I suggest you look elsewhere. - -This module currently only support the 'nt' and 'posix' platforms due to the -usage of the portalocker module. I do not have access to any other platforms -for testing, patches are welcome. - -See the README file for an example usage of this module. - -""" - - -__version__ = "$Id: cloghandler.py 6175 2009-11-02 18:40:35Z lowell $" -__author__ = "Lowell Alleman" -__all__ = [ - "ConcurrentRotatingFileHandler", -] - - -import os -import sys -from random import randint -from logging import Handler -from logging.handlers import BaseRotatingHandler - -try: - import codecs -except ImportError: - codecs = None - - - -# Question/TODO: Should we have a fallback mode if we can't load portalocker / -# we should still be better off than with the standard RotattingFileHandler -# class, right? We do some rename checking... that should prevent some file -# clobbering that the builtin class allows. - -# sibling module than handles all the ugly platform-specific details of file locking -from portalocker import lock, unlock, LOCK_EX, LOCK_NB, LockException - - -# A client can set this to true to automatically convert relative paths to -# absolute paths (which will also hide the absolute path warnings) -FORCE_ABSOLUTE_PATH = False - - -class ConcurrentRotatingFileHandler(BaseRotatingHandler): - """ - Handler for logging to a set of files, which switches from one file to the - next when the current file reaches a certain size. Multiple processes can - write to the log file concurrently, but this may mean that the file will - exceed the given size. - """ - def __init__(self, filename, mode='a', maxBytes=0, backupCount=0, - encoding=None, debug=True, supress_abs_warn=False): - """ - Open the specified file and use it as the stream for logging. - - By default, the file grows indefinitely. You can specify particular - values of maxBytes and backupCount to allow the file to rollover at - a predetermined size. - - Rollover occurs whenever the current log file is nearly maxBytes in - length. If backupCount is >= 1, the system will successively create - new files with the same pathname as the base file, but with extensions - ".1", ".2" etc. appended to it. For example, with a backupCount of 5 - and a base file name of "app.log", you would get "app.log", - "app.log.1", "app.log.2", ... through to "app.log.5". The file being - written to is always "app.log" - when it gets filled up, it is closed - and renamed to "app.log.1", and if files "app.log.1", "app.log.2" etc. - exist, then they are renamed to "app.log.2", "app.log.3" etc. - respectively. - - If maxBytes is zero, rollover never occurs. - - On Windows, it is not possible to rename a file that is currently opened - by another process. This means that it is not possible to rotate the - log files if multiple processes is using the same log file. In this - case, the current log file will continue to grow until the rotation can - be completed successfully. In order for rotation to be possible, all of - the other processes need to close the file first. A mechanism, called - "degraded" mode, has been created for this scenario. 
In degraded mode, - the log file is closed after each log message is written. So once all - processes have entered degraded mode, the next rotate log attempt should - be successful and then normal logging can be resumed. - - This log handler assumes that all concurrent processes logging to a - single file will are using only this class, and that the exact same - parameters are provided to each instance of this class. If, for - example, two different processes are using this class, but with - different values for 'maxBytes' or 'backupCount', then odd behavior is - expected. The same is true if this class is used by one application, but - the RotatingFileHandler is used by another. - - NOTE: You should always provide 'filename' as an absolute path, since - this class will need to re-open the file during rotation. If your - application call os.chdir() then subsequent log files could be created - in the wrong directory. - """ - # The question of absolute paths: I'm not sure what the 'right thing' is - # to do here. RotatingFileHander simply ignores this possibility. I was - # going call os.path.abspath(), but that potentially limits uses. For - # example, on Linux (any posix system?) you can rename a directory of a - # running app, and the app wouldn't notice as long as it only opens new - # files using relative paths. But since that's not a "normal" thing to - # do, and having an app call os.chdir() is a much more likely scenario - # that should be supported. For the moment, we are just going to warn - # the user if they provide a relative path and do some other voodoo - # logic that you'll just have to review for yourself. - - # if the given filename contains no path, we make an absolute path - if not os.path.isabs(filename): - if FORCE_ABSOLUTE_PATH or \ - not os.path.split(filename)[0]: - filename = os.path.abspath(filename) - elif not supress_abs_warn: - from warnings import warn - warn("The given 'filename' should be an absolute path. If your " - "application calls os.chdir(), your logs may get messed up. " - "Use 'supress_abs_warn=True' to hide this message.") - try: - BaseRotatingHandler.__init__(self, filename, mode, encoding) - except TypeError: # Due to a different logging release without encoding support (Python 2.4.1 and earlier?) - BaseRotatingHandler.__init__(self, filename, mode) - self.encoding = encoding - - self._rotateFailed = False - self.maxBytes = maxBytes - self.backupCount = backupCount - # Prevent multiple extensions on the lock file (Only handles the normal "*.log" case.) - if filename.endswith(".log"): - lock_file = filename[:-4] - else: - lock_file = filename - self.stream_lock = open(lock_file + ".lock", "w") - - # For debug mode, swap out the "_degrade()" method with a more a verbose one. - if debug: - self._degrade = self._degrade_debug - - def _openFile(self, mode): - if self.encoding: - self.stream = codecs.open(self.baseFilename, mode, self.encoding) - else: - self.stream = open(self.baseFilename, mode) - - def acquire(self): - """ Acquire thread and file locks. Also re-opening log file when running - in 'degraded' mode. """ - # handle thread lock - Handler.acquire(self) - lock(self.stream_lock, LOCK_EX) - if self.stream.closed: - self._openFile(self.mode) - - def release(self): - """ Release file and thread locks. Flush stream and take care of closing - stream in 'degraded' mode. 
""" - try: - self.stream.flush() - if self._rotateFailed: - self.stream.close() - finally: - try: - unlock(self.stream_lock) - finally: - # release thread lock - Handler.release(self) - - def close(self): - """ - Closes the stream. - """ - if not self.stream.closed: - self.stream.flush() - self.stream.close() - Handler.close(self) - - def flush(self): - """ flush(): Do nothing. - - Since a flush is issued in release(), we don't do it here. To do a flush - here, it would be necessary to re-lock everything, and it is just easier - and cleaner to do it all in release(), rather than requiring two lock - ops per handle() call. - - Doing a flush() here would also introduces a window of opportunity for - another process to write to the log file in between calling - stream.write() and stream.flush(), which seems like a bad thing. """ - pass - - def _degrade(self, degrade, msg, *args): - """ Set degrade mode or not. Ignore msg. """ - self._rotateFailed = degrade - del msg, args # avoid pychecker warnings - - def _degrade_debug(self, degrade, msg, *args): - """ A more colorful version of _degade(). (This is enabled by passing - "debug=True" at initialization). - """ - if degrade: - if not self._rotateFailed: - sys.stderr.write("Degrade mode - ENTERING - (pid=%d) %s\n" % - (os.getpid(), msg % args)) - self._rotateFailed = True - else: - if self._rotateFailed: - sys.stderr.write("Degrade mode - EXITING - (pid=%d) %s\n" % - (os.getpid(), msg % args)) - self._rotateFailed = False - - def doRollover(self): - """ - Do a rollover, as described in __init__(). - """ - if self.backupCount <= 0: - # Don't keep any backups, just overwrite the existing backup file - # Locking doesn't much matter here; since we are overwriting it anyway - self.stream.close() - self._openFile("w") - return - self.stream.close() - try: - # Attempt to rename logfile to tempname: There is a slight race-condition here, but it seems unavoidable - tmpname = None - while not tmpname or os.path.exists(tmpname): - tmpname = "%s.rotate.%08d" % (self.baseFilename, randint(0,99999999)) - try: - # Do a rename test to determine if we can successfully rename the log file - os.rename(self.baseFilename, tmpname) - except (IOError, OSError): - exc_value = sys.exc_info()[1] - self._degrade(True, "rename failed. File in use? " - "exception=%s", exc_value) - return - - # Q: Is there some way to protect this code from a KeboardInterupt? - # This isn't necessarily a data loss issue, but it certainly would - # break the rotation process during my stress testing. - - # There is currently no mechanism in place to handle the situation - # where one of these log files cannot be renamed. (Example, user - # opens "logfile.3" in notepad) - for i in range(self.backupCount - 1, 0, -1): - sfn = "%s.%d" % (self.baseFilename, i) - dfn = "%s.%d" % (self.baseFilename, i + 1) - if os.path.exists(sfn): - #print "%s -> %s" % (sfn, dfn) - if os.path.exists(dfn): - os.remove(dfn) - os.rename(sfn, dfn) - dfn = self.baseFilename + ".1" - if os.path.exists(dfn): - os.remove(dfn) - os.rename(tmpname, dfn) - #print "%s -> %s" % (self.baseFilename, dfn) - self._degrade(False, "Rotation completed") - finally: - self._openFile(self.mode) - - def shouldRollover(self, record): - """ - Determine if rollover should occur. - - For those that are keeping track. This differs from the standard - library's RotatingLogHandler class. Because there is no promise to keep - the file size under maxBytes we ignore the length of the current record. 
- """ - del record # avoid pychecker warnings - if self._shouldRollover(): - # if some other process already did the rollover we might - # checked log.1, so we reopen the stream and check again on - # the right log file - self.stream.close() - self._openFile(self.mode) - return self._shouldRollover() - return False - - def _shouldRollover(self): - if self.maxBytes > 0: # are we rolling over? - self.stream.seek(0, 2) #due to non-posix-compliant Windows feature - if self.stream.tell() >= self.maxBytes: - return True - else: - self._degrade(False, "Rotation done or not needed at this time") - return False - - -# Publish this class to the "logging.handlers" module so that it can be use -# from a logging config file via logging.config.fileConfig(). -import logging.handlers -logging.handlers.ConcurrentRotatingFileHandler = ConcurrentRotatingFileHandler diff --git a/libs/cloghandler/portalocker.py b/libs/cloghandler/portalocker.py deleted file mode 100644 index 67a01b6..0000000 --- a/libs/cloghandler/portalocker.py +++ /dev/null @@ -1,158 +0,0 @@ -# portalocker.py - Cross-platform (posix/nt) API for flock-style file locking. -# Requires python 1.5.2 or better. -"""Cross-platform (posix/nt) API for flock-style file locking. - -Synopsis: - - import portalocker - file = open("somefile", "r+") - portalocker.lock(file, portalocker.LOCK_EX) - file.seek(12) - file.write("foo") - file.close() - -If you know what you're doing, you may choose to - - portalocker.unlock(file) - -before closing the file, but why? - -Methods: - - lock( file, flags ) - unlock( file ) - -Constants: - - LOCK_EX - LOCK_SH - LOCK_NB - -Exceptions: - - LockException - -Notes: - -For the 'nt' platform, this module requires the Python Extensions for Windows. -Be aware that this may not work as expected on Windows 95/98/ME. - -History: - -I learned the win32 technique for locking files from sample code -provided by John Nielsen in the documentation -that accompanies the win32 modules. - -Author: Jonathan Feinberg , - Lowell Alleman -Version: $Id: portalocker.py 5488 2008-05-21 20:49:38Z lowell $ - - -http://aspn.activestate.com/ASPN/Cookbook/Python/Recipe/65203 -""" - - -__all__ = [ - "lock", - "unlock", - "LOCK_EX", - "LOCK_SH", - "LOCK_NB", - "LockException", -] - -import os -from types import IntType - -class LockException(Exception): - # Error codes: - LOCK_FAILED = 1 - -if os.name == 'nt': - import win32con - import win32file - import pywintypes - LOCK_EX = win32con.LOCKFILE_EXCLUSIVE_LOCK - LOCK_SH = 0 # the default - LOCK_NB = win32con.LOCKFILE_FAIL_IMMEDIATELY - # is there any reason not to reuse the following structure? - __overlapped = pywintypes.OVERLAPPED() -elif os.name == 'posix': - import fcntl - LOCK_EX = fcntl.LOCK_EX - LOCK_SH = fcntl.LOCK_SH - LOCK_NB = fcntl.LOCK_NB -else: - raise RuntimeError, "PortaLocker only defined for nt and posix platforms" - - - -def _getfd(file): - """ Get a file-descriptor from a file object or file-descriptor. """ - if hasattr(file, "fileno"): - return file.fileno() - elif type(file) == IntType: - return file - else: - raise TypeError("File object or file descriptor required, but %s " - "was provided." 
% type(file)) - - -if os.name == 'nt': - def lock(file, flags): - hfile = win32file._get_osfhandle(_getfd(file)) - try: - win32file.LockFileEx(hfile, flags, 0, -0x10000, __overlapped) - except pywintypes.error, exc_value: - # error: (33, 'LockFileEx', 'The process cannot access the file because another process has locked a portion of the file.') - if exc_value[0] == 33: - raise LockException(LockException.LOCK_FAILED, exc_value[2]) - else: - # Q: Are there exceptions/codes we should be dealing with here? - raise - - def unlock(file): - hfile = win32file._get_osfhandle(_getfd(file)) - try: - win32file.UnlockFileEx(hfile, 0, -0x10000, __overlapped) - except pywintypes.error, exc_value: - if exc_value[0] == 158: - # error: (158, 'UnlockFileEx', 'The segment is already unlocked.') - # To match the 'posix' implementation, silently ignore this error - pass - else: - # Q: Are there exceptions/codes we should be dealing with here? - raise - -elif os.name == 'posix': - def lock(file, flags): - try: - fcntl.flock(_getfd(file), flags) - except IOError, exc_value: - # IOError: [Errno 11] Resource temporarily unavailable - if exc_value[0] == 11: - raise LockException(LockException.LOCK_FAILED, exc_value[1]) - else: - raise - - def unlock(file): - fcntl.flock(_getfd(file), fcntl.LOCK_UN) - - - -if __name__ == '__main__': - from time import time, strftime, localtime - import sys - import portalocker - - log = open('log.txt', "a+") - portalocker.lock(log, portalocker.LOCK_EX) - - timestamp = strftime("%m/%d/%Y %H:%M:%S\n", localtime(time())) - log.write( timestamp ) - - print "Wrote lines. Hit enter to release lock." - dummy = sys.stdin.readline() - - log.close() - From 897a7ea122bbbf9bbbfa2d6207ada503e01bb3c2 Mon Sep 17 00:00:00 2001 From: Ruud Date: Sun, 15 Jul 2012 16:25:59 +0200 Subject: [PATCH 09/24] Unicode errors. 
fix #600 #597 --- couchpotato/core/plugins/manage/main.py | 3 ++- couchpotato/core/plugins/release/main.py | 3 ++- couchpotato/core/plugins/renamer/main.py | 3 ++- 3 files changed, 6 insertions(+), 3 deletions(-) diff --git a/couchpotato/core/plugins/manage/main.py b/couchpotato/core/plugins/manage/main.py index bf739a6..aa46d70 100644 --- a/couchpotato/core/plugins/manage/main.py +++ b/couchpotato/core/plugins/manage/main.py @@ -1,5 +1,6 @@ from couchpotato.api import addApiView from couchpotato.core.event import fireEvent, addEvent, fireEventAsync +from couchpotato.core.helpers.encoding import ss from couchpotato.core.helpers.request import jsonified, getParam from couchpotato.core.logger import CPLog from couchpotato.core.plugins.base import Plugin @@ -77,7 +78,7 @@ class Manage(Plugin): for release in done_movie.get('releases', []): for release_file in release.get('files', []): # Remove release not available anymore - if not os.path.isfile(release_file['path']): + if not os.path.isfile(ss(release_file['path'])): fireEvent('release.clean', release['id']) break diff --git a/couchpotato/core/plugins/release/main.py b/couchpotato/core/plugins/release/main.py index ba79d9f..0abc065 100644 --- a/couchpotato/core/plugins/release/main.py +++ b/couchpotato/core/plugins/release/main.py @@ -1,6 +1,7 @@ from couchpotato import get_session from couchpotato.api import addApiView from couchpotato.core.event import fireEvent, addEvent +from couchpotato.core.helpers.encoding import ss from couchpotato.core.helpers.request import getParam, jsonified from couchpotato.core.logger import CPLog from couchpotato.core.plugins.base import Plugin @@ -131,7 +132,7 @@ class Release(Plugin): rel = db.query(Relea).filter_by(id = id).first() if rel: for release_file in rel.files: - if not os.path.isfile(release_file.path): + if not os.path.isfile(ss(release_file.path)): db.delete(release_file) db.commit() diff --git a/couchpotato/core/plugins/renamer/main.py b/couchpotato/core/plugins/renamer/main.py index e75ab5c..4c6e6b8 100644 --- a/couchpotato/core/plugins/renamer/main.py +++ b/couchpotato/core/plugins/renamer/main.py @@ -1,7 +1,7 @@ from couchpotato import get_session from couchpotato.api import addApiView from couchpotato.core.event import addEvent, fireEvent, fireEventAsync -from couchpotato.core.helpers.encoding import toUnicode +from couchpotato.core.helpers.encoding import toUnicode, ss from couchpotato.core.helpers.request import jsonified from couchpotato.core.helpers.variable import getExt, mergeDicts, getTitle from couchpotato.core.logger import CPLog @@ -419,6 +419,7 @@ class Renamer(Plugin): raise def moveFile(self, old, dest): + dest = ss(dest) try: shutil.move(old, dest) From 000c5fa591f0e7e58eb12d2d6c356f99a74eabf5 Mon Sep 17 00:00:00 2001 From: Ruud Date: Mon, 16 Jul 2012 00:44:18 +0200 Subject: [PATCH 10/24] Movies.io userscript and automation --- .../providers/automation/movies_io/__init__.py | 33 +++++++++++ .../core/providers/automation/movies_io/main.py | 69 ++++++++++++++++++++++ .../core/providers/userscript/moviesio/__init__.py | 6 ++ .../core/providers/userscript/moviesio/main.py | 6 ++ 4 files changed, 114 insertions(+) create mode 100644 couchpotato/core/providers/automation/movies_io/__init__.py create mode 100644 couchpotato/core/providers/automation/movies_io/main.py create mode 100644 couchpotato/core/providers/userscript/moviesio/__init__.py create mode 100644 couchpotato/core/providers/userscript/moviesio/main.py diff --git 
a/couchpotato/core/providers/automation/movies_io/__init__.py b/couchpotato/core/providers/automation/movies_io/__init__.py new file mode 100644 index 0000000..0f998f6 --- /dev/null +++ b/couchpotato/core/providers/automation/movies_io/__init__.py @@ -0,0 +1,33 @@ +from .main import MoviesIO + +def start(): + return MoviesIO() + +config = [{ + 'name': 'moviesio', + 'groups': [ + { + 'tab': 'automation', + 'name': 'moviesio', + 'label': 'Movies.io', + 'description': 'Imports movies from Movies.io RSS watchlists', + 'options': [ + { + 'name': 'automation_enabled', + 'default': False, + 'type': 'enabler', + }, + { + 'name': 'automation_urls_use', + 'label': 'Use', + }, + { + 'name': 'automation_urls', + 'label': 'url', + 'type': 'combined', + 'combine': ['automation_urls_use', 'automation_urls'], + }, + ], + }, + ], +}] diff --git a/couchpotato/core/providers/automation/movies_io/main.py b/couchpotato/core/providers/automation/movies_io/main.py new file mode 100644 index 0000000..3a5a2f0 --- /dev/null +++ b/couchpotato/core/providers/automation/movies_io/main.py @@ -0,0 +1,69 @@ +from couchpotato.core.event import fireEvent +from couchpotato.core.helpers.rss import RSS +from couchpotato.core.helpers.variable import md5 +from couchpotato.core.logger import CPLog +from couchpotato.core.providers.automation.base import Automation +from couchpotato.environment import Env +from dateutil.parser import parse +from xml.etree.ElementTree import ParseError +import time +import traceback +import xml.etree.ElementTree as XMLTree + +log = CPLog(__name__) + + +class MoviesIO(Automation, RSS): + + interval = 1800 + + def getIMDBids(self): + + if self.isDisabled(): + return + + movies = [] + + enablers = self.conf('automation_urls_use').split(',') + + index = -1 + for rss_url in self.conf('automation_urls').split(','): + + index += 1 + if not enablers[index]: + continue + + prop_name = 'automation.moviesio.last_update.%s' % md5(rss_url) + last_update = float(Env.prop(prop_name, default = 0)) + + last_movie_added = 0 + try: + cache_key = 'imdb.rss.%s' % md5(rss_url) + + rss_data = self.getCache(cache_key, rss_url, headers = {'Referer': ''}) + data = XMLTree.fromstring(rss_data) + rss_movies = self.getElements(data, 'channel/item') + + for movie in rss_movies: + created = int(time.mktime(parse(self.getTextElement(movie, "pubDate")).timetuple())) + + if created > last_movie_added: + last_movie_added = created + if created <= last_update: + continue + + nameyear = fireEvent('scanner.name_year', self.getTextElement(movie, "title"), single = True) + imdb = self.search(nameyear.get('name'), nameyear.get('year'), imdb_only = True) + + if not imdb: + continue + + movies.append(imdb) + except ParseError: + log.debug('Failed loading Movies.io watchlist, probably empty: %s', (rss_url)) + except: + log.error('Failed loading Movies.io watchlist: %s %s', (rss_url, traceback.format_exc())) + + Env.prop(prop_name, last_movie_added) + + return movies diff --git a/couchpotato/core/providers/userscript/moviesio/__init__.py b/couchpotato/core/providers/userscript/moviesio/__init__.py new file mode 100644 index 0000000..473f847 --- /dev/null +++ b/couchpotato/core/providers/userscript/moviesio/__init__.py @@ -0,0 +1,6 @@ +from .main import MoviesIO + +def start(): + return MoviesIO() + +config = [] diff --git a/couchpotato/core/providers/userscript/moviesio/main.py b/couchpotato/core/providers/userscript/moviesio/main.py new file mode 100644 index 0000000..5dab618 --- /dev/null +++ 
b/couchpotato/core/providers/userscript/moviesio/main.py @@ -0,0 +1,6 @@ +from couchpotato.core.providers.userscript.base import UserscriptBase + + +class MoviesIO(UserscriptBase): + + includes = ['*://movies.io/m/*'] From b07a937fa7a2adb6ddb5d319c0cb7bc6999fdba0 Mon Sep 17 00:00:00 2001 From: Ruud Date: Mon, 16 Jul 2012 00:44:39 +0200 Subject: [PATCH 11/24] gzip requests --- couchpotato/core/plugins/base.py | 24 +++++++++++++++++------- 1 file changed, 17 insertions(+), 7 deletions(-) diff --git a/couchpotato/core/plugins/base.py b/couchpotato/core/plugins/base.py index 92f6f8c..32e4fab 100644 --- a/couchpotato/core/plugins/base.py +++ b/couchpotato/core/plugins/base.py @@ -1,3 +1,4 @@ +from StringIO import StringIO from couchpotato import addView from couchpotato.core.event import fireEvent, addEvent from couchpotato.core.helpers.encoding import tryUrlencode, simplifyString, ss @@ -9,6 +10,7 @@ from multipartpost import MultipartPostHandler from urlparse import urlparse import cookielib import glob +import gzip import math import os.path import re @@ -101,10 +103,10 @@ class Plugin(object): if not params: params = {} # Fill in some headers - if not headers.get('Referer'): - headers['Referer'] = urlparse(url).hostname - if not headers.get('User-Agent'): - headers['User-Agent'] = 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10.7; rv:10.0.2) Gecko/20100101 Firefox/10.0.2' + headers['Referer'] = headers.get('Referer', urlparse(url).hostname) + headers['Host'] = headers.get('Host', urlparse(url).hostname) + headers['User-Agent'] = headers.get('User-Agent', 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10.7; rv:10.0.2) Gecko/20100101 Firefox/10.0.2') + headers['Accept-encoding'] = headers.get('Accept-encoding', 'gzip') host = urlparse(url).hostname @@ -127,16 +129,24 @@ class Plugin(object): cookies = cookielib.CookieJar() opener = urllib2.build_opener(urllib2.HTTPCookieProcessor(cookies), MultipartPostHandler) - data = opener.open(request, timeout = timeout).read() + response = opener.open(request, timeout = timeout) else: log.info('Opening url: %s, params: %s', (url, [x for x in params.iterkeys()])) data = tryUrlencode(params) if len(params) > 0 else None request = urllib2.Request(url, data, headers) if opener: - data = opener.open(request, timeout = timeout).read() + response = opener.open(request, timeout = timeout) else: - data = urllib2.urlopen(request, timeout = timeout).read() + response = urllib2.urlopen(request, timeout = timeout) + + # unzip if needed + if response.info().get('Content-Encoding') == 'gzip': + buf = StringIO(response.read()) + f = gzip.GzipFile(fileobj = buf) + data = f.read() + else: + data = response.read() self.http_failed_request[host] = 0 except IOError: From ee21610bc3aba941d8c2c83911a75417176f379c Mon Sep 17 00:00:00 2001 From: Ruud Date: Mon, 16 Jul 2012 01:11:52 +0200 Subject: [PATCH 12/24] Cleanup automation providers --- couchpotato/core/providers/automation/base.py | 19 ++++++--------- .../core/providers/automation/bluray/main.py | 28 +++++----------------- .../core/providers/automation/kinepolis/main.py | 11 ++++----- .../providers/automation/movies_io/__init__.py | 2 +- 4 files changed, 18 insertions(+), 42 deletions(-) diff --git a/couchpotato/core/providers/automation/base.py b/couchpotato/core/providers/automation/base.py index df04b13..d3af263 100644 --- a/couchpotato/core/providers/automation/base.py +++ b/couchpotato/core/providers/automation/base.py @@ -27,36 +27,31 @@ class Automation(Plugin): return self.getIMDBids() - def search(self, name, year = None): 
From ee21610bc3aba941d8c2c83911a75417176f379c Mon Sep 17 00:00:00 2001
From: Ruud
Date: Mon, 16 Jul 2012 01:11:52 +0200
Subject: [PATCH 12/24] Cleanup automation providers

---
 couchpotato/core/providers/automation/base.py | 19 ++++++---------
 .../core/providers/automation/bluray/main.py | 28 +++++-----------------
 .../core/providers/automation/kinepolis/main.py | 11 ++++-----
 .../providers/automation/movies_io/__init__.py | 2 +-
 4 files changed, 18 insertions(+), 42 deletions(-)

diff --git a/couchpotato/core/providers/automation/base.py b/couchpotato/core/providers/automation/base.py
index df04b13..d3af263 100644
--- a/couchpotato/core/providers/automation/base.py
+++ b/couchpotato/core/providers/automation/base.py
@@ -27,36 +27,31 @@ class Automation(Plugin):

         return self.getIMDBids()

-    def search(self, name, year = None):
+    def search(self, name, year = None, imdb_only = False):
         result = fireEvent('movie.search', q = '%s %s' % (name, year if year else ''), limit = 1, merge = True)

         if len(result) > 0:
-            return result[0].get('imdb')
+            return result[0].get('imdb') if imdb_only else result[0]
         else:
             return None

     def isMinimalMovie(self, movie):
+        if not movie.get('rating'):
+            return False
+
         if movie['rating'] and movie['rating'].get('imdb'):
             movie['votes'] = movie['rating']['imdb'][1]
             movie['rating'] = movie['rating']['imdb'][0]

-        identifier = movie['imdb']
+
         for minimal_type in ['year', 'rating', 'votes']:
             type_value = movie.get(minimal_type, 0)
             type_min = self.getMinimal(minimal_type)
             if type_value < type_min:
-                log.info('%s too low for %s, need %s has %s', (minimal_type, identifier, type_min, type_value))
+                log.info('%s too low for %s, need %s has %s', (minimal_type, movie['imdb'], type_min, type_value))
                 return False

         return True

-    def getIMDBFromTitle(self, name, year = None):
-        result = fireEvent('movie.search', q = '%s %s' % (name, year), limit = 1, merge = True)
-
-        if len(result) > 0:
-            return result[0]
-        else:
-            return None
-
     def getMinimal(self, min_type):
         return Env.setting(min_type, 'automation')

diff --git a/couchpotato/core/providers/automation/bluray/main.py b/couchpotato/core/providers/automation/bluray/main.py
index c6d82eb..20f733b 100644
--- a/couchpotato/core/providers/automation/bluray/main.py
+++ b/couchpotato/core/providers/automation/bluray/main.py
@@ -1,5 +1,5 @@
 from couchpotato.core.helpers.rss import RSS
-from couchpotato.core.helpers.variable import md5
+from couchpotato.core.helpers.variable import md5, tryInt
 from couchpotato.core.logger import CPLog
 from couchpotato.core.providers.automation.base import Automation
 from couchpotato.environment import Env
@@ -19,8 +19,6 @@ class Bluray(Automation, RSS):
             return

         movies = []
-        RSSMovie = {'name': 'placeholder', 'year' : 'placeholder'}
-        RSSMovies = []

         cache_key = 'bluray.%s' % md5(self.rss_url)
         rss_data = self.getCache(cache_key, self.rss_url)
@@ -30,30 +28,16 @@ class Bluray(Automation, RSS):
             rss_movies = self.getElements(data, 'channel/item')

             for movie in rss_movies:
-                RSSMovie['name'] = self.getTextElement(movie, "title").lower().split("blu-ray")[0].strip("(").rstrip()
-                RSSMovie['year'] = self.getTextElement(movie, "description").split("|")[1].strip("(").strip()
+                name = self.getTextElement(movie, "title").lower().split("blu-ray")[0].strip("(").rstrip()
+                year = self.getTextElement(movie, "description").split("|")[1].strip("(").strip()

-                if not RSSMovie['name'].find("/") == -1: # make sure it is not a double movie release
+                if not name.find("/") == -1: # make sure it is not a double movie release
                     continue

-                if int(RSSMovie['year']) < Env.setting('year', 'automation'): #do year filtering
+                if tryInt(year) < self.getMinimal('year'):
                     continue

-                for test in RSSMovies:
-                    if test.values() == RSSMovie.values(): # make sure we did not already include it...
-                        break
-                else:
-                    log.info('Release found: %s.' % RSSMovie)
-                    RSSMovies.append(RSSMovie.copy())
-
-                if not RSSMovies:
-                    log.info('No movies found.')
-                    return
-
-                log.debug("Applying IMDB filter to found movies...")
-
-                for RSSMovie in RSSMovies:
-                    imdb = self.getIMDBFromTitle(RSSMovie['name'] + ' ' + RSSMovie['year'])
+                imdb = self.search(name, year)

                 if imdb:
                     if self.isMinimalMovie(imdb):

diff --git a/couchpotato/core/providers/automation/kinepolis/main.py b/couchpotato/core/providers/automation/kinepolis/main.py
index 031bb44..f6633af 100644
--- a/couchpotato/core/providers/automation/kinepolis/main.py
+++ b/couchpotato/core/providers/automation/kinepolis/main.py
@@ -19,7 +19,6 @@ class Kinepolis(Automation, RSS):
             return

         movies = []
-        RSSMovie = {'name': 'placeholder', 'year' : 'placeholder'}

         cache_key = 'kinepolis.%s' % md5(self.rss_url)
         rss_data = self.getCache(cache_key, self.rss_url)
@@ -29,14 +28,12 @@ class Kinepolis(Automation, RSS):
             rss_movies = self.getElements(data, 'channel/item')

             for movie in rss_movies:
-                RSSMovie['name'] = self.getTextElement(movie, "title")
-                currentYear = datetime.datetime.now().strftime("%Y")
-                RSSMovie['year'] = currentYear
+                name = self.getTextElement(movie, "title")
+                year = datetime.datetime.now().strftime("%Y")

-                log.debug('Release found: %s.', RSSMovie)
-                imdb = self.getIMDBFromTitle(RSSMovie['name'], RSSMovie['year'])
+                imdb = self.search(name, year)

-                if imdb:
+                if imdb and self.isMinimalMovie(imdb):
                     movies.append(imdb['imdb'])

         return movies

diff --git a/couchpotato/core/providers/automation/movies_io/__init__.py b/couchpotato/core/providers/automation/movies_io/__init__.py
index 0f998f6..5d997e9 100644
--- a/couchpotato/core/providers/automation/movies_io/__init__.py
+++ b/couchpotato/core/providers/automation/movies_io/__init__.py
@@ -9,7 +9,7 @@ config = [{
         {
             'tab': 'automation',
             'name': 'moviesio',
-            'label': 'Movies.io',
+            'label': 'Movies.IO',
             'description': 'Imports movies from Movies.io RSS watchlists',
             'options': [
                 {
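
With getIMDBFromTitle removed, every automation provider now funnels its candidates through search() and isMinimalMovie(), so the year/rating/votes thresholds are applied in one place. Roughly, the filter behaves like the sketch below; the function name and the example thresholds are placeholders, the real values come from Env.setting(..., 'automation'):

    def is_minimal_movie(movie, minimal):
        # 'rating' arrives as {'imdb': (rating, votes)} from the movie.info providers
        if not movie.get('rating'):
            return False

        if movie['rating'].get('imdb'):
            movie['votes'] = movie['rating']['imdb'][1]
            movie['rating'] = movie['rating']['imdb'][0]

        for key in ('year', 'rating', 'votes'):
            if movie.get(key, 0) < minimal.get(key, 0):
                return False
        return True

    # e.g. is_minimal_movie(result, {'year': 2010, 'rating': 6.0, 'votes': 1000})
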
From 4284bc20c35d38177023f530b65aad6d31ea7b81 Mon Sep 17 00:00:00 2001
From: Ruud
Date: Mon, 16 Jul 2012 01:21:40 +0200
Subject: [PATCH 13/24] Restart when updating manually

---
 couchpotato/core/_base/updater/main.py | 2 ++
 1 file changed, 2 insertions(+)

diff --git a/couchpotato/core/_base/updater/main.py b/couchpotato/core/_base/updater/main.py
index e52870a..2b53ba4 100644
--- a/couchpotato/core/_base/updater/main.py
+++ b/couchpotato/core/_base/updater/main.py
@@ -102,6 +102,8 @@ class Updater(Plugin):
             success = False
         else:
             success = self.updater.doUpdate()
+            if success:
+                fireEventAsync('app.restart')

         return jsonified({
             'success': success

From 125948cb947b37e0d1473bc6026c9d2c762a89f4 Mon Sep 17 00:00:00 2001
From: Ruud
Date: Mon, 16 Jul 2012 01:32:58 +0200
Subject: [PATCH 14/24] Exclude rottentomatoes movie subpage in userscript

---
 couchpotato/core/providers/userscript/rottentomatoes/main.py | 5 ++++-
 1 file changed, 4 insertions(+), 1 deletion(-)

diff --git a/couchpotato/core/providers/userscript/rottentomatoes/main.py b/couchpotato/core/providers/userscript/rottentomatoes/main.py
index cd869b8..c611779 100644
--- a/couchpotato/core/providers/userscript/rottentomatoes/main.py
+++ b/couchpotato/core/providers/userscript/rottentomatoes/main.py
@@ -4,7 +4,10 @@
 from couchpotato.core.providers.userscript.base import UserscriptBase

 class RottenTomatoes(UserscriptBase):

-    includes = ['*://www.rottentomatoes.com/m/*']
+    includes = ['*://www.rottentomatoes.com/m/*/']
+    excludes = ['*://www.rottentomatoes.com/m/*/*/']
+
+    version = 2

     def getMovie(self, url):
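
The tightened include plus the new exclude keep the userscript on movie pages such as /m/<title>/ while skipping sub-pages such as /m/<title>/trailers/. The actual matching is done by the userscript engine in the browser; fnmatch-style globbing only approximates it, but it is enough to illustrate the intent (should_run is an illustrative helper, not part of the codebase):

    from fnmatch import fnmatch

    includes = ['*://www.rottentomatoes.com/m/*/']
    excludes = ['*://www.rottentomatoes.com/m/*/*/']

    def should_run(url):
        # Movie pages match the include; sub-pages (trailers, cast, ...) hit the exclude
        if any(fnmatch(url, pattern) for pattern in excludes):
            return False
        return any(fnmatch(url, pattern) for pattern in includes)

    # should_run('http://www.rottentomatoes.com/m/some_movie/')          -> True
    # should_run('http://www.rottentomatoes.com/m/some_movie/trailers/') -> False
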
From 5e481623fc698e10e3199231cffbef7ae60114b2 Mon Sep 17 00:00:00 2001
From: Ruud
Date: Thu, 2 Aug 2012 21:54:47 +0200
Subject: [PATCH 15/24] Remove right path sep. fix #646

---
 couchpotato/core/downloaders/transmission/main.py | 3 ++-
 1 file changed, 2 insertions(+), 1 deletion(-)

diff --git a/couchpotato/core/downloaders/transmission/main.py b/couchpotato/core/downloaders/transmission/main.py
index 1819f31..5bdd65b 100644
--- a/couchpotato/core/downloaders/transmission/main.py
+++ b/couchpotato/core/downloaders/transmission/main.py
@@ -4,6 +4,7 @@ from couchpotato.core.helpers.encoding import isInt
 from couchpotato.core.logger import CPLog
 import httplib
 import json
+import os.path
 import re
 import urllib2

@@ -31,7 +32,7 @@ class Transmission(Downloader):
         # Set parameters for Transmission
         params = {
             'paused': self.conf('paused', default = 0),
-            'download-dir': self.conf('directory', default = None)
+            'download-dir': self.conf('directory', default = '').rstrip(os.path.sep)
         }

         torrent_params = {

From 196b27092eb2e1014feba7f4e43f669bd90117d3 Mon Sep 17 00:00:00 2001
From: Ruud
Date: Thu, 2 Aug 2012 22:14:35 +0200
Subject: [PATCH 16/24] Use proper age param for nzbmatrix. fix #620

---
 couchpotato/core/providers/nzb/nzbmatrix/main.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/couchpotato/core/providers/nzb/nzbmatrix/main.py b/couchpotato/core/providers/nzb/nzbmatrix/main.py
index 203ada1..c6c5be9 100644
--- a/couchpotato/core/providers/nzb/nzbmatrix/main.py
+++ b/couchpotato/core/providers/nzb/nzbmatrix/main.py
@@ -43,7 +43,7 @@ class NZBMatrix(NZBProvider, RSS):
             'username': self.conf('username'),
             'apikey': self.conf('api_key'),
             'searchin': 'weblink',
-            'age': Env.setting('retention', section = 'nzb'),
+            'maxage': Env.setting('retention', section = 'nzb'),
             'english': self.conf('english_only'),
         })
         url = "%s?%s" % (self.urls['search'], arguments)

From 08e96b8691166f25bc383dcd0f8f09a81b2c42f6 Mon Sep 17 00:00:00 2001
From: Ruud
Date: Sat, 4 Aug 2012 22:58:50 +0200
Subject: [PATCH 17/24] Make it 4 weeks before release

---
 couchpotato/core/plugins/searcher/main.py | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/couchpotato/core/plugins/searcher/main.py b/couchpotato/core/plugins/searcher/main.py
index 03eb6cf..dfefdab 100644
--- a/couchpotato/core/plugins/searcher/main.py
+++ b/couchpotato/core/plugins/searcher/main.py
@@ -429,8 +429,8 @@ class Searcher(Plugin):

         if dates.get('dvd') > 0:

-            # 3 weeks before dvd release
-            if dates.get('dvd') - 1814400 < now:
+            # 4 weeks before dvd release
+            if dates.get('dvd') - 2419200 < now:
                 return True

             # Dvd should be released
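
The magic number in PATCH 17 is just the lead time expressed in seconds: 3 weeks = 3 * 7 * 24 * 3600 = 1,814,400 and 4 weeks = 2,419,200. Written out with named constants (close_to_dvd_release is an illustrative helper, not a function from the searcher):

    import time

    WEEK_IN_SECONDS = 7 * 24 * 60 * 60   # 604800

    def close_to_dvd_release(dvd_timestamp, weeks = 4, now = None):
        # 4 * 604800 = 2419200, the constant used in the searcher above
        now = time.time() if now is None else now
        return dvd_timestamp - weeks * WEEK_IN_SECONDS < now
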
From 1154d93ccd86e1148d7e8ec8d44022497f232c89 Mon Sep 17 00:00:00 2001
From: Ruud
Date: Sat, 4 Aug 2012 23:14:53 +0200
Subject: [PATCH 18/24] Don't re-use path_identifiers.

fix #665
---
 couchpotato/core/plugins/scanner/main.py | 26 +++++++++++---------------
 1 file changed, 11 insertions(+), 15 deletions(-)

diff --git a/couchpotato/core/plugins/scanner/main.py b/couchpotato/core/plugins/scanner/main.py
index 43547b8..7e47ed9 100644
--- a/couchpotato/core/plugins/scanner/main.py
+++ b/couchpotato/core/plugins/scanner/main.py
@@ -74,8 +74,6 @@ class Scanner(Plugin):

     cp_imdb = '(\.cp\((?P<id>tt[0-9{7}]+)\))'

-    path_identifiers = {} # bind identifier to filepath
-
     def __init__(self):

         addEvent('scanner.create_file_identifier', self.createStringIdentifier)
@@ -222,18 +220,19 @@ class Scanner(Plugin):

         # Create identifiers for all leftover files
+        path_identifiers = {}
         for file_path in leftovers:
             identifier = self.createStringIdentifier(file_path, folder)

-            if not self.path_identifiers.get(identifier):
-                self.path_identifiers[identifier] = []
-
-            self.path_identifiers[identifier].append(file_path)
+            if not path_identifiers.get(identifier):
+                path_identifiers[identifier] = []
+
+            path_identifiers[identifier].append(file_path)

         # Group the files based on the identifier
         delete_identifiers = []
-        for identifier, found_files in self.path_identifiers.iteritems():
+        for identifier, found_files in path_identifiers.iteritems():
             log.debug('Grouping files on identifier: %s', identifier)

             group = movie_files.get(identifier)
@@ -250,13 +249,13 @@ class Scanner(Plugin):

         # Cleaning up used
         for identifier in delete_identifiers:
-            if self.path_identifiers.get(identifier):
-                del self.path_identifiers[identifier]
+            if path_identifiers.get(identifier):
+                del path_identifiers[identifier]
         del delete_identifiers

         # Group based on folder
         delete_identifiers = []
-        for identifier, found_files in self.path_identifiers.iteritems():
+        for identifier, found_files in path_identifiers.iteritems():
             log.debug('Grouping files on foldername: %s', identifier)

             for ff in found_files:
@@ -276,8 +275,8 @@ class Scanner(Plugin):

         # Cleaning up used
         for identifier in delete_identifiers:
-            if self.path_identifiers.get(identifier):
-                del self.path_identifiers[identifier]
+            if path_identifiers.get(identifier):
+                del path_identifiers[identifier]
         del delete_identifiers

         # Determine file types
@@ -388,14 +387,11 @@ class Scanner(Plugin):
             processed_movies[identifier] = group

-
-        # Clean up
-        self.path_identifiers = {}
-
         if len(processed_movies) > 0:
             log.info('Found %s movies in the folder %s', (len(processed_movies), folder))
         else:
             log.debug('Found no movies in the folder %s', (folder))
+
         return processed_movies

     def getMetaData(self, group):
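
The bug behind PATCH 18 was a mutable class attribute: the identifier-to-paths mapping survived between scans, so leftovers from an earlier scan could be grouped into a later one. Building the dict locally per scan, as the patch does, is essentially the following (a condensed sketch; group_leftovers and create_identifier are illustrative names):

    from collections import defaultdict

    def group_leftovers(leftovers, create_identifier):
        # Built fresh on every scan; a shared class-level dict would keep
        # accumulating entries from previous scans
        path_identifiers = defaultdict(list)
        for file_path in leftovers:
            path_identifiers[create_identifier(file_path)].append(file_path)
        return path_identifiers
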
From 60e0eed26b75c90184e29f52fb2c89b0f42ba712 Mon Sep 17 00:00:00 2001
From: Ruud
Date: Sat, 4 Aug 2012 23:29:09 +0200
Subject: [PATCH 19/24] Use imdb identifier on sceneaccess.

fix #648
---
 couchpotato/core/providers/torrent/sceneaccess/main.py | 13 ++++++++-----
 1 file changed, 8 insertions(+), 5 deletions(-)

diff --git a/couchpotato/core/providers/torrent/sceneaccess/main.py b/couchpotato/core/providers/torrent/sceneaccess/main.py
index 2054294..62cd7dc 100644
--- a/couchpotato/core/providers/torrent/sceneaccess/main.py
+++ b/couchpotato/core/providers/torrent/sceneaccess/main.py
@@ -1,8 +1,7 @@
 from bs4 import BeautifulSoup
 from couchpotato.core.event import fireEvent
-from couchpotato.core.helpers.encoding import simplifyString, tryUrlencode, \
-    toUnicode
-from couchpotato.core.helpers.variable import getTitle, tryInt
+from couchpotato.core.helpers.encoding import tryUrlencode, toUnicode
+from couchpotato.core.helpers.variable import tryInt
 from couchpotato.core.logger import CPLog
 from couchpotato.core.providers.torrent.base import TorrentProvider
 import traceback
@@ -39,9 +38,10 @@ class SceneAccess(TorrentProvider):
             self.getCatId(quality['identifier'])[0]
         )

-        q = '"%s %s" %s' % (simplifyString(getTitle(movie['library'])), movie['library']['year'], quality.get('identifier'))
+        q = '%s %s' % (movie['library']['identifier'], quality.get('identifier'))
         arguments = tryUrlencode({
             'search': q,
+            'method': 1,
         })
         url = "%s&%s" % (url, arguments)

@@ -57,7 +57,10 @@ class SceneAccess(TorrentProvider):

         try:
             resultsTable = html.find('table', attrs = {'id' : 'torrents-table'})
-            entries = resultsTable.findAll('tr', attrs = {'class' : 'tt_row'})
+            if resultsTable is None:
+                return results
+
+            entries = resultsTable.find_all('tr', attrs = {'class' : 'tt_row'})
             for result in entries:

                 link = result.find('td', attrs = {'class' : 'ttr_name'}).find('a')

From de532cb7ee267e8bbcd3f32edcc7f2a5c34466ad Mon Sep 17 00:00:00 2001
From: Ruud
Date: Sun, 5 Aug 2012 14:11:48 +0200
Subject: [PATCH 20/24] Add description to growl notification. closes #604

---
 couchpotato/core/notifications/growl/__init__.py | 1 +
 1 file changed, 1 insertion(+)

diff --git a/couchpotato/core/notifications/growl/__init__.py b/couchpotato/core/notifications/growl/__init__.py
index 3e882b4..82a6636 100644
--- a/couchpotato/core/notifications/growl/__init__.py
+++ b/couchpotato/core/notifications/growl/__init__.py
@@ -9,6 +9,7 @@ config = [{
         {
             'tab': 'notifications',
             'name': 'growl',
+            'description': 'Version 1.4+',
             'options': [
                 {
                     'name': 'enabled',

From ac91156288ccc149cab51190a912979b20afee6c Mon Sep 17 00:00:00 2001
From: Ruud
Date: Sun, 5 Aug 2012 14:46:19 +0200
Subject: [PATCH 21/24] Firefox bookmarklet isn't run as function. fix #681

---
 couchpotato/core/plugins/userscript/template.js | 7 ++++---
 1 file changed, 4 insertions(+), 3 deletions(-)

diff --git a/couchpotato/core/plugins/userscript/template.js b/couchpotato/core/plugins/userscript/template.js
index e200afb..8b07272 100644
--- a/couchpotato/core/plugins/userscript/template.js
+++ b/couchpotato/core/plugins/userscript/template.js
@@ -12,8 +12,7 @@

 // ==/UserScript==

-if (window.top != window.self) // Only run on top window
-    return;
+if (window.top == window.self){ // Only run on top window

 var version = {{version}},
     host = '{{host}}',
@@ -135,4 +134,6 @@ var setVersion = function(){
 if(document.location.href.indexOf(host) == -1)
     osd();
 else
-    setVersion();
\ No newline at end of file
+    setVersion();
+
+}
\ No newline at end of file
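
Besides switching the SceneAccess query to the IMDB identifier, PATCH 19 guards against result pages without a torrents table: BeautifulSoup's find() returns None when nothing matches, which previously raised an AttributeError on findAll(); find_all() is simply the bs4 spelling of the old method. The guard in isolation (parse_rows is an illustrative wrapper, not provider code):

    from bs4 import BeautifulSoup

    def parse_rows(html):
        results = []
        resultsTable = BeautifulSoup(html, 'html.parser').find('table', attrs = {'id': 'torrents-table'})

        # find() returns None when the page contains no results table
        if resultsTable is None:
            return results

        for row in resultsTable.find_all('tr', attrs = {'class': 'tt_row'}):
            results.append(row)
        return results
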
From a3895dd17690961ee5750ee151e06a35ed2dba35 Mon Sep 17 00:00:00 2001
From: Ruud
Date: Sun, 5 Aug 2012 15:27:27 +0200
Subject: [PATCH 22/24] Use float for size calculation.

fix #643
---
 couchpotato/core/helpers/variable.py | 2 +-
 couchpotato/core/providers/base.py | 13 ++++++-------
 2 files changed, 7 insertions(+), 8 deletions(-)

diff --git a/couchpotato/core/helpers/variable.py b/couchpotato/core/helpers/variable.py
index 1f1d3ba..5312177 100644
--- a/couchpotato/core/helpers/variable.py
+++ b/couchpotato/core/helpers/variable.py
@@ -105,7 +105,7 @@ def tryInt(s):
 def tryFloat(s):
     try:
         return float(s) if '.' in s else tryInt(s)
-    except: return s
+    except: return 0

 def natsortKey(s):
     return map(tryInt, re.findall(r'(\d+|\D+)', s))

diff --git a/couchpotato/core/providers/base.py b/couchpotato/core/providers/base.py
index d25377b..3e9ef26 100644
--- a/couchpotato/core/providers/base.py
+++ b/couchpotato/core/providers/base.py
@@ -1,14 +1,13 @@
 from couchpotato.core.event import addEvent
+from couchpotato.core.helpers.variable import tryFloat
 from couchpotato.core.logger import CPLog
 from couchpotato.core.plugins.base import Plugin
 from couchpotato.environment import Env
 from urlparse import urlparse
-from urllib import quote_plus
-from couchpotato.core.helpers.encoding import simplifyString
-
 import re
 import time

+
 log = CPLog(__name__)

@@ -86,19 +85,19 @@ class YarrProvider(Provider):

     def parseSize(self, size):

         sizeRaw = size.lower()
-        size = float(re.sub(r'[^0-9.]', '', size).strip())
+        size = tryFloat(re.sub(r'[^0-9.]', '', size).strip())

         for s in self.sizeGb:
             if s in sizeRaw:
-                return int(size) * 1024
+                return size * 1024

         for s in self.sizeMb:
             if s in sizeRaw:
-                return int(size)
+                return size

         for s in self.sizeKb:
             if s in sizeRaw:
-                return int(size) / 1024
+                return size / 1024

         return 0
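
Because tryFloat now keeps the fraction (and falls back to 0 instead of echoing the input), a size such as '1.37 GB' parses to 1402.88 MB where the old int() based code truncated it to 1024 MB. A condensed version of the conversion; the unit lists are assumptions, since the provider's actual sizeGb/sizeMb/sizeKb values are not shown in this diff:

    import re

    SIZE_GB = ('gb', 'gib')
    SIZE_MB = ('mb', 'mib')
    SIZE_KB = ('kb', 'kib')

    def parse_size(size):
        raw = size.lower()
        number = float(re.sub(r'[^0-9.]', '', size).strip() or 0)

        if any(unit in raw for unit in SIZE_GB):
            return number * 1024      # result in MB
        if any(unit in raw for unit in SIZE_MB):
            return number
        if any(unit in raw for unit in SIZE_KB):
            return number / 1024
        return 0

    # parse_size('1.37 GB') -> 1402.88 (the old int() based version returned 1024)
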
From 40daba277be63051540530c21f638c2e2ce027b4 Mon Sep 17 00:00:00 2001
From: Ruud
Date: Sun, 5 Aug 2012 15:39:04 +0200
Subject: [PATCH 23/24] Handle permission error in shutil.move for *nix systems during the rename plugin.

---
 couchpotato/core/plugins/renamer/main.py | 9 +++++++++
 1 file changed, 9 insertions(+)

diff --git a/couchpotato/core/plugins/renamer/main.py b/couchpotato/core/plugins/renamer/main.py
index 4c6e6b8..2cd09e4 100644
--- a/couchpotato/core/plugins/renamer/main.py
+++ b/couchpotato/core/plugins/renamer/main.py
@@ -12,6 +12,7 @@ import os
 import re
 import shutil
 import traceback
+import errno

 log = CPLog(__name__)

@@ -428,6 +429,14 @@ class Renamer(Plugin):
            except:
                log.error('Failed setting permissions for file: %s, %s', (dest, traceback.format_exc(1)))

+        except OSError, err:
+            # Copying from a filesystem with octal permission to an NTFS file system causes a permission error. In this case ignore it.
+            if not hasattr(os, 'chmod') or err.errno != errno.EPERM:
+                raise
+            else:
+                if os.path.exists(dest):
+                    os.unlink(old)
+
        except:
            log.error('Couldn\'t move file "%s" to "%s": %s', (old, dest, traceback.format_exc()))
            raise Exception

From cfe1e47df244b04bf265863fcc0ab18954a24e6c Mon Sep 17 00:00:00 2001
From: Ruud
Date: Sun, 5 Aug 2012 16:14:24 +0200
Subject: [PATCH 24/24] Add branch to version file

---
 couchpotato/core/_base/updater/main.py | 3 ++-
 version.py | 1 +
 2 files changed, 3 insertions(+), 1 deletion(-)

diff --git a/couchpotato/core/_base/updater/main.py b/couchpotato/core/_base/updater/main.py
index 2b53ba4..980e247 100644
--- a/couchpotato/core/_base/updater/main.py
+++ b/couchpotato/core/_base/updater/main.py
@@ -14,6 +14,7 @@ import shutil
 import tarfile
 import time
 import traceback
+import version

 log = CPLog(__name__)

@@ -114,7 +115,7 @@ class BaseUpdater(Plugin):

     repo_user = 'RuudBurger'
     repo_name = 'CouchPotatoServer'
-    branch = 'develop'
+    branch = version.BRANCH

     version = None
     update_failed = False

diff --git a/version.py b/version.py
index 3808ef1..81b487e 100644
--- a/version.py
+++ b/version.py
@@ -1 +1,2 @@
 VERSION = '2.0.0.pre1'
+BRANCH = 'develop'
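
The renamer change in PATCH 23 works around the fact that shutil.move copies the file data first and the permission bits second: on destinations such as NTFS or CIFS mounts the chmod step can fail with EPERM even though the file content already arrived, so the patch swallows exactly that case and removes the source itself. Reduced to a standalone sketch (Python 2 except syntax, matching the patch; move is an illustrative wrapper, not the renamer's actual method):

    import errno
    import os
    import shutil

    def move(old, dest):
        try:
            shutil.move(old, dest)
        except OSError, err:
            # Re-raise anything that is not the harmless "couldn't copy
            # permission bits" case; otherwise finish the move by hand.
            if not hasattr(os, 'chmod') or err.errno != errno.EPERM:
                raise
            if os.path.exists(dest):
                os.unlink(old)
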