
Merge branch 'refs/heads/develop' into tv

Conflicts:
	couchpotato/core/media/_base/searcher/main.py
	couchpotato/core/plugins/renamer/main.py
Ruud committed 12 years ago (pull/2352/head), parent commit 1b59fd9af0
42 changed files (change count in parentheses):

  1. couchpotato/core/downloaders/__init__.py (4)
  2. couchpotato/core/downloaders/blackhole/__init__.py (7)
  3. couchpotato/core/downloaders/blackhole/main.py (22)
  4. couchpotato/core/downloaders/nzbget/__init__.py (1)
  5. couchpotato/core/downloaders/rtorrent/__init__.py (13)
  6. couchpotato/core/downloaders/rtorrent/main.py (12)
  7. couchpotato/core/downloaders/sabnzbd/main.py (11)
  8. couchpotato/core/downloaders/transmission/main.py (4)
  9. couchpotato/core/downloaders/utorrent/main.py (6)
  10. couchpotato/core/loader.py (59)
  11. couchpotato/core/media/movie/_base/static/movie.actions.js (36)
  12. couchpotato/core/media/movie/_base/static/movie.css (15)
  13. couchpotato/core/media/movie/_base/static/movie.js (2)
  14. couchpotato/core/notifications/__init__.py (4)
  15. couchpotato/core/notifications/email/main.py (3)
  16. couchpotato/core/notifications/plex/__init__.py (11)
  17. couchpotato/core/notifications/plex/main.py (203)
  18. couchpotato/core/plugins/base.py (2)
  19. couchpotato/core/plugins/custom/__init__.py (6)
  20. couchpotato/core/plugins/custom/main.py (21)
  21. couchpotato/core/plugins/manage/main.py (7)
  22. couchpotato/core/plugins/quality/main.py (14)
  23. couchpotato/core/plugins/release/main.py (32)
  24. couchpotato/core/plugins/renamer/main.py (70)
  25. couchpotato/core/plugins/scanner/main.py (17)
  26. couchpotato/core/plugins/status/main.py (1)
  27. couchpotato/core/providers/automation/__init__.py (4)
  28. couchpotato/core/providers/automation/bluray/__init__.py (7)
  29. couchpotato/core/providers/automation/bluray/main.py (39)
  30. couchpotato/core/providers/automation/flixster/__init__.py (34)
  31. couchpotato/core/providers/automation/flixster/main.py (48)
  32. couchpotato/core/providers/nzb/__init__.py (4)
  33. couchpotato/core/providers/torrent/__init__.py (4)
  34. couchpotato/core/providers/torrent/ilovetorrents/__init__.py (60)
  35. couchpotato/core/providers/torrent/ilovetorrents/main.py (128)
  36. couchpotato/core/providers/torrent/thepiratebay/__init__.py (2)
  37. couchpotato/core/providers/torrent/torrentshack/main.py (31)
  38. couchpotato/static/style/settings.css (2)
  39. libs/importlib/__init__.py (38)
  40. libs/rtorrent/__init__.py (6)
  41. libs/rtorrent/lib/torrentparser.py (7)
  42. libs/synchronousdeluge/client.py (49)

4
couchpotato/core/downloaders/__init__.py

@ -1,4 +1,4 @@
config = {
config = [{
'name': 'download_providers',
'groups': [
{
@ -10,4 +10,4 @@ config = {
'options': [],
},
],
}
}]

7
couchpotato/core/downloaders/blackhole/__init__.py

@ -36,6 +36,13 @@ config = [{
'values': [('usenet & torrents', 'both'), ('usenet', 'nzb'), ('torrent', 'torrent')],
},
{
'name': 'create_subdir',
'default': 0,
'type': 'bool',
'advanced': True,
'description': 'Create a sub directory when saving the .nzb (or .torrent).',
},
{
'name': 'manual',
'default': 0,
'type': 'bool',

22
couchpotato/core/downloaders/blackhole/main.py

@ -33,17 +33,27 @@ class Blackhole(Downloader):
log.error('No nzb/torrent available: %s', data.get('url'))
return False
fullPath = os.path.join(directory, self.createFileName(data, filedata, movie))
file_name = self.createFileName(data, filedata, movie)
full_path = os.path.join(directory, file_name)
if self.conf('create_subdir'):
try:
new_path = os.path.splitext(full_path)[0]
if not os.path.exists(new_path):
os.makedirs(new_path)
full_path = os.path.join(new_path, file_name)
except:
log.error('Couldnt create sub dir, reverting to old one: %s', full_path)
try:
if not os.path.isfile(fullPath):
log.info('Downloading %s to %s.', (data.get('protocol'), fullPath))
with open(fullPath, 'wb') as f:
if not os.path.isfile(full_path):
log.info('Downloading %s to %s.', (data.get('protocol'), full_path))
with open(full_path, 'wb') as f:
f.write(filedata)
os.chmod(fullPath, Env.getPermission('file'))
os.chmod(full_path, Env.getPermission('file'))
return True
else:
log.info('File %s already exists.', fullPath)
log.info('File %s already exists.', full_path)
return True
except:
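
For context, the new create_subdir option boils down to the path handling below; a minimal standalone sketch with illustrative names, not the plugin's actual method:

    import os

    def blackhole_path(directory, file_name, create_subdir):
        # e.g. file_name = 'Some.Movie.2013.720p.nzb' (illustrative)
        full_path = os.path.join(directory, file_name)
        if create_subdir:
            # sub directory is the file name without its extension
            sub_dir = os.path.splitext(full_path)[0]
            if not os.path.exists(sub_dir):
                os.makedirs(sub_dir)
            full_path = os.path.join(sub_dir, file_name)
        return full_path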

1
couchpotato/core/downloaders/nzbget/__init__.py

@ -12,6 +12,7 @@ config = [{
'name': 'nzbget',
'label': 'NZBGet',
'description': 'Use <a href="http://nzbget.sourceforge.net/Main_Page" target="_blank">NZBGet</a> to download NZBs.',
'wizard': True,
'options': [
{
'name': 'enabled',

13
couchpotato/core/downloaders/rtorrent/__init__.py

@ -36,6 +36,11 @@ config = [{
'description': 'Label to apply on added torrents.',
},
{
'name': 'directory',
'type': 'directory',
'description': 'Directory where rtorrent should download the files too.',
},
{
'name': 'remove_complete',
'label': 'Remove torrent',
'default': False,
@ -44,6 +49,14 @@ config = [{
'description': 'Remove the torrent after it finishes seeding.',
},
{
'name': 'append_label',
'label': 'Append Label',
'default': False,
'advanced': True,
'type': 'bool',
'description': 'Append label to download location. Requires you to set the download location above.',
},
{
'name': 'delete_files',
'label': 'Remove files',
'default': True,

12
couchpotato/core/downloaders/rtorrent/main.py

@ -7,7 +7,7 @@ from datetime import timedelta
from hashlib import sha1
from rtorrent import RTorrent
from rtorrent.err import MethodError
import shutil
import shutil, os
log = CPLog(__name__)
@ -91,6 +91,7 @@ class rTorrent(Downloader):
if self.conf('label'):
torrent_params['label'] = self.conf('label')
if not filedata and data.get('protocol') == 'torrent':
log.error('Failed sending torrent, no data')
return False
@ -116,10 +117,19 @@ class rTorrent(Downloader):
# Send torrent to rTorrent
torrent = self.rt.load_torrent(filedata)
if not torrent:
log.error('Unable to find the torrent, did it fail to load?')
return False
# Set label
if self.conf('label'):
torrent.set_custom(1, self.conf('label'))
if self.conf('directory') and self.conf('append_label'):
torrent.set_directory(os.path.join(self.conf('directory'), self.conf('label')))
elif self.conf('directory'):
torrent.set_directory(self.conf('directory'))
# Set Ratio Group
torrent.set_visible(group_name)
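
The combined effect of the new directory and append_label settings on where rTorrent is told to save a download, as a small standalone sketch (function name and values are illustrative):

    import os

    def rtorrent_download_dir(directory, label, append_label):
        # mirrors the order of checks above: the label is only appended
        # when both a base directory and append_label are configured
        if directory and append_label:
            return os.path.join(directory, label)
        elif directory:
            return directory
        return None  # no directory set: let rTorrent use its own default

    print rtorrent_download_dir('/downloads', 'couchpotato', True)   # /downloads/couchpotato
    print rtorrent_download_dir('/downloads', 'couchpotato', False)  # /downloads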

11
couchpotato/core/downloaders/sabnzbd/main.py

@ -90,9 +90,14 @@ class Sabnzbd(Downloader):
# Get busy releases
for item in queue.get('slots', []):
status = 'busy'
if 'ENCRYPTED / ' in item['filename']:
status = 'failed'
statuses.append({
'id': item['nzo_id'],
'name': item['filename'],
'status': status,
'original_status': item['status'],
'timeleft': item['timeleft'] if not queue['paused'] else -1,
})
@ -123,6 +128,12 @@ class Sabnzbd(Downloader):
try:
self.call({
'mode': 'queue',
'name': 'delete',
'del_files': '1',
'value': item['id']
}, use_json = False)
self.call({
'mode': 'history',
'name': 'delete',
'del_files': '1',

4
couchpotato/core/downloaders/transmission/main.py

@ -136,11 +136,11 @@ class Transmission(Downloader):
def removeFailed(self, item):
log.info('%s failed downloading, deleting...', item['name'])
return self.trpc.remove_torrent(item['hashString'], True)
return self.trpc.remove_torrent(item['id'], True)
def processComplete(self, item, delete_files = False):
log.debug('Requesting Transmission to remove the torrent %s%s.', (item['name'], ' and cleanup the downloaded files' if delete_files else ''))
return self.trpc.remove_torrent(item['hashString'], delete_files)
return self.trpc.remove_torrent(item['id'], delete_files)
class TransmissionRPC(object):

6
couchpotato/core/downloaders/utorrent/main.py

@ -107,9 +107,9 @@ class uTorrent(Downloader):
count += 1
# Check if torrent is saved in subfolder of torrent name
data = self.utorrent_api.get_files(torrent_hash)
getfiles_data = self.utorrent_api.get_files(torrent_hash)
torrent_files = json.loads(data)
torrent_files = json.loads(getfiles_data)
if torrent_files.get('error'):
log.error('Error getting data from uTorrent: %s', torrent_files.get('error'))
return False
@ -200,7 +200,7 @@ class uTorrent(Downloader):
if not self.connect():
return False
return self.utorrent_api.remove_torrent(item['id'], remove_data = delete_files)
def removeReadOnly(self, folder):
#Removes all read-only flags in a folder
if folder and os.path.isdir(folder):

59
couchpotato/core/loader.py

@ -1,7 +1,8 @@
from couchpotato.core.event import fireEvent
from couchpotato.core.logger import CPLog
import glob
from importlib import import_module
import os
import sys
import traceback
log = CPLog(__name__)
@ -12,17 +13,6 @@ class Loader(object):
providers = {}
modules = {}
def addPath(self, root, base_path, priority, recursive = False):
for filename in os.listdir(os.path.join(root, *base_path)):
path = os.path.join(os.path.join(root, *base_path), filename)
if os.path.isdir(path) and filename[:2] != '__':
if u'__init__.py' in os.listdir(path):
new_base_path = ''.join(s + '.' for s in base_path) + filename
self.paths[new_base_path.replace('.', '_')] = (priority, new_base_path, path)
if recursive:
self.addPath(root, base_path + [filename], priority, recursive = True)
def preload(self, root = ''):
core = os.path.join(root, 'couchpotato', 'core')
@ -39,6 +29,14 @@ class Loader(object):
# Add media to loader
self.addPath(root, ['couchpotato', 'core', 'media'], 25, recursive = True)
# Add custom plugin folder
from couchpotato.environment import Env
custom_plugin_dir = os.path.join(Env.get('data_dir'), 'custom_plugins')
if os.path.isdir(custom_plugin_dir):
sys.path.insert(0, custom_plugin_dir)
self.paths['custom_plugins'] = (30, '', custom_plugin_dir)
# Loop over all paths and add to module list
for plugin_type, plugin_tuple in self.paths.iteritems():
priority, module, dir_name = plugin_tuple
self.addFromDir(plugin_type, priority, module, dir_name)
@ -46,8 +44,9 @@ class Loader(object):
def run(self):
did_save = 0
for priority in self.modules:
for priority in sorted(self.modules):
for module_name, plugin in sorted(self.modules[priority].iteritems()):
# Load module
try:
if plugin.get('name')[:2] == '__':
@ -56,7 +55,6 @@ class Loader(object):
m = self.loadModule(module_name)
if m is None:
continue
m = getattr(m, plugin.get('name'))
log.info('Loading %s: %s', (plugin['type'], plugin['name']))
@ -78,20 +76,26 @@ class Loader(object):
if did_save:
fireEvent('settings.save')
def addPath(self, root, base_path, priority, recursive = False):
root_path = os.path.join(root, *base_path)
for filename in os.listdir(root_path):
path = os.path.join(root_path, filename)
if os.path.isdir(path) and filename[:2] != '__':
if u'__init__.py' in os.listdir(path):
new_base_path = ''.join(s + '.' for s in base_path) + filename
self.paths[new_base_path.replace('.', '_')] = (priority, new_base_path, path)
if recursive:
self.addPath(root, base_path + [filename], priority, recursive = True)
def addFromDir(self, plugin_type, priority, module, dir_name):
# Load dir module
try:
m = __import__(module)
splitted = module.split('.')
for sub in splitted[1:]:
m = getattr(m, sub)
except:
raise
if module and len(module) > 0:
self.addModule(priority, plugin_type, module, os.path.basename(dir_name))
for cur_file in glob.glob(os.path.join(dir_name, '*')):
name = os.path.basename(cur_file)
if os.path.isdir(os.path.join(dir_name, name)) and name != 'static' and os.path.isfile(os.path.join(cur_file, '__init__.py')):
for name in os.listdir(dir_name):
if os.path.isdir(os.path.join(dir_name, name)) and name != 'static' and os.path.isfile(os.path.join(dir_name, name, '__init__.py')):
module_name = '%s.%s' % (module, name)
self.addModule(priority, plugin_type, module_name, name)
@ -131,6 +135,7 @@ class Loader(object):
if not self.modules.get(priority):
self.modules[priority] = {}
module = module.lstrip('.')
self.modules[priority][module] = {
'priority': priority,
'module': module,
@ -140,11 +145,7 @@ class Loader(object):
def loadModule(self, name):
try:
m = __import__(name)
splitted = name.split('.')
for sub in splitted[1:-1]:
m = getattr(m, sub)
return m
return import_module(name)
except ImportError:
log.debug('Skip loading module plugin %s: %s', (name, traceback.format_exc()))
return None
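
The loadModule() rewrite leans on importlib.import_module returning the leaf module directly, whereas __import__ returns the top-level package; a quick stdlib-only comparison, assuming Python 2:

    from importlib import import_module

    # __import__('json.decoder') returns the top-level 'json' package,
    # which is why the old code had to walk getattr() down the dotted path:
    m = __import__('json.decoder')
    assert m.__name__ == 'json'

    # import_module returns the requested submodule itself:
    m = import_module('json.decoder')
    assert m.__name__ == 'json.decoder'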

36
couchpotato/core/media/movie/_base/static/movie.actions.js

@ -241,7 +241,6 @@ MA.Release = new Class({
}
})
).inject(self.release_container);
release['el'] = item;
if(status.identifier == 'ignored' || status.identifier == 'failed' || status.identifier == 'snatched'){
@ -251,6 +250,30 @@ MA.Release = new Class({
else if(!self.next_release && status.identifier == 'available'){
self.next_release = release;
}
var update_handle = function(notification) {
var q = self.movie.quality.getElement('.q_id' + release.quality_id),
status = Status.get(release.status_id),
new_status = Status.get(notification.data);
release.status_id = new_status.id
release.el.set('class', 'item ' + new_status.identifier);
var status_el = release.el.getElement('.release_status');
status_el.set('class', 'release_status ' + new_status.identifier);
status_el.set('text', new_status.identifier);
if(!q && (new_status.identifier == 'snatched' || new_status.identifier == 'seeding' || new_status.identifier == 'done'))
var q = self.addQuality(release.quality_id);
if(new_status && q && !q.hasClass(new_status.identifier)) {
q.removeClass(status.identifier).addClass(new_status.identifier);
q.set('title', q.get('title').replace(status.label, new_status.label));
}
}
App.addEvent('release.update_status.' + release.id, update_handle);
});
if(self.last_release)
@ -397,17 +420,6 @@ MA.Release = new Class({
'data': {
'id': release.id
},
'onComplete': function(){
var el = release.el;
if(el && (el.hasClass('failed') || el.hasClass('ignored'))){
el.removeClass('failed').removeClass('ignored');
el.getElement('.release_status').set('text', 'available');
}
else if(el) {
el.addClass('ignored');
el.getElement('.release_status').set('text', 'ignored');
}
}
})
},

15
couchpotato/core/media/movie/_base/static/movie.css

@ -419,22 +419,25 @@
}
.movies .data .quality .available,
.movies .data .quality .snatched {
.movies .data .quality .snatched,
.movies .data .quality .seeding {
opacity: 1;
cursor: pointer;
}
.movies .data .quality .available { background-color: #578bc3; }
.movies .data .quality .failed { background-color: #a43d34; }
.movies .data .quality .failed,
.movies .data .quality .missing,
.movies .data .quality .ignored { background-color: #a43d34; }
.movies .data .quality .snatched { background-color: #a2a232; }
.movies .data .quality .seeding { background-color: #0a6819; }
.movies .data .quality .done {
background-color: #369545;
opacity: 1;
}
.movies .data .quality .seeding { background-color: #0a6819; }
.movies .data .quality .finish {
background-image: url('../images/sprite.png');
background-repeat: no-repeat;
background-repeat: no-repeat;
background-position: 0 2px;
padding-left: 14px;
background-size: 14px
@ -646,7 +649,7 @@
margin-top: 25px;
}
}
.trailer_container.hide {
height: 0 !important;
}
@ -1029,7 +1032,7 @@
.movies .progress > div .folder {
display: inline-block;
padding: 5px 20px 5px 0;
white-space: nowrap;
white-space: nowrap;
text-overflow: ellipsis;
overflow: hidden;
width: 85%;

2
couchpotato/core/media/movie/_base/static/movie.js

@ -185,7 +185,7 @@ var Movie = new Class({
var q = self.quality.getElement('.q_id'+ release.quality_id),
status = Status.get(release.status_id);
if(!q && (status.identifier == 'snatched' || status.identifier == 'done'))
if(!q && (status.identifier == 'snatched' || status.identifier == 'seeding' || status.identifier == 'done'))
var q = self.addQuality(release.quality_id)
if (status && q && !q.hasClass(status.identifier)){

4
couchpotato/core/notifications/__init__.py

@ -1,4 +1,4 @@
config = {
config = [{
'name': 'notification_providers',
'groups': [
{
@ -10,4 +10,4 @@ config = {
'options': [],
},
],
}
}]

3
couchpotato/core/notifications/email/main.py

@ -2,6 +2,7 @@ from couchpotato.core.helpers.encoding import toUnicode
from couchpotato.core.helpers.variable import splitString
from couchpotato.core.logger import CPLog
from couchpotato.core.notifications.base import Notification
from couchpotato.environment import Env
from email.mime.text import MIMEText
import smtplib
import traceback
@ -23,7 +24,7 @@ class Email(Notification):
smtp_pass = self.conf('smtp_pass')
# Make the basic message
message = MIMEText(toUnicode(message))
message = MIMEText(toUnicode(message), _charset = Env.get('encoding'))
message['Subject'] = self.default_title
message['From'] = from_address
message['To'] = to_address
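
The email change just feeds the configured encoding into MIMEText's _charset argument; a tiny sketch with a hard-coded charset (the plugin reads it from Env.get('encoding')):

    from email.mime.text import MIMEText

    message = MIMEText('Download of "Some Movie (2013)" finished', _charset = 'utf-8')
    message['Subject'] = 'CouchPotato'
    print message.as_string()  # payload now carries an explicit charset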

11
couchpotato/core/notifications/plex/__init__.py

@ -17,10 +17,15 @@ config = [{
'type': 'enabler',
},
{
'name': 'host',
'name': 'media_server',
'label': 'Media Server',
'default': 'localhost',
'description': 'Default should be on localhost',
'advanced': True,
'description': 'Hostname/IP, default localhost'
},
{
'name': 'clients',
'default': '',
'description': 'Comma separated list of client names\'s (computer names). Top right when you start Plex'
},
{
'name': 'on_snatch',

203
couchpotato/core/notifications/plex/main.py

@ -1,79 +1,184 @@
from couchpotato.core.event import addEvent
from couchpotato.core.helpers.encoding import tryUrlencode
from couchpotato.core.helpers.variable import cleanHost, splitString
from couchpotato.core.helpers.variable import cleanHost
from couchpotato.core.logger import CPLog
from couchpotato.core.notifications.base import Notification
from urllib2 import URLError
from datetime import datetime
from urlparse import urlparse
from xml.dom import minidom
import json
import requests
import traceback
try:
import xml.etree.cElementTree as etree
except ImportError:
import xml.etree.ElementTree as etree
log = CPLog(__name__)
class Plex(Notification):
client_update_time = 5 * 60
http_time_between_calls = 0
def __init__(self):
super(Plex, self).__init__()
self.clients = {}
self.clients_updated = None
addEvent('renamer.after', self.addToLibrary)
def addToLibrary(self, message = None, group = None):
def updateClients(self, force = False):
if not self.conf('media_server'):
log.warning("Plex media server hostname is required")
return
since_update = ((datetime.now() - self.clients_updated).total_seconds())\
if self.clients_updated is not None else None
if force or self.clients_updated is None or since_update > self.client_update_time:
self.clients = {}
data = self.urlopen('%s/clients' % self.createHost(self.conf('media_server'), port = 32400))
client_result = etree.fromstring(data)
clients = [x.strip().lower() for x in self.conf('clients').split(',')]
for server in client_result.findall('Server'):
if server.get('name').lower() in clients:
clients.remove(server.get('name').lower())
protocol = server.get('protocol', 'xbmchttp')
if protocol in ['plex', 'xbmcjson', 'xbmchttp']:
self.clients[server.get('name')] = {
'name': server.get('name'),
'address': server.get('address'),
'port': server.get('port'),
'protocol': protocol
}
if len(clients) > 0:
log.info2('Unable to find plex clients: %s', ', '.join(clients))
log.info2('Found hosts: %s', ', '.join(self.clients.keys()))
self.clients_updated = datetime.now()
def addToLibrary(self, message = None, group = {}):
if self.isDisabled(): return
if not group: group = {}
log.info('Sending notification to Plex')
hosts = self.getHosts(port = 32400)
for host in hosts:
source_type = ['movie']
base_url = '%s/library/sections' % self.createHost(self.conf('media_server'), port = 32400)
refresh_url = '%s/%%s/refresh' % base_url
source_type = ['movie']
base_url = '%s/library/sections' % host
refresh_url = '%s/%%s/refresh' % base_url
try:
sections_xml = self.urlopen(base_url)
xml_sections = minidom.parseString(sections_xml)
sections = xml_sections.getElementsByTagName('Directory')
try:
sections_xml = self.urlopen(base_url)
xml_sections = minidom.parseString(sections_xml)
sections = xml_sections.getElementsByTagName('Directory')
for s in sections:
if s.getAttribute('type') in source_type:
url = refresh_url % s.getAttribute('key')
self.urlopen(url)
for s in sections:
if s.getAttribute('type') in source_type:
url = refresh_url % s.getAttribute('key')
x = self.urlopen(url)
except:
log.error('Plex library update failed for %s, Media Server not running: %s', (host, traceback.format_exc(1)))
return False
except:
log.error('Plex library update failed for %s, Media Server not running: %s',
(self.conf('media_server'), traceback.format_exc(1)))
return False
return True
def notify(self, message = '', data = None, listener = None):
if not data: data = {}
def sendHTTP(self, command, client):
url = 'http://%s:%s/xbmcCmds/xbmcHttp/?%s' % (
client['address'],
client['port'],
tryUrlencode(command)
)
headers = {}
hosts = self.getHosts(port = 3000)
try:
self.urlopen(url, headers = headers, timeout = 3, show_error = False)
except Exception, err:
log.error("Couldn't sent command to Plex: %s", err)
return False
return True
def notifyHTTP(self, message = '', data = {}, listener = None):
total = 0
successful = 0
for host in hosts:
if self.send({'command': 'ExecBuiltIn', 'parameter': 'Notification(CouchPotato, %s)' % message}, host):
successful += 1
return successful == len(hosts)
data = {
'command': 'ExecBuiltIn',
'parameter': 'Notification(CouchPotato, %s)' % message
}
def send(self, command, host):
for name, client in self.clients.items():
if client['protocol'] == 'xbmchttp':
total += 1
if self.sendHTTP(data, client):
successful += 1
url = '%s/xbmcCmds/xbmcHttp/?%s' % (host, tryUrlencode(command))
headers = {}
return successful == total
def sendJSON(self, method, params, client):
log.debug('sendJSON("%s", %s, %s)', (method, params, client))
url = 'http://%s:%s/jsonrpc' % (
client['address'],
client['port']
)
headers = {
'Content-Type': 'application/json'
}
request = {
'id':1,
'jsonrpc': '2.0',
'method': method,
'params': params
}
try:
self.urlopen(url, headers = headers, show_error = False)
except URLError:
log.error("Couldn't sent command to Plex, probably just running Media Server")
return False
except:
log.error("Couldn't sent command to Plex: %s", traceback.format_exc())
requests.post(url, headers = headers, timeout = 3, data = json.dumps(request))
except Exception, err:
log.error("Couldn't sent command to Plex: %s", err)
return False
log.info('Plex notification to %s successful.', host)
return True
def notifyJSON(self, message = '', data = {}, listener = None):
total = 0
successful = 0
params = {
'title': 'CouchPotato',
'message': message
}
for name, client in self.clients.items():
if client['protocol'] in ['xbmcjson', 'plex']:
total += 1
if self.sendJSON('GUI.ShowNotification', params, client):
successful += 1
return successful == total
def notify(self, message = '', data = {}, listener = None, force = False):
self.updateClients(force)
http_result = self.notifyHTTP(message, data, listener)
json_result = self.notifyJSON(message, data, listener)
return http_result and json_result
def test(self, **kwargs):
test_type = self.testNotifyName()
@ -83,7 +188,8 @@ class Plex(Notification):
success = self.notify(
message = self.test_message,
data = {},
listener = 'test'
listener = 'test',
force = True
)
success2 = self.addToLibrary()
@ -91,17 +197,12 @@ class Plex(Notification):
'success': success or success2
}
def getHosts(self, port = None):
raw_hosts = splitString(self.conf('host'))
hosts = []
def createHost(self, host, port = None):
for h in raw_hosts:
h = cleanHost(h)
p = urlparse(h)
h = h.rstrip('/')
if port and not p.port:
h += ':%s' % port
hosts.append(h)
h = cleanHost(host)
p = urlparse(h)
h = h.rstrip('/')
if port and not p.port:
h += ':%s' % port
return hosts
return h
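
What updateClients() pulls out of the media server's /clients response, sketched with ElementTree against a made-up XML sample (attribute names follow the code above):

    try:
        import xml.etree.cElementTree as etree
    except ImportError:
        import xml.etree.ElementTree as etree

    sample = '''<MediaContainer size="1">
      <Server name="HTPC" address="192.168.1.10" port="3000" protocol="xbmchttp"/>
    </MediaContainer>'''

    wanted = ['htpc']  # configured client names, lower-cased
    clients = {}
    for server in etree.fromstring(sample).findall('Server'):
        if server.get('name').lower() in wanted:
            clients[server.get('name')] = {
                'address': server.get('address'),
                'port': server.get('port'),
                'protocol': server.get('protocol', 'xbmchttp'),
            }
    print clients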

2
couchpotato/core/plugins/base.py

@ -121,7 +121,7 @@ class Plugin(object):
# http request
def urlopen(self, url, timeout = 30, params = None, headers = None, opener = None, multipart = False, show_error = True):
url = ss(url)
url = urllib2.quote(ss(url), safe = "%/:=&?~#+!$,;'@()*[]")
if not headers: headers = {}
if not params: params = {}
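
The urlopen() tweak percent-encodes characters urllib2 rejects while keeping URL syntax characters intact; roughly:

    import urllib2

    url = 'http://example.com/browse.php?search=Some Movie&cat=19'
    print urllib2.quote(url, safe = "%/:=&?~#+!$,;'@()*[]")
    # -> http://example.com/browse.php?search=Some%20Movie&cat=19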

6
couchpotato/core/plugins/custom/__init__.py

@ -0,0 +1,6 @@
from .main import Custom
def start():
return Custom()
config = []

21
couchpotato/core/plugins/custom/main.py

@ -0,0 +1,21 @@
from couchpotato.core.event import addEvent
from couchpotato.core.logger import CPLog
from couchpotato.core.plugins.base import Plugin
from couchpotato.environment import Env
import os
log = CPLog(__name__)
class Custom(Plugin):
def __init__(self):
addEvent('app.load', self.createStructure)
def createStructure(self):
custom_dir = os.path.join(Env.get('data_dir'), 'custom_plugins')
if not os.path.isdir(custom_dir):
self.makeDir(custom_dir)
self.createFile(os.path.join(custom_dir, '__init__.py'), '# Don\'t remove this file')

7
couchpotato/core/plugins/manage/main.py

@ -222,9 +222,10 @@ class Manage(Plugin):
groups = fireEvent('scanner.scan', folder = folder, files = files, single = True)
for group in groups.itervalues():
if group['library'] and group['library'].get('identifier'):
fireEvent('release.add', group = group)
if groups:
for group in groups.itervalues():
if group['library'] and group['library'].get('identifier'):
fireEvent('release.add', group = group)
def getDiskSpace(self):

14
couchpotato/core/plugins/quality/main.py

@ -1,7 +1,7 @@
from couchpotato import get_session
from couchpotato.api import addApiView
from couchpotato.core.event import addEvent
from couchpotato.core.helpers.encoding import toUnicode
from couchpotato.core.helpers.encoding import toUnicode, ss
from couchpotato.core.helpers.variable import mergeDicts, md5, getExt
from couchpotato.core.logger import CPLog
from couchpotato.core.plugins.base import Plugin
@ -200,15 +200,19 @@ class QualityPlugin(Plugin):
return None
def containsTag(self, quality, words, cur_file = ''):
cur_file = ss(cur_file)
# Check alt and tags
for tag_type in ['alternative', 'tags']:
for alt in quality.get(tag_type, []):
if isinstance(alt, tuple) and '.'.join(alt) in '.'.join(words):
for tag_type in ['alternative', 'tags', 'label']:
qualities = quality.get(tag_type, [])
qualities = [qualities] if isinstance(qualities, (str, unicode)) else qualities
for alt in qualities:
if (isinstance(alt, tuple) and '.'.join(alt) in '.'.join(words)) or (isinstance(alt, (str, unicode)) and ss(alt.lower()) in cur_file.lower()):
log.debug('Found %s via %s %s in %s', (quality['identifier'], tag_type, quality.get(tag_type), cur_file))
return True
if list(set(quality.get(tag_type, [])) & set(words)):
if list(set(qualities) & set(words)):
log.debug('Found %s via %s %s in %s', (quality['identifier'], tag_type, quality.get(tag_type), cur_file))
return True
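
The containsTag() change also matches a quality's label and plain-string alternatives as substrings of the file name; a compact sketch of that rule with a trimmed example quality dict (not the plugin's real data):

    def contains_tag(quality, words, cur_file = ''):
        cur_file = cur_file.lower()
        for tag_type in ['alternative', 'tags', 'label']:
            candidates = quality.get(tag_type, [])
            if isinstance(candidates, (str, unicode)):
                candidates = [candidates]
            for alt in candidates:
                # tuple alternatives match as a dotted sequence of words
                if isinstance(alt, tuple) and '.'.join(alt) in '.'.join(words):
                    return True
                # string alternatives match as a plain substring of the file name
                if isinstance(alt, (str, unicode)) and alt.lower() in cur_file:
                    return True
            if list(set(candidates) & set(words)):
                return True
        return False

    quality = {'identifier': 'brrip', 'label': 'BR-Rip', 'alternative': ['bdrip', ('br', 'rip')], 'tags': ['hdtv']}
    print contains_tag(quality, ['movie', '2013', 'bdrip'], 'movie.2013.bdrip.x264.mkv')  # True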

32
couchpotato/core/plugins/release/main.py

@ -10,6 +10,7 @@ from sqlalchemy.orm import joinedload_all
from sqlalchemy.sql.expression import and_, or_
import os
import traceback
import time
log = CPLog(__name__)
@ -47,6 +48,7 @@ class Release(Plugin):
addEvent('release.for_movie', self.forMovie)
addEvent('release.delete', self.delete)
addEvent('release.clean', self.clean)
addEvent('release.update_status', self.updateStatus)
def add(self, group):
@ -159,8 +161,7 @@ class Release(Plugin):
rel = db.query(Relea).filter_by(id = id).first()
if rel:
ignored_status, failed_status, available_status = fireEvent('status.get', ['ignored', 'failed', 'available'], single = True)
rel.status_id = available_status.get('id') if rel.status_id in [ignored_status.get('id'), failed_status.get('id')] else ignored_status.get('id')
db.commit()
self.updateStatus(id, available_status if rel.status_id in [ignored_status.get('id'), failed_status.get('id')] else ignored_status)
return {
'success': True
@ -199,14 +200,9 @@ class Release(Plugin):
if success:
db.expunge_all()
rel = db.query(Relea).filter_by(id = id).first() # Get release again
if rel.status_id != done_status.get('id'):
rel.status_id = snatched_status.get('id')
db.commit()
rel = db.query(Relea).filter_by(id = id).first() # Get release again @RuudBurger why do we need to get it again??
fireEvent('notify.frontend', type = 'release.download', data = True, message = 'Successfully snatched "%s"' % item['name'])
return {
'success': success
}
@ -241,3 +237,23 @@ class Release(Plugin):
'success': True
}
def updateStatus(self, id, status = None):
if not status: return
db = get_session()
rel = db.query(Relea).filter_by(id = id).first()
if rel and status and rel.status_id != status.get('id'):
item = {}
for info in rel.info:
item[info.identifier] = info.value
#update status in Db
log.debug('Marking release %s as %s', (item['name'], status.get("label")))
rel.status_id = status.get('id')
rel.last_edit = int(time.time())
db.commit()
#Update all movie info as there is no release update function
fireEvent('notify.frontend', type = 'release.update_status.%s' % rel.id, data = status.get('id'))

70
couchpotato/core/plugins/renamer/main.py

@ -395,14 +395,8 @@ class Renamer(Plugin):
break
elif release.status_id is snatched_status.get('id'):
if release.quality.id is group['meta_data']['quality']['id']:
log.debug('Marking release as downloaded')
try:
release.status_id = downloaded_status.get('id')
release.last_edit = int(time.time())
except Exception, e:
log.error('Failed marking release as finished: %s %s', (e, traceback.format_exc()))
db.commit()
# Set the release to downloaded
fireEvent('release.update_status', release.id, status = downloaded_status, single = True)
# Remove leftover files
if not remove_leftovers: # Don't remove anything
@ -476,11 +470,18 @@ class Renamer(Plugin):
log.error('Failed removing %s: %s', (release.identifier, traceback.format_exc()))
if group['dirname'] and group['parentdir'] and not self.downloadIsTorrent(download_info):
if movie_folder:
# Delete the movie folder
group_folder = movie_folder
else:
# Delete the first empty subfolder in the tree relative to the 'from' folder
group_folder = os.path.join(self.conf('from'), os.path.relpath(group['parentdir'], self.conf('from')).split(os.path.sep)[0])
try:
log.info('Deleting folder: %s', group['parentdir'])
self.deleteEmptyFolder(group['parentdir'])
log.info('Deleting folder: %s', group_folder)
self.deleteEmptyFolder(group_folder)
except:
log.error('Failed removing %s: %s', (group['parentdir'], traceback.format_exc()))
log.error('Failed removing %s: %s', (group_folder, traceback.format_exc()))
# Notify on download, search for trailers etc
download_message = 'Downloaded %s (%s)' % (movie_title, replacements['quality'])
@ -656,12 +657,13 @@ Remove it if you want it to be renamed (again, or at least let it try again)
self.checking_snatched = True
snatched_status, ignored_status, failed_status, done_status, seeding_status, downloaded_status = \
fireEvent('status.get', ['snatched', 'ignored', 'failed', 'done', 'seeding', 'downloaded'], single = True)
snatched_status, ignored_status, failed_status, done_status, seeding_status, downloaded_status, missing_status = \
fireEvent('status.get', ['snatched', 'ignored', 'failed', 'done', 'seeding', 'downloaded', 'missing'], single = True)
db = get_session()
rels = db.query(Release).filter_by(status_id = snatched_status.get('id')).all()
rels.extend(db.query(Release).filter_by(status_id = seeding_status.get('id')).all())
rels.extend(db.query(Release).filter_by(status_id = missing_status.get('id')).all())
scan_items = []
scan_required = False
@ -699,39 +701,36 @@ Remove it if you want it to be renamed (again, or at least let it try again)
log.debug('Found %s: %s, time to go: %s', (item['name'], item['status'].upper(), timeleft))
if item['status'] == 'busy':
# Set the release to snatched if it was missing before
fireEvent('release.update_status', rel.id, status = snatched_status, single = True)
# Tag folder if it is in the 'from' folder and it will not be processed because it is still downloading
if item['folder'] and self.conf('from') in item['folder']:
self.tagDir(item['folder'], 'downloading')
elif item['status'] == 'seeding':
# Set the release to seeding
fireEvent('release.update_status', rel.id, status = seeding_status, single = True)
#If linking setting is enabled, process release
if self.conf('file_action') != 'move' and not rel.movie.status_id == done_status.get('id') and self.statusInfoComplete(item):
if self.conf('file_action') != 'move' and not rel.status_id == seeding_status.get('id') and self.statusInfoComplete(item):
log.info('Download of %s completed! It is now being processed while leaving the original files alone for seeding. Current ratio: %s.', (item['name'], item['seed_ratio']))
# Remove the downloading tag
self.untagDir(item['folder'], 'downloading')
rel.status_id = seeding_status.get('id')
rel.last_edit = int(time.time())
db.commit()
# Scan and set the torrent to paused if required
item.update({'pause': True, 'scan': True, 'process_complete': False})
scan_items.append(item)
else:
if rel.status_id != seeding_status.get('id'):
rel.status_id = seeding_status.get('id')
rel.last_edit = int(time.time())
db.commit()
#let it seed
log.debug('%s is seeding with ratio: %s', (item['name'], item['seed_ratio']))
elif item['status'] == 'failed':
# Set the release to failed
fireEvent('release.update_status', rel.id, status = failed_status, single = True)
fireEvent('download.remove_failed', item, single = True)
rel.status_id = failed_status.get('id')
rel.last_edit = int(time.time())
db.commit()
if self.conf('next_on_failed'):
fireEvent('movie.searcher.try_next_release', media_id = rel.media_id)
@ -743,24 +742,23 @@ Remove it if you want it to be renamed (again, or at least let it try again)
if rel.status_id == seeding_status.get('id'):
if rel.movie.status_id == done_status.get('id'):
# Set the release to done as the movie has already been renamed
rel.status_id = downloaded_status.get('id')
rel.last_edit = int(time.time())
db.commit()
fireEvent('release.update_status', rel.id, status = downloaded_status, single = True)
# Allow the downloader to clean-up
item.update({'pause': False, 'scan': False, 'process_complete': True})
scan_items.append(item)
else:
# Set the release to snatched so that the renamer can process the release as if it was never seeding
rel.status_id = snatched_status.get('id')
rel.last_edit = int(time.time())
db.commit()
fireEvent('release.update_status', rel.id, status = snatched_status, single = True)
# Scan and Allow the downloader to clean-up
item.update({'pause': False, 'scan': True, 'process_complete': True})
scan_items.append(item)
else:
# Set the release to snatched if it was missing before
fireEvent('release.update_status', rel.id, status = snatched_status, single = True)
# Remove the downloading tag
self.untagDir(item['folder'], 'downloading')
@ -776,6 +774,14 @@ Remove it if you want it to be renamed (again, or at least let it try again)
if not found:
log.info('%s not found in downloaders', nzbname)
#Check status if already missing and for how long, if > 1 week, set to ignored else to missing
if rel.status_id == missing_status.get('id'):
if rel.last_edit < int(time.time()) - 7 * 24 * 60 * 60:
fireEvent('release.update_status', rel.id, status = ignored_status, single = True)
else:
# Set the release to missing
fireEvent('release.update_status', rel.id, status = missing_status, single = True)
except:
log.error('Failed checking for release in downloader: %s', traceback.format_exc())
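
The new missing handling reduced to its time arithmetic, with statuses as plain strings for illustration:

    import time

    ONE_WEEK = 7 * 24 * 60 * 60

    def next_status_for_lost_release(current_status, last_edit):
        # the release no longer shows up in any downloader
        if current_status == 'missing' and last_edit < int(time.time()) - ONE_WEEK:
            return 'ignored'   # missing for over a week: give up on it
        return 'missing'       # (re)mark as missing and keep checking

    print next_status_for_lost_release('snatched', int(time.time()))                   # missing
    print next_status_for_lost_release('missing', int(time.time()) - 8 * 24 * 3600)    # ignored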

17
couchpotato/core/plugins/scanner/main.py

@ -1,7 +1,8 @@
from couchpotato import get_session
from couchpotato.core.event import fireEvent, addEvent
from couchpotato.core.helpers.encoding import toUnicode, simplifyString, ss
from couchpotato.core.helpers.variable import getExt, getImdb, tryInt
from couchpotato.core.helpers.variable import getExt, getImdb, tryInt, \
splitString
from couchpotato.core.logger import CPLog
from couchpotato.core.plugins.base import Plugin
from couchpotato.core.settings.model import File, Media
@ -24,7 +25,9 @@ class Scanner(Plugin):
'media': 314572800, # 300MB
'trailer': 1048576, # 1MB
}
ignored_in_path = [os.path.sep + 'extracted' + os.path.sep, 'extracting', '_unpack', '_failed_', '_unknown_', '_exists_', '_failed_remove_', '_failed_rename_', '.appledouble', '.appledb', '.appledesktop', os.path.sep + '._', '.ds_store', 'cp.cpnfo'] #unpacking, smb-crap, hidden files
ignored_in_path = [os.path.sep + 'extracted' + os.path.sep, 'extracting', '_unpack', '_failed_', '_unknown_', '_exists_', '_failed_remove_',
'_failed_rename_', '.appledouble', '.appledb', '.appledesktop', os.path.sep + '._', '.ds_store', 'cp.cpnfo',
'thumbs.db', 'ehthumbs.db', 'desktop.ini'] #unpacking, smb-crap, hidden files
ignore_names = ['extract', 'extracting', 'extracted', 'movie', 'movies', 'film', 'films', 'download', 'downloads', 'video_ts', 'audio_ts', 'bdmv', 'certificate']
extensions = {
'movie': ['mkv', 'wmv', 'avi', 'mpg', 'mpeg', 'mp4', 'm2ts', 'iso', 'img', 'mdf', 'ts', 'm4v'],
@ -741,9 +744,16 @@ class Scanner(Plugin):
def createStringIdentifier(self, file_path, folder = '', exclude_filename = False):
identifier = file_path.replace(folder, '') # root folder
year = self.findYear(file_path)
identifier = file_path.replace(folder, '').lstrip(os.path.sep) # root folder
identifier = os.path.splitext(identifier)[0] # ext
try:
path_split = splitString(identifier, os.path.sep)
identifier = path_split[-2] if len(path_split) > 1 and len(path_split[-2]) > len(path_split[-1]) else path_split[-1] # Only get filename
except: pass
if exclude_filename:
identifier = identifier[:len(identifier) - len(os.path.split(identifier)[-1])]
@ -757,7 +767,6 @@ class Scanner(Plugin):
identifier = re.sub(self.clean, '::', simplifyString(identifier)).strip(':')
# Year
year = self.findYear(identifier)
if year and identifier[:4] != year:
identifier = '%s %s' % (identifier.split(year)[0].strip(), year)
else:
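
The createStringIdentifier() change prefers the parent folder name over the bare file name when the folder name is longer; in isolation:

    import os

    def best_identifier_part(relative_path):
        # strip the extension, then pick the parent folder name when it is
        # longer (and usually more descriptive) than the bare file name
        base = os.path.splitext(relative_path)[0]
        parts = [p for p in base.split(os.path.sep) if p]
        if len(parts) > 1 and len(parts[-2]) > len(parts[-1]):
            return parts[-2]
        return parts[-1]

    print best_identifier_part('Some.Movie.2013.1080p.BluRay/movie.mkv')  # Some.Movie.2013.1080p.BluRay
    print best_identifier_part('cd1/Some.Movie.2013.1080p.mkv')           # Some.Movie.2013.1080p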

1
couchpotato/core/plugins/status/main.py

@ -24,6 +24,7 @@ class StatusPlugin(Plugin):
'available': 'Available',
'suggest': 'Suggest',
'seeding': 'Seeding',
'missing': 'Missing',
}
status_cached = {}

4
couchpotato/core/providers/automation/__init__.py

@ -1,4 +1,4 @@
config = {
config = [{
'name': 'automation_providers',
'groups': [
{
@ -18,4 +18,4 @@ config = {
'options': [],
},
],
}
}]

7
couchpotato/core/providers/automation/bluray/__init__.py

@ -18,6 +18,13 @@ config = [{
'default': False,
'type': 'enabler',
},
{
'name': 'backlog',
'advanced': True,
'description': 'Parses the history until the minimum movie year is reached. (Will be disabled once it has completed)',
'default': False,
'type': 'bool',
},
],
},
],

39
couchpotato/core/providers/automation/bluray/main.py

@ -1,3 +1,4 @@
from bs4 import BeautifulSoup
from couchpotato.core.helpers.rss import RSS
from couchpotato.core.helpers.variable import tryInt
from couchpotato.core.logger import CPLog
@ -10,11 +11,49 @@ class Bluray(Automation, RSS):
interval = 1800
rss_url = 'http://www.blu-ray.com/rss/newreleasesfeed.xml'
backlog_url = 'http://www.blu-ray.com/movies/movies.php?show=newreleases&page=%s'
def getIMDBids(self):
movies = []
if self.conf('backlog'):
page = 0
while True:
page = page + 1
url = self.backlog_url % page
data = self.getHTMLData(url)
soup = BeautifulSoup(data)
try:
# Stop if the release year is before the minimal year
page_year = soup.body.find_all('center')[3].table.tr.find_all('td', recursive = False)[3].h3.get_text().split(', ')[1]
if tryInt(page_year) < self.getMinimal('year'):
break
for table in soup.body.find_all('center')[3].table.tr.find_all('td', recursive = False)[3].find_all('table')[1:20]:
name = table.h3.get_text().lower().split('blu-ray')[0].strip()
year = table.small.get_text().split('|')[1].strip()
if not name.find('/') == -1: # make sure it is not a double movie release
continue
if tryInt(year) < self.getMinimal('year'):
continue
imdb = self.search(name, year)
if imdb:
if self.isMinimalMovie(imdb):
movies.append(imdb['imdb'])
except:
log.debug('Error loading page: %s', page)
break
self.conf('backlog', value = False)
rss_movies = self.getRSSData(self.rss_url)
for movie in rss_movies:
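
The backlog option walks blu-ray.com's paginated new-release listing until a page's release year drops below the configured minimum; the control flow, with the BeautifulSoup scraping replaced by a hypothetical fetch_page(page) helper returning (year, names):

    def collect_backlog(fetch_page, minimal_year):
        # fetch_page is a stand-in for the page scraping above; it returns
        # (release year shown on that page, [movie names on that page])
        names = []
        page = 0
        while True:
            page += 1
            try:
                year, page_names = fetch_page(page)
            except Exception:
                break  # page failed to load or parse, stop the backlog run
            if year < minimal_year:
                break  # reached titles older than the configured minimum year
            names.extend(page_names)
        return names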

34
couchpotato/core/providers/automation/flixster/__init__.py

@ -0,0 +1,34 @@
from .main import Flixster
def start():
return Flixster()
config = [{
'name': 'flixster',
'groups': [
{
'tab': 'automation',
'list': 'watchlist_providers',
'name': 'flixster_automation',
'label': 'Flixster',
'description': 'Import movies from any public <a href="http://www.flixster.com/">Flixster</a> watchlist',
'options': [
{
'name': 'automation_enabled',
'default': False,
'type': 'enabler',
},
{
'name': 'automation_ids_use',
'label': 'Use',
},
{
'name': 'automation_ids',
'label': 'User ID',
'type': 'combined',
'combine': ['automation_ids_use', 'automation_ids'],
},
],
},
],
}]

48
couchpotato/core/providers/automation/flixster/main.py

@ -0,0 +1,48 @@
from couchpotato.core.helpers.variable import tryInt, splitString
from couchpotato.core.logger import CPLog
from couchpotato.core.providers.automation.base import Automation
import json
log = CPLog(__name__)
class Flixster(Automation):
url = 'http://www.flixster.com/api/users/%s/movies/ratings?scoreTypes=wts'
interval = 60
def getIMDBids(self):
ids = splitString(self.conf('automation_ids'))
if len(ids) == 0:
return []
movies = []
for movie in self.getWatchlist():
imdb_id = self.search(movie.get('title'), movie.get('year'), imdb_only = True)
movies.append(imdb_id)
return movies
def getWatchlist(self):
enablers = [tryInt(x) for x in splitString(self.conf('automation_ids_use'))]
ids = splitString(self.conf('automation_ids'))
index = -1
movies = []
for user_id in ids:
index += 1
if not enablers[index]:
continue
data = json.loads(self.getHTMLData(self.url % user_id))
for movie in data:
movies.append({'title': movie['movie']['title'], 'year': movie['movie']['year'] })
return movies

4
couchpotato/core/providers/nzb/__init__.py

@ -1,4 +1,4 @@
config = {
config = [{
'name': 'nzb_providers',
'groups': [
{
@ -11,4 +11,4 @@ config = {
'options': [],
},
],
}
}]

4
couchpotato/core/providers/torrent/__init__.py

@ -1,4 +1,4 @@
config = {
config = [{
'name': 'torrent_providers',
'groups': [
{
@ -11,4 +11,4 @@ config = {
'options': [],
},
],
}
}]

60
couchpotato/core/providers/torrent/ilovetorrents/__init__.py

@ -0,0 +1,60 @@
from main import ILoveTorrents
def start():
return ILoveTorrents()
config = [{
'name': 'ilovetorrents',
'groups': [
{
'tab': 'searcher',
'list': 'torrent_providers',
'name': 'ILoveTorrents',
'description': 'Where the Love of Torrents is Born',
'wizard': True,
'options': [
{
'name': 'enabled',
'type': 'enabler',
'default': False
},
{
'name': 'username',
'label': 'Username',
'type': 'string',
'default': '',
'description': 'The user name for your ILT account',
},
{
'name': 'password',
'label': 'Password',
'type': 'password',
'default': '',
'description': 'The password for your ILT account.',
},
{
'name': 'seed_ratio',
'label': 'Seed ratio',
'type': 'float',
'default': 1,
'description': 'Will not be (re)moved until this seed ratio is met.',
},
{
'name': 'seed_time',
'label': 'Seed time',
'type': 'int',
'default': 40,
'description': 'Will not be (re)moved until this seed time (in hours) is met.',
},
{
'name': 'extra_score',
'advanced': True,
'label': 'Extra Score',
'type': 'int',
'default': 0,
'description': 'Starting score for each release found via this provider.',
}
],
}
]
}]

128
couchpotato/core/providers/torrent/ilovetorrents/main.py

@ -0,0 +1,128 @@
from bs4 import BeautifulSoup
from couchpotato.core.helpers.encoding import toUnicode, tryUrlencode
from couchpotato.core.helpers.variable import tryInt
from couchpotato.core.logger import CPLog
from couchpotato.core.providers.torrent.base import TorrentProvider
import re
import traceback
log = CPLog(__name__)
class ILoveTorrents(TorrentProvider):
urls = {
'download': 'http://www.ilovetorrents.me/%s',
'detail': 'http://www.ilovetorrents.me/%s',
'search': 'http://www.ilovetorrents.me/browse.php?search=%s&page=%s&cat=%s',
'test' : 'http://www.ilovetorrents.me/',
'login' : 'http://www.ilovetorrents.me/takelogin.php',
'login_check' : 'http://www.ilovetorrents.me'
}
cat_ids = [
(['41'], ['720p', '1080p', 'brrip']),
(['19'], ['cam', 'ts', 'dvdrip', 'tc', 'r5', 'scr']),
(['20'], ['dvdr'])
]
cat_backup_id = 200
disable_provider = False
http_time_between_calls = 1
def _searchOnTitle(self, title, movie, quality, results):
page = 0
total_pages = 1
cats = self.getCatId(quality['identifier'])
while page < total_pages:
movieTitle = tryUrlencode('"%s" %s' % (title, movie['library']['year']))
search_url = self.urls['search'] % (movieTitle, page, cats[0])
page += 1
data = self.getHTMLData(search_url, opener = self.login_opener)
if data:
try:
soup = BeautifulSoup(data)
results_table = soup.find('table', attrs = {'class': 'koptekst'})
if not results_table:
return
try:
pagelinks = soup.findAll(href = re.compile('page'))
pageNumbers = [int(re.search('page=(?P<pageNumber>.+'')', i['href']).group('pageNumber')) for i in pagelinks]
total_pages = max(pageNumbers)
except:
pass
entries = results_table.find_all('tr')
for result in entries[1:]:
prelink = result.find(href = re.compile('details.php'))
link = prelink['href']
download = result.find('a', href = re.compile('download.php'))['href']
if link and download:
def extra_score(item):
trusted = (0, 10)[result.find('img', alt = re.compile('Trusted')) is not None]
vip = (0, 20)[result.find('img', alt = re.compile('VIP')) is not None]
confirmed = (0, 30)[result.find('img', alt = re.compile('Helpers')) is not None]
moderated = (0, 50)[result.find('img', alt = re.compile('Moderator')) is not None]
return confirmed + trusted + vip + moderated
id = re.search('id=(?P<id>\d+)&', link).group('id')
url = self.urls['download'] % (download)
fileSize = self.parseSize(result.select('td.rowhead')[5].text)
results.append({
'id': id,
'name': toUnicode(prelink.find('b').text),
'url': url,
'detail_url': self.urls['detail'] % link,
'size': fileSize,
'seeders': tryInt(result.find_all('td')[2].string),
'leechers': tryInt(result.find_all('td')[3].string),
'extra_score': extra_score,
'get_more_info': self.getMoreInfo
})
except:
log.error('Failed getting results from %s: %s', (self.getName(), traceback.format_exc()))
def getLoginParams(self):
return tryUrlencode({
'username': self.conf('username'),
'password': self.conf('password'),
'submit': 'Welcome to ILT',
})
def getMoreInfo(self, item):
cache_key = 'ilt.%s' % item['id']
description = self.getCache(cache_key)
if not description:
try:
full_description = self.getHTMLData(item['detail_url'], opener = self.login_opener)
html = BeautifulSoup(full_description)
nfo_pre = html.find('td', attrs = {'class':'main'}).findAll('table')[1]
description = toUnicode(nfo_pre.text) if nfo_pre else ''
except:
log.error('Failed getting more info for %s', item['name'])
description = ''
self.setCache(cache_key, description, timeout = 25920000)
item['description'] = description
return item
def loginSuccess(self, output):
return 'logout.php' in output.lower()
loginCheckSuccess = loginSuccess

2
couchpotato/core/providers/torrent/thepiratebay/__init__.py

@ -16,7 +16,7 @@ config = [{
{
'name': 'enabled',
'type': 'enabler',
'default': True
'default': False
},
{
'name': 'domain',

31
couchpotato/core/providers/torrent/torrentshack/main.py

@ -15,7 +15,7 @@ class TorrentShack(TorrentProvider):
'login' : 'https://torrentshack.net/login.php',
'login_check': 'https://torrentshack.net/inbox.php',
'detail' : 'https://torrentshack.net/torrent/%s',
'search' : 'https://torrentshack.net/torrents.php?searchstr=%s&filter_cat[%d]=1',
'search' : 'https://torrentshack.net/torrents.php?action=advanced&searchstr=%s&scene=%s&filter_cat[%d]=1',
'download' : 'https://torrentshack.net/%s',
}
@ -31,7 +31,9 @@ class TorrentShack(TorrentProvider):
def _searchOnTitle(self, title, movie, quality, results):
url = self.urls['search'] % (tryUrlencode('"%s" %s' % (title.replace(':', ''), movie['library']['year'])), self.getCatId(quality['identifier'])[0])
scene_only = '1' if self.conf('scene_only') else ''
url = self.urls['search'] % (tryUrlencode('%s %s' % (title.replace(':', ''), movie['library']['year'])), scene_only, self.getCatId(quality['identifier'])[0])
data = self.getHTMLData(url, opener = self.login_opener)
if data:
@ -49,22 +51,15 @@ class TorrentShack(TorrentProvider):
link = result.find('span', attrs = {'class' : 'torrent_name_link'}).parent
url = result.find('td', attrs = {'class' : 'torrent_td'}).find('a')
extra_info = ''
if result.find('span', attrs = {'class' : 'torrent_extra_info'}):
extra_info = result.find('span', attrs = {'class' : 'torrent_extra_info'}).text
if not self.conf('scene_only') or extra_info != '[NotScene]':
results.append({
'id': link['href'].replace('torrents.php?torrentid=', ''),
'name': unicode(link.span.string).translate({ord(u'\xad'): None}),
'url': self.urls['download'] % url['href'],
'detail_url': self.urls['download'] % link['href'],
'size': self.parseSize(result.find_all('td')[4].string),
'seeders': tryInt(result.find_all('td')[6].string),
'leechers': tryInt(result.find_all('td')[7].string),
})
else:
log.info('Not adding release %s [NotScene]' % unicode(link.span.string).translate({ord(u'\xad'): None}))
results.append({
'id': link['href'].replace('torrents.php?torrentid=', ''),
'name': unicode(link.span.string).translate({ord(u'\xad'): None}),
'url': self.urls['download'] % url['href'],
'detail_url': self.urls['download'] % link['href'],
'size': self.parseSize(result.find_all('td')[4].string),
'seeders': tryInt(result.find_all('td')[6].string),
'leechers': tryInt(result.find_all('td')[7].string),
})
except:
log.error('Failed to parsing %s: %s', (self.getName(), traceback.format_exc()))

2
couchpotato/static/style/settings.css

@ -542,7 +542,7 @@
line-height: 140%;
cursor: help;
}
.page .combined_table .head abbr.use, .page .combined_table .head abbr.automation_urls_use {
.page .combined_table .head abbr:first-child {
display: none;
}
.page .combined_table .head abbr.host {

38
libs/importlib/__init__.py

@ -0,0 +1,38 @@
"""Backport of importlib.import_module from 3.x."""
# While not critical (and in no way guaranteed!), it would be nice to keep this
# code compatible with Python 2.3.
import sys
def _resolve_name(name, package, level):
"""Return the absolute name of the module to be imported."""
if not hasattr(package, 'rindex'):
raise ValueError("'package' not set to a string")
dot = len(package)
for x in xrange(level, 1, -1):
try:
dot = package.rindex('.', 0, dot)
except ValueError:
raise ValueError("attempted relative import beyond top-level "
"package")
return "%s.%s" % (package[:dot], name)
def import_module(name, package=None):
"""Import a module.
The 'package' argument is required when performing a relative import. It
specifies the package to use as the anchor point from which to resolve the
relative import to an absolute import.
"""
if name.startswith('.'):
if not package:
raise TypeError("relative imports require the 'package' argument")
level = 0
for character in name:
if character != '.':
break
level += 1
name = _resolve_name(name[level:], package, level)
__import__(name)
return sys.modules[name]

6
libs/rtorrent/__init__.py

@ -71,12 +71,10 @@ class RTorrent:
def _verify_conn(self):
# check for rpc methods that should be available
assert {"system.client_version",
"system.library_version"}.issubset(set(self._get_rpc_methods())),\
"Required RPC methods not available."
assert "system.client_version" in self._get_rpc_methods(), "Required RPC method not available."
assert "system.library_version" in self._get_rpc_methods(), "Required RPC method not available."
# minimum rTorrent version check
assert self._meets_version_requirement() is True,\
"Error: Minimum rTorrent version required is {0}".format(
MIN_RTORRENT_VERSION_STR)

7
libs/rtorrent/lib/torrentparser.py

@ -90,9 +90,10 @@ class TorrentParser():
def _calc_info_hash(self):
self.info_hash = None
if "info" in self._torrent_decoded.keys():
info_dict = self._torrent_decoded["info"]
self.info_hash = hashlib.sha1(bencode.encode(
info_dict)).hexdigest().upper()
info_encoded = bencode.encode(self._torrent_decoded["info"])
if info_encoded:
self.info_hash = hashlib.sha1(info_encoded).hexdigest().upper()
return(self.info_hash)

49
libs/synchronousdeluge/client.py

@ -1,4 +1,5 @@
import os
import platform
from collections import defaultdict
from itertools import imap
@ -23,22 +24,48 @@ class DelugeClient(object):
self._request_counter = 0
def _get_local_auth(self):
xdg_config = os.path.expanduser(os.environ.get("XDG_CONFIG_HOME", "~/.config"))
config_home = os.path.join(xdg_config, "deluge")
auth_file = os.path.join(config_home, "auth")
auth_file = ""
username = password = ""
with open(auth_file) as fd:
for line in fd:
if platform.system() in ('Windows', 'Microsoft'):
appDataPath = os.environ.get("APPDATA")
if not appDataPath:
import _winreg
hkey = _winreg.OpenKey(_winreg.HKEY_CURRENT_USER, "Software\\Microsoft\\Windows\\CurrentVersion\\Explorer\\Shell Folders")
appDataReg = _winreg.QueryValueEx(hkey, "AppData")
appDataPath = appDataReg[0]
_winreg.CloseKey(hkey)
auth_file = os.path.join(appDataPath, "deluge", "auth")
else:
from xdg.BaseDirectory import save_config_path
try:
auth_file = os.path.join(save_config_path("deluge"), "auth")
except OSError, e:
return username, password
if os.path.exists(auth_file):
for line in open(auth_file):
if line.startswith("#"):
# This is a comment line
continue
line = line.strip()
try:
lsplit = line.split(":")
except Exception, e:
continue
if len(lsplit) == 2:
username, password = lsplit
elif len(lsplit) == 3:
username, password, level = lsplit
else:
continue
auth = line.split(":")
if len(auth) >= 2 and auth[0] == "localclient":
username, password = auth[0], auth[1]
break
if username == "localclient":
return (username, password)
return username, password
return ("", "")
def _create_module_method(self, module, method):
fullname = "{0}.{1}".format(module, method)
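
The rewritten _get_local_auth() ultimately parses Deluge's auth file, one username:password[:level] entry per line, looking for the localclient account; the parsing on its own, with made-up sample content:

    def parse_deluge_auth(lines):
        for line in lines:
            line = line.strip()
            if not line or line.startswith("#"):
                continue  # skip blanks and comment lines
            parts = line.split(":")
            if len(parts) == 2:
                username, password = parts
            elif len(parts) == 3:
                username, password, _level = parts
            else:
                continue
            if username == "localclient":
                return username, password
        return "", ""

    sample = ["# deluge auth file", "localclient:0123456789abcdef:10"]
    print parse_deluge_auth(sample)  # ('localclient', '0123456789abcdef')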
