Remove auth for /cache, add getArt

Set correct headers, add image URLs to the db, add getArt.
John 2015-03-04 22:00:36 +01:00 committed by evilhero
parent dea9ee73a2
commit 052e6ecb0b
5 changed files with 248 additions and 171 deletions
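For reference, the new getArt command rides on the existing /api endpoint. A minimal client sketch, assuming the default localhost:8090, a configured 32-character API key, and a placeholder ComicID (all three values are illustrative):

import urllib2

MYLAR_URL = 'http://localhost:8090/api'       # placeholder host/port
API_KEY = '0123456789abcdef0123456789abcdef'  # placeholder 32-char key
COMIC_ID = '2740'                             # placeholder ComicID

url = '%s?apikey=%s&cmd=getArt&id=%s' % (MYLAR_URL, API_KEY, COMIC_ID)
response = urllib2.urlopen(url)
body = response.read()

# On success the endpoint serves the JPEG itself (Content-Type image/jpeg);
# on failure it returns a plain-text error string instead.
if response.info().gettype() == 'image/jpeg':
    with open(COMIC_ID + '.jpg', 'wb') as f:
        f.write(body)
else:
    print 'API error: %s' % body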

.gitignore

@ -1,6 +1,12 @@
mylar.db
config.ini
*.pyc
*.py[co]
.idea
logs
.AppleDouble
cache/
custom_exceptions.csv
*.torrent
ehthumbs.db
Thumbs.db

mylar/__init__.py

@ -1,3 +1,6 @@
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# This file is part of Mylar.
#
# Mylar is free software: you can redistribute it and/or modify
@ -370,12 +373,12 @@ def check_setting_str(config, cfg_name, item_name, def_val, log=True):
else:
logger.debug(item_name + " -> ******")
return my_val
def initialize():
with INIT_LOCK:
global __INITIALIZED__, DBCHOICE, DBUSER, DBPASS, DBNAME, COMICVINE_API, DEFAULT_CVAPI, CVAPI_COUNT, CVAPI_TIME, CVAPI_MAX, FULL_PATH, PROG_DIR, VERBOSE, DAEMON, COMICSORT, DATA_DIR, CONFIG_FILE, CFG, CONFIG_VERSION, LOG_DIR, CACHE_DIR, MAX_LOGSIZE, LOGVERBOSE, OLDCONFIG_VERSION, OS_DETECT, OS_LANG, OS_ENCODING, \
queue, HTTP_PORT, HTTP_HOST, HTTP_USERNAME, HTTP_PASSWORD, HTTP_ROOT, ENABLE_HTTPS, HTTPS_CERT, HTTPS_KEY, HTTPS_FORCE_ON, API_ENABLED, API_KEY, LAUNCH_BROWSER, GIT_PATH, SAFESTART, AUTO_UPDATE, \
CURRENT_VERSION, LATEST_VERSION, CHECK_GITHUB, CHECK_GITHUB_ON_STARTUP, CHECK_GITHUB_INTERVAL, USER_AGENT, DESTINATION_DIR, MULTIPLE_DEST_DIRS, CREATE_FOLDERS, \
@ -392,7 +395,7 @@ def initialize():
PUSHBULLET_ENABLED, PUSHBULLET_APIKEY, PUSHBULLET_DEVICEID, PUSHBULLET_ONSNATCH, LOCMOVE, NEWCOM_DIR, FFTONEWCOM_DIR, \
PREFERRED_QUALITY, MOVE_FILES, RENAME_FILES, LOWERCASE_FILENAMES, USE_MINSIZE, MINSIZE, USE_MAXSIZE, MAXSIZE, CORRECT_METADATA, FOLDER_FORMAT, FILE_FORMAT, REPLACE_CHAR, REPLACE_SPACES, ADD_TO_CSV, CVINFO, LOG_LEVEL, POST_PROCESSING, POST_PROCESSING_SCRIPT, SEARCH_DELAY, GRABBAG_DIR, READ2FILENAME, STORYARCDIR, COPY2ARCDIR, CVURL, CVAPIFIX, CHECK_FOLDER, ENABLE_CHECK_FOLDER, \
COMIC_LOCATION, QUAL_ALTVERS, QUAL_SCANNER, QUAL_TYPE, QUAL_QUALITY, ENABLE_EXTRA_SCRIPTS, EXTRA_SCRIPTS, ENABLE_PRE_SCRIPTS, PRE_SCRIPTS, PULLNEW, ALT_PULL, COUNT_ISSUES, COUNT_HAVES, COUNT_COMICS, SYNO_FIX, CHMOD_FILE, CHMOD_DIR, ANNUALS_ON, CV_ONLY, CV_ONETIMER, WEEKFOLDER, UMASK
if __INITIALIZED__:
return False
@ -403,7 +406,7 @@ def initialize():
CheckSection('NZBsu')
CheckSection('DOGnzb')
CheckSection('Raw')
CheckSection('Experimental')
CheckSection('Newznab')
CheckSection('Torrents')
# Set global variables based on config file or use defaults
@ -411,7 +414,7 @@ def initialize():
HTTP_PORT = check_setting_int(CFG, 'General', 'http_port', 8090)
except:
HTTP_PORT = 8090
if HTTP_PORT < 21 or HTTP_PORT > 65535:
HTTP_PORT = 8090
@ -419,7 +422,7 @@ def initialize():
HTTPS_CERT = os.path.join(DATA_DIR, 'server.crt')
if HTTPS_KEY == '':
HTTPS_KEY = os.path.join(DATA_DIR, 'server.key')
CONFIG_VERSION = check_setting_str(CFG, 'General', 'config_version', '')
DBCHOICE = check_setting_str(CFG, 'General', 'dbchoice', 'sqlite3')
DBUSER = check_setting_str(CFG, 'General', 'dbuser', '')
@ -438,10 +441,10 @@ def initialize():
HTTP_ROOT = check_setting_str(CFG, 'General', 'http_root', '/')
ENABLE_HTTPS = bool(check_setting_int(CFG, 'General', 'enable_https', 0))
HTTPS_CERT = check_setting_str(CFG, 'General', 'https_cert', '')
HTTPS_KEY = check_setting_str(CFG, 'General', 'https_key', '')
HTTPS_FORCE_ON = bool(check_setting_int(CFG, 'General', 'https_force_on', 0))
API_ENABLED = bool(check_setting_int(CFG, 'General', 'api_enabled', 0))
API_KEY = check_setting_str(CFG, 'General', 'api_key', '')
LAUNCH_BROWSER = bool(check_setting_int(CFG, 'General', 'launch_browser', 1))
AUTO_UPDATE = bool(check_setting_int(CFG, 'General', 'auto_update', 0))
LOGVERBOSE = bool(check_setting_int(CFG, 'General', 'logverbose', 0))
@ -451,16 +454,16 @@ def initialize():
VERBOSE = 1
MAX_LOGSIZE = check_setting_int(CFG, 'General', 'max_logsize', 1000000)
if not MAX_LOGSIZE:
MAX_LOGSIZE = 1000000
GIT_PATH = check_setting_str(CFG, 'General', 'git_path', '')
LOG_DIR = check_setting_str(CFG, 'General', 'log_dir', '')
if not CACHE_DIR:
CACHE_DIR = check_setting_str(CFG, 'General', 'cache_dir', '')
CHECK_GITHUB = bool(check_setting_int(CFG, 'General', 'check_github', 1))
CHECK_GITHUB_ON_STARTUP = bool(check_setting_int(CFG, 'General', 'check_github_on_startup', 1))
CHECK_GITHUB_INTERVAL = check_setting_int(CFG, 'General', 'check_github_interval', 360)
DESTINATION_DIR = check_setting_str(CFG, 'General', 'destination_dir', '')
MULTIPLE_DEST_DIRS = check_setting_str(CFG, 'General', 'multiple_dest_dirs', '')
CREATE_FOLDERS = bool(check_setting_int(CFG, 'General', 'create_folders', 1))
@ -606,7 +609,7 @@ def initialize():
ENABLE_CBT = bool(check_setting_int(CFG, 'Torrents', 'enable_cbt', 0))
CBT_PASSKEY = check_setting_str(CFG, 'Torrents', 'cbt_passkey', '')
SNATCHEDTORRENT_NOTIFY = bool(check_setting_int(CFG, 'Torrents', 'snatchedtorrent_notify', 0))
#this needs to have its own category - for now General will do.
NZB_DOWNLOADER = check_setting_int(CFG, 'General', 'nzb_downloader', 0)
#legacy support of older config - reload into old values for consistency.
@ -680,7 +683,7 @@ def initialize():
EXPERIMENTAL = bool(check_setting_int(CFG, 'Experimental', 'experimental', 0))
ALTEXPERIMENTAL = bool(check_setting_int(CFG, 'Experimental', 'altexperimental', 1))
if EXPERIMENTAL:
PR.append('Experimental')
PR_NUM +=1
@ -712,7 +715,7 @@ def initialize():
elif CONFIG_VERSION == '5':
EN_NUM = 5 #addition of Newznab UID
else:
EN_NUM = 3
EXTRA_NEWZNABS = list(itertools.izip(*[itertools.islice(flattened_newznabs, i, None, EN_NUM) for i in range(EN_NUM)]))
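The islice/izip line above is the file's idiom for regrouping a flat config list into fixed-size tuples: islice(flattened, i, None, EN_NUM) picks every EN_NUM-th element starting at offset i, and izip zips the staggered slices back together. A minimal sketch with made-up newznab values (Python 2, matching the codebase):

import itertools

# Hypothetical flattened list as read back from config.ini:
# name, host, apikey, uid, enabled -- repeated per entry.
flattened = ['indexerA', 'http://a.example', 'key-a', '1', '1',
             'indexerB', 'http://b.example', 'key-b', '2', '0']

EN_NUM = 5
entries = list(itertools.izip(*[itertools.islice(flattened, i, None, EN_NUM)
                                for i in range(EN_NUM)]))
# entries == [('indexerA', 'http://a.example', 'key-a', '1', '1'),
#             ('indexerB', 'http://b.example', 'key-b', '2', '0')]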
@ -730,14 +733,14 @@ def initialize():
#update the configV and write the config.
CONFIG_VERSION = '5'
config_write()
#to counteract the loss of the 1st newznab entry because of a switch, let's rewrite to the tuple
if NEWZNAB_HOST and CONFIG_VERSION:
EXTRA_NEWZNABS.append((NEWZNAB_NAME, NEWZNAB_HOST, NEWZNAB_APIKEY, NEWZNAB_UID, int(NEWZNAB_ENABLED)))
#PR_NUM +=1
# Need to rewrite config here and bump up config version
CONFIG_VERSION = '5'
config_write()
#print 'PR_NUM:' + str(PR_NUM)
if NEWZNAB:
@ -754,7 +757,7 @@ def initialize():
flattened_provider_order = check_setting_str(CFG, 'General', 'provider_order', [], log=False)
PROVIDER_ORDER = list(itertools.izip(*[itertools.islice(flattened_provider_order, i, None, 2) for i in range(2)]))
if len(flattened_provider_order) == 0:
#priority provider sequence in order#, ProviderName
#print('creating provider sequence order now...')
TMPPR_NUM = 0
@ -791,7 +794,7 @@ def initialize():
#print 'provider already exists at : ' + str(new_order_seqnum) + ' -- ' + str(PR[TMPPR_NUM])
TMPPR_NUM +=1
#this isn't ready for primetime just yet...
#print 'Provider Order is:' + str(PROV_ORDER)
@ -814,9 +817,9 @@ def initialize():
folder_values = { 'series' : 'Series', 'publisher':'Publisher', 'year' : 'Year', 'first' : 'First', 'lowerfirst' : 'first' }
FILE_FORMAT = replace_all(FILE_FORMAT, file_values)
FOLDER_FORMAT = replace_all(FOLDER_FORMAT, folder_values)
CONFIG_VERSION = '1'
if CONFIG_VERSION == '1':
from mylar.helpers import replace_all
@ -837,10 +840,10 @@ def initialize():
'publisher': '$publisher',
'year': '$year',
'first': '$first'
}
FILE_FORMAT = replace_all(FILE_FORMAT, file_values)
FOLDER_FORMAT = replace_all(FOLDER_FORMAT, folder_values)
CONFIG_VERSION = '2'
if 'http://' not in SAB_HOST[:7] and 'https://' not in SAB_HOST[:8]:
@ -1024,7 +1027,7 @@ def initialize():
# Store the original umask
UMASK = os.umask(0)
os.umask(UMASK)
__INITIALIZED__ = True
return True
@ -1033,10 +1036,10 @@ def daemonize():
if threading.activeCount() != 1:
logger.warn('There are %r active threads. Daemonizing may cause \
strange behavior.' % threading.enumerate())
sys.stdout.flush()
sys.stderr.flush()
# Do first fork
try:
pid = os.fork()
@ -1048,7 +1051,7 @@ def daemonize():
os._exit(0)
except OSError, e:
sys.exit("1st fork failed: %s [%d]" % (e.strerror, e.errno))
os.setsid()
# Make sure I can read my own files and shut out others
@ -1070,7 +1073,7 @@ def daemonize():
si = open('/dev/null', "r")
so = open('/dev/null', "a+")
se = open('/dev/null', "a+")
os.dup2(si.fileno(), sys.stdin.fileno())
os.dup2(so.fileno(), sys.stdout.fileno())
os.dup2(se.fileno(), sys.stderr.fileno())
@ -1086,8 +1089,8 @@ def launch_browser(host, port, root):
if host == '0.0.0.0':
host = 'localhost'
try:
webbrowser.open('http://%s:%i%s' % (host, port, root))
except Exception, e:
logger.error('Could not launch browser: %s' % e)
@ -1123,7 +1126,7 @@ def config_write():
new_config['General']['https_key'] = HTTPS_KEY
new_config['General']['https_force_on'] = int(HTTPS_FORCE_ON)
new_config['General']['api_enabled'] = int(API_ENABLED)
new_config['General']['api_key'] = API_KEY
new_config['General']['launch_browser'] = int(LAUNCH_BROWSER)
new_config['General']['auto_update'] = int(AUTO_UPDATE)
new_config['General']['log_dir'] = LOG_DIR
@ -1134,7 +1137,7 @@ def config_write():
new_config['General']['annuals_on'] = int(ANNUALS_ON)
new_config['General']['cv_only'] = int(CV_ONLY)
new_config['General']['cv_onetimer'] = int(CV_ONETIMER)
new_config['General']['cvapifix'] = int(CVAPIFIX)
new_config['General']['check_github'] = int(CHECK_GITHUB)
new_config['General']['check_github_on_startup'] = int(CHECK_GITHUB_ON_STARTUP)
new_config['General']['check_github_interval'] = CHECK_GITHUB_INTERVAL
@ -1332,9 +1335,9 @@ def config_write():
new_config['Raw']['raw_groups'] = RAW_GROUPS
new_config.write()
def start():
global __INITIALIZED__, started
#dbUpdateScheduler, searchScheduler, RSSScheduler, \
#WeeklyScheduler, VersionScheduler, FolderMonitorScheduler
@ -1342,7 +1345,7 @@ def start():
with INIT_LOCK:
if __INITIALIZED__:
# Start our scheduled background tasks
#from mylar import updater, search, PostProcessor
@ -1369,7 +1372,7 @@ def start():
#RSSScheduler.thread.start()
logger.info('Initiating startup-RSS feed checks.')
rsscheck.tehMain()
#weekly pull list gets messed up if it's not populated first, so let's populate it then set the scheduler.
logger.info('Checking for existence of Weekly Comic listing...')
@ -1378,7 +1381,7 @@ def start():
#now the scheduler (check every 24 hours)
SCHED.add_interval_job(weeklypull.pullit, hours=24)
#WeeklyScheduler.thread.start()
#let's do a run at the Wanted issues here (on startup) if enabled.
if NZB_STARTUP_SEARCH:
threading.Thread(target=search.searchforissue).start()
@ -1386,7 +1389,7 @@ def start():
if CHECK_GITHUB:
#VersionScheduler.thread.start()
SCHED.add_interval_job(versioncheck.checkGithub, minutes=CHECK_GITHUB_INTERVAL)
#run checkFolder every X minutes (basically Manual Run Post-Processing)
if ENABLE_CHECK_FOLDER:
if DOWNLOAD_SCAN_INTERVAL >0:
@ -1396,9 +1399,9 @@ def start():
else:
logger.error('You need to specify a monitoring time for the check folder option to work')
SCHED.start()
started = True
def dbcheck():
#if DBCHOICE == 'postgresql':
# import psycopg2
@ -1407,10 +1410,10 @@ def dbcheck():
#else:
conn = sqlite3.connect(DB_FILE)
c_error = 'sqlite3.OperationalError'
c=conn.cursor()
c.execute('CREATE TABLE IF NOT EXISTS comics (ComicID TEXT UNIQUE, ComicName TEXT, ComicSortName TEXT, ComicYear TEXT, DateAdded TEXT, Status TEXT, IncludeExtras INTEGER, Have INTEGER, Total INTEGER, ComicImage TEXT, ComicPublisher TEXT, ComicLocation TEXT, ComicPublished TEXT, LatestIssue TEXT, LatestDate TEXT, Description TEXT, QUALalt_vers TEXT, QUALtype TEXT, QUALscanner TEXT, QUALquality TEXT, LastUpdated TEXT, AlternateSearch TEXT, UseFuzzy TEXT, ComicVersion TEXT, SortOrder INTEGER, DetailURL TEXT, ForceContinuing INTEGER, ComicName_Filesafe TEXT, AlternateFileName TEXT)')
c.execute('CREATE TABLE IF NOT EXISTS comics (ComicID TEXT UNIQUE, ComicName TEXT, ComicSortName TEXT, ComicYear TEXT, DateAdded TEXT, Status TEXT, IncludeExtras INTEGER, Have INTEGER, Total INTEGER, ComicImage TEXT, ComicPublisher TEXT, ComicLocation TEXT, ComicPublished TEXT, LatestIssue TEXT, LatestDate TEXT, Description TEXT, QUALalt_vers TEXT, QUALtype TEXT, QUALscanner TEXT, QUALquality TEXT, LastUpdated TEXT, AlternateSearch TEXT, UseFuzzy TEXT, ComicVersion TEXT, SortOrder INTEGER, DetailURL TEXT, ForceContinuing INTEGER, ComicName_Filesafe TEXT, AlternateFileName TEXT, ComicImageURL TEXT, ComicImageALTURL TEXT)')
c.execute('CREATE TABLE IF NOT EXISTS issues (IssueID TEXT, ComicName TEXT, IssueName TEXT, Issue_Number TEXT, DateAdded TEXT, Status TEXT, Type TEXT, ComicID TEXT, ArtworkURL Text, ReleaseDate TEXT, Location TEXT, IssueDate TEXT, Int_IssueNumber INT, ComicSize TEXT, AltIssueNumber TEXT, IssueDate_Edit TEXT)')
c.execute('CREATE TABLE IF NOT EXISTS snatched (IssueID TEXT, ComicName TEXT, Issue_Number TEXT, Size INTEGER, DateAdded TEXT, Status TEXT, FolderName TEXT, ComicID TEXT, Provider TEXT)')
c.execute('CREATE TABLE IF NOT EXISTS upcoming (ComicName TEXT, IssueNumber TEXT, ComicID TEXT, IssueID TEXT, IssueDate TEXT, Status TEXT, DisplayComicName TEXT)')
@ -1431,7 +1434,7 @@ def dbcheck():
csv_load()
#add in the late players to the game....
# -- Comics Table --
@ -1497,6 +1500,16 @@ def dbcheck():
except sqlite3.OperationalError:
c.execute('ALTER TABLE comics ADD COLUMN AlternateFileName TEXT')
try:
c.execute('SELECT ComicImageURL from comics')
except sqlite3.OperationalError:
c.execute('ALTER TABLE comics ADD COLUMN ComicImageURL TEXT')
try:
c.execute('SELECT ComicImageALTURL from comics')
except sqlite3.OperationalError:
c.execute('ALTER TABLE comics ADD COLUMN ComicImageALTURL TEXT')
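The two new columns above follow the file's usual migration idiom: probe the column with a SELECT and only ALTER TABLE when SQLite raises OperationalError (SQLite has no ADD COLUMN IF NOT EXISTS). A hedged sketch of that pattern as a reusable helper -- the helper itself is illustrative, not part of this commit:

import sqlite3

def add_column_if_missing(cursor, table, column, coltype):
    # Probe with a SELECT; sqlite3 raises OperationalError
    # when the column does not exist yet.
    try:
        cursor.execute('SELECT %s FROM %s' % (column, table))
    except sqlite3.OperationalError:
        cursor.execute('ALTER TABLE %s ADD COLUMN %s %s' % (table, column, coltype))

# equivalent to the two blocks above:
#   add_column_if_missing(c, 'comics', 'ComicImageURL', 'TEXT')
#   add_column_if_missing(c, 'comics', 'ComicImageALTURL', 'TEXT')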
# -- Issues Table --
try:
@ -1680,7 +1693,7 @@ def dbcheck():
## -- Snatched Table --
try:
c.execute('SELECT Provider from snatched')
except sqlite3.OperationalError:
@ -1750,7 +1763,7 @@ def dbcheck():
#if it's prior to Wednesday, the issue counts will be inflated by one as the online db's everywhere
#prepare for the next 'new' release of a series. It's caught in updater.py, so let's just store the
#value in the sql so we can display it in the details screen for everyone to wonder at.
try:
c.execute('SELECT not_updated_db from comics')
@ -1781,7 +1794,7 @@ def dbcheck():
logger.info('Correcting Null entries that make the main page break on startup.')
c.execute("UPDATE Comics SET LatestDate='Unknown' WHERE LatestDate='None' or LatestDate is NULL")
conn.commit()
c.close()
@ -1815,7 +1828,7 @@ def csv_load():
shutil.copy(os.path.join(DATA_DIR,"custom_exceptions_sample.csv"), EXCEPTIONS_FILE)
except (OSError,IOError):
logger.error('Cannot create custom_exceptions.csv in ' + str(DATA_DIR) + '. Make sure _sample.csv is present and/or check permissions.')
return
else:
logger.error('Could not locate ' + str(EXCEPTIONS[i]) + ' file. Make sure it is in datadir: ' + DATA_DIR)
break
@ -1839,7 +1852,7 @@ def csv_load():
i+=1
conn.commit()
c.close()
#def halt():
# global __INITIALIZED__, dbUpdateScheduler, seachScheduler, RSSScheduler, WeeklyScheduler, \
@ -1904,7 +1917,7 @@ def shutdown(restart=False, update=False):
cherrypy.engine.exit()
SCHED.shutdown(wait=False)
config_write()
if not restart and not update:
@ -1914,12 +1927,12 @@ def shutdown(restart=False, update=False):
try:
versioncheck.update()
except Exception, e:
logger.warn('Mylar failed to update: %s. Restarting.' % e)
if CREATEPID:
logger.info('Removing pidfile %s' % PIDFILE)
os.remove(PIDFILE)
if restart:
logger.info('Mylar is restarting...')
popen_list = [sys.executable, FULL_PATH]
@ -1928,5 +1941,5 @@ def shutdown(restart=False, update=False):
popen_list += ['--nolaunch']
logger.info('Restarting Mylar with ' + str(popen_list))
subprocess.Popen(popen_list, cwd=os.getcwd())
os._exit(0)

mylar/api.py

@ -1,3 +1,6 @@
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# This file is part of Mylar.
#
# Mylar is free software: you can redistribute it and/or modify
@ -14,35 +17,35 @@
# along with Mylar. If not, see <http://www.gnu.org/licenses/>.
import mylar
from mylar import db, mb, importer, search, PostProcessor, versioncheck, logger
from mylar import db, importer, search, PostProcessor, versioncheck, logger
import lib.simplejson as simplejson
from xml.dom.minidom import Document
import copy
import cherrypy
import os
import urllib2
import cache
import imghdr
from cherrypy.lib.static import serve_file
cmd_list = [ 'getIndex', 'getComic', 'getComic', 'getUpcoming', 'getWanted', 'getHistory', 'getLogs',
cmd_list = ['getIndex', 'getComic', 'getUpcoming', 'getWanted', 'getHistory', 'getLogs',
'findComic', 'findIssue', 'addComic', 'delComic', 'pauseComic', 'resumeComic', 'refreshComic',
'addIssue', 'queueIssue', 'unqueueIssue', 'forceSearch', 'forceProcess', 'getVersion', 'checkGithub',
'shutdown', 'restart', 'update', 'getComicInfo', 'getIssueInfo']
'shutdown', 'restart', 'update', 'getComicInfo', 'getIssueInfo', 'getArt']
class Api(object):
def __init__(self):
self.apikey = None
self.cmd = None
self.id = None
self.img = None
self.kwargs = None
self.data = None
self.callback = None
def checkParams(self,*args,**kwargs):
if not mylar.API_ENABLED:
self.data = 'API not enabled'
return
@ -52,193 +55,197 @@ class Api(object):
if len(mylar.API_KEY) != 32:
self.data = 'API key not generated correctly'
return
if 'apikey' not in kwargs:
self.data = 'Missing api key'
return
if kwargs['apikey'] != mylar.API_KEY:
self.data = 'Incorrect API key'
return
else:
self.apikey = kwargs.pop('apikey')
if 'cmd' not in kwargs:
self.data = 'Missing parameter: cmd'
return
if kwargs['cmd'] not in cmd_list:
self.data = 'Unknown command: %s' % kwargs['cmd']
return
else:
self.cmd = kwargs.pop('cmd')
self.kwargs = kwargs
self.data = 'OK'
def fetchData(self):
if self.data == 'OK':
logger.info('Received API command: ' + self.cmd)
methodToCall = getattr(self, "_" + self.cmd)
result = methodToCall(**self.kwargs)
if 'callback' not in self.kwargs:
if self.img:
return serve_file(path=self.img, content_type='image/jpeg')
if type(self.data) == type(''):
return self.data
else:
cherrypy.response.headers['Content-Type'] = "application/json"
return simplejson.dumps(self.data)
else:
self.callback = self.kwargs['callback']
self.data = simplejson.dumps(self.data)
self.data = self.callback + '(' + self.data + ');'
cherrypy.response.headers['Content-Type'] = "application/javascript"
return self.data
else:
return self.data
def _dic_from_query(self,query):
myDB = db.DBConnection()
rows = myDB.select(query)
rows_as_dic = []
for row in rows:
row_as_dic = dict(zip(row.keys(), row))
rows_as_dic.append(row_as_dic)
return rows_as_dic
def _getIndex(self, **kwargs):
self.data = self._dic_from_query('SELECT * from comics order by ComicSortName COLLATE NOCASE')
return
def _getComic(self, **kwargs):
if 'id' not in kwargs:
self.data = 'Missing parameter: id'
return
else:
self.id = kwargs['id']
comic = self._dic_from_query('SELECT * from comics WHERE ComicID="' + self.id + '"')
issues = self._dic_from_query('SELECT * from issues WHERE ComicID="' + self.id + '" order by Int_IssueNumber DESC')
if mylar.ANNUALS_ON:
annuals = self._dic_from_query('SELECT * FROM annuals WHERE ComicID="' + self.id + '"')
else: annuals = None
self.data = { 'comic': comic, 'issues': issues, 'annuals': annuals }
return
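Note that _getComic interpolates self.id straight into the SQL string. The same db wrapper already supports parameter binding elsewhere in the codebase (see the myDB.select calls in importer.py), which sidesteps quoting problems -- a sketch of the same two lookups in bound form:

# Sketch: the same queries with sqlite3 parameter binding, assuming
# db.DBConnection.select forwards the args list to cursor.execute.
myDB = db.DBConnection()
comic = myDB.select('SELECT * FROM comics WHERE ComicID=?', [self.id])
issues = myDB.select('SELECT * FROM issues WHERE ComicID=? '
                     'ORDER BY Int_IssueNumber DESC', [self.id])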
def _getHistory(self, **kwargs):
self.data = self._dic_from_query('SELECT * from snatched order by DateAdded DESC')
return
def _getUpcoming(self, **kwargs):
self.data = self._dic_from_query("SELECT * from upcoming WHERE IssueID is NULL order by IssueDate DESC")
return
def _getWanted(self, **kwargs):
self.data = self._dic_from_query("SELECT * from issues WHERE Status='Wanted'")
return
def _getLogs(self, **kwargs):
pass
def _delComic(self, **kwargs):
if 'id' not in kwargs:
self.data = 'Missing parameter: id'
return
else:
self.id = kwargs['id']
myDB = db.DBConnection()
myDB.action('DELETE from comics WHERE ComicID="' + self.id + '"')
myDB.action('DELETE from issues WHERE ComicID="' + self.id + '"')
myDB.action('DELETE from upcoming WHERE ComicID="' + self.id + '"')
def _pauseComic(self, **kwargs):
if 'id' not in kwargs:
self.data = 'Missing parameter: id'
return
else:
self.id = kwargs['id']
myDB = db.DBConnection()
controlValueDict = {'ComicID': self.id}
newValueDict = {'Status': 'Paused'}
myDB.upsert("comics", newValueDict, controlValueDict)
def _resumeComic(self, **kwargs):
if 'id' not in kwargs:
self.data = 'Missing parameter: id'
return
else:
self.id = kwargs['id']
myDB = db.DBConnection()
controlValueDict = {'ComicID': self.id}
newValueDict = {'Status': 'Active'}
myDB.upsert("comics", newValueDict, controlValueDict)
def _refreshComic(self, **kwargs):
if 'id' not in kwargs:
self.data = 'Missing parameter: id'
return
else:
self.id = kwargs['id']
try:
importer.addComictoDB(self.id)
except Exception, e:
self.data = e
return
def _addComic(self, **kwargs):
if 'id' not in kwargs:
self.data = 'Missing parameter: id'
return
else:
self.id = kwargs['id']
try:
importer.addReleaseById(self.id)
except Exception, e:
self.data = e
return
def _queueIssue(self, **kwargs):
if 'id' not in kwargs:
self.data = 'Missing parameter: id'
return
else:
self.id = kwargs['id']
myDB = db.DBConnection()
controlValueDict = {'IssueID': self.id}
newValueDict = {'Status': 'Wanted'}
myDB.upsert("issues", newValueDict, controlValueDict)
search.searchforissue(self.id)
def _unqueueIssue(self, **kwargs):
if 'id' not in kwargs:
self.data = 'Missing parameter: id'
return
else:
self.id = kwargs['id']
myDB = db.DBConnection()
controlValueDict = {'IssueID': self.id}
newValueDict = {'Status': 'Skipped'}
myDB.upsert("issues", newValueDict, controlValueDict)
def _forceSearch(self, **kwargs):
search.searchforissue()
def _forceProcess(self, **kwargs):
if 'nzb_name' not in kwargs:
self.data = 'Missing parameter: nzb_name'
@ -253,67 +260,108 @@ class Api(object):
self.nzb_folder = kwargs['nzb_folder']
forceProcess = PostProcessor.PostProcessor(self.nzb_name, self.nzb_folder)
forceProcess.Process()
def _getVersion(self, **kwargs):
self.data = {
'git_path' : mylar.GIT_PATH,
'install_type' : mylar.INSTALL_TYPE,
'current_version' : mylar.CURRENT_VERSION,
'latest_version' : mylar.LATEST_VERSION,
'commits_behind' : mylar.COMMITS_BEHIND,
}
def _checkGithub(self, **kwargs):
versioncheck.checkGithub()
self._getVersion()
def _shutdown(self, **kwargs):
mylar.SIGNAL = 'shutdown'
def _restart(self, **kwargs):
mylar.SIGNAL = 'restart'
def _update(self, **kwargs):
mylar.SIGNAL = 'update'
def _getArtistArt(self, **kwargs):
if 'id' not in kwargs:
self.data = 'Missing parameter: id'
return
else:
self.id = kwargs['id']
self.data = cache.getArtwork(ComicID=self.id)
def _getIssueArt(self, **kwargs):
if 'id' not in kwargs:
self.data = 'Missing parameter: id'
return
else:
self.id = kwargs['id']
self.data = cache.getArtwork(IssueID=self.id)
def _getComicInfo(self, **kwargs):
if 'id' not in kwargs:
self.data = 'Missing parameter: id'
return
else:
self.id = kwargs['id']
self.data = cache.getInfo(ComicID=self.id)
def _getIssueInfo(self, **kwargs):
if 'id' not in kwargs:
self.data = 'Missing parameter: id'
return
else:
self.id = kwargs['id']
self.data = cache.getInfo(IssueID=self.id)
def _getArt(self, **kwargs):
if 'id' not in kwargs:
self.data = 'Missing parameter: id'
return
else:
self.id = kwargs['id']
img = None
image_path = os.path.join(mylar.CACHE_DIR, str(self.id) + '.jpg')
# Check that it's a valid path and file
if os.path.isfile(image_path):
# check that it's a valid image
if imghdr.what(image_path):
self.img = image_path
return
else:
# If we can't find the image, let's check the db for a URL.
comic = self._dic_from_query('SELECT * from comics WHERE ComicID="' + self.id + '"')
# Try every image URL in the db
try:
img = urllib2.urlopen(comic[0]['ComicImageURL']).read()
except:
try:
img = urllib2.urlopen(comic[0]['ComicImageALTURL']).read()
except:
pass
if img:
# verify the img stream
if imghdr.what(None, img):
with open(image_path, 'wb') as f:
f.write(img)
self.img = image_path
return
else:
self.data = 'Failed to return an image'
else:
self.data = 'Failed to return an image'
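imghdr.what(None, img) sniffs the magic bytes of the in-memory download, so a fetch that actually returned an HTML error page is never written into the cache. The same verify-then-cache step as a standalone sketch (the helper name is made up):

import imghdr

def cache_if_image(image_path, img_bytes):
    # Pass None as the filename so imghdr inspects the raw bytes;
    # it returns 'jpeg', 'png', etc. for real images, or None otherwise.
    if imghdr.what(None, img_bytes) is None:
        return False
    with open(image_path, 'wb') as f:
        f.write(img_bytes)
    return True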

mylar/importer.py

@ -1,3 +1,6 @@
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# This file is part of Mylar.
#
# Mylar is free software: you can redistribute it and/or modify
@ -29,11 +32,11 @@ import cherrypy
import mylar
from mylar import logger, helpers, db, mb, albumart, cv, parseit, filechecker, search, updater, moveit, comicbookdb
def is_exists(comicid):
myDB = db.DBConnection()
# See if the artist is already in the database
comiclist = myDB.select('SELECT ComicID, ComicName from comics WHERE ComicID=?', [comicid])
@ -47,9 +50,9 @@ def is_exists(comicid):
def addComictoDB(comicid,mismatch=None,pullupd=None,imported=None,ogcname=None,calledfrom=None,annload=None,chkwant=None,issuechk=None,issuetype=None,latestissueinfo=None):
# Putting this here to get around the circular import. Will try to use this to update images at later date.
# from mylar import cache
myDB = db.DBConnection()
# We need the current minimal info in the database instantly
# so we don't throw a 500 error when we redirect to the artistPage
@ -88,8 +91,9 @@ def addComictoDB(comicid,mismatch=None,pullupd=None,imported=None,ogcname=None,c
if pullupd is None:
helpers.ComicSort(comicorder=mylar.COMICSORT, imported=comicid)
# we need to lookup the info for the requested ComicID in full now
comic = cv.getComic(comicid,'comic')
logger.fdebug(comic)
if not comic:
logger.warn('Error fetching comic ID: ' + comicid)
@ -101,12 +105,12 @@ def addComictoDB(comicid,mismatch=None,pullupd=None,imported=None,ogcname=None,c
newValueDict = {"Status": "Active"}
myDB.upsert("comics", newValueDict, controlValueDict)
return
if comic['ComicName'].startswith('The '):
sortname = comic['ComicName'][4:]
else:
sortname = comic['ComicName']
logger.info('Now adding/updating: ' + comic['ComicName'])
#--Now that we know ComicName, let's try some scraping
@ -114,7 +118,7 @@ def addComictoDB(comicid,mismatch=None,pullupd=None,imported=None,ogcname=None,c
# gcd will return issue details (most importantly publishing date)
if not mylar.CV_ONLY:
if mismatch == "no" or mismatch is None:
gcdinfo=parseit.GCDScraper(comic['ComicName'], comic['ComicYear'], comic['ComicIssues'], comicid)
#print ("gcdinfo: " + str(gcdinfo))
mismatch_com = "no"
if gcdinfo == "No Match":
@ -166,16 +170,16 @@ def addComictoDB(comicid,mismatch=None,pullupd=None,imported=None,ogcname=None,c
if mylar.ANNUALS_ON:
#we need to check first to see if there are pre-existing annuals that have been manually added, or else they'll get
#wiped out.
annualids = [] #to be used to make sure an ID isn't double-loaded
if annload is None:
pass
else:
for manchk in annload:
if manchk['ReleaseComicID'] is not None: #if it exists, then it's a pre-existing add.
#print str(manchk['ReleaseComicID']), comic['ComicName'], str(SeriesYear), str(comicid)
manualAnnual(manchk['ReleaseComicID'], comic['ComicName'], SeriesYear, comicid)
annualids.append(manchk['ReleaseComicID'])
annualcomicname = re.sub('[\,\:]', '', comic['ComicName'])
@ -203,7 +207,7 @@ def addComictoDB(comicid,mismatch=None,pullupd=None,imported=None,ogcname=None,c
#print "annissues :" + str(annissues)
# annuals happen once / year. determine how many.
annualyear = SeriesYear # no matter what, the year won't be less than this.
#if annualval['AnnualYear'] is None:
# sresults = mb.findComic(annComicName, mode, issue=annissues)
#else:
@ -294,7 +298,7 @@ def addComictoDB(comicid,mismatch=None,pullupd=None,imported=None,ogcname=None,c
logger.fdebug('[IMPORTER-ANNUAL] - ' + str(annualyear) + ' is less than ' + str(sr['comicyear']))
if int(sr['issues']) > (2013 - int(sr['comicyear'])):
logger.fdebug('[IMPORTER-ANNUAL] - Issue count is wrong')
#newCtrl = {"IssueID": issueid}
#newVals = {"Issue_Number": annualval['AnnualIssue'],
# "IssueDate": annualval['AnnualDate'],
@ -325,7 +329,7 @@ def addComictoDB(comicid,mismatch=None,pullupd=None,imported=None,ogcname=None,c
chunk_f_f = re.sub('\$VolumeN','',mylar.FILE_FORMAT)
chunk_f = re.compile(r'\s+')
mylar.FILE_FORMAT = chunk_f.sub(' ', chunk_f_f)
#do work to generate folder path
values = {'$Series': series,
@ -397,7 +401,7 @@ def addComictoDB(comicid,mismatch=None,pullupd=None,imported=None,ogcname=None,c
#thisci = urllib.quote_plus(str(comic['ComicImage']))
#urllib.urlretrieve(str(thisci), str(coverfile))
try:
cimage = re.sub('[\+]','%20', comic['ComicImage'])
request = urllib2.Request(cimage)#, headers={'Content-Type': 'application/x-www-form-urlencoded'})
@ -430,7 +434,7 @@ def addComictoDB(comicid,mismatch=None,pullupd=None,imported=None,ogcname=None,c
logger.info('Successfully retrieved cover for ' + comic['ComicName'])
except Exception, e:
logger.warn('[%s] Error fetching data using : %s' % (e, comic['ComicImageALT']))
PRComicImage = os.path.join('cache',str(comicid) + ".jpg")
ComicImage = helpers.replacetheslash(PRComicImage)
@ -469,6 +473,8 @@ def addComictoDB(comicid,mismatch=None,pullupd=None,imported=None,ogcname=None,c
"ComicName_Filesafe": comicname_filesafe,
"ComicYear": SeriesYear,
"ComicImage": ComicImage,
"ComicImageURL": comic.get("ComicImage", ""),
"ComicImageALTURL": comic.get("ComicImageALT", ""),
"Total": comicIssues,
"ComicVersion": comicVol,
"ComicLocation": comlocation,
@ -479,7 +485,7 @@ def addComictoDB(comicid,mismatch=None,pullupd=None,imported=None,ogcname=None,c
"ComicPublished": "Unknown",
"DateAdded": helpers.today(),
"Status": "Loading"}
myDB.upsert("comics", newValueDict, controlValueDict)
#comicsort here...
@ -496,7 +502,7 @@ def addComictoDB(comicid,mismatch=None,pullupd=None,imported=None,ogcname=None,c
logger.info('Successfully retrieved issue details for ' + comic['ComicName'])
#move to own function so can call independently to only refresh issue data
#issued is from cv.getComic, comic['ComicName'] & comicid would both be already known to do independent call.
issuedata = updateissuedata(comicid, comic['ComicName'], issued, comicIssues, calledfrom, SeriesYear=SeriesYear, latestissueinfo=latestissueinfo)
if issuedata is None:
logger.warn('Unable to complete Refreshing / Adding issue data - this WILL create future problems if not addressed.')
@ -506,7 +512,7 @@ def addComictoDB(comicid,mismatch=None,pullupd=None,imported=None,ogcname=None,c
if not os.path.exists(os.path.join(comlocation,"cvinfo")) or mylar.CV_ONETIMER:
with open(os.path.join(comlocation,"cvinfo"),"w") as text_file:
text_file.write(str(comic['ComicURL']))
logger.info('Updating complete for: ' + comic['ComicName'])
if calledfrom == 'weekly':
@ -565,12 +571,12 @@ def addComictoDB(comicid,mismatch=None,pullupd=None,imported=None,ogcname=None,c
logger.info('Checking this week pullist for new issues of ' + comic['ComicName'])
if comic['ComicName'] != comicname_filesafe:
cn_pull = comicname_filesafe
else:
cn_pull = comic['ComicName']
updater.newpullcheck(ComicName=cn_pull,ComicID=comicid,issue=latestiss)
#here we grab issues that have been marked as wanted above...
results = []
issresults = myDB.select("SELECT * FROM issues where ComicID=? AND Status='Wanted'", [comicid])
if issresults:
for issr in issresults:
@ -590,7 +596,7 @@ def addComictoDB(comicid,mismatch=None,pullupd=None,imported=None,ogcname=None,c
if results:
logger.info('Attempting to grab wanted issues for : ' + comic['ComicName'])
for result in results:
logger.fdebug('Searching for : ' + str(result['Issue_Number']))
logger.fdebug('Status of : ' + str(result['Status']))
@ -621,7 +627,7 @@ def addComictoDB(comicid,mismatch=None,pullupd=None,imported=None,ogcname=None,c
if imported == 'futurecheck':
logger.info('Returning to Future-Check module to complete the add & remove entry.')
return
if imported == 'yes':
logger.info('Successfully imported : ' + comic['ComicName'])
@ -646,7 +652,7 @@ def GCDimport(gcomicid, pullupd=None,imported=None,ogcname=None):
# because Comicvine ComicID and GCD ComicID could be identical at some random point, let's distinguish.
# CV = comicid, GCD = gcomicid :) (ie. CV=2740, GCD=G3719)
gcdcomicid = gcomicid
myDB = db.DBConnection()
@ -706,7 +712,7 @@ def GCDimport(gcomicid, pullupd=None,imported=None,ogcname=None):
logger.info(u"Sucessfully retrieved details for " + ComicName )
# print ("Series Published" + parseit.resultPublished)
#--End
ComicImage = gcdinfo['ComicImage']
#comic book location on machine
@ -722,7 +728,7 @@ def GCDimport(gcomicid, pullupd=None,imported=None,ogcname=None):
if '/' in comicdir:
comicdir = comicdir.replace('/','-')
if ',' in comicdir:
comicdir = comicdir.replace(',','')
else: comicdir = u_comicname
series = comicdir
@ -795,7 +801,7 @@ def GCDimport(gcomicid, pullupd=None,imported=None,ogcname=None):
shutil.copy(ComicImage,comiclocal)
except IOError as e:
logger.error(u"Unable to save cover locally at this time.")
#if comic['ComicVersion'].isdigit():
# comicVol = "v" + comic['ComicVersion']
#else:
@ -810,6 +816,8 @@ def GCDimport(gcomicid, pullupd=None,imported=None,ogcname=None):
"ComicLocation": comlocation,
#"ComicVersion": comicVol,
"ComicImage": ComicImage,
"ComicImageURL": comic.get('ComicImage', ''),
"ComicImageALTURL": comic.get('ComicImageALT', ''),
#"ComicPublisher": comic['ComicPublisher'],
#"ComicPublished": comicPublished,
"DateAdded": helpers.today(),
@ -890,7 +898,7 @@ def GCDimport(gcomicid, pullupd=None,imported=None,ogcname=None):
#adjust for inconsistencies in GCD date format - some dates have ? which borks up things.
if "?" in str(issdate):
issdate = "0000-00-00"
issdate = "0000-00-00"
controlValueDict = {"IssueID": issid}
newValueDict = {"ComicID": gcomicid,
@ -985,7 +993,7 @@ def issue_collection(issuedata,nostatus):
nowdate = datetime.datetime.now()
nowtime = nowdate.strftime("%Y%m%d")
if issuedata:
for issue in issuedata:
@ -1128,7 +1136,7 @@ def updateissuedata(comicid, comicname=None, issued=None, comicIssues=None, call
return
# poll against annuals here - to make sure annuals are uptodate.
weeklyissue_check = annual_check(comicname, SeriesYear, comicid, issuetype, issuechk, weeklyissue_check)
if weeklyissue_check is None:
weeklyissue_check = []
logger.fdebug('Finished Annual checking.')
@ -1169,7 +1177,7 @@ def updateissuedata(comicid, comicname=None, issued=None, comicIssues=None, call
if issnum.isdigit():
int_issnum = int( issnum ) * 1000
else:
if 'a.i.' in issnum.lower() or 'ai' in issnum.lower():
issnum = re.sub('\.', '', issnum)
#int_issnum = (int(issnum[:-2]) * 1000) + ord('a') + ord('i')
if 'au' in issnum.lower():
@ -1343,7 +1351,7 @@ def updateissuedata(comicid, comicname=None, issued=None, comicIssues=None, call
n_date = datetime.date.today()
recentchk = (n_date - c_date).days
if recentchk <= 55:
lastpubdate = 'Present'
else:
@ -1356,7 +1364,7 @@ def updateissuedata(comicid, comicname=None, issued=None, comicIssues=None, call
latestiss = latestissueinfo[0]['latestiss']
lastpubdate = 'Present'
publishfigure = str(SeriesYear) + ' - ' + str(lastpubdate)
controlValueStat = {"ComicID": comicid}
@ -1461,7 +1469,7 @@ def annual_check(ComicName, SeriesYear, comicid, issuetype, issuechk, weeklyissu
except IndexError:
break
try:
cleanname = helpers.cleanName(firstval['Issue_Name'])
except:
cleanname = 'None'
issid = str(firstval['Issue_ID'])

mylar/webstart.py

@ -1,3 +1,6 @@
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# This file is part of Headphones.
#
# Headphones is free software: you can redistribute it and/or modify
@ -76,7 +79,7 @@ def initialize(options):
conf = {
'/': {
'tools.staticdir.root': os.path.join(mylar.PROG_DIR, 'data')
},
'/interfaces':{
'tools.staticdir.on': True,
@ -100,10 +103,11 @@ def initialize(options):
},
'/cache':{
'tools.staticdir.on': True,
'tools.staticdir.dir': mylar.CACHE_DIR
'tools.staticdir.dir': mylar.CACHE_DIR,
'tools.auth_basic.on': False
}
}
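Basic auth is only switched on for '/' when a password is configured (the conditional just below), so the explicit 'tools.auth_basic.on': False entry is what exempts /cache and lets cover images -- including those served to getArt clients -- be fetched without credentials. A minimal sketch of the same per-path override in isolation, with placeholder credentials and paths:

import cherrypy
from cherrypy.lib import auth_basic

conf = {
    '/': {
        'tools.auth_basic.on': True,
        'tools.auth_basic.realm': 'Mylar',
        'tools.auth_basic.checkpassword':
            auth_basic.checkpassword_dict({'user': 'pass'}),  # placeholders
    },
    '/cache': {
        'tools.staticdir.on': True,
        'tools.staticdir.dir': '/path/to/cache',  # placeholder
        'tools.auth_basic.on': False,             # per-path override: no auth
    },
}
# cherrypy merges per-path config over the root config, so
# everything under /cache is served without the auth challenge.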
if options['http_password'] != "":
conf['/'].update({
'tools.auth_basic.on': True,
@ -115,16 +119,14 @@ def initialize(options):
# Prevent time-outs
cherrypy.engine.timeout_monitor.unsubscribe()
cherrypy.tree.mount(WebInterface(), options['http_root'], config = conf)
try:
cherrypy.process.servers.check_port(options['http_host'], options['http_port'])
cherrypy.server.start()
except IOError:
print 'Failed to start on port: %i. Is something else running?' % (options['http_port'])
sys.exit(0)
cherrypy.server.wait()