Merge branch 'development'

evilhero 2018-06-14 12:43:12 -04:00
commit 81252f3ebb
11 changed files with 266 additions and 184 deletions

View File

@@ -89,12 +89,13 @@ def main():
parser_maintenance.add_argument('-ij', '--importjson', action='store', help='Import a specified json file containing just {"ComicID": "XXXXX"} into current db')
parser_maintenance.add_argument('-st', '--importstatus', action='store_true', help='Provide current maintenance status')
parser_maintenance.add_argument('-u', '--update', action='store_true', help='force mylar to perform an update as if in GUI')
parser_maintenance.add_argument('-fs', '--fixslashes', action='store_true', help='remove double-slashes from within paths in db')
#parser_maintenance.add_argument('-it', '--importtext', action='store', help='Import a specified text file into current db')
args = parser.parse_args()
if args.maintenance:
if all([args.exportjson is None, args.importdatabase is None, args.importjson is None, args.importstatus is False, args.update is False]):
if all([args.exportjson is None, args.importdatabase is None, args.importjson is None, args.importstatus is False, args.update is False, args.fixslashes is False]):
print 'Expecting subcommand with the maintenance positional argument'
sys.exit()
mylar.MAINTENANCE = True
@@ -194,7 +195,7 @@ def main():
back = os.path.join(backupdir, 'mylar.db')
back_1 = os.path.join(backupdir, 'mylar.db.1')
else:
ogfile = config_file
ogfile = mylar.CONFIG_FILE
back = os.path.join(backupdir, 'config.ini')
back_1 = os.path.join(backupdir, 'config.ini.1')
@@ -221,7 +222,7 @@ def main():
if mylar.DAEMON:
mylar.daemonize()
if mylar.MAINTENANCE is True and any([args.exportjson, args.importjson, args.update is True, args.importstatus is True]):
if mylar.MAINTENANCE is True and any([args.exportjson, args.importjson, args.update is True, args.importstatus is True, args.fixslashes is True]):
loggermode = '[MAINTENANCE-MODE]'
if args.importstatus: #mylar.MAINTENANCE is True:
cs = maintenance.Maintenance('status')
@@ -260,6 +261,11 @@ def main():
logger.info('%s file indicated as being written to json format - destination accepted as %s' % (loggermode, maintenance_path))
ej = maintenance.Maintenance('json-export', output=maintenance_path)
j = ej.json_export()
elif args.fixslashes:
#for running the fix slashes on the db manually
logger.info('%s method indicated as fix slashes' % loggermode)
fs = maintenance.Maintenance('fixslashes')
j = fs.fix_slashes()
else:
logger.info('%s Not a valid command: %s' % (loggermode, maintenance_info))
sys.exit()
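(For reference, the new flag would presumably be invoked as "python Mylar.py maintenance --fixslashes", with maintenance being the positional argument the error message above expects, alongside the existing -st / -u / -ij / -ej subcommands.)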

View File

@@ -57,43 +57,41 @@
<script>
$(document).ready(function() {
initActions();
$('#log_table').dataTable( {
"bProcessing": true,
"bServerSide": true,
"sAjaxSource": 'getLog',
"sPaginationType": "full_numbers",
"aaSorting": [[0, 'desc']],
"iDisplayLength": 25,
"bStateSave": true,
"oLanguage": {
"sSearch":"Filter:",
"sLengthMenu":"Show _MENU_ lines per page",
"sEmptyTable": "No log information available",
"sInfo":"Showing _START_ to _END_ of _TOTAL_ lines",
"sInfoEmpty":"Showing 0 to 0 of 0 lines",
"sInfoFiltered":"(filtered from _MAX_ total lines)"},
"fnRowCallback": function (nRow, aData, iDisplayIndex, iDisplayIndexFull) {
if (aData[1] === "ERROR") {
$('td', nRow).closest('tr').addClass("gradeX");
} else if (aData[1] === "WARNING") {
$('td', nRow).closest('tr').addClass("gradeW");
} else {
$('td', nRow).closest('tr').addClass("gradeZ");
}
return nRow;
},
"fnDrawCallback": function (o) {
// Jump to top of page
$('html,body').scrollTop(0);
},
"fnServerData": function ( sSource, aoData, fnCallback ) {
$('#log_table').dataTable( {
"bProcessing": true,
"bServerSide": true,
"sAjaxSource": 'getLog',
"sPaginationType": "full_numbers",
"aaSorting": [[0, 'desc']],
"iDisplayLength": 25,
"bStateSave": true,
"oLanguage": {
"sSearch":"Filter:",
"sLengthMenu":"Show _MENU_ lines per page",
"sEmptyTable": "No log information available",
"sInfo":"Showing _START_ to _END_ of _TOTAL_ lines",
"sInfoEmpty":"Showing 0 to 0 of 0 lines",
"sInfoFiltered":"(filtered from _MAX_ total lines)"},
"fnRowCallback": function (nRow, aData, iDisplayIndex, iDisplayIndexFull) {
if (aData[1] === "ERROR") {
$('td', nRow).closest('tr').addClass("gradeX");
} else if (aData[1] === "WARNING") {
$('td', nRow).closest('tr').addClass("gradeW");
} else {
$('td', nRow).closest('tr').addClass("gradeZ");
}
return nRow;
},
"fnDrawCallback": function (o) {
// Jump to top of page
$('html,body').scrollTop(0);
},
"fnServerData": function ( sSource, aoData, fnCallback ) {
/* Add some extra data to the sender */
$.getJSON(sSource, aoData, function (json) {
fnCallback(json)
});
}
}
});
});
</script>

View File

@@ -687,7 +687,7 @@ class PostProcessor(object):
mlp.append(x)
else:
pass
if len(mlp) == 1:
if len(manual_list) == 1 and len(mlp) == 1:
manual_list = mlp
#logger.fdebug(module + '[CONFIRMED-FORCE-OVERRIDE] Over-ride of matching taken due to exact name matching of series')
@@ -987,7 +987,7 @@ class PostProcessor(object):
continue
#launch failed download handling here.
elif metaresponse.startswith('file not found'):
filename_in_error = os.path.split(metaresponse, '||')[1]
filename_in_error = metaresponse.split('||')[1]
self._log("The file cannot be found in the location provided for metatagging to be used [" + filename_in_error + "]. Please verify it exists, and re-run if necessary. Attempting to continue without metatagging...")
logger.error(module + ' The file cannot be found in the location provided for metatagging to be used [' + filename_in_error + ']. Please verify it exists, and re-run if necessary. Attempting to continue without metatagging...')
else:
@@ -1420,7 +1420,7 @@ class PostProcessor(object):
logger.error(module + ' This is a corrupt archive - whether CRC errors or it is incomplete. Marking as BAD, and retrying it.')
#launch failed download handling here.
elif metaresponse.startswith('file not found'):
filename_in_error = os.path.split(metaresponse, '||')[1]
filename_in_error = metaresponse.split('||')[1]
self._log("The file cannot be found in the location provided for metatagging [" + filename_in_error + "]. Please verify it exists, and re-run if necessary.")
logger.error(module + ' The file cannot be found in the location provided for metatagging [' + filename_in_error + ']. Please verify it exists, and re-run if necessary.')
else:
@@ -1950,7 +1950,7 @@ class PostProcessor(object):
"annchk": annchk})
return self.queue.put(self.valreturn)
elif pcheck.startswith('file not found'):
filename_in_error = os.path.split(pcheck, '||')[1]
filename_in_error = pcheck.split('||')[1]
self._log("The file cannot be found in the location provided [" + filename_in_error + "]. Please verify it exists, and re-run if necessary. Aborting.")
logger.error(module + ' The file cannot be found in the location provided [' + filename_in_error + ']. Please verify it exists, and re-run if necessary. Aborting')
self.failed_files +=1

View File

@@ -1039,6 +1039,7 @@ def dbcheck():
c.execute("DELETE from annuals WHERE ComicName='None' OR ComicName is NULL or Issue_Number is NULL")
c.execute("DELETE from upcoming WHERE ComicName='None' OR ComicName is NULL or IssueNumber is NULL")
c.execute("DELETE from importresults WHERE ComicName='None' OR ComicName is NULL")
c.execute("DELETE from storyarcs WHERE StoryArcID is NULL OR StoryArc is NULL")
c.execute("DELETE from Failed WHERE ComicName='None' OR ComicName is NULL OR ID is NULL")
logger.info('Ensuring DB integrity - Removing all Erroneous Comics (ie. named None)')
@@ -1086,7 +1087,7 @@ def csv_load():
try:
shutil.copy(os.path.join(DATA_DIR, "custom_exceptions_sample.csv"), EXCEPTIONS_FILE)
except (OSError, IOError):
logger.error('Cannot create custom_exceptions.csv in ' + str(DATA_DIR) + '. Make sure _sample.csv is present and/or check permissions.')
logger.error('Cannot create custom_exceptions.csv in ' + str(DATA_DIR) + '. Make sure custom_exceptions_sample.csv is present and/or check permissions.')
return
else:
logger.error('Could not locate ' + str(EXCEPTIONS[i]) + ' file. Make sure it is in datadir: ' + DATA_DIR)

View File

@@ -84,6 +84,34 @@ class Maintenance(object):
logger.info('[MAINTENANCE-MODE][%s] Successfully exported %s ComicID\'s to json file: %s' % (self.mode.upper(), len(self.comiclist), self.outputfile))
def fix_slashes(self):
self.sql_attachmylar()
for ct in self.dbmylar.execute("SELECT ComicID, ComicLocation FROM comics WHERE ComicLocation like ?", ['%' + os.sep.encode('unicode-escape') + os.sep.encode('unicode-escape') + '%']):
st = ct[1].find(os.sep.encode('unicode-escape')+os.sep.encode('unicode-escape'))
if st != -1:
rootloc = ct[1][:st]
clocation = ct[1][st+2:]
if clocation[0] != os.sep.encode('unicode-escape'):
new_path = os.path.join(rootloc, clocation)
logger.info('[Incorrect slashes in path detected for OS] %s' % os.path.join(rootloc, ct[1]))
logger.info('[PATH CORRECTION] %s' % new_path)
self.comiclist.append({'ComicLocation': new_path,
'ComicID': ct[0]})
for cm in self.comiclist:
try:
self.dbmylar.execute("UPDATE comics SET ComicLocation=? WHERE ComicID=?", (cm['ComicLocation'], cm['ComicID']))
except Exception as e:
logger.warn('Unable to correct entry: [ComicID:%s] %s [%s]' % (cm['ComicID'], cm['ComicLocation'], e))
self.sql_closemylar()
if len(self.comiclist) > 0:
logger.info('[MAINTENANCE-MODE][%s] Successfully fixed the path slashes for %s series' % (self.mode.upper(), len(self.comiclist)))
else:
logger.info('[MAINTENANCE-MODE][%s] No series found with incorrect slashes in the path' % self.mode.upper())
def check_status(self):
try:
found = False
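A minimal standalone sketch of the slash repair that fix_slashes() performs, assuming a plain os.sep comparison in place of the unicode-escape encoding above and leaving out the database plumbing:

    import os

    def fix_double_slashes(path):
        # locate the first doubled separator, as fix_slashes() does
        st = path.find(os.sep * 2)
        if st != -1:
            rootloc, clocation = path[:st], path[st + 2:]
            # skip triple-or-more separators, mirroring the clocation[0] check
            if not clocation.startswith(os.sep):
                return os.path.join(rootloc, clocation)
        return path

    # e.g. '/comics//DC/Batman' -> '/comics/DC/Batman' on posix systems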

View File

@@ -43,8 +43,9 @@ def movefiles(comicid, comlocation, imported):
logger.info("moving " + srcimp + " ... to " + dstimp)
try:
shutil.move(srcimp, dstimp)
files_moved.append({'srid': imported['srid'],
'filename': impr['comicfilename']})
files_moved.append({'srid': imported['srid'],
'filename': impr['comicfilename'],
'import_id': impr['import_id']})
except (OSError, IOError):
logger.error("Failed to move files - check directories and manually re-run.")

View File

@@ -34,8 +34,21 @@ class NZBGet(object):
elif mylar.CONFIG.NZBGET_HOST[:4] == 'http':
protocol = "http"
nzbget_host = mylar.CONFIG.NZBGET_HOST[7:]
self.nzb_url = '%s://%s:%s@%s:%s/xmlrpc' % (protocol, mylar.CONFIG.NZBGET_USERNAME, mylar.CONFIG.NZBGET_PASSWORD, nzbget_host, mylar.CONFIG.NZBGET_PORT)
self.server = xmlrpclib.ServerProxy(self.nzb_url)
url = '%s://'
val = (protocol,)
if mylar.CONFIG.NZBGET_USERNAME is not None:
url = url + '%s:'
val = val + (mylar.CONFIG.NZBGET_USERNAME,)
if mylar.CONFIG.NZBGET_PASSWORD is not None:
url = url + '%s'
val = val + (mylar.CONFIG.NZBGET_PASSWORD,)
if any([mylar.CONFIG.NZBGET_USERNAME, mylar.CONFIG.NZBGET_PASSWORD]):
url = url + '@%s:%s/xmlrpc'
else:
url = url + '%s:%s/xmlrpc'
val = val + (nzbget_host,mylar.CONFIG.NZBGET_PORT,)
self.nzb_url = (url % val)
self.server = xmlrpclib.ServerProxy(self.nzb_url) #,allow_none=True)
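Condensed, the branching above assembles the xmlrpc endpoint roughly as follows (a sketch with invented credentials; the real code accumulates the url/val format pair instead):

    def build_nzbget_url(protocol, host, port, username=None, password=None):
        url = '%s://' % protocol
        if username is not None:
            url += '%s:' % username
        if password is not None:
            url += '%s' % password
        if username is not None or password is not None:
            url += '@'
        return url + '%s:%s/xmlrpc' % (host, port)

    # build_nzbget_url('http', '192.168.1.10', 6789, 'nzbget', 'tegbzn6789')
    #   -> 'http://nzbget:tegbzn6789@192.168.1.10:6789/xmlrpc'
    # build_nzbget_url('http', '192.168.1.10', 6789)
    #   -> 'http://192.168.1.10:6789/xmlrpc'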
def sender(self, filename, test=False):
if mylar.CONFIG.NZBGET_PRIORITY:
@@ -59,7 +72,11 @@ class NZBGet(object):
nzbcontent64 = standard_b64encode(nzbcontent)
try:
logger.fdebug('sending now to %s' % self.nzb_url)
sendresponse = self.server.append(filename, nzbcontent64, mylar.CONFIG.NZBGET_CATEGORY, nzbgetpriority, False, False, '', 0, 'SCORE')
if mylar.CONFIG.NZBGET_CATEGORY is None:
nzb_category = ''
else:
nzb_category = mylar.CONFIG.NZBGET_CATEGORY
sendresponse = self.server.append(filename, nzbcontent64, nzb_category, nzbgetpriority, False, False, '', 0, 'SCORE')
except Exception as e:
logger.warn('uh-oh: %s' % e)
return {'status': False}
@@ -78,7 +95,7 @@ class NZBGet(object):
try:
logger.fdebug('Now checking the active queue of nzbget for the download')
queueinfo = self.server.listgroups()
except Expection as e:
except Exception as e:
logger.warn('Error attempting to retrieve active queue listing: %s' % e)
return {'status': False}
else:
@@ -86,7 +103,7 @@ class NZBGet(object):
queuedl = [qu for qu in queueinfo if qu['NZBID'] == nzbid]
if len(queuedl) == 0:
logger.warn('Unable to locate item in active queue. Could it be finished already ?')
return {'status': False}
return self.historycheck(nzbid)
stat = False
while stat is False:
@@ -103,25 +120,29 @@ class NZBGet(object):
logger.fdebug('Download Left: %sMB' % queuedl[0]['RemainingSizeMB'])
logger.fdebug('health: %s' % (queuedl[0]['Health']/10))
logger.fdebug('destination: %s' % queuedl[0]['DestDir'])
logger.fdebug('File has now downloaded!')
time.sleep(5) #wait some seconds so shit can get written to history properly
history = self.server.history()
found = False
hq = [hs for hs in history if hs['NZBID'] == nzbid and 'SUCCESS' in hs['Status']]
if len(hq) > 0:
logger.fdebug('found matching completed item in history. Job has a status of %s' % hq[0]['Status'])
if hq[0]['DownloadedSizeMB'] == hq[0]['FileSizeMB']:
logger.fdebug('%s has final file size of %sMB' % (hq[0]['Name'], hq[0]['DownloadedSizeMB']))
if os.path.isdir(hq[0]['DestDir']):
logger.fdebug('location found @ %s' % hq[0]['DestDir'])
return {'status': True,
'name': re.sub('.nzb', '', hq[0]['NZBName']).strip(),
'location': hq[0]['DestDir'],
'failed': False}
return self.historycheck(nzbid)
else:
logger.warn('no file found where it should be @ %s - is there another script that moves things after completion ?' % hq[0]['DestDir'])
return {'status': False}
else:
logger.warn('Could not find completed item in history')
return {'status': False}
def historycheck(self, nzbid):
history = self.server.history()
found = False
hq = [hs for hs in history if hs['NZBID'] == nzbid and 'SUCCESS' in hs['Status']]
if len(hq) > 0:
logger.fdebug('found matching completed item in history. Job has a status of %s' % hq[0]['Status'])
if hq[0]['DownloadedSizeMB'] == hq[0]['FileSizeMB']:
logger.fdebug('%s has final file size of %sMB' % (hq[0]['Name'], hq[0]['DownloadedSizeMB']))
if os.path.isdir(hq[0]['DestDir']):
logger.fdebug('location found @ %s' % hq[0]['DestDir'])
return {'status': True,
'name': re.sub('.nzb', '', hq[0]['NZBName']).strip(),
'location': hq[0]['DestDir'],
'failed': False}
else:
logger.warn('no file found where it should be @ %s - is there another script that moves things after completion ?' % hq[0]['DestDir'])
return {'status': False}
else:
logger.warn('Could not find completed item in history')
return {'status': False}
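All of the queue and history traffic in this file goes over NZBGet's XML-RPC interface; stripped down, the calls used above amount to this (endpoint and credentials are invented):

    import xmlrpclib  # Python 2; xmlrpc.client on Python 3

    server = xmlrpclib.ServerProxy('http://nzbget:tegbzn6789@localhost:6789/xmlrpc')
    queue = server.listgroups()  # active downloads, polled in the loop above
    done = server.history()      # finished items, scanned by historycheck()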

View File

@@ -500,18 +500,15 @@ class OPDS(object):
if not issue:
self.data = self._error_with_message('Issue Not Found')
return
else:
issuetype = 1
comic = myDB.selectone("SELECT * from comics WHERE ComicID=?", (issue['ComicID'],)).fetchone()
if not comic:
self.data = self._error_with_message('Comic Not Found')
return
if issuetype:
self.file = issue['Location']
self.filename = os.path.split(issue['Location'])[1]
else:
comic = myDB.selectone("SELECT * from comics WHERE ComicID=?", (issue['ComicID'],)).fetchone()
if not comic:
self.data = self._error_with_message('Comic Not Found in Watchlist')
return
self.file = os.path.join(comic['ComicLocation'],issue['Location'])
self.filename = issue['Location']
else:
self.file = issue['Location']
self.filename = os.path.split(issue['Location'])[1]
return
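Restated, the reshuffled branch resolves the download path one of two ways: entries flagged by issuetype already carry a full path in Location, while watchlist issues store a bare filename under the series folder. A sketch using the column names from the diff:

    import os

    def resolve_issue_file(issue, comic, issuetype):
        if issuetype:
            # Location already holds the full path to the file
            return issue['Location'], os.path.split(issue['Location'])[1]
        # Location is a filename relative to the series directory
        return os.path.join(comic['ComicLocation'], issue['Location']), issue['Location']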
def _StoryArcs(self, **kwargs):

View File

@@ -69,7 +69,7 @@ class SABnzbd(object):
h = requests.get(self.sab_url, params=self.params['queue'], verify=False)
except Exception as e:
logger.info('uh-oh: %s' % e)
return {'status': False}
return self.historycheck(sendresponse)
else:
queueresponse = h.json()
logger.info('successfully queried the queue for status')
@@ -92,50 +92,53 @@ class SABnzbd(object):
logger.warn('error: %s' % e)
logger.info('File has now downloaded!')
hist_params = {'mode': 'history',
'category': mylar.CONFIG.SAB_CATEGORY,
'failed': 0,
'output': 'json',
'apikey': mylar.CONFIG.SAB_APIKEY}
hist = requests.get(self.sab_url, params=hist_params, verify=False)
historyresponse = hist.json()
#logger.info(historyresponse)
histqueue = historyresponse['history']
found = {'status': False}
while found['status'] is False:
try:
for hq in histqueue['slots']:
#logger.info('nzo_id: %s --- %s [%s]' % (hq['nzo_id'], sendresponse, hq['status']))
if hq['nzo_id'] == sendresponse and hq['status'] == 'Completed':
logger.info('found matching completed item in history. Job has a status of %s' % hq['status'])
if os.path.isfile(hq['storage']):
logger.info('location found @ %s' % hq['storage'])
found = {'status': True,
'name': re.sub('.nzb', '', hq['nzb_name']).strip(),
'location': os.path.abspath(os.path.join(hq['storage'], os.pardir)),
'failed': False}
break
else:
logger.info('no file found where it should be @ %s - is there another script that moves things after completion ?' % hq['storage'])
break
elif hq['nzo_id'] == sendresponse and hq['status'] == 'Failed':
#get the stage / error message and see what we can do
stage = hq['stage_log']
for x in stage[0]:
if 'Failed' in x['actions'] and any([x['name'] == 'Unpack', x['name'] == 'Repair']):
if 'moving' in x['actions']:
logger.warn('There was a failure in SABnzbd during the unpack/repair phase that caused a failure: %s' % x['actions'])
else:
logger.warn('Failure occurred during the Unpack/Repair phase of SABnzbd. This is probably a bad file: %s' % x['actions'])
if mylar.FAILED_DOWNLOAD_HANDLING is True:
found = {'status': True,
'name': re.sub('.nzb', '', hq['nzb_name']).strip(),
'location': os.path.abspath(os.path.join(hq['storage'], os.pardir)),
'failed': True}
break
return self.historycheck(sendresponse)
def historycheck(self, sendresponse):
hist_params = {'mode': 'history',
'category': mylar.CONFIG.SAB_CATEGORY,
'failed': 0,
'output': 'json',
'apikey': mylar.CONFIG.SAB_APIKEY}
hist = requests.get(self.sab_url, params=hist_params, verify=False)
historyresponse = hist.json()
#logger.info(historyresponse)
histqueue = historyresponse['history']
found = {'status': False}
while found['status'] is False:
try:
for hq in histqueue['slots']:
#logger.info('nzo_id: %s --- %s [%s]' % (hq['nzo_id'], sendresponse, hq['status']))
if hq['nzo_id'] == sendresponse and hq['status'] == 'Completed':
logger.info('found matching completed item in history. Job has a status of %s' % hq['status'])
if os.path.isfile(hq['storage']):
logger.info('location found @ %s' % hq['storage'])
found = {'status': True,
'name': re.sub('.nzb', '', hq['nzb_name']).strip(),
'location': os.path.abspath(os.path.join(hq['storage'], os.pardir)),
'failed': False}
break
except Exception as e:
logger.warn('error %s' % e)
break
else:
logger.info('no file found where it should be @ %s - is there another script that moves things after completion ?' % hq['storage'])
break
elif hq['nzo_id'] == sendresponse and hq['status'] == 'Failed':
#get the stage / error message and see what we can do
stage = hq['stage_log']
for x in stage[0]:
if 'Failed' in x['actions'] and any([x['name'] == 'Unpack', x['name'] == 'Repair']):
if 'moving' in x['actions']:
logger.warn('There was a failure in SABnzbd during the unpack/repair phase that caused a failure: %s' % x['actions'])
else:
logger.warn('Failure occurred during the Unpack/Repair phase of SABnzbd. This is probably a bad file: %s' % x['actions'])
if mylar.FAILED_DOWNLOAD_HANDLING is True:
found = {'status': True,
'name': re.sub('.nzb', '', hq['nzb_name']).strip(),
'location': os.path.abspath(os.path.join(hq['storage'], os.pardir)),
'failed': True}
break
break
except Exception as e:
logger.warn('error %s' % e)
break
return found
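The extracted historycheck() boils down to a single history query against the SABnzbd api followed by a scan of the returned slots; roughly like this (url, category and apikey are invented placeholders):

    import requests

    params = {'mode': 'history',
              'category': 'comics',
              'failed': 0,
              'output': 'json',
              'apikey': 'your-sab-apikey'}
    hist = requests.get('http://localhost:8080/sabnzbd/api', params=params, verify=False)
    slots = hist.json()['history']['slots']
    completed = [hq for hq in slots if hq['status'] == 'Completed']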

View File

@@ -2809,7 +2809,7 @@ def searcher(nzbprov, nzbname, comicinfo, link, IssueID, ComicID, tmpprov, direc
if mylar.CONFIG.ENABLE_SNATCH_SCRIPT:
if mylar.USE_NZBGET:
clientmode = 'nzbget'
client_id = None
client_id = '%s' % send_to_nzbget['NZBID']
elif mylar.USE_SABNZBD:
clientmode = 'sabnzbd'
client_id = sendtosab['nzo_id']

View File

@@ -17,6 +17,7 @@
from __future__ import with_statement
import os
import io
import sys
import cherrypy
import requests
@@ -664,33 +665,44 @@ class WebInterface(object):
def wanted_Export(self,mode):
import unicodedata
myDB = db.DBConnection()
wantlist = myDB.select("SELECT * FROM issues WHERE Status=? AND ComicName NOT NULL", [mode])
wantlist = myDB.select("select b.ComicName, b.ComicYear, a.Issue_Number, a.IssueDate, a.ComicID, a.IssueID from issues a inner join comics b on a.ComicID=b.ComicID where a.status=? and b.ComicName is not NULL", [mode])
if wantlist is None:
logger.info("There aren't any issues marked as " + mode + ". Aborting Export.")
return
#write it a wanted_list.csv
#write out a wanted_list.csv
logger.info("gathered data - writing to csv...")
except_file = os.path.join(mylar.DATA_DIR, str(mode) + "_list.csv")
if os.path.exists(except_file):
wanted_file = os.path.join(mylar.DATA_DIR, str(mode) + "_list.csv")
if os.path.exists(wanted_file):
try:
os.remove(except_file)
os.remove(wanted_file)
except (OSError, IOError):
pass
wanted_file_new = os.path.join(mylar.DATA_DIR, str(mode) + '_list-1.csv')
logger.warn('%s already exists. Writing to: %s' % (wanted_file, wanted_file_new))
wanted_file = wanted_file_new
wcount=0
with open(str(except_file), 'w+') as f:
headrow = "SeriesName,SeriesYear,IssueNumber,IssueDate,ComicID,IssueID"
headerline = headrow.decode('utf-8', 'ignore')
f.write('%s\n' % (headerline.encode('ascii', 'replace').strip()))
for want in wantlist:
wantcomic = myDB.selectone("SELECT * FROM comics WHERE ComicID=?", [want['ComicID']]).fetchone()
exceptln = wantcomic['ComicName'].encode('ascii', 'replace') + "," + str(wantcomic['ComicYear']) + "," + str(want['Issue_Number']) + "," + str(want['IssueDate']) + "," + str(want['ComicID']) + "," + str(want['IssueID'])
#logger.fdebug(exceptln)
wcount+=1
f.write('%s\n' % (exceptln.encode('ascii', 'replace').strip()))
logger.info("Successfully wrote to csv file " + str(wcount) + " entries from your " + mode + " list.")
with open(wanted_file, 'wb+') as f:
try:
fieldnames = ['SeriesName','SeriesYear','IssueNumber','IssueDate','ComicID','IssueID']
writer = csv.DictWriter(f, fieldnames=fieldnames)
writer.writeheader()
for want in wantlist:
writer.writerow({'SeriesName': want['ComicName'],
'SeriesYear': want['ComicYear'],
'IssueNumber': want['Issue_Number'],
'IssueDate': want['IssueDate'],
'ComicID': want['ComicID'],
'IssueID': want['IssueID']})
wcount += 1
except IOError as Argument:
logger.info("Error writing value to {}. {}".format(wanted_file, Argument))
except Exception as Argument:
logger.info("Unknown error: {}".format(Argument))
if wcount > 0:
logger.info('Successfully wrote to csv file %s entries from your %s list.' % (wcount, mode))
else:
logger.info('Nothing written to csv file for your %s list.' % mode)
raise cherrypy.HTTPRedirect("home")
wanted_Export.exposed = True
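The rewritten export hands quoting and line endings off to csv.DictWriter instead of concatenating strings by hand; stripped to its essentials (sample row and filename are invented):

    import csv

    fieldnames = ['SeriesName', 'SeriesYear', 'IssueNumber', 'IssueDate', 'ComicID', 'IssueID']
    with open('Wanted_list.csv', 'wb+') as f:  # binary mode, as the diff uses (Python 2 csv)
        writer = csv.DictWriter(f, fieldnames=fieldnames)
        writer.writeheader()
        writer.writerow({'SeriesName': 'Batman', 'SeriesYear': '2016',
                         'IssueNumber': '50', 'IssueDate': '2018-07-04',
                         'ComicID': '96702', 'IssueID': '666241'})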
@@ -2388,6 +2400,7 @@ class WebInterface(object):
flushImports.exposed = True
def markImports(self, action=None, **args):
import unicodedata
myDB = db.DBConnection()
comicstoimport = []
if action == 'massimport':
@@ -2398,10 +2411,14 @@ class WebInterface(object):
comicid = cname['ComicID']
else:
comicid = None
comicstoimport.append({'ComicName': cname['ComicName'].decode('utf-8', 'replace'),
'DynamicName': cname['DynamicName'],
'Volume': cname['Volume'],
'ComicID': comicid})
try:
comicstoimport.append({'ComicName': unicodedata.normalize('NFKD', cname['ComicName']).encode('utf-8', 'ignore').decode('utf-8', 'ignore'),
'DynamicName': cname['DynamicName'],
'Volume': cname['Volume'],
'ComicID': comicid})
except Exception as e:
logger.warn('[ERROR] There was a problem attempting to queue %s %s [%s] to import (ignoring): %s' % (cname['ComicName'],cname['Volume'],comicid, e))
logger.info(str(len(comicstoimport)) + ' series will be attempted to be imported.')
else:
if action == 'importselected':
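The normalization wrapped in the new try/except behaves like this on a bare string: NFKD decomposes composed characters (an accent becomes base letter plus combining mark) before the utf-8 round trip. A sketch with a made-up series name:

    import unicodedata

    name = u'Caf\xe9 Racer'  # hypothetical series name with a composed character
    safe = unicodedata.normalize('NFKD', name).encode('utf-8', 'ignore').decode('utf-8', 'ignore')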
@@ -3085,9 +3102,6 @@ class WebInterface(object):
"IssueDate": AD['Issue_Date'],
"ReleaseDate": AD['Store_Date']}
logger.info('CTRLWRITE TO: ' + str(newCtrl))
logger.info('WRITING: ' + str(newVals))
myDB.upsert("storyarcs", newVals, newCtrl)
@@ -3959,11 +3973,21 @@ class WebInterface(object):
'srid': SRID}
self.addbyid(comicinfo['ComicID'], calledby=True, imported=imported, ogcname=comicinfo['ComicName'], nothread=True)
#status update.
ctrlVal = {"ComicID": comicinfo['ComicID']}
newVal = {"Status": 'Imported',
"SRID": SRID}
myDB.upsert("importresults", newVal, ctrlVal)
#if move files wasn't used - we need to update status at this point.
#if mylar.CONFIG.IMP_MOVE is False:
# #status update.
# for f in files:
# ctrlVal = {"ComicID": comicinfo['ComicID'],
# "impID": f['import_id']}
# newVal = {"Status": 'Imported',
# "SRID": SRID,
# "ComicFilename": f['comicfilename'],
# "ComicLocation": f['comiclocation'],
# "Volume": comicinfo['Volume'],
# "IssueNumber": comicinfo['IssueNumber'],
# "ComicName": comicinfo['ComicName'],
# "DynamicName": comicinfo['DynamicName']}
# myDB.upsert("importresults", newVal, ctrlVal)
logger.info('[IMPORT] Successfully verified import sequence data for : ' + comicinfo['ComicName'] + '. Currently adding to your watchlist.')
RemoveIDS.append(comicinfo['ComicID'])
@@ -3983,16 +4007,16 @@ class WebInterface(object):
if volume is None or volume == 'None':
comic_and_vol = ComicName
else:
comic_and_vol = ComicName + ' (' + str(volume) + ')'
logger.info('[IMPORT][' + comic_and_vol + '] Now preparing to import. First I need to determine the highest issue, and possible year(s) of the series.')
comic_and_vol = '%s (%s)' % (ComicName, volume)
logger.info('[IMPORT][%s] Now preparing to import. First I need to determine the highest issue, and possible year(s) of the series.' % comic_and_vol)
if volume is None or volume == 'None':
logger.fdebug('[IMPORT] [none] dynamicname: ' + DynamicName)
logger.fdebug('[IMPORT] [none] dynamicname: %s' % DynamicName)
logger.fdebug('[IMPORT] [none] volume: None')
results = myDB.select("SELECT * FROM importresults WHERE DynamicName=? AND Volume IS NULL AND Status='Not Imported'", [DynamicName])
else:
logger.fdebug('[IMPORT] [!none] dynamicname: ' + DynamicName)
logger.fdebug('[IMPORT] [!none] volume: ' + volume)
logger.fdebug('[IMPORT] [!none] dynamicname: %s' % DynamicName)
logger.fdebug('[IMPORT] [!none] volume: %s' % volume)
results = myDB.select("SELECT * FROM importresults WHERE DynamicName=? AND Volume=? AND Status='Not Imported'", [DynamicName,volume])
if not results:
@@ -4043,7 +4067,7 @@ class WebInterface(object):
if 'annual' in getiss.lower():
tmpiss = re.sub('[^0-9]','', getiss).strip()
if any([tmpiss.startswith('19'), tmpiss.startswith('20')]) and len(tmpiss) == 4:
logger.fdebug('[IMPORT] annual detected with no issue [' + getiss + ']. Skipping this entry for determining series length.')
logger.fdebug('[IMPORT] annual detected with no issue [%s]. Skipping this entry for determining series length.' % getiss)
continue
else:
if (result['ComicYear'] not in yearRANGE) or all([yearRANGE is None, yearRANGE == 'None']):
@@ -4069,8 +4093,8 @@ class WebInterface(object):
raise cherrypy.HTTPRedirect("importResults")
#figure out # of issues and the year range allowable
logger.fdebug('[IMPORT] yearTOP: ' + str(yearTOP))
logger.fdebug('[IMPORT] yearRANGE: ' + str(yearRANGE))
logger.fdebug('[IMPORT] yearTOP: %s' % yearTOP)
logger.fdebug('[IMPORT] yearRANGE: %s' % yearRANGE)
if starttheyear is None:
if all([yearTOP != None, yearTOP != 'None']):
if int(str(yearTOP)) > 0:
@@ -4099,15 +4123,15 @@ class WebInterface(object):
#this needs to be reworked / refined ALOT more.
#minISSUE = highest issue #, startISSUE = lowest issue #
numissues = len(comicstoIMP)
logger.fdebug('[IMPORT] number of issues: ' + str(numissues))
logger.fdebug('[IMPORT] number of issues: %s' % numissues)
ogcname = ComicName
mode='series'
displaycomic = helpers.filesafe(ComicName)
displaycomic = re.sub('[\-]','', displaycomic).strip()
displaycomic = re.sub('\s+', ' ', displaycomic).strip()
logger.fdebug('[IMPORT] displaycomic : ' + displaycomic)
logger.fdebug('[IMPORT] comicname : ' + ComicName)
logger.fdebug('[IMPORT] displaycomic : %s' % displaycomic)
logger.fdebug('[IMPORT] comicname : %s' % ComicName)
searchterm = '"' + displaycomic + '"'
try:
if yearRANGE is None:
@@ -4124,7 +4148,7 @@ class WebInterface(object):
type='comic'
#we now need to cycle through the results until we get a hit on both dynamicname AND year (~count of issues possibly).
logger.fdebug('[IMPORT] [' + str(len(sresults)) + '] search results')
logger.fdebug('[IMPORT] [%s] search results' % len(sresults))
search_matches = []
for results in sresults:
rsn = filechecker.FileChecker()
@@ -4138,11 +4162,11 @@ class WebInterface(object):
totalissues = int(results['issues']) / 12
totalyear_range = int(result_year) + totalissues #2000 + (101 / 12) 2000 +8.4 = 2008
logger.fdebug('[IMPORT] [' + str(totalyear_range) + '] Comparing: ' + re.sub('[\|\s]', '', DynamicName.lower()).strip() + ' - TO - ' + re.sub('[\|\s]', '', result_name.lower()).strip())
logger.fdebug('[IMPORT] [%s] Comparing: %s - TO - %s' % (totalyear_range, re.sub('[\|\s]', '', DynamicName.lower()).strip(), re.sub('[\|\s]', '', result_name.lower()).strip()))
if any([str(totalyear_range) in results['seriesrange'], result_year in results['seriesrange']]):
logger.fdebug('[IMPORT] LastIssueID: ' + str(results['lastissueid']))
logger.fdebug('[IMPORT] LastIssueID: %s' % results['lastissueid'])
if re.sub('[\|\s]', '', DynamicName.lower()).strip() == re.sub('[\|\s]', '', result_name.lower()).strip():
logger.fdebug('[IMPORT MATCH] ' + result_name + ' (' + str(result_comicid) + ')')
logger.fdebug('[IMPORT MATCH] %s (%s)' % (result_name, result_comicid))
search_matches.append({'comicid': results['comicid'],
'series': results['name'],
'dynamicseries': result_name,
@@ -4160,13 +4184,13 @@ class WebInterface(object):
if len(search_matches) == 1:
sr = search_matches[0]
logger.info("[IMPORT] There is only one result...automagik-mode enabled for " + sr['series'] + " :: " + str(sr['comicid']))
logger.info('[IMPORT] There is only one result...automagik-mode enabled for %s :: %s' % (sr['series'], sr['comicid']))
resultset = 1
else:
if len(search_matches) == 0 or len(search_matches) is None:
logger.fdebug("[IMPORT] no results, removing the year from the agenda and re-querying.")
sresults = mb.findComic(searchterm, mode, issue=numissues) #ComicName, mode, issue=numissues)
logger.fdebug('[IMPORT] [' + str(len(sresults)) + '] search results')
logger.fdebug('[IMPORT] [%s] search results' % len(sresults))
for results in sresults:
rsn = filechecker.FileChecker()
rsn_run = rsn.dynamic_replace(results['name'])
@@ -4179,10 +4203,10 @@ class WebInterface(object):
totalissues = int(results['issues']) / 12
totalyear_range = int(result_year) + totalissues #2000 + (101 / 12) 2000 +8.4 = 2008
logger.fdebug('[IMPORT][' + str(totalyear_range) + '] Comparing: ' + re.sub('[\|\s]', '', DynamicName.lower()).strip() + ' - TO - ' + re.sub('[\|\s]', '', result_name.lower()).strip())
logger.fdebug('[IMPORT][%s] Comparing: %s - TO - %s' % (totalyear_range, re.sub('[\|\s]', '', DynamicName.lower()).strip(), re.sub('[\|\s]', '', result_name.lower()).strip()))
if any([str(totalyear_range) in results['seriesrange'], result_year in results['seriesrange']]):
if re.sub('[\|\s]', '', DynamicName.lower()).strip() == re.sub('[\|\s]', '', result_name.lower()).strip():
logger.fdebug('[IMPORT MATCH] ' + result_name + ' (' + str(result_comicid) + ')')
logger.fdebug('[IMPORT MATCH] %s (%s)' % (result_name, result_comicid))
search_matches.append({'comicid': results['comicid'],
'series': results['name'],
'dynamicseries': result_name,
@@ -4200,12 +4224,12 @@ class WebInterface(object):
if len(search_matches) == 1:
sr = search_matches[0]
logger.info("[IMPORT] There is only one result...automagik-mode enabled for " + sr['series'] + " :: " + str(sr['comicid']))
logger.info('[IMPORT] There is only one result...automagik-mode enabled for %s :: %s' % (sr['series'], sr['comicid']))
resultset = 1
else:
resultset = 0
else:
logger.info('[IMPORT] Returning results to Select option - there are ' + str(len(search_matches)) + ' possibilities, manual intervention required.')
logger.info('[IMPORT] Returning results to Select option - there are %s possibilities, manual intervention required.' % len(search_matches))
resultset = 0
#generate random Search Results ID to allow for easier access for viewing logs / search results.
@@ -4229,7 +4253,6 @@ class WebInterface(object):
newVal = {"SRID": SRID,
"Status": 'Importing',
"ComicName": ComicName}
myDB.upsert("importresults", newVal, ctrlVal)
if resultset == 0:
@@ -5007,10 +5030,6 @@ class WebInterface(object):
logger.fdebug('Now attempting to test NZBGet connection')
if nzbusername is None or nzbpassword is None:
logger.error('No Username / Password provided for NZBGet credentials. Unable to test API key')
return "Invalid Username/Password provided"
logger.info('Now testing connection to NZBGet @ %s:%s' % (nzbhost, nzbport))
if nzbhost[:5] == 'https':
protocol = 'https'
@@ -5019,8 +5038,18 @@ class WebInterface(object):
protocol = 'http'
nzbgethost = nzbhost[7:]
nzb_url = '%s://%s:%s@%s:%s/xmlrpc' % (protocol, nzbusername, nzbpassword, nzbgethost, nzbport)
logger.info('nzb_url: %s' % nzb_url)
url = '%s://'
nzbparams = protocol,
if all([nzbusername is not None, nzbpassword is not None]):
url = url + '%s:%s@'
nzbparams = nzbparams + (nzbusername, nzbpassword)
elif nzbusername is not None:
url = url + '%s@'
nzbparams = nzbparams + (nzbusername,)
url = url + '%s:%s/xmlrpc'
nzbparams = nzbparams + (nzbgethost, nzbport,)
nzb_url = (url % nzbparams)
import xmlrpclib
nzbserver = xmlrpclib.ServerProxy(nzb_url)
@@ -5029,7 +5058,6 @@ class WebInterface(object):
except Exception as e:
logger.warn('Error fetching data: %s' % e)
return 'Unable to retrieve data from NZBGet'
logger.info('Successfully verified connection to NZBGet at %s:%s' % (nzbgethost, nzbport))
return "Successfully verified connection to NZBGet"
NZBGet_test.exposed = True
@@ -5412,7 +5440,6 @@ class WebInterface(object):
def orderThis(self, **kwargs):
logger.info('here')
return
orderThis.exposed = True