diff --git a/data/interfaces/default/base.html b/data/interfaces/default/base.html
index 4e11671c..989f1b78 100755
--- a/data/interfaces/default/base.html
+++ b/data/interfaces/default/base.html
@@ -48,10 +48,16 @@
diff --git a/data/interfaces/default/importresults.html b/data/interfaces/default/importresults.html
index 9bb8d9ef..b59102ab 100644
--- a/data/interfaces/default/importresults.html
+++ b/data/interfaces/default/importresults.html
@@ -13,6 +13,7 @@ <%def name="body()">
+

Borg Importing Results

@@ -54,6 +55,9 @@
+
+

To be Imported

+
(green indicates confirmed on watchlist)
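For readers skimming the template changes: the green/confirmed hint above hinges on the new WatchMatch column introduced further down in this diff (librarysync.py stores the matched ComicID, addComic() prefixes it with 'C' once the user confirms, and importResults() splits the two tables on that value). A minimal sketch of the convention — the helper name is illustrative only and is not part of the patch:

```python
# Illustrative only -- not part of the patch. How importresults.WatchMatch
# is interpreted by this change:
#   None         -> no watchlist hit; row shows under "To be Imported"
#   "<comicid>"  -> scanner matched a watchlist series, still needs confirming;
#                   row shows under "Already on Watchlist"
#   "C<comicid>" -> user confirmed the match; preSearchit() can auto-import it
def classify_watchmatch(watchmatch):
    """Return which importresults bucket a WatchMatch value falls into."""
    if watchmatch is None:
        return 'to_be_imported'
    if str(watchmatch).startswith('C'):
        return 'confirmed'           # e.g. "C18438" -> confirmed comicid 18438
    return 'needs_confirmation'      # bare comicid, e.g. "18438"

assert classify_watchmatch(None) == 'to_be_imported'
assert classify_watchmatch('18438') == 'needs_confirmation'
assert classify_watchmatch('C18438') == 'confirmed'
```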
@@ -67,16 +71,20 @@
 %if results:
 %for result in results:
-
+
-
-
-
-
+
+
+
 <%
 myDB = db.DBConnection()
 files = myDB.action("SELECT * FROM importresults WHERE ComicName=?", [result['ComicName']])
@@ -85,24 +93,76 @@
 %endfor
 %else:
-
+
 %endif
 ${result['ComicName']} ${result['ComicYear']} ${result['Status']} ${result['ImportDate']} [Import]
- [Remove]
+ ${result['Status']}
+ %if result['WatchMatch'] is not None:
+
+ %endif
 ${result['ImportDate']} [Import]
+ [Remove]
+
There are no results to display
There are no results to display
+ + +
+

Already on Watchlist

+
(you need to CONFIRM the match before doing an import!) +
+
+
+
+
+
+
+
+
+
+
+
+ %if watchresults:
+ %for wresult in watchresults:
+
+
+
+
+
+
+
+
+ <%
+  myDB = db.DBConnection()
+  files = myDB.action("SELECT * FROM importresults WHERE ComicName=?", [wresult['ComicName']])
+
+ %>
+ %endfor
+ %else:
+
+
+ %endif
+
+
Comic Name | Year | Status | Import Date | Confirmed | Options
${wresult['ComicName']} ${wresult['ComicYear']} ${wresult['Status']} ${wresult['ImportDate']}
+
+ %if wresult['WatchMatch']:
+ Confirm
+ %else:
+ No
+ %endif
+ [Import]
+ [Remove]
+
There are no results to display
+
 <%def name="javascriptIncludes()">
-
+
+
+
diff --git a/mylar/__init__.py b/mylar/__init__.py
index 6f86ab98..fd3794e1 100755
--- a/mylar/__init__.py
+++ b/mylar/__init__.py
@@ -63,6 +63,8 @@ LOG_LIST = []
 CACHE_DIR = None
 
+PULLNEW = None
+
 HTTP_PORT = None
 HTTP_HOST = None
 HTTP_USERNAME = None
@@ -123,6 +125,7 @@ ADD_TO_CSV = True
 SKIPPED2WANTED = False
 CVINFO = False
 LOG_LEVEL = None
+POST_PROCESSING = True
 
 SAB_HOST = None
 SAB_USERNAME = None
@@ -224,8 +227,8 @@ def initialize():
                 NZBSU, NZBSU_APIKEY, DOGNZB, DOGNZB_APIKEY, NZBX,\
                 NEWZNAB, NEWZNAB_HOST, NEWZNAB_APIKEY, NEWZNAB_ENABLED, EXTRA_NEWZNABS,\
                 RAW, RAW_PROVIDER, RAW_USERNAME, RAW_PASSWORD, RAW_GROUPS, EXPERIMENTAL, \
-                PREFERRED_QUALITY, MOVE_FILES, RENAME_FILES, LOWERCASE_FILENAMES, USE_MINSIZE, MINSIZE, USE_MAXSIZE, MAXSIZE, CORRECT_METADATA, FOLDER_FORMAT, FILE_FORMAT, REPLACE_CHAR, REPLACE_SPACES, ADD_TO_CSV, CVINFO, LOG_LEVEL, \
-                COMIC_LOCATION, QUAL_ALTVERS, QUAL_SCANNER, QUAL_TYPE, QUAL_QUALITY, ENABLE_EXTRA_SCRIPTS, EXTRA_SCRIPTS, ENABLE_PRE_SCRIPTS, PRE_SCRIPTS
+                PREFERRED_QUALITY, MOVE_FILES, RENAME_FILES, LOWERCASE_FILENAMES, USE_MINSIZE, MINSIZE, USE_MAXSIZE, MAXSIZE, CORRECT_METADATA, FOLDER_FORMAT, FILE_FORMAT, REPLACE_CHAR, REPLACE_SPACES, ADD_TO_CSV, CVINFO, LOG_LEVEL, POST_PROCESSING, \
+                COMIC_LOCATION, QUAL_ALTVERS, QUAL_SCANNER, QUAL_TYPE, QUAL_QUALITY, ENABLE_EXTRA_SCRIPTS, EXTRA_SCRIPTS, ENABLE_PRE_SCRIPTS, PRE_SCRIPTS, PULLNEW
 
     if __INITIALIZED__:
         return False
@@ -305,6 +308,7 @@ def initialize():
         ENABLE_PRE_SCRIPTS = bool(check_setting_int(CFG, 'General', 'enable_pre_scripts', 0))
         PRE_SCRIPTS = check_setting_str(CFG, 'General', 'pre_scripts', '')
+        POST_PROCESSING = bool(check_setting_int(CFG, 'General', 'post_processing', 1))
 
         SAB_HOST = check_setting_str(CFG, 'SABnzbd', 'sab_host', '')
         SAB_USERNAME = check_setting_str(CFG, 'SABnzbd', 'sab_username', '')
@@ -559,6 +563,7 @@ def config_write():
     new_config['General']['extra_scripts'] = EXTRA_SCRIPTS
     new_config['General']['enable_pre_scripts'] = int(ENABLE_PRE_SCRIPTS)
     new_config['General']['pre_scripts'] = PRE_SCRIPTS
+    new_config['General']['post_processing'] = POST_PROCESSING
 
     new_config['SABnzbd'] = {}
     new_config['SABnzbd']['sab_host'] = SAB_HOST
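The POST_PROCESSING flag added above is threaded through initialize(), config_write(), the config page and configUpdate(), and is consumed in search.py further down. A minimal sketch of the changed SABnzbd rule, using a hypothetical helper name that is not part of the patch:

```python
# Illustrative sketch (not part of the patch) of the new behaviour in
# search.py: the ComicRN.py post-processing script is now attached whenever
# either "Rename Files" or the new General/post_processing option is enabled,
# instead of only when RENAME_FILES == 1.
def sab_script_param(rename_files, post_processing):
    """Return the query-string fragment appended to the SABnzbd api call."""
    if rename_files or post_processing:
        return "&script=ComicRN.py"
    return ""

# post_processing defaults to on (check_setting_int(..., 'post_processing', 1)),
# so a fresh install attaches ComicRN.py even with renaming turned off.
assert sab_script_param(False, True) == "&script=ComicRN.py"
assert sab_script_param(False, False) == ""
```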
@@ -622,6 +627,7 @@ def start():
         #weekly pull list gets messed up if it's not populated first, so let's populate it then set the scheduler.
         logger.info("Checking for existance of Weekly Comic listing...")
+        PULLNEW = 'no'  #reset the indicator here.
         threading.Thread(target=weeklypull.pullit).start()
         #now the scheduler (check every 24 hours)
         SCHED.add_interval_job(weeklypull.pullit, hours=24)
@@ -651,7 +657,7 @@ def dbcheck():
     c.execute('CREATE TABLE IF NOT EXISTS nzblog (IssueID TEXT, NZBName TEXT)')
     c.execute('CREATE TABLE IF NOT EXISTS weekly (SHIPDATE text, PUBLISHER text, ISSUE text, COMIC VARCHAR(150), EXTRA text, STATUS text)')
 #    c.execute('CREATE TABLE IF NOT EXISTS sablog (nzo_id TEXT, ComicName TEXT, ComicYEAR TEXT, ComicIssue TEXT, name TEXT, nzo_complete TEXT)')
-    c.execute('CREATE TABLE IF NOT EXISTS importresults (impID TEXT, ComicName TEXT, ComicYear TEXT, Status TEXT, ImportDate TEXT, ComicFilename TEXT, ComicLocation TEXT)')
+    c.execute('CREATE TABLE IF NOT EXISTS importresults (impID TEXT, ComicName TEXT, ComicYear TEXT, Status TEXT, ImportDate TEXT, ComicFilename TEXT, ComicLocation TEXT, WatchMatch TEXT)')
     conn.commit
     c.close
 #new
@@ -696,6 +702,11 @@ def dbcheck():
         c.execute('SELECT UseFuzzy from comics')
     except sqlite3.OperationalError:
         c.execute('ALTER TABLE comics ADD COLUMN UseFuzzy TEXT')
+
+    try:
+        c.execute('SELECT WatchMatch from importresults')
+    except sqlite3.OperationalError:
+        c.execute('ALTER TABLE importresults ADD COLUMN WatchMatch TEXT')
 
 # -- not implemented just yet ;)
 # for metadata...
diff --git a/mylar/helpers.py b/mylar/helpers.py
index b0ca8ac1..f9e86144 100755
--- a/mylar/helpers.py
+++ b/mylar/helpers.py
@@ -18,6 +18,7 @@ from operator import itemgetter
 import datetime
 import re
 import itertools
+import os
 import mylar
 
 def multikeysort(items, columns):
@@ -184,8 +185,29 @@ def decimal_issue(iss):
     deciss = (int(iss_b4dec) * 1000) + issdec
     return deciss
 
-def rename_param(comicid, comicname, comicyear, issue, issueid=None):
+def rename_param(comicid, comicname, issue, ofilename, comicyear=None, issueid=None):
+    from mylar import db, logger
     myDB = db.DBConnection()
+    print ("comicid: " + str(comicid))
+    print ("issue#: " + str(issue))
+    # the issue here is a non-decimalized version, we need to see if it's got a decimal and if not, add '.00'
+    iss_find = issue.find('.')
+    if iss_find < 0:
+        # no decimal in issue number
+        iss = str(int(issue)) + ".00"
+    else:
+        iss_b4dec = issue[:iss_find]
+        iss_decval = issue[iss_find+1:]
+        if len(iss_decval) == 1:
+            iss = str(int(iss_b4dec)) + "." + iss_decval
+        else:
+            if issue.endswith(".00"):
+                iss = issue
+            else:
+                iss = str(int(iss_b4dec)) + "." + iss_decval.rstrip('0')
+    issue = iss
+
+    print ("converted issue#: " + str(issue))
     if issueid is None:
         chkissue = myDB.action("SELECT * from issues WHERE ComicID=? AND Issue_Number=?", [comicid, issue]).fetchone()
         if chkissue is None:
@@ -283,8 +305,10 @@ def rename_param(comicid, comicname, comicyear, issue, issueid=None):
     extensions = ('.cbr', '.cbz')
+    if ofilename.lower().endswith(extensions):
+        path, ext = os.path.splitext(ofilename)
+
     if mylar.FILE_FORMAT == '':
-        self._log("Rename Files isn't enabled...keeping original filename.", logger.DEBUG)
         logger.fdebug("Rename Files isn't enabled - keeping original filename.")
         #check if extension is in nzb_name - will screw up otherwise
         if ofilename.lower().endswith(extensions):
@@ -292,18 +316,18 @@ def rename_param(comicid, comicname, comicyear, issue, issueid=None):
         else:
             nfilename = ofilename
     else:
-        nfilename = helpers.replace_all(mylar.FILE_FORMAT, file_values)
+        nfilename = replace_all(mylar.FILE_FORMAT, file_values)
         if mylar.REPLACE_SPACES:
             #mylar.REPLACE_CHAR ...determines what to replace spaces with underscore or dot
             nfilename = nfilename.replace(' ', mylar.REPLACE_CHAR)
-    nfilename = re.sub('[\,\:]', '', nfilename)
+    nfilename = re.sub('[\,\:]', '', nfilename) + ext.lower()
     logger.fdebug("New Filename: " + str(nfilename))
 
     if mylar.LOWERCASE_FILENAMES:
-        dst = (comlocation + "/" + nfilename + ext).lower()
+        dst = (comlocation + "/" + nfilename).lower()
     else:
-        dst = comlocation + "/" + nfilename + ext.lower()
-    logger.fdebug("Source: " + str(src))
+        dst = comlocation + "/" + nfilename
+    logger.fdebug("Source: " + str(ofilename))
     logger.fdebug("Destination: " + str(dst))
 
     rename_this = { "destination_dir" : dst,
diff --git a/mylar/importer.py b/mylar/importer.py
index a08448e3..a0464090 100755
--- a/mylar/importer.py
+++ b/mylar/importer.py
@@ -379,7 +379,7 @@ def addComictoDB(comicid,mismatch=None,pullupd=None,imported=None,ogcname=None):
         print ("imported is :" + str(imported))
         if mylar.IMP_MOVE:
             logger.info("Mass import - Move files")
-            moveit.movefiles(comlocation,ogcname)
+            moveit.movefiles(comicid,comlocation,ogcname)
     #check for existing files...
     updater.forceRescan(comicid)
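One piece of the helpers.py change worth calling out is the issue-number normalization now done at the top of rename_param(), since both movefiles() and the reworked manualRename() derive the issue from the impID or the database before renaming. Restated here as a standalone sketch for readability; the function name is illustrative and not part of the patch, but the logic mirrors the added code:

```python
# Standalone restatement (for illustration only) of the normalization that
# rename_param() now applies before matching against the issues table.
def normalize_issue(issue):
    """'1' -> '1.00', '3.5' -> '3.5', '3.50' -> '3.5', '4.00' stays '4.00'."""
    iss_find = issue.find('.')
    if iss_find < 0:
        # no decimal in the issue number - pad it out to two places
        return str(int(issue)) + ".00"
    iss_b4dec = issue[:iss_find]
    iss_decval = issue[iss_find + 1:]
    if len(iss_decval) == 1:
        return str(int(iss_b4dec)) + "." + iss_decval
    if issue.endswith(".00"):
        return issue
    return str(int(iss_b4dec)) + "." + iss_decval.rstrip('0')

assert normalize_issue("1") == "1.00"
assert normalize_issue("3.5") == "3.5"
assert normalize_issue("3.50") == "3.5"
assert normalize_issue("4.00") == "4.00"
```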
diff --git a/mylar/librarysync.py b/mylar/librarysync.py
old mode 100644
new mode 100755
index 9774bb69..189975ea
--- a/mylar/librarysync.py
+++ b/mylar/librarysync.py
@@ -46,7 +46,6 @@ def libraryScan(dir=None, append=False, ComicID=None, ComicName=None, cron=None):
     basedir = dir
-    watchmatch = {}
     comic_list = []
     comiccnt = 0
     extensions = ('cbr','cbz')
@@ -365,18 +364,17 @@ def libraryScan(dir=None, append=False, ComicID=None, ComicName=None, cron=None):
                     #issue comparison now as well
                     logger.info(u"Found " + str(comname) + " (" + str(comyear) + ") issue: " + str(comic_iss))
-                    watchfound+=1
-#                    updater.forceRescan(ComicID=comicid)
-#                    if not any(d.get('ComicID', None) == str(comicid) for d in watch_kchoice):
-                    watch_kchoice.append({
-                        "ComicID": str(comicid),
-                        "ComicName": str(comname),
-                        "ComicYear": str(comyear),
-                        "ComicIssue": str(int(comic_iss)),
-                        "ComicLocation": str(watch_location),
-                        "OriginalLocation" : str(comlocation),
-                        "OriginalFilename" : str(comfilename)
-                        })
+#                    watchfound+=1
+                    watchmatch = str(comicid)
+#                    watch_kchoice.append({
+#                        "ComicID": str(comicid),
+#                        "ComicName": str(comname),
+#                        "ComicYear": str(comyear),
+#                        "ComicIssue": str(int(comic_iss)),
+#                        "ComicLocation": str(watch_location),
+#                        "OriginalLocation" : str(comlocation),
+#                        "OriginalFilename" : str(comfilename)
+#                        })
                     foundonwatch = "True"
                     break
                 elif int(spercent) < 80:
@@ -384,33 +382,35 @@ def libraryScan(dir=None, append=False, ComicID=None, ComicName=None, cron=None):
                 cm_cn+=1
 
             if foundonwatch == "False":
+                watchmatch = None
                 #---if it's not a match - send it to the importer.
-            n = 0
-            csplit = comic_andiss.split(None)
-            while ( n <= (len(csplit)-1) ):
-                if csplit[n].isdigit():
-                    logger.fdebug("issue detected")
-                    comiss = splitit[n]
-                    logger.fdebug("issue # : " + str(comiss))
-                    comicNAMER = n - 1
-                    com_NAME = csplit[0]
-                    cmnam = 1
-                    while (cmnam <= comicNAMER):
-                        com_NAME = str(com_NAME) + " " + str(csplit[cmnam])
-                        cmnam+=1
-                    logger.fdebug("comic: " + str(com_NAME))
-                n+=1
-            if result_comyear is None: result_comyear = '0000' #no year in filename basically.
-            print ("adding " + str(com_NAME) + " to the import-queue!")
-            impid = str(com_NAME) + "-" + str(result_comyear) + "-" + str(comiss)
-            print ("impid: " + str(impid))
-            import_by_comicids.append({
-                "impid": impid,
-                "comicname" : com_NAME,
-                "comicyear" : result_comyear,
-                "comfilename" : comfilename,
-                "comlocation" : comlocation.decode(mylar.SYS_ENCODING)
-                })
+                n = 0
+                csplit = comic_andiss.split(None)
+                while ( n <= (len(csplit)-1) ):
+                    if csplit[n].isdigit():
+                        logger.fdebug("issue detected")
+                        comiss = splitit[n]
+                        logger.fdebug("issue # : " + str(comiss))
+                        comicNAMER = n - 1
+                        com_NAME = csplit[0]
+                        cmnam = 1
+                        while (cmnam <= comicNAMER):
+                            com_NAME = str(com_NAME) + " " + str(csplit[cmnam])
+                            cmnam+=1
+                        logger.fdebug("comic: " + str(com_NAME))
+                    n+=1
+                if result_comyear is None: result_comyear = '0000' #no year in filename basically.
+ print ("adding " + str(com_NAME) + " to the import-queue!") + impid = str(com_NAME) + "-" + str(result_comyear) + "-" + str(comiss) + print ("impid: " + str(impid)) + import_by_comicids.append({ + "impid": impid, + "watchmatch": watchmatch, + "comicname" : com_NAME, + "comicyear" : result_comyear, + "comfilename" : comfilename, + "comlocation" : comlocation.decode(mylar.SYS_ENCODING) + }) if len(watch_kchoice) > 0: watchchoice['watchlist'] = watch_kchoice diff --git a/mylar/moveit.py b/mylar/moveit.py old mode 100644 new mode 100755 index d72c03d6..d50fae83 --- a/mylar/moveit.py +++ b/mylar/moveit.py @@ -1,10 +1,10 @@ import mylar -from mylar import db, logger +from mylar import db, logger, helpers import os import shutil -def movefiles(comlocation,ogcname,imported=None): +def movefiles(comicid,comlocation,ogcname,imported=None): myDB = db.DBConnection() print ("comlocation is : " + str(comlocation)) print ("original comicname is : " + str(ogcname)) @@ -14,7 +14,20 @@ def movefiles(comlocation,ogcname,imported=None): #print ("preparing to move " + str(len(impres)) + " files into the right directory now.") for impr in impres: srcimp = impr['ComicLocation'] - dstimp = os.path.join(comlocation, impr['ComicFilename']) + orig_filename = impr['ComicFilename'] + orig_iss = impr['impID'].rfind('-') + orig_iss = impr['impID'][orig_iss+1:] + print ("Issue :" + str(orig_iss)) + #before moving check to see if Rename to Mylar structure is enabled. + if mylar.IMP_RENAME: + print("Renaming files according to configuration details : " + str(mylar.FILE_FORMAT)) + renameit = helpers.rename_param(comicid, impr['ComicName'], orig_iss, orig_filename) + nfilename = renameit['nfilename'] + dstimp = os.path.join(comlocation,nfilename) + else: + print("Renaming files not enabled, keeping original filename(s)") + dstimp = os.path.join(comlocation,orig_filename) + logger.info("moving " + str(srcimp) + " ... to " + str(dstimp)) try: shutil.move(srcimp, dstimp) diff --git a/mylar/search.py b/mylar/search.py index bfcbb069..9ba13735 100755 --- a/mylar/search.py +++ b/mylar/search.py @@ -704,7 +704,7 @@ def NZB_SEARCH(ComicName, IssueNumber, ComicYear, SeriesYear, nzbprov, nzbpr, Is if mylar.SAB_CATEGORY: tmpapi = tmpapi + "&cat=" + str(mylar.SAB_CATEGORY) logger.fdebug("...attaching category: " + str(tmpapi)) - if mylar.RENAME_FILES == 1: + if mylar.RENAME_FILES or mylar.POST_PROCESSING: tmpapi = tmpapi + "&script=ComicRN.py" logger.fdebug("...attaching rename script: " + str(tmpapi)) #final build of send-to-SAB diff --git a/mylar/updater.py b/mylar/updater.py index c4170ebb..97689a3f 100755 --- a/mylar/updater.py +++ b/mylar/updater.py @@ -305,7 +305,7 @@ def forceRescan(ComicID): while (som < fcn): #counts get buggered up when the issue is the last field in the filename - ie. '50.cbr' - logger.fdebug("checking word - " + str(fcnew[som])) + #logger.fdebug("checking word - " + str(fcnew[som])) if ".cbr" in fcnew[som].lower(): fcnew[som] = fcnew[som].replace(".cbr", "") elif ".cbz" in fcnew[som].lower(): @@ -316,7 +316,7 @@ def forceRescan(ComicID): if fcnew[som] != " ": fcnew[som] = get_issue[0] if '.' 
in fcnew[som]: - logger.fdebug("decimal detected...adjusting.") + #logger.fdebug("decimal detected...adjusting.") try: i = float(fcnew[som]) except ValueError, TypeError: diff --git a/mylar/webserve.py b/mylar/webserve.py index 25ec6bf8..feb6f991 100755 --- a/mylar/webserve.py +++ b/mylar/webserve.py @@ -108,6 +108,26 @@ class WebInterface(object): def addComic(self, comicid, comicname=None, comicyear=None, comicimage=None, comicissues=None, comicpublisher=None, imported=None, ogcname=None): myDB = db.DBConnection() + print ("I'm here.") + if imported == "confirm": + # if it's coming from the importer and it's just for confirmation, record the right selection and break. + # if it's 'confirmed' coming in as the value for imported + # the ogcname will be the original comicid that is either correct/incorrect (doesn't matter which) + #confirmedid is the selected series (comicid) with the letter C at the beginning to denote Confirmed. + # then sql the original comicid which will hit on all the results for the given series. + # iterate through, and overwrite the existing watchmatch with the new chosen 'C' + comicid value + + confirmedid = "C" + str(comicid) + confirms = myDB.action("SELECT * FROM importresults WHERE WatchMatch=?", [ogcname]) + if confirms is None: + print ("There are no results that match...this is an ERROR.") + else: + for confirm in confirms: + controlValue = {"impID": confirm['impID']} + newValue = {"WatchMatch": str(confirmedid)} + myDB.upsert("importresults", newValue, controlValue) + self.importResults() + return sresults = [] cresults = [] mismatch = "no" @@ -499,9 +519,40 @@ class WebInterface(object): raise cherrypy.HTTPRedirect("artistPage?ComicID=%s" % [comicid]) skipped2wanted.exposed = True - def ManualRename(self): + def manualRename(self, comicid): + print ("entering.") + if mylar.FILE_FORMAT == '': + print ("You haven't specified a File Format in Configuration/Advanced") + print ("Cannot rename files.") + return + + myDB = db.DBConnection() + comic = myDB.action("SELECT * FROM comics WHERE ComicID=?", [comicid]).fetchone() + comicdir = comic['ComicLocation'] + comicname = comic['ComicName'] + extensions = ('.cbr', '.cbz') + issues = myDB.action("SELECT * FROM issues WHERE ComicID=?", [comicid]) + comfiles = [] + for root, dirnames, filenames in os.walk(comicdir): + for filename in filenames: + if filename.lower().endswith(extensions): + print ("filename being checked is : " + str(filename)) + for issue in issues: + if issue['Location'] == filename: + print ("matched " + str(filename) + " to DB file " + str(issue['Location'])) + renameiss = helpers.rename_param(comicid, comicname, issue['Issue_Number'], filename, comicyear=None, issueid=None) + nfilename = renameiss['nfilename'] + srciss = os.path.join(comicdir,filename) + dstiss = os.path.join(comicdir,nfilename) + logger.info("Renaming " + str(filename) + " ... 
to " + str(nfilename)) + try: + shutil.move(srciss, dstiss) + except (OSError, IOError): + logger.error("Failed to move files - check directories and manually re-run.") + continue + print ("hello") - ManualRename.exposed = True + manualRename.exposed = True def searchScan(self, name): return serve_template(templatename="searchfix.html", title="Manage", name=name) @@ -638,6 +689,15 @@ class WebInterface(object): return serve_template(templatename="idirectory.html", title="Import a Directory") idirectory.exposed = True + def confirmResult(self,comicname,comicid): + #print ("here.") + mode='series' + sresults = mb.findComic(comicname, mode, None) + #print sresults + type='comic' + return serve_template(templatename="searchresults.html", title='Import Results for: "' + comicname + '"',searchresults=sresults, type=type, imported='confirm', ogcname=comicid) + confirmResult.exposed = True + def comicScan(self, path, scan=0, redirect=None, autoadd=0, libraryscan=0, imp_move=0, imp_rename=0, imp_metadata=0): mylar.LIBRARYSCAN = libraryscan mylar.ADD_COMICS = autoadd @@ -679,7 +739,8 @@ class WebInterface(object): "ComicName": soma_sl['comicname'], "ComicFilename": soma_sl['comfilename'], "ComicLocation": soma_sl['comlocation'].encode('utf-8'), - "ImportDate": helpers.today()} + "ImportDate": helpers.today(), + "WatchMatch": soma_sl['watchmatch']} myDB.upsert("importresults", newValue, controlValue) sl+=1 # because we could be adding volumes/series that span years, we need to account for this @@ -702,8 +763,9 @@ class WebInterface(object): def importResults(self): myDB = db.DBConnection() - results = myDB.select("SELECT * FROM importresults group by ComicName COLLATE NOCASE") - return serve_template(templatename="importresults.html", title="Import Results", results=results) + results = myDB.select("SELECT * FROM importresults WHERE WatchMatch is Null OR WatchMatch LIKE 'C%' group by ComicName COLLATE NOCASE") + watchresults = myDB.select("SELECT * FROM importresults WHERE WatchMatch is not Null AND WatchMatch NOT LIKE 'C%' group by ComicName COLLATE NOCASE") + return serve_template(templatename="importresults.html", title="Import Results", results=results, watchresults=watchresults) importResults.exposed = True def deleteimport(self, ComicName): @@ -713,7 +775,7 @@ class WebInterface(object): raise cherrypy.HTTPRedirect("importResults") deleteimport.exposed = True - def preSearchit(self, ComicName, imp_rename, imp_move): + def preSearchit(self, ComicName): #print ("imp_rename:" + str(imp_rename)) #print ("imp_move:" + str(imp_move)) myDB = db.DBConnection() @@ -729,6 +791,23 @@ class WebInterface(object): for result in results: if result is None: break + elif result['WatchMatch'].startswith('C'): + print ("Confirmed. ComicID already provided - initiating auto-magik mode for import.") + comicid = result['WatchMatch'][1:] + print (result['WatchMatch'] + " .to. " + str(comicid)) + #since it's already in the watchlist, we just need to move the files and re-run the filechecker. + #self.refreshArtist(comicid=comicid,imported='yes') + if mylar.IMP_MOVE: + logger.info("Mass import - Move files") + comloc = myDB.action("SELECT * FROM comics WHERE ComicID=?", [comicid]).fetchone() + mylar.moveit.movefiles(comicid,comloc['ComicLocation'],ComicName) + #check for existing files... 
+ updater.forceRescan(comicid) + else: + print ("nothing to do if I'm not moving.") + + raise cherrypy.HTTPRedirect("importResults") + else: comicstoIMP.append(result['ComicLocation'].decode(mylar.SYS_ENCODING, 'replace')) getiss = result['impID'].rfind('-') @@ -796,9 +875,9 @@ class WebInterface(object): resultset = 0 if resultset == 1: - self.addComic(comicid=sr['comicid'],comicname=sr['name'],comicyear=sr['comicyear'],comicpublisher=sr['publisher'],comicimage=sr['comicimage'],comicissues=sr['issues'],imported=comicstoIMP,ogcname=ogcname) + self.addComic(comicid=sr['comicid'],comicname=sr['name'],comicyear=sr['comicyear'],comicpublisher=sr['publisher'],comicimage=sr['comicimage'],comicissues=sr['issues'],imported='yes',ogcname=ogcname) #imported=comicstoIMP,ogcname=ogcname) else: - return serve_template(templatename="searchresults.html", title='Search Results for: "' + ComicName + '"',searchresults=sresults, type=type, imported=comicstoIMP, ogcname=ogcname) + return serve_template(templatename="searchresults.html", title='Import Results for: "' + ComicName + '"',searchresults=sresults, type=type, imported='yes', ogcname=ogcname) #imported=comicstoIMP, ogcname=ogcname) preSearchit.exposed = True #--- @@ -868,6 +947,7 @@ class WebInterface(object): "lowercase_filenames" : helpers.checked(mylar.LOWERCASE_FILENAMES), "enable_extra_scripts" : helpers.checked(mylar.ENABLE_EXTRA_SCRIPTS), "extra_scripts" : mylar.EXTRA_SCRIPTS, + "post_processing" : helpers.checked(mylar.POST_PROCESSING), "branch" : version.MYLAR_VERSION, "br_type" : mylar.INSTALL_TYPE, "br_version" : mylar.versioncheck.getVersion(), @@ -971,7 +1051,7 @@ class WebInterface(object): sab_host=None, sab_username=None, sab_apikey=None, sab_password=None, sab_category=None, sab_priority=None, sab_directory=None, log_dir=None, log_level=0, blackhole=0, blackhole_dir=None, usenet_retention=None, nzbsu=0, nzbsu_apikey=None, dognzb=0, dognzb_apikey=None, nzbx=0, newznab=0, newznab_host=None, newznab_apikey=None, newznab_enabled=0, raw=0, raw_provider=None, raw_username=None, raw_password=None, raw_groups=None, experimental=0, - preferred_quality=0, move_files=0, rename_files=0, add_to_csv=1, cvinfo=0, lowercase_filenames=0, folder_format=None, file_format=None, enable_extra_scripts=0, extra_scripts=None, enable_pre_scripts=0, pre_scripts=None, + preferred_quality=0, move_files=0, rename_files=0, add_to_csv=1, cvinfo=0, lowercase_filenames=0, folder_format=None, file_format=None, enable_extra_scripts=0, extra_scripts=None, enable_pre_scripts=0, pre_scripts=None, post_processing=0, destination_dir=None, replace_spaces=0, replace_char=None, use_minsize=0, minsize=None, use_maxsize=0, maxsize=None, autowant_all=0, autowant_upcoming=0, comic_cover_local=0, zero_level=0, zero_level_n=None, interface=None, **kwargs): mylar.HTTP_HOST = http_host mylar.HTTP_PORT = http_port @@ -1032,6 +1112,7 @@ class WebInterface(object): mylar.ENABLE_EXTRA_SCRIPTS = enable_extra_scripts mylar.EXTRA_SCRIPTS = extra_scripts mylar.ENABLE_PRE_SCRIPTS = enable_pre_scripts + mylar.POST_PROCESSING = post_processing mylar.PRE_SCRIPTS = pre_scripts mylar.LOG_DIR = log_dir mylar.LOG_LEVEL = log_level
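Taken together, the webserve.py changes split the import flow on the confirmation state of each row. A rough sketch of the per-row decision preSearchit() now makes — the helper and its return values are illustrative only, not the patch's API:

```python
# Rough sketch (illustrative names) of the decision preSearchit() now makes
# for each importresults row: confirmed watchlist matches are imported
# "auto-magik" style (move files, force a rescan), everything else goes
# through the normal ComicVine search path.
def import_action(result):
    """result is a dict shaped like an importresults row."""
    watchmatch = result.get('WatchMatch')
    if watchmatch and watchmatch.startswith('C'):
        comicid = watchmatch[1:]     # strip the 'C' confirmation prefix
        # patch behaviour: files are only moved / rescanned when IMP_MOVE is
        # enabled; otherwise there is nothing to do for a watched series.
        return ('auto_import', comicid)
    return ('search_comicvine', result['ComicName'])

assert import_action({'WatchMatch': 'C18438'}) == ('auto_import', '18438')
assert import_action({'WatchMatch': None, 'ComicName': 'Invincible'}) == ('search_comicvine', 'Invincible')
```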