FIX:(#162) SAB Download Directory option is now available for Post-Processing, FIX:(#195) Series with digits in the title would occasionally error out during File Checking, IMP: Minimum/Maximum size restrictions are now available (Experimental Search ONLY), IMP: further work on the directory-import screens

This commit is contained in:
evilhero 2013-02-08 22:34:02 -05:00
parent 1a9131446f
commit af7bdb3966
15 changed files with 210 additions and 77 deletions

View File

@ -245,7 +245,7 @@
grade = 'A'
%>
<tr class="grade${grade}">
<td id="select"><input type="checkbox" name="${issue['IssueID']}" class="checkbox" /></td>
<td id="select"><input type="checkbox" name="${issue['IssueID']}" class="checkbox" value="${issue['IssueID']}"/></td>
<td id="issuenumber">${issue['Issue_Number']}</td>
<td id="issuename">${issue['IssueName']}</td>
<td id="reldate">${issue['IssueDate']}</td>

View File

@ -156,6 +156,12 @@
<label>SABnzbd Password:</label>
<input type="password" name="sab_password" value="${config['sab_pass']}" size="20">
</div>
<div class="row">
<label>SABnzbd Download Directory</label>
<input type="text" name="sab_directory" value="${config['sab_directory']}" size="36" />
<small>Where your SAB downloads go... (optional)</small>
</div>
<div class="row">
<label>SABnzbd Category:</label>
<input type="text" name="sab_category" value="${config['sab_cat']}" size="20">
@ -328,7 +334,7 @@
</div>
</fieldset>
<fieldset>
<legend>Restrictions (not working yet)</legend>
<legend>Restrictions (only with Experimental ATM)</legend>
<div class="row checkbox left">
<input id="use_minsize" type="checkbox" style="vertical-align: middle; margin: 3px; margin-top: -1px;" onclick="initConfigCheckbox($(this));" name="use_minsize" value="1" ${config['use_minsize']} /><label>Minimum File size<small>(MB)</small></label>
</div>
@ -390,7 +396,7 @@
</div>
<div class="row">
<label>Folder Format</label>
<input type="text" name="folder_format" value="${config['folder_format']}" size="43">
<input type="text" title="$Publisher, $Series, $Year" name="folder_format" value="${config['folder_format']}" size="43">
<small>Use: $Publisher, $Series, $Year<br />
E.g.: $Publisher/$Series ($Year) = DC Comics/Action Comics (2011)</small>
</div>
@ -498,6 +504,9 @@
</%def>
<%def name="javascriptIncludes()">
<script>
$( document ).tooltip();
</script>
<script>
hideServerDivs = function () {

View File

@ -74,7 +74,7 @@
<td id="status">${result['Status']}</td>
<td id="importdate">${result['ImportDate']}</td>
<td id="addcomic">[<a href="preSearchit?ComicName=${result['ComicName']}&imp_rename=$imp_rename&imp_move=$imp_move">Import</a>]
[<a href="queueissue?ComicID=${result['ComicName']}">Write Metadata</a>]
[<a href="deleteimport?ComicName=${result['ComicName']}">Remove</a>]
</td>
</tr>
<%

View File

@ -32,7 +32,7 @@
<td class="comicyear">${result['comicyear']}</a></td>
<td class="issues">${result['issues']}</td>
<td class="add" nowrap="nowrap"><a href="addComic?comicid=${result['comicid']}&comicname=${result['name'] |u}&comicyear=${result['comicyear']}&comicpublisher=${result['publisher']}&comicimage=${result['comicimage']}&comicissues=${result['issues']}&imported=${imported}"><span class="ui-icon ui-icon-plus"></span> Add this Comic</a></td>
<td class="add" nowrap="nowrap"><a href="addComic?comicid=${result['comicid']}&comicname=${result['name'] |u}&comicyear=${result['comicyear']}&comicpublisher=${result['publisher']}&comicimage=${result['comicimage']}&comicissues=${result['issues']}&imported=${imported}&ogcname=${ogcname}"><span class="ui-icon ui-icon-plus"></span> Add this Comic</a></td>
</tr>
%endfor
%endif

View File

@ -139,6 +139,10 @@ class PostProcessor(object):
self._log("nzb folder: " + str(self.nzb_folder), logger.DEBUG)
logger.fdebug("nzb name: " + str(self.nzb_name))
logger.fdebug("nzb folder: " + str(self.nzb_folder))
# if the SAB Directory option is enabled, let's use that folder name and append the jobname.
if mylar.SAB_DIRECTORY is not None and mylar.SAB_DIRECTORY is not 'None' and len(mylar.SAB_DIRECTORY) > 4:
self.nzb_folder = os.path.join(mylar.SAB_DIRECTORY, self.nzb_name).encode(mylar.SYS_ENCODING)
#lookup nzb_name in nzblog table to get issueid
#query SAB to find out if Replace Spaces enabled / not as well as Replace Decimals

View File

@ -113,9 +113,9 @@ ZERO_LEVEL = False
ZERO_LEVEL_N = None
LOWERCASE_FILENAME = False
USE_MINSIZE = False
MINSIZE = None
MINSIZE = 10
USE_MAXSIZE = False
MAXSIZE = None
MAXSIZE = 60
AUTOWANT_UPCOMING = True
AUTOWANT_ALL = False
COMIC_COVER_LOCAL = False
@ -130,6 +130,7 @@ SAB_PASSWORD = None
SAB_APIKEY = None
SAB_CATEGORY = None
SAB_PRIORITY = None
SAB_DIRECTORY = None
NZBSU = False
NZBSU_APIKEY = None
@ -219,7 +220,7 @@ def initialize():
HTTP_PORT, HTTP_HOST, HTTP_USERNAME, HTTP_PASSWORD, HTTP_ROOT, LAUNCH_BROWSER, GIT_PATH, \
CURRENT_VERSION, LATEST_VERSION, CHECK_GITHUB, CHECK_GITHUB_ON_STARTUP, CHECK_GITHUB_INTERVAL, MUSIC_DIR, DESTINATION_DIR, \
DOWNLOAD_DIR, USENET_RETENTION, SEARCH_INTERVAL, NZB_STARTUP_SEARCH, INTERFACE, AUTOWANT_ALL, AUTOWANT_UPCOMING, ZERO_LEVEL, ZERO_LEVEL_N, COMIC_COVER_LOCAL, \
LIBRARYSCAN, LIBRARYSCAN_INTERVAL, DOWNLOAD_SCAN_INTERVAL, SAB_HOST, SAB_USERNAME, SAB_PASSWORD, SAB_APIKEY, SAB_CATEGORY, SAB_PRIORITY, BLACKHOLE, BLACKHOLE_DIR, ADD_COMICS, COMIC_DIR, IMP_MOVE, IMP_RENAME, IMP_METADATA, \
LIBRARYSCAN, LIBRARYSCAN_INTERVAL, DOWNLOAD_SCAN_INTERVAL, SAB_HOST, SAB_USERNAME, SAB_PASSWORD, SAB_APIKEY, SAB_CATEGORY, SAB_PRIORITY, SAB_DIRECTORY, BLACKHOLE, BLACKHOLE_DIR, ADD_COMICS, COMIC_DIR, IMP_MOVE, IMP_RENAME, IMP_METADATA, \
NZBSU, NZBSU_APIKEY, DOGNZB, DOGNZB_APIKEY, NZBX,\
NEWZNAB, NEWZNAB_HOST, NEWZNAB_APIKEY, NEWZNAB_ENABLED, EXTRA_NEWZNABS,\
RAW, RAW_PROVIDER, RAW_USERNAME, RAW_PASSWORD, RAW_GROUPS, EXPERIMENTAL, \
@ -310,6 +311,7 @@ def initialize():
SAB_PASSWORD = check_setting_str(CFG, 'SABnzbd', 'sab_password', '')
SAB_APIKEY = check_setting_str(CFG, 'SABnzbd', 'sab_apikey', '')
SAB_CATEGORY = check_setting_str(CFG, 'SABnzbd', 'sab_category', '')
SAB_DIRECTORY = check_setting_str(CFG, 'SABnzbd', 'sab_directory', '')
SAB_PRIORITY = check_setting_str(CFG, 'SABnzbd', 'sab_priority', '')
if SAB_PRIORITY.isdigit():
if SAB_PRIORITY == "0": SAB_PRIORITY = "Default"
@ -565,6 +567,7 @@ def config_write():
new_config['SABnzbd']['sab_apikey'] = SAB_APIKEY
new_config['SABnzbd']['sab_category'] = SAB_CATEGORY
new_config['SABnzbd']['sab_priority'] = SAB_PRIORITY
new_config['SABnzbd']['sab_directory'] = SAB_DIRECTORY
new_config['NZBsu'] = {}
new_config['NZBsu']['nzbsu'] = int(NZBSU)

View File

@ -63,18 +63,22 @@ def listFiles(dir,watchcomic,AlternateSearch=None):
comicsize = os.path.getsize(comicpath)
#print ("Comicsize:" + str(comicsize))
comiccnt+=1
if modwatchcomic.lower() in subname.lower():
jtd_len = len(modwatchcomic)
justthedigits = item[jtd_len:]
elif altsearchcomic.lower() in subname.lower():
jtd_len = len(altsearchcomic)
justthedigits = item[jtd_len:]
comiclist.append({
'ComicFilename': item,
'ComicLocation': comicpath,
'ComicSize': comicsize
'ComicSize': comicsize,
'JusttheDigits': justthedigits
})
watchmatch['comiclist'] = comiclist
else:
pass
#print ("directory found - ignoring")
logger.fdebug("you have a total of " + str(comiccnt) + " " + str(watchcomic) + " comics")
watchmatch['comiccount'] = comiccnt
return watchmatch

View File

@ -6,6 +6,7 @@ import lib.feedparser as feedparser
#import feedparser
import re
import logger
import mylar
def Startit(searchName, searchIssue, searchYear):
#searchName = "Uncanny Avengers"
@ -21,8 +22,15 @@ def Startit(searchName, searchIssue, searchYear):
searchIsOne = "0"+searchIssue
searchIsTwo = "00"+searchIssue
if mylar.USE_MINSIZE:
size_constraints = "minsize=" + str(mylar.MINSIZE)
else:
size_constraints = "minsize=10"
feed = feedparser.parse("http://nzbindex.nl/rss/alt.binaries.comics.dcp/?sort=agedesc&minsize=10&dq=%s&max=25&more=1" %joinSearch)
if mylar.USE_MAXSIZE:
size_constraints = size_constraints + "&maxsize=" + str(mylar.MAXSIZE)
feed = feedparser.parse("http://nzbindex.nl/rss/alt.binaries.comics.dcp/?sort=agedesc&" + str(size_constraints) + "&dq=%s&max=25&more=1" %joinSearch)
totNum = len(feed.entries)

View File

@ -23,7 +23,7 @@ import urllib
import shutil
import mylar
from mylar import logger, helpers, db, mb, albumart, cv, parseit, filechecker, search, updater
from mylar import logger, helpers, db, mb, albumart, cv, parseit, filechecker, search, updater, moveit
def is_exists(comicid):
@ -40,7 +40,7 @@ def is_exists(comicid):
return False
def addComictoDB(comicid,mismatch=None,pullupd=None,imported=None):
def addComictoDB(comicid,mismatch=None,pullupd=None,imported=None,ogcname=None):
# Putting this here to get around the circular import. Will try to use this to update images at later date.
# from mylar import cache
@ -370,27 +370,16 @@ def addComictoDB(comicid,mismatch=None,pullupd=None,imported=None):
text_file.write("http://www.comicvine.com/" + str(comic['ComicName']).replace(" ", "-") + "/49-" + str(comicid))
logger.info(u"Updating complete for: " + comic['ComicName'])
print ("imported is : " + str(imported))
impres = myDB.action("SELECT * from importresults WHERE ComicName LIKE ?", [comic['ComicName']])
if impres is not None:
#print ("preparing to move " + str(len(impres)) + " files into the right directory now.")
for impr in impres:
srcimp = impr['ComicLocation']
dstimp = os.path.join(comlocation, impr['ComicFilename'])
print ("moving " + str(srcimp) + " ... to " + str(dstimp))
try:
shutil.move(srcimp, dstimp)
except (OSError, IOError):
print("Failed to move files - check directories and manually re-run.")
print("files moved.")
#now that it's moved / renamed ... we remove it from importResults or mark as completed.
results = myDB.action("SELECT * FROM importresults WHERE ComicName=?", [comic['ComicName']])
if results is None: pass
else:
for result in results:
controlValue = {"impID": result['impid']}
newValue = {"Status": "Imported" }
myDB.upsert("importresults", newValue, controlValue)
#move the files...if imported is not empty (meaning it's not from the mass importer.)
if imported is None or imported == 'None':
pass
else:
print ("imported length is : " + str(len(imported)))
print ("imported is :" + str(imported))
if mylar.IMP_MOVE:
logger.info("Mass import - Move files")
moveit.movefiles(comlocation,ogcname)
#check for existing files...
updater.forceRescan(comicid)

View File

@ -99,8 +99,13 @@ def libraryScan(dir=None, append=False, ComicID=None, ComicName=None, cron=None)
# let's clean up the name, just in case for comparison purposes...
watchcomic = re.sub('[\_\#\,\/\:\;\.\-\!\$\%\&\+\'\?\@]', ' ', str(watch['ComicName']))
#watchcomic = re.sub('\s+', ' ', str(watchcomic)).strip()
if ' the ' in watchcomic.lower():
#drop the 'the' from the watchcomic title for proper comparisons.
watchcomic = watchcomic[-4:]
alt_chk = "no" # alt-checker flag (default to no)
# account for alternate names as well
if watch['AlternateSearch'] is not None and watch['AlternateSearch'] is not 'None':
altcomic = re.sub('[\_\#\,\/\:\;\.\-\!\$\%\&\+\'\?\@]', ' ', str(watch['AlternateSearch']))
@ -228,6 +233,9 @@ def libraryScan(dir=None, append=False, ComicID=None, ComicName=None, cron=None)
comic_iss_b4 = re.sub('[\-\:\,]', ' ', str(comic_andiss))
comic_iss = comic_iss_b4.replace('.',' ')
logger.fdebug("adjusted comic and issue: " + str(comic_iss))
#remove 'the' from here for proper comparisons.
if ' the ' in comic_iss.lower():
comic_iss = comic_iss[-4:]
splitit = comic_iss.split(None)
logger.fdebug("adjusting from: " + str(comic_iss_b4) + " to: " + str(comic_iss))
#bmm = re.findall('v\d', comic_iss)
@ -277,9 +285,9 @@ def libraryScan(dir=None, append=False, ComicID=None, ComicName=None, cron=None)
logger.fdebug(str(watchcomic_split) + " watchlist word count: " + str(len(watchcomic_split)))
if (splitst) != len(watchcomic_split):
logger.fdebug("incorrect comic lengths...not a match")
if str(splitit[0]).lower() == "the":
logger.fdebug("THE word detected...attempting to adjust pattern matching")
splitit[0] = splitit[4:]
# if str(splitit[0]).lower() == "the":
# logger.fdebug("THE word detected...attempting to adjust pattern matching")
# splitit[0] = splitit[4:]
else:
logger.fdebug("length match..proceeding")
n = 0

32
mylar/moveit.py Normal file
View File

@ -0,0 +1,32 @@
import mylar
from mylar import db, logger
import os
import shutil


def movefiles(comlocation, ogcname, imported=None):
    """Move mass-imported comic files into their final series directory.

    Looks up every ``importresults`` row whose ComicName matches *ogcname*
    (the original, unmodified comic name recorded by the importer), moves
    each file from its scanned location into *comlocation*, and marks the
    row as 'Imported' only when the move actually succeeded.

    Args:
        comlocation: destination directory of the series (ComicLocation).
        ogcname: original comic name as stored in importresults — this is
            the pre-version-stripped name, so it must be passed through
            unchanged from the import screens.
        imported: unused; accepted for call-site compatibility.

    Returns:
        None.
    """
    myDB = db.DBConnection()
    print ("comlocation is : " + str(comlocation))
    print ("original comicname is : " + str(ogcname))
    impres = myDB.action("SELECT * from importresults WHERE ComicName=?", [ogcname])
    if impres is not None:
        # Materialize the rows up-front so the upserts below don't run
        # against a cursor we are still iterating.
        for impr in list(impres):
            srcimp = impr['ComicLocation']
            dstimp = os.path.join(comlocation, impr['ComicFilename'])
            logger.info("moving " + str(srcimp) + " ... to " + str(dstimp))
            try:
                shutil.move(srcimp, dstimp)
            except (OSError, IOError):
                logger.error("Failed to move files - check directories and manually re-run.")
                # Do NOT mark this row as Imported — the file is still in
                # its original location and a re-run must pick it up.
                continue
            # Move succeeded: flag just this row as completed so a re-run
            # won't try to move it again.
            controlValue = {"impID": impr['impid']}
            newValue = {"Status": "Imported"}
            myDB.upsert("importresults", newValue, controlValue)
    return

View File

@ -208,6 +208,13 @@ def GCDdetails(comseries, resultURL, vari_loop, ComicID, TotalIssues, issvariati
subtxt3 = parsed.find("dd", {"id" : "publication_dates"})
resultPublished = subtxt3.findNext(text=True).rstrip()
#print ("pubdate:" + str(resultPublished))
subtxt9 = parsed.find("dd", {"id" : "series_format"})
resultFormat = subtxt9.findNext(text=True).rstrip()
# the caveat - if a series is ongoing but only has 1 issue published at a particular point in time,
# resultPublished will return just the date and not the word 'Present' which dictates on the main
# page if a series is Continuing / Ended .
if 'ongoing series' in resultFormat.lower() and 'was' not in resultFormat.lower():
resultPublished = resultPublished + " - Present"
coverst = soup.find("div", {"id" : "series_cover"})
if coverst < 0:
gcdcover = "None"

View File

@ -366,7 +366,23 @@ def NZB_SEARCH(ComicName, IssueNumber, ComicYear, SeriesYear, nzbprov, nzbpr, Is
nzbname = cleantitle
logger.fdebug("Cleantitle: " + str(cleantitle))
if len(re.findall('[^()]+', cleantitle)) == 1: cleantitle = "abcdefghijk 0 (1901).cbz"
if len(re.findall('[^()]+', cleantitle)) == 1: cleantitle = "abcdefghijk 0 (1901).cbz"
#----size constraints.
#if it's not within size constaints - dump it now and save some time.
# logger.fdebug("size : " + str(entry['size']))
# if mylar.USE_MINSIZE:
# conv_minsize = int(mylar.MINSIZE) * 1024 * 1024
# print("comparing " + str(conv_minsize) + " .. to .. " + str(entry['size']))
# if conv_minsize >= int(entry['size']):
# print("Failure to meet the Minimum size threshold - skipping")
# continue
# if mylar.USE_MAXSIZE:
# conv_maxsize = int(mylar.maxsize) * 1024 * 1024
# print("comparing " + str(conv_maxsize) + " .. to .. " + str(entry['size']))
# if conv_maxsize >= int(entry['size']):
# print("Failure to meet the Maximium size threshold - skipping")
# continue
# -- end size constaints.
if done:
break
#let's narrow search down - take out year (2010), (2011), etc

View File

@ -236,20 +236,21 @@ def forceRescan(ComicID):
tmpfc = fc['comiclist'][fn]
except IndexError:
break
temploc = tmpfc['ComicFilename'].replace('_', ' ')
temploc= tmpfc['JusttheDigits'].replace('_', ' ')
# temploc = tmpfc['ComicFilename'].replace('_', ' ')
temploc = re.sub('[\#\']', '', temploc)
#logger.fdebug("temploc: " + str(temploc))
if 'annual' not in temploc:
#remove the extension here
extensions = ('.cbr','.cbz')
if temploc.lower().endswith(extensions):
print ("removed extension for issue:" + str(temploc))
#print ("removed extension for issue:" + str(temploc))
temploc = temploc[:-4]
deccnt = str(temploc).count('.')
if deccnt > 1:
print ("decimal counts are :" + str(deccnt))
#print ("decimal counts are :" + str(deccnt))
#if the file is formatted with '.' in place of spaces we need to adjust.
#before replacing - check to see if digits on either side of decimal and if yes, DON'T REMOVE
occur=1
prevstart = 0
@ -262,28 +263,28 @@ def forceRescan(ComicID):
while start >=0 and n > 1:
start = temploc.find('.', start+len('.'))
n-=1
print "occurance " + str(occur) + " of . at position: " + str(start)
#print "occurance " + str(occur) + " of . at position: " + str(start)
if temploc[prevstart:start].isdigit():
if digitfound == "yes":
print ("this is a decimal, assuming decimal issue.")
#print ("this is a decimal, assuming decimal issue.")
decimalfound = "yes"
reconst = "." + temploc[prevstart:start] + " "
else:
print ("digit detected.")
#print ("digit detected.")
digitfound = "yes"
reconst = temploc[prevstart:start]
else:
reconst = temploc[prevstart:start] + " "
print "word: " + reconst
#print "word: " + reconst
tempreconstruct = tempreconstruct + reconst
print ("tempreconstruct is : " + tempreconstruct)
#print ("tempreconstruct is : " + tempreconstruct)
prevstart = (start+1)
occur+=1
print "word: " + temploc[prevstart:]
#print "word: " + temploc[prevstart:]
tempreconstruct = tempreconstruct + temploc[prevstart:]
print ("final filename to use is : " + str(tempreconstruct))
#print ("final filename to use is : " + str(tempreconstruct))
temploc = tempreconstruct
print("checking " + str(temploc))
#print("checking " + str(temploc))
fcnew = shlex.split(str(temploc))
fcn = len(fcnew)
n = 0
@ -374,7 +375,7 @@ def forceRescan(ComicID):
#logger.fdebug("let's compare with this issue value: " + str(fcdigit))
else:
# it's a word, skip it.
fcdigit = 1000000
fcdigit = 19283838380101193
#logger.fdebug("fcdigit: " + str(fcdigit))
#logger.fdebug("int_iss: " + str(int_iss))
if "." in str(int_iss):

View File

@ -34,7 +34,7 @@ import shutil
import mylar
from mylar import logger, db, importer, mb, search, filechecker, helpers, updater, parseit, weeklypull, PostProcessor, version, librarysync
from mylar import logger, db, importer, mb, search, filechecker, helpers, updater, parseit, weeklypull, PostProcessor, version, librarysync, moveit
#from mylar.helpers import checked, radio, today
import lib.simplejson as simplejson
@ -103,10 +103,10 @@ class WebInterface(object):
searchresults = sorted(searchresults, key=itemgetter('comicyear','issues'), reverse=True)
#print ("Results: " + str(searchresults))
return serve_template(templatename="searchresults.html", title='Search Results for: "' + name + '"', searchresults=searchresults, type=type, imported=None)
return serve_template(templatename="searchresults.html", title='Search Results for: "' + name + '"', searchresults=searchresults, type=type, imported=None, ogcname=None)
searchit.exposed = True
def addComic(self, comicid, comicname=None, comicyear=None, comicimage=None, comicissues=None, comicpublisher=None, imported=None):
def addComic(self, comicid, comicname=None, comicyear=None, comicimage=None, comicissues=None, comicpublisher=None, imported=None, ogcname=None):
myDB = db.DBConnection()
sresults = []
cresults = []
@ -177,8 +177,8 @@ class WebInterface(object):
#searchfix(-1).html is for misnamed comics and wrong years.
#searchfix-2.html is for comics that span multiple volumes.
return serve_template(templatename="searchfix-2.html", title="In-Depth Results", sresults=sresults)
print ("imported is: " + str(imported))
threading.Thread(target=importer.addComictoDB, args=[comicid,mismatch,None,imported]).start()
#print ("imported is: " + str(imported))
threading.Thread(target=importer.addComictoDB, args=[comicid,mismatch,None,imported,ogcname]).start()
raise cherrypy.HTTPRedirect("artistPage?ComicID=%s" % comicid)
addComic.exposed = True
@ -311,14 +311,16 @@ class WebInterface(object):
else:
newaction = action
for IssueID in args:
if IssueID is None: continue
#print ("issueID: " + str(IssueID) + "... " + str(newaction))
if IssueID is None or 'issue_table' in IssueID:
continue
else:
mi = myDB.action("SELECT * FROM issues WHERE IssueID=?",[IssueID]).fetchone()
miyr = myDB.action("SELECT ComicYear FROM comics WHERE ComicID=?", [mi['ComicID']]).fetchone()
if action == 'Downloaded':
if mi['Status'] == "Skipped" or mi['Status'] == "Wanted":
logger.info(u"Cannot change status to %s as comic is not Snatched or Downloaded" % (newaction))
continue
# continue
elif action == 'Archived':
logger.info(u"Marking %s %s as %s" % (mi['ComicName'], mi['Issue_Number'], newaction))
#updater.forceRescan(mi['ComicID'])
@ -326,14 +328,15 @@ class WebInterface(object):
elif action == 'Wanted':
logger.info(u"Marking %s %s as %s" % (mi['ComicName'], mi['Issue_Number'], newaction))
issuesToAdd.append(IssueID)
elif action == 'Skipped':
logger.info(u"Marking " + str(IssueID) + " as Skipped")
controlValueDict = {"IssueID": IssueID}
newValueDict = {"Status": newaction}
myDB.upsert("issues", newValueDict, controlValueDict)
if len(issuestoArchive) > 0:
updater.forceRescan(mi['ComicID'])
if len(issuesToAdd) > 0:
logger.debug("Marking issues: %s as Wanted" % issuesToAdd)
logger.debug("Marking issues: %s as Wanted" % (issuesToAdd))
threading.Thread(target=search.searchIssueIDList, args=[issuesToAdd]).start()
#if IssueID:
raise cherrypy.HTTPRedirect("artistPage?ComicID=%s" % mi['ComicID'])
@ -703,9 +706,16 @@ class WebInterface(object):
return serve_template(templatename="importresults.html", title="Import Results", results=results)
importResults.exposed = True
def deleteimport(self, ComicName):
myDB = db.DBConnection()
logger.info("Removing import data for Comic: " + str(ComicName))
myDB.action('DELETE from importresults WHERE ComicName=?', [ComicName])
raise cherrypy.HTTPRedirect("importResults")
deleteimport.exposed = True
def preSearchit(self, ComicName, imp_rename, imp_move):
print ("imp_rename:" + str(imp_rename))
print ("imp_move:" + str(imp_move))
#print ("imp_rename:" + str(imp_rename))
#print ("imp_move:" + str(imp_move))
myDB = db.DBConnection()
results = myDB.action("SELECT * FROM importresults WHERE ComicName=?", [ComicName])
#if results > 0:
@ -714,6 +724,7 @@ class WebInterface(object):
yearRANGE = []
yearTOP = 0
minISSUE = 0
startISSUE = 10000000
comicstoIMP = []
for result in results:
if result is None:
@ -728,27 +739,66 @@ class WebInterface(object):
print ("adding..." + str(result['ComicYear']))
yearRANGE.append(result['ComicYear'])
yearTOP = str(result['ComicYear'])
if int(getiss) > (minISSUE):
if int(getiss) > int(minISSUE):
print ("issue now set to : " + str(getiss) + " ... it was : " + str(minISSUE))
minISSUE = str(getiss)
if int(getiss) < int(startISSUE):
print ("issue now set to : " + str(getiss) + " ... it was : " + str(startISSUE))
startISSUE = str(getiss)
#figure out # of issues and the year range allowable
maxyear = int(yearTOP) - (int(minISSUE) / 12)
yearRANGE.append(str(maxyear))
print ("there is a " + str(maxyear) + " year variation based on the 12 issues/year")
if yearTOP > 0:
maxyear = int(yearTOP) - (int(minISSUE) / 12)
yearRANGE.append(str(maxyear))
print ("there is a " + str(maxyear) + " year variation based on the 12 issues/year")
#determine a best-guess to # of issues in series
#this needs to be reworked / refined ALOT more.
#minISSUE = highest issue #, startISSUE = lowest issue #
numissues = int(minISSUE) - int(startISSUE)
#normally minissue would work if the issue #'s started at #1.
print ("the years involved are : " + str(yearRANGE))
print ("minimum issue level is : " + str(minISSUE))
print ("highest issue # is : " + str(minISSUE))
print ("lowest issue # is : " + str(startISSUE))
print ("approximate number of issues : " + str(numissues))
print ("issues present on system : " + str(len(comicstoIMP)))
print ("versioning checking: ")
cnsplit = ComicName.split()
cnwords = len(cnsplit)
cnvers = cnsplit[cnwords-1]
ogcname = ComicName
if 'v' in cnvers:
print ("possible versioning detected.")
if cnvers[1:].isdigit():
print (cnvers + " - assuming versioning. Removing from initial search pattern.")
ComicName = ComicName[:-((len(cnvers))+1)]
print ("new comicname is : " + str(ComicName))
# we need to pass the original comicname here into the entire importer module
# so that we can reference the correct issues later.
mode='series'
sresults = mb.findComic(ComicName, mode, issue=minISSUE, limityear=yearRANGE)
sresults = mb.findComic(ComicName, mode, issue=numissues, limityear=yearRANGE)
type='comic'
if len(sresults) == 1:
sr = sresults[0]
print ("only one result...automagik-mode enabled for " + str(sr['comicid']))
self.addComic(comicid=sr['comicid'],comicname=sr['name'],comicyear=sr['comicyear'],comicpublisher=sr['publisher'],comicimage=sr['comicimage'],comicissues=sr['issues'],imported=comicstoIMP)
#need to move the files here.
if len(sresults) == 0 or len(sresults) is None:
resultset = 1
# #need to move the files here.
elif len(sresults) == 0 or len(sresults) is None:
print ("no results, removing the year from the agenda and re-querying.")
sresults = mb.findComic(ComicName, mode, issue=minISSUE)
return serve_template(templatename="searchresults.html", title='Search Results for: "' + ComicName + '"',searchresults=sresults, type=type, imported=comicstoIMP)
sresults = mb.findComic(ComicName, mode, issue=numissues)
if len(sresults) == 1:
print ("only one result...automagik-mode enabled for " + str(sr['comicid']))
resultset = 1
else:
resultset = 0
else:
print ("returning results to screen - more than one possibility.")
resultset = 0
if resultset == 1:
self.addComic(comicid=sr['comicid'],comicname=sr['name'],comicyear=sr['comicyear'],comicpublisher=sr['publisher'],comicimage=sr['comicimage'],comicissues=sr['issues'],imported=comicstoIMP,ogcname=ogcname)
else:
return serve_template(templatename="searchresults.html", title='Search Results for: "' + ComicName + '"',searchresults=sresults, type=type, imported=comicstoIMP, ogcname=ogcname)
preSearchit.exposed = True
#---
@ -777,6 +827,7 @@ class WebInterface(object):
"sab_pass" : mylar.SAB_PASSWORD,
"sab_cat" : mylar.SAB_CATEGORY,
"sab_priority" : mylar.SAB_PRIORITY,
"sab_directory" : mylar.SAB_DIRECTORY,
"use_blackhole" : helpers.checked(mylar.BLACKHOLE),
"blackhole_dir" : mylar.BLACKHOLE_DIR,
"usenet_retention" : mylar.USENET_RETENTION,
@ -917,7 +968,7 @@ class WebInterface(object):
comic_config.exposed = True
def configUpdate(self, http_host='0.0.0.0', http_username=None, http_port=8090, http_password=None, launch_browser=0, logverbose=0, download_scan_interval=None, nzb_search_interval=None, nzb_startup_search=0, libraryscan_interval=None,
sab_host=None, sab_username=None, sab_apikey=None, sab_password=None, sab_category=None, sab_priority=None, log_dir=None, log_level=0, blackhole=0, blackhole_dir=None,
sab_host=None, sab_username=None, sab_apikey=None, sab_password=None, sab_category=None, sab_priority=None, sab_directory=None, log_dir=None, log_level=0, blackhole=0, blackhole_dir=None,
usenet_retention=None, nzbsu=0, nzbsu_apikey=None, dognzb=0, dognzb_apikey=None, nzbx=0, newznab=0, newznab_host=None, newznab_apikey=None, newznab_enabled=0,
raw=0, raw_provider=None, raw_username=None, raw_password=None, raw_groups=None, experimental=0,
preferred_quality=0, move_files=0, rename_files=0, add_to_csv=1, cvinfo=0, lowercase_filenames=0, folder_format=None, file_format=None, enable_extra_scripts=0, extra_scripts=None, enable_pre_scripts=0, pre_scripts=None,
@ -938,6 +989,7 @@ class WebInterface(object):
mylar.SAB_APIKEY = sab_apikey
mylar.SAB_CATEGORY = sab_category
mylar.SAB_PRIORITY = sab_priority
mylar.SAB_DIRECTORY = sab_directory
mylar.BLACKHOLE = blackhole
mylar.BLACKHOLE_DIR = blackhole_dir
mylar.USENET_RETENTION = usenet_retention