FIX:(#742) Overlapping text on the ManageComics screen, FIX: Parsed names showing up on the Upcoming list instead of the actual series titles, FIX:(#744) Alternate Search on a series with '&' in its name would never get any matches, FIX:(#741) Search would return duplicate entries for the same result (default search mode), FIX:(#738) Wipe Nzblog now returns to the History page after the wipe, IMP: Removed unnecessary logging that resulted in excessive spam, FIX: ManageComics screen - using the checkboxes would result in a table error

This commit is contained in:
evilhero 2014-06-11 14:39:50 -04:00
parent 81a8029480
commit 3b148298df
9 changed files with 42 additions and 23 deletions

View File

@@ -13,12 +13,11 @@
<%def name="body()">
<div class="table_wrapper">
<div id="manageheader" class="title">
<h1 class="clearfix">Manage comics</h1>
</div>
<form action="markComics" method="get" id="markComics">
<div id="markalbum">
<div id="markcomics" style="top:0;">
<select name="action" onChange="doAjaxCall('markComics',$(this),'table',true);" data-error="You didn't select any comics">
<option disabled="disabled" selected="selected">Choose...</option>
<option value="pause">Pause</option>
@@ -29,6 +28,8 @@
selected comics
<input type="hidden" value="Go">
</div>
<div class="table_wrapper">
<table class="display" id="manage_comic">
<thead>
<tr>
@@ -57,8 +58,8 @@
%endfor
</tbody>
</table>
</form>
</div>
</form>
</%def>
<%def name="headIncludes()">

View File

@@ -98,7 +98,7 @@
</div>
</div>
<div id="tabs-2">
<a id="menu_link_edit" href="force_check">Force Check Availability</a>
<a id="menu_link_edit" href="future_check">Force Check Availability</a>
<div class="table_wrapper">
<table class="display_no_select" id="upcoming_table">
%if future_nodata_upcoming:

View File

@@ -1188,8 +1188,10 @@ def updateissuedata(comicid, comicname=None, issued=None, comicIssues=None, call
#logger.fdebug('latest date: ' + str(latestdate))
#logger.fdebug('first date: ' + str(firstdate))
#logger.fdebug('issue date: ' + str(firstval['Issue_Date']))
if firstval['Issue_Date'] > latestdate:
if issnum > latestiss:
if firstval['Issue_Date'] >= latestdate:
#logger.fdebug('date check hit for issue date > latestdate')
if int_issnum > helpers.issuedigits(latestiss):
#logger.fdebug('assigning latest issue to : ' + str(issnum))
latestiss = issnum
latestdate = str(firstval['Issue_Date'])
if firstval['Issue_Date'] < firstdate:
@@ -1265,7 +1267,6 @@ def updateissuedata(comicid, comicname=None, issued=None, comicIssues=None, call
importantdates['LatestDate'] = latestdate
importantdates['LastPubDate'] = lastpubdate
if calledfrom == 'weekly':
return weeklyissue_check
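
(Roughly, the reworked check above keeps the highest-numbered issue among those sharing the newest store date, instead of only looking at the date. A minimal standalone sketch of that idea; issuedigits below is a simplified stand-in for Mylar's helpers.issuedigits, which handles many more issue-number formats.)

def issuedigits(issnum):
    # crude stand-in: turn '12' or '12.1' into a sortable integer
    try:
        return int(float(issnum) * 1000)
    except ValueError:
        return 0

def track_latest(issues):
    # issues: dicts with 'Issue_Number' and 'Issue_Date' ('YYYY-MM-DD' strings)
    latestiss, latestdate = '0', '0000-00-00'
    for iss in issues:
        if iss['Issue_Date'] >= latestdate:
            # same-or-newer date: only advance when the issue number is also higher
            if issuedigits(iss['Issue_Number']) > issuedigits(latestiss):
                latestiss = iss['Issue_Number']
                latestdate = iss['Issue_Date']
    return latestiss, latestdate

print(track_latest([{'Issue_Number': '1', 'Issue_Date': '2014-01-01'},
                    {'Issue_Number': '2', 'Issue_Date': '2014-02-01'}]))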

View File

@@ -32,7 +32,7 @@ def pullsearch(comicapi,comicquery,offset,explicit):
u_comicquery = u_comicquery.replace(" ", "%20")
if explicit == 'all' or explicit == 'loose':
PULLURL = mylar.CVURL + 'search?api_key=' + str(comicapi) + '&resources=volume&query=' + u_comicquery + '&field_list=id,name,start_year,site_detail_url,count_of_issues,image,publisher,description&format=xml&offset=' + str(offset)
PULLURL = mylar.CVURL + 'search?api_key=' + str(comicapi) + '&resources=volume&query=' + u_comicquery + '&field_list=id,name,start_year,site_detail_url,count_of_issues,image,publisher,description&format=xml&page=' + str(offset)
else:
# 02/22/2014 use the volume filter label to get the right results.
@@ -106,8 +106,13 @@ def findComic(name, mode, issue, limityear=None, explicit=None):
#logger.fdebug("querying " + str(countResults))
if countResults > 0:
#2012/22/02 - CV API flipped back to offset usage instead of page
offsetcount = countResults
if explicit == 'all' or explicit == 'loose':
#all / loose uses page for offset
offsetcount = (countResults/100) + 1
else:
#explicit uses offset
offsetcount = countResults
searched = pullsearch(comicapi,comicquery,offsetcount,explicit)
comicResults = searched.getElementsByTagName('volume')
body = ''
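
(For context on the hunk above: the ComicVine 'search' resource used for the 'all'/'loose' modes pages by a page number, while the filtered volume query used for 'explicit' mode skips by a raw offset, so the follow-up request parameter is now built differently per mode. A hedged sketch of that branching; the 100-results-per-page figure mirrors the diff, and nothing here is a literal copy of pullsearch.)

def next_paging_param(explicit, results_seen, per_page=100):
    if explicit in ('all', 'loose'):
        # the plain search resource pages by number: results seen so far -> next page
        return 'page=' + str(results_seen // per_page + 1)
    # the volumes filter pages by raw offset
    return 'offset=' + str(results_seen)

print(next_paging_param('all', 100))       # page=2
print(next_paging_param('explicit', 100))  # offset=100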

View File

@@ -556,15 +556,15 @@ def torrentdbsearch(seriesname,issue,comicid=None,nzbprov=None):
seriesname_mod = re.sub('[\&]', ' ', seriesname_mod)
foundname_mod = re.sub('[\&]', ' ', foundname_mod)
formatrem_seriesname = re.sub('[\'\!\@\#\$\%\:\;\=\?\.\/]', '',seriesname_mod)
formatrem_seriesname = re.sub('[\'\!\@\#\$\%\:\;\=\?\.]', '',seriesname_mod)
formatrem_seriesname = re.sub('[\-]', ' ',formatrem_seriesname)
formatrem_seriesname = re.sub('[\/]', '-', formatrem_seriesname) #not necessary since seriesname in a torrent file won't have /
formatrem_seriesname = re.sub('[\/]', ' ', formatrem_seriesname) #not necessary since seriesname in a torrent file won't have /
formatrem_seriesname = re.sub('\s+', ' ', formatrem_seriesname)
if formatrem_seriesname[:1] == ' ': formatrem_seriesname = formatrem_seriesname[1:]
formatrem_torsplit = re.sub('[\'\!\@\#\$\%\:\;\\=\?\.\/]', '',foundname_mod)
formatrem_torsplit = re.sub('[\'\!\@\#\$\%\:\;\\=\?\.]', '',foundname_mod)
formatrem_torsplit = re.sub('[\-]', ' ',formatrem_torsplit) #we replace the - with space so we'll get hits if differnces
#formatrem_torsplit = re.sub('[\/]', '-', formatrem_torsplit) #not necessary since if has a /, should be removed in above line
formatrem_torsplit = re.sub('[\/]', ' ', formatrem_torsplit) #not necessary since if has a /, should be removed in above line
formatrem_torsplit = re.sub('\s+', ' ', formatrem_torsplit)
logger.fdebug(str(len(formatrem_torsplit)) + ' - formatrem_torsplit : ' + formatrem_torsplit.lower())
logger.fdebug(str(len(formatrem_seriesname)) + ' - formatrem_seriesname :' + formatrem_seriesname.lower())
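
(The hunk above now runs the watched series name and the torrent title through the same cleanup, so '&', '-' and '/' no longer prevent a match. A small standalone sketch of that normalization; the regexes approximate, rather than copy, the ones in the diff.)

import re

def normalize(name):
    name = re.sub(r'&', ' ', name)               # '&' becomes a plain space
    name = re.sub(r"['!@#$%:;=?.]", '', name)    # strip punctuation but leave '/' alone here
    name = re.sub(r'[-/]', ' ', name)            # dashes and slashes become spaces
    name = re.sub(r'\s+', ' ', name)             # collapse runs of whitespace
    return name.strip()

print(normalize("Batman & Robin - Vol./2"))      # Batman Robin Vol 2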

View File

@@ -208,6 +208,7 @@ def search_init(ComicName, IssueNumber, ComicYear, SeriesYear, Publisher, IssueD
findit = NZB_SEARCH(AS_Alternate, IssueNumber, ComicYear, SeriesYear, Publisher, IssueDate, StoreDate, searchprov, prov_count, IssDateFix, IssueID, UseFuzzy, newznab_host, ComicVersion=ComicVersion, SARC=SARC, IssueArcID=IssueArcID, RSS="yes", ComicID=ComicID)
if findit == 'yes':
break
if findit == 'yes': break
else:
findit = NZB_SEARCH(ComicName, IssueNumber, ComicYear, SeriesYear, Publisher, IssueDate, StoreDate, searchprov, prov_count, IssDateFix, IssueID, UseFuzzy, newznab_host, ComicVersion=ComicVersion, SARC=SARC, IssueArcID=IssueArcID, ComicID=ComicID)
@@ -226,6 +227,7 @@ def search_init(ComicName, IssueNumber, ComicYear, SeriesYear, Publisher, IssueD
findit = NZB_SEARCH(AS_Alternate, IssueNumber, ComicYear, SeriesYear, Publisher, IssueDate, StoreDate, searchprov, prov_count, IssDateFix, IssueID, UseFuzzy, newznab_host, ComicVersion=ComicVersion, SARC=SARC, IssueArcID=IssueArcID, ComicID=ComicID)
if findit == 'yes':
break
if findit == 'yes': break
if searchprov == 'newznab':
searchprov = newznab_host[0].rstrip()
@@ -863,11 +865,14 @@ def NZB_SEARCH(ComicName, IssueNumber, ComicYear, SeriesYear, Publisher, IssueDa
i+=1
logger.fdebug("chg_comic:" + str(chg_comic))
findcomic_chksplit = re.sub('[\-\:\,\.\?]', ' ', findcomic)
chg_comic = re.sub('[\s]', '', chg_comic)
findcomic_chksplit = re.sub('[\&]', 'and', findcomic_chksplit)
findcomic_chksplit = re.sub('[\s]', '', findcomic_chksplit)
#print chg_comic.upper()
#print findcomic_chksplit.upper()
if chg_comic.upper() == findcomic_chksplit.upper():
chg_comic = re.sub('[\-\:\,\.\?]', ' ', chg_comic)
chg_comic = re.sub('[\&]', 'and', chg_comic)
chg_comic = re.sub('[\s]', '', chg_comic)
logger.fdebug('chg_comic: ' + chg_comic.upper())
logger.fdebug('findcomic_chksplit: ' + findcomic_chksplit.upper())
if chg_comic.upper() in findcomic_chksplit.upper():
logger.fdebug("series contains numerics...adjusting..")
else:
changeup = "." + splitit[(len(splitit)-1)]
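
(This is the core of the #744 fix: the parsed result title and the watched title are squashed the same way, with '&' normalized to 'and', separators turned into spaces and all whitespace removed, before a containment check, so an Alternate Search on a series with '&' in its name can finally match. A minimal sketch under the assumption that both inputs are plain strings.)

import re

def squash(name):
    name = re.sub(r'[-:,.?]', ' ', name)   # separators to spaces
    name = re.sub(r'&', 'and', name)       # '&' normalized to 'and'
    return re.sub(r'\s', '', name)         # drop all whitespace

def titles_match(result_title, watched_title):
    # containment rather than equality, mirroring the 'in' check in the diff
    return squash(result_title).upper() in squash(watched_title).upper()

print(titles_match('Cloak & Dagger', 'Cloak and Dagger'))   # True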

View File

@@ -131,10 +131,10 @@ def dbUpdate(ComicIDList=None):
newVAL = {"Status": "Skipped"}
if any(d['IssueID'] == str(issue['IssueID']) for d in ann_list):
logger.fdebug("annual detected for " + str(issue['IssueID']) + " #: " + str(issue['Issue_Number']))
#logger.fdebug("annual detected for " + str(issue['IssueID']) + " #: " + str(issue['Issue_Number']))
myDB.upsert("Annuals", newVAL, ctrlVAL)
else:
logger.fdebug('#' + str(issue['Issue_Number']) + ' writing issuedata: ' + str(newVAL))
#logger.fdebug('#' + str(issue['Issue_Number']) + ' writing issuedata: ' + str(newVAL))
myDB.upsert("Issues", newVAL, ctrlVAL)
fndissue.append({"IssueID": issue['IssueID']})
icount+=1
@@ -157,7 +157,7 @@ def dbUpdate(ComicIDList=None):
for newi in newiss:
ctrlVAL = {"IssueID": newi['IssueID']}
newVAL = {"Status": newi['Status']}
logger.fdebug('writing issuedata: ' + str(newVAL))
#logger.fdebug('writing issuedata: ' + str(newVAL))
myDB.upsert("Issues", newVAL, ctrlVAL)
logger.info('I have added ' + str(len(newiss)) + ' new issues for this series that were not present before.')
@@ -171,6 +171,7 @@ def dbUpdate(ComicIDList=None):
def latest_update(ComicID, LatestIssue, LatestDate):
# here we add to comics.latest
#logger.info(str(ComicID) + ' - updating latest_date to : ' + str(LatestDate))
myDB = db.DBConnection()
latestCTRLValueDict = {"ComicID": ComicID}
newlatestDict = {"LatestIssue": str(LatestIssue),
@@ -250,6 +251,10 @@ def upcoming_update(ComicID, ComicName, IssueNumber, IssueDate, forcecheck=None,
if ComicID[:1] == "G": mylar.importer.GCDimport(ComicID,pullupd)
else: mylar.importer.updateissuedata(ComicID, ComicName, calledfrom='weeklycheck')#mylar.importer.addComictoDB(ComicID,mismatch,pullupd)
else:
#if 'annual' in ComicName.lower():
# logger.fdebug('Annual detected - refreshing series.')
# mylar.importer.updateissuedata(ComicID, ComicName, calledfrom='weeklycheck', issuetype='annual')
#else:
logger.fdebug('It has not been longer than 5 hours since we last did this...we will wait so we do not hammer things.')
return
else:

View File

@@ -1022,7 +1022,7 @@ class WebInterface(object):
#limittheyear.append(cf['IssueDate'][-4:])
for ser in cflist:
logger.info('looking for new data for ' + ser['ComicName'] + '[#' + str(ser['IssueNumber']) + '] (' + str(ser['IssueDate'][-4:]) + ')')
searchresults, explicit = mb.findComic(ser['ComicName'], mode='pullseries', issue=ser['IssueNumber'], limityear=ser['IssueDate'][-4:], explicit='all')
searchresults, explicit = mb.findComic(ser['ComicName'], mode='pullseries', issue=ser['IssueNumber'], limityear=ser['IssueDate'][-4:], explicit='explicit')
print searchresults
if len(searchresults) > 1:
logger.info('More than one result returned - this may have to be a manual add')
@@ -1323,6 +1323,8 @@ class WebInterface(object):
myDB = db.DBConnection()
comicsToAdd = []
for ComicID in args:
if ComicID == 'manage_comic_length':
break
if action == 'delete':
myDB.action('DELETE from comics WHERE ComicID=?', [ComicID])
myDB.action('DELETE from issues WHERE ComicID=?', [ComicID])
@@ -1338,7 +1340,6 @@ class WebInterface(object):
comicsToAdd.append(ComicID)
if len(comicsToAdd) > 0:
logger.fdebug("Refreshing comics: %s" % comicsToAdd)
#threading.Thread(target=importer.addComicIDListToDB, args=[comicsToAdd]).start()
threading.Thread(target=updater.dbUpdate, args=[comicsToAdd]).start()
raise cherrypy.HTTPRedirect("home")
markComics.exposed = True
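
(The table error fixed above appears to come from the ManageComics form posting a 'manage_comic_length' field, presumably the DataTables page-length selector, alongside the checkbox values; treating it as a ComicID blew up the database calls. The guard simply stops the loop when that field is reached. A rough sketch of the guarded collection step, with a made-up args tuple standing in for the form submission.)

def collect_comic_ids(args):
    comic_ids = []
    for comic_id in args:
        # 'manage_comic_length' is a form control, not a ComicID; stop here
        if comic_id == 'manage_comic_length':
            break
        comic_ids.append(comic_id)
    return comic_ids

print(collect_comic_ids(('1234', '5678', 'manage_comic_length', '25')))   # ['1234', '5678']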

View File

@@ -613,6 +613,7 @@ def pullitcheck(comic1off_name=None,comic1off_id=None,forcecheck=None, futurepul
if comicnm == watchcomic.upper() or modcomicnm == modwatchcomic.upper():
logger.fdebug("matched on:" + comicnm + "..." + watchcomic.upper())
watchcomic = unlines[cnt]
pass
# elif ("ANNUAL" in week['EXTRA']):
# pass
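
(The final hunk is the Upcoming-list title fix from the commit message: once a pull-list entry matches a watched series, the title stored for the series (unlines[cnt]) is carried forward instead of the parsed, uppercased name. A tiny illustrative sketch with made-up names; it is not the actual pullitcheck logic.)

def pick_display_title(parsed_name, watched_titles):
    # watched_titles maps an uppercased/normalized form back to the stored series title
    key = parsed_name.upper().strip()
    # fall back to the parsed name only when the series is not being watched
    return watched_titles.get(key, parsed_name)

print(pick_display_title('AMAZING SPIDER-MAN', {'AMAZING SPIDER-MAN': 'Amazing Spider-Man'}))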