FIX:(#639) If newznab was enabled and a search returned API hits, it would error out when doing sdate comparisons, IMP: Added pagination to the Annuals table as it was being cut off, FIX: Flipped back to using offset instead of page for ComicVine searches, IMP: Removed unnecessary print statements

This commit is contained in:
evilhero 2014-03-02 14:57:57 -05:00
parent 436edde0e3
commit 08f8b4e265
6 changed files with 11 additions and 21 deletions

View File

@@ -680,6 +680,7 @@
         "sSearch": ""},
     "bStateSave": true,
     "bFilter": false,
+    "sPaginationType": "full_numbers",
     "iDisplayLength": 10
     });
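
Note on the change above: these are the legacy (1.x-era) DataTables options, where setting "sPaginationType" to "full_numbers" swaps the default two-button pager for First/Previous/numbered-pages/Next/Last controls. Combined with "iDisplayLength": 10, the Annuals table now pages through all of its rows ten at a time instead of appearing cut off after the first ten.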

View File

@@ -207,7 +207,6 @@ class PostProcessor(object):
                 nm+=1
                 continue
             else:
-                print 'i made it here...'
                 fn = 0
                 fccnt = int(watchmatch['comiccount'])
                 if len(watchmatch) == 1: continue
@@ -218,7 +217,7 @@ class PostProcessor(object):
                     break
                 temploc= tmpfc['JusttheDigits'].replace('_', ' ')
                 temploc = re.sub('[\#\']', '', temploc)
-                logger.fdebug("temploc: " + str(temploc))
+                #logger.fdebug("temploc: " + str(temploc))
                 ww = shlex.split(temploc)
                 lnw = len(ww)
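
For context, the surviving lines here take the digits-and-extras portion of a scanned filename and tokenize it. A minimal standalone sketch of that normalization, using a made-up 'JusttheDigits' value for illustration:

    import re
    import shlex

    justthedigits = '100.5_(2014)_#cover'        # hypothetical filename remainder
    temploc = justthedigits.replace('_', ' ')    # underscores back to spaces
    temploc = re.sub(r'[\#\']', '', temploc)     # strip '#' and apostrophes
    ww = shlex.split(temploc)                    # whitespace-aware tokenizing
    print(ww)                                    # ['100.5', '(2014)', 'cover']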

View File

@@ -720,7 +720,7 @@ def issuedigits(issnum):
         try:
             int_issnum = (int(issb4dec) * 1000) + (int(issaftdec) * 10)
         except ValueError:
-            logger.fdebug('This has no issue # for me to get - Either a Graphic Novel or one-shot.')
+            #logger.fdebug('This has no issue # for me to get - Either a Graphic Novel or one-shot.')
             int_issnum = 999999999999999
     else:
         try:
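
The arithmetic above packs an issue number into a single sortable integer: the whole part is scaled by 1000 and the decimal part by 10, so issue '12.5' becomes 12 * 1000 + 5 * 10 = 12050 and sorts between issues 12 and 13. When nothing numeric can be parsed (a graphic novel or one-shot), the sentinel 999999999999999 pushes the entry past every real issue. A minimal sketch of this branch, assuming the number has already been split on the decimal point the way issb4dec/issaftdec are in the code above:

    def issue_to_int(issnum):
        issb4dec, _, issaftdec = issnum.partition('.')
        try:
            return int(issb4dec) * 1000 + (int(issaftdec) * 10 if issaftdec else 0)
        except ValueError:
            # nothing numeric to work with - graphic novel / one-shot
            return 999999999999999

    print(issue_to_int('12.5'))  # 12050
    print(issue_to_int('GN'))    # 999999999999999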

View File

@@ -35,7 +35,7 @@ def pullsearch(comicapi,comicquery,offset):
     #PULLURL = mylar.CVURL + 'search?api_key=' + str(comicapi) + '&resources=volume&query=' + u_comicquery + '&field_list=id,name,start_year,site_detail_url,count_of_issues,image,publisher,description&format=xml&page=' + str(offset)
     # 02/22/2014 use the volume filter label to get the right results.
-    PULLURL = mylar.CVURL + 'volumes?api_key=' + str(comicapi) + '&filter=name:' + u_comicquery + '&field_list=id,name,start_year,site_detail_url,count_of_issues,image,publisher,description&format=xml&page=' + str(offset) #offset=' + str(offset)
+    PULLURL = mylar.CVURL + 'volumes?api_key=' + str(comicapi) + '&filter=name:' + u_comicquery + '&field_list=id,name,start_year,site_detail_url,count_of_issues,image,publisher,description&format=xml&offset=' + str(offset) # 2012/22/02 - CVAPI flipped back to offset instead of page
     #all these imports are standard on most modern python implementations
     #download the file:
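
The fix above reverts the query string from page-number paging back to record-offset paging: ComicVine's 'volumes' endpoint skips 'offset' records rather than jumping to page N. A hedged sketch of the URL construction (the base URL and field list are abbreviated stand-ins here; the real code uses mylar.CVURL and a pre-encoded query):

    try:
        from urllib import quote_plus        # Python 2
    except ImportError:
        from urllib.parse import quote_plus  # Python 3

    CVURL = 'http://api.comicvine.com/'      # assumed stand-in for mylar.CVURL

    def pull_url(api_key, query, offset):
        # offset counts records to skip (0, 100, 200, ...), not a page number
        return (CVURL + 'volumes?api_key=' + api_key +
                '&filter=name:' + quote_plus(query) +
                '&field_list=id,name,start_year&format=xml&offset=' + str(offset))
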
@@ -70,10 +70,9 @@ def findComic(name, mode, issue, limityear=None):
     #comicquery=name.replace(" ", "%20")
     #comicquery=name.replace(" ", " AND ")
     comicapi='583939a3df0a25fc4e8b7a29934a13078002dc27'
-    offset = 1
     #let's find out how many results we get from the query...
-    searched = pullsearch(comicapi,comicquery,1)
+    searched = pullsearch(comicapi,comicquery,0)
     if searched is None: return False
     totalResults = searched.getElementsByTagName('number_of_total_results')[0].firstChild.wholeText
     logger.fdebug("there are " + str(totalResults) + " search results...")
@@ -83,12 +82,9 @@ def findComic(name, mode, issue, limityear=None):
     while (countResults < int(totalResults)):
         #logger.fdebug("querying " + str(countResults))
         if countResults > 0:
-            #new api - have to change to page # instead of offset count
-            offsetcount = (countResults/100) + 1
+            #2012/22/02 - CV API flipped back to offset usage instead of page :(
+            offsetcount = countResults
             #if countResults == 1: offsetcount = 0
             #else: offsetcount = countResults
         searched = pullsearch(comicapi,comicquery,offsetcount)
         comicResults = searched.getElementsByTagName('volume')
         body = ''
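
With offsets, the running result count can feed straight back in as the next offset, since ComicVine returns at most 100 records per call; the removed line shows the old page arithmetic, (countResults/100) + 1. A minimal sketch of the resulting paging scheme:

    def offsets(total_results, page_size=100):
        # 0, 100, 200, ... until every record is covered; under the old
        # page-based API this would have been (count / page_size) + 1
        return range(0, total_results, page_size)

    for off in offsets(250):
        print(off)  # 0, 100, 200 - one pullsearch() call per offset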

View File

@@ -723,7 +723,7 @@ def NZB_SEARCH(ComicName, IssueNumber, ComicYear, SeriesYear, Publisher, IssueDa
                 logger.fdebug('invalid date found. Unable to continue - skipping result.')
                 continue
             #use store date instead of publication date for comparisons since publication date is usually +2 months
-            if StoreDate is None or StoreDate is '0000-00-00':
+            if StoreDate is None or StoreDate == '0000-00-00':
                 stdate = IssueDate
             else:
                 stdate = StoreDate
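
The one-character fix above matters because 'is' tests object identity while '==' tests equality: a StoreDate string coming back from the database is a different object from the '0000-00-00' literal even when the text matches, so the old check could silently fail. A small demonstration (the identity result is implementation-specific, which is exactly why the comparison was unreliable; newer Pythons even warn about 'is' with a literal):

    s = '-'.join(['0000', '00', '00'])  # built at runtime: a distinct object
    print(s == '0000-00-00')            # True  - compares contents
    print(s is '0000-00-00')            # False - compares identity

The 'StoreDate is None' half of the test is fine as-is, since None is a singleton.
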
@@ -743,6 +743,7 @@ def NZB_SEARCH(ComicName, IssueNumber, ComicYear, SeriesYear, Publisher, IssueDa
             #logger.fdebug('econv:' + str(econv))
             #convert it to a numeric
             issuedate_int = time.mktime(econv[:len(econv)-1])
+            #logger.fdebug('issuedate_int:' + str(issuedate_int))
             if postdate_int < issuedate_int:
                 logger.fdebug(str(pubdate) + ' is before store date of ' + str(stdate) + '. Ignoring search result as this is not the right issue.')
                 continue
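
Both dates end up as epoch seconds so the guard above is a plain numeric comparison; econv here appears to be a parsed date tuple trimmed to the nine fields time.mktime() expects. A hedged standalone sketch of the same comparison built with time.strptime instead:

    import time

    def to_epoch(datestr, fmt='%Y-%m-%d'):
        # date string -> struct_time -> seconds since the epoch
        return time.mktime(time.strptime(datestr, fmt))

    postdate_int = to_epoch('2014-02-20')   # when the NZB was posted
    issuedate_int = to_epoch('2014-02-26')  # the issue's store date
    if postdate_int < issuedate_int:
        print('posted before the store date - not the right issue, skip it')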

View File

@@ -644,8 +644,8 @@ class WebInterface(object):
     addArtists.exposed = True
     def queueissue(self, mode, ComicName=None, ComicID=None, ComicYear=None, ComicIssue=None, IssueID=None, new=False, redirect=None, SeriesYear=None, SARC=None, IssueArcID=None):
-        print "ComicID:" + str(ComicID)
-        print "mode:" + str(mode)
+        logger.fdebug('ComicID:' + str(ComicID))
+        logger.fdebug('mode:' + str(mode))
         now = datetime.datetime.now()
         myDB = db.DBConnection()
         #mode dictates type of queue - either 'want' for individual comics, or 'series' for series watchlist.
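
The swap above routes debug output through Mylar's logger (fdebug is Mylar's own debug-level helper) instead of printing to stdout, so it honors log levels, handlers, and formatting. A rough stdlib equivalent, for illustration only:

    import logging

    logging.basicConfig(level=logging.DEBUG)
    logger = logging.getLogger('mylar')  # name chosen for illustration

    ComicID, mode = '12345', 'want'      # sample values
    logger.debug('ComicID:' + str(ComicID))
    logger.debug('mode:' + str(mode))
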
@@ -657,7 +657,6 @@ class WebInterface(object):
             # comics that have X many issues
             raise cherrypy.HTTPRedirect("searchit?name=%s&issue=%s&mode=%s" % (ComicName, 'None', 'pullseries'))
         elif ComicID is None and mode == 'readlist':
-            print "blahblah"
             # this is for marking individual comics from a readlist to be downloaded.
             # Because there is no associated ComicID or IssueID, follow same pattern as in 'pullwant'
             # except we know the Year
@@ -711,30 +710,24 @@ class WebInterface(object):
 #            newStatus = {"Status": "Wanted"}
 #            myDB.upsert("issues", newStatus, controlValueDict)
             #for future reference, the year should default to current year (.datetime)
-            print 'before db'
             if mode == 'want':
                 issues = myDB.action("SELECT IssueDate, ReleaseDate FROM issues WHERE IssueID=?", [IssueID]).fetchone()
             elif mode == 'want_ann':
                 issues = myDB.action("SELECT IssueDate, ReleaseDate FROM annuals WHERE IssueID=?", [IssueID]).fetchone()
-            print 'after db'
             if ComicYear == None:
                 ComicYear = str(issues['IssueDate'])[:4]
-            print 'after year'
             if issues['ReleaseDate'] is None:
                 logger.info('No Store Date found for given issue. This is probably due to not Refreshing the Series beforehand.')
                 logger.info('I Will assume IssueDate as Store Date, but you should probably Refresh the Series and try again if required.')
                 storedate = issues['IssueDate']
             else:
                 storedate = issues['ReleaseDate']
-            print 'there'
             miy = myDB.action("SELECT * FROM comics WHERE ComicID=?", [ComicID]).fetchone()
-            print 'miy'
             SeriesYear = miy['ComicYear']
             AlternateSearch = miy['AlternateSearch']
             Publisher = miy['ComicPublisher']
             UseAFuzzy = miy['UseFuzzy']
             ComicVersion = miy['ComicVersion']
-            print 'here'
             foundcom, prov = search.search_init(ComicName, ComicIssue, ComicYear, SeriesYear, Publisher, issues['IssueDate'], storedate, IssueID, AlternateSearch, UseAFuzzy, ComicVersion, mode=mode, ComicID=ComicID)
             if foundcom == "yes":
                 # file check to see if issue exists and update 'have' count
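
Beyond deleting the debug prints, the surviving logic encodes a fallback rule: searching should compare against the store date (ReleaseDate), and only when that was never populated does the cover date (IssueDate) stand in. A minimal sketch of that rule, with 'issues' standing in for the row fetched above:

    def pick_store_date(issues):
        if issues['ReleaseDate'] is None:
            # store date missing (series likely never refreshed) - fall back
            return issues['IssueDate']
        return issues['ReleaseDate']

    print(pick_store_date({'IssueDate': '2014-01-01', 'ReleaseDate': None}))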