diff --git a/mylar/cv.py b/mylar/cv.py
index 82ec6acb..cb4f3ca1 100755
--- a/mylar/cv.py
+++ b/mylar/cv.py
@@ -81,12 +81,12 @@ def GetComicInfo(comicid,dom):
         comic['ComicYear'] = dom.getElementsByTagName('start_year')[n].firstChild.wholeText
         comic['ComicURL'] = dom.getElementsByTagName('site_detail_url')[n].firstChild.wholeText
         comic['ComicIssues'] = dom.getElementsByTagName('count_of_issues')[n].firstChild.wholeText
-        comic['ComicDesc'] = dom.getElementsByTagName('description')[n].firstChild.wholeText
+        #comic['ComicDesc'] = dom.getElementsByTagName('description')[n].firstChild.wholeText
         comic['ComicImage'] = dom.getElementsByTagName('super_url')[n].firstChild.wholeText
         comic['ComicPublisher'] = dom.getElementsByTagName('name')[cntit+1].firstChild.wholeText
-        comic['description'] = dom.getElementsByTagName('description')[n].firstChild.wholeText
-        comdescst = comic['description'].find('<br><br>')
-        comdesc = comic['description'][:comdescst]
+        #comic['description'] = dom.getElementsByTagName('description')[n].firstChild.wholeText
+        #comdescst = comic['description'].find('<br><br>')
+        #comdesc = comic['description'][:comdescst]
         #print ("Description: " + str(comdesc))
 
         comicchoice.append({
@@ -95,7 +95,7 @@ def GetComicInfo(comicid,dom):
             'Comicid': comicid,
             'ComicURL': comic['ComicURL'],
             'ComicIssues': comic['ComicIssues'],
-            'ComicDesc': comic['ComicDesc'],
+           # 'ComicDesc': comic['ComicDesc'],
             'ComicImage': comic['ComicImage'],
             'ComicPublisher': comic['ComicPublisher']
             })
diff --git a/mylar/filechecker.py b/mylar/filechecker.py
index a341a382..8f19b981 100755
--- a/mylar/filechecker.py
+++ b/mylar/filechecker.py
@@ -38,6 +38,9 @@ def listFiles(dir,watchcomic):
         if '_' in subname:
             subname = subname.replace('_', ' ')
         if watchcomic.lower() in subname.lower():
+            if 'annual' in subname.lower():
+                print ("it's an annual - unsure how to proceed")
+                break
             comicpath = os.path.join(basedir, item)
             #print ( watchcomic + " - watchlist match on : " + comicpath)
             comicsize = os.path.getsize(comicpath)
diff --git a/mylar/importer.py b/mylar/importer.py
index efc9d793..d9990513 100755
--- a/mylar/importer.py
+++ b/mylar/importer.py
@@ -115,8 +115,10 @@ def addComictoDB(comicid):
         #print ("root dir for series: " + comlocation)
 
     #try to account for CV not updating new issues as fast as GCD
+    #seems CV doesn't update total counts
+    #comicIssues = gcdinfo['totalissues']
     if gcdinfo['gcdvariation'] == "yes":
-        # comicIssues = str(int(comic['ComicIssues']) + 1)
+        #comicIssues = str(int(comic['ComicIssues']) + 1)
         comicIssues = comic['ComicIssues']
     else:
         comicIssues = comic['ComicIssues']
@@ -126,7 +128,6 @@ def addComictoDB(comicid):
                     "ComicYear": comic['ComicYear'],
                     "ComicImage": comic['ComicImage'],
                     "Total": comicIssues,
-                    "Description": comic['ComicDesc'],
                     "ComicLocation": comlocation,
                     "ComicPublisher": comic['ComicPublisher'],
                     "ComicPublished": parseit.resultPublished,
@@ -204,7 +205,7 @@ def addComictoDB(comicid):
                 int_issnum = int( gcdis / 1000 )
             #get the latest issue / date using the date.
             if gcdval['GCDDate'] > latestdate:
-                latestiss = str(gcd_issue)
+                latestiss = str(issnum)
                 latestdate = str(gcdval['GCDDate'])
             break
         #bb = iscnt
@@ -294,8 +295,6 @@ def addComictoDB(comicid):
 #        logger.debug(u"Updating cache for: " + comic['ComicName'])
 #        cache.getThumb(ComicIDcomicid)
 
-    latestiss = latestiss + ".00"
-
     controlValueStat = {"ComicID": comicid}
     newValueStat = {"Status": "Active",
                     "Have": havefiles,
diff --git a/mylar/parseit.py b/mylar/parseit.py
index 7de3bca1..9e0b5bf2 100755
--- a/mylar/parseit.py
+++ b/mylar/parseit.py
@@ -129,7 +129,7 @@ def GCDScraper(ComicName, ComicYear, Total, ComicID):
         #print ( "Comic Name: " + str(resultName[n]) )
         fip = resultp('a',href=True)[1]
         resultID.append(fip['href'])
-        print ( "ID: " + str(resultID[n]) )
+        #print ( "ID: " + str(resultID[n]) )
 
         subtxt3 = resultp('td')[3]
         resultYear.append(subtxt3.findNext(text=True))
@@ -161,6 +161,7 @@ def GCDScraper(ComicName, ComicYear, Total, ComicID):
             resultURL = str(resultID[n])
             rptxt = resultp('td')[6]
             resultPublished = rptxt.findNext(text=True)
+            TotalIssues = resultIssues[n]
             #print ("Series Published: " + str(resultPublished))
             break
 
@@ -292,5 +293,6 @@ def GCDScraper(ComicName, ComicYear, Total, ComicID):
         #altcount = 0
         n+=1
     gcdinfo['gcdvariation'] = issvariation
+    gcdinfo['totalissues'] = TotalIssues
     return gcdinfo
 ## -- end (GCD) -- ##
diff --git a/mylar/updater.py b/mylar/updater.py
index f6754c72..1acdd3b4 100755
--- a/mylar/updater.py
+++ b/mylar/updater.py
@@ -70,11 +70,11 @@ def weekly_update(ComicName):
 
 def foundsearch(ComicID, IssueID):
     myDB = db.DBConnection()
-    print ("Updater-ComicID: " + str(ComicID))
-    print ("Updater-IssueID: " + str(IssueID))
+    #print ("Updater-ComicID: " + str(ComicID))
+    #print ("Updater-IssueID: " + str(IssueID))
     comic = myDB.action('SELECT * FROM comics WHERE ComicID=?', [ComicID]).fetchone()
     issue = myDB.action('SELECT * FROM issues WHERE IssueID=?', [IssueID]).fetchone()
-    print ("comic location: " + comic['ComicLocation'])
+    #print ("comic location: " + comic['ComicLocation'])
     #this is too soon - file hasn't downloaded even yet.
     #fixed and addressed in search.py and follow-thru here!
     #check sab history for completion here :)
@@ -93,10 +93,10 @@ def foundsearch(ComicID, IssueID):
     myDB.upsert("comics", newHave, HaveDict)
     #---
     issue = myDB.action('SELECT * FROM issues WHERE IssueID=? AND ComicID=?', [IssueID, ComicID]).fetchone()
-    print ("updating status to snatched")
+    #print ("updating status to snatched")
     controlValueDict = {"IssueID": IssueID}
     newValueDict = {"Status": "Snatched"}
-    print ("updating snatched db.")
+    #print ("updating snatched db.")
     myDB.upsert("issues", newValueDict, controlValueDict)
     snatchedupdate = {"IssueID": IssueID}
     newsnatchValues = {"ComicName": comic['ComicName'],
@@ -110,7 +110,7 @@ def foundsearch(ComicID, IssueID):
 
     #this becomes an issue with files downloaded x2 or same name...
 
- print ("finished updating snatched db.") + #print ("finished updating snatched db.") logger.info(u"Updating now complete for " + str(comic['ComicName']) + " issue: " + str(issue['Issue_Number'])) return @@ -118,53 +118,69 @@ def forceRescan(ComicID): myDB = db.DBConnection() # file check to see if issue exists rescan = myDB.action('SELECT * FROM comics WHERE ComicID=?', [ComicID]).fetchone() - + logger.info(u"Now rechecking files for " + str(rescan['ComicName']) + " (" + str(rescan['ComicYear']) + ") in " + str(rescan['ComicLocation']) ) fc = filechecker.listFiles(dir=rescan['ComicLocation'], watchcomic=rescan['ComicName']) iscnt = rescan['Total'] havefiles = 0 - mylar.AUTOWANT_ALL = 0 fccnt = int(fc['comiccount']) issnum = 1 fcnew = [] n = 0 + reissues = myDB.action('SELECT * FROM issues WHERE ComicID=?', [ComicID]).fetchall() while (n < iscnt): + reiss = reissues[n] + int_iss = reiss['Int_IssueNumber'] fn = 0 haveissue = "no" - - print ("on issue " + str(int(n+1)) + " of " + str(iscnt) + " issues") - print ("checking issue: " + str(issnum)) - # stupid way to do this, but check each issue against file-list in fc. - while (fn < fccnt): + while (fn < fccnt): tmpfc = fc['comiclist'][fn] - print (str(issnum) + "against ... " + str(tmpfc['ComicFilename'])) temploc = tmpfc['ComicFilename'].replace('_', ' ') - fcnew = shlex.split(str(temploc)) - fcn = len(fcnew) - som = 0 - # this loop searches each word in the filename for a match. - while (som < fcn): - print (fcnew[som]) - #counts get buggered up when the issue is the last field in the filename - ie. '50.cbr' - if ".cbr" in fcnew[som]: - fcnew[som] = fcnew[som].replace(".cbr", "") - elif ".cbz" in fcnew[som]: - fcnew[som] = fcnew[som].replace(".cbz", "") - if fcnew[som].isdigit(): - print ("digit detected") - fcdigit = fcnew[som].lstrip('0') - print ( "filename:" + str(int(fcnew[som])) + " - issue: " + str(issnum) ) - #fcdigit = fcnew[som].lstrip('0') + ".00" - if int(fcdigit) == int(issnum): - print ("matched") - print ("We have this issue - " + str(issnum) + " at " + tmpfc['ComicFilename'] ) - havefiles+=1 - haveissue = "yes" - isslocation = str(tmpfc['ComicFilename']) - break - print ("failed word match on:" + str(fcnew[som]) + "..continuing next word") - som+=1 - print (str(temploc) + " doesn't match anything...moving to next file.") + if 'annual' not in temploc: + fcnew = shlex.split(str(temploc)) + fcn = len(fcnew) + som = 0 + # this loop searches each word in the filename for a match. + while (som < fcn): + #counts get buggered up when the issue is the last field in the filename - ie. '50.cbr' + if ".cbr" in fcnew[som]: + fcnew[som] = fcnew[som].replace(".cbr", "") + elif ".cbz" in fcnew[som]: + fcnew[som] = fcnew[som].replace(".cbz", "") + if fcnew[som].isdigit(): + if int(fcnew[som]) > 0: + fcdigit = fcnew[som].lstrip('0') + else: fcdigit = "0" + if int(fcdigit) == int_iss: + havefiles+=1 + haveissue = "yes" + isslocation = str(tmpfc['ComicFilename']) + break + som+=1 + else: pass fn+=1 - issnum+=1 - print ("you have " + str(havefiles) + " comics!") + if haveissue == "yes": break + #we have the # of comics, now let's update the db. 
+ if haveissue == "no": + isslocation = "None" + if mylar.AUTOWANT_ALL: + issStatus = "Wanted" + else: + issStatus = "Skipped" + elif haveissue == "yes": + issStatus = "Downloaded" + controlValueDict = {"IssueID": reiss['IssueID']} + newValueDict = {"Location": isslocation, + "Status": issStatus + } + myDB.upsert("issues", newValueDict, controlValueDict) + n+=1 + + #let's update the total count of comics that was found. + controlValueStat = {"ComicID": rescan['ComicID']} + newValueStat = {"Have": havefiles + } + + myDB.upsert("comics", newValueStat, controlValueStat) + logger.info(u"I've found " + str(havefiles) + " / " + str(rescan['Total']) + " issues." ) + return diff --git a/mylar/webserve.py b/mylar/webserve.py index 411e469b..9e64dc6f 100755 --- a/mylar/webserve.py +++ b/mylar/webserve.py @@ -134,7 +134,7 @@ class WebInterface(object): #raise cherrypy.HTTPRedirect("artistPage?ComicID=%s" & ComicID) editIssue.exposed=True - def markissues(self, action=None, **args): + def markissues(self, ComicID=None, action=None, **args): myDB = db.DBConnection() if action == 'WantedNew': newaction = 'Wanted' @@ -146,7 +146,7 @@ class WebInterface(object): mi = myDB.action("SELECT * FROM issues WHERE IssueID=?",[IssueID]).fetchone() miyr = myDB.action("SELECT ComicYear FROM comics WHERE ComicID=?", [mi['ComicID']]).fetchone() logger.info(u"Marking %s %s as %s" % (mi['ComicName'], mi['Issue_Number'], newaction)) - controlValueDict = {"IssueID": mbid} + controlValueDict = {"IssueID": IssueID} newValueDict = {"Status": newaction} myDB.upsert("issues", newValueDict, controlValueDict) if action == 'Skipped': pass @@ -355,8 +355,6 @@ class WebInterface(object): forceSearch.exposed = True def forceRescan(self, ComicID): - myDB = db.DBConnection() - comic = myDB.action('SELECT * FROM comics WHERE ComicID=?', [ComicID]).fetchone() threading.Thread(target=updater.forceRescan, args=[ComicID]).start() raise cherrypy.HTTPRedirect("artistPage?ComicID=%s" % ComicID) forceRescan.exposed = True