diff --git a/data/interfaces/default/comicdetails.html b/data/interfaces/default/comicdetails.html index 66d26853..f1171a59 100644 --- a/data/interfaces/default/comicdetails.html +++ b/data/interfaces/default/comicdetails.html @@ -135,7 +135,7 @@ <% - if comic['Type'] == 'None' or comic['Type'] is None: + if comic['Type'] == 'None' or comic['Type'] is None or comic['Type'] == 'Print': comictype = 'Print' else: comictype = 'Digital' diff --git a/data/interfaces/default/config.html b/data/interfaces/default/config.html index 18452778..eccfa22e 100755 --- a/data/interfaces/default/config.html +++ b/data/interfaces/default/config.html @@ -516,7 +516,7 @@
- + (Mins) Force RSS
<% rss_last=mylar.RSS_LASTRUN %>last run: ${rss_last} diff --git a/data/interfaces/default/importresults_popup.html b/data/interfaces/default/importresults_popup.html index 43cd7458..c4a189b8 100755 --- a/data/interfaces/default/importresults_popup.html +++ b/data/interfaces/default/importresults_popup.html @@ -36,8 +36,10 @@ grade = 'A' else: grade = 'Z' + if result['haveit'] != "No": grade = 'H'; + %> diff --git a/data/interfaces/default/manage.html b/data/interfaces/default/manage.html index aaf76e3d..914a7b1b 100755 --- a/data/interfaces/default/manage.html +++ b/data/interfaces/default/manage.html @@ -138,7 +138,7 @@
- +
diff --git a/data/interfaces/default/storyarc.html b/data/interfaces/default/storyarc.html index 22e9aaae..83ac5687 100755 --- a/data/interfaces/default/storyarc.html +++ b/data/interfaces/default/storyarc.html @@ -96,20 +96,7 @@ %if f_nodata['ComicID'] is not None: diff --git a/data/interfaces/default/weeklypull.html b/data/interfaces/default/weeklypull.html index 81ba9e3c..e41818b1 100755 --- a/data/interfaces/default/weeklypull.html +++ b/data/interfaces/default/weeklypull.html @@ -100,7 +100,7 @@ add series %else: %if weekly['ISSUE'] == '1' or weekly['ISSUE'] == '0': - Watch + Watch %else: add series %endif diff --git a/mylar/PostProcessor.py b/mylar/PostProcessor.py index cfced792..5f892e31 100755 --- a/mylar/PostProcessor.py +++ b/mylar/PostProcessor.py @@ -168,23 +168,27 @@ class PostProcessor(object): path_to_move = dupeinfo[0]['to_dupe'] file_to_move = os.path.split(path_to_move)[1] - if dupeinfo[0]['action'] == 'dupe_src': + if dupeinfo[0]['action'] == 'dupe_src' and mylar.FILE_OPTS == 'move': logger.info('[DUPLICATE-CLEANUP] New File will be post-processed. Moving duplicate [' + path_to_move + '] to Duplicate Dump Folder for manual intervention.') else: - logger.info('[DUPLICATE-CLEANUP] New File will not be post-processed. Moving duplicate [' + path_to_move + '] to Duplicate Dump Folder for manual intervention.') - - #check to make sure duplicate_dump directory exists: - checkdirectory = filechecker.validateAndCreateDirectory(mylar.DUPLICATE_DUMP, True, module='[DUPLICATE-CLEANUP]') + if mylar.FILE_OPTS == 'move': + logger.info('[DUPLICATE-CLEANUP][MOVE-MODE] New File will not be post-processed. Moving duplicate [' + path_to_move + '] to Duplicate Dump Folder for manual intervention.') + else: + logger.info('[DUPLICATE-CLEANUP][COPY-MODE] NEW File will not be post-processed. Retaining file in original location [' + path_to_move + ']') + return True #this gets tricky depending on if it's the new filename or the existing filename, and whether or not 'copy' or 'move' has been selected. - try: - shutil.move(path_to_move, os.path.join(mylar.DUPLICATE_DUMP, file_to_move)) - except (OSError, IOError): - logger.warn('[DUPLICATE-CLEANUP] Failed to move ' + path_to_move + ' ... to ... ' + os.path.join(mylar.DUPLICATE_DUMP, file_to_move)) - return False + if mylar.FILE_OPTS == 'move': + #check to make sure duplicate_dump directory exists: + checkdirectory = filechecker.validateAndCreateDirectory(mylar.DUPLICATE_DUMP, True, module='[DUPLICATE-CLEANUP]') + try: + shutil.move(path_to_move, os.path.join(mylar.DUPLICATE_DUMP, file_to_move)) + except (OSError, IOError): + logger.warn('[DUPLICATE-CLEANUP] Failed to move ' + path_to_move + ' ... to ... ' + os.path.join(mylar.DUPLICATE_DUMP, file_to_move)) + return False - logger.warn('[DUPLICATE-CLEANUP] Successfully moved ' + path_to_move + ' ... to ... ' + os.path.join(mylar.DUPLICATE_DUMP, file_to_move)) - return True + logger.warn('[DUPLICATE-CLEANUP] Successfully moved ' + path_to_move + ' ... to ... ' + os.path.join(mylar.DUPLICATE_DUMP, file_to_move)) + return True def Process(self): module = self.module @@ -279,7 +283,7 @@ class PostProcessor(object): if not any(re.sub('[\|\s]', '', cname.lower()) == x for x in loopchk): loopchk.append(re.sub('[\|\s]', '', cname.lower())) - if 'annual' in mod_seriesname.lower(): + if all([mylar.ANNUALS_ON, 'annual' in mod_seriesname.lower()]): mod_seriesname = re.sub('annual', '', mod_seriesname, flags=re.I).strip() #make sure we add back in the original parsed filename here. 
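The behavioural change worth calling out in the PostProcessor.py hunk above is that duplicate cleanup is now a no-op in copy mode: only move mode relocates the loser to the Duplicate Dump folder. A condensed sketch of the rule (the function and argument names here are illustrative, not Mylar's API):

```python
import os
import shutil

def cleanup_duplicate(path_to_move, dump_dir, file_opts):
    """Sketch of the duplicate-cleanup rule in the hunk above.

    file_opts mirrors mylar.FILE_OPTS: only 'move' relocates the duplicate;
    in 'copy' mode the original is retained in place, so nothing is lost.
    """
    if file_opts != 'move':
        # copy mode: keep the file at its original location
        return True
    if not os.path.isdir(dump_dir):   # stand-in for validateAndCreateDirectory()
        os.makedirs(dump_dir)
    try:
        shutil.move(path_to_move, os.path.join(dump_dir, os.path.basename(path_to_move)))
    except (OSError, IOError):
        return False
    return True
```

One small nit in the patch itself: the "Successfully moved" message still goes out via logger.warn(); logger.info() would match its severity.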
diff --git a/mylar/__init__.py b/mylar/__init__.py index b4267b19..5a56092a 100755 --- a/mylar/__init__.py +++ b/mylar/__init__.py @@ -1190,8 +1190,13 @@ def initialize(): runImmediately=True, delay=30) + if ALT_PULL == 2: + weektimer = 4 + else: + weektimer = 24 + WeeklyScheduler = scheduler.Scheduler(weeklypullit.Weekly(), - cycleTime=datetime.timedelta(hours=24), + cycleTime=datetime.timedelta(hours=weektimer), threadName="WEEKLYCHECK", runImmediately=True, delay=10) @@ -1654,7 +1659,7 @@ def dbcheck(): c.execute('CREATE TABLE IF NOT EXISTS readinglist(StoryArcID TEXT, ComicName TEXT, IssueNumber TEXT, SeriesYear TEXT, IssueYEAR TEXT, StoryArc TEXT, TotalIssues TEXT, Status TEXT, inCacheDir TEXT, Location TEXT, IssueArcID TEXT, ReadingOrder INT, IssueID TEXT, ComicID TEXT, StoreDate TEXT, IssueDate TEXT, Publisher TEXT, IssuePublisher TEXT, IssueName TEXT, CV_ArcID TEXT, Int_IssueNumber INT, DynamicComicName TEXT)') c.execute('CREATE TABLE IF NOT EXISTS annuals (IssueID TEXT, Issue_Number TEXT, IssueName TEXT, IssueDate TEXT, Status TEXT, ComicID TEXT, GCDComicID TEXT, Location TEXT, ComicSize TEXT, Int_IssueNumber INT, ComicName TEXT, ReleaseDate TEXT, ReleaseComicID TEXT, ReleaseComicName TEXT, IssueDate_Edit TEXT)') c.execute('CREATE TABLE IF NOT EXISTS rssdb (Title TEXT UNIQUE, Link TEXT, Pubdate TEXT, Site TEXT, Size TEXT)') - c.execute('CREATE TABLE IF NOT EXISTS futureupcoming (ComicName TEXT, IssueNumber TEXT, ComicID TEXT, IssueID TEXT, IssueDate TEXT, Publisher TEXT, Status TEXT, DisplayComicName TEXT)') + c.execute('CREATE TABLE IF NOT EXISTS futureupcoming (ComicName TEXT, IssueNumber TEXT, ComicID TEXT, IssueID TEXT, IssueDate TEXT, Publisher TEXT, Status TEXT, DisplayComicName TEXT, weeknumber TEXT, year TEXT)') c.execute('CREATE TABLE IF NOT EXISTS failed (ID TEXT, Status TEXT, ComicID TEXT, IssueID TEXT, Provider TEXT, ComicName TEXT, Issue_Number TEXT, NZBName TEXT, DateFailed TEXT)') c.execute('CREATE TABLE IF NOT EXISTS searchresults (SRID TEXT, results Numeric, Series TEXT, publisher TEXT, haveit TEXT, name TEXT, deck TEXT, url TEXT, description TEXT, comicid TEXT, comicimage TEXT, issues TEXT, comicyear TEXT, ogcname TEXT)') conn.commit @@ -2086,6 +2091,17 @@ def dbcheck(): except sqlite3.OperationalError: c.execute('ALTER TABLE searchresults ADD COLUMN ogcname TEXT') + ## -- futureupcoming Table -- + try: + c.execute('SELECT weeknumber from futureupcoming') + except sqlite3.OperationalError: + c.execute('ALTER TABLE futureupcoming ADD COLUMN weeknumber TEXT') + + try: + c.execute('SELECT year from futureupcoming') + except sqlite3.OperationalError: + c.execute('ALTER TABLE futureupcoming ADD COLUMN year TEXT') + ## -- Failed Table -- try: c.execute('SELECT DateFailed from Failed') diff --git a/mylar/cv.py b/mylar/cv.py index 5ee37398..01dd19c9 100755 --- a/mylar/cv.py +++ b/mylar/cv.py @@ -169,11 +169,6 @@ def getComic(comicid, type, issueid=None, arc=None, arcid=None, arclist=None, co #within the tagging (with CT). This compiles all of the IssueID's during a scan (in 100's), and returns the corresponding CV data #related to the given IssueID's - namely ComicID, Name, Volume (more at some point, but those are the important ones). 
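# A worked example of the re-batching in the cv.py hunk below: endcnt is now derived
# from id_count on every pass, effectively endcnt = min(id_count + 100, len(comicidlist)),
# instead of starting at a fixed 100 and blindly adding 100 per loop. For 250 IDs the
# API windows become [0:100], [100:200], [200:250]; the removed "endcnt += 100" would
# have produced a final window of [200:300] and walked past the tail of the list
# whenever the count was not a multiple of 100.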
offset = 1 - if len(comicidlist) <= 100: - endcnt = len(comicidlist) - else: - endcnt = 100 - id_count = 0 import_list = [] logger.fdebug('comicidlist:' + str(comicidlist)) @@ -182,6 +177,11 @@ def getComic(comicid, type, issueid=None, arc=None, arcid=None, arclist=None, co #break it up by 100 per api hit #do the first 100 regardless in_cnt = 0 + if id_count + 100 <= len(comicidlist): + endcnt = id_count + 100 + else: + endcnt = len(comicidlist) + for i in range(id_count, endcnt): if in_cnt == 0: tmpidlist = str(comicidlist[i]) @@ -198,7 +198,6 @@ def getComic(comicid, type, issueid=None, arc=None, arcid=None, arclist=None, co tGIL = GetImportList(searched) import_list += tGIL - endcnt +=100 id_count +=100 return import_list diff --git a/mylar/filechecker.py b/mylar/filechecker.py index 6a18cefb..339e851e 100755 --- a/mylar/filechecker.py +++ b/mylar/filechecker.py @@ -426,14 +426,10 @@ class FileChecker(object): if lastissue_position == (split_file.index(sf) -1) and lastissue_label is not None and '#' not in sf: #find it in the original file to see if there's a decimal between. - #logger.fdebug('lastissue_label: ' + str(lastissue_label)) - #logger.fdebug('current sf: ' + str(sf)) - #logger.fdebug('file_length: ' + str(file_length)) - #logger.fdebug('search_file_length: ' + str(lastissue_mod_position)) - #logger.fdebug('trunced_search_length: ' + modfilename[lastissue_mod_position+1:] findst = lastissue_mod_position+1 - #findst = modfilename.find(lastissue_label, lastissue_mod_position+1) #lastissue_mod_position) #file_length - len(lastissue_label)) - #logger.fdebug('findst: ' + str(findst)) + if findst > len(modfilename): + findst = len(modfilename) -1 + if modfilename[findst] != '.' or modfilename[findst] != '#': #findst != '.' and findst != '#': if sf.isdigit(): logger.fdebug('2 seperate numbers detected. Assuming 2nd number is the actual issue') diff --git a/mylar/helpers.py b/mylar/helpers.py index adc835e7..4ea531b8 100755 --- a/mylar/helpers.py +++ b/mylar/helpers.py @@ -2031,6 +2031,19 @@ def issue_status(IssueID): else: return False +def crc(filename): + import hashlib + #memory in lieu of speed (line by line) + #prev = 0 + #for eachLine in open(filename,"rb"): + # prev = zlib.crc32(eachLine, prev) + #return "%X"%(prev & 0xFFFFFFFF) + + #speed in lieu of memory (file into memory entirely) + #return "%X" % (zlib.crc32(open(filename, "rb").read()) & 0xFFFFFFFF) + + return hashlib.md5(filename).hexdigest() + def issue_find_ids(ComicName, ComicID, pack, IssueNumber): import db, logger diff --git a/mylar/importer.py b/mylar/importer.py index 7d975d12..0722979e 100755 --- a/mylar/importer.py +++ b/mylar/importer.py @@ -582,7 +582,7 @@ def addComictoDB(comicid, mismatch=None, pullupd=None, imported=None, ogcname=No lastpubdate = issuedata['LastPubDate'] series_status = issuedata['SeriesStatus'] #move the files...if imported is not empty & not futurecheck (meaning it's not from the mass importer.) 
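# Note on the crc() helper added to helpers.py above: hashlib.md5(filename) digests the
# *path string*, not the file's contents, so renamed copies of one file hash differently
# while distinct files at a reused path hash the same. A content-based variant in the
# same chunked spirit as the commented-out zlib approach (illustrative only, not what
# the patch does) would be:
#
#   def crc(filename):
#       import hashlib
#       h = hashlib.md5()
#       with open(filename, 'rb') as f:
#           for chunk in iter(lambda: f.read(65536), b''):
#               h.update(chunk)
#       return h.hexdigest()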
- logger.info('imported is : ' + str(imported)) + #logger.info('imported is : ' + str(imported)) if imported is None or imported == 'None' or imported == 'futurecheck': pass else: diff --git a/mylar/librarysync.py b/mylar/librarysync.py index 2b27fea8..2411b211 100755 --- a/mylar/librarysync.py +++ b/mylar/librarysync.py @@ -67,6 +67,8 @@ def libraryScan(dir=None, append=False, ComicID=None, ComicName=None, cron=None, comic = files comicpath = os.path.join(r, files) comicsize = os.path.getsize(comicpath) + logger.fdebug('Comic: ' + comic + ' [' + comicpath + '] - ' + str(comicsize) + ' bytes') + t = filechecker.FileChecker(dir=r, file=comic) results = t.listFiles() #logger.info(results) @@ -83,7 +85,6 @@ def libraryScan(dir=None, append=False, ComicID=None, ComicName=None, cron=None, #'annualcomicid': annual_comicid, #'scangroup': scangroup} - logger.fdebug('Comic: ' + comic + ' [' + comicpath + '] - ' + str(comicsize) + ' bytes') if results: resultline = '[PARSE-' + results['parse_status'].upper() + ']' diff --git a/mylar/locg.py b/mylar/locg.py index 5af7cffd..b02ef673 100755 --- a/mylar/locg.py +++ b/mylar/locg.py @@ -105,14 +105,16 @@ def locg(pulldate=None,weeknumber=None,year=None): cl_dyninfo = cl_d.dynamic_replace(comicname) dynamic_name = re.sub('[\|\s]','', cl_dyninfo['mod_seriesname'].lower()).strip() - controlValueDict = {'COMIC': comicname, + controlValueDict = {'DYNAMICNAME': dynamic_name, 'ISSUE': re.sub('#', '', x['issue']).strip()} + newValueDict = {'SHIPDATE': x['shipdate'], 'PUBLISHER': x['publisher'], 'STATUS': 'Skipped', + 'COMIC': comicname, 'COMICID': comicid, 'ISSUEID': issueid, - 'DYNAMICNAME': dynamic_name, + #'DYNAMICNAME': dynamic_name, 'WEEKNUMBER': x['weeknumber'], 'YEAR': x['year']} myDB.upsert("weekly", newValueDict, controlValueDict) diff --git a/mylar/mb.py b/mylar/mb.py index da7067c5..944a6ae0 100755 --- a/mylar/mb.py +++ b/mylar/mb.py @@ -300,11 +300,11 @@ def findComic(name, mode, issue, limityear=None, explicit=None, type=None): if not any(int(x) == int(i) for x in yearRange): yearRange.append(str(i)) - logger.fdebug('[RESULT] ComicName:' + xmlTag + ' -- ' + str(xmlYr) + ' [Series years: ' + str(yearRange) + ']') + logger.fdebug('[RESULT][' + str(limityear) + '] ComicName:' + xmlTag + ' -- ' + str(xmlYr) + ' [Series years: ' + str(yearRange) + ']') if tmpYr != xmlYr: xmlYr = tmpYr - if any([limityear in yearRange, limityear == 'None']): + if any(map(lambda v: v in limityear, yearRange)) or limityear == 'None': xmlurl = result.getElementsByTagName('site_detail_url')[0].firstChild.wholeText idl = len (result.getElementsByTagName('id')) idt = 0 @@ -358,8 +358,8 @@ def findComic(name, mode, issue, limityear=None, explicit=None, type=None): xmltype = 'Print' elif 'digital' in xmldesc[:60].lower() and 'digital edition can be found' not in xmldesc.lower(): xmltype = 'Digital' - else: - xmltype = 'Print' + else: + xmltype = 'Print' if xmlid in comicLibrary: haveit = comicLibrary[xmlid] diff --git a/mylar/moveit.py b/mylar/moveit.py index df137fbe..151e59b9 100755 --- a/mylar/moveit.py +++ b/mylar/moveit.py @@ -2,12 +2,13 @@ import mylar from mylar import db, logger, helpers, updater import os import shutil - +import ast def movefiles(comicid, comlocation, imported): #comlocation is destination #comicid is used for rename files_moved = [] + imported = ast.literal_eval(imported) myDB = db.DBConnection() @@ -61,19 +62,22 @@ def movefiles(comicid, comlocation, imported): myDB.upsert("importresults", newValue, controlValue) return -def archivefiles(comicid, ogdir, 
ogcname): +def archivefiles(comicid, comlocation, imported): myDB = db.DBConnection() # if move files isn't enabled, let's set all found comics to Archive status :) - result = myDB.select("SELECT * FROM importresults WHERE ComicName=?", [ogcname]) - if result is None: - pass - else: + imported = ast.literal_eval(imported) + ComicName = imported['ComicName'] + impres = imported['filelisting'] + + if impres is not None: scandir = [] - for res in result: - if any([os.path.dirname(res['ComicLocation']) in x for x in scandir]): - pass - else: - scandir.append(os.path.dirname(res['ComicLocation'])) + for impr in impres: + srcimp = impr['comiclocation'] + orig_filename = impr['comicfilename'] + + if not any([os.path.abspath(os.path.join(srcimp, os.pardir)) == x for x in scandir]): + scandir.append(os.path.abspath(os.path.join(srcimp, os.pardir))) + for sdir in scandir: logger.info('Updating issue information and setting status to Archived for location: ' + sdir) @@ -82,4 +86,22 @@ def archivefiles(comicid, ogdir, ogcname): logger.info('Now scanning in files.') updater.forceRescan(comicid) + for result in impres: + try: + res = result['import_id'] + except: + #if it's an 'older' import that wasn't imported, just make it a basic match so things can move and update properly. + controlValue = {"ComicFilename": result['comicfilename'], + "SRID": imported['srid']} + newValue = {"Status": "Imported", + "ComicID": comicid} + else: + controlValue = {"impID": result['import_id'], + "ComicFilename": result['comicfilename']} + newValue = {"Status": "Imported", + "SRID": imported['srid'], + "ComicID": comicid} + myDB.upsert("importresults", newValue, controlValue) + + return diff --git a/mylar/search.py b/mylar/search.py index be185d29..712b788b 100755 --- a/mylar/search.py +++ b/mylar/search.py @@ -286,13 +286,15 @@ def search_init(ComicName, IssueNumber, ComicYear, SeriesYear, Publisher, IssueD logger.info('Finished searching via :' + str(searchmode) + '. 
Issue not found - status kept as Wanted.') else: logger.fdebug('Could not find issue doing a manual search via : ' + str(searchmode)) + if searchprov == '32P' and mylar.MODE_32P == 0: + return findit, 'None' i+=1 return findit, 'None' def NZB_SEARCH(ComicName, IssueNumber, ComicYear, SeriesYear, Publisher, IssueDate, StoreDate, nzbprov, prov_count, IssDateFix, IssueID, UseFuzzy, newznab_host=None, ComicVersion=None, SARC=None, IssueArcID=None, RSS=None, ComicID=None, issuetitle=None, unaltered_ComicName=None, allow_packs=None): - if any([allow_packs is None, allow_packs == 'None']): + if any([allow_packs is None, allow_packs == 'None', allow_packs == 0]): allow_packs = False logger.info('allow_packs set to :' + str(allow_packs)) @@ -305,6 +307,8 @@ def NZB_SEARCH(ComicName, IssueNumber, ComicYear, SeriesYear, Publisher, IssueDa elif nzbprov == 'experimental': apikey = 'none' verify = False + elif nzbprov == 'Torznab': + verify = False elif nzbprov == 'newznab': #updated to include Newznab Name now name_newznab = newznab_host[0].rstrip() @@ -515,13 +519,17 @@ def NZB_SEARCH(ComicName, IssueNumber, ComicYear, SeriesYear, Publisher, IssueDa if nzbprov == '': bb = "no results" rss = "no" - elif nzbprov == '32P': - searchterm = {'series': ComicName, 'issue': findcomiciss, 'volume': ComicVersion, 'publisher': Publisher} - #first we find the id on the serieslist of 32P - #then we call the ajax against the id and issue# and volume (if exists) - a = auth32p.info32p(searchterm=searchterm) - bb = a.searchit() - rss = "no" + if nzbprov == '32P': + if all([mylar.MODE_32P == 1,mylar.ENABLE_32P]): + searchterm = {'series': ComicName, 'issue': findcomiciss, 'volume': ComicVersion, 'publisher': Publisher} + #first we find the id on the serieslist of 32P + #then we call the ajax against the id and issue# and volume (if exists) + a = auth32p.info32p(searchterm=searchterm) + bb = a.searchit() + rss = "no" + else: + bb = "no results" + rss = "no" elif nzbprov == 'KAT': cmname = re.sub("%20", " ", str(comsrc)) logger.fdebug("Sending request to [KAT] for " + str(cmname) + " : " + str(mod_isssearch)) @@ -797,11 +805,15 @@ def NZB_SEARCH(ComicName, IssueNumber, ComicYear, SeriesYear, Publisher, IssueDa else: # convert it to a tuple dateconv = email.utils.parsedate_tz(pubdate) - # convert it to a numeric time, then subtract the timezone difference (+/- GMT) - if dateconv[-1] is not None: - postdate_int = time.mktime(dateconv[:len(dateconv) -1]) - dateconv[-1] - else: - postdate_int = time.mktime(dateconv[:len(dateconv) -1]) + try: + # convert it to a numeric time, then subtract the timezone difference (+/- GMT) + if dateconv[-1] is not None: + postdate_int = time.mktime(dateconv[:len(dateconv) -1]) - dateconv[-1] + else: + postdate_int = time.mktime(dateconv[:len(dateconv) -1]) + except: + logger.warn('Unable to parse posting date from provider result set for :' + entry['title']) + continue #convert it to a Thu, 06 Feb 2014 00:00:00 format issue_convert = datetime.datetime.strptime(stdate.rstrip(), '%Y-%m-%d') diff --git a/mylar/webserve.py b/mylar/webserve.py index 2c9baa8a..44178eee 100755 --- a/mylar/webserve.py +++ b/mylar/webserve.py @@ -551,16 +551,16 @@ class WebInterface(object): raise cherrypy.HTTPRedirect("detailStoryArc?StoryArcID=%s&StoryArcName=%s" % (storyarcid, storyarcname)) addStoryArc.exposed = True - def wanted_Export(self): + def wanted_Export(self,mode): import unicodedata myDB = db.DBConnection() - wantlist = myDB.select("SELECT * FROM issues WHERE Status='Wanted' AND ComicName NOT NULL") + 
wantlist = myDB.select("SELECT * FROM issues WHERE Status=? AND ComicName NOT NULL", [mode]) if wantlist is None: - logger.info("There aren't any issues marked as Wanted. Aborting Export.") + logger.info("There aren't any issues marked as " + mode + ". Aborting Export.") return #write it a wanted_list.csv logger.info("gathered data - writing to csv...") - except_file = os.path.join(mylar.DATA_DIR, "wanted_list.csv") + except_file = os.path.join(mylar.DATA_DIR, str(mode) + "_list.csv") if os.path.exists(except_file): try: os.remove(except_file) @@ -576,11 +576,11 @@ class WebInterface(object): for want in wantlist: wantcomic = myDB.selectone("SELECT * FROM comics WHERE ComicID=?", [want['ComicID']]).fetchone() exceptln = wantcomic['ComicName'].encode('ascii', 'replace') + "," + str(wantcomic['ComicYear']) + "," + str(want['Issue_Number']) + "," + str(want['IssueDate']) + "," + str(want['ComicID']) + "," + str(want['IssueID']) - logger.fdebug(exceptln) + #logger.fdebug(exceptln) wcount+=1 f.write('%s\n' % (exceptln.encode('ascii', 'replace').strip())) - logger.info("Successfully wrote to csv file " + str(wcount) + " entries from your Wanted list.") + logger.info("Successfully wrote to csv file " + str(wcount) + " entries from your " + mode + " list.") raise cherrypy.HTTPRedirect("home") wanted_Export.exposed = True @@ -1691,11 +1691,12 @@ class WebInterface(object): futurepulllist.exposed = True - def add2futurewatchlist(self, ComicName, Issue, Publisher, ShipDate, FutureID=None): - #ShipDate is a tuple ('weeknumber','startweek','midweek','endweek','year') + def add2futurewatchlist(self, ComicName, Issue, Publisher, ShipDate, weeknumber, year, FutureID=None): + #ShipDate is just weekinfo['midweek'] #a tuple ('weeknumber','startweek','midweek','endweek','year') myDB = db.DBConnection() + logger.info(ShipDate) if FutureID is not None: - chkfuture = myDB.selectone('SELECT * FROM futureupcoming WHERE ComicName=? AND IssueNumber=? WHERE weeknumber=?', [ComicName, Issue, ShipDate['weeknumber']]).fetchone() + chkfuture = myDB.selectone('SELECT * FROM futureupcoming WHERE ComicName=? AND IssueNumber=? WHERE weeknumber=? AND year=?', [ComicName, Issue, weeknumber, year]).fetchone() if chkfuture is not None: logger.info('Already on Future Upcoming list - not adding at this time.') return @@ -1706,7 +1707,9 @@ class WebInterface(object): "Publisher": Publisher} newVal = {"Status": "Wanted", - "IssueDate": ShipDate['midweek']} + "IssueDate": ShipDate, + "weeknumber": weeknumber, + "year": year} myDB.upsert("futureupcoming", newVal, newCtrl) @@ -2040,10 +2043,14 @@ class WebInterface(object): status = kwargs['status'] results = [] myDB = db.DBConnection() - issues = myDB.select('SELECT * from issues WHERE Status=?', [status]) + if mylar.ANNUALS_ON: + issues = myDB.select("SELECT * from issues WHERE Status=? 
AND ComicName NOT LIKE '%Annual%'", [status]) + annuals = myDB.select("SELECT * from annuals WHERE Status=?", [status]) + else: + issues = myDB.select("SELECT * from issues WHERE Status=?", [status]) + annuals = [] for iss in issues: results.append(iss) - annuals = myDB.select('SELECT * from annuals WHERE Status=?', [status]) for ann in annuals: results.append(ann) @@ -2176,10 +2183,13 @@ class WebInterface(object): myDB.upsert("comics", newValueDict, controlValueDict) logger.info('[MANAGE COMICS][RESUME] ' + ComicName + ' has now been put into a Resumed State.') else: + logger.info('appending ' + str(ComicID) + ' to refresh list.') comicsToAdd.append(ComicID) + logger.info(comicsToAdd) + if len(comicsToAdd) > 0: - logger.info('[MANAGE COMICS][REFRESH] Refreshing ' + len(comicsToAdd) + ' series') + logger.info('[MANAGE COMICS][REFRESH] Refreshing ' + str(len(comicsToAdd)) + ' series') threading.Thread(target=updater.dbUpdate, args=[comicsToAdd]).start() markComics.exposed = True @@ -2641,7 +2651,7 @@ class WebInterface(object): else: issue_int = helpers.issuedigits(arc['IssueNumber']) logger.fdebug('int_issue = ' + str(issue_int)) - isschk = myDB.selectone("SELECT * FROM issues WHERE Int_IssueNumber=? AND ComicID=? AND STATUS !='Snatched'", [issue_int, comic['ComicID']]).fetchone() + isschk = myDB.selectone("SELECT * FROM issues WHERE Int_IssueNumber=? AND ComicID=?", [issue_int, comic['ComicID']]).fetchone() #AND STATUS !='Snatched'", [issue_int, comic['ComicID']]).fetchone() if isschk is None: logger.fdebug("we matched on name, but issue " + arc['IssueNumber'] + " doesn't exist for " + comic['ComicName']) else: @@ -3402,10 +3412,8 @@ class WebInterface(object): #taking this outside of the transaction in an attempt to stop db locking. if mylar.IMP_MOVE and movealreadyonlist == "yes": - # for md in movedata: mylar.moveit.movefiles(movedata_comicid, movedata_comiclocation, movedata_comicname) updater.forceRescan(comicid) - raise cherrypy.HTTPRedirect("importResults") #figure out # of issues and the year range allowable @@ -3415,12 +3423,16 @@ class WebInterface(object): if all([yearTOP != None, yearTOP != 'None']): if int(str(yearTOP)) > 0: minni = helpers.issuedigits(minISSUE) - #logger.info(minni) + logger.info(minni) if minni < 1 or minni > 999999999: + logger.info('here') maxyear = int(str(yearTOP)) else: - maxyear = int(str(yearTOP)) - (minni / 12) + logger.info('there') + maxyear = int(str(yearTOP)) - ( (minni/1000) / 12 ) if str(maxyear) not in yearRANGE: + logger.info('maxyear:' + str(maxyear)) + logger.info('yeartop:' + str(yearTOP)) for i in range(maxyear, int(yearTOP),1): if not any(int(x) == int(i) for x in yearRANGE): yearRANGE.append(str(i)) @@ -3570,8 +3582,8 @@ class WebInterface(object): if len(search_matches) > 1: # if we matched on more than one series above, just save those results instead of the entire search result set. for sres in search_matches: - cVal = {"SRID": SRID, - "comicid": sres['comicid']} + cVal = {"SRID": SRID, + "comicid": sres['comicid']} #should store ogcname in here somewhere to account for naming conversions above. nVal = {"Series": ComicName, "results": len(search_matches), @@ -3591,8 +3603,8 @@ class WebInterface(object): # store the search results for series that returned more than one result for user to select later / when they want. # should probably assign some random numeric for an id to reference back at some point. 
for sres in sresults: - cVal = {"SRID": SRID, - "comicid": sres['comicid']} + cVal = {"SRID": SRID, + "comicid": sres['comicid']} #should store ogcname in here somewhere to account for naming conversions above. nVal = {"Series": ComicName, "results": len(sresults), @@ -3652,7 +3664,9 @@ class WebInterface(object): else: if not Volume.lower().startswith('v'): volume = 'v' + str(Volume) - results = myDB.select("SELECT * FROM importresults WHERE (WatchMatch is Null OR WatchMatch LIKE 'C%') AND DynamicName=? AND Volume=?",[DynamicName,Volume]) + else: + volume = Volume + results = myDB.select("SELECT * FROM importresults WHERE (WatchMatch is Null OR WatchMatch LIKE 'C%') AND DynamicName=? AND Volume=?",[DynamicName,volume]) files = [] for result in results: files.append({'comicfilename': result['ComicFilename'], @@ -3666,7 +3680,7 @@ class WebInterface(object): 'filelisting': files, 'srid': SRID} - return serve_template(templatename="importresults_popup.html", title="results", searchtext=ComicName, searchresults=results, imported=imported) + return serve_template(templatename="importresults_popup.html", title="results", searchtext=ComicName, searchresults=searchresults, imported=imported) importresults_popup.exposed = True diff --git a/mylar/weeklypull.py b/mylar/weeklypull.py index 9806b7b5..4dc6225c 100755 --- a/mylar/weeklypull.py +++ b/mylar/weeklypull.py @@ -64,7 +64,7 @@ def pullit(forcecheck=None): newpull.newpull() elif mylar.ALT_PULL == 2: logger.info('[PULL-LIST] Populating & Loading pull-list data directly from alternate website') - chk_locg = locg.locg(pulldate) + chk_locg = locg.locg('00000000') #setting this to 00000000 will do a Recreate on every call instead of a Refresh if chk_locg['status'] == 'up2date': logger.info('[PULL-LIST] Pull-list is already up-to-date with ' + str(chk_locg['count']) + 'issues. Polling watchlist against it to see if anything is new.') mylar.PULLNEW = 'no'
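The weeklypull.py change pairs with the scheduler hunk in mylar/__init__.py: under ALT_PULL == 2 the pull-list is now rebuilt from scratch every four hours rather than refreshed daily. A sketch of the cadence (assuming, per the inline comment, that locg() treats the all-zero pulldate as a "recreate" sentinel):

```python
import datetime

def weekly_cycle(alt_pull):
    """Polling interval implied by the __init__.py hunk: every 4 hours when
    pulling directly from the alternate site (ALT_PULL == 2), daily otherwise.
    The real scheduler wraps weeklypullit.Weekly() with this cycleTime."""
    return datetime.timedelta(hours=4 if alt_pull == 2 else 24)

# each ALT_PULL == 2 run then calls locg.locg('00000000'); per the patch comment,
# the zeroed pulldate makes locg recreate the week's list instead of refreshing it
```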
@@ -155,9 +155,12 @@ (file header lost in extraction; an interface menu template whose markup was stripped, leaving only the link labels and diff markers)
Export
-
+
+
+
Additional Options
Reading List Management
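The stripped menu hunk above lines up with the new wanted_Export(mode) signature in webserve.py: the three added lines are presumably per-status export links (something like wanted_Export?mode=Wanted and wanted_Export?mode=Snatched; the actual markup was lost). On the Python side the exported CSV now lands in a per-mode file, a rule small enough to sketch:

```python
import os

def export_csv_path(data_dir, mode):
    """Per the wanted_Export change: '<mode>_list.csv' replaces the fixed
    'wanted_list.csv' (mode is the issue Status being exported)."""
    return os.path.join(data_dir, str(mode) + "_list.csv")

print(export_csv_path('/opt/mylar/data', 'Wanted'))    # /opt/mylar/data/Wanted_list.csv
print(export_csv_path('/opt/mylar/data', 'Snatched'))  # /opt/mylar/data/Snatched_list.csv
```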
diff --git a/data/interfaces/default/searchresults.html b/data/interfaces/default/searchresults.html index c665e28c..c6760abb 100755 --- a/data/interfaces/default/searchresults.html +++ b/data/interfaces/default/searchresults.html @@ -67,15 +67,16 @@ %for result in searchresults: <% if result['comicyear'] == '2016': - grade = 'A' + grade = 'A' else: - grade = 'Z' + grade = 'Z' if result['haveit'] != "No": - grade = 'H'; - if result['type'] == 'Digital': + grade = 'H'; + + rtype = None + if type != 'story_arc': + if result['type'] == 'Digital': rtype = '[Digital]' - else: - rtype = None %>
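The Mako block being re-indented above reduces to a small grading rule; rendered as plain Python for clarity (result and the search type come from the page context, renamed here from the template's bare `type`):

```python
def grade_result(result, search_type):
    """Row grade on searchresults.html: 'A' for a 2016 series year, 'Z'
    otherwise, overridden to 'H' when the series is already in the library.
    The '[Digital]' tag is now suppressed entirely for story-arc searches."""
    grade = 'A' if result['comicyear'] == '2016' else 'Z'
    if result['haveit'] != 'No':
        grade = 'H'
    rtype = None
    if search_type != 'story_arc' and result['type'] == 'Digital':
        rtype = '[Digital]'
    return grade, rtype
```

Worth noting: the hard-coded '2016' means the 'A' grade quietly goes stale each January.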
${item['SpanYears']} ${css}
${item['Have']}/${item['Total']}
- - - + %if item['CV_ArcID']: %endif diff --git a/data/interfaces/default/upcoming.html b/data/interfaces/default/upcoming.html index 235c3aaa..c7262117 100755 --- a/data/interfaces/default/upcoming.html +++ b/data/interfaces/default/upcoming.html @@ -149,7 +149,7 @@ %for f_nodata in future_nodata_upcoming:
- + ${f_nodata['ComicName']}
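Closing the loop on the futureupcoming changes: the weeknumber and year columns added in dbcheck() follow Mylar's usual probe-then-ALTER migration idiom, isolated below against an in-memory database for illustration. (Separately, the chkfuture query in add2futurewatchlist still chains two WHERE clauses, "WHERE ComicName=? AND IssueNumber=? WHERE weeknumber=? AND year=?", which sqlite will reject as a syntax error; the second WHERE presumably wants to be AND.)

```python
import sqlite3

conn = sqlite3.connect(':memory:')
c = conn.cursor()
c.execute('CREATE TABLE futureupcoming (ComicName TEXT, IssueNumber TEXT)')

# probe each new column; sqlite raises OperationalError when it is missing,
# and the handler adds it, so the migration is safe to re-run on every start
for column in ('weeknumber', 'year'):
    try:
        c.execute('SELECT %s from futureupcoming' % column)
    except sqlite3.OperationalError:
        c.execute('ALTER TABLE futureupcoming ADD COLUMN %s TEXT' % column)

conn.commit()
```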