diff --git a/data/interfaces/default/comicdetails.html b/data/interfaces/default/comicdetails.html index 97647266..deac430a 100755 --- a/data/interfaces/default/comicdetails.html +++ b/data/interfaces/default/comicdetails.html @@ -1,7 +1,7 @@ <%inherit file="base.html"/> <%! - import os - from mylar import db + import os, re + from mylar import db, helpers import mylar %> @@ -112,10 +112,41 @@ if comic['Type'] == 'None' or comic['Type'] is None or comic['Type'] == 'Print': comictype = 'Print' else: - comictype = 'Digital' + comictype = comic['Type'] %>
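Not part of the diff — a minimal sketch of the booktype normalization the Mako block above now performs, with comic['Type'] assumed to hold one of None, 'None', 'Print', 'Digital', 'TPB' or 'HC' (display_booktype is a hypothetical helper, not a Mylar function):

def display_booktype(comic_type):
    # legacy rows carry None/'None'; those are plain print editions
    if comic_type in (None, 'None', 'Print'):
        return 'Print'
    # anything else (Digital, TPB, HC, ...) is now shown as-is instead of
    # being collapsed to 'Digital' as the old template did
    return comic_type

assert display_booktype('None') == 'Print'
assert display_booktype('TPB') == 'TPB'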
- +
+ + + diff --git a/data/interfaces/default/storyarc_detail.poster.html b/data/interfaces/default/storyarc_detail.poster.html index 3bbe9955..3b88cf53 100755 --- a/data/interfaces/default/storyarc_detail.poster.html +++ b/data/interfaces/default/storyarc_detail.poster.html @@ -34,10 +34,8 @@ %if weekly['AUTOWANT']: diff --git a/mylar/PostProcessor.py b/mylar/PostProcessor.py index d0b2481c..a08d9b98 100755 --- a/mylar/PostProcessor.py +++ b/mylar/PostProcessor.py @@ -569,11 +569,28 @@ class PostProcessor(object): for isc in issuechk: datematch = "True" if isc['ReleaseDate'] is not None and isc['ReleaseDate'] != '0000-00-00': - monthval = isc['ReleaseDate'] - watch_issueyear = isc['ReleaseDate'][:4] + try: + if isc['DigitalDate'] != '0000-00-00' and int(re.sub('-', '', isc['DigitalDate']).strip()) <= int(re.sub('-', '', isc['ReleaseDate']).strip()): + monthval = isc['DigitalDate'] + watch_issueyear = isc['DigitalDate'][:4] + else: + monthval = isc['ReleaseDate'] + watch_issueyear = isc['ReleaseDate'][:4] + except: + monthval = isc['ReleaseDate'] + watch_issueyear = isc['ReleaseDate'][:4] + else: - monthval = isc['IssueDate'] - watch_issueyear = isc['IssueDate'][:4] + try: + if isc['DigitalDate'] != '0000-00-00' and int(re.sub('-', '', isc['DigitalDate']).strip()) <= int(re.sub('-', '', isc['ReleaseDate']).strip()): + monthval = isc['DigitalDate'] + watch_issueyear = isc['DigitalDate'][:4] + else: + monthval = isc['IssueDate'] + watch_issueyear = isc['IssueDate'][:4] + except: + monthval = isc['IssueDate'] + watch_issueyear = isc['IssueDate'][:4] if len(watchmatch) >= 1 and watchmatch['issue_year'] is not None: #if the # of matches is more than 1, we need to make sure we get the right series @@ -587,7 +604,12 @@ class PostProcessor(object): #logger.info(module + ' ReleaseDate: ' + str(isc['ReleaseDate'])) #logger.info(module + ' IssueDate: ' + str(isc['IssueDate'])) - if isc['ReleaseDate'] is not None and isc['ReleaseDate'] != '0000-00-00': + if isc['DigitalDate'] is not None and isc['DigitalDate'] != '0000-00-00': + if int(isc['DigitalDate'][:4]) < int(watchmatch['issue_year']): + logger.fdebug(module + '[ISSUE-VERIFY] ' + str(isc['DigitalDate']) + ' is before the issue year of ' + str(watchmatch['issue_year']) + ' that was discovered in the filename') + datematch = "False" + + elif isc['ReleaseDate'] is not None and isc['ReleaseDate'] != '0000-00-00': if int(isc['ReleaseDate'][:4]) < int(watchmatch['issue_year']): logger.fdebug(module + '[ISSUE-VERIFY] ' + str(isc['ReleaseDate']) + ' is before the issue year of ' + str(watchmatch['issue_year']) + ' that was discovered in the filename') datematch = "False" @@ -1285,9 +1307,13 @@ class PostProcessor(object): #loop through the hits here. if len(manual_list) == 0 and len(manual_arclist) == 0: logger.info(module + ' No matches for Manual Run ... 
exiting.') + if mylar.APILOCK is True: + mylar.APILOCK = False return elif len(manual_arclist) > 0 and len(manual_list) == 0: logger.info(module + ' Manual post-processing completed for ' + str(len(manual_arclist)) + ' story-arc issues.') + if mylar.APILOCK is True: + mylar.APILOCK = False return elif len(manual_arclist) > 0: logger.info(module + ' Manual post-processing completed for ' + str(len(manual_arclist)) + ' story-arc issues.') diff --git a/mylar/__init__.py b/mylar/__init__.py index c16726e4..910fa211 100644 --- a/mylar/__init__.py +++ b/mylar/__init__.py @@ -482,21 +482,21 @@ def dbcheck(): c.execute('SELECT ReleaseDate from storyarcs') except sqlite3.OperationalError: try: - c.execute('CREATE TABLE IF NOT EXISTS storyarcs(StoryArcID TEXT, ComicName TEXT, IssueNumber TEXT, SeriesYear TEXT, IssueYEAR TEXT, StoryArc TEXT, TotalIssues TEXT, Status TEXT, inCacheDir TEXT, Location TEXT, IssueArcID TEXT, ReadingOrder INT, IssueID TEXT, ComicID TEXT, ReleaseDate TEXT, IssueDate TEXT, Publisher TEXT, IssuePublisher TEXT, IssueName TEXT, CV_ArcID TEXT, Int_IssueNumber INT, DynamicComicName TEXT, Volume TEXT, Manual TEXT, DateAdded TEXT)') + c.execute('CREATE TABLE IF NOT EXISTS storyarcs(StoryArcID TEXT, ComicName TEXT, IssueNumber TEXT, SeriesYear TEXT, IssueYEAR TEXT, StoryArc TEXT, TotalIssues TEXT, Status TEXT, inCacheDir TEXT, Location TEXT, IssueArcID TEXT, ReadingOrder INT, IssueID TEXT, ComicID TEXT, ReleaseDate TEXT, IssueDate TEXT, Publisher TEXT, IssuePublisher TEXT, IssueName TEXT, CV_ArcID TEXT, Int_IssueNumber INT, DynamicComicName TEXT, Volume TEXT, Manual TEXT, DateAdded TEXT, DigitalDate TEXT)') c.execute('INSERT INTO storyarcs(StoryArcID, ComicName, IssueNumber, SeriesYear, IssueYEAR, StoryArc, TotalIssues, Status, inCacheDir, Location, IssueArcID, ReadingOrder, IssueID, ComicID, ReleaseDate, IssueDate, Publisher, IssuePublisher, IssueName, CV_ArcID, Int_IssueNumber, DynamicComicName, Volume, Manual) SELECT StoryArcID, ComicName, IssueNumber, SeriesYear, IssueYEAR, StoryArc, TotalIssues, Status, inCacheDir, Location, IssueArcID, ReadingOrder, IssueID, ComicID, StoreDate, IssueDate, Publisher, IssuePublisher, IssueName, CV_ArcID, Int_IssueNumber, DynamicComicName, Volume, Manual FROM readinglist') c.execute('DROP TABLE readinglist') except sqlite3.OperationalError: logger.warn('Unable to update readinglist table to new storyarc table format.') c.execute('CREATE TABLE IF NOT EXISTS comics (ComicID TEXT UNIQUE, ComicName TEXT, ComicSortName TEXT, ComicYear TEXT, DateAdded TEXT, Status TEXT, IncludeExtras INTEGER, Have INTEGER, Total INTEGER, ComicImage TEXT, ComicPublisher TEXT, ComicLocation TEXT, ComicPublished TEXT, NewPublish TEXT, LatestIssue TEXT, LatestDate TEXT, Description TEXT, QUALalt_vers TEXT, QUALtype TEXT, QUALscanner TEXT, QUALquality TEXT, LastUpdated TEXT, AlternateSearch TEXT, UseFuzzy TEXT, ComicVersion TEXT, SortOrder INTEGER, DetailURL TEXT, ForceContinuing INTEGER, ComicName_Filesafe TEXT, AlternateFileName TEXT, ComicImageURL TEXT, ComicImageALTURL TEXT, DynamicComicName TEXT, AllowPacks TEXT, Type TEXT, Corrected_SeriesYear TEXT, TorrentID_32P TEXT, LatestIssueID TEXT)') - c.execute('CREATE TABLE IF NOT EXISTS issues (IssueID TEXT, ComicName TEXT, IssueName TEXT, Issue_Number TEXT, DateAdded TEXT, Status TEXT, Type TEXT, ComicID TEXT, ArtworkURL Text, ReleaseDate TEXT, Location TEXT, IssueDate TEXT, Int_IssueNumber INT, ComicSize TEXT, AltIssueNumber TEXT, IssueDate_Edit TEXT, ImageURL TEXT, ImageURL_ALT TEXT)') + c.execute('CREATE TABLE IF NOT 
EXISTS issues (IssueID TEXT, ComicName TEXT, IssueName TEXT, Issue_Number TEXT, DateAdded TEXT, Status TEXT, Type TEXT, ComicID TEXT, ArtworkURL Text, ReleaseDate TEXT, Location TEXT, IssueDate TEXT, DigitalDate TEXT, Int_IssueNumber INT, ComicSize TEXT, AltIssueNumber TEXT, IssueDate_Edit TEXT, ImageURL TEXT, ImageURL_ALT TEXT)') c.execute('CREATE TABLE IF NOT EXISTS snatched (IssueID TEXT, ComicName TEXT, Issue_Number TEXT, Size INTEGER, DateAdded TEXT, Status TEXT, FolderName TEXT, ComicID TEXT, Provider TEXT, Hash TEXT, crc TEXT)') c.execute('CREATE TABLE IF NOT EXISTS upcoming (ComicName TEXT, IssueNumber TEXT, ComicID TEXT, IssueID TEXT, IssueDate TEXT, Status TEXT, DisplayComicName TEXT)') c.execute('CREATE TABLE IF NOT EXISTS nzblog (IssueID TEXT, NZBName TEXT, SARC TEXT, PROVIDER TEXT, ID TEXT, AltNZBName TEXT, OneOff TEXT)') - c.execute('CREATE TABLE IF NOT EXISTS weekly (SHIPDATE TEXT, PUBLISHER TEXT, ISSUE TEXT, COMIC VARCHAR(150), EXTRA TEXT, STATUS TEXT, ComicID TEXT, IssueID TEXT, CV_Last_Update TEXT, DynamicName TEXT, weeknumber TEXT, year TEXT, volume TEXT, seriesyear TEXT, annuallink TEXT, rowid INTEGER PRIMARY KEY)') + c.execute('CREATE TABLE IF NOT EXISTS weekly (SHIPDATE TEXT, PUBLISHER TEXT, ISSUE TEXT, COMIC VARCHAR(150), EXTRA TEXT, STATUS TEXT, ComicID TEXT, IssueID TEXT, CV_Last_Update TEXT, DynamicName TEXT, weeknumber TEXT, year TEXT, volume TEXT, seriesyear TEXT, annuallink TEXT, format TEXT, rowid INTEGER PRIMARY KEY)') c.execute('CREATE TABLE IF NOT EXISTS importresults (impID TEXT, ComicName TEXT, ComicYear TEXT, Status TEXT, ImportDate TEXT, ComicFilename TEXT, ComicLocation TEXT, WatchMatch TEXT, DisplayName TEXT, SRID TEXT, ComicID TEXT, IssueID TEXT, Volume TEXT, IssueNumber TEXT, DynamicName TEXT)') c.execute('CREATE TABLE IF NOT EXISTS readlist (IssueID TEXT, ComicName TEXT, Issue_Number TEXT, Status TEXT, DateAdded TEXT, Location TEXT, inCacheDir TEXT, SeriesYear TEXT, ComicID TEXT, StatusChange TEXT)') - c.execute('CREATE TABLE IF NOT EXISTS annuals (IssueID TEXT, Issue_Number TEXT, IssueName TEXT, IssueDate TEXT, Status TEXT, ComicID TEXT, GCDComicID TEXT, Location TEXT, ComicSize TEXT, Int_IssueNumber INT, ComicName TEXT, ReleaseDate TEXT, ReleaseComicID TEXT, ReleaseComicName TEXT, IssueDate_Edit TEXT, DateAdded TEXT)') + c.execute('CREATE TABLE IF NOT EXISTS annuals (IssueID TEXT, Issue_Number TEXT, IssueName TEXT, IssueDate TEXT, Status TEXT, ComicID TEXT, GCDComicID TEXT, Location TEXT, ComicSize TEXT, Int_IssueNumber INT, ComicName TEXT, ReleaseDate TEXT, DigitalDate TEXT, ReleaseComicID TEXT, ReleaseComicName TEXT, IssueDate_Edit TEXT, DateAdded TEXT)') c.execute('CREATE TABLE IF NOT EXISTS rssdb (Title TEXT UNIQUE, Link TEXT, Pubdate TEXT, Site TEXT, Size TEXT)') c.execute('CREATE TABLE IF NOT EXISTS futureupcoming (ComicName TEXT, IssueNumber TEXT, ComicID TEXT, IssueID TEXT, IssueDate TEXT, Publisher TEXT, Status TEXT, DisplayComicName TEXT, weeknumber TEXT, year TEXT)') c.execute('CREATE TABLE IF NOT EXISTS failed (ID TEXT, Status TEXT, ComicID TEXT, IssueID TEXT, Provider TEXT, ComicName TEXT, Issue_Number TEXT, NZBName TEXT, DateFailed TEXT)') @@ -505,6 +505,7 @@ def dbcheck(): c.execute('CREATE TABLE IF NOT EXISTS oneoffhistory (ComicName TEXT, IssueNumber TEXT, ComicID TEXT, IssueID TEXT, Status TEXT, weeknumber TEXT, year TEXT)') c.execute('CREATE TABLE IF NOT EXISTS jobhistory (JobName TEXT, prev_run_datetime timestamp, prev_run_timestamp REAL, next_run_datetime timestamp, next_run_timestamp REAL, last_run_completed TEXT, 
successful_completions TEXT, failed_completions TEXT, status TEXT)') c.execute('CREATE TABLE IF NOT EXISTS manualresults (provider TEXT, id TEXT, kind TEXT, comicname TEXT, volume TEXT, oneoff TEXT, fullprov TEXT, issuenumber TEXT, modcomicname TEXT, name TEXT, link TEXT, size TEXT, pack_numbers TEXT, pack_issuelist TEXT, comicyear TEXT, issuedate TEXT, tmpprov TEXT, pack TEXT, issueid TEXT, comicid TEXT, sarc TEXT, issuearcid TEXT)') + c.execute('CREATE TABLE IF NOT EXISTS storyarcs(StoryArcID TEXT, ComicName TEXT, IssueNumber TEXT, SeriesYear TEXT, IssueYEAR TEXT, StoryArc TEXT, TotalIssues TEXT, Status TEXT, inCacheDir TEXT, Location TEXT, IssueArcID TEXT, ReadingOrder INT, IssueID TEXT, ComicID TEXT, ReleaseDate TEXT, IssueDate TEXT, Publisher TEXT, IssuePublisher TEXT, IssueName TEXT, CV_ArcID TEXT, Int_IssueNumber INT, DynamicComicName TEXT, Volume TEXT, Manual TEXT, DateAdded TEXT, DigitalDate TEXT)') conn.commit c.close @@ -619,6 +620,11 @@ def dbcheck(): except sqlite3.OperationalError: c.execute('ALTER TABLE comics ADD COLUMN LatestIssueID TEXT') + try: + c.execute('SELECT Collects from comics') + except sqlite3.OperationalError: + c.execute('ALTER TABLE comics ADD COLUMN Collects CLOB') + try: c.execute('SELECT DynamicComicName from comics') if CONFIG.DYNAMIC_UPDATE < 3: @@ -661,6 +667,11 @@ def dbcheck(): except sqlite3.OperationalError: c.execute('ALTER TABLE issues ADD COLUMN ImageURL_ALT TEXT') + try: + c.execute('SELECT DigitalDate from issues') + except sqlite3.OperationalError: + c.execute('ALTER TABLE issues ADD COLUMN DigitalDate TEXT') + ## -- ImportResults Table -- try: @@ -812,6 +823,11 @@ def dbcheck(): except sqlite3.OperationalError: c.execute('ALTER TABLE weekly ADD COLUMN annuallink TEXT') + try: + c.execute('SELECT format from weekly') + except sqlite3.OperationalError: + c.execute('ALTER TABLE weekly ADD COLUMN format TEXT') + ## -- Nzblog Table -- try: @@ -892,6 +908,11 @@ def dbcheck(): except sqlite3.OperationalError: c.execute('ALTER TABLE annuals ADD COLUMN DateAdded TEXT') + try: + c.execute('SELECT DigitalDate from annuals') + except sqlite3.OperationalError: + c.execute('ALTER TABLE annuals ADD COLUMN DigitalDate TEXT') + ## -- Snatched Table -- try: @@ -984,6 +1005,11 @@ def dbcheck(): except sqlite3.OperationalError: c.execute('ALTER TABLE storyarcs ADD COLUMN DateAdded TEXT') + try: + c.execute('SELECT DigitalDate from storyarcs') + except sqlite3.OperationalError: + c.execute('ALTER TABLE storyarcs ADD COLUMN DigitalDate TEXT') + ## -- searchresults Table -- try: c.execute('SELECT SRID from searchresults') diff --git a/mylar/cv.py b/mylar/cv.py index 7eb86118..a414ce82 100755 --- a/mylar/cv.py +++ b/mylar/cv.py @@ -190,7 +190,7 @@ def getComic(comicid, type, issueid=None, arc=None, arcid=None, arclist=None, co else: tmpidlist += '|' + str(comicidlist[i]) in_cnt +=1 - logger.info('tmpidlist: ' + str(tmpidlist)) + logger.fdebug('tmpidlist: ' + str(tmpidlist)) searched = pulldetails(None, 'import', offset=0, comicidlist=tmpidlist) @@ -287,6 +287,8 @@ def GetComicInfo(comicid, dom, safechk=None): #the description field actually holds the Volume# - so let's grab it try: descchunk = dom.getElementsByTagName('description')[0].firstChild.wholeText + desc_soup = Soup(descchunk, "html.parser") + desclinks = desc_soup.findAll('a') comic_desc = drophtml(descchunk) desdeck +=1 except: @@ -312,26 +314,87 @@ def GetComicInfo(comicid, dom, safechk=None): comic['Aliases'] = 'None' comic['ComicVersion'] = 'None' #noversion' - #logger.info('comic_desc:' + comic_desc) 
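Not part of the diff — every dbcheck() hunk above repeats the same probe-and-alter idiom to add the new DigitalDate, Collects and format columns. A standalone sketch of that pattern (table and column names are illustrative):

import sqlite3

def add_column_if_missing(conn, table, column, coltype):
    c = conn.cursor()
    try:
        # dbcheck() probes with a bare SELECT; sqlite3 raises
        # OperationalError ('no such column') if the column is absent
        c.execute('SELECT %s from %s' % (column, table))
    except sqlite3.OperationalError:
        c.execute('ALTER TABLE %s ADD COLUMN %s %s' % (table, column, coltype))
    conn.commit()

conn = sqlite3.connect(':memory:')
conn.execute('CREATE TABLE issues (IssueID TEXT)')
add_column_if_missing(conn, 'issues', 'DigitalDate', 'TEXT')
add_column_if_missing(conn, 'issues', 'DigitalDate', 'TEXT')  # second call is a no-op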
- #logger.info('comic_deck:' + comic_deck) - #logger.info('desdeck: ' + str(desdeck)) #figure out if it's a print / digital edition. comic['Type'] = 'None' if comic_deck != 'None': - if any(['print' in comic_deck.lower(), 'digital' in comic_deck.lower()]): + if any(['print' in comic_deck.lower(), 'digital' in comic_deck.lower(), 'paperback' in comic_deck.lower(), 'hardcover' in comic_deck.lower()]): if 'print' in comic_deck.lower(): comic['Type'] = 'Print' elif 'digital' in comic_deck.lower(): - comic['Type'] = 'Digital' + comic['Type'] = 'Digital' + elif 'paperback' in comic_deck.lower(): + comic['Type'] = 'TPB' + elif 'hardcover' in comic_deck.lower(): + comic['Type'] = 'HC' + if comic_desc != 'None' and comic['Type'] == 'None': if 'print' in comic_desc[:60].lower() and 'print edition can be found' not in comic_desc.lower(): comic['Type'] = 'Print' elif 'digital' in comic_desc[:60].lower() and 'digital edition can be found' not in comic_desc.lower(): comic['Type'] = 'Digital' + elif all(['paperback' in comic_desc[:60].lower(), 'paperback can be found' not in comic_desc.lower()]) or 'collects' in comic_desc[:60].lower(): + comic['Type'] = 'TPB' + elif 'hardcover' in comic_desc[:60].lower() and 'hardcover can be found' not in comic_desc.lower(): + comic['Type'] = 'HC' else: comic['Type'] = 'Print' + if all([comic_desc != 'None', 'trade paperback' in comic_desc[:30].lower(), 'collecting' in comic_desc[:40].lower()]): + #ie. Trade paperback collecting Marvel Team-Up #9-11, 48-51, 72, 110 & 145. + first_collect = comic_desc.lower().find('collecting') + #logger.info('first_collect: %s' % first_collect) + #logger.info('comic_desc: %s' % comic_desc) + #logger.info('desclinks: %s' % desclinks) + issue_list = [] + for fc in desclinks: + #logger.info('fc: %s' % fc) + fc_id = fc['data-ref-id'] + #logger.info('fc_id: %s' % fc_id) + fc_name = fc.findNext(text=True) + #logger.info('fc_name: %s' % fc_name) + if fc_id.startswith('4000'): + fc_cid = None + fc_isid = fc_id + iss_start = fc_name.find('#') + issuerun = fc_name[iss_start:].strip() + fc_name = fc_name[:iss_start].strip() + elif fc_id.startswith('4050'): + fc_cid = fc_id + fc_isid = None + issuerun = fc.next_sibling + lines = re.sub("[^0-9]", ' ', issuerun).strip().split(' ') + if len(lines) > 0: + for x in sorted(lines, reverse=True): + srchline = issuerun.rfind(x) + if srchline != -1: + try: + if issuerun[srchline+len(x)] == ',' or issuerun[srchline+len(x)] == '.' 
or issuerun[srchline+len(x)] == ' ': + issuerun = issuerun[:srchline+len(x)] + break + except: + continue + if issuerun.endswith('.') or issuerun.endswith(','): + #logger.fdebug('Changed issuerun from %s to %s' % (issuerun, issuerun[:-1])) + issuerun = issuerun[:-1] + if issuerun.endswith(' and '): + issuerun = issuerun[:-4].strip() + elif issuerun.endswith(' and'): + issuerun = issuerun[:-3].strip() + + # except: + # pass + issue_list.append({'series': fc_name, + 'comicid': fc_cid, + 'issueid': fc_isid, + 'issues': issuerun}) + #first_collect = cis + + logger.info('Collected issues in volume: %s' % issue_list) + comic['Issue_List'] = issue_list + else: + comic['Issue_List'] = 'None' + while (desdeck > 0): if desdeck == 1: if comic_desc == 'None': @@ -412,19 +475,7 @@ def GetComicInfo(comicid, dom, safechk=None): comic['FirstIssueID'] = dom.getElementsByTagName('id')[0].firstChild.wholeText -# print ("fistIss:" + str(comic['FirstIssueID'])) -# comicchoice.append({ -# 'ComicName': comic['ComicName'], -# 'ComicYear': comic['ComicYear'], -# 'Comicid': comicid, -# 'ComicURL': comic['ComicURL'], -# 'ComicIssues': comic['ComicIssues'], -# 'ComicImage': comic['ComicImage'], -# 'ComicVolume': ParseVol, -# 'ComicPublisher': comic['ComicPublisher'] -# }) - -# comic['comicchoice'] = comicchoice + #logger.info('comic: %s' % comic) return comic def GetIssuesInfo(comicid, dom, arcid=None): @@ -495,6 +546,19 @@ def GetIssuesInfo(comicid, dom, arcid=None): tempissue['StoreDate'] = subtrack.getElementsByTagName('store_date')[0].firstChild.wholeText except: tempissue['StoreDate'] = '0000-00-00' + try: + digital_desc = subtrack.getElementsByTagName('description')[0].firstChild.wholeText + except: + tempissue['DigitalDate'] = '0000-00-00' + else: + tempissue['DigitalDate'] = '0000-00-00' + if all(['digital' in digital_desc.lower()[-90:], 'print' in digital_desc.lower()[-90:]]): + #get the digital date of issue here... + mff = mylar.filechecker.FileChecker() + vlddate = mff.checkthedate(digital_desc[-90:], fulldate=True) + #logger.fdebug('vlddate: %s' % vlddate) + if vlddate: + tempissue['DigitalDate'] = vlddate try: tempissue['Issue_Number'] = subtrack.getElementsByTagName('issue_number')[0].firstChild.wholeText except: @@ -517,6 +581,7 @@ def GetIssuesInfo(comicid, dom, arcid=None): 'Issue_Number': tempissue['Issue_Number'], 'Issue_Date': tempissue['CoverDate'], 'Store_Date': tempissue['StoreDate'], + 'Digital_Date': tempissue['DigitalDate'], 'Issue_Name': tempissue['Issue_Name'], 'Image': tempissue['ComicImage'], 'ImageALT': tempissue['ComicImageALT'] @@ -531,6 +596,7 @@ def GetIssuesInfo(comicid, dom, arcid=None): 'Issue_Number': tempissue['Issue_Number'], 'Issue_Date': tempissue['CoverDate'], 'Store_Date': tempissue['StoreDate'], + 'Digital_Date': tempissue['DigitalDate'], 'Issue_Name': tempissue['Issue_Name'] }) @@ -538,6 +604,7 @@ def GetIssuesInfo(comicid, dom, arcid=None): firstdate = tempissue['CoverDate'] n-= 1 + #logger.fdebug('issue_info: %s' % issuech) #issue['firstdate'] = firstdate return issuech, firstdate @@ -817,8 +884,7 @@ def GetImportList(results): return serieslist def drophtml(html): - from bs4 import BeautifulSoup - soup = BeautifulSoup(html, "html.parser") + soup = Soup(html, "html.parser") text_parts = soup.findAll(text=True) #print ''.join(text_parts) diff --git a/mylar/filechecker.py b/mylar/filechecker.py index 32e1a6a5..5d064112 100755 --- a/mylar/filechecker.py +++ b/mylar/filechecker.py @@ -1,4 +1,5 @@ #/usr/bin/env python +# -*- coding: utf-8 -*- # This file is part of Mylar. 
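Not part of the diff — the GetIssuesInfo() hunk above only looks for a digital release date in the last ~90 characters of a ComicVine description, and only when both 'digital' and 'print' are mentioned there; the actual parsing is delegated to FileChecker.checkthedate(..., fulldate=True). A rough standalone equivalent (the format list and sample text are assumptions):

import datetime as dt

def digital_date_from_desc(desc):
    tail = desc[-90:]
    if 'digital' not in tail.lower() or 'print' not in tail.lower():
        return '0000-00-00'
    # try each whitespace-separated word against a few strptime formats
    for word in tail.split():
        for fmt in ('%m/%d/%Y', '(%m/%d/%Y)', '%Y-%m-%d'):
            try:
                return dt.datetime.strptime(word, fmt).strftime('%Y-%m-%d')
            except ValueError:
                pass
    return '0000-00-00'

print(digital_date_from_desc(
    'The print edition shipped later; the digital edition was released (08/15/2018)'))  # 2018-08-15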
# # Mylar is free software: you can redistribute it and/or modify @@ -325,6 +326,56 @@ class FileChecker(object): #split_file = re.findall('\([\w\s-]+\)|[-+]?\d*\.\d+|\d+|[\w-]+|#?\d\.\d+|#(? 0: + logger.fdebug('[MINI-DECIMAL SERIES] MAX ISSUES IN SERIES: %s' % x) + spf.append('(of %s)' % x) + except Exception as e: + logger.error('Exception: %s' % e) + spf.append(x) + + elif x == ')': + pass + elif x == 'p' or x == 'ctc': + try: + if spf[wrdcnt-1].isdigit(): + logger.debug('THIS SHOULD BE : %s%s' % (spf[wrdcnt-1], x)) + newline = '%s%s' % (spf[wrdcnt-1], x) + spf[wrdcnt -1] = newline + #wrdcnt =-1 + elif spf[wrdcnt-1][-1] == 'p' and spf[wrdcnt-1][:-1].isdigit() and x == 'ctc': + logger.fdebug('THIS SHOULD BE : %s%s' % (spf[wrdcnt-1], x)) + newline = '%s%s' % (spf[wrdcnt-1], x) + spf[wrdcnt -1] = newline + #wrdcnt =-1 + except Exception as e: + logger.error('[ERROR] %s' % e) + logger.warn('this should not be passed: %s' % x) + spf.append(x) + else: + spf.append(x) + wrdcnt +=1 + + if len(spf) > 0: + split_file = spf + logger.fdebug('NEWLY SPLIT REORGD: %s' % split_file) + #10-20-2018 ---END + if len(split_file) == 1: logger.fdebug('Improperly formatted filename - there is no separation using appropriate characters between wording.') ret_sf1 = re.sub('\-',' ', ret_sf1).strip() @@ -339,7 +390,7 @@ class FileChecker(object): lastissue_label = None lastissue_position = 0 lastmod_position = 0 - + booktype = 'issue' #exceptions that are considered alpha-numeric issue numbers exceptions = ('NOW', 'AI', 'AU', 'X', 'A', 'B', 'C', 'INH', 'MU') @@ -416,7 +467,7 @@ class FileChecker(object): count = match.group() found = True - if not found: + if found is False: match = re.search('(?<=\(of\s)\d+(?=\))', sf, re.IGNORECASE) if match: count = match.group() @@ -431,7 +482,7 @@ class FileChecker(object): logger.fdebug('Issue Number SHOULD BE: ' + str(lastissue_label)) validcountchk = True - if all([lastissue_position == (split_file.index(sf) -1), lastissue_label is not None and '#' not in sf]): + if all([lastissue_position == (split_file.index(sf) -1), lastissue_label is not None, '#' not in sf, sf != 'p']): #find it in the original file to see if there's a decimal between. findst = lastissue_mod_position+1 if findst > len(modfilename): @@ -439,11 +490,17 @@ class FileChecker(object): if modfilename[findst] != '.' or modfilename[findst] != '#': #findst != '.' and findst != '#': if sf.isdigit(): - logger.fdebug('2 separate numbers detected. Assuming 2nd number is the actual issue') - possible_issuenumbers.append({'number': sf, - 'position': split_file.index(sf, lastissue_position), #modfilename.find(sf)}) - 'mod_position': self.char_file_position(modfilename, sf, lastmod_position), - 'validcountchk': validcountchk}) + seper_num = False + for x in datecheck: + if x['position'] == split_file.index(sf, lastissue_position): + seper_num = True + if seper_num is False: + logger.fdebug('2 separate numbers detected. Assuming 2nd number is the actual issue') + + #possible_issuenumbers.append({'number': sf, + # 'position': split_file.index(sf, lastissue_position), #modfilename.find(sf)}) + # 'mod_position': self.char_file_position(modfilename, sf, lastmod_position), + # 'validcountchk': validcountchk}) #used to see if the issue is an alpha-numeric (ie. 
18.NOW, 50-X, etc) lastissue_position = split_file.index(sf, lastissue_position) @@ -481,7 +538,6 @@ class FileChecker(object): else: if ('#' in sf or sf.isdigit()) or validcountchk: - logger.fdebug('validated: ' + sf) if validcountchk: #if it's not a decimal but the digits are back-to-back, then it's something else. possible_issuenumbers.append({'number': lastissue_label, @@ -593,11 +649,57 @@ class FileChecker(object): else: raise ValueError except ValueError, e: + #10-20-2018 - to detect issue numbers such as #000.0000½ + if lastissue_label is not None and lastissue_position == int(split_file.index(sf))-1 and sf == 'XCV': + logger.info('this should be: %s%s' % (lastissue_label, sf)) + pi = [] + for x in possible_issuenumbers: + if (x['number'] == lastissue_label and x['position'] == lastissue_position) or (x['number'] == sf and x['position'] == split_file.index(sf, lastissue_position)): + pass + else: + pi.append({'number': x['number'], + 'position': x['position'], + 'mod_position': x['mod_position'], + 'validcountchk': x['validcountchk']}) + + lastissue_label = '%s%s' % (lastissue_label, sf) + pi.append({'number': lastissue_label, + 'position': lastissue_position, + 'mod_position': lastmod_position, + 'validcountchk': validcountchk}) + + if len(pi) > 0: + possible_issuenumbers = pi + + elif sf.lower() == 'of' and lastissue_label is not None and lastissue_position == int(split_file.index(sf))-1: + logger.info('MINI-SERIES DETECTED') + + else: + if any([re.sub('[\(\)]', '', sf.lower()).strip() == 'tpb', re.sub('[\(\)]', '', sf.lower()).strip() == 'digital tpb']): + logger.info('TRADE PAPERBACK DETECTED. NOT DETECTING ISSUE NUMBER - ASSUMING VOLUME') + booktype = 'TPB' + try: + if volume_found['volume'] is not None: + possible_issuenumbers.append({'number': volume_found['volume'], + 'position': volume_found['position'], + 'mod_position': self.char_file_position(modfilename, volume_found['volume'], lastmod_position), + 'validcountchk': validcountchk}) + except: + possible_issuenumbers.append({'number': '1', + 'position': split_file.index(sf, lastissue_position), #modfilename.find(sf)}) + 'mod_position': self.char_file_position(modfilename, sf, lastmod_position), + 'validcountchk': validcountchk}) + + elif any([sf.lower() == 'gn', sf.lower() == 'graphic novel']): + logger.info('GRAPHIC NOVEL DETECTED. NOT DETECTING ISSUE NUMBER - ASSUMING VOLUME') + booktype = 'GN' + else: + logger.info('[%s] Error detecting issue # - ignoring this result : %s' % (e, sf)) + volumeprior = False volumeprior_label = None sep_volume = False pass - #logger.fdebug('Error detecting issue # - ignoring this result : ' + str(sf)) #keep track of where in the original modfilename the positions are in order to check against it for decimal places, etc. file_length += len(sf) + 1 #1 for space @@ -742,13 +844,19 @@ class FileChecker(object): issue_number_position -=1 if issue_number is None: - logger.fdebug('No issue number present in filename.') + if any([booktype == 'TPB', booktype == 'GN']): + logger.info('%s detected. Volume assumption is number: %s' % (booktype, volume_found)) + else: + if len(volume_found) > 0: + logger.info('UNKNOWN TPB/GN detected. 
Volume assumption is number: %s' % (volume_found)) + else: + logger.info('No issue number present in filename.') else: logger.fdebug('issue verified as : ' + issue_number) issue_volume = None if len(volume_found) > 0: issue_volume = 'v' + str(volume_found['volume']) - if all([highest_series_pos + 1 != volume_found['position'], highest_series_pos != volume_found['position'] + 1, sep_volume == False]): + if all([highest_series_pos + 1 != volume_found['position'], highest_series_pos != volume_found['position'] + 1, sep_volume == False, booktype == 'issue', len(possible_issuenumbers) > 0]): logger.fdebug('Extra item(s) are present between the volume label and the issue number. Checking..') split_file.insert(int(issue_number_position), split_file.pop(volume_found['position'])) #highest_series_pos-1, split_file.pop(volume_found['position'])) logger.fdebug('new split: ' + str(split_file)) @@ -761,10 +869,13 @@ class FileChecker(object): else: highest_series_pos = volume_found['position'] logger.fdebug('Volume detected as : ' + issue_volume) + + if all([len(volume_found) == 0, booktype != 'issue']) or all([len(volume_found) == 0, issue_number_position == len(split_file)]): + issue_volume = 'v1' #at this point it should be in a SERIES ISSUE VOLUME YEAR kind of format #if the position of the issue number is greater than the highest series position, make it the highest series position. - if issue_number_position > highest_series_pos: + if issue_number_position != len(split_file) and issue_number_position > highest_series_pos: if not volume_found: highest_series_pos = issue_number_position else: @@ -807,16 +918,13 @@ class FileChecker(object): tmpval = yearposition - issue_number_position else: tmpval = 1 - #logger.fdebug('TMPVAL: %s' % tmpval) except: pass else: if tmpval > 2: logger.fdebug('There are %s extra words between the issue # and the year position. Deciphering if issue title or part of series title.' % tmpval) tmpval1 = ' '.join(split_file[issue_number_position+1:yearposition]) - #logger.info('%s' % tmpval) if split_file[issue_number_position+1] == '-': - #logger.info('dash detected') usevalue = ' '.join(split_file[issue_number_position+2:yearposition]) splitv = split_file[issue_number_position+2:yearposition] else: @@ -904,8 +1012,15 @@ class FileChecker(object): if splitvalue is not None: logger.fdebug('[SPLITVALUE] possible issue title: %s' % splitvalue) alt_series = '%s %s' % (series_name, splitvalue) - alt_issue = splitvalue + if booktype != 'issue': + if alt_issue is not None: + alt_issue = re.sub('tpb', '', splitvalue, flags=re.I).strip() + if alt_series is not None: + alt_series = re.sub('tpb', '', alt_series, flags=re.I).strip() if alt_series is not None: + if booktype != 'issue': + if alt_series is not None: + alt_series = re.sub('tpb', '', alt_series, flags=re.I).strip() logger.fdebug('Alternate series / issue title: %s [%s]' % (alt_series, alt_issue)) #if the filename is unicoded, it won't match due to the unicode translation. Keep the unicode as well as the decoded. @@ -921,28 +1036,33 @@ class FileChecker(object): series_name = re.sub('special', '', series_name, flags=re.I).strip() series_name_decoded = re.sub('special', '', series_name_decoded, flags=re.I).strip() - if issue_number is None or series_name is None: - logger.fdebug('Cannot parse the filename properly. 
I\'m going to make note of this filename so that my evil ruler can make it work.') - if series_name is not None: - dreplace = self.dynamic_replace(series_name)['mod_seriesname'] + if (any([issue_number is None, series_name is None]) and booktype == 'issue'): + + if all([issue_number is None, booktype == 'issue', issue_volume is not None]): + logger.info('Possible UKNOWN TPB/GN detected - no issue number present, no clarification in filename, but volume present with series title') else: - dreplace = None - return {'parse_status': 'failure', - 'sub': path_list, - 'comicfilename': filename, - 'comiclocation': self.dir, - 'series_name': series_name, - 'series_name_decoded': series_name_decoded, - 'alt_series': alt_series, - 'alt_issue': alt_issue, - 'dynamic_name': dreplace, - 'issue_number': issue_number, - 'justthedigits': issue_number, #redundant but it's needed atm - 'series_volume': issue_volume, - 'issue_year': issue_year, - 'annual_comicid': None, - 'scangroup': scangroup, - 'reading_order': None} + logger.fdebug('Cannot parse the filename properly. I\'m going to make note of this filename so that my evil ruler can make it work.') + + if series_name is not None: + dreplace = self.dynamic_replace(series_name)['mod_seriesname'] + else: + dreplace = None + return {'parse_status': 'failure', + 'sub': path_list, + 'comicfilename': filename, + 'comiclocation': self.dir, + 'series_name': series_name, + 'series_name_decoded': series_name_decoded, + 'alt_series': alt_series, + 'alt_issue': alt_issue, + 'dynamic_name': dreplace, + 'issue_number': issue_number, + 'justthedigits': issue_number, #redundant but it's needed atm + 'series_volume': issue_volume, + 'issue_year': issue_year, + 'annual_comicid': None, + 'scangroup': scangroup, + 'reading_order': None} if self.justparse: return {'parse_status': 'success', @@ -1323,7 +1443,7 @@ class FileChecker(object): return {'AS_Alt': AS_Alt, 'AS_Tuple': AS_Tuple} - def checkthedate(self, txt): + def checkthedate(self, txt, fulldate=False): # txt='''\ # Jan 19, 1990 # January 19, 1990 @@ -1334,17 +1454,28 @@ class FileChecker(object): # Jan 1990 # January1990''' - fmts = ('%Y','%b %d, %Y','%B %d, %Y','%B %d %Y','%m/%d/%Y','%m/%d/%y','%b %Y','%B%Y','%b %d,%Y','%m-%Y','%B %Y','%Y-%m-%d','%Y-%m','%Y%m') + fmts = ('%Y','%b %d, %Y','%B %d, %Y','%B %d %Y','%m/%d/%Y','%m/%d/%y','(%m/%d/%Y)','%b %Y','%B%Y','%b %d,%Y','%m-%Y','%B %Y','%Y-%m-%d','%Y-%m','%Y%m') parsed=[] - for e in txt.splitlines(): - for fmt in fmts: - try: - t = dt.datetime.strptime(e, fmt) - parsed.append((e, fmt, t)) - break - except ValueError as err: - pass + if fulldate is False: + for e in txt.splitlines(): + for fmt in fmts: + try: + t = dt.datetime.strptime(e, fmt) + parsed.append((e, fmt, t)) + break + except ValueError as err: + pass + else: + for e in txt.split(): + logger.info('word: %s' % e) + for fmt in fmts: + try: + t = dt.datetime.strptime(e, fmt) + parsed.append((e, fmt, t)) + break + except ValueError as err: + pass # check that all the cases are handled success={t[0] for t in parsed} @@ -1354,12 +1485,17 @@ class FileChecker(object): dateyear = None + #logger.info('parsed: %s' % parsed) + for t in parsed: # logger.fdebug('"{:20}" => "{:20}" => {}'.format(*t) - dateyear = t[2].year + if fulldate is False: + dateline = t[2].year + else: + dateline = t[2].strftime('%Y-%m-%d') break - return dateyear + return dateline def validateAndCreateDirectory(dir, create=False, module=None): if module is None: diff --git a/mylar/helpers.py b/mylar/helpers.py index 5c23988f..f0e1dec0 100755 
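Not part of the diff — a condensed model of checkthedate() after the change above: fulldate=False keeps the legacy year-only return, while fulldate=True scans word-by-word and returns a full YYYY-MM-DD string. The trimmed format tuple here is an assumption; the real list is longer:

import datetime as dt

FMTS = ('%Y', '%b %d, %Y', '%B %d, %Y', '%m/%d/%Y', '(%m/%d/%Y)',
        '%b %Y', '%Y-%m-%d', '%Y-%m', '%Y%m')

def checkthedate(txt, fulldate=False):
    # splitlines() suits one-date-per-line input; split() lets a date
    # embedded in running prose (like a CV description) still be found
    for cand in (txt.split() if fulldate else txt.splitlines()):
        for fmt in FMTS:
            try:
                t = dt.datetime.strptime(cand, fmt)
            except ValueError:
                continue
            return t.strftime('%Y-%m-%d') if fulldate else t.year
    return None

print(checkthedate('Jan 19, 1990'))                                    # 1990
print(checkthedate('digital release 2018-06-27 here', fulldate=True))  # 2018-06-27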
--- a/mylar/helpers.py +++ b/mylar/helpers.py @@ -1424,7 +1424,8 @@ def havetotals(refreshit=None): "percent": percent, "totalissues": totalissues, "haveissues": haveissues, - "DateAdded": comic['LastUpdated']}) + "DateAdded": comic['LastUpdated'], + "ComicType": comic['Type']}) return comics @@ -1842,17 +1843,22 @@ def listPull(weeknumber, year): library[row['ComicID']] = row['ComicID'] return library -def listLibrary(): +def listLibrary(comicid=None): import db library = {} myDB = db.DBConnection() - list = myDB.select("SELECT a.comicid, b.releasecomicid, a.status FROM Comics AS a LEFT JOIN annuals AS b on a.comicid=b.comicid group by a.comicid") + if comicid is None: + list = myDB.select("SELECT a.comicid, b.releasecomicid, a.status FROM Comics AS a LEFT JOIN annuals AS b on a.comicid=b.comicid group by a.comicid") + else: + list = myDB.select("SELECT a.comicid, b.releasecomicid, a.status FROM Comics AS a LEFT JOIN annuals AS b on a.comicid=b.comicid WHERE a.comicid=? group by a.comicid", [re.sub('4050-', '', comicid).strip()]) + for row in list: library[row['ComicID']] = {'comicid': row['ComicID'], 'status': row['Status']} if row['ReleaseComicID'] is not None: library[row['ReleaseComicID']] = {'comicid': row['ComicID'], 'status': row['Status']} + return library def listStoryArcs(): diff --git a/mylar/importer.py b/mylar/importer.py index d9b9504c..a4b0b4cb 100644 --- a/mylar/importer.py +++ b/mylar/importer.py @@ -22,6 +22,7 @@ import sys import shlex import datetime import re +import json import urllib import urllib2 import shutil @@ -204,17 +205,23 @@ def addComictoDB(comicid, mismatch=None, pullupd=None, imported=None, ogcname=No publisher = re.sub('!', '', comic['ComicPublisher']) # thanks Boom! publisher = helpers.filesafe(publisher) year = SeriesYear - if comicVol is None: + booktype = comic['Type'] + if booktype == 'Print' or all([comic['Type'] != 'Print', mylar.CONFIG.FORMAT_BOOKTYPE is False]): + chunk_fb = re.sub('\$Type', '', mylar.CONFIG.FOLDER_FORMAT) + chunk_b = re.compile(r'\s+') + chunk_folder_format = chunk_b.sub(' ', chunk_fb) + else: + chunk_folder_format = mylar.CONFIG.FOLDER_FORMAT + + if any([comicVol is None, comic['Type'] != 'Print']): comicVol = 'None' #if comversion is None, remove it so it doesn't populate with 'None' if comicVol == 'None': - chunk_f_f = re.sub('\$VolumeN', '', mylar.CONFIG.FOLDER_FORMAT) + chunk_f_f = re.sub('\$VolumeN', '', chunk_folder_format) chunk_f = re.compile(r'\s+') chunk_folder_format = chunk_f.sub(' ', chunk_f_f) logger.fdebug('No version # found for series, removing from folder format') logger.fdebug("new folder format: " + str(chunk_folder_format)) - else: - chunk_folder_format = mylar.CONFIG.FOLDER_FORMAT #do work to generate folder path @@ -225,12 +232,14 @@ def addComictoDB(comicid, mismatch=None, pullupd=None, imported=None, ogcname=No '$publisher': publisher.lower(), '$VolumeY': 'V' + str(year), '$VolumeN': comicVol.upper(), - '$Annual': 'Annual' + '$Annual': 'Annual', + '$Type': booktype } try: if mylar.CONFIG.FOLDER_FORMAT == '': comlocation = os.path.join(mylar.CONFIG.DESTINATION_DIR, comicdir, " (" + SeriesYear + ")") else: + chunk_folder_format = re.sub('[()|[]]', '', chunk_folder_format).strip() comlocation = os.path.join(mylar.CONFIG.DESTINATION_DIR, helpers.replace_all(chunk_folder_format, values)) except Exception as e: if 'TypeError' in e: @@ -308,6 +317,11 @@ def addComictoDB(comicid, mismatch=None, pullupd=None, imported=None, ogcname=No tmpseriesname = as_dinfo['mod_seriesname'] dynamic_seriesname = 
re.sub('[\|\s]','', tmpseriesname.lower()).strip() + if comic['Issue_List'] != 'None': + issue_list = json.dumps(comic['Issue_List']) + else: + issue_list = None + controlValueDict = {"ComicID": comicid} newValueDict = {"ComicName": comic['ComicName'], "ComicSortName": sortname, @@ -327,6 +341,7 @@ def addComictoDB(comicid, mismatch=None, pullupd=None, imported=None, ogcname=No # "ComicPublished": gcdinfo['resultPublished'], "ComicPublished": "Unknown", "Type": comic['Type'], + "Collects": issue_list, "DateAdded": helpers.today(), "Status": "Loading"} @@ -887,6 +902,7 @@ def issue_collection(issuedata, nostatus): "Issue_Number": issue['Issue_Number'], "IssueDate": issue['IssueDate'], "ReleaseDate": issue['ReleaseDate'], + "DigitalDate": issue['DigitalDate'], "Int_IssueNumber": issue['Int_IssueNumber'], "ImageURL": issue['ImageURL'], "ImageURL_ALT": issue['ImageURL_ALT'] @@ -1005,6 +1021,7 @@ def manualAnnual(manual_comicid=None, comicname=None, comicyear=None, comicid=No 'IssueName': cleanname, 'IssueDate': str(firstval['Issue_Date']), 'ReleaseDate': str(firstval['Store_Date']), + 'DigitalDate': str(firstval['Digital_Date']), 'Status': astatus, 'ReleaseComicName': sr['ComicName']}) n+=1 @@ -1018,6 +1035,7 @@ def manualAnnual(manual_comicid=None, comicname=None, comicyear=None, comicid=No "Int_IssueNumber": helpers.issuedigits(ann['Issue_Number']), "IssueDate": ann['IssueDate'], "ReleaseDate": ann['ReleaseDate'], + "DigitalDate": ann['DigitalDate'], "IssueName": ann['IssueName'], "ComicID": ann['ComicID'], #this is the series ID "ReleaseComicID": ann['ReleaseComicID'], #this is the series ID for the annual(s) @@ -1100,6 +1118,7 @@ def updateissuedata(comicid, comicname=None, issued=None, comicIssues=None, call issname = cleanname issdate = str(firstval['Issue_Date']) storedate = str(firstval['Store_Date']) + digitaldate = str(firstval['Digital_Date']) int_issnum = None if issnum.isdigit(): int_issnum = int(issnum) * 1000 @@ -1264,6 +1283,7 @@ def updateissuedata(comicid, comicname=None, issued=None, comicIssues=None, call "Issue_Number": issnum, "IssueDate": issdate, "ReleaseDate": storedate, + "DigitalDate": digitaldate, "Int_IssueNumber": int_issnum, "ImageURL": firstval['Image'], "ImageURL_ALT": firstval['ImageALT']}) @@ -1467,6 +1487,7 @@ def annual_check(ComicName, SeriesYear, comicid, issuetype, issuechk, annualslis issname = cleanname issdate = str(firstval['Issue_Date']) stdate = str(firstval['Store_Date']) + digdate = str(firstval['Digital_Date']) int_issnum = helpers.issuedigits(issnum) iss_exists = myDB.selectone('SELECT * from annuals WHERE IssueID=?', [issid]).fetchone() @@ -1494,6 +1515,7 @@ def annual_check(ComicName, SeriesYear, comicid, issuetype, issuechk, annualslis "Int_IssueNumber": int_issnum, "IssueDate": issdate, "ReleaseDate": stdate, + "DigitalDate": digdate, "IssueName": issname, "ComicID": comicid, "IssueID": issid, diff --git a/mylar/locg.py b/mylar/locg.py index bf2b6729..bd7b046c 100755 --- a/mylar/locg.py +++ b/mylar/locg.py @@ -83,12 +83,13 @@ def locg(pulldate=None,weeknumber=None,year=None): 'annuallink': x['link'], 'year': x['year'], 'volume': x['volume'], - 'seriesyear': x['seriesyear']}) + 'seriesyear': x['seriesyear'], + 'format': x['type']}) shipdate = x['shipdate'] myDB = db.DBConnection() - myDB.action("CREATE TABLE IF NOT EXISTS weekly (SHIPDATE, PUBLISHER text, ISSUE text, COMIC VARCHAR(150), EXTRA text, STATUS text, ComicID text, IssueID text, CV_Last_Update text, DynamicName text, weeknumber text, year text, volume text, seriesyear text, 
annuallink text, rowid INTEGER PRIMARY KEY)") + myDB.action("CREATE TABLE IF NOT EXISTS weekly (SHIPDATE, PUBLISHER text, ISSUE text, COMIC VARCHAR(150), EXTRA text, STATUS text, ComicID text, IssueID text, CV_Last_Update text, DynamicName text, weeknumber text, year text, volume text, seriesyear text, annuallink text, format text, rowid INTEGER PRIMARY KEY)") #clear out the upcoming table here so they show the new values properly. if pulldate == '00000000': @@ -123,7 +124,8 @@ def locg(pulldate=None,weeknumber=None,year=None): 'ANNUALLINK': x['annuallink'], 'YEAR': x['year'], 'VOLUME': x['volume'], - 'SERIESYEAR': x['seriesyear']} + 'SERIESYEAR': x['seriesyear'], + 'FORMAT': x['format']} myDB.upsert("weekly", newValueDict, controlValueDict) logger.info('[PULL-LIST] Successfully populated pull-list into Mylar for the week of: ' + str(weeknumber)) diff --git a/mylar/mb.py b/mylar/mb.py index 34d34a44..b58f48e2 100755 --- a/mylar/mb.py +++ b/mylar/mb.py @@ -373,7 +373,7 @@ def findComic(name, mode, issue, limityear=None, type=None): xmltype = 'Print' elif 'digital' in xmldesc[:60].lower() and 'digital edition can be found' not in xmldesc.lower(): xmltype = 'Digital' - elif 'paperback' in xmldesc[:60].lower() and 'paperback can be found' not in xmldesc.lower(): + elif all(['paperback' in xmldesc[:60].lower(), 'paperback can be found' not in xmldesc.lower()]) or 'collects' in xmldesc.lower(): xmltype = 'TPB' elif 'hardcover' in xmldesc[:60].lower() and 'hardcover can be found' not in xmldesc.lower(): xmltype = 'HC' @@ -458,8 +458,8 @@ def storyarcinfo(xmlid): try: logger.fdebug('story_arc ascension') - issuecount = len( arcdom.getElementsByTagName('issue') ) issuedom = arcdom.getElementsByTagName('issue') + issuecount = len( issuedom ) #arcdom.getElementsByTagName('issue') ) isc = 0 arclist = '' ordernum = 1 diff --git a/mylar/search.py b/mylar/search.py index 802f8cff..8cad3bd6 100755 --- a/mylar/search.py +++ b/mylar/search.py @@ -38,7 +38,7 @@ from base64 import b16encode, b32decode from operator import itemgetter from wsgiref.handlers import format_date_time -def search_init(ComicName, IssueNumber, ComicYear, SeriesYear, Publisher, IssueDate, StoreDate, IssueID, AlternateSearch=None, UseFuzzy=None, ComicVersion=None, SARC=None, IssueArcID=None, mode=None, rsscheck=None, ComicID=None, manualsearch=None, filesafe=None, allow_packs=None, oneoff=False, manual=False, torrentid_32p=None): +def search_init(ComicName, IssueNumber, ComicYear, SeriesYear, Publisher, IssueDate, StoreDate, IssueID, AlternateSearch=None, UseFuzzy=None, ComicVersion=None, SARC=None, IssueArcID=None, mode=None, rsscheck=None, ComicID=None, manualsearch=None, filesafe=None, allow_packs=None, oneoff=False, manual=False, torrentid_32p=None, digitaldate=None): mylar.COMICINFO = [] unaltered_ComicName = None @@ -313,7 +313,7 @@ def search_init(ComicName, IssueNumber, ComicYear, SeriesYear, Publisher, IssueD prov_count+=1 continue if searchmode == 'rss': - findit = NZB_SEARCH(ComicName, IssueNumber, ComicYear, SeriesYear, Publisher, IssueDate, StoreDate, searchprov, send_prov_count, IssDateFix, IssueID, UseFuzzy, newznab_host, ComicVersion=ComicVersion, SARC=SARC, IssueArcID=IssueArcID, RSS="yes", ComicID=ComicID, issuetitle=issuetitle, unaltered_ComicName=unaltered_ComicName, oneoff=oneoff, cmloopit=cmloopit, manual=manual, torznab_host=torznab_host) + findit = NZB_SEARCH(ComicName, IssueNumber, ComicYear, SeriesYear, Publisher, IssueDate, StoreDate, searchprov, send_prov_count, IssDateFix, IssueID, UseFuzzy, newznab_host, 
ComicVersion=ComicVersion, SARC=SARC, IssueArcID=IssueArcID, RSS="yes", ComicID=ComicID, issuetitle=issuetitle, unaltered_ComicName=unaltered_ComicName, oneoff=oneoff, cmloopit=cmloopit, manual=manual, torznab_host=torznab_host, digitaldate=digitaldate) if findit['status'] is False: if AlternateSearch is not None and AlternateSearch != "None": chkthealt = AlternateSearch.split('##') @@ -323,7 +323,7 @@ def search_init(ComicName, IssueNumber, ComicYear, SeriesYear, Publisher, IssueD for calt in chkthealt: AS_Alternate = re.sub('##', '', calt) logger.info(u"Alternate Search pattern detected...re-adjusting to : " + str(AS_Alternate)) - findit = NZB_SEARCH(AS_Alternate, IssueNumber, ComicYear, SeriesYear, Publisher, IssueDate, StoreDate, searchprov, send_prov_count, IssDateFix, IssueID, UseFuzzy, newznab_host, ComicVersion=ComicVersion, SARC=SARC, IssueArcID=IssueArcID, RSS="yes", ComicID=ComicID, issuetitle=issuetitle, unaltered_ComicName=AS_Alternate, allow_packs=allow_packs, oneoff=oneoff, cmloopit=cmloopit, manual=manual, torznab_host=torznab_host) + findit = NZB_SEARCH(AS_Alternate, IssueNumber, ComicYear, SeriesYear, Publisher, IssueDate, StoreDate, searchprov, send_prov_count, IssDateFix, IssueID, UseFuzzy, newznab_host, ComicVersion=ComicVersion, SARC=SARC, IssueArcID=IssueArcID, RSS="yes", ComicID=ComicID, issuetitle=issuetitle, unaltered_ComicName=AS_Alternate, allow_packs=allow_packs, oneoff=oneoff, cmloopit=cmloopit, manual=manual, torznab_host=torznab_host, digitaldate=digitaldate) if findit['status'] is True: break if findit['status'] is True: @@ -333,7 +333,7 @@ def search_init(ComicName, IssueNumber, ComicYear, SeriesYear, Publisher, IssueD break else: - findit = NZB_SEARCH(ComicName, IssueNumber, ComicYear, SeriesYear, Publisher, IssueDate, StoreDate, searchprov, send_prov_count, IssDateFix, IssueID, UseFuzzy, newznab_host, ComicVersion=ComicVersion, SARC=SARC, IssueArcID=IssueArcID, RSS="no", ComicID=ComicID, issuetitle=issuetitle, unaltered_ComicName=unaltered_ComicName, allow_packs=allow_packs, oneoff=oneoff, cmloopit=cmloopit, manual=manual, torznab_host=torznab_host, torrentid_32p=torrentid_32p) + findit = NZB_SEARCH(ComicName, IssueNumber, ComicYear, SeriesYear, Publisher, IssueDate, StoreDate, searchprov, send_prov_count, IssDateFix, IssueID, UseFuzzy, newznab_host, ComicVersion=ComicVersion, SARC=SARC, IssueArcID=IssueArcID, RSS="no", ComicID=ComicID, issuetitle=issuetitle, unaltered_ComicName=unaltered_ComicName, allow_packs=allow_packs, oneoff=oneoff, cmloopit=cmloopit, manual=manual, torznab_host=torznab_host, torrentid_32p=torrentid_32p, digitaldate=digitaldate) if all([searchprov == '32P', checked_once is False]) or all([searchprov == 'Public Torrents', checked_once is False]): checked_once = True if findit['status'] is False: @@ -345,7 +345,7 @@ def search_init(ComicName, IssueNumber, ComicYear, SeriesYear, Publisher, IssueD for calt in chkthealt: AS_Alternate = re.sub('##', '', calt) logger.info(u"Alternate Search pattern detected...re-adjusting to : " + str(AS_Alternate)) - findit = NZB_SEARCH(AS_Alternate, IssueNumber, ComicYear, SeriesYear, Publisher, IssueDate, StoreDate, searchprov, send_prov_count, IssDateFix, IssueID, UseFuzzy, newznab_host, ComicVersion=ComicVersion, SARC=SARC, IssueArcID=IssueArcID, RSS="no", ComicID=ComicID, issuetitle=issuetitle, unaltered_ComicName=unaltered_ComicName, allow_packs=allow_packs, oneoff=oneoff, cmloopit=cmloopit, manual=manual, torznab_host=torznab_host, torrentid_32p=torrentid_32p) + findit = NZB_SEARCH(AS_Alternate, 
IssueNumber, ComicYear, SeriesYear, Publisher, IssueDate, StoreDate, searchprov, send_prov_count, IssDateFix, IssueID, UseFuzzy, newznab_host, ComicVersion=ComicVersion, SARC=SARC, IssueArcID=IssueArcID, RSS="no", ComicID=ComicID, issuetitle=issuetitle, unaltered_ComicName=unaltered_ComicName, allow_packs=allow_packs, oneoff=oneoff, cmloopit=cmloopit, manual=manual, torznab_host=torznab_host, torrentid_32p=torrentid_32p, digitaldate=digitaldate) if findit['status'] is True: break if findit['status'] is True: @@ -409,7 +409,7 @@ def search_init(ComicName, IssueNumber, ComicYear, SeriesYear, Publisher, IssueD return findit, 'None' -def NZB_SEARCH(ComicName, IssueNumber, ComicYear, SeriesYear, Publisher, IssueDate, StoreDate, nzbprov, prov_count, IssDateFix, IssueID, UseFuzzy, newznab_host=None, ComicVersion=None, SARC=None, IssueArcID=None, RSS=None, ComicID=None, issuetitle=None, unaltered_ComicName=None, allow_packs=None, oneoff=False, cmloopit=None, manual=False, torznab_host=None, torrentid_32p=None): +def NZB_SEARCH(ComicName, IssueNumber, ComicYear, SeriesYear, Publisher, IssueDate, StoreDate, nzbprov, prov_count, IssDateFix, IssueID, UseFuzzy, newznab_host=None, ComicVersion=None, SARC=None, IssueArcID=None, RSS=None, ComicID=None, issuetitle=None, unaltered_ComicName=None, allow_packs=None, oneoff=False, cmloopit=None, manual=False, torznab_host=None, torrentid_32p=None, digitaldate=None): if any([allow_packs is None, allow_packs == 'None', allow_packs == 0, allow_packs == '0']) and all([mylar.CONFIG.ENABLE_TORRENT_SEARCH, mylar.CONFIG.ENABLE_32P]): allow_packs = False @@ -927,8 +927,11 @@ def NZB_SEARCH(ComicName, IssueNumber, ComicYear, SeriesYear, Publisher, IssueDa continue else: stdate = IssueDate + logger.fdebug('issue date used is : %s' % stdate) else: stdate = StoreDate + logger.fdebug('store date used is : %s' % stdate) + logger.fdebug('date used is : %s' % stdate) postdate_int = None if nzbprov == '32P' and RSS == 'no': @@ -959,42 +962,70 @@ def NZB_SEARCH(ComicName, IssueNumber, ComicYear, SeriesYear, Publisher, IssueDa logger.warn('Unable to parse posting date from provider result set for :' + entry['title']) continue - #convert it to a Thu, 06 Feb 2014 00:00:00 format - issue_converted = datetime.datetime.strptime(stdate.rstrip(), '%Y-%m-%d') - issue_convert = issue_converted + datetime.timedelta(days=-1) - # to get past different locale's os-dependent dates, let's convert it to a generic datetime format - try: - stamp = time.mktime(issue_convert.timetuple()) - issconv = format_date_time(stamp) - except OverflowError: - logger.fdebug('Error attempting to convert the timestamp into a generic format. Probably due to the epoch limiation.') - issconv = issue_convert.strftime('%a, %d %b %Y %H:%M:%S') - #convert it to a tuple - econv = email.utils.parsedate_tz(issconv) - econv2 = datetime.datetime(*econv[:6]) - #convert it to a numeric and drop the GMT/Timezone - try: - issuedate_int = time.mktime(econv[:len(econv) -1]) - except OverflowError: - logger.fdebug('Unable to convert timestamp to integer format. 
Forcing things through.') - isyear = econv[1] - epochyr = '1970' - if int(isyear) <= int(epochyr): - tm = datetime.datetime(1970, 1, 1) - issuedate_int = int(time.mktime(tm.timetuple())) + if all([digitaldate != '0000-00-00', digitaldate is not None]): + i = 0 + else: + digitaldate_int = '00000000' + i = 1 + + while i <= 1: + logger.info('i: %s' % i) + if i == 0: + usedate = digitaldate else: - continue + usedate = stdate + logger.fdebug('usedate: %s' % usedate) + #convert it to a Thu, 06 Feb 2014 00:00:00 format + issue_converted = datetime.datetime.strptime(usedate.rstrip(), '%Y-%m-%d') + issue_convert = issue_converted + datetime.timedelta(days=-1) + # to get past different locale's os-dependent dates, let's convert it to a generic datetime format + try: + stamp = time.mktime(issue_convert.timetuple()) + issconv = format_date_time(stamp) + except OverflowError: + logger.fdebug('Error attempting to convert the timestamp into a generic format. Probably due to the epoch limiation.') + issconv = issue_convert.strftime('%a, %d %b %Y %H:%M:%S') + #convert it to a tuple + econv = email.utils.parsedate_tz(issconv) + econv2 = datetime.datetime(*econv[:6]) + #convert it to a numeric and drop the GMT/Timezone + try: + usedate_int = time.mktime(econv[:len(econv) -1]) + except OverflowError: + logger.fdebug('Unable to convert timestamp to integer format. Forcing things through.') + isyear = econv[1] + epochyr = '1970' + if int(isyear) <= int(epochyr): + tm = datetime.datetime(1970, 1, 1) + usedate_int = int(time.mktime(tm.timetuple())) + else: + continue + if i == 0: + digitaldate_int = usedate_int + digconv2 = econv2 + else: + issuedate_int = usedate_int + issconv2 = econv2 + i+=1 + try: #try new method to get around issues populating in a diff timezone thereby putting them in a different day. - if dateconv2.date() < econv2.date(): - logger.fdebug('[CONV]pubdate: %s < storedate: %s' % (dateconv2.date(), econv2.date())) + #logger.info('digitaldate: %s' % digitaldate) + #logger.info('dateconv2: %s' % dateconv2.date()) + #logger.info('digconv2: %s' % digconv2.date()) + if digitaldate != '0000-00-00' and dateconv2.date() >= digconv2.date(): + logger.fdebug(str(pubdate) + ' is after DIGITAL store date of ' + str(digitaldate)) + elif dateconv2.date() < issconv2.date(): + logger.fdebug('[CONV]pubdate: %s < storedate: %s' % (dateconv2.date(), issconv2.date())) logger.fdebug(str(pubdate) + ' is before store date of ' + str(stdate) + '. Ignoring search result as this is not the right issue.') continue else: logger.fdebug(str(pubdate) + ' is after store date of ' + str(stdate)) except: #if the above fails, drop down to the integer compare method as a failsafe. - if postdate_int < issuedate_int: + if digitaldate != '0000-00-00' and postdate_int >= digitaldate_int: + logger.fdebug(str(pubdate) + ' is after DIGITAL store date of ' + str(digitaldate)) + elif postdate_int < issuedate_int: logger.fdebug('[INT]pubdate: %s < storedate: %s' % (postdate_int, issuedate_int)) logger.fdebug(str(pubdate) + ' is before store date of ' + str(stdate) + '. 
Ignoring search result as this is not the right issue.') continue @@ -1311,10 +1342,14 @@ def NZB_SEARCH(ComicName, IssueNumber, ComicYear, SeriesYear, Publisher, IssueDa #need to convert dates to just be yyyy-mm-dd and do comparison, time operator in the below calc as well which probably throws off$ F_ComicVersion = '1' if postdate_int is not None: - if postdate_int >= issuedate_int and nzbprov == '32P': + if digitaldate != '0000-00-00' and all([postdate_int >= digitaldate_int, nzbprov == '32P']): + logger.fdebug('32P torrent discovery. Posting date (' + str(pubdate) + ') is after DIGITAL store date (' + str(digitaldate) + '), forcing volume label to be the same as series label (0-Day Enforcement): v' + str(F_ComicVersion) + ' --> v' + str(S_ComicVersion)) + F_ComicVersion = D_ComicVersion + elif all([postdate_int >= issuedate_int, nzbprov == '32P']): logger.fdebug('32P torrent discovery. Posting date (' + str(pubdate) + ') is after store date (' + str(stdate) + '), forcing volume label to be the same as series label (0-Day Enforcement): v' + str(F_ComicVersion) + ' --> v' + str(S_ComicVersion)) F_ComicVersion = D_ComicVersion - + else: + pass logger.fdebug("FCVersion: " + str(F_ComicVersion)) logger.fdebug("DCVersion: " + str(D_ComicVersion)) logger.fdebug("SCVersion: " + str(S_ComicVersion)) @@ -1914,6 +1949,7 @@ def searchforissue(issueid=None, new=False, rsscheck=None, manual=False): 'Issue_Number': iss['Issue_Number'], 'IssueDate': iss['IssueDate'], 'StoreDate': iss['ReleaseDate'], + 'DigitalDate': iss['DigitalDate'], 'SARC': None, 'StoryArcID': None, 'IssueArcID': None, @@ -1933,6 +1969,7 @@ def searchforissue(issueid=None, new=False, rsscheck=None, manual=False): 'Issue_Number': iss['IssueNumber'], 'IssueDate': iss['IssueDate'], 'StoreDate': iss['ReleaseDate'], + 'DigitalDate': iss['DigitalDate'], 'SARC': iss['StoryArc'], 'StoryArcID': iss['StoryArcID'], 'IssueArcID': iss['IssueArcID'], @@ -1952,6 +1989,7 @@ def searchforissue(issueid=None, new=False, rsscheck=None, manual=False): 'Issue_Number': iss['Issue_Number'], 'IssueDate': iss['IssueDate'], 'StoreDate': iss['ReleaseDate'], #need to replace with Store date + 'DigitalDate': iss['DigitalDate'], 'SARC': None, 'StoryArcID': None, 'IssueArcID': None, @@ -1991,7 +2029,7 @@ def searchforissue(issueid=None, new=False, rsscheck=None, manual=False): else: storyarc_watchlist = True if result['StoreDate'] == '0000-00-00' or result['StoreDate'] is None: - if result['IssueDate'] is None or result['IssueDate'] == '0000-00-00': + if any([result['IssueDate'] is None, result['IssueDate'] == '0000-00-00']) and result['DigitalDate'] == '0000-00-00': logger.fdebug('ComicID: ' + str(result['ComicID']) + ' has invalid Date data. 
Skipping searching for this series.') continue @@ -2018,6 +2056,7 @@ def searchforissue(issueid=None, new=False, rsscheck=None, manual=False): IssueDate = result['IssueDate'] StoreDate = result['StoreDate'] + DigitalDate = result['DigitalDate'] if result['IssueDate'] is None: ComicYear = SeriesYear @@ -2046,7 +2085,7 @@ def searchforissue(issueid=None, new=False, rsscheck=None, manual=False): mode = result['mode'] - foundNZB, prov = search_init(comic['ComicName'], result['Issue_Number'], str(ComicYear), SeriesYear, Publisher, IssueDate, StoreDate, result['IssueID'], AlternateSearch, UseFuzzy, ComicVersion, SARC=result['SARC'], IssueArcID=result['IssueArcID'], mode=mode, rsscheck=rsscheck, ComicID=result['ComicID'], filesafe=Comicname_filesafe, allow_packs=AllowPacks, oneoff=OneOff, torrentid_32p=TorrentID_32p) + foundNZB, prov = search_init(comic['ComicName'], result['Issue_Number'], str(ComicYear), SeriesYear, Publisher, IssueDate, StoreDate, result['IssueID'], AlternateSearch, UseFuzzy, ComicVersion, SARC=result['SARC'], IssueArcID=result['IssueArcID'], mode=mode, rsscheck=rsscheck, ComicID=result['ComicID'], filesafe=Comicname_filesafe, allow_packs=AllowPacks, oneoff=OneOff, torrentid_32p=TorrentID_32p, digitaldate=DigitalDate) if foundNZB['status'] is True: updater.foundsearch(result['ComicID'], result['IssueID'], mode=mode, provider=prov, SARC=result['SARC'], IssueArcID=result['IssueArcID'], hash=foundNZB['info']['t_hash']) @@ -2093,6 +2132,7 @@ def searchforissue(issueid=None, new=False, rsscheck=None, manual=False): actissueid = None IssueDate = result['IssueDate'] StoreDate = result['ReleaseDate'] + DigitalDate = result['DigitalDate'] TorrentID_32p = None elif mode == 'pullwant': ComicName = result['COMIC'] @@ -2109,6 +2149,7 @@ def searchforissue(issueid=None, new=False, rsscheck=None, manual=False): TorrentID_32p = None IssueDate = result['SHIPDATE'] StoreDate = IssueDate + DigitalDate = '0000-00-00' else: comic = myDB.selectone('SELECT * FROM comics where ComicID=?', [ComicID]).fetchone() if mode == 'want_ann': @@ -2126,6 +2167,7 @@ def searchforissue(issueid=None, new=False, rsscheck=None, manual=False): ComicVersion = comic['ComicVersion'] IssueDate = result['IssueDate'] StoreDate = result['ReleaseDate'] + DigitalDate = result['DigitalDate'] SARC = None IssueArcID = None actissueid = issueid @@ -2138,7 +2180,7 @@ def searchforissue(issueid=None, new=False, rsscheck=None, manual=False): else: IssueYear = str(IssueDate)[:4] - foundNZB, prov = search_init(ComicName, IssueNumber, str(IssueYear), SeriesYear, Publisher, IssueDate, StoreDate, actissueid, AlternateSearch, UseFuzzy, ComicVersion, SARC=SARC, IssueArcID=IssueArcID, mode=mode, rsscheck=rsscheck, ComicID=ComicID, filesafe=Comicname_filesafe, allow_packs=allow_packs, oneoff=oneoff, manual=manual, torrentid_32p=TorrentID_32p) + foundNZB, prov = search_init(ComicName, IssueNumber, str(IssueYear), SeriesYear, Publisher, IssueDate, StoreDate, actissueid, AlternateSearch, UseFuzzy, ComicVersion, SARC=SARC, IssueArcID=IssueArcID, mode=mode, rsscheck=rsscheck, ComicID=ComicID, filesafe=Comicname_filesafe, allow_packs=allow_packs, oneoff=oneoff, manual=manual, torrentid_32p=TorrentID_32p, digitaldate=DigitalDate) if manual is True: return foundNZB if foundNZB['status'] is True: @@ -2192,7 +2234,7 @@ def searchIssueIDList(issuelist): else: AllowPacks = False - foundNZB, prov = search_init(comic['ComicName'], issue['Issue_Number'], str(IssueYear), comic['ComicYear'], Publisher, issue['IssueDate'], issue['ReleaseDate'], 
@@ -2018,6 +2056,7 @@ def searchforissue(issueid=None, new=False, rsscheck=None, manual=False):
                 IssueDate = result['IssueDate']
                 StoreDate = result['StoreDate']
+                DigitalDate = result['DigitalDate']

                 if result['IssueDate'] is None:
                     ComicYear = SeriesYear
@@ -2046,7 +2085,7 @@ def searchforissue(issueid=None, new=False, rsscheck=None, manual=False):

                 mode = result['mode']

-                foundNZB, prov = search_init(comic['ComicName'], result['Issue_Number'], str(ComicYear), SeriesYear, Publisher, IssueDate, StoreDate, result['IssueID'], AlternateSearch, UseFuzzy, ComicVersion, SARC=result['SARC'], IssueArcID=result['IssueArcID'], mode=mode, rsscheck=rsscheck, ComicID=result['ComicID'], filesafe=Comicname_filesafe, allow_packs=AllowPacks, oneoff=OneOff, torrentid_32p=TorrentID_32p)
+                foundNZB, prov = search_init(comic['ComicName'], result['Issue_Number'], str(ComicYear), SeriesYear, Publisher, IssueDate, StoreDate, result['IssueID'], AlternateSearch, UseFuzzy, ComicVersion, SARC=result['SARC'], IssueArcID=result['IssueArcID'], mode=mode, rsscheck=rsscheck, ComicID=result['ComicID'], filesafe=Comicname_filesafe, allow_packs=AllowPacks, oneoff=OneOff, torrentid_32p=TorrentID_32p, digitaldate=DigitalDate)

                 if foundNZB['status'] is True:
                     updater.foundsearch(result['ComicID'], result['IssueID'], mode=mode, provider=prov, SARC=result['SARC'], IssueArcID=result['IssueArcID'], hash=foundNZB['info']['t_hash'])
@@ -2093,6 +2132,7 @@ def searchforissue(issueid=None, new=False, rsscheck=None, manual=False):
             actissueid = None
             IssueDate = result['IssueDate']
             StoreDate = result['ReleaseDate']
+            DigitalDate = result['DigitalDate']
             TorrentID_32p = None
         elif mode == 'pullwant':
             ComicName = result['COMIC']
@@ -2109,6 +2149,7 @@ def searchforissue(issueid=None, new=False, rsscheck=None, manual=False):
             TorrentID_32p = None
             IssueDate = result['SHIPDATE']
             StoreDate = IssueDate
+            DigitalDate = '0000-00-00'
         else:
             comic = myDB.selectone('SELECT * FROM comics where ComicID=?', [ComicID]).fetchone()
             if mode == 'want_ann':
@@ -2126,6 +2167,7 @@ def searchforissue(issueid=None, new=False, rsscheck=None, manual=False):
             ComicVersion = comic['ComicVersion']
             IssueDate = result['IssueDate']
             StoreDate = result['ReleaseDate']
+            DigitalDate = result['DigitalDate']
             SARC = None
             IssueArcID = None
             actissueid = issueid
@@ -2138,7 +2180,7 @@ def searchforissue(issueid=None, new=False, rsscheck=None, manual=False):
         else:
             IssueYear = str(IssueDate)[:4]

-        foundNZB, prov = search_init(ComicName, IssueNumber, str(IssueYear), SeriesYear, Publisher, IssueDate, StoreDate, actissueid, AlternateSearch, UseFuzzy, ComicVersion, SARC=SARC, IssueArcID=IssueArcID, mode=mode, rsscheck=rsscheck, ComicID=ComicID, filesafe=Comicname_filesafe, allow_packs=allow_packs, oneoff=oneoff, manual=manual, torrentid_32p=TorrentID_32p)
+        foundNZB, prov = search_init(ComicName, IssueNumber, str(IssueYear), SeriesYear, Publisher, IssueDate, StoreDate, actissueid, AlternateSearch, UseFuzzy, ComicVersion, SARC=SARC, IssueArcID=IssueArcID, mode=mode, rsscheck=rsscheck, ComicID=ComicID, filesafe=Comicname_filesafe, allow_packs=allow_packs, oneoff=oneoff, manual=manual, torrentid_32p=TorrentID_32p, digitaldate=DigitalDate)
         if manual is True:
             return foundNZB
         if foundNZB['status'] is True:
@@ -2192,7 +2234,7 @@ def searchIssueIDList(issuelist):
         else:
             AllowPacks = False

-        foundNZB, prov = search_init(comic['ComicName'], issue['Issue_Number'], str(IssueYear), comic['ComicYear'], Publisher, issue['IssueDate'], issue['ReleaseDate'], issue['IssueID'], AlternateSearch, UseFuzzy, ComicVersion, SARC=None, IssueArcID=None, mode=mode, ComicID=issue['ComicID'], filesafe=comic['ComicName_Filesafe'], allow_packs=AllowPacks, torrentid_32p=TorrentID_32p)
+        foundNZB, prov = search_init(comic['ComicName'], issue['Issue_Number'], str(IssueYear), comic['ComicYear'], Publisher, issue['IssueDate'], issue['ReleaseDate'], issue['IssueID'], AlternateSearch, UseFuzzy, ComicVersion, SARC=None, IssueArcID=None, mode=mode, ComicID=issue['ComicID'], filesafe=comic['ComicName_Filesafe'], allow_packs=AllowPacks, torrentid_32p=TorrentID_32p, digitaldate=issue['DigitalDate'])

         if foundNZB['status'] is True:
             updater.foundsearch(ComicID=issue['ComicID'], IssueID=issue['IssueID'], mode=mode, provider=prov, hash=foundNZB['info']['t_hash'])
     logger.info('Completed search request.')
diff --git a/mylar/webserve.py b/mylar/webserve.py
index 08c878f8..720ede53 100644
--- a/mylar/webserve.py
+++ b/mylar/webserve.py
@@ -26,6 +26,7 @@ from datetime import timedelta, date
 import re
 import json
 import copy
+import ntpath

 from mako.template import Template
 from mako.lookup import TemplateLookup
@@ -179,6 +180,13 @@ class WebInterface(object):
         else:
             comicImage = comic['ComicImage']
         comicpublisher = helpers.publisherImages(comic['ComicPublisher'])
+
+        if comic['Collects'] is not None:
+            issues_list = json.loads(comic['Collects'])
+        else:
+            issues_list = None
+        #logger.info('issues_list: %s' % issues_list)
+
         comicConfig = {
                        "fuzzy_year0": helpers.radio(int(usethefuzzy), 0),
                        "fuzzy_year1": helpers.radio(int(usethefuzzy), 1),
@@ -196,6 +204,7 @@ class WebInterface(object):
                        "publisher_image_alt": comicpublisher['publisher_image_alt'],
                        "publisher_imageH": comicpublisher['publisher_imageH'],
                        "publisher_imageW": comicpublisher['publisher_imageW'],
+                       "issue_list": issues_list,
                        "ComicImage": comicImage + '?' + datetime.datetime.now().strftime('%y-%m-%d %H:%M:%S')
                       }
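The new Collects handling assumes the column stores a JSON-encoded blob, which json.loads rehydrates into the issue_list template variable. A small round-trip sketch under that assumption (the stored shape shown here is invented for illustration; the diff does not reveal it):

```python
import json

# Writing: serialize the collected-issues structure into one TEXT column.
collects = [{'issueid': '12345', 'issuenumber': '1'}]  # shape assumed
stored = json.dumps(collects)

# Reading: mirror of the webserve.py logic -- a NULL column means no data.
issues_list = json.loads(stored) if stored is not None else None
```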
@@ -223,6 +232,7 @@
                           "Int_IssueNumber": ann['Int_IssueNumber'],
                           "IssueName": issuename,
                           "IssueDate": ann['IssueDate'],
+                          "DigitalDate": ann['DigitalDate'],
                           "Status": ann['Status'],
                           "Location": ann['Location'],
                           "ComicID": ann['ComicID'],
@@ -574,6 +584,7 @@
                     st_issueid = str(storyarcid) + "_" + str(random.randint(1000,9999))
                 issnum = arcval['Issue_Number']
                 issdate = str(arcval['Issue_Date'])
+                digitaldate = str(arcval['Digital_Date'])
                 storedate = str(arcval['Store_Date'])
                 int_issnum = helpers.issuedigits(issnum)
@@ -603,6 +614,7 @@
                                  "Issue_Number": issnum,
                                  "IssueDate": issdate,
                                  "ReleaseDate": storedate,
+                                 "DigitalDate": digitaldate,
                                  "ReadingOrder": readingorder, #n +1,
                                  "Int_IssueNumber": int_issnum,
                                  "Manual": manual_mod})
@@ -644,7 +656,8 @@
                           "TotalIssues": storyarcissues,
                           "ReadingOrder": AD['ReadingOrder'],
                           "IssueDate": AD['IssueDate'],
-                          "ReleaseDate": AD['ReleaseDate'],
+                          "ReleaseDate": AD['ReleaseDate'],
+                          "DigitalDate": AD['DigitalDate'],
                           "SeriesYear": seriesYear,
                           "IssuePublisher": issuePublisher,
                           "CV_ArcID": arcid,
@@ -657,8 +670,10 @@
             logger.fdebug(module + ' Now searching your watchlist for matches belonging to this story arc.')
             self.ArcWatchlist(storyarcid)
             if arcrefresh:
+                logger.info('%s Successfully Refreshed %s' % (module, storyarcname))
                 return
             else:
+                logger.info('%s Successfully Added %s' % (module, storyarcname))
                 raise cherrypy.HTTPRedirect("detailStoryArc?StoryArcID=%s&StoryArcName=%s" % (storyarcid, storyarcname))
     addStoryArc.exposed = True
@@ -898,6 +913,8 @@
             if comic['ComicName'] is None: ComicName = "None"
             else: ComicName = comic['ComicName']
             seriesdir = comic['ComicLocation']
+            seriesyear = comic['ComicYear']
+            seriesvol = comic['ComicVersion']
             logger.info(u"Deleting all traces of Comic: " + ComicName)
             myDB.action('DELETE from comics WHERE ComicID=?', [ComicID])
             myDB.action('DELETE from issues WHERE ComicID=?', [ComicID])
@@ -912,10 +929,12 @@
                         shutil.rmtree(seriesdir)
                     except:
                         logger.warn('Unable to remove directory after removing series from Mylar.')
+                    else:
+                        logger.info('Successfully removed directory: %s' % (seriesdir))
                 else:
                     logger.warn('Unable to remove directory as it does not exist in : ' + seriesdir)
             myDB.action('DELETE from readlist WHERE ComicID=?', [ComicID])
-
+            logger.info('Successful deletion of %s %s (%s) from your watchlist' % (ComicName, seriesvol, seriesyear))
         helpers.ComicSort(sequence='update')
         raise cherrypy.HTTPRedirect("home")
     deleteSeries.exposed = True
@@ -1682,7 +1701,8 @@
                                           "HAVEIT": haveit,
                                           "LINK": linkit,
                                           "HASH": None,
-                                          "AUTOWANT": False
+                                          "AUTOWANT": False,
+                                          "FORMAT": weekly['format']
                                          })
                     else:
                         if any(x['ComicName'].lower() == weekly['COMIC'].lower() for x in autowant):
@@ -1698,7 +1718,8 @@
                                               "HAVEIT": haveit,
                                               "LINK": linkit,
                                               "HASH": None,
-                                              "AUTOWANT": True
+                                              "AUTOWANT": True,
+                                              "FORMAT": weekly['format']
                                              })
                         else:
                             weeklyresults.append({
@@ -1713,7 +1734,8 @@
                                               "HAVEIT": haveit,
                                               "LINK": linkit,
                                               "HASH": None,
-                                              "AUTOWANT": False
+                                              "AUTOWANT": False,
+                                              "FORMAT": weekly['format']
                                              })

                 if tmp_status == 'Wanted':
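The three weeklyresults.append(...) literals above differ only in the AUTOWANT flag, which is why the new FORMAT key has to be added in three places. A sketch of factoring the shared fields into one builder (names are invented, and the real rows carry more keys than the diff shows):

```python
def weekly_entry(weekly, haveit, linkit, autowant):
    """Build one pull-list row; shared keys live in one place so a new
    column (e.g. FORMAT) is added once instead of three times."""
    return {
        "COMIC": weekly['COMIC'],
        "ISSUE": weekly['ISSUE'],
        "HAVEIT": haveit,
        "LINK": linkit,
        "HASH": None,
        "AUTOWANT": autowant,
        "FORMAT": weekly['format'],
    }

# weeklyresults.append(weekly_entry(weekly, haveit, linkit, autowant=True))
```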
@@ -2817,6 +2839,9 @@
                 logger.fdebug('[%s] Issue to renumber sequence from : %s' % (issuearcid, valid_readingorder))
                 reading_seq = 1
                 for rc in sorted(readchk, key=itemgetter('ReadingOrder'), reverse=False):
+                    filename = None
+                    if rc['Location'] is not None:
+                        filename = ntpath.basename(rc['Location'])
                     if str(issuearcid) == str(rc['IssueArcID']):
                         logger.fdebug('new order sequence detected at #: %s' % valid_readingorder)
                         if valid_readingorder > int(rc['ReadingOrder']):
@@ -2839,10 +2864,8 @@
                     else:
                         #valid_readingorder
                         if valid_readingorder < old_reading_seq:
-                            logger.info('2')
                             reading_seq = int(rc['ReadingOrder'])
                         else:
-                            logger.info('3')
                             reading_seq = oldreading_seq +1
                             logger.fdebug('old sequence discovered at %s to %s' % (oldreading_seq, reading_seq))
                             oldreading_seq = None
@@ -2855,7 +2878,8 @@
                         logger.fdebug('reordering existing sequence as lower sequence has changed. Altering from %s to %s' % (rc['ReadingOrder'], reading_seq))
                     new_readorder.append({'IssueArcID': IssueArcID,
                                           'IssueID': issueid,
-                                          'ReadingOrder': reading_seq})
+                                          'ReadingOrder': reading_seq,
+                                          'filename': filename})

                 #we resequence in the following way:
                 # everything before the new reading number stays the same
@@ -2866,6 +2890,14 @@
                 #newrl = 0
                 for rl in sorted(new_readorder, key=itemgetter('ReadingOrder'), reverse=False):
+                    if rl['filename'] is not None:
+                        try:
+                            if int(rl['ReadingOrder']) != int(rl['filename'][:rl['filename'].find('-')]) and mylar.CONFIG.READ2FILENAME is True:
+                                logger.fdebug('Order-Change: %s TO %s' % (int(rl['filename'][:rl['filename'].find('-')]), int(rl['ReadingOrder'])))
+                                logger.fdebug('%s to %s' % (rl['filename'], helpers.renamefile_readingorder(rl['ReadingOrder']) + '-' + rl['filename'][rl['filename'].find('-')+1:]))
+                        except:
+                            pass
+
                     rl_ctrl = {"IssueID": rl['IssueID'],
                                "IssueArcID": rl['IssueArcID'],
                                "StoryArcID": storyarcid}
@@ -3156,7 +3188,8 @@
             logger.info("No Story Arcs to search")
         else:
             #cycle through the story arcs here for matches on the watchlist
-            arcdir = helpers.filesafe(ArcWatch[0]['StoryArc'])
+            arcname = ArcWatch[0]['StoryArc']
+            arcdir = helpers.filesafe(arcname)
             arcpub = ArcWatch[0]['Publisher']
             if arcpub is None:
                 arcpub = ArcWatch[0]['IssuePublisher']
@@ -3441,6 +3474,8 @@
                     myDB.upsert("storyarcs", newVal, ctrlVal)
                     logger.info("Marked " + issue['ComicName'] + " :# " + issue['Issue_Number'] + " as " + issue['Status'])

+        arcstats = self.storyarc_main(StoryArcID)
+        logger.info('[STORY-ARCS] Completed Missing/Recheck Files for %s [%s / %s]' % (arcname, arcstats['Have'], arcstats['TotalIssues']))
         return
     ArcWatchlist.exposed = True
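ntpath.basename is used (rather than os.path.basename) so a stored Windows-style Location still splits correctly when Mylar runs on a POSIX host, since ntpath treats both \ and / as separators; the bare except in the new block is what tolerates filenames without a numeric prefix. A short illustration of both, where the zero-padded 'nnnn-' prefix format is an assumption based on the READ2FILENAME naming (Mylar's real prefix comes from helpers.renamefile_readingorder):

```python
import ntpath

# ntpath handles both separator styles; os.path on Linux would not:
ntpath.basename(r'C:\comics\arcs\0010-Weird Tales 001.cbz')  # '0010-Weird Tales 001.cbz'
ntpath.basename('/comics/arcs/0010-Weird Tales 001.cbz')     # '0010-Weird Tales 001.cbz'

filename = '0010-Weird Tales 001.cbz'
new_order = 12
# the digits before the first '-' are the current reading-order prefix
current = int(filename[:filename.find('-')])   # 10
if current != new_order:
    renamed = '%04d-%s' % (new_order, filename[filename.find('-') + 1:])
    # renamed == '0012-Weird Tales 001.cbz' (zero-padding assumed)
```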
KEY)") pulldate = '00000000' logger.fdebug(u"Table re-created, trying to populate") else: @@ -440,7 +440,7 @@ def pullit(forcecheck=None, weeknumber=None, year=None): logger.info(u"Populating the NEW Weekly Pull list into Mylar for week " + str(weeknumber)) myDB.action("drop table if exists weekly") - myDB.action("CREATE TABLE IF NOT EXISTS weekly (SHIPDATE, PUBLISHER TEXT, ISSUE TEXT, COMIC VARCHAR(150), EXTRA TEXT, STATUS TEXT, ComicID TEXT, IssueID TEXT, CV_Last_Update TEXT, DynamicName TEXT, weeknumber TEXT, year TEXT, volume TEXT, seriesyear TEXT, annuallink TEXT, rowid INTEGER PRIMARY KEY)") + myDB.action("CREATE TABLE IF NOT EXISTS weekly (SHIPDATE, PUBLISHER TEXT, ISSUE TEXT, COMIC VARCHAR(150), EXTRA TEXT, STATUS TEXT, ComicID TEXT, IssueID TEXT, CV_Last_Update TEXT, DynamicName TEXT, weeknumber TEXT, year TEXT, volume TEXT, seriesyear TEXT, annuallink TEXT, format TEXT, rowid INTEGER PRIMARY KEY)") csvfile = open(newfl, "rb") creader = csv.reader(csvfile, delimiter='\t') diff --git a/mylar/wwt.py b/mylar/wwt.py index fbbeecd4..df95f928 100755 --- a/mylar/wwt.py +++ b/mylar/wwt.py @@ -14,7 +14,7 @@ # You should have received a copy of the GNU General Public License # along with Mylar. If not, see . -import lib.requests as requests +import requests from bs4 import BeautifulSoup, UnicodeDammit import urlparse import re
diff --git a/mylar/wwt.py b/mylar/wwt.py
index fbbeecd4..df95f928 100755
--- a/mylar/wwt.py
+++ b/mylar/wwt.py
@@ -14,7 +14,7 @@
 #  You should have received a copy of the GNU General Public License
 #  along with Mylar.  If not, see <http://www.gnu.org/licenses/>.

-import lib.requests as requests
+import requests
 from bs4 import BeautifulSoup, UnicodeDammit
 import urlparse
 import re

[Story-arc detail template hunks garbled in extraction: the HTML markup is unrecoverable. The surviving Mako fragments show they touch the ${css} styling chosen when arcdetail['percent'] == 101, the ${arcdetail['Have']}/${arcdetail['Total']} counter (@@ -48,9 +46,11 @@), the ${storyarcname} and (${spanyears}) header spans, and the inline optpos style string (@@ -81,7 +81,7 @@).]
diff --git a/data/interfaces/default/weeklypull.html b/data/interfaces/default/weeklypull.html
index 40415408..54a0cf71 100755
--- a/data/interfaces/default/weeklypull.html
+++ b/data/interfaces/default/weeklypull.html
@@ -129,6 +129,10 @@
                                 %if weekly['SERIESYEAR'] is not None:
                                     &nbsp;(${weekly['SERIESYEAR']})
                                 %endif
+
+                                %if weekly['FORMAT'] == 'Digital':
+                                    &nbsp;[${weekly['FORMAT']}]
+                                %endif
                                 ${weekly['ISSUE']}