From 44a92cacec2f864d4029030727e48e8fb00b3722 Mon Sep 17 00:00:00 2001 From: evilhero Date: Tue, 28 Feb 2017 10:59:19 -0500 Subject: [PATCH 01/15] FIX:(#1578) Uncaught exceptions on manual search with torrents, FIX:(#1399) RSSCheck spamming the console, FIX: Numerical Sorting not working correctly on home page and comic details pages --- data/interfaces/default/comicdetails.html | 163 ++++++---------------- data/interfaces/default/index.html | 46 +++--- mylar/search.py | 27 ++-- 3 files changed, 82 insertions(+), 154 deletions(-) diff --git a/data/interfaces/default/comicdetails.html b/data/interfaces/default/comicdetails.html index 20ab0e3f..c6922de8 100644 --- a/data/interfaces/default/comicdetails.html +++ b/data/interfaces/default/comicdetails.html @@ -70,8 +70,7 @@
@@ -193,85 +192,7 @@
-
- - - - - -
-
-
- -
-
-
- %if comic['ComicPublisher'] == 'DC Comics': - DC - %elif comic['ComicPublisher'] == 'Marvel': - Marvel - %elif comic['ComicPublisher'] == 'Image': - Image - %elif comic['ComicPublisher'] == 'Dark Horse Comics': - Darkhorse - %elif comic['ComicPublisher'] == 'IDW Publishing': - IDW - %endif -
-
- -
-
- -
-
- -
-
- -
-
- -
- <% - if comic['UseFuzzy'] == "0" or comic['UseFuzzy'] is None: - fuzzy = "None" - fuzzy_year = "0" - elif comic['UseFuzzy'] == "1": - fuzzy = "Remove Year" - fuzzy_year = "1" - elif comic['UseFuzzy'] == "2": - fuzzy = "Fuzzy Year" - fuzzy_year = "2" - - %> - -
- -
- -
-
-
-
+
@@ -530,7 +451,7 @@ - +
@@ -841,49 +762,50 @@
 		initActions();
 		$('#issue_table').dataTable( {
-			"bDestroy": true,
-			"aoColumnDefs": [
-				{ 'bSortable': false, 'aTargets': [ 0, 3 ] },
-				{ 'bVisible': false, 'aTargets': [1] },
-				{ 'sType': 'numeric', 'aTargets': [1] },
-				{ 'columns.orderData': [1], 'aTargets': [2] }
+			"destroy": true,
+			"columnDefs": [
+				{ "orderable": false, "targets": [0, 6] },
+				{ "visible": false, "targets": 1 },
+				{ "orderData": 1, "targets": 2 },
+				{ "type": 'num', "targets": 1 }
 			],
-			"aLengthMenu": [[10, 25, 50, -1], [10, 25, 50, 'All' ]],
-			"oLanguage": {
-				"sLengthMenu":"Show _MENU_ issues per page",
-				"sEmptyTable": "No issue information available",
-				"sInfo":"Showing _TOTAL_ issues",
-				"sInfoEmpty":"Showing 0 to 0 of 0 issues",
-				"sInfoFiltered":"(filtered from _MAX_ total issues)",
-				"sSearch": ""},
-			"bStateSave": true,
-			"iDisplayLength": 25,
-			"sPaginationType": "full_numbers",
-			"aaSorting": [[1, 'desc'],[4,'desc']]
+			"lengthMenu": [[10, 25, 50, -1], [10, 25, 50, 'All' ]],
+			"language": {
+				"lengthMenu":"Show _MENU_ issues per page",
+				"emptyTable": "No issue information available",
+				"info":"Showing _TOTAL_ issues",
+				"infoEmpty":"Showing 0 to 0 of 0 issues",
+				"infoFiltered":"(filtered from _MAX_ total issues)",
+				"search": ""},
+			"stateSave": true,
+			"searching": true,
+			"pageLength": 25,
+			"pagingType": "full_numbers",
+			"order": [[1, 'desc']]
 		});
 		$('#annual_table').dataTable( {
-			"bDestroy": true,
-			"aoColumnDefs": [
-				{ 'bSortable': false, 'aTargets': [ 0, 3 ] },
-				{ 'bVisible': false, 'aTargets': [1] },
-				{ 'sType': 'numeric', 'aTargets': [1] },
-				{ 'columns.orderData': [1], 'aTargets': [2] }
+			"destroy": true,
+			"columnDefs": [
+				{ "orderable": false, "targets": [0, 6] },
+				{ "visible": false, "targets": 1 },
+				{ "orderData": 1, "targets": 2 },
+				{ "type": 'num', "targets": 1 }
 			],
-			"aLengthMenu": [[10, 25, 50, -1], [10, 25, 50, 'All' ]],
-			"oLanguage": {
-				"sLengthMenu":"",
-				"sEmptyTable": "No issue information available",
-				"sInfo":"Showing _TOTAL_ issues",
-				"sInfoEmpty":"Showing 0 to 0 of 0 issues",
-				"sInfoFiltered":"",
-				"sSearch": ""},
-			"bStateSave": true,
-			"bFilter": false,
-			"sPaginationType": "full_numbers",
-			"iDisplayLength": 10
+			"lengthMenu": [[10, 25, 50, -1], [10, 25, 50, 'All' ]],
+			"language": {
+				"lengthMenu":"Show _MENU_ annuals per page",
+				"emptyTable": "No annual information available",
+				"info":"Showing _TOTAL_ annuals",
+				"infoEmpty":"Showing 0 to 0 of 0 annuals",
+				"infoFiltered":"(filtered from _MAX_ total annuals)",
+				"search": ""},
+			"stateSave": true,
+			"searching": false,
+			"pageLength": 10,
+			"pagingType": "full_numbers",
+			"order": [[1, 'desc']]
 		});
-		resetFilters("issue", "annual");

 		setTimeout(function(){
 			initFancybox();
@@ -891,8 +813,7 @@ }
 	$(document).ready(function() {
-		$("issue_table").dataTable();
-		$("annual_table").dataTable();
+		$('table.display').DataTable();
 		initThisPage();
 	});
diff --git a/data/interfaces/default/index.html b/data/interfaces/default/index.html
index 235bcd2a..8e25c6e8 100755
--- a/data/interfaces/default/index.html
+++ b/data/interfaces/default/index.html
@@ -32,6 +32,12 @@
 		if comic['percent'] < 100:
 			css = '
' + if any([comic['haveissues'] == 'None', comic['haveissues'] is None]): + hissues = 0 + else: + hissues = comic['haveissues'] + comic_percent = comic['percent'] + hissues + if comic['Status'] == 'Paused': grade = 'X' elif comic['Status'] == 'Loading': @@ -48,7 +54,7 @@
- + - - + + @@ -131,7 +131,7 @@ issuedate = item['IssueDate'] else: if item['StoreDate'] != '0000-00-00' and item['StoreDate'] is not None: - issuedate = item['IssueDate'] + issuedate = item['StoreDate'] else: # this is needed for imported cbl's try: diff --git a/mylar/__init__.py b/mylar/__init__.py index 78295ce2..0499e73d 100755 --- a/mylar/__init__.py +++ b/mylar/__init__.py @@ -327,6 +327,7 @@ ARC_FOLDERFORMAT = None ARC_FILEOPS = 'copy' CVURL = None +CV_VERIFY = 0 CURRENT_WEEKNUMBER = None CURRENT_YEAR = None PULL_REFRESH = None @@ -501,7 +502,7 @@ def initialize(): PREFERRED_QUALITY, MOVE_FILES, RENAME_FILES, LOWERCASE_FILENAMES, USE_MINSIZE, MINSIZE, USE_MAXSIZE, MAXSIZE, CORRECT_METADATA, \ FOLDER_FORMAT, FILE_FORMAT, REPLACE_CHAR, REPLACE_SPACES, ADD_TO_CSV, CVINFO, LOG_LEVEL, POST_PROCESSING, POST_PROCESSING_SCRIPT, \ FILE_OPTS, SEARCH_DELAY, GRABBAG_DIR, READ2FILENAME, SEND2READ, MAINTAINSERIESFOLDER, TAB_ENABLE, TAB_HOST, TAB_USER, TAB_PASS, TAB_DIRECTORY, \ - STORYARCDIR, COPY2ARCDIR, ARC_FOLDERFORMAT, ARC_FILEOPS, CVURL, CHECK_FOLDER, ENABLE_CHECK_FOLDER, \ + STORYARCDIR, COPY2ARCDIR, ARC_FOLDERFORMAT, ARC_FILEOPS, CVURL, CV_VERIFY, CHECK_FOLDER, ENABLE_CHECK_FOLDER, \ COMIC_LOCATION, QUAL_ALTVERS, QUAL_SCANNER, QUAL_TYPE, QUAL_QUALITY, ENABLE_EXTRA_SCRIPTS, EXTRA_SCRIPTS, ENABLE_PRE_SCRIPTS, PRE_SCRIPTS, PULLNEW, ALT_PULL, PULLBYFILE, COUNT_ISSUES, COUNT_HAVES, COUNT_COMICS, \ SYNO_FIX, ENFORCE_PERMS, CHMOD_FILE, CHMOD_DIR, CHOWNER, CHGROUP, ANNUALS_ON, CV_ONLY, CV_ONETIMER, CURRENT_WEEKNUMBER, CURRENT_YEAR, PULL_REFRESH, WEEKFOLDER, WEEKFOLDER_LOC, WEEKFOLDER_FORMAT, UMASK, \ TELEGRAM_ENABLED, TELEGRAM_TOKEN, TELEGRAM_USERID @@ -548,6 +549,7 @@ def initialize(): if not COMICVINE_API: COMICVINE_API = None CVAPI_RATE = check_setting_int(CFG, 'General', 'cvapi_rate', 2) + CV_VERIFY = bool(check_setting_int(CFG, 'General', 'cv_verify', 0)) HTTP_HOST = check_setting_str(CFG, 'General', 'http_host', '0.0.0.0') HTTP_USERNAME = check_setting_str(CFG, 'General', 'http_username', '') HTTP_PASSWORD = check_setting_str(CFG, 'General', 'http_password', '') @@ -1210,7 +1212,7 @@ def initialize(): logger.info('Synology Parsing Fix already implemented. No changes required at this time.') #set the default URL for ComicVine API here. - CVURL = 'http://comicvine.gamespot.com/api/' + CVURL = 'https://comicvine.gamespot.com/api/' #comictagger - force to use included version if option is enabled. if ENABLE_META: @@ -1374,6 +1376,7 @@ def config_write(): new_config['General']['comicvine_api'] = COMICVINE_API.strip() new_config['General']['cvapi_rate'] = CVAPI_RATE + new_config['General']['cv_verify'] = int(CV_VERIFY) new_config['General']['http_port'] = HTTP_PORT new_config['General']['http_host'] = HTTP_HOST new_config['General']['http_username'] = HTTP_USERNAME diff --git a/mylar/cv.py b/mylar/cv.py index 4c184348..83e642af 100755 --- a/mylar/cv.py +++ b/mylar/cv.py @@ -88,10 +88,9 @@ def pulldetails(comicid, type, issueid=None, offset=1, arclist=None, comicidlist #download the file: #set payload to None for now... 
payload = None
-    verify = False

     try:
-        r = requests.get(PULLURL, params=payload, verify=verify, headers=mylar.CV_HEADERS)
+        r = requests.get(PULLURL, params=payload, verify=mylar.CV_VERIFY, headers=mylar.CV_HEADERS)
     except Exception, e:
         logger.warn('Error fetching data from ComicVine: %s' % (e))
         return
diff --git a/mylar/helpers.py b/mylar/helpers.py
index e048898d..25bb7f67 100755
--- a/mylar/helpers.py
+++ b/mylar/helpers.py
@@ -370,7 +370,9 @@ def rename_param(comicid, comicname, issue, ofilename, comicyear=None, issueid=N
     for issexcept in issue_exceptions:
         if issexcept.lower() in issuenum.lower():
             logger.fdebug('ALPHANUMERIC EXCEPTION : [' + issexcept + ']')
-            if any(v in issuenum for v in valid_spaces):
+            v_chk = [v for v in valid_spaces if v in issuenum]
+            if v_chk:
+                iss_space = v_chk[0]
                 logger.fdebug('character space denoted as : ' + iss_space)
             else:
                 logger.fdebug('character space not denoted.')
@@ -2343,7 +2345,7 @@ def spantheyears(storyarcid):
     lowyear = 9999
     maxyear = 0
     for la in totalcnt:
-        if la['IssueDate'] is None:
+        if la['IssueDate'] is None or la['IssueDate'] == '0000-00-00':
             continue
         else:
             if int(la['IssueDate'][:4]) > maxyear:
diff --git a/mylar/importer.py b/mylar/importer.py
index 881a3b10..2f24d0d8 100644
--- a/mylar/importer.py
+++ b/mylar/importer.py
@@ -425,7 +425,7 @@ def addComictoDB(comicid, mismatch=None, pullupd=None, imported=None, ogcname=No
     logger.info('Attempting to retrieve the comic image for series')
     try:
-        r = requests.get(comic['ComicImage'], params=None, stream=True, headers=mylar.CV_HEADERS)
+        r = requests.get(comic['ComicImage'], params=None, stream=True, verify=mylar.CV_VERIFY, headers=mylar.CV_HEADERS)
     except Exception, e:
         logger.warn('Unable to download image from CV URL link: ' + comic['ComicImage'] + ' [Status Code returned: ' + str(r.status_code) + ']')
@@ -462,7 +462,7 @@ def addComictoDB(comicid, mismatch=None, pullupd=None, imported=None, ogcname=No
     logger.info('Attempting to retrieve alternate comic image for the series.')
     try:
-        r = requests.get(comic['ComicImageALT'], params=None, stream=True, headers=mylar.CV_HEADERS)
+        r = requests.get(comic['ComicImageALT'], params=None, stream=True, verify=mylar.CV_VERIFY, headers=mylar.CV_HEADERS)
     except Exception, e:
         logger.warn('Unable to download image from CV URL link: ' + comic['ComicImageALT'] + ' [Status Code returned: ' + str(r.status_code) + ']')
diff --git a/mylar/mb.py b/mylar/mb.py
index 7af817e3..aa799cbb 100755
--- a/mylar/mb.py
+++ b/mylar/mb.py
@@ -68,10 +68,9 @@ def pullsearch(comicapi, comicquery, offset, explicit, type):

     #download the file:
     payload = None
-    verify = False

     try:
-        r = requests.get(PULLURL, params=payload, verify=verify, headers=mylar.CV_HEADERS)
+        r = requests.get(PULLURL, params=payload, verify=mylar.CV_VERIFY, headers=mylar.CV_HEADERS)
     except Exception, e:
         logger.warn('Error fetching data from ComicVine: %s' % (e))
         return
@@ -415,10 +414,9 @@ def storyarcinfo(xmlid):

     #download the file:
     payload = None
-    verify = False

     try:
-        r = requests.get(ARCPULL_URL, params=payload, verify=verify, headers=mylar.CV_HEADERS)
+        r = requests.get(ARCPULL_URL, params=payload, verify=mylar.CV_VERIFY, headers=mylar.CV_HEADERS)
     except Exception, e:
         logger.warn('Error fetching data from ComicVine: %s' % (e))
         return
diff --git a/mylar/webserve.py b/mylar/webserve.py
index 41898f31..555f761f 100644
--- a/mylar/webserve.py
+++ b/mylar/webserve.py
@@ -551,7 +551,8 @@ class WebInterface(object):

                 myDB.upsert("readinglist", newVals, newCtrl)

-        logger.info(newVals)
+        #logger.info(newVals)
+        #run
the Search for Watchlist matches now. logger.fdebug(module + ' Now searching your watchlist for matches belonging to this story arc.') self.ArcWatchlist(storyarcid) @@ -2483,7 +2484,7 @@ class WebInterface(object): 'IssueNumber': la['IssueNumber'], 'ReadingOrder': la['ReadingOrder']}) - if la['IssueDate'] is None: + if la['IssueDate'] is None or la['IssueDate'] == '0000-00-00': continue else: if int(la['IssueDate'][:4]) > maxyear: @@ -2801,40 +2802,41 @@ class WebInterface(object): logger.info('arcpub: ' + arcpub) dstloc = helpers.arcformat(arcdir, spanyears, arcpub) - - if not os.path.isdir(dstloc): - if mylar.STORYARCDIR: - logger.info('Story Arc Directory [' + dstloc + '] does not exist! - attempting to create now.') - else: - logger.info('Story Arc Grab-Bag Directory [' + dstloc + '] does not exist! - attempting to create now.') - checkdirectory = filechecker.validateAndCreateDirectory(dstloc, True) - if not checkdirectory: - logger.warn('Error trying to validate/create directory. Aborting this process at this time.') - return - - if all([mylar.CVINFO, mylar.STORYARCDIR]): - if not os.path.isfile(os.path.join(dstloc, "cvinfo")) or mylar.CV_ONETIMER: - logger.fdebug('Generating cvinfo file for story-arc.') - with open(os.path.join(dstloc, "cvinfo"), "w") as text_file: - if any([ArcWatch[0]['StoryArcID'] == ArcWatch[0]['CV_ArcID'], ArcWatch[0]['CV_ArcID'] is None]): - cvinfo_arcid = ArcWatch[0]['StoryArcID'] - else: - cvinfo_arcid = ArcWatch[0]['CV_ArcID'] - - text_file.write('https://comicvine.gamespot.com/storyarc/4045-' + str(cvinfo_arcid)) - if mylar.ENFORCE_PERMS: - filechecker.setperms(os.path.join(dstloc, 'cvinfo')) - - #get the list of files within the storyarc directory, if any. filelist = None - if mylar.STORYARCDIR: - fchk = filechecker.FileChecker(dir=dstloc, watchcomic=None, Publisher=None, sarc='true', justparse=True) - filechk = fchk.listFiles() - fccnt = filechk['comiccount'] - logger.fdebug('[STORY ARC DIRECTORY] ' + str(fccnt) + ' files exist within this directory.') - if fccnt > 0: - filelist = filechk['comiclist'] - logger.info(filechk) + + if dstloc is not None: + if not os.path.isdir(dstloc): + if mylar.STORYARCDIR: + logger.info('Story Arc Directory [' + dstloc + '] does not exist! - attempting to create now.') + else: + logger.info('Story Arc Grab-Bag Directory [' + dstloc + '] does not exist! - attempting to create now.') + checkdirectory = filechecker.validateAndCreateDirectory(dstloc, True) + if not checkdirectory: + logger.warn('Error trying to validate/create directory. Aborting this process at this time.') + return + + if all([mylar.CVINFO, mylar.STORYARCDIR]): + if not os.path.isfile(os.path.join(dstloc, "cvinfo")) or mylar.CV_ONETIMER: + logger.fdebug('Generating cvinfo file for story-arc.') + with open(os.path.join(dstloc, "cvinfo"), "w") as text_file: + if any([ArcWatch[0]['StoryArcID'] == ArcWatch[0]['CV_ArcID'], ArcWatch[0]['CV_ArcID'] is None]): + cvinfo_arcid = ArcWatch[0]['StoryArcID'] + else: + cvinfo_arcid = ArcWatch[0]['CV_ArcID'] + + text_file.write('https://comicvine.gamespot.com/storyarc/4045-' + str(cvinfo_arcid)) + if mylar.ENFORCE_PERMS: + filechecker.setperms(os.path.join(dstloc, 'cvinfo')) + + #get the list of files within the storyarc directory, if any. 
+            if mylar.STORYARCDIR:
+                fchk = filechecker.FileChecker(dir=dstloc, watchcomic=None, Publisher=None, sarc='true', justparse=True)
+                filechk = fchk.listFiles()
+                fccnt = filechk['comiccount']
+                logger.fdebug('[STORY ARC DIRECTORY] ' + str(fccnt) + ' files exist within this directory.')
+                if fccnt > 0:
+                    filelist = filechk['comiclist']
+                logger.info(filechk)

         arc_match = []
         wantedlist = []
@@ -2898,18 +2900,19 @@ class WebInterface(object):
                         matcheroso = "yes"
                         break
             if matcheroso == "no":
-                logger.fdebug("Unable to find a match for " + arc['ComicName'] + " :#" + arc['IssueNumber'])
+                logger.fdebug("[NO WATCHLIST MATCH] Unable to find a match for " + arc['ComicName'] + " :#" + arc['IssueNumber'])
                 wantedlist.append({
                      "ComicName": arc['ComicName'],
                      "IssueNumber": arc['IssueNumber'],
                      "IssueYear": arc['IssueYear']})

                 if filelist is not None and mylar.STORYARCDIR:
+                    logger.fdebug("[NO WATCHLIST MATCH] Checking against local Arc directory for given issue.")
                     fn = 0
                     valids = [x for x in filelist if re.sub('[\|\s]','', x['dynamic_name'].lower()).strip() == re.sub('[\|\s]','', arc['DynamicComicName'].lower()).strip()]
                     logger.info('valids: ' + str(valids))
                     if len(valids) > 0:
-                        for tmpfc in filelist:
+                        for tmpfc in valids: #filelist:
                             haveissue = "no"
                             issuedupe = "no"
                             temploc = tmpfc['issue_number'].replace('_', ' ')
@@ -2918,31 +2921,46 @@ class WebInterface(object):
                                 if int_iss == fcdigit:
                                     logger.fdebug(arc['ComicName'] + ' Issue #' + arc['IssueNumber'] + ' already present in StoryArc directory.')
                                     #update readinglist db to reflect status.
+                                    rr_rename = False
                                     if mylar.READ2FILENAME:
                                         readorder = helpers.renamefile_readingorder(arc['ReadingOrder'])
-                                        dfilename = str(readorder) + "-" + tmpfc['comicfilename']
+                                        if tmpfc['reading_order'] is not None and int(readorder) != int(tmpfc['reading_order']['reading_sequence']):
+                                            logger.warn('reading order sequence has changed for this issue from ' + str(tmpfc['reading_order']['reading_sequence']) + ' to ' + str(readorder))
+                                            rr_rename = True
+                                            dfilename = str(readorder) + '-' + tmpfc['reading_order']['filename']
+                                        elif tmpfc['reading_order'] is None:
+                                            dfilename = str(readorder) + '-' + tmpfc['comicfilename']
+                                        else:
+                                            dfilename = str(readorder) + '-' + tmpfc['reading_order']['filename']
                                     else:
                                         dfilename = tmpfc['comicfilename']

                                     if all([tmpfc['sub'] is not None, tmpfc['sub'] != 'None']):
-                                        loc_path = os.path.join(tmpfc['ComicLocation'], tmpfc['sub'], dfilename)
+                                        loc_path = os.path.join(tmpfc['comiclocation'], tmpfc['sub'], dfilename)
                                     else:
-                                        loc_path = os.path.join(tmpfc['ComicLocation'], dfilename)
+                                        loc_path = os.path.join(tmpfc['comiclocation'], dfilename)
+
+                                    if rr_rename:
+                                        logger.fdebug('Now re-sequencing file to : ' + dfilename)
+                                        os.rename(os.path.join(tmpfc['comiclocation'],tmpfc['comicfilename']), loc_path)

                                     newVal = {"Status": "Downloaded",
                                               "Location": loc_path} #dfilename}
                                     ctrlVal = {"IssueArcID": arc['IssueArcID']}
                                     myDB.upsert("readinglist", newVal, ctrlVal)
                                     fn+=1
-                        else:
-                            newVal = {"Status": "Skipped"}
-                            ctrlVal = {"IssueArcID": arc['IssueArcID']}
-                            myDB.upsert("readinglist", newVal, ctrlVal)
+                            continue

-        logger.fdebug("we matched on " + str(len(arc_match)) + " issues")
+                newVal = {"Status": "Skipped"}
+                ctrlVal = {"IssueArcID": arc['IssueArcID']}
+                myDB.upsert("readinglist", newVal, ctrlVal)
+
+        logger.fdebug(str(len(arc_match)) + " issues currently exist on your watchlist that are within this arc. Analyzing...")
         for m_arc in arc_match:
             #now we cycle through the issues looking for a match.
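Both the directory-side re-sequencing above and the copy-side variant further down hinge on the zero-padded reading-order prefix that READ2FILENAME prepends to story-arc filenames. A standalone sketch of that round-trip, assuming helpers.renamefile_readingorder zero-pads to a fixed width (the exact width is not confirmed by this patch):

    # hypothetical stand-in for helpers.renamefile_readingorder (padding assumed)
    def renamefile_readingorder(readorder):
        # zero-pad so lexical sort order matches reading order
        return str(readorder).zfill(3)

    def resequence(filename, old_order, new_order):
        # strip the stale prefix if present, then apply the new sequence number
        prefix = renamefile_readingorder(old_order) + '-'
        if filename.startswith(prefix):
            filename = filename[len(prefix):]
        return renamefile_readingorder(new_order) + '-' + filename

    print(resequence('005-Weapon X 001.cbz', 5, 12))  # 012-Weapon X 001.cbz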
-            #issue = myDB.selectone("SELECT * FROM issues where ComicID=? and Issue_Number=?", [m_arc['match_id'], m_arc['match_issue']]).fetchone()
+            issue = myDB.selectone("SELECT a.Issue_Number, a.Status, a.IssueID, a.ComicName, a.IssueDate, a.Location, b.readingorder FROM issues AS a INNER JOIN readinglist AS b ON a.comicid = b.comicid where a.comicid=? and a.issue_number=?", [m_arc['match_id'], m_arc['match_issue']]).fetchone()
+
             if issue is None:
                 pass
             else:
@@ -2982,14 +3000,34 @@ class WebInterface(object):
                         logger.fdebug('Destination location set to : ' + m_arc['destination_location'])
                         logger.fdebug('Attempting to copy into StoryArc directory')
                         #copy into StoryArc directory...
+
+                        #need to make sure the file being copied over isn't already present in the directory either with a different filename,
+                        #or different reading order.
+                        rr_rename = False
                         if mylar.READ2FILENAME:
                             readorder = helpers.renamefile_readingorder(m_arc['match_readingorder'])
-                            dfilename = str(readorder) + "-" + issue['Location']
+                            if m_arc['match_readingorder'] is not None and int(readorder) != int(m_arc['match_readingorder']):
+                                logger.warn('reading order sequence has changed for this issue from ' + str(m_arc['match_readingorder']) + ' to ' + str(readorder))
+                                rr_rename = True
+                                dfilename = str(readorder) + '-' + issue['Location']
+                            elif m_arc['match_readingorder'] is None:
+                                dfilename = str(readorder) + '-' + issue['Location']
+                            else:
+                                dfilename = str(readorder) + '-' + issue['Location']
                         else:
                             dfilename = issue['Location']

+                        dstloc = os.path.join(m_arc['destination_location'], dfilename)
+
+                        if rr_rename:
+                            logger.fdebug('Now re-sequencing COPIED file to : ' + dfilename)
+                            os.rename(issloc, dstloc)
+
+
                         if not os.path.isfile(dstloc):
                             logger.fdebug('Copying ' + issloc + ' to ' + dstloc)
                             try:
@@ -3015,10 +3053,10 @@ class WebInterface(object):
                 else:
                     logger.fdebug("We don't have " + issue['ComicName'] + " :# " + issue['Issue_Number'])
                     ctrlVal = {"IssueArcID": m_arc['match_issuearcid']}
-                    newVal = {"Status": "Wanted",
+                    newVal = {"Status": issue['Status'], #"Wanted",
                               "IssueID": issue['IssueID']}
                     myDB.upsert("readinglist", newVal, ctrlVal)
-                    logger.info("Marked " + issue['ComicName'] + " :# " + issue['Issue_Number'] + " as Wanted.")
+                    logger.info("Marked " + issue['ComicName'] + " :# " + issue['Issue_Number'] + " as " + issue['Status'])

         return
@@ -5087,22 +5125,14 @@ class WebInterface(object):
         return
     orderThis.exposed = True

-    def torrentit(self, torrent_hash):
-        import test
-        #import lib.torrent.libs.rtorrent as rTorrent
-        from base64 import b16encode, b32decode
-        #torrent_hash # Hash of the torrent
-        logger.fdebug("Working on torrent: " + torrent_hash)
-
-        if len(torrent_hash) == 32:
-            torrent_hash = b16encode(b32decode(torrent_hash))
-
-        if not len(torrent_hash) == 40:
-            logger.error("Torrent hash is missing, or an invalid hash value has been passed")
-            return
+    def torrentit(self, issueid=None, torrent_hash=None, download=False):
+        #make sure it's bool'd here.
+        if download == 'True':
+            download = True
         else:
-            rp = test.RTorrent()
-            torrent_info = rp.main(torrent_hash, check=True)
+            download = False
+
+        torrent_info = helpers.torrentinfo(issueid, torrent_hash, download)

         if torrent_info:
             torrent_name = torrent_info['name']
@@ -5127,15 +5157,20 @@
             ti = '
${comic['ComicYear']} # ${comic['LatestIssue']} ${comic['LatestDate']} ${css}
${comic['haveissues']}/${comic['totalissues']}
${comic['recentstatus']}
@@ -82,26 +88,26 @@ function initThisPage() {
 	$('#series_table').dataTable( {
-		"bDestroy": true,
-		"aoColumnDefs": [
-			{ 'bSortable': false, 'aTargets': [5, 9] },
-			{ 'bVisible': false, 'aTargets': [5, 9] },
-			{ 'sType': 'numeric', 'aTargets': [5] },
-			{ 'columns.orderData': [5], 'aTargets': [6] },
-			{ 'columns.orderData': [9], 'aTargets': [8] }
+		"destroy": true,
+		"columnDefs": [
+			{ "orderable": false, "targets": [5, 9] },
+			{ "visible": false, "targets": [5, 9] },
+			{ "type": 'num', "targets": 5 },
+			{ "orderData": 5, "targets": 6 },
+			{ "orderData": 9, "targets": 8 }
 		],
-		"aLengthMenu": [[10, 15, 25, 50, -1], [10, 15, 25, 50, 'All' ]],
-		"oLanguage": {
-			"sLengthMenu":"Show _MENU_ results per page",
-			"sEmptyTable": "No results",
-			"sInfo":"Showing _START_ to _END_ of _TOTAL_ results",
-			"sInfoEmpty":"Showing 0 to 0 of 0 results",
-			"sInfoFiltered":"(filtered from _MAX_ total results)",
-			"sSearch" : ""},
-		"bStateSave": true,
-		"iDisplayLength": 25,
-		"sPaginationType": "full_numbers",
-		"aaSorting": [[7,'asc'],[1,'asc']],
+		"lengthMenu": [[10, 15, 25, 50, -1], [10, 15, 25, 50, 'All' ]],
+		"language": {
+			"lengthMenu":"Show _MENU_ results per page",
+			"emptyTable": "No results",
+			"info":"Showing _START_ to _END_ of _TOTAL_ results",
+			"infoEmpty":"Showing 0 to 0 of 0 results",
+			"infoFiltered":"(filtered from _MAX_ total results)",
+			"search" : ""},
+		"stateSave": true,
+		"pageLength": 25,
+		"pagingType": "full_numbers",
+		"order": [[7, 'asc'],[1, 'asc']]
 	});
diff --git a/mylar/search.py b/mylar/search.py
index bdd02afb..feb59d61 100755
--- a/mylar/search.py
+++ b/mylar/search.py
@@ -307,7 +307,6 @@ def NZB_SEARCH(ComicName, IssueNumber, ComicYear, SeriesYear, Publisher, IssueDa

     if any([allow_packs is None, allow_packs == 'None', allow_packs == 0]) and all([mylar.ENABLE_TORRENT_SEARCH, mylar.ENABLE_32P]):
         allow_packs = False
-    logger.info('allow_packs set to :' + str(allow_packs))

     newznab_local = False
@@ -1232,19 +1231,16 @@ def NZB_SEARCH(ComicName, IssueNumber, ComicYear, SeriesYear, Publisher, IssueDa
                         else:
                             nzbprov = 'DEM'

-                    logger.info(nzbprov)
-                    logger.info('rss:' + RSS)
-                    logger.info('allow_packs:' + str(allow_packs))
                     if nzbprov == '32P' and allow_packs and RSS == 'no':
-                        logger.info('pack:' + entry['pack'])
+                        logger.fdebug('pack:' + entry['pack'])
                     if all([nzbprov == '32P', RSS == 'no', allow_packs]) and any([entry['pack'] == '1', entry['pack'] == '2']):
                         if nzbprov == '32P':
                             if entry['pack'] == '2':
-                                logger.info('[PACK-QUEUE] Diamond FreeLeech Pack detected.')
+                                logger.fdebug('[PACK-QUEUE] Diamond FreeLeech Pack detected.')
                             elif entry['pack'] == '1':
-                                logger.info('[PACK-QUEUE] Normal Pack detected. Checking available inkdrops prior to downloading.')
+                                logger.fdebug('[PACK-QUEUE] Normal Pack detected. Checking available inkdrops prior to downloading.')
                             else:
-                                logger.info('[PACK-QUEUE] Invalid Pack.')
+                                logger.fdebug('[PACK-QUEUE] Invalid Pack.')

                             #find the pack range.
                             pack_issuelist = entry['issues']

                             if issueid_info['valid'] == True:
                                 logger.info('Issue Number ' + IssueNumber + ' exists within pack. Continuing.')
                             else:
-                                logger.info('Issue Number ' + IssueNumber + ' does NOT exist within this pack. Skipping')
+                                logger.fdebug('Issue Number ' + IssueNumber + ' does NOT exist within this pack. Skipping')
                                 continue

                     #pack support.
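A note on the blackhole guard introduced in the next hunk (and only corrected in PATCH 03 below): any([provider != '32P', provider != 'TPSE', ...]) is true for every provider, since no single value can equal all of the listed ones, so the branch fires even for the torrent providers it is meant to exclude. all([...]) expresses the intended "provider is none of these". A two-line Python demonstration:

    provider = '32P'
    # any() passes because provider differs from at least one listed value:
    print(any([provider != '32P', provider != 'TPSE']))  # True  - guard always fires (the bug)
    # all() only passes when provider matches none of the listed values:
    print(all([provider != '32P', provider != 'TPSE']))  # False - provider IS one of them (the fix)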
@@ -1571,7 +1567,9 @@ def NZB_SEARCH(ComicName, IssueNumber, ComicYear, SeriesYear, Publisher, IssueDa

                         #generate nzbname
                         nzbname = nzbname_create(nzbprov, info=comicinfo, title=ComicTitle) #entry['title'])
-
+                        if nzbname is None:
+                            logger.error('[NZBPROVIDER = NONE] Encountered an error using given provider with requested information: ' + str(comicinfo) + '. You have a blank entry most likely in your newznabs, fix it & restart Mylar')
+                            continue
                         #generate the send-to and actually send the nzb / torrent.
                         searchresult = searcher(nzbprov, nzbname, comicinfo, entry['link'], IssueID, ComicID, tmpprov, newznab=newznab_host)
@@ -1878,7 +1876,7 @@ def nzbname_create(provider, title=None, info=None):
     # it searches nzblog which contains the nzbname to pull out the IssueID and start the post-processing
     # it is also used to keep the hashinfo for the nzbname in case it fails downloading, it will get put into the failed db for future exclusions

-    if mylar.USE_BLACKHOLE and provider != '32P' and provider != 'TPSE':
+    if mylar.USE_BLACKHOLE and any([provider != '32P', provider != 'TPSE', provider != 'WWT', provider != 'DEM']):
         if os.path.exists(mylar.BLACKHOLE_DIR):
             #load in the required info to generate the nzb names when required (blackhole only)
             ComicName = info[0]['ComicName']
@@ -1933,8 +1931,11 @@ def nzbname_create(provider, title=None, info=None):
             nzbname = re.sub(match.group(), '', nzbname).strip()
         logger.fdebug('[SEARCHER] end nzbname: ' + nzbname)

-    logger.fdebug("nzbname used for post-processing:" + nzbname)
-    return nzbname
+    if nzbname is None:
+        return None
+    else:
+        logger.fdebug("nzbname used for post-processing:" + nzbname)
+        return nzbname

 def searcher(nzbprov, nzbname, comicinfo, link, IssueID, ComicID, tmpprov, directsend=None, newznab=None):
     alt_nzbname = None

From d909435f1fa5c86d80f03c73f3867df23866c50b Mon Sep 17 00:00:00 2001
From: evilhero
Date: Thu, 2 Mar 2017 12:49:09 -0500
Subject: [PATCH 02/15] FIX:(#1583) Fix for errant have/total sorting on main page

---
 data/interfaces/default/index.html | 9 ++++++++-
 1 file changed, 8 insertions(+), 1 deletion(-)

diff --git a/data/interfaces/default/index.html b/data/interfaces/default/index.html
index 8e25c6e8..d58eefea 100755
--- a/data/interfaces/default/index.html
+++ b/data/interfaces/default/index.html
@@ -2,6 +2,7 @@
 <%!
from mylar import helpers, db import datetime + import decimal %> <%def name="body()"> @@ -36,7 +37,13 @@ hissues = 0 else: hissues = comic['haveissues'] - comic_percent = comic['percent'] + hissues + + if any([comic['totalissues'] == 'None', comic['totalissues'] is None]): + tissues = 0 + else: + tissues = comic['totalissues'] + + comic_percent = int(hissues) + decimal.Decimal(tissues) / decimal.Decimal('1000') if comic['Status'] == 'Paused': grade = 'X' From e400bf7a5df146f74276522b9517bf6ab636f0d9 Mon Sep 17 00:00:00 2001 From: evilhero Date: Thu, 2 Mar 2017 13:12:05 -0500 Subject: [PATCH 03/15] FIX:(#1578) Fix for error when torrents are searched against, but blackhole method is enabled --- mylar/search.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/mylar/search.py b/mylar/search.py index feb59d61..2e351979 100755 --- a/mylar/search.py +++ b/mylar/search.py @@ -1876,7 +1876,7 @@ def nzbname_create(provider, title=None, info=None): # it searches nzblog which contains the nzbname to pull out the IssueID and start the post-processing # it is also used to keep the hashinfo for the nzbname in case it fails downloading, it will get put into the failed db for future exclusions - if mylar.USE_BLACKHOLE and any([provider != '32P', provider != 'TPSE', provider != 'WWT', provider != 'DEM']): + if mylar.USE_BLACKHOLE and all([provider != '32P', provider != 'TPSE', provider != 'WWT', provider != 'DEM']): if os.path.exists(mylar.BLACKHOLE_DIR): #load in the required info to generate the nzb names when required (blackhole only) ComicName = info[0]['ComicName'] From d94ae388de1ceedbcfdaed4cf6902e686d41155f Mon Sep 17 00:00:00 2001 From: evilhero Date: Thu, 2 Mar 2017 14:29:20 -0500 Subject: [PATCH 04/15] FIX:(#1578) Fixed another error with nzbname generation with invalid options --- mylar/search.py | 1 + 1 file changed, 1 insertion(+) diff --git a/mylar/search.py b/mylar/search.py index 2e351979..08ffe88e 100755 --- a/mylar/search.py +++ b/mylar/search.py @@ -1875,6 +1875,7 @@ def nzbname_create(provider, title=None, info=None): #the nzbname here is used when post-processing # it searches nzblog which contains the nzbname to pull out the IssueID and start the post-processing # it is also used to keep the hashinfo for the nzbname in case it fails downloading, it will get put into the failed db for future exclusions + nzbname = None if mylar.USE_BLACKHOLE and all([provider != '32P', provider != 'TPSE', provider != 'WWT', provider != 'DEM']): if os.path.exists(mylar.BLACKHOLE_DIR): From 1f7f8e9add085fe929a80438bae3fcd07445bf30 Mon Sep 17 00:00:00 2001 From: evilhero Date: Sat, 4 Mar 2017 14:15:34 -0500 Subject: [PATCH 05/15] FIX:(#1585) Fix for error when displaying Pullist page on a non-English Windows machine due to date presentation --- mylar/webserve.py | 11 +++++++++-- 1 file changed, 9 insertions(+), 2 deletions(-) diff --git a/mylar/webserve.py b/mylar/webserve.py index e3e64355..41898f31 100644 --- a/mylar/webserve.py +++ b/mylar/webserve.py @@ -1594,10 +1594,17 @@ class WebInterface(object): date_fmt = "%B %d, %Y" + try: + con_startweek = u"" + startweek.strftime(date_fmt).decode('utf-8') + con_endweek = u"" + endweek.strftime(date_fmt).decode('utf-8') + except: + con_startweek = u"" + startweek.strftime(date_fmt).decode('cp1252') + con_endweek = u"" + endweek.strftime(date_fmt).decode('cp1252') + weekinfo = {'weeknumber': weeknumber, - 'startweek': u"" + startweek.strftime(date_fmt).decode('utf-8'), + 'startweek': con_startweek, 'midweek': midweek.strftime('%Y-%m-%d'), - 
'endweek': u"" + endweek.strftime(date_fmt).decode('utf-8'),
+                    'endweek': con_endweek,
                     'year': year,
                     'prev_weeknumber': prev_week,
                     'prev_year': prev_year,

From 8891088b4d272e4c1fa048da2da1fbeaab23a59f Mon Sep 17 00:00:00 2001
From: evilhero
Date: Thu, 9 Mar 2017 14:47:17 -0500
Subject: [PATCH 06/15] FIX: bencode problem for deluge users when snatching torrent

---
 mylar/torrent/clients/deluge.py | 6 +++---
 1 file changed, 3 insertions(+), 3 deletions(-)

diff --git a/mylar/torrent/clients/deluge.py b/mylar/torrent/clients/deluge.py
index 58b8aaf8..b3cd4f99 100644
--- a/mylar/torrent/clients/deluge.py
+++ b/mylar/torrent/clients/deluge.py
@@ -158,13 +158,13 @@ class TorrentClient(object):

     def get_the_hash(self, filepath):
         import hashlib, StringIO
-        from mylar import bencode
+        import bencode

         # Open torrent file
         torrent_file = open(filepath, "rb")
-        metainfo = bencode.bdecode(torrent_file.read())
+        metainfo = bencode.decode(torrent_file.read())
         info = metainfo['info']
-        thehash = hashlib.sha1(bencode.bencode(info)).hexdigest().upper()
+        thehash = hashlib.sha1(bencode.encode(info)).hexdigest().upper()
         logger.debug('Hash: ' + thehash)
         return thehash

From e10b2b7b9bdf53c076159a98dceb82dfd3644ed6 Mon Sep 17 00:00:00 2001
From: evilhero
Date: Wed, 15 Mar 2017 13:05:37 -0400
Subject: [PATCH 07/15] FIX:(#1564) Fake the RSS Feed generation for nzbhydra so it can poll against the last 100 entries via api, instead of rss query

---
 mylar/rsscheck.py | 8 ++++++--
 1 file changed, 6 insertions(+), 2 deletions(-)

diff --git a/mylar/rsscheck.py b/mylar/rsscheck.py
index 92da1fd1..61d1a198 100755
--- a/mylar/rsscheck.py
+++ b/mylar/rsscheck.py
@@ -360,8 +360,12 @@ def nzbs(provider=None, forcerss=False):
                 newznabuid = newznabuid or '1'
                 newznabcat = newznabcat or '7030'

-                # 11-21-2014: added &num=100 to return 100 results (or maximum) - unsure of cross-reliablity
-                _parse_feed(site, newznab_host[1].rstrip() + '/rss?t=' + str(newznabcat) + '&dl=1&i=' + str(newznabuid) + '&num=100&r=' + newznab_host[3].rstrip(), bool(newznab_host[2]))
+                if site[-10:] == '[nzbhydra]':
+                    #to allow nzbhydra to do category search by most recent (ie.
rss) + _parse_feed(site, newznab_host[1].rstrip() + '/api?t=search&cat=' + str(newznabcat) + '&dl=1&i=' + str(newznabuid) + '&num=100&apikey=' + newznab_host[3].rstrip(), bool(newznab_host[2])) + else: + # 11-21-2014: added &num=100 to return 100 results (or maximum) - unsure of cross-reliablity + _parse_feed(site, newznab_host[1].rstrip() + '/rss?t=' + str(newznabcat) + '&dl=1&i=' + str(newznabuid) + '&num=100&r=' + newznab_host[3].rstrip(), bool(newznab_host[2])) feeddata = [] From 69cc84771560b703810ced0892928bf90d04952d Mon Sep 17 00:00:00 2001 From: barbequesauce Date: Wed, 15 Mar 2017 13:19:19 -0400 Subject: [PATCH 08/15] Add random delay to avoid DDOS protection Add random delay to avoid DDOS protection (per issue 1571) --- mylar/rsscheck.py | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/mylar/rsscheck.py b/mylar/rsscheck.py index 61d1a198..b70f0e98 100755 --- a/mylar/rsscheck.py +++ b/mylar/rsscheck.py @@ -10,6 +10,7 @@ import ftpsshup import datetime import gzip import time +import random from StringIO import StringIO import mylar @@ -145,6 +146,9 @@ def torrents(pickfeed=None, seriesname=None, issue=None, feedinfo=None): if all([pickfeed != '4', pickfeed != '3', pickfeed != '5', pickfeed != '999']): payload = None +            ddos_protection = round(random.uniform(0,15),2) +            time.sleep(ddos_protection) + try: cf_cookievalue = None scraper = cfscrape.create_scraper() From d2fc2b6d02f1b451247b6f471a979b8c51d5b434 Mon Sep 17 00:00:00 2001 From: evilhero Date: Wed, 15 Mar 2017 13:28:59 -0400 Subject: [PATCH 09/15] FIX: just some whitespace errors --- mylar/rsscheck.py | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/mylar/rsscheck.py b/mylar/rsscheck.py index b70f0e98..fb0a0d80 100755 --- a/mylar/rsscheck.py +++ b/mylar/rsscheck.py @@ -145,10 +145,10 @@ def torrents(pickfeed=None, seriesname=None, issue=None, feedinfo=None): if all([pickfeed != '4', pickfeed != '3', pickfeed != '5', pickfeed != '999']): payload = None - -            ddos_protection = round(random.uniform(0,15),2) + +            ddos_protection = round(random.uniform(0,15),2)             time.sleep(ddos_protection) - + try: cf_cookievalue = None scraper = cfscrape.create_scraper() From 5399f6dfcca1e343513c34ca3703bac316871c0f Mon Sep 17 00:00:00 2001 From: evilhero Date: Wed, 15 Mar 2017 14:40:17 -0400 Subject: [PATCH 10/15] FIX: fix for last PR with whitespaces and bad characters within --- mylar/rsscheck.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/mylar/rsscheck.py b/mylar/rsscheck.py index fb0a0d80..b4f945e4 100755 --- a/mylar/rsscheck.py +++ b/mylar/rsscheck.py @@ -146,8 +146,8 @@ def torrents(pickfeed=None, seriesname=None, issue=None, feedinfo=None): if all([pickfeed != '4', pickfeed != '3', pickfeed != '5', pickfeed != '999']): payload = None -            ddos_protection = round(random.uniform(0,15),2) -            time.sleep(ddos_protection) + ddos_protection = round(random.uniform(0,15),2) + time.sleep(ddos_protection) try: cf_cookievalue = None From a8bef2e7f30ac3f3cf0ddbb6840481ea813ba429 Mon Sep 17 00:00:00 2001 From: evilhero Date: Mon, 20 Mar 2017 22:03:34 -0400 Subject: [PATCH 11/15] FIX:(#1304) Fix for rename problem when series title is utf-8 encoded during an import sequence --- mylar/helpers.py | 16 ++++++++-------- 1 file changed, 8 insertions(+), 8 deletions(-) diff --git a/mylar/helpers.py b/mylar/helpers.py index e1e09134..fc7bdc18 100755 --- a/mylar/helpers.py +++ b/mylar/helpers.py @@ -247,7 +247,7 @@ def 
rename_param(comicid, comicname, issue, ofilename, comicyear=None, issueid=N
     import db, logger
     myDB = db.DBConnection()
     logger.fdebug('comicid: ' + str(comicid))
-    logger.fdebug('issue#: ' + str(issue))
+    logger.fdebug('issue#: ' + issue)
     # the issue here is a non-decimalized version, we need to see if it's got a decimal and if not, add '.00'
 #    iss_find = issue.find('.')
 #    if iss_find < 0:
@@ -422,7 +422,7 @@ def rename_param(comicid, comicname, issue, ofilename, comicyear=None, issueid=N
         else:
             iss = issuenum
         issueno = str(iss)
-        logger.fdebug('iss:' + str(iss))
+        logger.fdebug('iss:' + iss)
         logger.fdebug('issueno:' + str(issueno))
     # issue zero-suppression here
     if mylar.ZERO_LEVEL == "0":
@@ -507,10 +507,10 @@ def rename_param(comicid, comicname, issue, ofilename, comicyear=None, issueid=N
     if month_name is None:
         month_name = 'None'
     logger.fdebug('Issue Year : ' + str(issueyear))
-    logger.fdebug('Publisher: ' + str(publisher))
-    logger.fdebug('Series: ' + str(series))
+    logger.fdebug('Publisher: ' + publisher)
+    logger.fdebug('Series: ' + series)
     logger.fdebug('Year: ' + str(seriesyear))
-    logger.fdebug('Comic Location: ' + str(comlocation))
+    logger.fdebug('Comic Location: ' + comlocation)
     if comversion is None:
         comversion = 'None'
     #if comversion is None, remove it so it doesn't populate with 'None'
@@ -626,15 +626,15 @@ def rename_param(comicid, comicname, issue, ofilename, comicyear=None, issueid=N
         nfilename = nfilename.replace(' ', mylar.REPLACE_CHAR)
     nfilename = re.sub('[\,\:]', '', nfilename) + ext.lower()
-    logger.fdebug('New Filename: ' + str(nfilename))
+    logger.fdebug('New Filename: ' + nfilename)

     if mylar.LOWERCASE_FILENAMES:
         dst = os.path.join(comlocation, nfilename.lower())
     else:
         dst = os.path.join(comlocation, nfilename)
-    logger.fdebug('Source: ' + str(ofilename))
-    logger.fdebug('Destination: ' + str(dst))
+    logger.fdebug('Source: ' + ofilename)
+    logger.fdebug('Destination: ' + dst)

     rename_this = {"destination_dir": dst,
                    "nfilename": nfilename,

From 4359de96a8509a65c4ff589c757f0687ac9b0ade Mon Sep 17 00:00:00 2001
From: evilhero
Date: Wed, 22 Mar 2017 13:02:05 -0400
Subject: [PATCH 12/15] FIX: Fix for filechecker not scanning in filenames that contained a 'Vol' indicator but had a space separating the indicator from the volume numeric

---
 mylar/filechecker.py | 20 ++++++++++++++------
 1 file changed, 14 insertions(+), 6 deletions(-)

diff --git a/mylar/filechecker.py b/mylar/filechecker.py
index 968f85c2..fedac0ed 100755
--- a/mylar/filechecker.py
+++ b/mylar/filechecker.py
@@ -151,7 +151,8 @@ class FileChecker(object):
                     'series_volume': runresults['series_volume'],
                     'issue_year': runresults['issue_year'],
                     'issue_number': runresults['issue_number'],
-                    'scangroup': runresults['scangroup']
+                    'scangroup': runresults['scangroup'],
+                    'reading_order': runresults['reading_order']
                     })
             else:
                 comiclist.append({
@@ -230,6 +231,7 @@ class FileChecker(object):
         #split the file and then get all the relevant numbers that could possibly be an issue number.
         #remove the extension.
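Before the tokenizing steps below, PATCH 12's failing case is worth seeing in isolation: in a filename like 'Invincible Vol 4', the indicator 'Vol' and the numeral '4' arrive as separate tokens, so the scanner must pair a bare volume indicator with the token that follows it in addition to the fused 'v4'/'vol4' forms. A simplified sketch of that pairing (illustrative only, not the FileChecker implementation):

    import re

    def find_volume(tokens):
        for i, tok in enumerate(tokens):
            low = tok.lower()
            # bare indicator: pair 'vol'/'v'/'volume' with the next all-digit token
            if low in ('v', 'vol', 'vol.', 'volume') and i + 1 < len(tokens) and tokens[i + 1].isdigit():
                return tokens[i + 1]
            # fused indicator: 'v4', 'vol4', 'volume4'
            fused = re.match(r'(?:volume|vol|v)(\d+)$', low)
            if fused:
                return fused.group(1)
        return None

    print(find_volume(['Invincible', 'Vol', '4']))  # 4 - the space-separated case this patch fixes
    print(find_volume(['Invincible', 'v4']))        # 4 - the fused case that already worked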
modfilename = re.sub(filetype, '', filename).strip() + reading_order = None #if it's a story-arc, make sure to remove any leading reading order #'s if self.sarc and mylar.READ2FILENAME: @@ -237,6 +239,8 @@ class FileChecker(object): if mylar.FOLDER_SCAN_LOG_VERBOSE: logger.fdebug('[SARC] Checking filename for Reading Order sequence - Reading Sequence Order found #: ' + str(modfilename[:removest])) if modfilename[:removest].isdigit() and removest <= 3: + reading_order = {'reading_sequence': str(modfilename[:removest]), + 'filename': filename[removest+1:]} modfilename = modfilename[removest+1:] if mylar.FOLDER_SCAN_LOG_VERBOSE: logger.fdebug('[SARC] Removed Reading Order sequence from subname. Now set to : ' + modfilename) @@ -261,6 +265,8 @@ class FileChecker(object): break cnt +=1 + modfilename = modfilename.replace('()','').strip() + #here we take a snapshot of the current modfilename, the intent is that we will remove characters that match #as we discover them - namely volume, issue #, years, etc #the remaining strings should be the series title and/or issue title if present (has to be detected properly) @@ -489,12 +495,12 @@ class FileChecker(object): #now we try to find the series title &/or volume lablel. if any( [sf.lower().startswith('v'), sf.lower().startswith('vol'), volumeprior == True, 'volume' in sf.lower(), 'vol' in sf.lower()] ) and sf.lower() not in {'one','two','three','four','five','six'}: - if sf[1:].isdigit() or sf[3:].isdigit():# or volumeprior == True: + if any([ split_file[split_file.index(sf)].isdigit(), split_file[split_file.index(sf)][3:].isdigit(), split_file[split_file.index(sf)][1:].isdigit() ]): volume = re.sub("[^0-9]", "", sf) if volumeprior: try: - volumetmp = split_file.index(volumeprior_label, current_pos -1) #if this passes, then we're ok, otherwise will try exception - volume_found['position'] = split_file.index(sf, current_pos) + volume_found['position'] = split_file.index(volumeprior_label, current_pos -1) #if this passes, then we're ok, otherwise will try exception + logger.fdebug('volume_found: ' + str(volume_found['position'])) except: sep_volume = False continue @@ -502,6 +508,7 @@ class FileChecker(object): volume_found['position'] = split_file.index(sf, current_pos) volume_found['volume'] = volume + logger.fdebug('volume label detected as : Volume ' + str(volume) + ' @ position: ' + str(split_file.index(sf))) volumeprior = False volumeprior_label = None elif 'vol' in sf.lower() and len(sf) == 3: @@ -509,7 +516,7 @@ class FileChecker(object): volumeprior = True volumeprior_label = sf sep_volume = True - #logger.fdebug('volume label detected, but vol. number is not adjacent, adjusting scope to include number.') + logger.fdebug('volume label detected, but vol. 
number is not adjacent, adjusting scope to include number.') elif 'volume' in sf.lower(): volume = re.sub("[^0-9]", "", sf) if volume.isdigit(): @@ -819,7 +826,8 @@ class FileChecker(object): 'series_volume': issue_volume, 'issue_year': issue_year, 'issue_number': issue_number, - 'scangroup': scangroup} + 'scangroup': scangroup, + 'reading_order': reading_order} series_info = {} series_info = {'sub': path_list, From 3c36d33eb761c37759d1725505c0ed2e9f9ba489 Mon Sep 17 00:00:00 2001 From: evilhero Date: Thu, 23 Mar 2017 11:54:26 -0400 Subject: [PATCH 13/15] FIX: Fix for some import errors related to cbz files containing no metadata, FIX: Fixed some grouping errors on the import results page, FIX: Added in an exception check during the metadata process to gracefully capture errors --- mylar/helpers.py | 201 ++++++++++++++++++-------------- mylar/librarysync.py | 272 ++++++++++++++++++++++--------------------- 2 files changed, 253 insertions(+), 220 deletions(-) diff --git a/mylar/helpers.py b/mylar/helpers.py index fc7bdc18..e048898d 100755 --- a/mylar/helpers.py +++ b/mylar/helpers.py @@ -1370,93 +1370,114 @@ def filesafe(comic): return comicname_filesafe -def IssueDetails(filelocation, IssueID=None): +def IssueDetails(filelocation, IssueID=None, justinfo=False): import zipfile, logger from xml.dom.minidom import parseString - dstlocation = os.path.join(mylar.CACHE_DIR, 'temp.zip') - issuedetails = [] - - if filelocation.endswith('.cbz'): - logger.fdebug('CBZ file detected. Checking for .xml within file') - shutil.copy(filelocation, dstlocation) - else: - logger.fdebug('filename is not a cbz : ' + filelocation) - return - - cover = "notfound" issuetag = None - pic_extensions = ('.jpg','.png','.webp') - modtime = os.path.getmtime(dstlocation) - low_infile = 999999 - try: - with zipfile.ZipFile(dstlocation, 'r') as inzipfile: - for infile in sorted(inzipfile.namelist()): - tmp_infile = re.sub("[^0-9]","", infile).strip() - if tmp_infile == '': - pass - elif int(tmp_infile) < int(low_infile): - low_infile = tmp_infile - low_infile_name = infile - if infile == 'ComicInfo.xml': - logger.fdebug('Extracting ComicInfo.xml to display.') - dst = os.path.join(mylar.CACHE_DIR, 'ComicInfo.xml') - data = inzipfile.read(infile) - #print str(data) - issuetag = 'xml' - #looks for the first page and assumes it's the cover. (Alternate covers handled later on) - elif any(['000.' in infile, '00.' in infile]) and infile.endswith(pic_extensions) and cover == "notfound": - logger.fdebug('Extracting primary image ' + infile + ' as coverfile for display.') + if justinfo is False: + dstlocation = os.path.join(mylar.CACHE_DIR, 'temp.zip') + + + if filelocation.endswith('.cbz'): + logger.fdebug('CBZ file detected. 
Checking for .xml within file') + shutil.copy(filelocation, dstlocation) + else: + logger.fdebug('filename is not a cbz : ' + filelocation) + return + + cover = "notfound" + pic_extensions = ('.jpg','.png','.webp') + modtime = os.path.getmtime(dstlocation) + low_infile = 999999 + + try: + with zipfile.ZipFile(dstlocation, 'r') as inzipfile: + for infile in sorted(inzipfile.namelist()): + tmp_infile = re.sub("[^0-9]","", infile).strip() + if tmp_infile == '': + pass + elif int(tmp_infile) < int(low_infile): + low_infile = tmp_infile + low_infile_name = infile + if infile == 'ComicInfo.xml': + logger.fdebug('Extracting ComicInfo.xml to display.') + dst = os.path.join(mylar.CACHE_DIR, 'ComicInfo.xml') + data = inzipfile.read(infile) + #print str(data) + issuetag = 'xml' + #looks for the first page and assumes it's the cover. (Alternate covers handled later on) + elif any(['000.' in infile, '00.' in infile]) and infile.endswith(pic_extensions) and cover == "notfound": + logger.fdebug('Extracting primary image ' + infile + ' as coverfile for display.') + local_file = open(os.path.join(mylar.CACHE_DIR, 'temp.jpg'), "wb") + local_file.write(inzipfile.read(infile)) + local_file.close + cover = "found" + elif any(['00a' in infile, '00b' in infile, '00c' in infile, '00d' in infile, '00e' in infile]) and infile.endswith(pic_extensions) and cover == "notfound": + logger.fdebug('Found Alternate cover - ' + infile + ' . Extracting.') + altlist = ('00a', '00b', '00c', '00d', '00e') + for alt in altlist: + if alt in infile: + local_file = open(os.path.join(mylar.CACHE_DIR, 'temp.jpg'), "wb") + local_file.write(inzipfile.read(infile)) + local_file.close + cover = "found" + break + + elif any(['001.jpg' in infile, '001.png' in infile, '001.webp' in infile, '01.jpg' in infile, '01.png' in infile, '01.webp' in infile]) and cover == "notfound": + logger.fdebug('Extracting primary image ' + infile + ' as coverfile for display.') + local_file = open(os.path.join(mylar.CACHE_DIR, 'temp.jpg'), "wb") + local_file.write(inzipfile.read(infile)) + local_file.close + cover = "found" + + if cover != "found": + logger.fdebug('Invalid naming sequence for jpgs discovered. Attempting to find the lowest sequence and will use as cover (it might not work). Currently : ' + str(low_infile)) local_file = open(os.path.join(mylar.CACHE_DIR, 'temp.jpg'), "wb") - local_file.write(inzipfile.read(infile)) + local_file.write(inzipfile.read(low_infile_name)) local_file.close - cover = "found" - elif any(['00a' in infile, '00b' in infile, '00c' in infile, '00d' in infile, '00e' in infile]) and infile.endswith(pic_extensions) and cover == "notfound": - logger.fdebug('Found Alternate cover - ' + infile + ' . Extracting.') - altlist = ('00a', '00b', '00c', '00d', '00e') - for alt in altlist: - if alt in infile: - local_file = open(os.path.join(mylar.CACHE_DIR, 'temp.jpg'), "wb") - local_file.write(inzipfile.read(infile)) - local_file.close - cover = "found" - break + cover = "found" - elif any(['001.jpg' in infile, '001.png' in infile, '001.webp' in infile, '01.jpg' in infile, '01.png' in infile, '01.webp' in infile]) and cover == "notfound": - logger.fdebug('Extracting primary image ' + infile + ' as coverfile for display.') - local_file = open(os.path.join(mylar.CACHE_DIR, 'temp.jpg'), "wb") - local_file.write(inzipfile.read(infile)) - local_file.close - cover = "found" + except: + logger.info('ERROR. Unable to properly retrieve the cover for displaying. 
It\'s probably best to re-tag this file.') + return - if cover != "found": - logger.fdebug('Invalid naming sequence for jpgs discovered. Attempting to find the lowest sequence and will use as cover (it might not work). Currently : ' + str(low_infile)) - local_file = open(os.path.join(mylar.CACHE_DIR, 'temp.jpg'), "wb") - local_file.write(inzipfile.read(low_infile_name)) - local_file.close - cover = "found" + ComicImage = os.path.join('cache', 'temp.jpg?' +str(modtime)) + IssueImage = replacetheslash(ComicImage) - except: - logger.info('ERROR. Unable to properly retrieve the cover for displaying. It\'s probably best to re-tag this file.') - return - - ComicImage = os.path.join('cache', 'temp.jpg?' +str(modtime)) - IssueImage = replacetheslash(ComicImage) + else: + IssueImage = "None" + try: + with zipfile.ZipFile(filelocation, 'r') as inzipfile: + for infile in sorted(inzipfile.namelist()): + if infile == 'ComicInfo.xml': + logger.fdebug('Found ComicInfo.xml - now retrieving information.') + data = inzipfile.read(infile) + issuetag = 'xml' + break + except: + logger.info('ERROR. Unable to properly retrieve the cover for displaying. It\'s probably best to re-tag this file.') + return if issuetag is None: - import subprocess - from subprocess import CalledProcessError, check_output - unzip_cmd = "/usr/bin/unzip" + data = None try: - #unzip -z will extract the zip comment field. - data = subprocess.check_output([unzip_cmd, '-z', dstlocation]) - # return data is encoded in bytes, not unicode. Need to figure out how to run check_output returning utf-8 - issuetag = 'comment' - except CalledProcessError as e: + dz = zipfile.ZipFile(filelocation, 'r') + data = dz.comment + except: logger.warn('Unable to extract comment field from zipfile.') + return + else: + if data: + issuetag = 'comment' + else: + logger.warn('No metadata available in zipfile comment field.') + return + + logger.info('Tag returned as being: ' + str(issuetag)) #logger.info('data:' + str(data)) @@ -1549,28 +1570,30 @@ def IssueDetails(filelocation, IssueID=None): except: pagecount = 0 - i = 0 + #not used atm. + #to validate a front cover if it's tagged as one within the zip (some do this) + #i = 0 + #try: + # pageinfo = result.getElementsByTagName('Page')[0].attributes + # if pageinfo: pageinfo_test == True + #except: + # pageinfo_test = False - try: - pageinfo = result.getElementsByTagName('Page')[0].attributes - if pageinfo: pageinfo_test == True - except: - pageinfo_test = False + #if pageinfo_test: + # while (i < int(pagecount)): + # pageinfo = result.getElementsByTagName('Page')[i].attributes + # attrib = pageinfo.getNamedItem('Image') + # #logger.fdebug('Frontcover validated as being image #: ' + str(attrib.value)) + # att = pageinfo.getNamedItem('Type') + # #logger.fdebug('pageinfo: ' + str(pageinfo)) + # if att.value == 'FrontCover': + # #logger.fdebug('FrontCover detected. Extracting.') + # break + # i+=1 - if pageinfo_test: - while (i < int(pagecount)): - pageinfo = result.getElementsByTagName('Page')[i].attributes - attrib = pageinfo.getNamedItem('Image') - #logger.fdebug('Frontcover validated as being image #: ' + str(attrib.value)) - att = pageinfo.getNamedItem('Type') - logger.fdebug('pageinfo: ' + str(pageinfo)) - if att.value == 'FrontCover': - #logger.fdebug('FrontCover detected. 
Extracting.')
-                break
-            i+=1
         elif issuetag == 'comment':
             logger.info('CBL Tagging.')
-            stripline = 'Archive: ' + dstlocation
+            stripline = 'Archive: ' + filelocation
             data = re.sub(stripline, '', data.encode("utf-8")).strip()
             if data is None or data == '':
                 return
diff --git a/mylar/librarysync.py b/mylar/librarysync.py
index 8c504866..813b356f 100755
--- a/mylar/librarysync.py
+++ b/mylar/librarysync.py
@@ -271,146 +271,156 @@ def libraryScan(dir=None, append=False, ComicID=None, ComicName=None, cron=None,
             if i['ComicLocation'].endswith('.cbz'):
                 logger.fdebug('[IMPORT-CBZ] Metatagging checking enabled.')
                 logger.info('[IMPORT-CBZ] Attempting to read tags present in filename: ' + i['ComicLocation'])
-                issueinfo = helpers.IssueDetails(i['ComicLocation'])
-                logger.info('issueinfo: ' + str(issueinfo))
-                if issueinfo is None:
-                    logger.fdebug('[IMPORT-CBZ] No valid metadata contained within filename. Dropping down to parsing the filename itself.')
-                    pass
-                else:
-                    issuenotes_id = None
-                    logger.info('[IMPORT-CBZ] Successfully retrieved some tags. Lets see what I can figure out.')
-                    comicname = issueinfo[0]['series']
-                    if comicname is not None:
-                        logger.fdebug('[IMPORT-CBZ] Series Name: ' + comicname)
-                        as_d = filechecker.FileChecker()
-                        as_dyninfo = as_d.dynamic_replace(comicname)
-                        logger.fdebug('Dynamic-ComicName: ' + as_dyninfo['mod_seriesname'])
-                    else:
-                        logger.fdebug('[IMPORT-CBZ] No series name found within metadata. This is bunk - dropping down to file parsing for usable information.')
-                        issueinfo = None
-                        issue_number = None
-
-                if issueinfo is not None:
-                    try:
-                        issueyear = issueinfo[0]['year']
-                    except:
-                        issueyear = None
-
-                    #if the issue number is a non-numeric unicode string, this will screw up along with impID
-                    issue_number = issueinfo[0]['issue_number']
-                    if issue_number is not None:
-                        logger.fdebug('[IMPORT-CBZ] Issue Number: ' + issue_number)
-                    else:
-                        issue_number = i['parsed']['issue_number']
-
-                    if 'annual' in comicname.lower() or 'annual' in comfilename.lower():
-                        if issue_number is None or issue_number == 'None':
-                            logger.info('Annual detected with no issue number present within metadata. Assuming year as issue.')
-                            try:
-                                issue_number = 'Annual ' + str(issueyear)
-                            except:
-                                issue_number = 'Annual ' + i['parsed']['issue_year']
-                        else:
-                            logger.info('Annual detected with issue number present within metadata.')
-                            if 'annual' not in issue_number.lower():
-                                issue_number = 'Annual ' + issue_number
-                        mod_series = re.sub('annual', '', comicname, flags=re.I).strip()
-                    else:
-                        mod_series = comicname
-
-                    logger.fdebug('issue number SHOULD Be: ' + issue_number)
-
-                    try:
-                        issuetitle = issueinfo[0]['title']
-                    except:
-                        issuetitle = None
-                    try:
-                        issueyear = issueinfo[0]['year']
-                    except:
-                        issueyear = None
-                    try:
-                        issuevolume = str(issueinfo[0]['volume'])
-                        if all([issuevolume is not None, issuevolume != 'None']) and not issuevolume.lower().startswith('v'):
-                            issuevolume = 'v' + str(issuevolume)
-                        logger.fdebug('[TRY]issue volume is: ' + str(issuevolume))
-                    except:
-                        logger.fdebug('[EXCEPT]issue volume is: ' + str(issuevolume))
-                        issuevolume = None
-
-                    if any([comicname is None, comicname == 'None', issue_number is None, issue_number == 'None']):
-                        logger.fdebug('[IMPORT-CBZ] Improperly tagged file as the metatagging is invalid. Ignoring meta and just parsing the filename.')
-                        issueinfo = None
-                        pass
-                    else:
-                        # if used by ComicTagger, Notes field will have the IssueID.
-                        issuenotes = issueinfo[0]['notes']
-                        logger.fdebug('[IMPORT-CBZ] Notes: ' + issuenotes)
-                        if issuenotes is not None and issuenotes != 'None':
-                            if 'Issue ID' in issuenotes:
-                                st_find = issuenotes.find('Issue ID')
-                                tmp_issuenotes_id = re.sub("[^0-9]", " ", issuenotes[st_find:]).strip()
-                                if tmp_issuenotes_id.isdigit():
-                                    issuenotes_id = tmp_issuenotes_id
-                                    logger.fdebug('[IMPORT-CBZ] Successfully retrieved CV IssueID for ' + comicname + ' #' + issue_number + ' [' + str(issuenotes_id) + ']')
-                            elif 'CVDB' in issuenotes:
-                                st_find = issuenotes.find('CVDB')
-                                tmp_issuenotes_id = re.sub("[^0-9]", " ", issuenotes[st_find:]).strip()
-                                if tmp_issuenotes_id.isdigit():
-                                    issuenotes_id = tmp_issuenotes_id
-                                    logger.fdebug('[IMPORT-CBZ] Successfully retrieved CV IssueID for ' + comicname + ' #' + issue_number + ' [' + str(issuenotes_id) + ']')
-                            else:
-                                logger.fdebug('[IMPORT-CBZ] Unable to retrieve IssueID from meta-tagging. If there is other metadata present I will use that.')
-
-                        logger.fdebug('[IMPORT-CBZ] Adding ' + comicname + ' to the import-queue!')
-                        #impid = comicname + '-' + str(issueyear) + '-' + str(issue_number) #com_NAME + "-" + str(result_comyear) + "-" + str(comiss)
-                        impid = str(random.randint(1000000,99999999))
-                        logger.fdebug('[IMPORT-CBZ] impid: ' + str(impid))
-                        #make sure we only add in those issueid's which don't already have a comicid attached via the cvinfo scan above (this is for reverse-lookup of issueids)
-                        issuepopulated = False
-                        if cvinfo_CID is None:
-                            if issuenotes_id is None:
-                                logger.info('[IMPORT-CBZ] No ComicID detected where it should be. Bypassing this metadata entry and going the parsing route [' + comfilename + ']')
-                            else:
-                                #we need to store the impid here as well so we can look it up.
-                                issueid_list.append({'issueid': issuenotes_id,
-                                         'importinfo': {'impid': impid,
-                                                        'comicid': None,
-                                                        'comicname': comicname,
-                                                        'dynamicname': as_dyninfo['mod_seriesname'],
-                                                        'comicyear': issueyear,
-                                                        'issuenumber': issue_number,
-                                                        'volume': issuevolume,
-                                                        'comfilename': comfilename,
-                                                        'comlocation': comlocation.decode(mylar.SYS_ENCODING)}
-                                         })
-                                mylar.IMPORT_CID_COUNT +=1
-                                issuepopulated = True
-
-                        if issuepopulated == False:
-                            if cvscanned_loc == os.path.dirname(comlocation):
-                                cv_cid = cvinfo_CID
-                                logger.fdebug('[IMPORT-CBZ] CVINFO_COMICID attached : ' + str(cv_cid))
-                            else:
-                                cv_cid = None
-                            import_by_comicids.append({
-                                "impid": impid,
-                                "comicid": cv_cid,
-                                "watchmatch": None,
-                                "displayname": mod_series,
-                                "comicname": comicname,
-                                "dynamicname": as_dyninfo['mod_seriesname'],
-                                "comicyear": issueyear,
-                                "issuenumber": issue_number,
-                                "volume": issuevolume,
-                                "issueid": issuenotes_id,
-                                "comfilename": comfilename,
-                                "comlocation": comlocation.decode(mylar.SYS_ENCODING)
-                            })
-
-                            mylar.IMPORT_CID_COUNT +=1
-                        else:
-                            pass
-                            #logger.fdebug(i['ComicFilename'] + ' is not in a metatagged format (cbz). Bypassing reading of the metatags')
+                try:
+                    issueinfo = helpers.IssueDetails(i['ComicLocation'], justinfo=True)
+                except:
+                    logger.fdebug('[IMPORT-CBZ] Unable to retrieve metadata - possibly doesn\'t exist. Ignoring meta-retrieval')
+                    pass
+                else:
+                    logger.info('issueinfo: ' + str(issueinfo))
+
+                    if issueinfo is None:
+                        logger.fdebug('[IMPORT-CBZ] No valid metadata contained within filename. Dropping down to parsing the filename itself.')
+                        pass
+                    else:
+                        issuenotes_id = None
+                        logger.info('[IMPORT-CBZ] Successfully retrieved some tags. Lets see what I can figure out.')
+                        comicname = issueinfo[0]['series']
+                        if comicname is not None:
+                            logger.fdebug('[IMPORT-CBZ] Series Name: ' + comicname)
+                            as_d = filechecker.FileChecker()
+                            as_dyninfo = as_d.dynamic_replace(comicname)
+                            logger.fdebug('Dynamic-ComicName: ' + as_dyninfo['mod_seriesname'])
+                        else:
+                            logger.fdebug('[IMPORT-CBZ] No series name found within metadata. This is bunk - dropping down to file parsing for usable information.')
+                            issueinfo = None
+                            issue_number = None
+
+                    if issueinfo is not None:
+                        try:
+                            issueyear = issueinfo[0]['year']
+                        except:
+                            issueyear = None
+
+                        #if the issue number is a non-numeric unicode string, this will screw up along with impID
+                        issue_number = issueinfo[0]['issue_number']
+                        if issue_number is not None:
+                            logger.fdebug('[IMPORT-CBZ] Issue Number: ' + issue_number)
+                        else:
+                            issue_number = i['parsed']['issue_number']
+
+                        if 'annual' in comicname.lower() or 'annual' in comfilename.lower():
+                            if issue_number is None or issue_number == 'None':
+                                logger.info('Annual detected with no issue number present within metadata. Assuming year as issue.')
+                                try:
+                                    issue_number = 'Annual ' + str(issueyear)
+                                except:
+                                    issue_number = 'Annual ' + i['parsed']['issue_year']
+                            else:
+                                logger.info('Annual detected with issue number present within metadata.')
+                                if 'annual' not in issue_number.lower():
+                                    issue_number = 'Annual ' + issue_number
+                            mod_series = re.sub('annual', '', comicname, flags=re.I).strip()
+                        else:
+                            mod_series = comicname
+
+                        logger.fdebug('issue number SHOULD Be: ' + issue_number)
+
+                        try:
+                            issuetitle = issueinfo[0]['title']
+                        except:
+                            issuetitle = None
+                        try:
+                            issueyear = issueinfo[0]['year']
+                        except:
+                            issueyear = None
+                        try:
+                            issuevolume = str(issueinfo[0]['volume'])
+                            if all([issuevolume is not None, issuevolume != 'None', not issuevolume.lower().startswith('v')]):
+                                issuevolume = 'v' + str(issuevolume)
+                            if any([issuevolume is None, issuevolume == 'None']):
+                                logger.info('[EXCEPT] issue volume is NONE')
+                                issuevolume = None
+                            else:
+                                logger.fdebug('[TRY]issue volume is: ' + str(issuevolume))
+                        except:
+                            logger.fdebug('[EXCEPT]issue volume is: ' + str(issuevolume))
+                            issuevolume = None
+
+                        if any([comicname is None, comicname == 'None', issue_number is None, issue_number == 'None']):
+                            logger.fdebug('[IMPORT-CBZ] Improperly tagged file as the metatagging is invalid. Ignoring meta and just parsing the filename.')
+                            issueinfo = None
+                            pass
+                        else:
+                            # if used by ComicTagger, Notes field will have the IssueID.
+                            issuenotes = issueinfo[0]['notes']
+                            logger.fdebug('[IMPORT-CBZ] Notes: ' + issuenotes)
+                            if issuenotes is not None and issuenotes != 'None':
+                                if 'Issue ID' in issuenotes:
+                                    st_find = issuenotes.find('Issue ID')
+                                    tmp_issuenotes_id = re.sub("[^0-9]", " ", issuenotes[st_find:]).strip()
+                                    if tmp_issuenotes_id.isdigit():
+                                        issuenotes_id = tmp_issuenotes_id
+                                        logger.fdebug('[IMPORT-CBZ] Successfully retrieved CV IssueID for ' + comicname + ' #' + issue_number + ' [' + str(issuenotes_id) + ']')
+                                elif 'CVDB' in issuenotes:
+                                    st_find = issuenotes.find('CVDB')
+                                    tmp_issuenotes_id = re.sub("[^0-9]", " ", issuenotes[st_find:]).strip()
+                                    if tmp_issuenotes_id.isdigit():
+                                        issuenotes_id = tmp_issuenotes_id
+                                        logger.fdebug('[IMPORT-CBZ] Successfully retrieved CV IssueID for ' + comicname + ' #' + issue_number + ' [' + str(issuenotes_id) + ']')
+                                else:
+                                    logger.fdebug('[IMPORT-CBZ] Unable to retrieve IssueID from meta-tagging. If there is other metadata present I will use that.')
+
+                            logger.fdebug('[IMPORT-CBZ] Adding ' + comicname + ' to the import-queue!')
+                            #impid = comicname + '-' + str(issueyear) + '-' + str(issue_number) #com_NAME + "-" + str(result_comyear) + "-" + str(comiss)
+                            impid = str(random.randint(1000000,99999999))
+                            logger.fdebug('[IMPORT-CBZ] impid: ' + str(impid))
+                            #make sure we only add in those issueid's which don't already have a comicid attached via the cvinfo scan above (this is for reverse-lookup of issueids)
+                            issuepopulated = False
+                            if cvinfo_CID is None:
+                                if issuenotes_id is None:
+                                    logger.info('[IMPORT-CBZ] No ComicID detected where it should be. Bypassing this metadata entry and going the parsing route [' + comfilename + ']')
+                                else:
+                                    #we need to store the impid here as well so we can look it up.
+                                    issueid_list.append({'issueid': issuenotes_id,
+                                             'importinfo': {'impid': impid,
+                                                            'comicid': None,
+                                                            'comicname': comicname,
+                                                            'dynamicname': as_dyninfo['mod_seriesname'],
+                                                            'comicyear': issueyear,
+                                                            'issuenumber': issue_number,
+                                                            'volume': issuevolume,
+                                                            'comfilename': comfilename,
+                                                            'comlocation': comlocation.decode(mylar.SYS_ENCODING)}
+                                             })
+                                    mylar.IMPORT_CID_COUNT +=1
+                                    issuepopulated = True
+
+                            if issuepopulated == False:
+                                if cvscanned_loc == os.path.dirname(comlocation):
+                                    cv_cid = cvinfo_CID
+                                    logger.fdebug('[IMPORT-CBZ] CVINFO_COMICID attached : ' + str(cv_cid))
+                                else:
+                                    cv_cid = None
+                                import_by_comicids.append({
+                                    "impid": impid,
+                                    "comicid": cv_cid,
+                                    "watchmatch": None,
+                                    "displayname": mod_series,
+                                    "comicname": comicname,
+                                    "dynamicname": as_dyninfo['mod_seriesname'],
+                                    "comicyear": issueyear,
+                                    "issuenumber": issue_number,
+                                    "volume": issuevolume,
+                                    "issueid": issuenotes_id,
+                                    "comfilename": comfilename,
+                                    "comlocation": comlocation.decode(mylar.SYS_ENCODING)
+                                })
+
+                                mylar.IMPORT_CID_COUNT +=1
+                    else:
+                        pass
+                        #logger.fdebug(i['ComicFilename'] + ' is not in a metatagged format (cbz). Bypassing reading of the metatags')
 
         if issueinfo is None:
             if i['parsedinfo']['issue_number'] is None:
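The hunk above keeps ComicTagger's two styles of ComicVine markers in the Notes tag ('Issue ID 12345' and 'CVDB12345'). A standalone sketch of that extraction, mirroring the patch's own regex trick; the sample notes string in the comment is hypothetical, not taken from the patch:

    import re

    def cv_issueid_from_notes(issuenotes):
        # Scan the Notes tag for a ComicTagger-style 'Issue ID 12345' or
        # 'CVDB12345' marker and return the numeric ComicVine IssueID, or
        # None if nothing usable is present (same checks as librarysync).
        if issuenotes is None or issuenotes == 'None':
            return None
        for marker in ('Issue ID', 'CVDB'):
            if marker in issuenotes:
                st_find = issuenotes.find(marker)
                # blank out everything non-numeric, then strip - digits survive
                candidate = re.sub("[^0-9]", " ", issuenotes[st_find:]).strip()
                if candidate.isdigit():
                    return candidate
        return None

    # Hypothetical example:
    # cv_issueid_from_notes('Tagged with ComicTagger using info from Comic Vine [CVDB423511].')
    # returns '423511'

Note the same caveat applies as in the patch: if anything numeric trails the ID (a year, say), isdigit() fails after strip() and the marker is ignored, which is why the code falls back to other metadata.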
From 4630a5b6e679f7249d6d937bc22dfd93b7cbc761 Mon Sep 17 00:00:00 2001
From: evilhero
Date: Fri, 24 Mar 2017 11:42:41 -0400
Subject: [PATCH 14/15] FIX: Changed CV url to https to get ahead of the change
 (verify option is available in config.ini as cv_verify), FIX:(#1598) Renaming
 comics with alphanumeric characters was not working, FIX: Story-arc issue
 statuses were not getting the exact status of watchlisted issues during an
 add/refresh, FIX: Fix some problems related to not using a story arc
 directory or grab-bag directory (just monitoring)

---
 data/interfaces/default/storyarc_detail.html |   6 +-
 mylar/__init__.py                            |   7 +-
 mylar/cv.py                                  |   3 +-
 mylar/helpers.py                             |   6 +-
 mylar/importer.py                            |   4 +-
 mylar/mb.py                                  |   6 +-
 mylar/webserve.py                            | 165 +++++++++++--------
 7 files changed, 117 insertions(+), 80 deletions(-)

diff --git a/data/interfaces/default/storyarc_detail.html b/data/interfaces/default/storyarc_detail.html
index 0bb856bf..778e4ade 100755
--- a/data/interfaces/default/storyarc_detail.html
+++ b/data/interfaces/default/storyarc_detail.html
@@ -75,8 +75,8 @@
                     <th>ComicName</th>
                     <th>Issue</th>
                     <th>Pub Date</th>
-                    <th>Status</th>
-                    <th>Options</th>
+                    <th>Status</th>
+                    <th>Options</th>
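Patch 14's headline change is moving ComicVine calls to https with a config-controlled certificate check (the cv_verify option named in the commit message). In requests terms the whole pattern is the verify keyword; a minimal sketch, where the function and constant names are illustrative rather than Mylar's own:

    import requests

    CV_VERIFY = True  # stands in for the cv_verify setting from config.ini

    def cv_get(pullurl, payload):
        # https endpoint plus a configurable certificate check; setting
        # verify=False keeps searches working behind interception proxies
        # or broken cert stores, at the cost of TLS validation.
        try:
            r = requests.get(pullurl, params=payload, verify=CV_VERIFY)
        except Exception as e:
            print('Error fetching data from ComicVine: %s' % e)
            return None
        return r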
diff --git a/mylar/webserve.py b/mylar/webserve.py
--- a/mylar/webserve.py
+++ b/mylar/webserve.py
@@ ... @@
+            ti += '<...>'
+            ti += '<...>'
             ti += '<...>'
             ti += '<...>'
             ti += '<...>'
             ti += '<...>'
             ti += '<...>' + torrent_name + '<...>'
-            ti += '<...>' + torrent_info['hash'] + '<...>'
+            if torrent_info['completed'] and download is True:
+                ti += '<...>AUTO-SNATCH ENABLED: ' + torrent_info['snatch_status'] + '<...>Hash: ' + torrent_info['hash'] + '<...>Location: ' + os.path.join(torrent_info['folder'], torrent_name) + '<...>Filesize: ' + torrent_info['filesize'] + '<...>' + torrent_info['download'] + ' DOWN / ' + torrent_info['upload'] + ' UP<...>Ratio: ' + str(torrent_info['ratio']) + '<...>Seedtime: ' + torrent_info['seedtime'] + '<...>'
+            ti += '<...>'
         else:
             torrent_name = 'Not Found'
             ti = 'Torrent not found (' + str(torrent_hash) + ')'
         return ti

     torrentit.exposed = True
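In the hunk above, '<...>' marks string content that is elided (the HTML markup inside the ti fragments). The surviving pieces still show the shape of the change: the bare hash dump is replaced by a per-field status block that is only emitted once the torrent reports completed and the call was made with download set. A sketch of the same assembly, using assumed <tr><td> markup (the real tags may differ) and only the torrent_info keys the hunk itself references:

    import os

    def torrent_status_html(torrent_name, torrent_info, download=False):
        # Build the small status table that torrentit returns to the UI.
        # Keys used: completed, snatch_status, hash, folder, filesize,
        # download, upload, ratio, seedtime - all present in the hunk above.
        ti = '<table><tr><td><b>' + torrent_name + '</b></td></tr>'
        if torrent_info['completed'] and download is True:
            for row in ('AUTO-SNATCH ENABLED: ' + torrent_info['snatch_status'],
                        'Hash: ' + torrent_info['hash'],
                        'Location: ' + os.path.join(torrent_info['folder'], torrent_name),
                        'Filesize: ' + torrent_info['filesize'],
                        torrent_info['download'] + ' DOWN / ' + torrent_info['upload'] + ' UP',
                        'Ratio: ' + str(torrent_info['ratio']),
                        'Seedtime: ' + torrent_info['seedtime']):
                ti += '<tr><td>' + row + '</td></tr>'
        ti += '</table>'
        return ti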
From e7f4a5b8adcf5e2266dc93efa85810ccb920ec57 Mon Sep 17 00:00:00 2001
From: evilhero
Date: Fri, 24 Mar 2017 11:54:18 -0400
Subject: [PATCH 15/15] FIX: Typo and removed unused module

---
 mylar/albumart.py | 35 -----------------------------------
 mylar/mb.py       |  2 +-
 2 files changed, 1 insertion(+), 36 deletions(-)
 delete mode 100755 mylar/albumart.py

diff --git a/mylar/albumart.py b/mylar/albumart.py
deleted file mode 100755
index 39f88c7f..00000000
--- a/mylar/albumart.py
+++ /dev/null
@@ -1,35 +0,0 @@
-# This file is part of Headphones.
-#
-# Headphones is free software: you can redistribute it and/or modify
-# it under the terms of the GNU General Public License as published by
-# the Free Software Foundation, either version 3 of the License, or
-# (at your option) any later version.
-#
-# Headphones is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with Headphones.  If not, see <http://www.gnu.org/licenses/>.
-
-from mylar import db
-
-
-def getCachedArt(albumid):
-
-    from mylar import cache
-
-    c = cache.Cache()
-
-    artwork_path = c.get_artwork_from_cache(ComicID=comicid)
-
-    if not artwork_path:
-        return None
-
-    if artwork_path.startswith('http://'):
-        artwork = urllib.urlopen(artwork_path).read()
-        return artwork
-    else:
-        artwork = open(artwork_path, "r").read()
-        return artwork
diff --git a/mylar/mb.py b/mylar/mb.py
index aa799cbb..2832ce38 100755
--- a/mylar/mb.py
+++ b/mylar/mb.py
@@ -70,7 +70,7 @@ def pullsearch(comicapi, comicquery, offset, explicit, type):
     payload = None

     try:
-        r = requests.get(PULLURL, params=payload, verify=mylar.CV_VERIYF, headers=mylar.CV_HEADERS)
+        r = requests.get(PULLURL, params=payload, verify=mylar.CV_VERIFY, headers=mylar.CV_HEADERS)
     except Exception, e:
         logger.warn('Error fetching data from ComicVine: %s' % (e))
         return
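The mb.py fix is one character, but it is worth noting why it lingered: mylar.CV_VERIYF only raises AttributeError when the request line actually runs, and the broad except Exception turned that into the generic "Error fetching data from ComicVine" log line, so the typo read like a ComicVine outage rather than a bug. Dropping mylar/albumart.py is likewise safe beyond being "unused": as deleted, getCachedArt() could never have run, since it takes albumid but queries with an undefined comicid and calls urllib without importing it (both Headphones leftovers). For comparison, a corrected sketch of what that helper would have looked like had it been kept, assuming the cache.Cache().get_artwork_from_cache(ComicID=...) signature the deleted file itself uses:

    import urllib

    from mylar import cache

    def getCachedArt(comicid):
        # Parameter renamed to match the ComicID it actually queries with,
        # and urllib imported before use.
        c = cache.Cache()
        artwork_path = c.get_artwork_from_cache(ComicID=comicid)
        if not artwork_path:
            return None
        if artwork_path.startswith('http://'):
            return urllib.urlopen(artwork_path).read()
        with open(artwork_path, 'rb') as f:
            return f.read()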