From 52788daf26160b3f1b363ab2850125af245d2b39 Mon Sep 17 00:00:00 2001 From: evilhero Date: Wed, 16 Jan 2019 12:10:28 -0500 Subject: [PATCH 01/54] FIX:(#2163) Disabled configuration options on the Story Arc Details page since they can only be set from the Story Arc main page, FIX: Removed Update button from Story Arc Detail pages as it was no longer needed --- data/interfaces/default/storyarc_detail.html | 10 +++++++--- data/interfaces/default/storyarc_detail.poster.html | 7 +++++-- 2 files changed, 12 insertions(+), 5 deletions(-) diff --git a/data/interfaces/default/storyarc_detail.html b/data/interfaces/default/storyarc_detail.html index ad737ccb..687c5ca3 100755 --- a/data/interfaces/default/storyarc_detail.html +++ b/data/interfaces/default/storyarc_detail.html @@ -60,14 +60,14 @@

--> -
+
<% if mylar.CONFIG.STORYARCDIR: carcdir = 'StoryArc' else: carcdir = 'GrabBag' %> - + @@ -75,7 +75,10 @@
+   +

${storyarcname}

@@ -111,8 +114,9 @@
+ diff --git a/data/interfaces/default/storyarc_detail.poster.html b/data/interfaces/default/storyarc_detail.poster.html index 3b88cf53..3026a70d 100755 --- a/data/interfaces/default/storyarc_detail.poster.html +++ b/data/interfaces/default/storyarc_detail.poster.html @@ -62,7 +62,7 @@ %endif
- + <% if mylar.CONFIG.STORYARCDIR: carcdir = 'StoryArc' @@ -71,12 +71,15 @@ %>
- +
+   +
From ba011cc65904aa48e48d0ea07f6ec7e86406cf21 Mon Sep 17 00:00:00 2001 From: evilhero Date: Wed, 16 Jan 2019 14:23:04 -0500 Subject: [PATCH 02/54] FIX:(#1519) Allow for scanning/checking of files whose issue numbers indicate seasons (must be numbered as #Winter, #Summer, #Spring, or #Fall) --- mylar/filechecker.py | 11 +++++++++-- mylar/helpers.py | 12 ++++++++++-- mylar/importer.py | 13 +++++++++++-- mylar/updater.py | 1 + 4 files changed, 31 insertions(+), 6 deletions(-) diff --git a/mylar/filechecker.py b/mylar/filechecker.py index eb373d06..88f00962 100755 --- a/mylar/filechecker.py +++ b/mylar/filechecker.py @@ -390,7 +390,7 @@ class FileChecker(object): lastmod_position = 0 booktype = 'issue' #exceptions that are considered alpha-numeric issue numbers - exceptions = ('NOW', 'AI', 'AU', 'X', 'A', 'B', 'C', 'INH', 'MU') + exceptions = ('NOW', 'AI', 'AU', 'X', 'A', 'B', 'C', 'INH', 'MU', 'SUMMER', 'SPRING', 'FALL', 'WINTER') #unicode characters, followed by int value # num_exceptions = [{iss:u'\xbd',val:.5},{iss:u'\xbc',val:.25}, {iss:u'\xe',val:.75}, {iss:u'\221e',val:'infinity'}] @@ -444,7 +444,14 @@ class FileChecker(object): 'position': split_file.index(sf), 'mod_position': self.char_file_position(modfilename, sf, lastmod_position), 'validcountchk': validcountchk}) - + else: + test_position = modfilename[self.char_file_position(modfilename, sf,lastmod_position)-1] + if test_position == '#': + possible_issuenumbers.append({'number': sf, + 'position': split_file.index(sf), + 'mod_position': self.char_file_position(modfilename, sf, lastmod_position), + 'validcountchk': validcountchk}) + if sf == 'XCV': # new 2016-09-19 \ attempt to check for XCV which replaces any unicode above for x in list(wrds): diff --git a/mylar/helpers.py b/mylar/helpers.py index 17223ff2..852421ca 100755 --- a/mylar/helpers.py +++ b/mylar/helpers.py @@ -1092,8 +1092,16 @@ def issuedigits(issnum): a+=1 int_issnum = (int(issno) * 1000) + ordtot elif invchk == "true": - logger.fdebug('this does not have an issue # that I can parse properly.') - return 999999999999999 + if any([issnum.lower() == 'fall', issnum.lower() == 'spring', issnum.lower() == 'summer', issnum.lower() == 'winter']): + inu = 0 + ordtot = 0 + while (inu < len(issnum)): + ordtot += ord(issnum[inu].lower()) #lower-case the letters for simplicty + inu+=1 + int_issnum = ordtot + else: + logger.fdebug('this does not have an issue # that I can parse properly.') + return 999999999999999 else: if issnum == '9-5': issnum = u'9\xbd' diff --git a/mylar/importer.py b/mylar/importer.py index e6717b1e..bdbfcaa4 100644 --- a/mylar/importer.py +++ b/mylar/importer.py @@ -1195,8 +1195,17 @@ def updateissuedata(comicid, comicname=None, issued=None, comicIssues=None, call a+=1 int_issnum = (int(issno) * 1000) + ordtot elif invchk == "true": - logger.fdebug('this does not have an issue # that I can parse properly.') - return + if any([issnum.lower() == 'fall 2005', issnum.lower() == 'spring 2005', issnum.lower() == 'summer 2006', issnum.lower() == 'winter 2009']): + issnum = re.sub('[0-9]+', '', issnum).strip() + inu = 0 + ordtot = 0 + while (inu < len(issnum)): + ordtot += ord(issnum[inu].lower()) #lower-case the letters for simplicty + inu+=1 + int_issnum = ordtot + else: + logger.fdebug('this does not have an issue # that I can parse properly.') + return else: if int_issnum is not None: pass diff --git a/mylar/updater.py b/mylar/updater.py index d63c7c22..cd88e380 100755 --- a/mylar/updater.py +++ b/mylar/updater.py @@ -1119,6 +1119,7 @@ def forceRescan(ComicID, 
archive=None, module=None, recheck=False): while True: try: reiss = reissues[n] + int_iss = None except IndexError: break int_iss = helpers.issuedigits(reiss['Issue_Number']) From 0fe9a5a800f5d5b9deb44109ec4b47aa4e68c3ab Mon Sep 17 00:00:00 2001 From: evilhero Date: Wed, 16 Jan 2019 14:32:37 -0500 Subject: [PATCH 03/54] IMP: Added DDL option to available download provider options. --- data/interfaces/default/config.html | 2 - mylar/getcomics.py | 274 ++++++++++++++++++++++++++++ mylar/search.py | 27 ++- mylar/webserve.py | 2 +- 4 files changed, 294 insertions(+), 11 deletions(-) create mode 100644 mylar/getcomics.py diff --git a/data/interfaces/default/config.html b/data/interfaces/default/config.html index 1f649f42..6db9f27f 100755 --- a/data/interfaces/default/config.html +++ b/data/interfaces/default/config.html @@ -763,13 +763,11 @@ Note: this is an experimental search - results may be better/worse. -
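The helpers.issuedigits() change in [PATCH 02/54] above converts a season-named issue (#Winter, #Summer, #Spring, #Fall) into a stable integer by summing the ordinals of its lower-cased letters, so the same season name always maps to the same sortable value. A stand-alone sketch of that idea (hypothetical helper, not the literal patch code):

    def season_to_int(issnum):
        # mirror the ord()-summing approach used in helpers.issuedigits()
        if issnum.lower() in ('fall', 'spring', 'summer', 'winter'):
            return sum(ord(c) for c in issnum.lower())
        return None

    season_to_int('Winter')  # -> 665, a repeatable stand-in for issue '#Winter'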
Torrents diff --git a/mylar/getcomics.py b/mylar/getcomics.py new file mode 100644 index 00000000..ecf20b41 --- /dev/null +++ b/mylar/getcomics.py @@ -0,0 +1,274 @@ +# -*- coding: utf-8 -*- +# This file is part of Mylar. +# +# Mylar is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Mylar is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Mylar. If not, see . + +from StringIO import StringIO +import urllib +from threading import Thread +from Queue import Queue +import os +import sys +import re +import gzip +import time +import datetime +import json +from bs4 import BeautifulSoup +import requests +import cfscrape +import mylar +from mylar import logger + +class GC(object): + + def __init__(self, query): + + self.queue = Queue() + + self.valreturn = [] + + self.url = 'https://getcomics.info' + + self.query = query + + self.local_filename = os.path.join(mylar.CONFIG.CACHE_DIR, "getcomics.html") + + self.headers = {'Accept-encoding': 'gzip', 'User-Agent': 'Mozilla/5.0 (Windows NT 6.1; WOW64; rv:40.0) Gecko/20100101 Firefox/40.1', 'Referer': 'https://getcomics.info/'} + + def search(self): + + with cfscrape.create_scraper() as s: + cf_cookievalue, cf_user_agent = s.get_tokens(self.url, headers=self.headers) + + t = s.get(self.url+'/', params={'s': self.query}, verify=True, cookies=cf_cookievalue, headers=self.headers, stream=True) + + with open(self.local_filename, 'wb') as f: + for chunk in t.iter_content(chunk_size=1024): + if chunk: # filter out keep-alive new chunks + f.write(chunk) + f.flush() + + return self.search_results() + + def loadsite(self, title, link): + with cfscrape.create_scraper() as s: + self.cf_cookievalue, cf_user_agent = s.get_tokens(link, headers=self.headers) + + t = s.get(link, verify=True, cookies=self.cf_cookievalue, headers=self.headers, stream=True) + + with open(title+'.html', 'wb') as f: + for chunk in t.iter_content(chunk_size=1024): + if chunk: # filter out keep-alive new chunks + f.write(chunk) + f.flush() + + def search_results(self): + results = {} + resultlist = [] + soup = BeautifulSoup(open(self.local_filename), 'html.parser') + + resultline = soup.find("span", {"class": "cover-article-count"}).get_text(strip=True) + logger.info('There are %s results' % re.sub('Articles', '', resultline).strip()) + + for f in soup.findAll("article"): + id = f['id'] + lk = f.find('a') + link = lk['href'] + titlefind = f.find("h1", {"class": "post-title"}) + title = titlefind.get_text(strip=True) + option_find = f.find("p", {"style": "text-align: center;"}) + i = 0 + while i <= 2: + option_find = option_find.findNext(text=True) + if 'Year' in option_find: + year = option_find.findNext(text=True) + year = re.sub('|', '', year).strip() + else: + size = option_find.findNext(text=True) + if 'MB' in size: + size = re.sub('MB', 'M', size).strip() + elif 'GB' in size: + size = re.sub('GB', 'G', size).strip() + i+=1 + dateline = f.find('time') + datefull = dateline['datetime'] + datestamp = time.mktime(time.strptime(datefull, "%Y-%m-%d")) + resultlist.append({"title": title, + "pubdate": 
datetime.datetime.fromtimestamp(float(datestamp)).strftime('%a, %d %b %Y %H:%M:%S'), + "size": re.sub(' ', '', size).strip(), + "link": link, + "year": year, + "id": re.sub('post-', '', id).strip(), + "site": 'DDL'}) + + logger.fdebug('%s [%s]' % (title, size)) + + results['entries'] = resultlist + + return results + #self.loadsite(title, link) + #self.parse_downloadresults(title) + + def parse_downloadresults(self, title): + + soup = BeautifulSoup(open(title+'.html'), 'html.parser') + orig_find = soup.find("p", {"style": "text-align: center;"}) + i = 0 + option_find = orig_find + while True: #i <= 10: + prev_option = option_find + option_find = option_find.findNext(text=True) + if i == 0: + series = option_find + elif 'Year' in option_find: + year = option_find.findNext(text=True) + else: + if 'Size' in prev_option: + size = option_find #.findNext(text=True) + possible_more = orig_find.next_sibling + break + i+=1 + + logger.fdebug('%s [%s] / %s' % (series, year, size)) + + link = None + for f in soup.findAll("div", {"class": "aio-pulse"}): + lk = f.find('a') + if lk['title'] == 'Download Now': + link = lk['href'] + site = lk['title'] + break #get the first link just to test + + if link is None: + logger.warn('Unable to retrieve any valid immediate download links. They might not exist.') + return + + links = [] + + if possible_more.name == 'ul': + bb = possible_more.findAll('li') + for x in bb: + volume = x.findNext(text=True) + if u'\u2013' in volume: + volume = re.sub(u'\u2013', '-', volume) + linkline = x.find('a') + link = linkline['href'] + site = linkline.findNext(text=True) + links.append({"volume": volume, + "site": site, + "link": link}) + else: + check_extras = soup.findAll("h3") + for sb in check_extras: + header = sb.findNext(text=True) + if header == 'TPBs': + nxt = sb.next_sibling + if nxt.name == 'ul': + bb = nxt.findAll('li') + for x in bb: + volume = x.findNext(text=True) + if u'\u2013' in volume: + volume = re.sub(u'\u2013', '-', volume) + linkline = x.find('a') + link = linkline['href'] + site = linkline.findNext(text=True) + links.append({"volume": volume, + "site": site, + "link": link}) + + if link is None: + logger.warn('Unable to retrieve any valid immediate download links. 
They might not exist.') + return + + for x in links: + logger.fdebug('[%s] %s - %s' % (x['site'], x['volume'], x['link'])) + + thread_ = Thread(target=self.downloadit, args=[link]) + thread_.start() + thread_.join() + chk = self.queue.get() + while True: + if chk[0]['mode'] == 'stop': + return {"filename": chk[0]['filename'], + "status": 'fail'} + elif chk[0]['mode'] == 'success': + try: + if os.path.isfile(os.path.join(mylar.CONFIG.DDL_LOCATION, chk[0]['filename'])): + logger.fdebug('Finished downloading %s [%s]' % (path, size)) + except: + pass + return {"filename": chk[0]['filename'], + "status": 'success'} + + def downloadit(self, link): + filename = None + try: + t = requests.get(link, verify=True, cookies=self.cf_cookievalue, headers=self.headers, stream=True) + + filename = os.path.basename(urllib.unquote(t.url).decode('utf-8')) + + path = os.path.join(mylar.CONFIG.DDL_LOCATION, filename) + + if t.headers.get('content-encoding') == 'gzip': #.get('Content-Encoding') == 'gzip': + buf = StringIO(t.content) + f = gzip.GzipFile(fileobj=buf) + + + with open(path, 'wb') as f: + for chunk in t.iter_content(chunk_size=1024): + if chunk: # filter out keep-alive new chunks + f.write(chunk) + f.flush() + except: + self.valreturn.append({"mode": "stop", + "filename": filename}) + return self.queue.put(self.valreturn) + + else: + self.valreturn.append({"mode": "success", + "filename": filename}) + return self.queue.put(self.valreturn) + + def issue_list(self, pack): + #packlist = [x.strip() for x in pack.split(',)] + packlist = pack.replace('+', ' ').replace(',', ' ').split() + print packlist + plist = [] + pack_issues = [] + for pl in packlist: + if '-' in pl: + plist.append(range(int(pl[:pl.find('-')]),int(pl[pl.find('-')+1:])+1)) + else: + if 'TPBs' not in pl: + plist.append(int(pl)) + else: + plist.append('TPBs') + + for pi in plist: + if type(pi) == list: + for x in pi: + pack_issues.append(x) + else: + pack_issues.append(pi) + + pack_issues.sort() + print "pack_issues: %s" % pack_issues + +#if __name__ == '__main__': +# ab = GC(sys.argv[1]) #'justice league aquaman') #sys.argv[0]) +# #c = ab.search() +# b = ab.loadsite('test', sys.argv[2]) +# c = ab.parse_downloadresults('test', '60MB') +# #c = ab.issue_list(sys.argv[2]) diff --git a/mylar/search.py b/mylar/search.py index d98c8fc4..5aeef710 100755 --- a/mylar/search.py +++ b/mylar/search.py @@ -16,7 +16,7 @@ from __future__ import division import mylar -from mylar import logger, db, updater, helpers, parseit, findcomicfeed, notifiers, rsscheck, Failed, filechecker, auth32p, sabnzbd, nzbget, wwt #, getcomics +from mylar import logger, db, updater, helpers, parseit, findcomicfeed, notifiers, rsscheck, Failed, filechecker, auth32p, sabnzbd, nzbget, wwt, getcomics import feedparser import requests @@ -181,10 +181,14 @@ def search_init(ComicName, IssueNumber, ComicYear, SeriesYear, Publisher, IssueD #fix for issue dates between Nov-Dec/(Jan-Feb-Mar) IssDt = str(IssueDate)[5:7] - if IssDt == "12" or IssDt == "11" or IssDt == "01" or IssDt == "02" or IssDt == "03": + if any([IssDt == "12", IssDt == "11", IssDt == "01", IssDt == "02", IssDt == "03"]): IssDateFix = IssDt else: IssDateFix = "no" + if StoreDate is not None: + StDt = str(StoreDate)[5:7] + if any([StDt == "10", StDt == "12", StDt == "11", StDt == "01", StDt == "02", StDt == "03"]): + IssDateFix = StDt searchcnt = 0 srchloop = 1 @@ -615,9 +619,9 @@ def NZB_SEARCH(ComicName, IssueNumber, ComicYear, SeriesYear, Publisher, IssueDa if nzbprov == 'ddl': cmname = re.sub("%20", " ", 
str(comsrc)) logger.fdebug('Sending request to DDL site for : %s %s' % (findcomic, isssearch)) - #b = getcomics.GC(query=findcomic + ' ' + isssearch) - #bb = b.search() - logger.info('bb returned from DDL: %s' % bb) + b = getcomics.GC(query=findcomic + ' ' + isssearch) + bb = b.search() + #logger.info('bb returned from DDL: %s' % bb) elif RSS == "yes": if nzbprov == '32P' or nzbprov == 'Public Torrents': cmname = re.sub("%20", " ", str(comsrc)) @@ -2293,9 +2297,16 @@ def searcher(nzbprov, nzbname, comicinfo, link, IssueID, ComicID, tmpprov, direc sendsite = ggc.loadsite(os.path.join(mylar.CONFIG.CACHE_DIR, 'getcomics-' + nzbid), link) ddl_it = ggc.parse_downloadresults(os.path.join(mylar.CONFIG.CACHE_DIR, 'getcomics-' + nzbid)) logger.info("ddl status response: %s" % ddl_it) - if ddl_it[0]['status'] == 'success': - nzbname = ddl_it[0]['filename'] - logger.info('Successfully retrieved %s from DDL site' % (nzbname)) + if ddl_it['status'] == 'success': + nzbname = ddl_it['filename'] + logger.info('Successfully retrieved %s from DDL site. Now submitting for post-processing...' % (nzbname)) + mylar.PP_QUEUE.put({'nzb_name': nzbname, + 'nzb_folder': mylar.CONFIG.DDL_LOCATION, + 'issueid': IssueID, + 'failed': False, + 'comicid': ComicID, + 'apicall': True}) + sent_to = "is downloading it directly via DDL" elif mylar.USE_BLACKHOLE and all([nzbprov != '32P', nzbprov != 'WWT', nzbprov != 'DEM', nzbprov != 'torznab']): diff --git a/mylar/webserve.py b/mylar/webserve.py index 3ed987cd..0952cf4e 100644 --- a/mylar/webserve.py +++ b/mylar/webserve.py @@ -5064,7 +5064,7 @@ class WebInterface(object): 'lowercase_filenames', 'autowant_upcoming', 'autowant_all', 'comic_cover_local', 'alternate_latest_series_covers', 'cvinfo', 'snatchedtorrent_notify', 'prowl_enabled', 'prowl_onsnatch', 'nma_enabled', 'nma_onsnatch', 'pushover_enabled', 'pushover_onsnatch', 'boxcar_enabled', 'boxcar_onsnatch', 'pushbullet_enabled', 'pushbullet_onsnatch', 'telegram_enabled', 'telegram_onsnatch', 'slack_enabled', 'slack_onsnatch', - 'opds_enable', 'opds_authentication', 'opds_metainfo'] #, 'enable_ddl'] + 'opds_enable', 'opds_authentication', 'opds_metainfo', 'enable_ddl'] for checked_config in checked_configs: if checked_config not in kwargs: From 2509c43b2ebb05ef4b18c57a00f4d875b0a8fed6 Mon Sep 17 00:00:00 2001 From: evilhero Date: Wed, 16 Jan 2019 15:01:33 -0500 Subject: [PATCH 04/54] FIX: Fix for using DDL and attempting to poll against RSS, FIX: Fix for using DDL and having it iterate over all possible permutations of the issue number instead of just using one --- mylar/search.py | 7 +++++-- 1 file changed, 5 insertions(+), 2 deletions(-) diff --git a/mylar/search.py b/mylar/search.py index 5aeef710..d29692dc 100755 --- a/mylar/search.py +++ b/mylar/search.py @@ -330,10 +330,13 @@ def search_init(ComicName, IssueNumber, ComicYear, SeriesYear, Publisher, IssueD #sure it's not disabled (it gets auto-disabled on maxing out the API hits) prov_count+=1 continue - elif all([searchprov == '32P', checked_once is True]) or all ([searchprov == 'Public Torrents', checked_once is True]) or all([searchprov == 'experimental', checked_once is True]) or all([searchprov == 'DDL', checked_once is True]): + elif all([searchprov == '32P', checked_once is True]) or all([searchprov == 'DDL', checked_once is True]) or all ([searchprov == 'Public Torrents', checked_once is True]) or all([searchprov == 'experimental', checked_once is True]) or all([searchprov == 'DDL', checked_once is True]): prov_count+=1 continue if searchmode == 'rss': + if 
searchprov.lower() == 'ddl': + prov_count+=1 + continue findit = NZB_SEARCH(ComicName, IssueNumber, ComicYear, SeriesYear, Publisher, IssueDate, StoreDate, searchprov, send_prov_count, IssDateFix, IssueID, UseFuzzy, newznab_host, ComicVersion=ComicVersion, SARC=SARC, IssueArcID=IssueArcID, RSS="yes", ComicID=ComicID, issuetitle=issuetitle, unaltered_ComicName=unaltered_ComicName, oneoff=oneoff, cmloopit=cmloopit, manual=manual, torznab_host=torznab_host, digitaldate=digitaldate, booktype=booktype) if findit['status'] is False: if AlternateSearch is not None and AlternateSearch != "None": @@ -355,7 +358,7 @@ def search_init(ComicName, IssueNumber, ComicYear, SeriesYear, Publisher, IssueD else: findit = NZB_SEARCH(ComicName, IssueNumber, ComicYear, SeriesYear, Publisher, IssueDate, StoreDate, searchprov, send_prov_count, IssDateFix, IssueID, UseFuzzy, newznab_host, ComicVersion=ComicVersion, SARC=SARC, IssueArcID=IssueArcID, RSS="no", ComicID=ComicID, issuetitle=issuetitle, unaltered_ComicName=unaltered_ComicName, allow_packs=allow_packs, oneoff=oneoff, cmloopit=cmloopit, manual=manual, torznab_host=torznab_host, torrentid_32p=torrentid_32p, digitaldate=digitaldate, booktype=booktype) - if all([searchprov == '32P', checked_once is False]) or all([searchprov == 'Public Torrents', checked_once is False]) or all([searchprov == 'experimental', checked_once is False]): + if all([searchprov == '32P', checked_once is False]) or all([searchprov.lower() == 'ddl', checked_once is False]) or all([searchprov == 'Public Torrents', checked_once is False]) or all([searchprov == 'experimental', checked_once is False]): checked_once = True if findit['status'] is False: if AlternateSearch is not None and AlternateSearch != "None": From 4af50e44a2259cce6593e7da981c74b9e8e9df34 Mon Sep 17 00:00:00 2001 From: evilhero Date: Wed, 16 Jan 2019 16:03:33 -0500 Subject: [PATCH 05/54] FIX: removal of the 'i:' logging line when searching --- mylar/search.py | 1 - 1 file changed, 1 deletion(-) diff --git a/mylar/search.py b/mylar/search.py index d29692dc..21ad6d31 100755 --- a/mylar/search.py +++ b/mylar/search.py @@ -1023,7 +1023,6 @@ def NZB_SEARCH(ComicName, IssueNumber, ComicYear, SeriesYear, Publisher, IssueDa i = 1 while i <= 1: - logger.info('i: %s' % i) if i == 0: usedate = digitaldate else: From 74767215fb9cb35291f3df845a3194b1b4bf0c5e Mon Sep 17 00:00:00 2001 From: evilhero Date: Wed, 16 Jan 2019 17:09:51 -0500 Subject: [PATCH 06/54] Various fixes for DDL option --- mylar/PostProcessor.py | 51 +++++++++++++++++++++++++----------------- mylar/api.py | 8 ++++++- mylar/getcomics.py | 3 ++- mylar/helpers.py | 8 +++++-- mylar/nzbget.py | 3 ++- mylar/process.py | 5 +++-- mylar/sabnzbd.py | 6 +++-- mylar/search.py | 19 +++++++++++----- 8 files changed, 67 insertions(+), 36 deletions(-) diff --git a/mylar/PostProcessor.py b/mylar/PostProcessor.py index fa9d0929..8dc38ebd 100755 --- a/mylar/PostProcessor.py +++ b/mylar/PostProcessor.py @@ -45,7 +45,7 @@ class PostProcessor(object): FOLDER_NAME = 2 FILE_NAME = 3 - def __init__(self, nzb_name, nzb_folder, issueid=None, module=None, queue=None, comicid=None, apicall=False): + def __init__(self, nzb_name, nzb_folder, issueid=None, module=None, queue=None, comicid=None, apicall=False ,dll=False): """ Creates a new post processor with the given file path and optionally an NZB name. 
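The new ddl keyword added to PostProcessor.__init__() above defaults to False and is only switched on by the DDL download path added later in this series. A hypothetical construction showing the intended call shape (file name, folder and ids are placeholders, not values from the patch):

    from mylar import PostProcessor

    pp = PostProcessor.PostProcessor(nzb_name='Example.Comic.001.(2019).cbz',
                                     nzb_folder='/downloads/ddl',
                                     issueid=None,
                                     comicid=None,
                                     apicall=True,
                                     ddl=True)
    pp.Process()

In practice the constructor is reached through process.Process() and the post-processing queue, as the later hunks in this patch show.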
@@ -72,6 +72,11 @@ class PostProcessor(object): else: self.apicall = False + if ddl is True: + self.ddl = True + else: + self.ddl = False + if mylar.CONFIG.FILE_OPTS == 'copy': self.fileop = shutil.copy else: @@ -344,27 +349,31 @@ class PostProcessor(object): self._log("nzb folder: " + self.nzb_folder) logger.fdebug(module + ' nzb name: ' + self.nzb_name) logger.fdebug(module + ' nzb folder: ' + self.nzb_folder) - if mylar.USE_SABNZBD==0: - logger.fdebug(module + ' Not using SABnzbd') - elif mylar.USE_SABNZBD != 0 and self.nzb_name == 'Manual Run': - logger.fdebug(module + ' Not using SABnzbd : Manual Run') - else: - # if the SAB Directory option is enabled, let's use that folder name and append the jobname. - if all([mylar.CONFIG.SAB_TO_MYLAR, mylar.CONFIG.SAB_DIRECTORY is not None, mylar.CONFIG.SAB_DIRECTORY != 'None']): - self.nzb_folder = os.path.join(mylar.CONFIG.SAB_DIRECTORY, self.nzb_name).encode(mylar.SYS_ENCODING) - logger.fdebug(module + ' SABnzbd Download folder option enabled. Directory set to : ' + self.nzb_folder) + if self.ddl is False: + if mylar.USE_SABNZBD==0: + logger.fdebug(module + ' Not using SABnzbd') + elif mylar.USE_SABNZBD != 0 and self.nzb_name == 'Manual Run': + logger.fdebug(module + ' Not using SABnzbd : Manual Run') + else: + # if the SAB Directory option is enabled, let's use that folder name and append the jobname. + if all([mylar.CONFIG.SAB_TO_MYLAR, mylar.CONFIG.SAB_DIRECTORY is not None, mylar.CONFIG.SAB_DIRECTORY != 'None']): + self.nzb_folder = os.path.join(mylar.CONFIG.SAB_DIRECTORY, self.nzb_name).encode(mylar.SYS_ENCODING) + logger.fdebug(module + ' SABnzbd Download folder option enabled. Directory set to : ' + self.nzb_folder) + + if mylar.USE_NZBGET==1: + if self.nzb_name != 'Manual Run': + logger.fdebug(module + ' Using NZBGET') + logger.fdebug(module + ' NZB name as passed from NZBGet: ' + self.nzb_name) + # if the NZBGet Directory option is enabled, let's use that folder name and append the jobname. + if self.nzb_name == 'Manual Run': + logger.fdebug(module + ' Manual Run Post-Processing enabled.') + elif all([mylar.CONFIG.NZBGET_DIRECTORY is not None, mylar.CONFIG.NZBGET_DIRECTORY is not 'None']): + logger.fdebug(module + ' NZB name as passed from NZBGet: ' + self.nzb_name) + self.nzb_folder = os.path.join(mylar.CONFIG.NZBGET_DIRECTORY, self.nzb_name).encode(mylar.SYS_ENCODING) + logger.fdebug(module + ' NZBGET Download folder option enabled. Directory set to : ' + self.nzb_folder) + else: + logger.fdebug('%s Now performing post-processing of %s sent from DDL' % (module, nzb_name)) - if mylar.USE_NZBGET==1: - if self.nzb_name != 'Manual Run': - logger.fdebug(module + ' Using NZBGET') - logger.fdebug(module + ' NZB name as passed from NZBGet: ' + self.nzb_name) - # if the NZBGet Directory option is enabled, let's use that folder name and append the jobname. - if self.nzb_name == 'Manual Run': - logger.fdebug(module + ' Manual Run Post-Processing enabled.') - elif all([mylar.CONFIG.NZBGET_DIRECTORY is not None, mylar.CONFIG.NZBGET_DIRECTORY is not 'None']): - logger.fdebug(module + ' NZB name as passed from NZBGet: ' + self.nzb_name) - self.nzb_folder = os.path.join(mylar.CONFIG.NZBGET_DIRECTORY, self.nzb_name).encode(mylar.SYS_ENCODING) - logger.fdebug(module + ' NZBGET Download folder option enabled. 
Directory set to : ' + self.nzb_folder) myDB = db.DBConnection() self.oneoffinlist = False diff --git a/mylar/api.py b/mylar/api.py index 1e898df3..9cf6ad44 100644 --- a/mylar/api.py +++ b/mylar/api.py @@ -358,6 +358,11 @@ class Api(object): else: comicid = kwargs['comicid'] + if 'ddl' not in kwargs: + ddl = False + else: + ddl = True + if 'apc_version' not in kwargs: logger.info('Received API Request for PostProcessing %s [%s]. Queueing...' % (self.nzb_name, self.nzb_folder)) mylar.PP_QUEUE.put({'nzb_name': self.nzb_name, @@ -365,7 +370,8 @@ class Api(object): 'issueid': issueid, 'failed': failed, 'comicid': comicid, - 'apicall': True}) + 'apicall': True, + 'ddl': ddl}) self.data = 'Successfully submitted request for post-processing for %s' % self.nzb_name #fp = process.Process(self.nzb_name, self.nzb_folder, issueid=issueid, failed=failed, comicid=comicid, apicall=True) #self.data = fp.post_process() diff --git a/mylar/getcomics.py b/mylar/getcomics.py index ecf20b41..59615723 100644 --- a/mylar/getcomics.py +++ b/mylar/getcomics.py @@ -134,6 +134,7 @@ class GC(object): series = option_find elif 'Year' in option_find: year = option_find.findNext(text=True) + year = re.sub('|', '', year).strip() else: if 'Size' in prev_option: size = option_find #.findNext(text=True) @@ -141,7 +142,7 @@ class GC(object): break i+=1 - logger.fdebug('%s [%s] / %s' % (series, year, size)) + logger.fdebug('Now downloading: %s [%s] / %s ... this can take a while (go get some take-out)...' % (series, year, size)) link = None for f in soup.findAll("div", {"class": "aio-pulse"}): diff --git a/mylar/helpers.py b/mylar/helpers.py index 852421ca..95fb192e 100755 --- a/mylar/helpers.py +++ b/mylar/helpers.py @@ -3040,7 +3040,10 @@ def postprocess_main(queue): break if mylar.APILOCK is False: - pprocess = process.Process(item['nzb_name'], item['nzb_folder'], item['failed'], item['issueid'], item['comicid'], item['apicall']) + try: + pprocess = process.Process(item['nzb_name'], item['nzb_folder'], item['failed'], item['issueid'], item['comicid'], item['apicall'], item['ddl']) + except: + pprocess = process.Process(item['nzb_name'], item['nzb_folder'], item['failed'], item['issueid'], item['comicid'], item['apicall']) pp = pprocess.post_process() time.sleep(5) #arbitrary sleep to let the process attempt to finish pp'ing @@ -3122,7 +3125,8 @@ def nzb_monitor(queue): 'failed': nzstat['failed'], 'issueid': nzstat['issueid'], 'comicid': nzstat['comicid'], - 'apicall': nzstat['apicall']}) + 'apicall': nzstat['apicall'], + 'ddl': False}) #cc = process.Process(nzstat['name'], nzstat['location'], failed=nzstat['failed']) #nzpp = cc.post_process() except Exception as e: diff --git a/mylar/nzbget.py b/mylar/nzbget.py index ee6c2100..01fa73eb 100644 --- a/mylar/nzbget.py +++ b/mylar/nzbget.py @@ -228,7 +228,8 @@ class NZBGet(object): 'failed': False, 'issueid': nzbinfo['issueid'], 'comicid': nzbinfo['comicid'], - 'apicall': True} + 'apicall': True, + 'ddl': False} else: logger.warn('Could not find completed NZBID %s in history' % nzbid) return {'status': False} diff --git a/mylar/process.py b/mylar/process.py index 0e50b444..4063030c 100644 --- a/mylar/process.py +++ b/mylar/process.py @@ -21,13 +21,14 @@ import logger class Process(object): - def __init__(self, nzb_name, nzb_folder, failed=False, issueid=None, comicid=None, apicall=False): + def __init__(self, nzb_name, nzb_folder, failed=False, issueid=None, comicid=None, apicall=False, ddl=False): self.nzb_name = nzb_name self.nzb_folder = nzb_folder self.failed = failed 
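The queue items consumed by helpers.postprocess_main() above now carry a 'ddl' key alongside the existing fields, and each producer updated in this patch (api.py, nzbget.py, and sabnzbd.py below) builds the same dictionary. A representative item, using placeholder values only:

    import mylar

    mylar.PP_QUEUE.put({'nzb_name': 'Example.Comic.001.(2019).cbz',
                        'nzb_folder': '/downloads/complete/Example.Comic.001',
                        'issueid': None,
                        'comicid': None,
                        'failed': False,
                        'apicall': True,
                        'ddl': False})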
self.issueid = issueid self.comicid = comicid self.apicall = apicall + self.ddl = ddl def post_process(self): if self.failed == '0': @@ -39,7 +40,7 @@ class Process(object): retry_outside = False if self.failed is False: - PostProcess = mylar.PostProcessor.PostProcessor(self.nzb_name, self.nzb_folder, self.issueid, queue=queue, comicid=self.comicid, apicall=self.apicall) + PostProcess = mylar.PostProcessor.PostProcessor(self.nzb_name, self.nzb_folder, self.issueid, queue=queue, comicid=self.comicid, apicall=self.apicall, ddl=self.ddl) if any([self.nzb_name == 'Manual Run', self.nzb_name == 'Manual+Run', self.apicall is True, self.issueid is not None]): threading.Thread(target=PostProcess.Process).start() else: diff --git a/mylar/sabnzbd.py b/mylar/sabnzbd.py index 22e81153..c330e81c 100644 --- a/mylar/sabnzbd.py +++ b/mylar/sabnzbd.py @@ -131,7 +131,8 @@ class SABnzbd(object): 'failed': False, 'issueid': nzbinfo['issueid'], 'comicid': nzbinfo['comicid'], - 'apicall': True} + 'apicall': True, + 'ddl': False} break else: logger.info('no file found where it should be @ %s - is there another script that moves things after completion ?' % hq['storage']) @@ -152,7 +153,8 @@ class SABnzbd(object): 'failed': True, 'issueid': sendresponse['issueid'], 'comicid': sendresponse['comicid'], - 'apicall': True} + 'apicall': True, + 'ddl': False} break break diff --git a/mylar/search.py b/mylar/search.py index 21ad6d31..d825cff4 100755 --- a/mylar/search.py +++ b/mylar/search.py @@ -1463,10 +1463,10 @@ def NZB_SEARCH(ComicName, IssueNumber, ComicYear, SeriesYear, Publisher, IssueDa links = entry['link'] searchresult = searcher(nzbprov, nzbname, mylar.COMICINFO, links, IssueID, ComicID, tmpprov, newznab=newznab_host, torznab=torznab_host, rss=RSS) - if searchresult == 'downloadchk-fail' or searchresult == 'double-pp': + if any([searchresult == 'downloadchk-fail', searchresult == 'double-pp']): foundc['status'] = False continue - elif searchresult == 'torrent-fail' or searchresult == 'nzbget-fail' or searchresult == 'sab-fail' or searchresult == 'blackhole-fail': + elif any([searchresult == 'torrent-fail', searchresult == 'nzbget-fail', searchresult == 'sab-fail', searchresult == 'blackhole-fail', searchresult == 'ddl-fail']): foundc['status'] = False return foundc @@ -1498,9 +1498,12 @@ def NZB_SEARCH(ComicName, IssueNumber, ComicYear, SeriesYear, Publisher, IssueDa if 'Public Torrents' in tmpprov and any([nzbprov == 'WWT', nzbprov == 'DEM']): tmpprov = re.sub('Public Torrents', nzbprov, tmpprov) foundcomic.append("yes") - - if mylar.COMICINFO[0]['pack']: - issinfo = mylar.COMICINFO[0]['pack_issuelist'] + logger.info('mylar.COMICINFO: %s' % mylar.COMICINFO) + if mylar.COMICINFO[0]['pack'] is True: + try: + issinfo = mylar.COMICINFO[0]['pack_issuelist'] + except: + issinfo = mylar.COMICINFO['pack_issuelist'] if issinfo is not None: #we need to get EVERY issue ID within the pack and update the log to reflect that they're being downloaded via a pack. logger.fdebug("Found matching comic within pack...preparing to send to Updater with IssueIDs: " + str(issueid_info) + " and nzbname of " + str(nzbname)) @@ -2307,7 +2310,11 @@ def searcher(nzbprov, nzbname, comicinfo, link, IssueID, ComicID, tmpprov, direc 'issueid': IssueID, 'failed': False, 'comicid': ComicID, - 'apicall': True}) + 'apicall': True, + 'ddl': True}) + else: + logger.info('Failed to retrieve %s from the DDL site.' 
%s (nzbname)) + return "ddl-fail" sent_to = "is downloading it directly via DDL" From d08b56a4fffb5d957056f0f813a422f8acae5b61 Mon Sep 17 00:00:00 2001 From: evilhero Date: Wed, 16 Jan 2019 17:43:38 -0500 Subject: [PATCH 07/54] FIX: Fix for manual post-processing failing due to previous commits --- mylar/PostProcessor.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/mylar/PostProcessor.py b/mylar/PostProcessor.py index 8dc38ebd..8b3aade6 100755 --- a/mylar/PostProcessor.py +++ b/mylar/PostProcessor.py @@ -45,7 +45,7 @@ class PostProcessor(object): FOLDER_NAME = 2 FILE_NAME = 3 - def __init__(self, nzb_name, nzb_folder, issueid=None, module=None, queue=None, comicid=None, apicall=False ,dll=False): + def __init__(self, nzb_name, nzb_folder, issueid=None, module=None, queue=None, comicid=None, apicall=False ,ddl=False): """ Creates a new post processor with the given file path and optionally an NZB name. From 464c8c69b135f11b2f92c998f828fb450fbc558e Mon Sep 17 00:00:00 2001 From: evilhero Date: Thu, 17 Jan 2019 09:25:17 -0500 Subject: [PATCH 08/54] FIX: small fix for DDL post-processing --- mylar/PostProcessor.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/mylar/PostProcessor.py b/mylar/PostProcessor.py index 8b3aade6..eb8ce219 100755 --- a/mylar/PostProcessor.py +++ b/mylar/PostProcessor.py @@ -45,7 +45,7 @@ class PostProcessor(object): FOLDER_NAME = 2 FILE_NAME = 3 - def __init__(self, nzb_name, nzb_folder, issueid=None, module=None, queue=None, comicid=None, apicall=False ,ddl=False): + def __init__(self, nzb_name, nzb_folder, issueid=None, module=None, queue=None, comicid=None, apicall=False, ddl=False): """ Creates a new post processor with the given file path and optionally an NZB name. @@ -372,7 +372,7 @@ class PostProcessor(object): self.nzb_folder = os.path.join(mylar.CONFIG.NZBGET_DIRECTORY, self.nzb_name).encode(mylar.SYS_ENCODING) logger.fdebug(module + ' NZBGET Download folder option enabled. 
Directory set to : ' + self.nzb_folder) else: - logger.fdebug('%s Now performing post-processing of %s sent from DDL' % (module, nzb_name)) + logger.fdebug('%s Now performing post-processing of %s sent from DDL' % (module, self.nzb_name)) myDB = db.DBConnection() From 319d7f7f547a91d068299b04379d0c08a861f7e5 Mon Sep 17 00:00:00 2001 From: evilhero Date: Thu, 17 Jan 2019 13:22:36 -0500 Subject: [PATCH 09/54] FIX: more general fixes for DDL option, IMP: Added DDL Queue so items hitting the DDL provider option will be queued in sequence for downloading & immediate post-processing thereafter --- mylar/__init__.py | 40 +++++++++++++++++++- mylar/getcomics.py | 94 ++++++++++++++++++++++++---------------------- mylar/helpers.py | 34 ++++++++++++++++- mylar/search.py | 16 ++------ 4 files changed, 124 insertions(+), 60 deletions(-) diff --git a/mylar/__init__.py b/mylar/__init__.py index cba0acc0..98c45076 100644 --- a/mylar/__init__.py +++ b/mylar/__init__.py @@ -123,10 +123,13 @@ USE_WATCHDIR = False SNPOOL = None NZBPOOL = None SEARCHPOOL = None +PPPOOL = None +DDLPOOL = None SNATCHED_QUEUE = Queue.Queue() NZB_QUEUE = Queue.Queue() PP_QUEUE = Queue.Queue() SEARCH_QUEUE = Queue.Queue() +DDL_QUEUE = Queue.Queue() SEARCH_TIER_DATE = None COMICSORT = None PULLBYFILE = False @@ -142,6 +145,7 @@ LOCAL_IP = None DOWNLOAD_APIKEY = None APILOCK = False SEARCHLOCK = False +DDL_LOCK = False CMTAGGER_PATH = None STATIC_COMICRN_VERSION = "1.01" STATIC_APC_VERSION = "2.04" @@ -162,11 +166,11 @@ def initialize(config_file): with INIT_LOCK: global CONFIG, _INITIALIZED, QUIET, CONFIG_FILE, OS_DETECT, MAINTENANCE, CURRENT_VERSION, LATEST_VERSION, COMMITS_BEHIND, INSTALL_TYPE, IMPORTLOCK, PULLBYFILE, INKDROPS_32P, \ - DONATEBUTTON, CURRENT_WEEKNUMBER, CURRENT_YEAR, UMASK, USER_AGENT, SNATCHED_QUEUE, NZB_QUEUE, PP_QUEUE, SEARCH_QUEUE, PULLNEW, COMICSORT, WANTED_TAB_OFF, CV_HEADERS, \ + DONATEBUTTON, CURRENT_WEEKNUMBER, CURRENT_YEAR, UMASK, USER_AGENT, SNATCHED_QUEUE, NZB_QUEUE, PP_QUEUE, SEARCH_QUEUE, DDL_QUEUE, PULLNEW, COMICSORT, WANTED_TAB_OFF, CV_HEADERS, \ IMPORTBUTTON, IMPORT_FILES, IMPORT_TOTALFILES, IMPORT_CID_COUNT, IMPORT_PARSED_COUNT, IMPORT_FAILURE_COUNT, CHECKENABLED, CVURL, DEMURL, WWTURL, WWT_CF_COOKIEVALUE, \ USE_SABNZBD, USE_NZBGET, USE_BLACKHOLE, USE_RTORRENT, USE_UTORRENT, USE_QBITTORRENT, USE_DELUGE, USE_TRANSMISSION, USE_WATCHDIR, SAB_PARAMS, \ PROG_DIR, DATA_DIR, CMTAGGER_PATH, DOWNLOAD_APIKEY, LOCAL_IP, STATIC_COMICRN_VERSION, STATIC_APC_VERSION, KEYS_32P, AUTHKEY_32P, FEED_32P, FEEDINFO_32P, \ - MONITOR_STATUS, SEARCH_STATUS, RSS_STATUS, WEEKLY_STATUS, VERSION_STATUS, UPDATER_STATUS, DBUPDATE_INTERVAL, LOG_LANG, LOG_CHARSET, APILOCK, SEARCHLOCK, LOG_LEVEL, \ + MONITOR_STATUS, SEARCH_STATUS, RSS_STATUS, WEEKLY_STATUS, VERSION_STATUS, UPDATER_STATUS, DBUPDATE_INTERVAL, LOG_LANG, LOG_CHARSET, APILOCK, SEARCHLOCK, DDL_LOCK, LOG_LEVEL, \ SCHED_RSS_LAST, SCHED_WEEKLY_LAST, SCHED_MONITOR_LAST, SCHED_SEARCH_LAST, SCHED_VERSION_LAST, SCHED_DBUPDATE_LAST, COMICINFO, SEARCH_TIER_DATE cc = mylar.config.Config(config_file) @@ -367,6 +371,9 @@ def start(): search_diff = datetime.datetime.utcfromtimestamp(helpers.utctimestamp() + ((int(CONFIG.SEARCH_INTERVAL) * 60) - (duration_diff*60))) logger.fdebug('[AUTO-SEARCH] Scheduling next run @ %s every %s minutes' % (search_diff, CONFIG.SEARCH_INTERVAL)) SCHED.add_job(func=ss.run, id='search', name='Auto-Search', next_run_time=search_diff, trigger=IntervalTrigger(hours=0, minutes=CONFIG.SEARCH_INTERVAL, timezone='UTC')) + else: + ss = searchit.CurrentSearcher() 
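The DDL machinery introduced in this patch is a single-consumer queue: getcomics.py enqueues one dictionary per snatched link and helpers.ddl_downloader() (shown further below) drains the queue on its own thread, downloading one item at a time behind DDL_LOCK before handing the file to the post-processing queue. A simplified sketch of the wiring, with placeholder values where the real ones come from the search result:

    import threading
    import Queue
    from mylar import helpers

    DDL_QUEUE = Queue.Queue()

    # consumer: one background worker drains the queue
    threading.Thread(target=helpers.ddl_downloader, args=(DDL_QUEUE,), name='DDL-QUEUE').start()

    # producer: GC.parse_downloadresults() queues one item per download link
    DDL_QUEUE.put({'link': 'https://getcomics.info/dlds/example',      # placeholder URL
                   'mainlink': 'https://getcomics.info/example-post',  # placeholder URL
                   'series': 'Example Series',
                   'year': '2019',
                   'size': '150 MB',
                   'comicid': None,
                   'issueid': None})

    # shutdown: the worker exits when it dequeues the literal string 'exit'
    DDL_QUEUE.put('exit')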
+ SCHED.add_job(func=ss.run, id='search', name='Auto-Search', next_run_time=None, trigger=IntervalTrigger(hours=0, minutes=CONFIG.SEARCH_INTERVAL, timezone='UTC')) if all([CONFIG.ENABLE_TORRENTS, CONFIG.AUTO_SNATCH, OS_DETECT != 'Windows']) and any([CONFIG.TORRENT_DOWNLOADER == 2, CONFIG.TORRENT_DOWNLOADER == 4]): logger.info('[AUTO-SNATCHER] Auto-Snatch of completed torrents enabled & attempting to background load....') @@ -396,6 +403,12 @@ def start(): PPPOOL.start() logger.info('[POST-PROCESS-QUEUE] Succesfully started Post-Processing Queuer....') + if CONFIG.ENABLE_DDL is True: + logger.info('[DDL-QUEUE] DDL Download queue enabled & monitoring for requests....') + DDLPOOL = threading.Thread(target=helpers.ddl_downloader, args=(DDL_QUEUE,), name="DDL-QUEUE") + DDLPOOL.start() + logger.info('[DDL-QUEUE] Succesfully started DDL Download Queuer....') + helpers.latestdate_fix() if CONFIG.ALT_PULL == 2: @@ -1223,6 +1236,29 @@ def halt(): SEARCHPOOL.join(5) except AssertionError: os._exit(0) + + if PPPOOL is not None: + logger.info('Terminating the post-processing queue thread.') + try: + PPPOOL.join(10) + logger.info('Joined pool for termination - successful') + except KeyboardInterrupt: + PP_QUEUE.put('exit') + PPPOOL.join(5) + except AssertionError: + os._exit(0) + + if DDLPOOL is not None: + logger.info('Terminating the DDL download queue thread.') + try: + DDLPOOL.join(10) + logger.info('Joined pool for termination - successful') + except KeyboardInterrupt: + DDL_QUEUE.put('exit') + DDLPOOL.join(5) + except AssertionError: + os._exit(0) + _INITIALIZED = False def shutdown(restart=False, update=False, maintenance=False): diff --git a/mylar/getcomics.py b/mylar/getcomics.py index 59615723..052f2fb7 100644 --- a/mylar/getcomics.py +++ b/mylar/getcomics.py @@ -17,7 +17,6 @@ from StringIO import StringIO import urllib from threading import Thread -from Queue import Queue import os import sys import re @@ -28,14 +27,12 @@ import json from bs4 import BeautifulSoup import requests import cfscrape +import logger import mylar -from mylar import logger class GC(object): - def __init__(self, query): - - self.queue = Queue() + def __init__(self, query=None, issueid=None, comicid=None): self.valreturn = [] @@ -43,6 +40,10 @@ class GC(object): self.query = query + self.comicid = comicid + + self.issueid = issueid + self.local_filename = os.path.join(mylar.CONFIG.CACHE_DIR, "getcomics.html") self.headers = {'Accept-encoding': 'gzip', 'User-Agent': 'Mozilla/5.0 (Windows NT 6.1; WOW64; rv:40.0) Gecko/20100101 Firefox/40.1', 'Referer': 'https://getcomics.info/'} @@ -94,7 +95,7 @@ class GC(object): option_find = option_find.findNext(text=True) if 'Year' in option_find: year = option_find.findNext(text=True) - year = re.sub('|', '', year).strip() + year = re.sub('\|', '', year).strip() else: size = option_find.findNext(text=True) if 'MB' in size: @@ -118,10 +119,8 @@ class GC(object): results['entries'] = resultlist return results - #self.loadsite(title, link) - #self.parse_downloadresults(title) - def parse_downloadresults(self, title): + def parse_downloadresults(self, title, mainlink): soup = BeautifulSoup(open(title+'.html'), 'html.parser') orig_find = soup.find("p", {"style": "text-align: center;"}) @@ -191,56 +190,61 @@ class GC(object): if link is None: logger.warn('Unable to retrieve any valid immediate download links. 
They might not exist.') - return + return {'success': False} for x in links: logger.fdebug('[%s] %s - %s' % (x['site'], x['volume'], x['link'])) - thread_ = Thread(target=self.downloadit, args=[link]) - thread_.start() - thread_.join() - chk = self.queue.get() - while True: - if chk[0]['mode'] == 'stop': - return {"filename": chk[0]['filename'], - "status": 'fail'} - elif chk[0]['mode'] == 'success': - try: - if os.path.isfile(os.path.join(mylar.CONFIG.DDL_LOCATION, chk[0]['filename'])): - logger.fdebug('Finished downloading %s [%s]' % (path, size)) - except: - pass - return {"filename": chk[0]['filename'], - "status": 'success'} + mylar.DDL_QUEUE.put({'link': link, + 'mainlink': mainlink, + 'series': series, + 'year': year, + 'size': size, + 'comicid': self.comicid, + 'issueid': self.issueid}) + + return {'success': True} + + def downloadit(self, link, mainlink): + if mylar.DDL_LOCK is True: + logger.fdebug('[DDL] Another item is currently downloading via DDL. Only one item can be downloaded at a time using DDL. Patience.') + return + else: + mylar.DDL_LOCK = True - def downloadit(self, link): filename = None try: - t = requests.get(link, verify=True, cookies=self.cf_cookievalue, headers=self.headers, stream=True) + with cfscrape.create_scraper() as s: + cf_cookievalue, cf_user_agent = s.get_tokens(mainlink, headers=self.headers) + t = s.get(link, verify=True, cookies=cf_cookievalue, headers=self.headers, stream=True) - filename = os.path.basename(urllib.unquote(t.url).decode('utf-8')) + filename = os.path.basename(urllib.unquote(t.url).decode('utf-8')) - path = os.path.join(mylar.CONFIG.DDL_LOCATION, filename) + path = os.path.join(mylar.CONFIG.DDL_LOCATION, filename) - if t.headers.get('content-encoding') == 'gzip': #.get('Content-Encoding') == 'gzip': - buf = StringIO(t.content) - f = gzip.GzipFile(fileobj=buf) + if t.headers.get('content-encoding') == 'gzip': #.get('Content-Encoding') == 'gzip': + buf = StringIO(t.content) + f = gzip.GzipFile(fileobj=buf) + with open(path, 'wb') as f: + for chunk in t.iter_content(chunk_size=1024): + if chunk: # filter out keep-alive new chunks + f.write(chunk) + f.flush() - with open(path, 'wb') as f: - for chunk in t.iter_content(chunk_size=1024): - if chunk: # filter out keep-alive new chunks - f.write(chunk) - f.flush() - except: - self.valreturn.append({"mode": "stop", - "filename": filename}) - return self.queue.put(self.valreturn) + except exception as e: + logger.error('[ERROR] %s' % e) + mylar.DDL_LOCK = False + return ({"success": False, + "filename": filename, + "path": None}) else: - self.valreturn.append({"mode": "success", - "filename": filename}) - return self.queue.put(self.valreturn) + mylar.DDL_LOCK = False + if os.path.isfile(path): + return ({"success": True, + "filename": filename, + "path": path}) def issue_list(self, pack): #packlist = [x.strip() for x in pack.split(',)] diff --git a/mylar/helpers.py b/mylar/helpers.py index 95fb192e..ace20ec0 100755 --- a/mylar/helpers.py +++ b/mylar/helpers.py @@ -37,7 +37,7 @@ from apscheduler.triggers.interval import IntervalTrigger import mylar import logger -from mylar import sabnzbd, nzbget, process +from mylar import sabnzbd, nzbget, process, getcomics def multikeysort(items, columns): @@ -3027,6 +3027,38 @@ def latestdate_update(): logger.info('updating latest date for : ' + a['ComicID'] + ' to ' + a['LatestDate'] + ' #' + a['LatestIssue']) myDB.upsert("comics", newVal, ctrlVal) +def ddl_downloader(queue): + while True: + if mylar.DDL_LOCK is True: + time.sleep(5) + + elif 
mylar.DDL_LOCK is False and queue.qsize() >= 1: + item = queue.get(True) + logger.info('Now loading request from DDL queue: %s (%s)' % item['series']) + if item == 'exit': + logger.info('Cleaning up workers for shutdown') + break + + ddz = getcomics.GC() + ddzstat = ddz.downloadit(item['link'], item['mainlink']) + + if all([ddzstat['success'] is True, mylar.CONFIG.POST_PROCESSING is True]): + logger.info('%s successfully downloaded - now initiating post-processing.' % (ddzstat['filename'])) + try: + mylar.PP_QUEUE.put({'nzb_name': ddzstat['filename'], + 'nzb_folder': ddzstat['path'], + 'failed': False, + 'issueid': item['issueid'], + 'comicid': item['comicid'], + 'apicall': True, + 'ddl': True}) + except Exception as e: + logger.info('process error: %s [%s]' %(e, ddzstat)) + elif mylar.CONFIG.POST_PROCESSING is True: + logger.info('File successfully downloaded. Post Processing is not enabled - item retained here: %s' % os.path.join(ddzstat['path'],ddzstat['filename'])) + else: + logger.info('[Status: %s] Failed to download: %s ' % (ddzstat['success'], ddzstat)) + def postprocess_main(queue): while True: if mylar.APILOCK is True: diff --git a/mylar/search.py b/mylar/search.py index d825cff4..5d8e2d81 100755 --- a/mylar/search.py +++ b/mylar/search.py @@ -2298,20 +2298,12 @@ def searcher(nzbprov, nzbname, comicinfo, link, IssueID, ComicID, tmpprov, direc sent_to = None t_hash = None if mylar.CONFIG.ENABLE_DDL is True and nzbprov == 'ddl': - ggc = getcomics.GC('nope') + ggc = getcomics.GC(issueid=IssueID, comicid=ComicID) sendsite = ggc.loadsite(os.path.join(mylar.CONFIG.CACHE_DIR, 'getcomics-' + nzbid), link) - ddl_it = ggc.parse_downloadresults(os.path.join(mylar.CONFIG.CACHE_DIR, 'getcomics-' + nzbid)) + ddl_it = ggc.parse_downloadresults(os.path.join(mylar.CONFIG.CACHE_DIR, 'getcomics-' + nzbid), link) logger.info("ddl status response: %s" % ddl_it) - if ddl_it['status'] == 'success': - nzbname = ddl_it['filename'] - logger.info('Successfully retrieved %s from DDL site. Now submitting for post-processing...' % (nzbname)) - mylar.PP_QUEUE.put({'nzb_name': nzbname, - 'nzb_folder': mylar.CONFIG.DDL_LOCATION, - 'issueid': IssueID, - 'failed': False, - 'comicid': ComicID, - 'apicall': True, - 'ddl': True}) + if ddl_it['success'] is True: + logger.info('Successfully snatched %s from DDL site. It is currently being queued to download in position %s' % (nzbname, mylar.DDL_QUEUE.qsize())) else: logger.info('Failed to retrieve %s from the DDL site.' 
%s (nzbname)) return "ddl-fail" From 340f4b62b7f766e81d4b402d352ce5a802bc5262 Mon Sep 17 00:00:00 2001 From: evilhero Date: Thu, 17 Jan 2019 14:42:51 -0500 Subject: [PATCH 10/54] FIX: Fix for lone unicode character being mistakingly represented as an issue number during some post-processing scans, FIX: set INFO logging lines to DEBUG when performing RSS search --- mylar/PostProcessor.py | 6 ++++-- mylar/search.py | 14 +++++++------- 2 files changed, 11 insertions(+), 9 deletions(-) diff --git a/mylar/PostProcessor.py b/mylar/PostProcessor.py index eb8ce219..645316e9 100755 --- a/mylar/PostProcessor.py +++ b/mylar/PostProcessor.py @@ -612,8 +612,10 @@ class PostProcessor(object): issuechk = myDB.select("SELECT * from issues WHERE ComicID=?", [cs['ComicID']]) if not issuechk: - logger.fdebug('%s No corresponding issue #%s found for %s' % (module, temploc, cs['ComicID'])) - + try: + logger.fdebug('%s No corresponding issue #%s found for %s' % (module, temploc, cs['ComicID'])) + except: + continue #check the last refresh date of the series, and if > than an hr try again: c_date = cs['LastUpdated'] if c_date is None: diff --git a/mylar/search.py b/mylar/search.py index 5d8e2d81..c56b62b0 100755 --- a/mylar/search.py +++ b/mylar/search.py @@ -44,7 +44,7 @@ def search_init(ComicName, IssueNumber, ComicYear, SeriesYear, Publisher, IssueD unaltered_ComicName = None if filesafe: if filesafe != ComicName and mode != 'want_ann': - logger.info('[SEARCH] Special Characters exist within Series Title. Enabling search-safe Name : ' + filesafe) + logger.info('[SEARCH] Special Characters exist within Series Title. Enabling search-safe Name : %s' % filesafe) if AlternateSearch is None or AlternateSearch == 'None': AlternateSearch = filesafe else: @@ -60,7 +60,7 @@ def search_init(ComicName, IssueNumber, ComicYear, SeriesYear, Publisher, IssueD if Publisher: if Publisher == 'IDW Publishing': Publisher = 'IDW' - logger.fdebug('Publisher is : ' + Publisher) + logger.fdebug('Publisher is : %s' % Publisher) if IssueArcID and not IssueID: issuetitle = helpers.get_issue_title(IssueArcID) @@ -68,7 +68,7 @@ def search_init(ComicName, IssueNumber, ComicYear, SeriesYear, Publisher, IssueD issuetitle = helpers.get_issue_title(IssueID) if issuetitle: - logger.info('Issue Title given as : ' + issuetitle) + logger.fdebug('Issue Title given as : %s' % issuetitle) else: logger.fdebug('Issue Title not found. 
Setting to None.') @@ -91,8 +91,8 @@ def search_init(ComicName, IssueNumber, ComicYear, SeriesYear, Publisher, IssueD oneoff = True if SARC: logger.fdebug("Story-ARC Search parameters:") - logger.fdebug("Story-ARC: " + str(SARC)) - logger.fdebug("IssueArcID: " + str(IssueArcID)) + logger.fdebug("Story-ARC: %s" % SARC) + logger.fdebug("IssueArcID: %s" % IssueArcID) torprovider = [] torp = 0 @@ -177,7 +177,7 @@ def search_init(ComicName, IssueNumber, ComicYear, SeriesYear, Publisher, IssueD prov_order, torznab_info, newznab_info = provider_sequence(nzbprovider, torprovider, newznab_hosts, torznab_hosts, ddlprovider) # end provider order sequencing - logger.info('search provider order is ' + str(prov_order)) + logger.fdebug('search provider order is ' + str(prov_order)) #fix for issue dates between Nov-Dec/(Jan-Feb-Mar) IssDt = str(IssueDate)[5:7] @@ -424,7 +424,7 @@ def search_init(ComicName, IssueNumber, ComicYear, SeriesYear, Publisher, IssueD searchprov = mylar.TMP_PROV return findit, searchprov else: - logger.info('findit: %s' % findit) + logger.fdebug('findit: %s' % findit) #if searchprov == '32P': # pass if manualsearch is None: From 2bee5d46574e0f0bc7d0c067d8231444affd356d Mon Sep 17 00:00:00 2001 From: evilhero Date: Thu, 17 Jan 2019 21:35:27 -0500 Subject: [PATCH 11/54] FIX: When loading item from ddl queue, would throw an error due to an invalid reference --- mylar/helpers.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/mylar/helpers.py b/mylar/helpers.py index ace20ec0..bd35f4f4 100755 --- a/mylar/helpers.py +++ b/mylar/helpers.py @@ -3034,7 +3034,7 @@ def ddl_downloader(queue): elif mylar.DDL_LOCK is False and queue.qsize() >= 1: item = queue.get(True) - logger.info('Now loading request from DDL queue: %s (%s)' % item['series']) + logger.info('Now loading request from DDL queue: %s' % item['series']) if item == 'exit': logger.info('Cleaning up workers for shutdown') break From 05f3eb4ad89ce81cd9fb662eb74561bae968c4dc Mon Sep 17 00:00:00 2001 From: evilhero Date: Tue, 22 Jan 2019 10:16:13 -0500 Subject: [PATCH 12/54] FIX:(#2156) On Windows, git would return error when viewing configuration page and display None for branch history, FIX: When manual post-processing and SABnzbd was enabled, in some cases would mistakingly try to post-process using some sabnzbd options, IMP: Added the [__ISSUEID__] option for post-processing files that will not pp/scan/etc. Add the issueid to the end of the filename in the format of [__123456__] and Mylar will post-process the issue directly to that issueid and ignore any verification checking (it will also remove the [__123456__] from the filename after post-processing), IMP: Added sabnzbd version to Test SABnzbd output, and if SAB is too low of a version, will remove Completed Download Handling as an option and provide warning indicating as such --- data/interfaces/default/config.html | 48 +++++++- mylar/PostProcessor.py | 183 +++++++++++++++++++++------- mylar/config.py | 8 +- mylar/filechecker.py | 27 +++- mylar/webserve.py | 18 ++- 5 files changed, 227 insertions(+), 57 deletions(-) diff --git a/data/interfaces/default/config.html b/data/interfaces/default/config.html index 6db9f27f..bd6ef60b 100755 --- a/data/interfaces/default/config.html +++ b/data/interfaces/default/config.html @@ -397,20 +397,36 @@
-
+
- ComicRN script cannot be used with this enabled + ComicRN script cannot be used with this enabled & required SAB version > 0.8.0 +
+
+
+ -

+
+ <% + if mylar.CONFIG.SAB_VERSION is not None: + sabv = 'last tested version: %s' % mylar.CONFIG.SAB_VERSION + else: + sabv = '' + %> + ${sabv} +
@@ -1949,7 +1965,9 @@ function numberWithCommas(x) { return x.toString().replace(/\B(?=(\d{3})+(?!\d))/g, ","); }; - + function numberWithDecimals(x) { + return x.toString().replace(/\B(?=(\d{3})+(?!\d))/g, "."); + }; $("#test_32p").click(function(){ var imagechk = document.getElementById("test32p_statusicon"); $.get('test_32p', @@ -1990,8 +2008,26 @@ alert(data.error); return; } - $('#sabstatus').val(data); - $('#ajaxMsg').html("
"+data+"
"); + var obj = JSON.parse(data); + var versionsab = obj['version']; + vsab = numberWithDecimals(versionsab); + alert(vsab); + $('#sabstatus').val(obj['status']); + $('#sabversion span').text('SABnzbd version: '+versionsab); + if ( vsab < "0.8.0" ){ + scdh = document.getElementById("sab_cdh"); + scdh.style.display = "none"; + nocdh = document.getElementById("sab_nocdh"); + nocdh.style.display = "unset"; + scdh_line = document.getElementById("sab_client_post_processing"); + scdh_line.value = 0; + } else { + scdh = document.getElementById("sab_cdh"); + scdh.style.display = "unset"; + nocdh = document.getElementById("sab_nocdh"); + nocdh.style.display = "none"; + } + $('#ajaxMsg').html("
"+obj['status']+"
"); if ( data.indexOf("Successfully") > -1){ imagechk.src = ""; imagechk.src = "interfaces/default/images/success.png"; diff --git a/mylar/PostProcessor.py b/mylar/PostProcessor.py index 645316e9..0e727b5e 100755 --- a/mylar/PostProcessor.py +++ b/mylar/PostProcessor.py @@ -350,15 +350,18 @@ class PostProcessor(object): logger.fdebug(module + ' nzb name: ' + self.nzb_name) logger.fdebug(module + ' nzb folder: ' + self.nzb_folder) if self.ddl is False: - if mylar.USE_SABNZBD==0: - logger.fdebug(module + ' Not using SABnzbd') - elif mylar.USE_SABNZBD != 0 and self.nzb_name == 'Manual Run': - logger.fdebug(module + ' Not using SABnzbd : Manual Run') - else: - # if the SAB Directory option is enabled, let's use that folder name and append the jobname. - if all([mylar.CONFIG.SAB_TO_MYLAR, mylar.CONFIG.SAB_DIRECTORY is not None, mylar.CONFIG.SAB_DIRECTORY != 'None']): - self.nzb_folder = os.path.join(mylar.CONFIG.SAB_DIRECTORY, self.nzb_name).encode(mylar.SYS_ENCODING) - logger.fdebug(module + ' SABnzbd Download folder option enabled. Directory set to : ' + self.nzb_folder) + if mylar.USE_SABNZBD==1: + if self.nzb_name != 'Manual Run': + logger.fdebug(module + ' Using SABnzbd') + logger.fdebug(module + ' NZB name as passed from NZBGet: ' + self.nzb_name) + + if self.nzb_name == 'Manual Run': + logger.fdebug(module + ' Manual Run Post-Processing enabled.') + else: + # if the SAB Directory option is enabled, let's use that folder name and append the jobname. + if all([mylar.CONFIG.SAB_TO_MYLAR, mylar.CONFIG.SAB_DIRECTORY is not None, mylar.CONFIG.SAB_DIRECTORY != 'None']): + self.nzb_folder = os.path.join(mylar.CONFIG.SAB_DIRECTORY, self.nzb_name).encode(mylar.SYS_ENCODING) + logger.fdebug(module + ' SABnzbd Download folder option enabled. Directory set to : ' + self.nzb_folder) if mylar.USE_NZBGET==1: if self.nzb_name != 'Manual Run': @@ -471,8 +474,50 @@ class PostProcessor(object): if any([self.issueid is not None, self.comicid is not None]): comicseries = myDB.select('SELECT * FROM comics WHERE ComicID=?', [self.comicid]) else: - tmpsql = "SELECT * FROM comics WHERE DynamicComicName IN ({seq}) COLLATE NOCASE".format(seq=','.join('?' 
* len(loopchk))) - comicseries = myDB.select(tmpsql, tuple(loopchk)) + if fl['issueid'] is not None: + logger.info('issueid detected in filename: %s' % fl['issueid']) + csi = myDB.selectone('SELECT i.ComicID, i.IssueID, i.Issue_Number, c.ComicName FROM comics as c JOIN issues as i ON c.ComicID = i.ComicID WHERE i.IssueID=?', [fl['issueid']]).fetchone() + if csi is None: + csi = myDB.selectone('SELECT i.ComicID as comicid, i.IssueID, i.Issue_Number, a.ReleaseComicName, c.ComicName FROM comics as c JOIN annuals as a ON c.ComicID = a.ComicID WHERE a.IssueID=?', [fl['issueid']]).fetchone() + if csi is not None: + annchk = 'yes' + else: + continue + else: + annchk = 'no' + if fl['sub']: + logger.fdebug('%s[SUB: %s][CLOCATION: %s]' % (module, fl['sub'], fl['comiclocation'])) + clocation = os.path.join(fl['comiclocation'], fl['sub'], helpers.conversion(fl['comicfilename'])) + else: + logger.fdebug('%s[CLOCATION] %s' % (module, fl['comiclocation'])) + clocation = os.path.join(fl['comiclocation'],helpers.conversion(fl['comicfilename'])) + annualtype = None + if annchk == 'yes': + if 'Annual' in csi['ReleaseComicName']: + annualtype = 'Annual' + elif 'Special' in csi['ReleaseComicName']: + annualtype = 'Special' + else: + if 'Annual' in csi['ComicName']: + annualtype = 'Annual' + elif 'Special' in csi['ComicName']: + annualtype = 'Special' + manual_list.append({"ComicLocation": clocation, + "ComicID": csi['ComicID'], + "IssueID": csi['IssueID'], + "IssueNumber": csi['Issue_Number'], + "AnnualType": annualtype, + "ComicName": csi['ComicName'], + "Series": fl['series_name'], + "AltSeries": fl['alt_series'], + "One-Off": False, + "ForcedMatch": True}) + logger.info('manual_list: %s' % manual_list) + break + + else: + tmpsql = "SELECT * FROM comics WHERE DynamicComicName IN ({seq}) COLLATE NOCASE".format(seq=','.join('?' 
* len(loopchk))) + comicseries = myDB.select(tmpsql, tuple(loopchk)) if not comicseries or orig_seriesname != mod_seriesname: if all(['special' in orig_seriesname.lower(), mylar.CONFIG.ANNUALS_ON, orig_seriesname != mod_seriesname]): @@ -787,7 +832,8 @@ class PostProcessor(object): "ComicName": cs['ComicName'], "Series": watchmatch['series_name'], "AltSeries": watchmatch['alt_series'], - "One-Off": False}) + "One-Off": False, + "ForcedMatch": False}) break else: logger.fdebug(module + '[NON-MATCH: ' + cs['ComicName'] + '-' + cs['ComicID'] + '] Incorrect series - not populating..continuing post-processing') @@ -894,7 +940,7 @@ class PostProcessor(object): "ComicVersion": 'v' + str(av['SeriesYear']), "Publisher": av['IssuePublisher'], "Total": av['TotalIssues'], # this will return the total issues in the arc (not needed for this) - "ComicID": av['ComicID'], + "Type": av['Type'], "IsArc": True} }) @@ -923,28 +969,50 @@ class PostProcessor(object): if arcmatch['process_status'] == 'fail': nm+=1 else: - temploc= arcmatch['justthedigits'].replace('_', ' ') - temploc = re.sub('[\#\']', '', temploc) - if helpers.issuedigits(temploc) != helpers.issuedigits(v[i]['ArcValues']['IssueNumber']): + try: + if v[i]['ArcValues']['Type'] == 'TPB' and v[i]['ArcValues']['Total'] > 1: + if watchmatch['series_volume'] is not None: + just_the_digits = re.sub('[^0-9]', '', arcmatch['series_volume']).strip() + else: + just_the_digits = re.sub('[^0-9]', '', arcmatch['justthedigits']).strip() + else: + just_the_digits = arcmatch['justthedigits'] + except Exception as e: + logger.warn('[Exception: %s] Unable to properly match up/retrieve issue number (or volume) for this [CS: %s] [WATCHMATCH: %s]' % (e, v[i]['ArcValues'], v[i]['WatchValues'])) + nm+=1 + continue + + if just_the_digits is not None: + temploc= just_the_digits.replace('_', ' ') + temploc = re.sub('[\#\']', '', temploc) + logger.fdebug('temploc: %s' % temploc) + else: + temploc = None + + if temploc is not None and helpers.issuedigits(temploc) != helpers.issuedigits(v[i]['ArcValues']['IssueNumber']): #logger.fdebug('issues dont match. Skipping') i+=1 continue - if 'annual' in temploc.lower(): - biannchk = re.sub('-', '', temploc.lower()).strip() - if 'biannual' in biannchk: - logger.fdebug(module + ' Bi-Annual detected.') - fcdigit = helpers.issuedigits(re.sub('biannual', '', str(biannchk)).strip()) - else: - fcdigit = helpers.issuedigits(re.sub('annual', '', str(temploc.lower())).strip()) - logger.fdebug(module + ' Annual detected [' + str(fcdigit) +']. ComicID assigned as ' + str(v[i]['WatchValues']['ComicID'])) - annchk = "yes" - issuechk = myDB.selectone("SELECT * from storyarcs WHERE ComicID=? AND Int_IssueNumber=?", [v[i]['WatchValues']['ComicID'], fcdigit]).fetchone() else: - fcdigit = helpers.issuedigits(temploc) - issuechk = myDB.selectone("SELECT * from storyarcs WHERE ComicID=? AND Int_IssueNumber=?", [v[i]['WatchValues']['ComicID'], fcdigit]).fetchone() + if 'annual' in temploc.lower(): + biannchk = re.sub('-', '', temploc.lower()).strip() + if 'biannual' in biannchk: + logger.fdebug(module + ' Bi-Annual detected.') + fcdigit = helpers.issuedigits(re.sub('biannual', '', str(biannchk)).strip()) + else: + fcdigit = helpers.issuedigits(re.sub('annual', '', str(temploc.lower())).strip()) + logger.fdebug(module + ' Annual detected [' + str(fcdigit) +']. ComicID assigned as ' + str(v[i]['WatchValues']['ComicID'])) + annchk = "yes" + issuechk = myDB.selectone("SELECT * from storyarcs WHERE ComicID=? 
AND Int_IssueNumber=?", [v[i]['WatchValues']['ComicID'], fcdigit]).fetchone() + else: + fcdigit = helpers.issuedigits(temploc) + issuechk = myDB.selectone("SELECT * from storyarcs WHERE ComicID=? AND Int_IssueNumber=?", [v[i]['WatchValues']['ComicID'], fcdigit]).fetchone() if issuechk is None: - logger.fdebug(module + ' No corresponding issue # found for ' + str(v[i]['WatchValues']['ComicID'])) + try: + logger.fdebug(module + ' No corresponding issue # found for ' + str(v[i]['WatchValues']['ComicID'])) + except: + continue else: datematch = "True" if len(arcmatch) >= 1 and arcmatch['issue_year'] is not None: @@ -1037,7 +1105,7 @@ class PostProcessor(object): if all(['0-Day Week' in self.nzb_name, mylar.CONFIG.PACK_0DAY_WATCHLIST_ONLY is True]): pass else: - oneofflist = myDB.select("select s.Issue_Number, s.ComicName, s.IssueID, s.ComicID, s.Provider, w.PUBLISHER, w.weeknumber, w.year from snatched as s inner join nzblog as n on s.IssueID = n.IssueID inner join weekly as w on s.IssueID = w.IssueID WHERE n.OneOff = 1;") #(s.Provider ='32P' or s.Provider='WWT' or s.Provider='DEM') AND n.OneOff = 1;") + oneofflist = myDB.select("select s.Issue_Number, s.ComicName, s.IssueID, s.ComicID, s.Provider, w.format, w.PUBLISHER, w.weeknumber, w.year from snatched as s inner join nzblog as n on s.IssueID = n.IssueID inner join weekly as w on s.IssueID = w.IssueID WHERE n.OneOff = 1;") #(s.Provider ='32P' or s.Provider='WWT' or s.Provider='DEM') AND n.OneOff = 1;") #oneofflist = myDB.select("select s.Issue_Number, s.ComicName, s.IssueID, s.ComicID, s.Provider, w.PUBLISHER, w.weeknumber, w.year from snatched as s inner join nzblog as n on s.IssueID = n.IssueID and s.Hash is not NULL inner join weekly as w on s.IssueID = w.IssueID WHERE n.OneOff = 1;") #(s.Provider ='32P' or s.Provider='WWT' or s.Provider='DEM') AND n.OneOff = 1;") if not oneofflist: pass #continue @@ -1057,6 +1125,7 @@ class PostProcessor(object): "ComicVersion": None, "Publisher": ofl['PUBLISHER'], "Total": None, + "Type": ofl['format'], "ComicID": ofl['ComicID'], "IsArc": False}}) @@ -1074,23 +1143,41 @@ class PostProcessor(object): nm+=1 continue else: - temploc= watchmatch['justthedigits'].replace('_', ' ') - temploc = re.sub('[\#\']', '', temploc) + try: + if ofv['WatchValues']['Type'] is not None and ofv['WatchValues']['Total'] > 1: + if watchmatch['series_volume'] is not None: + just_the_digits = re.sub('[^0-9]', '', watchmatch['series_volume']).strip() + else: + just_the_digits = re.sub('[^0-9]', '', watchmatch['justthedigits']).strip() + else: + just_the_digits = watchmatch['justthedigits'] + except Exception as e: + logger.warn('[Exception: %s] Unable to properly match up/retrieve issue number (or volume) for this [CS: %s] [WATCHMATCH: %s]' % (e, cs, watchmatch)) + nm+=1 + continue + + if just_the_digits is not None: + temploc= just_the_digits.replace('_', ' ') + temploc = re.sub('[\#\']', '', temploc) + logger.fdebug('temploc: %s' % temploc) + else: + temploc = None logger.info('watchmatch: %s' % watchmatch) - if 'annual' in temploc.lower(): - biannchk = re.sub('-', '', temploc.lower()).strip() - if 'biannual' in biannchk: - logger.fdebug(module + ' Bi-Annual detected.') - fcdigit = helpers.issuedigits(re.sub('biannual', '', str(biannchk)).strip()) + if temploc is not None: + if 'annual' in temploc.lower(): + biannchk = re.sub('-', '', temploc.lower()).strip() + if 'biannual' in biannchk: + logger.fdebug(module + ' Bi-Annual detected.') + fcdigit = helpers.issuedigits(re.sub('biannual', '', str(biannchk)).strip()) + else: + 
fcdigit = helpers.issuedigits(re.sub('annual', '', str(temploc.lower())).strip()) + logger.fdebug(module + ' Annual detected [' + str(fcdigit) +']. ComicID assigned as ' + str(ofv['ComicID'])) + annchk = "yes" else: - fcdigit = helpers.issuedigits(re.sub('annual', '', str(temploc.lower())).strip()) - logger.fdebug(module + ' Annual detected [' + str(fcdigit) +']. ComicID assigned as ' + str(ofv['ComicID'])) - annchk = "yes" - else: - fcdigit = helpers.issuedigits(temploc) + fcdigit = helpers.issuedigits(temploc) - if fcdigit == helpers.issuedigits(ofv['Issue_Number']): + if temploc is not None and fcdigit == helpers.issuedigits(ofv['Issue_Number']) or all([temploc is None, helpers.issuedigits(ofv['Issue_Number']) == '1']): if watchmatch['sub']: clocation = os.path.join(watchmatch['comiclocation'], watchmatch['sub'], helpers.conversion(watchmatch['comicfilename'])) else: @@ -2260,6 +2347,16 @@ class PostProcessor(object): nfilename = nfilename.replace(' ', mylar.CONFIG.REPLACE_CHAR) nfilename = re.sub('[\,\:\?\"\']', '', nfilename) nfilename = re.sub('[\/\*]', '-', nfilename) + if ml['ForcedMatch'] is True: + xyb = nfilename.find('[__') + if xyb != -1: + yyb = nfilename.find('__]', xyb) + if yyb != -1: + rem_issueid = nfilename[xyb+3:yyb] + logger.fdebug('issueid: %s' % rem_issueid) + nfilename = '%s %s'.strip() % (nfilename[:xyb], nfilename[yyb+3:]) + logger.fdebug('issueid information [%s] removed successsfully: %s' % (rem_issueid, nfilename)) + self._log("New Filename: " + nfilename) logger.fdebug(module + ' New Filename: ' + nfilename) diff --git a/mylar/config.py b/mylar/config.py index 10b5eb8d..8f9d009c 100644 --- a/mylar/config.py +++ b/mylar/config.py @@ -208,6 +208,7 @@ _CONFIG_DEFINITIONS = OrderedDict({ 'SAB_PRIORITY': (str, 'SABnzbd', "Default"), 'SAB_TO_MYLAR': (bool, 'SABnzbd', False), 'SAB_DIRECTORY': (str, 'SABnzbd', None), + 'SAB_VERSION': (str, 'SABnzbd', None), 'SAB_CLIENT_POST_PROCESSING': (bool, 'SABnzbd', False), #0/False: ComicRN.py, #1/True: Completed Download Handling 'NZBGET_HOST': (str, 'NZBGet', None), @@ -795,7 +796,6 @@ class Config(object): mylar.RSS_STATUS = 'Waiting' elif self.ENABLE_RSS is False and mylar.RSS_STATUS == 'Waiting': mylar.RSS_STATUS = 'Paused' - logger.info('self.enable_rss is %s [%s]' % (self.ENABLE_RSS, mylar.RSS_STATUS)) if not helpers.is_number(self.CHMOD_DIR): logger.fdebug("CHMOD Directory value is not a valid numeric - please correct. Defaulting to 0777") @@ -875,6 +875,12 @@ class Config(object): elif self.SAB_PRIORITY == "4": self.SAB_PRIORITY = "Paused" else: self.SAB_PRIORITY = "Default" + if self.SAB_VERSION is not None: + config.set('SABnzbd', 'sab_version', self.SAB_VERSION) + if int(re.sub("[^0-9]", '', self.SAB_VERSION).strip()) < int(re.sub("[^0-9]", '', '0.8.0').strip()) and self.SAB_CLIENT_POST_PROCESSING is True: + logger.warn('Your SABnzbd client is less than 0.8.0, and does not support Completed Download Handling which is enabled. Disabling CDH.') + self.SAB_CLIENT_POST_PROCESSING = False + mylar.USE_WATCHDIR = False mylar.USE_UTORRENT = False mylar.USE_RTORRENT = False diff --git a/mylar/filechecker.py b/mylar/filechecker.py index 88f00962..99c477d9 100755 --- a/mylar/filechecker.py +++ b/mylar/filechecker.py @@ -50,7 +50,8 @@ class FileChecker(object): self.watchcomic = re.sub('\?', '', watchcomic).strip() #strip the ? sepearte since it affects the regex. self.watchcomic = re.sub(u'\u2014', ' - ', watchcomic).strip() #replace the \u2014 with a normal - because this world is f'd up enough to have something like that. 
self.watchcomic = re.sub(u'\u2013', ' - ', watchcomic).strip() #replace the \u2013 with a normal - because again, people are dumb. - self.watchcomic = unicodedata.normalize('NFKD', self.watchcomic).encode('ASCII', 'ignore') + if type(self.watchcomic) != str: + self.watchcomic = unicodedata.normalize('NFKD', self.watchcomic).encode('ASCII', 'ignore') else: self.watchcomic = None @@ -107,7 +108,6 @@ class FileChecker(object): self.AS_Alt = AS_Alternates['AS_Alt'] self.AS_Tuple = AS_Alternates['AS_Tuple'] - def listFiles(self): comiclist = [] watchmatch = {} @@ -122,6 +122,7 @@ class FileChecker(object): 'comiclocation': runresults['comiclocation'], 'series_name': runresults['series_name'], 'series_name_decoded': runresults['series_name_decoded'], + 'issueid': runresults['issueid'], 'dynamic_name': runresults['dynamic_name'], 'series_volume': runresults['series_volume'], 'alt_series': runresults['alt_series'], @@ -159,6 +160,7 @@ class FileChecker(object): 'comiclocation': runresults['comiclocation'], 'series_name': runresults['series_name'], 'series_name_decoded': runresults['series_name_decoded'], + 'issueid': runresults['issueid'], 'alt_series': runresults['alt_series'], 'alt_issue': runresults['alt_issue'], 'dynamic_name': runresults['dynamic_name'], @@ -179,6 +181,7 @@ class FileChecker(object): 'IssueYear': runresults['issue_year'], 'JusttheDigits': runresults['justthedigits'], 'AnnualComicID': runresults['annual_comicid'], + 'issueid': runresults['issueid'], 'scangroup': runresults['scangroup'] }) comiccnt +=1 @@ -194,6 +197,7 @@ class FileChecker(object): 'alt_issue': runresults['alt_issue'], 'issue_year': runresults['issue_year'], 'issue_number': runresults['issue_number'], + 'issueid': runresults['issueid'], 'scangroup': runresults['scangroup'] }) @@ -283,6 +287,16 @@ class FileChecker(object): modfilename = modfilename.replace('()','').strip() + issueid = None + x = modfilename.find('[__') + if x != -1: + y = modfilename.find('__]', x) + if y != -1: + issueid = modfilename[x+3:y] + logger.fdebug('issueid: %s' % issueid) + modfilename = '%s %s'.strip() % (modfilename[:x], modfilename[y+3:]) + logger.fdebug('issueid %s removed successsfully: %s' % (issueid, modfilename)) + #here we take a snapshot of the current modfilename, the intent is that we will remove characters that match #as we discover them - namely volume, issue #, years, etc #the remaining strings should be the series title and/or issue title if present (has to be detected properly) @@ -1059,6 +1073,7 @@ class FileChecker(object): 'comiclocation': self.dir, 'series_name': series_name, 'series_name_decoded': series_name_decoded, + 'issueid': issueid, 'alt_series': alt_series, 'alt_issue': alt_issue, 'dynamic_name': dreplace, @@ -1078,6 +1093,7 @@ class FileChecker(object): 'comiclocation': self.dir, 'series_name': series_name, 'series_name_decoded': series_name_decoded, + 'issueid': issueid, 'alt_series': alt_series, 'alt_issue': alt_issue, 'dynamic_name': self.dynamic_replace(series_name)['mod_seriesname'], @@ -1094,6 +1110,7 @@ class FileChecker(object): 'comiclocation': self.dir, 'series_name': series_name, 'series_name_decoded': series_name_decoded, + 'issueid': issueid, 'alt_series': alt_series, 'alt_issue': alt_issue, 'series_volume': issue_volume, @@ -1259,6 +1276,7 @@ class FileChecker(object): 'alt_series': series_info['alt_series'], 'alt_issue': series_info['alt_issue'], 'issue_year': series_info['issue_year'], + 'issueid': series_info['issueid'], 'justthedigits': justthedigits, 'annual_comicid': annual_comicid, 
'scangroup': series_info['scangroup']} @@ -1270,11 +1288,12 @@ class FileChecker(object): 'sub': series_info['sub'], 'comiclocation': series_info['comiclocation'], 'series_name': series_info['series_name'], - 'alt_series': series_info['alt_series'], - 'alt_issue': series_info['alt_issue'], + 'alt_series': series_info['alt_series'], + 'alt_issue': series_info['alt_issue'], 'issue_number': series_info['issue_number'], 'series_volume': series_info['series_volume'], 'issue_year': series_info['issue_year'], + 'issueid': series_info['issueid'], 'scangroup': series_info['scangroup']} diff --git a/mylar/webserve.py b/mylar/webserve.py index 0952cf4e..d7026950 100644 --- a/mylar/webserve.py +++ b/mylar/webserve.py @@ -4545,7 +4545,7 @@ class WebInterface(object): #---- # to be implemented in the future. if mylar.INSTALL_TYPE == 'git': - branch_history, err = mylar.versioncheck.runGit("log --pretty=format:'%h - %cr - %an - %s' -n 5") + branch_history, err = mylar.versioncheck.runGit('log --pretty=format:"%h - %cr - %an - %s" -n 5') #here we pass the branch_history to the pretty_git module to break it down if branch_history: br_hist = self.pretty_git(branch_history) @@ -4649,6 +4649,7 @@ class WebInterface(object): "sab_priority": mylar.CONFIG.SAB_PRIORITY, "sab_directory": mylar.CONFIG.SAB_DIRECTORY, "sab_to_mylar": helpers.checked(mylar.CONFIG.SAB_TO_MYLAR), + "sab_version": mylar.CONFIG.SAB_VERSION, "sab_client_post_processing": helpers.checked(mylar.CONFIG.SAB_CLIENT_POST_PROCESSING), "nzbget_host": mylar.CONFIG.NZBGET_HOST, "nzbget_port": mylar.CONFIG.NZBGET_PORT, @@ -5168,7 +5169,12 @@ class WebInterface(object): else: verify = False + version = 'Unknown' try: + v = requests.get(querysab, params={'mode': 'version'}, verify=verify) + if str(v.status_code) == '200': + logger.fdebug('sabnzbd version: %s' % v.content) + version = v.text r = requests.get(querysab, params=payload, verify=verify) except Exception, e: logger.warn('Error fetching data from %s: %s' % (querysab, e)) @@ -5183,6 +5189,10 @@ class WebInterface(object): verify = False try: + v = requests.get(querysab, params={'mode': 'version'}, verify=verify) + if str(v.status_code) == '200': + logger.fdebug('sabnzbd version: %s' % v.text) + version = v.text r = requests.get(querysab, params=payload, verify=verify) except Exception, e: logger.warn('Error fetching data from %s: %s' % (sabhost, e)) @@ -5191,7 +5201,7 @@ class WebInterface(object): return 'Unable to retrieve data from SABnzbd' - logger.info('status code: ' + str(r.status_code)) + logger.fdebug('status code: ' + str(r.status_code)) if str(r.status_code) != '200': logger.warn('Unable to properly query SABnzbd @' + sabhost + ' [Status Code returned: ' + str(r.status_code) + ']') @@ -5215,7 +5225,9 @@ class WebInterface(object): mylar.CONFIG.SAB_APIKEY = q_apikey logger.info('APIKey provided is the FULL APIKey which is the correct key. 
You still need to SAVE the config for the changes to be applied.') logger.info('Connection to SABnzbd tested sucessfully') - return "Successfully verified APIkey" + mylar.CONFIG.SAB_VERSION = version + return json.dumps({"status": "Successfully verified APIkey.", "version": str(version)}) + SABtest.exposed = True def NZBGet_test(self, nzbhost=None, nzbport=None, nzbusername=None, nzbpassword=None): From f53ad0fe71d31cfad3bc3837cbc728028502df86 Mon Sep 17 00:00:00 2001 From: evilhero Date: Tue, 22 Jan 2019 22:08:40 -0500 Subject: [PATCH 13/54] FIX:(#2173) Post-Processing via ComicRN/CDH would fail due to invalid reference --- mylar/PostProcessor.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/mylar/PostProcessor.py b/mylar/PostProcessor.py index 0e727b5e..a63ddeea 100755 --- a/mylar/PostProcessor.py +++ b/mylar/PostProcessor.py @@ -2347,7 +2347,7 @@ class PostProcessor(object): nfilename = nfilename.replace(' ', mylar.CONFIG.REPLACE_CHAR) nfilename = re.sub('[\,\:\?\"\']', '', nfilename) nfilename = re.sub('[\/\*]', '-', nfilename) - if ml['ForcedMatch'] is True: + if ml is not None and ml['ForcedMatch'] is True: xyb = nfilename.find('[__') if xyb != -1: yyb = nfilename.find('__]', xyb) From 0cfe3e22e0c963335e1c9d9dc63df86d76fbb151 Mon Sep 17 00:00:00 2001 From: evilhero Date: Wed, 23 Jan 2019 13:12:54 -0500 Subject: [PATCH 14/54] FIX: When adding TPB/GN series that collects other TPB series, would incorrectly attempt to find an issue number resulting in an error --- mylar/cv.py | 38 ++++++++++++++++++++++++-------------- 1 file changed, 24 insertions(+), 14 deletions(-) diff --git a/mylar/cv.py b/mylar/cv.py index af588563..788fc9d5 100755 --- a/mylar/cv.py +++ b/mylar/cv.py @@ -374,8 +374,10 @@ def GetComicInfo(comicid, dom, safechk=None): #if it's point form bullets, ignore it cause it's not the current volume stuff. test_it = desc_soup.find('ul') if test_it: - for x in test_it.findAll('a'): - micdrop.append(x['data-ref-id']) + for x in test_it.findAll('li'): + if any(['Next' in x.findNext(text=True), 'Previous' in x.findNext(text=True)]): + mic_check = x.find('a') + micdrop.append(mic_check['data-ref-id']) for fc in desclinks: #logger.info('fc: %s' % fc) @@ -394,17 +396,24 @@ def GetComicInfo(comicid, dom, safechk=None): fc_cid = fc_id fc_isid = None issuerun = fc.next_sibling - lines = re.sub("[^0-9]", ' ', issuerun).strip().split(' ') - if len(lines) > 0: - for x in sorted(lines, reverse=True): - srchline = issuerun.rfind(x) - if srchline != -1: - try: - if issuerun[srchline+len(x)] == ',' or issuerun[srchline+len(x)] == '.' or issuerun[srchline+len(x)] == ' ': - issuerun = issuerun[:srchline+len(x)] - break - except: - continue + if issuerun is not None: + lines = re.sub("[^0-9]", ' ', issuerun).strip().split(' ') + if len(lines) > 0: + for x in sorted(lines, reverse=True): + srchline = issuerun.rfind(x) + if srchline != -1: + try: + if issuerun[srchline+len(x)] == ',' or issuerun[srchline+len(x)] == '.' 
or issuerun[srchline+len(x)] == ' ': + issuerun = issuerun[:srchline+len(x)] + break + except Exception as e: + logger.warn('[ERROR] %s' % e) + continue + else: + iss_start = fc_name.find('#') + issuerun = fc_name[iss_start:].strip() + fc_name = fc_name[:iss_start].strip() + if issuerun.endswith('.') or issuerun.endswith(','): #logger.fdebug('Changed issuerun from %s to %s' % (issuerun, issuerun[:-1])) issuerun = issuerun[:-1] @@ -412,7 +421,8 @@ def GetComicInfo(comicid, dom, safechk=None): issuerun = issuerun[:-4].strip() elif issuerun.endswith(' and'): issuerun = issuerun[:-3].strip() - + else: + continue # except: # pass issue_list.append({'series': fc_name, From 02d805d4bd2ca94114479032f6e9d4e16ae35703 Mon Sep 17 00:00:00 2001 From: evilhero Date: Wed, 23 Jan 2019 13:15:28 -0500 Subject: [PATCH 15/54] FIX: Fix for DDL attempting to parse issue year from results that did not contain an issue year, FIX: (#2172) Attempt to fix config page not displaying branch history due to non-English locale which resulted in unicode error --- mylar/search.py | 5 +++-- mylar/webserve.py | 21 ++++++++++++++------- 2 files changed, 17 insertions(+), 9 deletions(-) diff --git a/mylar/search.py b/mylar/search.py index c56b62b0..cd4bd735 100755 --- a/mylar/search.py +++ b/mylar/search.py @@ -896,7 +896,8 @@ def NZB_SEARCH(ComicName, IssueNumber, ComicYear, SeriesYear, Publisher, IssueDa comsize_b = entry['size'] elif entry['site'] == 'DDL': comsize_b = helpers.human2bytes(entry['size']) - except: + except Exception as e: + logger.warn('[ERROR] %s' % e) tmpsz = entry.enclosures[0] comsize_b = tmpsz['length'] @@ -1178,7 +1179,7 @@ def NZB_SEARCH(ComicName, IssueNumber, ComicYear, SeriesYear, Publisher, IssueDa elif ComVersChk == 0: logger.fdebug("Series version detected as V1 (only series in existance with that title). Bypassing Year/Volume check") yearmatch = "true" - elif UseFuzzy == "0" or UseFuzzy == "2" or UseFuzzy is None or IssDateFix != "no": + elif any([UseFuzzy == "0", UseFuzzy == "2", UseFuzzy is None, IssDateFix != "no"]) and parsed_comic['issue_year'] is not None: if parsed_comic['issue_year'][:-2] == '19' or parsed_comic['issue_year'][:-2] == '20': logger.fdebug('year detected: %s' % parsed_comic['issue_year']) result_comyear = parsed_comic['issue_year'] diff --git a/mylar/webserve.py b/mylar/webserve.py index d7026950..032eee79 100644 --- a/mylar/webserve.py +++ b/mylar/webserve.py @@ -4545,13 +4545,20 @@ class WebInterface(object): #---- # to be implemented in the future. if mylar.INSTALL_TYPE == 'git': - branch_history, err = mylar.versioncheck.runGit('log --pretty=format:"%h - %cr - %an - %s" -n 5') - #here we pass the branch_history to the pretty_git module to break it down - if branch_history: - br_hist = self.pretty_git(branch_history) - #br_hist = branch_history.replace("\n", "
\n") - else: - br_hist = err + try: + branch_history, err = mylar.versioncheck.runGit('log --encoding=UTF-8 --pretty=format:"%h - %cr - %an - %s" -n 5') + #here we pass the branch_history to the pretty_git module to break it down + if branch_history: + br_hist = self.pretty_git(branch_history) + try: + br_hist = u"" + br_hist.decode('utf-8') + except: + br_hist = br_hist + else: + br_hist = err + except Exception as e: + logger.fdebug('[ERROR] Unable to retrieve git revision history for some reason: %s' % e) + br_hist = 'This would be a nice place to see revision history...' else: br_hist = 'This would be a nice place to see revision history...' #---- From bb9f64172772d7ef3c5eb0fc0fc5a41febfb5910 Mon Sep 17 00:00:00 2001 From: evilhero Date: Wed, 23 Jan 2019 13:42:39 -0500 Subject: [PATCH 16/54] FIX: Fix for DDL trying to parse a pack result and not recognizing the 2 size formats --- mylar/getcomics.py | 3 +++ 1 file changed, 3 insertions(+) diff --git a/mylar/getcomics.py b/mylar/getcomics.py index 052f2fb7..ac5bbbe1 100644 --- a/mylar/getcomics.py +++ b/mylar/getcomics.py @@ -102,6 +102,9 @@ class GC(object): size = re.sub('MB', 'M', size).strip() elif 'GB' in size: size = re.sub('GB', 'G', size).strip() + if '//' in size: + nwsize = size.find('//') + size = re.sub('\[', '', size[:nwsize]).strip() i+=1 dateline = f.find('time') datefull = dateline['datetime'] From da2de2feab451b5a13ad6c38275bb4bcfbf98db3 Mon Sep 17 00:00:00 2001 From: evilhero Date: Wed, 23 Jan 2019 14:17:53 -0500 Subject: [PATCH 17/54] FIX: Fix for DDL Size incorrectly being parsed --- mylar/getcomics.py | 2 ++ 1 file changed, 2 insertions(+) diff --git a/mylar/getcomics.py b/mylar/getcomics.py index ac5bbbe1..c1e77dfc 100644 --- a/mylar/getcomics.py +++ b/mylar/getcomics.py @@ -105,6 +105,8 @@ class GC(object): if '//' in size: nwsize = size.find('//') size = re.sub('\[', '', size[:nwsize]).strip() + if any([size == '-M', size == '-G']): + size = '0 M' i+=1 dateline = f.find('time') datefull = dateline['datetime'] From 42f7e065da0addfe96fb65fe0931d4c37763ed42 Mon Sep 17 00:00:00 2001 From: evilhero Date: Wed, 23 Jan 2019 15:05:02 -0500 Subject: [PATCH 18/54] FIX: fix for DDL not parsing pack size properly which resulted in search error --- mylar/getcomics.py | 17 +++++++++-------- mylar/helpers.py | 15 +++++++++------ 2 files changed, 18 insertions(+), 14 deletions(-) diff --git a/mylar/getcomics.py b/mylar/getcomics.py index c1e77dfc..7ac6db00 100644 --- a/mylar/getcomics.py +++ b/mylar/getcomics.py @@ -98,14 +98,15 @@ class GC(object): year = re.sub('\|', '', year).strip() else: size = option_find.findNext(text=True) - if 'MB' in size: - size = re.sub('MB', 'M', size).strip() - elif 'GB' in size: - size = re.sub('GB', 'G', size).strip() - if '//' in size: - nwsize = size.find('//') - size = re.sub('\[', '', size[:nwsize]).strip() - if any([size == '-M', size == '-G']): + if all([re.sub(':', '', size).strip() != 'Size', len(re.sub('[^0-9]', '', size).strip()) > 0]): + if 'MB' in size: + size = re.sub('MB', 'M', size).strip() + elif 'GB' in size: + size = re.sub('GB', 'G', size).strip() + if '//' in size: + nwsize = size.find('//') + size = re.sub('\[', '', size[:nwsize]).strip() + else: size = '0 M' i+=1 dateline = f.find('time') diff --git a/mylar/helpers.py b/mylar/helpers.py index bd35f4f4..75262506 100755 --- a/mylar/helpers.py +++ b/mylar/helpers.py @@ -183,12 +183,15 @@ def human2bytes(s): num = re.sub(',', '', s[:-1]) #assert num.isdigit() and letter in symbols #use below assert statement to handle sizes with 
decimal places - assert float(num) and letter in symbols - num = float(num) - prefix = {symbols[0]: 1} - for i, s in enumerate(symbols[1:]): - prefix[s] = 1 << (i +1) *10 - return int(num * prefix[letter]) + if num != '0': + assert float(num) and letter in symbols + num = float(num) + prefix = {symbols[0]: 1} + for i, s in enumerate(symbols[1:]): + prefix[s] = 1 << (i +1) *10 + return int(num * prefix[letter]) + else: + return 0 def replace_all(text, dic): for i, j in dic.iteritems(): From b5e87992f821ace0ac2543c18702303c1efb83d3 Mon Sep 17 00:00:00 2001 From: evilhero Date: Sun, 27 Jan 2019 11:46:59 -0500 Subject: [PATCH 19/54] FIX:(#2088) Fix for search erroring out when searching for issue numbers with decimal places, and/or alpha-numerics within, FIX: Fixed a DDL post-processing problem where it would not pass the full path back to the post-processing call, FIX: Added some more exception logging to aid in future DDL search-related problems --- mylar/cmtagmylar.py | 4 ++-- mylar/getcomics.py | 6 +++--- mylar/search.py | 4 ++-- 3 files changed, 7 insertions(+), 7 deletions(-) diff --git a/mylar/cmtagmylar.py b/mylar/cmtagmylar.py index e0bc86aa..29080e42 100644 --- a/mylar/cmtagmylar.py +++ b/mylar/cmtagmylar.py @@ -58,8 +58,8 @@ def run(dirName, nzbName=None, issueid=None, comversion=None, manual=None, filen else: shutil.copy(filepath, new_filepath) filepath = new_filepath - except: - logger.warn(module + ' Unexpected Error: %s' % sys.exc_info()[0]) + except Exception as e: + logger.warn('%s Unexpected Error: %s [%s]' % (module, sys.exc_info()[0], e)) logger.warn(module + ' Unable to create temporary directory to perform meta-tagging. Processing without metatagging.') tidyup(og_filepath, new_filepath, new_folder, manualmeta) return "fail" diff --git a/mylar/getcomics.py b/mylar/getcomics.py index 7ac6db00..aaad9302 100644 --- a/mylar/getcomics.py +++ b/mylar/getcomics.py @@ -139,7 +139,7 @@ class GC(object): series = option_find elif 'Year' in option_find: year = option_find.findNext(text=True) - year = re.sub('|', '', year).strip() + year = re.sub('\|', '', year).strip() else: if 'Size' in prev_option: size = option_find #.findNext(text=True) @@ -238,7 +238,7 @@ class GC(object): f.write(chunk) f.flush() - except exception as e: + except Exception as e: logger.error('[ERROR] %s' % e) mylar.DDL_LOCK = False return ({"success": False, @@ -250,7 +250,7 @@ class GC(object): if os.path.isfile(path): return ({"success": True, "filename": filename, - "path": path}) + "path": mylar.CONFIG.DDL_LOCATION}) def issue_list(self, pack): #packlist = [x.strip() for x in pack.split(',)] diff --git a/mylar/search.py b/mylar/search.py index cd4bd735..d64ff50f 100755 --- a/mylar/search.py +++ b/mylar/search.py @@ -897,7 +897,7 @@ def NZB_SEARCH(ComicName, IssueNumber, ComicYear, SeriesYear, Publisher, IssueDa elif entry['site'] == 'DDL': comsize_b = helpers.human2bytes(entry['size']) except Exception as e: - logger.warn('[ERROR] %s' % e) + logger.warn('[ERROR] %s [%s]' % (e, entry)) tmpsz = entry.enclosures[0] comsize_b = tmpsz['length'] @@ -1369,7 +1369,7 @@ def NZB_SEARCH(ComicName, IssueNumber, ComicYear, SeriesYear, Publisher, IssueDa if parsed_comic['issue_number'] is None: pc_in = None else: - pc_in = int(parsed_comic['issue_number']) + pc_in = helpers.issuedigits(parsed_comic['issue_number']) #issue comparison now as well if int(intIss) == int(comintIss) or all([cmloopit == 4, findcomiciss is None, pc_in is None]) or all([cmloopit == 4, findcomiciss is None, pc_in == 1]): From 
c3370af4aef8f6f750a09f2b6f20e2a0a71bdd0c Mon Sep 17 00:00:00 2001 From: evilhero Date: Tue, 29 Jan 2019 11:19:58 -0500 Subject: [PATCH 20/54] Update issue templates for bug reporting --- .github/ISSUE_TEMPLATE/bug_report.md | 23 +++++++++++++++++++++++ 1 file changed, 23 insertions(+) create mode 100644 .github/ISSUE_TEMPLATE/bug_report.md diff --git a/.github/ISSUE_TEMPLATE/bug_report.md b/.github/ISSUE_TEMPLATE/bug_report.md new file mode 100644 index 00000000..3f6688c5 --- /dev/null +++ b/.github/ISSUE_TEMPLATE/bug_report.md @@ -0,0 +1,23 @@ +--- +name: Bug report +about: Help us to help you. Drop your problem with the requested info - or we just + drop the issue. +title: '' +labels: '' +assignees: '' + +--- + +**Describe the bug** +A clear and concise description of what the bug is. + +**Screenshots/Logs** +Include a DEBUG log using the log files and do not paste from the GUI log. +Do not just paste a traceback/error. + +**Environment (please complete the following information):** + - OS: [e.g. Windows10] + - Branch / commit: [e.g. Dev/38aksi1] + +**Additional information** +Add any other information about the problem here. From d9aacb147d86f2b5342b392cd113a793a5cbbfde Mon Sep 17 00:00:00 2001 From: Mark Beznos Date: Fri, 25 Jan 2019 00:14:14 -0500 Subject: [PATCH 21/54] Update login.css switched to using https to avoid console warning when mylar served over http --- data/css/login.css | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/data/css/login.css b/data/css/login.css index 79d2842c..9f2c3407 100755 --- a/data/css/login.css +++ b/data/css/login.css @@ -2,7 +2,7 @@ font-family: 'Lato'; font-style: normal; font-weight: 400; - src: local('Lato Regular'), local('Lato-Regular'), url(http://themes.googleusercontent.com/static/fonts/lato/v7/qIIYRU-oROkIk8vfvxw6QvesZW2xOQ-xsNqO47m55DA.woff) format('woff'); + src: local('Lato Regular'), local('Lato-Regular'), url(https://themes.googleusercontent.com/static/fonts/lato/v7/qIIYRU-oROkIk8vfvxw6QvesZW2xOQ-xsNqO47m55DA.woff) format('woff'); } body { From ccbe865f6845d451e1bb56743ac23eb2f435d370 Mon Sep 17 00:00:00 2001 From: Toby Nieboer Date: Thu, 22 Nov 2018 12:14:24 +1100 Subject: [PATCH 22/54] Misc typos in init-scripts/systemd/read.me --- init-scripts/systemd/read.me | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/init-scripts/systemd/read.me b/init-scripts/systemd/read.me index 3d245788..f0447422 100644 --- a/init-scripts/systemd/read.me +++ b/init-scripts/systemd/read.me @@ -1,11 +1,11 @@ -Instructions on setting up mylar as a systemd serivce that will run on startup/via systemctl commands... +Instructions on setting up mylar as a systemd service that will run on startup/via systemctl commands... 
1 - copy the mylar.service to /lib/systemd/system/mylar.service 2 - create a symbolic link to it: ln -s /lib/systemd/system/mylar.service /etc/systemd/system/mylar.service 3 - copy mylar.default to /etc/default/mylar (make sure it's renamed from mylar.default to just mylar) 4 - copy mylar.initd to /etc/init.d/mylar (rename it to just mylar) and then 'sudo chmod +x /etc/init.d/mylar' 5 - edit the /etc/default/mylar file to your defaults (make sure to set MYLAR_USER & MYLAR_HOME as they're required) -6 - make systemd aware of new services: sudo sytemctl daemon-reload +6 - make systemd aware of new services: sudo systemctl daemon-reload 7 - sudo systemctl enable mylar 8 - sudo systemctl start mylar -9 - to check to see if running/status - sudo sytemctl status mylar +9 - to check to see if running/status - sudo systemctl status mylar From 43ca4825e5067ee8f2bc8ea21c46fbb4ad70d8b9 Mon Sep 17 00:00:00 2001 From: evilhero Date: Wed, 30 Jan 2019 17:08:30 -0500 Subject: [PATCH 23/54] FIX:(#2179) Post-processing item would fail if match would occur on story-arc check, FIX: Fixed some sub-directory problems when doing various types of scans, IMP: Added booktype to filechecker parsing results, FIX: When downloading via DDL, would not adhere to the booktype as a search-result constraint with regards to matching --- mylar/PostProcessor.py | 34 +++++++++++++++++++++------------- mylar/__init__.py | 10 ++++++++-- mylar/filechecker.py | 29 +++++++++++++++++++---------- mylar/search.py | 8 ++++++-- 4 files changed, 54 insertions(+), 27 deletions(-) diff --git a/mylar/PostProcessor.py b/mylar/PostProcessor.py index a63ddeea..5adaf203 100755 --- a/mylar/PostProcessor.py +++ b/mylar/PostProcessor.py @@ -345,35 +345,35 @@ class PostProcessor(object): def Process(self): module = self.module - self._log("nzb name: " + self.nzb_name) - self._log("nzb folder: " + self.nzb_folder) - logger.fdebug(module + ' nzb name: ' + self.nzb_name) - logger.fdebug(module + ' nzb folder: ' + self.nzb_folder) + self._log('nzb name: %s' % self.nzb_name) + self._log('nzb folder: %s' % self.nzb_folder) + logger.fdebug('%s nzb name: %s' % (module, self.nzb_name)) + logger.fdebug('%s nzb folder: %s' % (module, self.nzb_folder)) if self.ddl is False: if mylar.USE_SABNZBD==1: if self.nzb_name != 'Manual Run': - logger.fdebug(module + ' Using SABnzbd') - logger.fdebug(module + ' NZB name as passed from NZBGet: ' + self.nzb_name) + logger.fdebug('%s Using SABnzbd' % module) + logger.fdebug('%s NZB name as passed from NZBGet: %s' % (module, self.nzb_name)) if self.nzb_name == 'Manual Run': - logger.fdebug(module + ' Manual Run Post-Processing enabled.') + logger.fdebug('%s Manual Run Post-Processing enabled.' % module) else: # if the SAB Directory option is enabled, let's use that folder name and append the jobname. if all([mylar.CONFIG.SAB_TO_MYLAR, mylar.CONFIG.SAB_DIRECTORY is not None, mylar.CONFIG.SAB_DIRECTORY != 'None']): self.nzb_folder = os.path.join(mylar.CONFIG.SAB_DIRECTORY, self.nzb_name).encode(mylar.SYS_ENCODING) - logger.fdebug(module + ' SABnzbd Download folder option enabled. Directory set to : ' + self.nzb_folder) + logger.fdebug('%s SABnzbd Download folder option enabled. 
Directory set to : %s' % (module, self.nzb_folder)) if mylar.USE_NZBGET==1: if self.nzb_name != 'Manual Run': - logger.fdebug(module + ' Using NZBGET') - logger.fdebug(module + ' NZB name as passed from NZBGet: ' + self.nzb_name) + logger.fdebug('%s Using NZBGET' % module) + logger.fdebug('%s NZB name as passed from NZBGet: %s' % (module, self.nzb_name)) # if the NZBGet Directory option is enabled, let's use that folder name and append the jobname. if self.nzb_name == 'Manual Run': - logger.fdebug(module + ' Manual Run Post-Processing enabled.') + logger.fdebug('%s Manual Run Post-Processing enabled.' % module) elif all([mylar.CONFIG.NZBGET_DIRECTORY is not None, mylar.CONFIG.NZBGET_DIRECTORY is not 'None']): - logger.fdebug(module + ' NZB name as passed from NZBGet: ' + self.nzb_name) + logger.fdebug('%s NZB name as passed from NZBGet: %s' % (module, self.nzb_name)) self.nzb_folder = os.path.join(mylar.CONFIG.NZBGET_DIRECTORY, self.nzb_name).encode(mylar.SYS_ENCODING) - logger.fdebug(module + ' NZBGET Download folder option enabled. Directory set to : ' + self.nzb_folder) + logger.fdebug('%s NZBGET Download folder option enabled. Directory set to : %s' % (module, self.nzb_folder)) else: logger.fdebug('%s Now performing post-processing of %s sent from DDL' % (module, self.nzb_name)) @@ -806,6 +806,13 @@ class PostProcessor(object): if watchmatch['sub']: logger.fdebug('%s[SUB: %s][CLOCATION: %s]' % (module, watchmatch['sub'], watchmatch['comiclocation'])) clocation = os.path.join(watchmatch['comiclocation'], watchmatch['sub'], helpers.conversion(watchmatch['comicfilename'])) + if not os.path.exists(clocation): + scrubs = re.sub(watchmatch['comiclocation'], '', watchmatch['sub']).strip() + if scrubs[:2] == '//' or scrubs[:2] == '\\': + scrubs = scrubs[1:] + if os.path.exists(scrubs): + logger.fdebug('[MODIFIED CLOCATION] %s' % scrubs) + clocation = scrubs else: logger.fdebug('%s[CLOCATION] %s' % (module, watchmatch['comiclocation'])) if self.issueid is not None and os.path.isfile(watchmatch['comiclocation']): @@ -938,6 +945,7 @@ class PostProcessor(object): "WatchValues": {"SeriesYear": av['SeriesYear'], "LatestDate": av['IssueDate'], "ComicVersion": 'v' + str(av['SeriesYear']), + "ComicID": av['ComicID'], "Publisher": av['IssuePublisher'], "Total": av['TotalIssues'], # this will return the total issues in the arc (not needed for this) "Type": av['Type'], diff --git a/mylar/__init__.py b/mylar/__init__.py index 98c45076..876b9ed9 100644 --- a/mylar/__init__.py +++ b/mylar/__init__.py @@ -504,7 +504,7 @@ def dbcheck(): c.execute('SELECT ReleaseDate from storyarcs') except sqlite3.OperationalError: try: - c.execute('CREATE TABLE IF NOT EXISTS storyarcs(StoryArcID TEXT, ComicName TEXT, IssueNumber TEXT, SeriesYear TEXT, IssueYEAR TEXT, StoryArc TEXT, TotalIssues TEXT, Status TEXT, inCacheDir TEXT, Location TEXT, IssueArcID TEXT, ReadingOrder INT, IssueID TEXT, ComicID TEXT, ReleaseDate TEXT, IssueDate TEXT, Publisher TEXT, IssuePublisher TEXT, IssueName TEXT, CV_ArcID TEXT, Int_IssueNumber INT, DynamicComicName TEXT, Volume TEXT, Manual TEXT, DateAdded TEXT, DigitalDate TEXT)') + c.execute('CREATE TABLE IF NOT EXISTS storyarcs(StoryArcID TEXT, ComicName TEXT, IssueNumber TEXT, SeriesYear TEXT, IssueYEAR TEXT, StoryArc TEXT, TotalIssues TEXT, Status TEXT, inCacheDir TEXT, Location TEXT, IssueArcID TEXT, ReadingOrder INT, IssueID TEXT, ComicID TEXT, ReleaseDate TEXT, IssueDate TEXT, Publisher TEXT, IssuePublisher TEXT, IssueName TEXT, CV_ArcID TEXT, Int_IssueNumber INT, DynamicComicName TEXT, 
Volume TEXT, Manual TEXT, DateAdded TEXT, DigitalDate TEXT, Type TEXT)') c.execute('INSERT INTO storyarcs(StoryArcID, ComicName, IssueNumber, SeriesYear, IssueYEAR, StoryArc, TotalIssues, Status, inCacheDir, Location, IssueArcID, ReadingOrder, IssueID, ComicID, ReleaseDate, IssueDate, Publisher, IssuePublisher, IssueName, CV_ArcID, Int_IssueNumber, DynamicComicName, Volume, Manual) SELECT StoryArcID, ComicName, IssueNumber, SeriesYear, IssueYEAR, StoryArc, TotalIssues, Status, inCacheDir, Location, IssueArcID, ReadingOrder, IssueID, ComicID, StoreDate, IssueDate, Publisher, IssuePublisher, IssueName, CV_ArcID, Int_IssueNumber, DynamicComicName, Volume, Manual FROM readinglist') c.execute('DROP TABLE readinglist') except sqlite3.OperationalError: @@ -527,7 +527,8 @@ def dbcheck(): c.execute('CREATE TABLE IF NOT EXISTS oneoffhistory (ComicName TEXT, IssueNumber TEXT, ComicID TEXT, IssueID TEXT, Status TEXT, weeknumber TEXT, year TEXT)') c.execute('CREATE TABLE IF NOT EXISTS jobhistory (JobName TEXT, prev_run_datetime timestamp, prev_run_timestamp REAL, next_run_datetime timestamp, next_run_timestamp REAL, last_run_completed TEXT, successful_completions TEXT, failed_completions TEXT, status TEXT)') c.execute('CREATE TABLE IF NOT EXISTS manualresults (provider TEXT, id TEXT, kind TEXT, comicname TEXT, volume TEXT, oneoff TEXT, fullprov TEXT, issuenumber TEXT, modcomicname TEXT, name TEXT, link TEXT, size TEXT, pack_numbers TEXT, pack_issuelist TEXT, comicyear TEXT, issuedate TEXT, tmpprov TEXT, pack TEXT, issueid TEXT, comicid TEXT, sarc TEXT, issuearcid TEXT)') - c.execute('CREATE TABLE IF NOT EXISTS storyarcs(StoryArcID TEXT, ComicName TEXT, IssueNumber TEXT, SeriesYear TEXT, IssueYEAR TEXT, StoryArc TEXT, TotalIssues TEXT, Status TEXT, inCacheDir TEXT, Location TEXT, IssueArcID TEXT, ReadingOrder INT, IssueID TEXT, ComicID TEXT, ReleaseDate TEXT, IssueDate TEXT, Publisher TEXT, IssuePublisher TEXT, IssueName TEXT, CV_ArcID TEXT, Int_IssueNumber INT, DynamicComicName TEXT, Volume TEXT, Manual TEXT, DateAdded TEXT, DigitalDate TEXT)') + c.execute('CREATE TABLE IF NOT EXISTS storyarcs(StoryArcID TEXT, ComicName TEXT, IssueNumber TEXT, SeriesYear TEXT, IssueYEAR TEXT, StoryArc TEXT, TotalIssues TEXT, Status TEXT, inCacheDir TEXT, Location TEXT, IssueArcID TEXT, ReadingOrder INT, IssueID TEXT, ComicID TEXT, ReleaseDate TEXT, IssueDate TEXT, Publisher TEXT, IssuePublisher TEXT, IssueName TEXT, CV_ArcID TEXT, Int_IssueNumber INT, DynamicComicName TEXT, Volume TEXT, Manual TEXT, DateAdded TEXT, DigitalDate TEXT, Type TEXT)') + c.execute('CREATE TABLE IF NOT EXISTS ddl_info (ID TEXT UNIQUE, series TEXT, year TEXT, filename TEXT, size TEXT, issueid TEXT, comicid TEXT, link TEXT, status TEXT)') conn.commit c.close @@ -1037,6 +1038,11 @@ def dbcheck(): except sqlite3.OperationalError: c.execute('ALTER TABLE storyarcs ADD COLUMN DigitalDate TEXT') + try: + c.execute('SELECT Type from storyarcs') + except sqlite3.OperationalError: + c.execute('ALTER TABLE storyarcs ADD COLUMN Type TEXT') + ## -- searchresults Table -- try: c.execute('SELECT SRID from searchresults') diff --git a/mylar/filechecker.py b/mylar/filechecker.py index 99c477d9..798891ea 100755 --- a/mylar/filechecker.py +++ b/mylar/filechecker.py @@ -130,7 +130,8 @@ class FileChecker(object): 'issue_year': runresults['issue_year'], 'issue_number': runresults['issue_number'], 'scangroup': runresults['scangroup'], - 'reading_order': runresults['reading_order'] + 'reading_order': runresults['reading_order'], + 'booktype': runresults['booktype'] } 
else: filelist = self.traverse_directories(self.dir) @@ -168,7 +169,8 @@ class FileChecker(object): 'issue_year': runresults['issue_year'], 'issue_number': runresults['issue_number'], 'scangroup': runresults['scangroup'], - 'reading_order': runresults['reading_order'] + 'reading_order': runresults['reading_order'], + 'booktype': runresults['booktype'] }) else: comiclist.append({ @@ -182,7 +184,8 @@ class FileChecker(object): 'JusttheDigits': runresults['justthedigits'], 'AnnualComicID': runresults['annual_comicid'], 'issueid': runresults['issueid'], - 'scangroup': runresults['scangroup'] + 'scangroup': runresults['scangroup'], + 'booktype': runresults['booktype'] }) comiccnt +=1 else: @@ -198,7 +201,8 @@ class FileChecker(object): 'issue_year': runresults['issue_year'], 'issue_number': runresults['issue_number'], 'issueid': runresults['issueid'], - 'scangroup': runresults['scangroup'] + 'scangroup': runresults['scangroup'], + 'booktype': runresults['booktype'] }) watchmatch['comiccount'] = comiccnt @@ -233,9 +237,9 @@ class FileChecker(object): tmppath = re.sub(path, '', subpath).strip() path_list = os.path.normpath(tmppath) - if '/' == path_list[0] or '\\' == path_list[0]: - #need to remove any leading slashes so the os join can properly join the components - path_list = path_list[1:] + #if '/' == path_list[0] or '\\' == path_list[0]: + # #need to remove any leading slashes so the os join can properly join the components + # path_list = path_list[1:] #path_list = tmppath.split(os.sep)[-1] logger.fdebug('[SUB-PATH] subpath set to : ' + path_list) @@ -1083,6 +1087,7 @@ class FileChecker(object): 'issue_year': issue_year, 'annual_comicid': None, 'scangroup': scangroup, + 'booktype': booktype, 'reading_order': None} if self.justparse: @@ -1101,6 +1106,7 @@ class FileChecker(object): 'issue_year': issue_year, 'issue_number': issue_number, 'scangroup': scangroup, + 'booktype': booktype, 'reading_order': reading_order} series_info = {} @@ -1116,7 +1122,8 @@ class FileChecker(object): 'series_volume': issue_volume, 'issue_year': issue_year, 'issue_number': issue_number, - 'scangroup': scangroup} + 'scangroup': scangroup, + 'booktype': booktype} return self.matchIT(series_info) @@ -1279,7 +1286,8 @@ class FileChecker(object): 'issueid': series_info['issueid'], 'justthedigits': justthedigits, 'annual_comicid': annual_comicid, - 'scangroup': series_info['scangroup']} + 'scangroup': series_info['scangroup'], + 'booktype': series_info['booktype']} else: #logger.fdebug('[NO MATCH] ' + filename + ' [WATCHLIST:' + self.watchcomic + ']') @@ -1294,7 +1302,8 @@ class FileChecker(object): 'series_volume': series_info['series_volume'], 'issue_year': series_info['issue_year'], 'issueid': series_info['issueid'], - 'scangroup': series_info['scangroup']} + 'scangroup': series_info['scangroup'], + 'booktype': series_info['booktype']} def char_file_position(self, file, findchar, lastpos): diff --git a/mylar/search.py b/mylar/search.py index d64ff50f..1d783fb6 100755 --- a/mylar/search.py +++ b/mylar/search.py @@ -1113,7 +1113,7 @@ def NZB_SEARCH(ComicName, IssueNumber, ComicYear, SeriesYear, Publisher, IssueDa parsed_comic = p_comic.listFiles() logger.fdebug('parsed_info: %s' % parsed_comic) - if parsed_comic['parse_status'] == 'success': + if parsed_comic['parse_status'] == 'success' and (all([booktype == 'Print', parsed_comic['booktype'] == 'issue']) or booktype == parsed_comic['booktype']): try: fcomic = filechecker.FileChecker(watchcomic=ComicName) filecomic = fcomic.matchIT(parsed_comic) @@ -1122,8 +1122,12 @@ 
def NZB_SEARCH(ComicName, IssueNumber, ComicYear, SeriesYear, Publisher, IssueDa continue else: logger.fdebug('match_check: %s' % filecomic) + elif booktype != parsed_comic['booktype']: + logger.fdebug('Booktypes do not match. Looking for %s, this is a %s. Ignoring this result.' % (booktype, parsed_comic['booktype'])) + continue else: - logger.fdebug('Unable to parse name properly: %s' % filecomic) + logger.fdebug('Unable to parse name properly: %s. Ignoring this result' % filecomic) + continue #adjust for covers only by removing them entirely... vers4year = "no" From 5016fe1b022374f46f0fb2af93ce9a291fa4e874 Mon Sep 17 00:00:00 2001 From: evilhero Date: Thu, 31 Jan 2019 09:30:33 -0500 Subject: [PATCH 24/54] FIX:(#2179) Fix for post-processing failing when comparing against story arcs --- mylar/PostProcessor.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/mylar/PostProcessor.py b/mylar/PostProcessor.py index 5adaf203..9325c3cd 100755 --- a/mylar/PostProcessor.py +++ b/mylar/PostProcessor.py @@ -978,7 +978,7 @@ class PostProcessor(object): nm+=1 else: try: - if v[i]['ArcValues']['Type'] == 'TPB' and v[i]['ArcValues']['Total'] > 1: + if v[i]['WatchValues']['Type'] == 'TPB' and v[i]['ArcValues']['Total'] > 1: if watchmatch['series_volume'] is not None: just_the_digits = re.sub('[^0-9]', '', arcmatch['series_volume']).strip() else: From 2220d047555acb8a19ae0684ff61ca45cd10a22c Mon Sep 17 00:00:00 2001 From: evilhero Date: Thu, 31 Jan 2019 13:40:15 -0500 Subject: [PATCH 25/54] FIX:(#2181) Fix for incorrect sub-path when manually post-processing issues --- mylar/PostProcessor.py | 4 ++-- mylar/filechecker.py | 7 +++---- 2 files changed, 5 insertions(+), 6 deletions(-) diff --git a/mylar/PostProcessor.py b/mylar/PostProcessor.py index 9325c3cd..b4072913 100755 --- a/mylar/PostProcessor.py +++ b/mylar/PostProcessor.py @@ -630,7 +630,7 @@ class PostProcessor(object): if just_the_digits is not None: temploc= just_the_digits.replace('_', ' ') temploc = re.sub('[\#\']', '', temploc) - logger.fdebug('temploc: %s' % temploc) + #logger.fdebug('temploc: %s' % temploc) else: temploc = None datematch = "False" @@ -993,7 +993,7 @@ class PostProcessor(object): if just_the_digits is not None: temploc= just_the_digits.replace('_', ' ') temploc = re.sub('[\#\']', '', temploc) - logger.fdebug('temploc: %s' % temploc) + #logger.fdebug('temploc: %s' % temploc) else: temploc = None diff --git a/mylar/filechecker.py b/mylar/filechecker.py index 798891ea..6c13be35 100755 --- a/mylar/filechecker.py +++ b/mylar/filechecker.py @@ -237,10 +237,9 @@ class FileChecker(object): tmppath = re.sub(path, '', subpath).strip() path_list = os.path.normpath(tmppath) - #if '/' == path_list[0] or '\\' == path_list[0]: - # #need to remove any leading slashes so the os join can properly join the components - # path_list = path_list[1:] - #path_list = tmppath.split(os.sep)[-1] + if '/' == path_list[0] or '\\' == path_list[0]: + #need to remove any leading slashes so the os join can properly join the components + path_list = path_list[1:] logger.fdebug('[SUB-PATH] subpath set to : ' + path_list) From 41e5f42471b700a89fdf7bb058cea9e86f35cff3 Mon Sep 17 00:00:00 2001 From: evilhero Date: Fri, 1 Feb 2019 16:25:24 -0500 Subject: [PATCH 26/54] FIX:(#2179) Post-processing item would fail if match would occur on story-arc check, FIX: Fixed some sub-directory problems when doing various types of scans, IMP: Added booktype to filechecker parsing results, FIX: When downloading via DDL, would not adhere to the booktype as a 
restraint, IMP: Pack support added for DDL (available as a per series option), IMP: Added BookType & Aliases to the arc's section which will impact how issues/series are searched/post-processed/checked when they're an issue from an arc, IMP: Initial codebase for a queue manager section, IMP: Write DDL-Queue data to the sql table so that stalled/broken downloads can be resumed/deleted etc eventually, FIX: If a filename didn't have a valid issue number and it is a Print Edition, will now throw a warning indicating other options to try instead of causing a traceback, IMP: Updated snatch notifications so the notification header will just say 'Issue Snatched' with a brief description, FIX: Removed multiple import db lines from the helpers module, IMP: cleanup_cache variable (true/false) added to config section which will initiate a cleanup of items in the cache directory on startup which will remove items that are no longer needed, IMP: Changed some logging string concatenation lines to try and avoid traceback errors due to logging --- mylar/PostProcessor.py | 642 +++++++++++++++++++++-------------------- mylar/__init__.py | 9 +- mylar/config.py | 26 +- mylar/cv.py | 157 +++++++++- mylar/db.py | 3 +- mylar/filechecker.py | 2 +- mylar/getcomics.py | 81 +++++- mylar/helpers.py | 102 ++++--- mylar/search.py | 103 ++++--- mylar/updater.py | 2 + mylar/webserve.py | 52 +++- 11 files changed, 750 insertions(+), 429 deletions(-) diff --git a/mylar/PostProcessor.py b/mylar/PostProcessor.py index b4072913..752c8ce0 100755 --- a/mylar/PostProcessor.py +++ b/mylar/PostProcessor.py @@ -197,12 +197,12 @@ class PostProcessor(object): file_to_move = os.path.split(path_to_move)[1] if dupeinfo['action'] == 'dupe_src' and mylar.CONFIG.FILE_OPTS == 'move': - logger.info('[DUPLICATE-CLEANUP] New File will be post-processed. Moving duplicate [' + path_to_move + '] to Duplicate Dump Folder for manual intervention.') + logger.info('[DUPLICATE-CLEANUP] New File will be post-processed. Moving duplicate [%s] to Duplicate Dump Folder for manual intervention.' % path_to_move) else: if mylar.CONFIG.FILE_OPTS == 'move': - logger.info('[DUPLICATE-CLEANUP][MOVE-MODE] New File will not be post-processed. Moving duplicate [' + path_to_move + '] to Duplicate Dump Folder for manual intervention.') + logger.info('[DUPLICATE-CLEANUP][MOVE-MODE] New File will not be post-processed. Moving duplicate [%s] to Duplicate Dump Folder for manual intervention.' % path_to_move) else: - logger.info('[DUPLICATE-CLEANUP][COPY-MODE] NEW File will not be post-processed. Retaining file in original location [' + path_to_move + ']') + logger.info('[DUPLICATE-CLEANUP][COPY-MODE] NEW File will not be post-processed. Retaining file in original location [%s]' % path_to_move) return True #this gets tricky depending on if it's the new filename or the existing filename, and whether or not 'copy' or 'move' has been selected. @@ -212,10 +212,10 @@ class PostProcessor(object): try: shutil.move(path_to_move, os.path.join(mylar.CONFIG.DUPLICATE_DUMP, file_to_move)) except (OSError, IOError): - logger.warn('[DUPLICATE-CLEANUP] Failed to move ' + path_to_move + ' ... to ... ' + os.path.join(mylar.CONFIG.DUPLICATE_DUMP, file_to_move)) + logger.warn('[DUPLICATE-CLEANUP] Failed to move ... to ... %s' % (path_to_move, os.path.join(mylar.CONFIG.DUPLICATE_DUMP, file_to_move))) return False - logger.warn('[DUPLICATE-CLEANUP] Successfully moved ' + path_to_move + ' ... to ... 
' + os.path.join(mylar.CONFIG.DUPLICATE_DUMP, file_to_move)) + logger.warn('[DUPLICATE-CLEANUP] Successfully moved ... to ... %s' % (path_to_move, os.path.join(mylar.CONFIG.DUPLICATE_DUMP, file_to_move))) return True def tidyup(self, odir=None, del_nzbdir=False, sub_path=None, cacheonly=False, filename=None): @@ -232,7 +232,7 @@ class PostProcessor(object): #if sub_path exists, then we need to use that in place of self.nzb_folder since the file was in a sub-directory within self.nzb_folder if all([sub_path is not None, sub_path != self.nzb_folder]): #, self.issueid is not None]): if self.issueid is None: - logger.fdebug('Sub-directory detected during cleanup. Will attempt to remove if empty: ' + sub_path) + logger.fdebug('Sub-directory detected during cleanup. Will attempt to remove if empty: %s' % sub_path) orig_folder = sub_path else: logger.fdebug('Direct post-processing was performed against specific issueid. Using supplied filepath for deletion.') @@ -255,9 +255,9 @@ class PostProcessor(object): if all([mylar.CONFIG.FILE_OPTS == 'move', self.nzb_name == 'Manual Run', tmp_folder != self.nzb_folder]): if not os.listdir(tmp_folder): - logger.fdebug(self.module + ' Tidying up. Deleting sub-folder location : ' + tmp_folder) + logger.fdebug('%s Tidying up. Deleting sub-folder location : %s' % (self.module, tmp_folder)) shutil.rmtree(tmp_folder) - self._log("Removed temporary directory : " + tmp_folder) + self._log("Removed temporary directory : %s" % tmp_folder) else: if filename is not None: if os.path.isfile(os.path.join(tmp_folder,filename)): @@ -275,13 +275,13 @@ class PostProcessor(object): logger.warn('%s [%s] Unable to delete original folder location: %s' % (self.module, e, tmp_folder)) else: logger.fdebug('%s Removed original folder location: %s' % (self.module, tmp_folder)) - self._log("Removed temporary directory : " + tmp_folder) + self._log("Removed temporary directory : %s" % tmp_folder) else: - self._log('Failed to remove temporary directory: ' + tmp_folder) + self._log('Failed to remove temporary directory: %s' % tmp_folder) logger.error('%s %s not empty. Skipping removal of directory - this will either be caught in further post-processing or it will have to be manually deleted.' % (self.module, tmp_folder)) else: self._log('Failed to remove temporary directory: ' + tmp_folder) - logger.error(self.module + ' ' + tmp_folder + ' not empty. Skipping removal of directory - this will either be caught in further post-processing or it will have to be manually deleted.') + logger.error('%s %s not empty. Skipping removal of directory - this will either be caught in further post-processing or it will have to be manually deleted.' % (self.module, tmp_folder)) elif all([mylar.CONFIG.FILE_OPTS == 'move', self.nzb_name == 'Manual Run', filename is not None]): if os.path.isfile(os.path.join(tmp_folder,filename)): @@ -293,9 +293,9 @@ class PostProcessor(object): elif mylar.CONFIG.FILE_OPTS == 'move' and all([del_nzbdir is True, self.nzb_name != 'Manual Run']): #tmp_folder != self.nzb_folder]): if not os.listdir(tmp_folder): - logger.fdebug(self.module + ' Tidying up. Deleting original folder location : ' + tmp_folder) + logger.fdebug('%s Tidying up. 
Deleting original folder location : %s' % (self.module, tmp_folder)) shutil.rmtree(tmp_folder) - self._log("Removed temporary directory : " + tmp_folder) + self._log("Removed temporary directory : %s" % tmp_folder) else: if filename is not None: if os.path.isfile(os.path.join(tmp_folder,filename)): @@ -331,15 +331,15 @@ class PostProcessor(object): except OSError: pass if not os.listdir(odir): - logger.fdebug(self.module + ' Tidying up. Deleting temporary cache directory : ' + odir) + logger.fdebug('%s Tidying up. Deleting temporary cache directory : %s' % (self.module, odir)) shutil.rmtree(odir) - self._log("Removed temporary directory : " + odir) + self._log("Removed temporary directory : %s" % odir) else: - self._log('Failed to remove temporary directory: ' + odir) - logger.error(self.module + ' ' + odir + ' not empty. Skipping removal of temporary cache directory - this will either be caught in further post-processing or have to be manually deleted.') + self._log('Failed to remove temporary directory: %s' % odir) + logger.error('%s %s not empty. Skipping removal of temporary cache directory - this will either be caught in further post-processing or have to be manually deleted.' % (self.module, odir)) except (OSError, IOError): - logger.fdebug(self.module + ' Failed to remove directory - Processing will continue, but manual removal is necessary') + logger.fdebug('%s Failed to remove directory - Processing will continue, but manual removal is necessary' % self.module) self._log('Failed to remove temporary directory') @@ -388,7 +388,7 @@ class PostProcessor(object): elif all([self.apicall is True, self.issueid is None, self.comicid is None, self.nzb_name.startswith('0-Day')]): logger.fdebug('%s Now post-processing 0-day pack: %s' % (module, self.nzb_name)) else: - logger.fdebug(module + ' Manual Run initiated') + logger.fdebug('%s Manual Run initiated' % module) #Manual postprocessing on a folder. #first we get a parsed results list of the files being processed, and then poll against the sql to get a short list of hits. flc = filechecker.FileChecker(self.nzb_folder, justparse=True, pp_mode=True) @@ -396,7 +396,7 @@ class PostProcessor(object): if filelist['comiccount'] == 0: # is None: logger.warn('There were no files located - check the debugging logs if you think this is in error.') return - logger.info('I have located ' + str(filelist['comiccount']) + ' files that I should be able to post-process. Continuing...') + logger.info('I have located %s files that I should be able to post-process. Continuing...' % filelist['comiccount']) else: if all([self.comicid is None, '_' not in self.issueid]): cid = myDB.selectone('SELECT ComicID FROM issues where IssueID=?', [str(self.issueid)]).fetchone() @@ -526,9 +526,8 @@ class PostProcessor(object): tmpsql = "SELECT * FROM comics WHERE DynamicComicName IN ({seq}) COLLATE NOCASE".format(seq=','.join('?' * len(loopchk))) comicseries = myDB.select(tmpsql, tuple(loopchk)) if not comicseries: - logger.error(module + ' No Series in Watchlist - checking against Story Arcs (just in case). If I do not find anything, maybe you should be running Import?') + logger.error('%s No Series in Watchlist - checking against Story Arcs (just in case). If I do not find anything, maybe you should be running Import?' % module) break - watchvals = [] for wv in comicseries: logger.info('Now checking: %s [%s]' % (wv['ComicName'], wv['ComicID'])) @@ -536,7 +535,7 @@ class PostProcessor(object): #check for Paused status / #check for Ended status and 100% completion of issues. 
if wv['Status'] == 'Paused' or (wv['Have'] == wv['Total'] and not any(['Present' in wv['ComicPublished'], helpers.now()[:4] in wv['ComicPublished']])): - logger.warn(wv['ComicName'] + ' [' + wv['ComicYear'] + '] is either Paused or in an Ended status with 100% completion. Ignoring for match.') + logger.warn('%s [%s] is either Paused or in an Ended status with 100%% completion. Ignoring for match.' % (wv['ComicName'], wv['ComicYear'])) continue wv_comicname = wv['ComicName'] wv_comicpublisher = wv['ComicPublisher'] @@ -551,7 +550,7 @@ class PostProcessor(object): wv_publisher = wv['ComicPublisher'] wv_total = wv['Total'] if mylar.CONFIG.FOLDER_SCAN_LOG_VERBOSE: - logger.fdebug('Queuing to Check: ' + wv['ComicName'] + ' [' + str(wv['ComicYear']) + '] -- ' + str(wv['ComicID'])) + logger.fdebug('Queuing to Check: %s [%s] -- %s' % (wv['ComicName'], wv['ComicYear'], wv['ComicID'])) #force it to use the Publication Date of the latest issue instead of the Latest Date (which could be anything) latestdate = myDB.select('SELECT IssueDate from issues WHERE ComicID=? order by ReleaseDate DESC', [wv['ComicID']]) @@ -568,9 +567,9 @@ class PostProcessor(object): latestdate = wv['LatestDate'] if latestdate == '0000-00-00' or latestdate == 'None' or latestdate is None: - logger.fdebug('Forcing a refresh of series: ' + wv_comicname + ' as it appears to have incomplete issue dates.') + logger.fdebug('Forcing a refresh of series: %s as it appears to have incomplete issue dates.' % wv_comicname) updater.dbUpdate([wv_comicid]) - logger.fdebug('Refresh complete for ' + wv_comicname + '. Rechecking issue dates for completion.') + logger.fdebug('Refresh complete for %s. Rechecking issue dates for completion.' % wv_comicname) latestdate = myDB.select('SELECT IssueDate from issues WHERE ComicID=? order by ReleaseDate DESC', [wv['ComicID']]) if latestdate: tmplatestdate = latestdate[0][0] @@ -587,7 +586,7 @@ class PostProcessor(object): logger.fdebug('Latest Date (after forced refresh) set to :' + str(latestdate)) if latestdate == '0000-00-00' or latestdate == 'None' or latestdate is None: - logger.fdebug('Unable to properly attain the Latest Date for series: ' + wv_comicname + '. Cannot check against this series for post-processing.') + logger.fdebug('Unable to properly attain the Latest Date for series: %s. Cannot check against this series for post-processing.' % wv_comicname) continue watchvals.append({"ComicName": wv_comicname, @@ -638,14 +637,14 @@ class PostProcessor(object): if temploc is not None and (any(['annual' in temploc.lower(), 'special' in temploc.lower()]) and mylar.CONFIG.ANNUALS_ON is True): biannchk = re.sub('-', '', temploc.lower()).strip() if 'biannual' in biannchk: - logger.fdebug(module + ' Bi-Annual detected.') + logger.fdebug('%s Bi-Annual detected.' % module) fcdigit = helpers.issuedigits(re.sub('biannual', '', str(biannchk)).strip()) else: if 'annual' in temploc.lower(): fcdigit = helpers.issuedigits(re.sub('annual', '', str(temploc.lower())).strip()) else: fcdigit = helpers.issuedigits(re.sub('special', '', str(temploc.lower())).strip()) - logger.fdebug(module + ' Annual/Special detected [' + str(fcdigit) +']. ComicID assigned as ' + str(cs['ComicID'])) + logger.fdebug('%s Annual/Special detected [%s]. ComicID assigned as %s' % (module, fcdigit, cs['ComicID'])) annchk = "yes" issuechk = myDB.select("SELECT * from annuals WHERE ComicID=? 
AND Int_IssueNumber=?", [cs['ComicID'], fcdigit]) else: @@ -714,7 +713,7 @@ class PostProcessor(object): #compare the ReleaseDate for the issue, to the found issue date in the filename. #if ReleaseDate doesn't exist, use IssueDate #if no issue date was found, then ignore. - logger.fdebug(module + '[ISSUE-VERIFY] Now checking against ' + cs['ComicName'] + '-' + cs['ComicID']) + logger.fdebug('%s[ISSUE-VERIFY] Now checking against %s - %s' % (module, cs['ComicName'], cs['ComicID'])) issyr = None #logger.fdebug(module + ' issuedate:' + str(isc['IssueDate'])) #logger.fdebug(module + ' isc: ' + str(isc['IssueDate'][5:7])) @@ -723,33 +722,33 @@ class PostProcessor(object): #logger.info(module + ' IssueDate: ' + str(isc['IssueDate'])) if isc['DigitalDate'] is not None and isc['DigitalDate'] != '0000-00-00': if int(isc['DigitalDate'][:4]) < int(watchmatch['issue_year']): - logger.fdebug(module + '[ISSUE-VERIFY] ' + str(isc['DigitalDate']) + ' is before the issue year of ' + str(watchmatch['issue_year']) + ' that was discovered in the filename') + logger.fdebug('%s[ISSUE-VERIFY] %s is before the issue year of %s that was discovered in the filename' % (module, isc['DigitalDate'], watchmatch['issue_year'])) datematch = "False" elif isc['ReleaseDate'] is not None and isc['ReleaseDate'] != '0000-00-00': if int(isc['ReleaseDate'][:4]) < int(watchmatch['issue_year']): - logger.fdebug(module + '[ISSUE-VERIFY] ' + str(isc['ReleaseDate']) + ' is before the issue year of ' + str(watchmatch['issue_year']) + ' that was discovered in the filename') + logger.fdebug('%s[ISSUE-VERIFY] %s is before the issue year of %s that was discovered in the filename' % (module, isc['ReleaseDate'], watchmatch['issue_year'])) datematch = "False" else: if int(isc['IssueDate'][:4]) < int(watchmatch['issue_year']): - logger.fdebug(module + '[ISSUE-VERIFY] ' + str(isc['IssueDate']) + ' is before the issue year ' + str(watchmatch['issue_year']) + ' that was discovered in the filename') + logger.fdebug('%s[ISSUE-VERIFY] %s is before the issue year %s that was discovered in the filename' % (isc['IssueDate'], watchmatch['issue_year'])) datematch = "False" if int(monthval[5:7]) == 11 or int(monthval[5:7]) == 12: issyr = int(monthval[:4]) + 1 - logger.fdebug(module + '[ISSUE-VERIFY] IssueYear (issyr) is ' + str(issyr)) + logger.fdebug('%s[ISSUE-VERIFY] IssueYear (issyr) is %s' % (module, issyr)) elif int(monthval[5:7]) == 1 or int(monthval[5:7]) == 2 or int(monthval[5:7]) == 3: issyr = int(monthval[:4]) - 1 if datematch == "False" and issyr is not None: - logger.fdebug(module + '[ISSUE-VERIFY] ' + str(issyr) + ' comparing to ' + str(watchmatch['issue_year']) + ' : rechecking by month-check versus year.') + logger.fdebug('%s[ISSUE-VERIFY] %s comparing to %s : rechecking by month-check versus year.' % (issyr, watchmatch['issue_year'])) datematch = "True" if int(issyr) != int(watchmatch['issue_year']): - logger.fdebug(module + '[ISSUE-VERIFY][.:FAIL:.] Issue is before the modified issue year of ' + str(issyr)) + logger.fdebug('%s[ISSUE-VERIFY][.:FAIL:.] 
Issue is before the modified issue year of %s' % (module, issyr)) datematch = "False" else: - logger.info(module + '[ISSUE-VERIFY] Found matching issue # ' + str(fcdigit) + ' for ComicID: ' + str(cs['ComicID']) + ' / IssueID: ' + str(isc['IssueID'])) + logger.info('%s[ISSUE-VERIFY] Found matching issue # %s for ComicID: %s / IssueID: %s' % (module, fcdigit, cs['ComicID'], isc['IssueID'])) if datematch == "True": # if we get to here, we need to do some more comparisons just to make sure we have the right volume @@ -766,40 +765,40 @@ class PostProcessor(object): tmp_watchmatch_vol = re.sub("[^0-9]","", watchmatch['series_volume']).strip() if len(tmp_watchmatch_vol) == 4: if int(tmp_watchmatch_vol) == int(watch_values['SeriesYear']): - logger.fdebug(module + '[ISSUE-VERIFY][SeriesYear-Volume MATCH] Series Year of ' + str(watch_values['SeriesYear']) + ' matched to volume/year label of ' + str(tmp_watchmatch_vol)) + logger.fdebug('%s[ISSUE-VERIFY][SeriesYear-Volume MATCH] Series Year of %s matched to volume/year label of %s' % (module, watch_values['SeriesYear'], tmp_watchmatch_vol)) else: - logger.fdebug(module + '[ISSUE-VERIFY][SeriesYear-Volume FAILURE] Series Year of ' + str(watch_values['SeriesYear']) + ' DID NOT match to volume/year label of ' + tmp_watchmatch_vol) + logger.fdebug('%s[ISSUE-VERIFY][SeriesYear-Volume FAILURE] Series Year of %s DID NOT match to volume/year label of %s' % (module, watch_values['SeriesYear'], tmp_watchmatch_vol)) datematch = "False" if len(watchvals) > 1 and int(tmp_watchmatch_vol) > 1: if int(tmp_watchmatch_vol) == int(tmp_watchlist_vol): - logger.fdebug(module + '[ISSUE-VERIFY][SeriesYear-Volume MATCH] Volume label of series Year of ' + str(watch_values['ComicVersion']) + ' matched to volume label of ' + str(watchmatch['series_volume'])) + logger.fdebug('%s[ISSUE-VERIFY][SeriesYear-Volume MATCH] Volume label of series Year of %s matched to volume label of %s' % (module, watch_values['ComicVersion'], watchmatch['series_volume'])) else: - logger.fdebug(module + '[ISSUE-VERIFY][SeriesYear-Volume FAILURE] Volume label of Series Year of ' + str(watch_values['ComicVersion']) + ' DID NOT match to volume label of ' + str(watchmatch['series_volume'])) + logger.fdebug('%s[ISSUE-VERIFY][SeriesYear-Volume FAILURE] Volume label of Series Year of %s DID NOT match to volume label of %s' % (module, watch_values['ComicVersion'], watchmatch['series_volume'])) continue #datematch = "False" else: if any([tmp_watchlist_vol is None, tmp_watchlist_vol == 'None', tmp_watchlist_vol == '']): - logger.fdebug(module + '[ISSUE-VERIFY][NO VOLUME PRESENT] No Volume label present for series. Dropping down to Issue Year matching.') + logger.fdebug('%s[ISSUE-VERIFY][NO VOLUME PRESENT] No Volume label present for series. Dropping down to Issue Year matching.' % module) datematch = "False" elif len(watchvals) == 1 and int(tmp_watchlist_vol) == 1: - logger.fdebug(module + '[ISSUE-VERIFY][Lone Volume MATCH] Volume label of ' + str(watch_values['ComicVersion']) + ' indicates only volume for this series on your watchlist.') + logger.fdebug('%s[ISSUE-VERIFY][Lone Volume MATCH] Volume label of %s indicates only volume for this series on your watchlist.' 
% (module, watch_values['ComicVersion'])) elif int(tmp_watchlist_vol) > 1: - logger.fdebug(module + '[ISSUE-VERIFY][Lone Volume FAILURE] Volume label of ' + str(watch_values['ComicVersion']) + ' indicates that there is more than one volume for this series, but the one on your watchlist has no volume label set.') + logger.fdebug('%s[ISSUE-VERIFY][Lone Volume FAILURE] Volume label of %s indicates that there is more than one volume for this series, but the one on your watchlist has no volume label set.' % (module, watch_values['ComicVersion'])) datematch = "False" if datematch == "False" and all([watchmatch['issue_year'] is not None, watchmatch['issue_year'] != 'None', watch_issueyear is not None]): #now we see if the issue year matches exactly to what we have within Mylar. if int(watch_issueyear) == int(watchmatch['issue_year']): - logger.fdebug(module + '[ISSUE-VERIFY][Issue Year MATCH] Issue Year of ' + str(watch_issueyear) + ' is a match to the year found in the filename of : ' + str(watchmatch['issue_year'])) + logger.fdebug('%s[ISSUE-VERIFY][Issue Year MATCH] Issue Year of %s is a match to the year found in the filename of : %s' % (module, watch_issueyear, watchmatch['issue_year'])) datematch = 'True' else: - logger.fdebug(module + '[ISSUE-VERIFY][Issue Year FAILURE] Issue Year of ' + str(watch_issueyear) + ' does NOT match the year found in the filename of : ' + str(watchmatch['issue_year'])) - logger.fdebug(module + '[ISSUE-VERIFY] Checking against complete date to see if month published could allow for different publication year.') + logger.fdebug('%s[ISSUE-VERIFY][Issue Year FAILURE] Issue Year of %s does NOT match the year found in the filename of : %s' % (module, watch_issueyear, watchmatch['issue_year'])) + logger.fdebug('%s[ISSUE-VERIFY] Checking against complete date to see if month published could allow for different publication year.' 
% module) if issyr is not None: if int(issyr) != int(watchmatch['issue_year']): - logger.fdebug(module + '[ISSUE-VERIFY][Issue Year FAILURE] Modified Issue year of ' + str(issyr) + ' is before the modified issue year of ' + str(issyr)) + logger.fdebug('%s[ISSUE-VERIFY][Issue Year FAILURE] Modified Issue year of %s is before the modified issue year of %s' % (module, issyr, watchmatch['issue_year'])) else: - logger.fdebug(module + '[ISSUE-VERIFY][Issue Year MATCH] Modified Issue Year of ' + str(issyr) + ' is a match to the year found in the filename of : ' + str(watchmatch['issue_year'])) + logger.fdebug('%s[ISSUE-VERIFY][Issue Year MATCH] Modified Issue Year of %s is a match to the year found in the filename of : %s' % (module, issyr, watchmatch['issue_year'])) datematch = 'True' if datematch == 'True': @@ -843,10 +842,10 @@ class PostProcessor(object): "ForcedMatch": False}) break else: - logger.fdebug(module + '[NON-MATCH: ' + cs['ComicName'] + '-' + cs['ComicID'] + '] Incorrect series - not populating..continuing post-processing') + logger.fdebug('%s[NON-MATCH: %s-%s] Incorrect series - not populating..continuing post-processing' % (module, cs['ComicName'], cs['ComicID'])) continue else: - logger.fdebug(module + '[NON-MATCH: ' + cs['ComicName'] + '-' + cs['ComicID'] + '] Incorrect series - not populating..continuing post-processing') + logger.fdebug('%s[NON-MATCH: %s-%s] Incorrect series - not populating..continuing post-processing' % (module, cs['ComicName'], cs['ComicID'])) continue if datematch == 'True': @@ -865,10 +864,10 @@ class PostProcessor(object): continue #break if datematch == 'True': - logger.fdebug(module + '[SUCCESSFUL MATCH: ' + cs['ComicName'] + '-' + cs['ComicID'] + '] Match verified for ' + helpers.conversion(fl['comicfilename'])) + logger.fdebug('%s[SUCCESSFUL MATCH: %s-%s] Match verified for %s' % (module, cs['ComicName'], cs['ComicID'], helpers.conversion(fl['comicfilename']))) break elif self.matched is True: - logger.warn(module + '[MATCH: %s - %s] We matched by name for this series, but cannot find a corresponding issue number in the series list.' % (cs['ComicName'], cs['ComicID'])) + logger.warn('%s[MATCH: %s - %s] We matched by name for this series, but cannot find a corresponding issue number in the series list.' % (module, cs['ComicName'], cs['ComicID'])) #mlp = [] @@ -922,7 +921,7 @@ class PostProcessor(object): arc_series = myDB.select("SELECT * FROM storyarcs WHERE IssueArcID=?", [self.issuearcid]) if arc_series is None: - logger.error(module + ' No Story Arcs in Watchlist that contain that particular series - aborting Manual Post Processing. Maybe you should be running Import?') + logger.error('%s No Story Arcs in Watchlist that contain that particular series - aborting Manual Post Processing. Maybe you should be running Import?' % module) return else: arcvals = [] @@ -957,10 +956,14 @@ class PostProcessor(object): from collections import defaultdict res = defaultdict(list) for acv in arcvals: - acv_check = [x for x in manual_list if x['ComicID'] == acv['WatchValues']['ComicID']] - if acv_check: + if len(manual_list) == 0: res[acv['ComicName']].append({"ArcValues": acv['ArcValues'], "WatchValues": acv['WatchValues']}) + else: + acv_check = [x for x in manual_list if x['ComicID'] == acv['WatchValues']['ComicID']] + if acv_check: + res[acv['ComicName']].append({"ArcValues": acv['ArcValues'], + "WatchValues": acv['WatchValues']}) if len(res) > 0: logger.fdebug('%s Now Checking if %s issue(s) may also reside in one of the storyarc\'s that I am watching.' 
% (module, len(res))) for k,v in res.items(): @@ -978,7 +981,7 @@ class PostProcessor(object): nm+=1 else: try: - if v[i]['WatchValues']['Type'] == 'TPB' and v[i]['ArcValues']['Total'] > 1: + if all([v[i]['WatchValues']['Type'] == 'TPB', v[i]['WatchValues']['Total'] > 1]) or all([v[i]['WatchValues']['Type'] == 'One-Shot', v[i]['WatchValues']['Total'] == 1]): if watchmatch['series_volume'] is not None: just_the_digits = re.sub('[^0-9]', '', arcmatch['series_volume']).strip() else: @@ -995,117 +998,128 @@ class PostProcessor(object): temploc = re.sub('[\#\']', '', temploc) #logger.fdebug('temploc: %s' % temploc) else: - temploc = None + if any([v[i]['WatchValues']['Type'] == 'TPB', v[i]['WatchValues']['Type'] == 'One-Shot']): + temploc = '1' + else: + temploc = None if temploc is not None and helpers.issuedigits(temploc) != helpers.issuedigits(v[i]['ArcValues']['IssueNumber']): #logger.fdebug('issues dont match. Skipping') i+=1 continue else: - if 'annual' in temploc.lower(): + if temploc is not None and (any(['annual' in temploc.lower(), 'special' in temploc.lower()]) and mylar.CONFIG.ANNUALS_ON is True): biannchk = re.sub('-', '', temploc.lower()).strip() if 'biannual' in biannchk: - logger.fdebug(module + ' Bi-Annual detected.') + logger.fdebug('%s Bi-Annual detected.' % module) fcdigit = helpers.issuedigits(re.sub('biannual', '', str(biannchk)).strip()) else: - fcdigit = helpers.issuedigits(re.sub('annual', '', str(temploc.lower())).strip()) - logger.fdebug(module + ' Annual detected [' + str(fcdigit) +']. ComicID assigned as ' + str(v[i]['WatchValues']['ComicID'])) + if 'annual' in temploc.lower(): + fcdigit = helpers.issuedigits(re.sub('annual', '', str(temploc.lower())).strip()) + else: + fcdigit = helpers.issuedigits(re.sub('special', '', str(temploc.lower())).strip()) + logger.fdebug('%s Annual detected [%s]. ComicID assigned as %s' % (module, fcdigit, v[i]['WatchValues']['ComicID'])) annchk = "yes" issuechk = myDB.selectone("SELECT * from storyarcs WHERE ComicID=? AND Int_IssueNumber=?", [v[i]['WatchValues']['ComicID'], fcdigit]).fetchone() else: - fcdigit = helpers.issuedigits(temploc) - issuechk = myDB.selectone("SELECT * from storyarcs WHERE ComicID=? AND Int_IssueNumber=?", [v[i]['WatchValues']['ComicID'], fcdigit]).fetchone() + annchk = "no" + if temploc is not None: + fcdigit = helpers.issuedigits(temploc) + issuechk = myDB.select("SELECT * from storyarcs WHERE ComicID=? AND Int_IssueNumber=?", [v[i]['WatchValues']['ComicID'], fcdigit]) + else: + issuechk = myDB.select("SELECT * from storyarcs WHERE ComicID=?", [v[i]['WatchValues']['ComicID']]) if issuechk is None: try: - logger.fdebug(module + ' No corresponding issue # found for ' + str(v[i]['WatchValues']['ComicID'])) + logger.fdebug('%s No corresponding issue # found for %s' % (module, v[i]['WatchValues']['ComicID'])) except: continue else: - datematch = "True" - if len(arcmatch) >= 1 and arcmatch['issue_year'] is not None: - #if the # of matches is more than 1, we need to make sure we get the right series - #compare the ReleaseDate for the issue, to the found issue date in the filename. - #if ReleaseDate doesn't exist, use IssueDate - #if no issue date was found, then ignore. 
- issyr = None - logger.fdebug('issuedate:' + str(issuechk['IssueDate'])) - logger.fdebug('issuechk: ' + str(issuechk['IssueDate'][5:7])) + for isc in issuechk: + datematch = "True" + if len(arcmatch) >= 1 and arcmatch['issue_year'] is not None: + #if the # of matches is more than 1, we need to make sure we get the right series + #compare the ReleaseDate for the issue, to the found issue date in the filename. + #if ReleaseDate doesn't exist, use IssueDate + #if no issue date was found, then ignore. + issyr = None + logger.fdebug('issuedate:' + str(isc['IssueDate'])) + logger.fdebug('issuechk: ' + str(isc['IssueDate'][5:7])) + + logger.fdebug('StoreDate ' + str(isc['ReleaseDate'])) + logger.fdebug('IssueDate: ' + str(isc['IssueDate'])) + if all([isc['ReleaseDate'] is not None, isc['ReleaseDate'] != '0000-00-00']) or all([isc['IssueDate'] is not None, isc['IssueDate'] != '0000-00-00']): + if isc['ReleaseDate'] == '0000-00-00': + datevalue = isc['IssueDate'] + if int(datevalue[:4]) < int(arcmatch['issue_year']): + logger.fdebug('%s %s is before the issue year %s that was discovered in the filename' % (module, datevalue[:4], arcmatch['issue_year'])) + datematch = "False" + else: + datevalue = isc['ReleaseDate'] + if int(datevalue[:4]) < int(arcmatch['issue_year']): + logger.fdebug('%s %s is before the issue year of %s that was discovered in the filename' % (module, datevalue[:4], arcmatch['issue_year'])) + datematch = "False" + + monthval = datevalue + + if int(monthval[5:7]) == 11 or int(monthval[5:7]) == 12: + issyr = int(monthval[:4]) + 1 + logger.fdebug('%s IssueYear (issyr) is %s' % (module, issyr)) + elif int(monthval[5:7]) == 1 or int(monthval[5:7]) == 2 or int(monthval[5:7]) == 3: + issyr = int(monthval[:4]) - 1 + + if datematch == "False" and issyr is not None: + logger.fdebug('%s %s comparing to %s : rechecking by month-check versus year.' % (module, issyr, arcmatch['issue_year'])) + datematch = "True" + if int(issyr) != int(arcmatch['issue_year']): + logger.fdebug('%s[.:FAIL:.] 
Issue is before the modified issue year of %s' % (module, issyr)) + datematch = "False" - logger.fdebug('StoreDate ' + str(issuechk['ReleaseDate'])) - logger.fdebug('IssueDate: ' + str(issuechk['IssueDate'])) - if all([issuechk['ReleaseDate'] is not None, issuechk['ReleaseDate'] != '0000-00-00']) or all([issuechk['IssueDate'] is not None, issuechk['IssueDate'] != '0000-00-00']): - if issuechk['ReleaseDate'] == '0000-00-00': - datevalue = issuechk['IssueDate'] - if int(datevalue[:4]) < int(arcmatch['issue_year']): - logger.fdebug(module + ' ' + str(datevalue[:4]) + ' is before the issue year ' + str(arcmatch['issue_year']) + ' that was discovered in the filename') - datematch = "False" else: - datevalue = issuechk['ReleaseDate'] - if int(datevalue[:4]) < int(arcmatch['issue_year']): - logger.fdebug(module + ' ' + str(datevalue[:4]) + ' is before the issue year of ' + str(arcmatch['issue_year']) + ' that was discovered in the filename') - datematch = "False" + logger.info('%s Found matching issue # %s for ComicID: %s / IssueID: %s' % (module, fcdigit, v[i]['WatchValues']['ComicID'], isc['IssueID'])) - monthval = datevalue + logger.fdebug('datematch: %s' % datematch) + logger.fdebug('temploc: %s' % helpers.issuedigits(temploc)) + logger.fdebug('arcissue: %s' % helpers.issuedigits(v[i]['ArcValues']['IssueNumber'])) + if datematch == "True" and helpers.issuedigits(temploc) == helpers.issuedigits(v[i]['ArcValues']['IssueNumber']): + passit = False + if len(manual_list) > 0: + if any([ v[i]['ArcValues']['IssueID'] == x['IssueID'] for x in manual_list ]): + logger.info('[STORY-ARC POST-PROCESSING] IssueID %s exists in your watchlist. Bypassing Story-Arc post-processing performed later.' % v[i]['ArcValues']['IssueID']) + #add in the storyarcid into the manual list so it will perform story-arc functions after normal manual PP is finished. + for a in manual_list: + if a['IssueID'] == v[i]['ArcValues']['IssueID']: + a['IssueArcID'] = v[i]['ArcValues']['IssueArcID'] + break + passit = True + if passit == False: + tmpfilename = helpers.conversion(arcmatch['comicfilename']) + if arcmatch['sub']: + clocation = os.path.join(arcmatch['comiclocation'], arcmatch['sub'], tmpfilename) + else: + clocation = os.path.join(arcmatch['comiclocation'], tmpfilename) + logger.info('[%s %s#] MATCH: %s / %s / %s' % (k, isc['IssueNumber'], clocation, isc['IssueID'], v[i]['ArcValues']['IssueID'])) + if v[i]['ArcValues']['Publisher'] is None: + arcpublisher = v[i]['ArcValues']['ComicPublisher'] + else: + arcpublisher = v[i]['ArcValues']['Publisher'] - if int(monthval[5:7]) == 11 or int(monthval[5:7]) == 12: - issyr = int(monthval[:4]) + 1 - logger.fdebug(module + ' IssueYear (issyr) is ' + str(issyr)) - elif int(monthval[5:7]) == 1 or int(monthval[5:7]) == 2 or int(monthval[5:7]) == 3: - issyr = int(monthval[:4]) - 1 - - if datematch == "False" and issyr is not None: - logger.fdebug(module + ' ' + str(issyr) + ' comparing to ' + str(arcmatch['issue_year']) + ' : rechecking by month-check versus year.') - datematch = "True" - if int(issyr) != int(arcmatch['issue_year']): - logger.fdebug(module + '[.:FAIL:.] 
Issue is before the modified issue year of ' + str(issyr)) - datematch = "False" - - else: - logger.info(module + ' Found matching issue # ' + str(fcdigit) + ' for ComicID: ' + str(v[i]['WatchValues']['ComicID']) + ' / IssueID: ' + str(issuechk['IssueID'])) - - logger.fdebug('datematch: ' + str(datematch)) - logger.fdebug('temploc: ' + str(helpers.issuedigits(temploc))) - logger.fdebug('arcissue: ' + str(helpers.issuedigits(v[i]['ArcValues']['IssueNumber']))) - if datematch == "True" and helpers.issuedigits(temploc) == helpers.issuedigits(v[i]['ArcValues']['IssueNumber']): - passit = False - if len(manual_list) > 0: - if any([ v[i]['ArcValues']['IssueID'] == x['IssueID'] for x in manual_list ]): - logger.info('[STORY-ARC POST-PROCESSING] IssueID ' + str(v[i]['ArcValues']['IssueID']) + ' exists in your watchlist. Bypassing Story-Arc post-processing performed later.') - #add in the storyarcid into the manual list so it will perform story-arc functions after normal manual PP is finished. - for a in manual_list: - if a['IssueID'] == v[i]['ArcValues']['IssueID']: - a['IssueArcID'] = v[i]['ArcValues']['IssueArcID'] - break - passit = True - if passit == False: - tmpfilename = helpers.conversion(arcmatch['comicfilename']) - if arcmatch['sub']: - clocation = os.path.join(arcmatch['comiclocation'], arcmatch['sub'], tmpfilename) - else: - clocation = os.path.join(arcmatch['comiclocation'], tmpfilename) - logger.info('[' + k + ' #' + issuechk['IssueNumber'] + '] MATCH: ' + clocation + ' / ' + str(issuechk['IssueID']) + ' / ' + str(v[i]['ArcValues']['IssueID'])) - if v[i]['ArcValues']['Publisher'] is None: - arcpublisher = v[i]['ArcValues']['ComicPublisher'] - else: - arcpublisher = v[i]['ArcValues']['Publisher'] - - manual_arclist.append({"ComicLocation": clocation, - "Filename": tmpfilename, - "ComicID": v[i]['WatchValues']['ComicID'], - "IssueID": v[i]['ArcValues']['IssueID'], - "IssueNumber": v[i]['ArcValues']['IssueNumber'], - "StoryArc": v[i]['ArcValues']['StoryArc'], - "StoryArcID": v[i]['ArcValues']['StoryArcID'], - "IssueArcID": v[i]['ArcValues']['IssueArcID'], - "Publisher": arcpublisher, - "ReadingOrder": v[i]['ArcValues']['ReadingOrder'], - "ComicName": k}) - logger.info(module + '[SUCCESSFUL MATCH: ' + k + '-' + v[i]['WatchValues']['ComicID'] + '] Match verified for ' + arcmatch['comicfilename']) - self.matched = True - break - else: - logger.fdebug(module + '[NON-MATCH: ' + k + '-' + v[i]['WatchValues']['ComicID'] + '] Incorrect series - not populating..continuing post-processing') + manual_arclist.append({"ComicLocation": clocation, + "Filename": tmpfilename, + "ComicID": v[i]['WatchValues']['ComicID'], + "IssueID": v[i]['ArcValues']['IssueID'], + "IssueNumber": v[i]['ArcValues']['IssueNumber'], + "StoryArc": v[i]['ArcValues']['StoryArc'], + "StoryArcID": v[i]['ArcValues']['StoryArcID'], + "IssueArcID": v[i]['ArcValues']['IssueArcID'], + "Publisher": arcpublisher, + "ReadingOrder": v[i]['ArcValues']['ReadingOrder'], + "ComicName": k}) + logger.info('%s[SUCCESSFUL MATCH: %s-%s] Match verified for %s' % (module, k, v[i]['WatchValues']['ComicID'], arcmatch['comicfilename'])) + self.matched = True + break + else: + logger.fdebug('%s[NON-MATCH: %s-%s] Incorrect series - not populating..continuing post-processing' % (module, k, v[i]['WatchValues']['ComicID'])) i+=1 if self.matched is False: @@ -1118,7 +1132,7 @@ class PostProcessor(object): if not oneofflist: pass #continue else: - logger.fdebug(module + '[ONEOFF-SELECTION][self.nzb_name: %s]' % self.nzb_name) + 
logger.fdebug('%s[ONEOFF-SELECTION][self.nzb_name: %s]' % (module, self.nzb_name)) oneoffvals = [] for ofl in oneofflist: #logger.info('[ONEOFF-SELECTION] ofl: %s' % ofl) @@ -1176,11 +1190,11 @@ class PostProcessor(object): if 'annual' in temploc.lower(): biannchk = re.sub('-', '', temploc.lower()).strip() if 'biannual' in biannchk: - logger.fdebug(module + ' Bi-Annual detected.') + logger.fdebug('%s Bi-Annual detected.' % module) fcdigit = helpers.issuedigits(re.sub('biannual', '', str(biannchk)).strip()) else: fcdigit = helpers.issuedigits(re.sub('annual', '', str(temploc.lower())).strip()) - logger.fdebug(module + ' Annual detected [' + str(fcdigit) +']. ComicID assigned as ' + str(ofv['ComicID'])) + logger.fdebug('%s Annual detected [%s]. ComicID assigned as %s' % (module, fcdigit, ofv['ComicID'])) annchk = "yes" else: fcdigit = helpers.issuedigits(temploc) @@ -1198,10 +1212,10 @@ class PostProcessor(object): "One-Off": True}) self.oneoffinlist = True else: - logger.fdebug(module + ' No corresponding issue # in dB found for %s # %s' % (ofv['ComicName'],ofv['Issue_Number'])) + logger.fdebug('%s No corresponding issue # in dB found for %s # %s' % (module, ofv['ComicName'], ofv['Issue_Number'])) continue - logger.fdebug(module + '[SUCCESSFUL MATCH: ' + ofv['ComicName'] + '-' + ofv['ComicID'] + '] Match verified for ' + helpers.conversion(fl['comicfilename'])) + logger.fdebug('%s[SUCCESSFUL MATCH: %s-%s] Match Verified for %s' % (module, ofv['ComicName'], ofv['ComicID'], helpers.conversion(fl['comicfilename']))) self.matched = True break @@ -1210,11 +1224,11 @@ class PostProcessor(object): delete_arc = [] if len(manual_arclist) > 0: - logger.info('[STORY-ARC MANUAL POST-PROCESSING] I have found ' + str(len(manual_arclist)) + ' issues that belong to Story Arcs. Flinging them into the correct directories.') + logger.info('[STORY-ARC MANUAL POST-PROCESSING] I have found %s issues that belong to Story Arcs. Flinging them into the correct directories.' % len(manual_arclist)) for ml in manual_arclist: issueid = ml['IssueID'] ofilename = orig_filename = ml['ComicLocation'] - logger.info('[STORY-ARC POST-PROCESSING] Enabled for ' + ml['StoryArc']) + logger.info('[STORY-ARC POST-PROCESSING] Enabled for %s' % ml['StoryArc']) grdst = helpers.arcformat(ml['StoryArc'], helpers.spantheyears(ml['StoryArcID']), ml['Publisher']) @@ -1229,25 +1243,25 @@ class PostProcessor(object): import cmtagmylar metaresponse = cmtagmylar.run(self.nzb_folder, issueid=issueid, filename=ofilename) except ImportError: - logger.warn(module + ' comictaggerlib not found on system. Ensure the ENTIRE lib directory is located within mylar/lib/comictaggerlib/') + logger.warn('%s comictaggerlib not found on system. Ensure the ENTIRE lib directory is located within mylar/lib/comictaggerlib/' % module) metaresponse = "fail" if metaresponse == "fail": - logger.fdebug(module + ' Unable to write metadata successfully - check mylar.log file. Attempting to continue without metatagging...') + logger.fdebug('%s Unable to write metadata successfully - check mylar.log file. Attempting to continue without metatagging...' % module) elif any([metaresponse == "unrar error", metaresponse == "corrupt"]): - logger.error(module + ' This is a corrupt archive - whether CRC errors or it is incomplete. Marking as BAD, and retrying it.') + logger.error('%s This is a corrupt archive - whether CRC errors or it is incomplete. Marking as BAD, and retrying it.' % module) continue #launch failed download handling here. 
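Illustrative sketch (not part of the patch): the [ISSUE-VERIFY] and story-arc hunks above compare an issue's store/cover date against the year parsed from the filename, then allow a one-year skew for issues cover-dated Nov/Dec (which can ship in the following calendar year) or Jan-Mar (which can ship in the previous one). A rough standalone equivalent of that check, using a hypothetical helper name:

    def year_matches(issue_date, filename_year):
        """issue_date is 'YYYY-MM-DD'; filename_year is the year parsed from the filename."""
        year = int(issue_date[:4])
        month = int(issue_date[5:7])
        if year == int(filename_year):
            return True
        # late cover dates (Nov/Dec) often ship in the following calendar year,
        # early ones (Jan-Mar) in the previous one, so allow a one-year skew there
        if month in (11, 12):
            return year + 1 == int(filename_year)
        if month in (1, 2, 3):
            return year - 1 == int(filename_year)
        return False

    # a 2015-12-01 store date is accepted for a file whose name carries 2016
    assert year_matches('2015-12-01', '2016')
    assert not year_matches('2015-06-01', '2016')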
elif metaresponse.startswith('file not found'): filename_in_error = metaresponse.split('||')[1] - self._log("The file cannot be found in the location provided for metatagging to be used [" + filename_in_error + "]. Please verify it exists, and re-run if necessary. Attempting to continue without metatagging...") - logger.error(module + ' The file cannot be found in the location provided for metatagging to be used [' + filename_in_error + ']. Please verify it exists, and re-run if necessary. Attempting to continue without metatagging...') + self._log("The file cannot be found in the location provided for metatagging to be used [%s]. Please verify it exists, and re-run if necessary. Attempting to continue without metatagging..." % (filename_in_error)) + logger.error('%s The file cannot be found in the location provided for metatagging to be used [%s]. Please verify it exists, and re-run if necessary. Attempting to continue without metatagging...' % (module, filename_in_error)) else: odir = os.path.split(metaresponse)[0] ofilename = os.path.split(metaresponse)[1] ext = os.path.splitext(metaresponse)[1] - logger.info(module + ' Sucessfully wrote metadata to .cbz (' + ofilename + ') - Continuing..') - self._log('Sucessfully wrote metadata to .cbz (' + ofilename + ') - proceeding...') + logger.info('%s Sucessfully wrote metadata to .cbz (%s) - Continuing..' % (module, ofilename)) + self._log('Sucessfully wrote metadata to .cbz (%s) - proceeding...' % ofilename) dfilename = ofilename else: @@ -1261,11 +1275,11 @@ class PostProcessor(object): src_location = ofilename grab_src = ofilename - logger.fdebug(module + ' Source Path : ' + grab_src) + logger.fdebug('%s Source Path : %s' % (module, grab_src)) checkdirectory = filechecker.validateAndCreateDirectory(grdst, True, module=module) if not checkdirectory: - logger.warn(module + ' Error trying to validate/create directory. Aborting this process at this time.') + logger.warn('%s Error trying to validate/create directory. Aborting this process at this time.' 
% module) self.valreturn.append({"self.log": self.log, "mode": 'stop'}) return self.queue.put(self.valreturn) @@ -1275,12 +1289,12 @@ class PostProcessor(object): renamed_file = helpers.rename_param(ml['ComicID'], ml['ComicName'], ml['IssueNumber'], dfilename, issueid=ml['IssueID'], arc=ml['StoryArc']) if renamed_file: dfilename = renamed_file['nfilename'] - logger.fdebug(module + ' Renaming file to conform to configuration: ' + ofilename) + logger.fdebug('%s Renaming file to conform to configuration: %s' % (module, ofilename)) #if from a StoryArc, check to see if we're appending the ReadingOrder to the filename if mylar.CONFIG.READ2FILENAME: - logger.fdebug(module + ' readingorder#: ' + str(ml['ReadingOrder'])) + logger.fdebug('%s readingorder#: %s' % (module, ml['ReadingOrder'])) if int(ml['ReadingOrder']) < 10: readord = "00" + str(ml['ReadingOrder']) elif int(ml['ReadingOrder']) >= 10 and int(ml['ReadingOrder']) <= 99: readord = "0" + str(ml['ReadingOrder']) else: readord = str(ml['ReadingOrder']) @@ -1288,10 +1302,10 @@ class PostProcessor(object): grab_dst = os.path.join(grdst, dfilename) - logger.fdebug(module + ' Destination Path : ' + grab_dst) - logger.fdebug(module + ' Source Path : ' + grab_src) + logger.fdebug('%s Destination Path : %s' % (module, grab_dst)) + logger.fdebug('%s Source Path : %s' % (module, grab_src)) - logger.info(module + '[ONE-OFF MODE][' + mylar.CONFIG.ARC_FILEOPS.upper() + '] ' + grab_src + ' into directory : ' + grab_dst) + logger.info('%s[ONE-OFF MODE][%s] %s into directory : %s' % (module, mylar.CONFIG.ARC_FILEOPS.upper(), grab_src, grab_dst)) #this is also for issues that are part of a story arc, and don't belong to a watchlist series (ie. one-off's) try: @@ -1318,14 +1332,14 @@ class PostProcessor(object): myDB.action('DELETE from nzblog WHERE IssueID=? AND SARC=?', ['S' + str(ml['IssueArcID']),ml['StoryArc']]) myDB.action('DELETE from nzblog WHERE IssueID=? AND SARC=?', [ml['IssueArcID'],ml['StoryArc']]) - logger.fdebug(module + ' IssueArcID: ' + str(ml['IssueArcID'])) + logger.fdebug('%s IssueArcID: %s' % (module, ml['IssueArcID'])) ctrlVal = {"IssueArcID": ml['IssueArcID']} newVal = {"Status": "Downloaded", "Location": grab_dst} - logger.fdebug('writing: ' + str(newVal) + ' -- ' + str(ctrlVal)) + logger.fdebug('writing: %s -- %s' % (newVal, ctrlVal)) myDB.upsert("storyarcs", newVal, ctrlVal) - logger.fdebug(module + ' [' + ml['StoryArc'] + '] Post-Processing completed for: ' + grab_dst) + logger.fdebug('%s [%s] Post-Processing completed for: %s' % (module, ml['StoryArc'], grab_dst)) if (all([self.nzb_name != 'Manual Run', self.apicall is False]) or (self.oneoffinlist is True or all([self.issuearcid is not None, self.issueid is None]))) and not self.nzb_name.startswith('0-Day'): # and all([self.issueid is None, self.comicid is None, self.apicall is False]): ppinfo = [] @@ -1352,36 +1366,36 @@ class PostProcessor(object): logger.fdebug('[NZBNAME] nzbname (remove extensions, double spaces, convert underscores to spaces): ' + nzbname) nzbname = re.sub('\s', '.', nzbname) - logger.fdebug(module + ' After conversions, nzbname is : ' + str(nzbname)) + logger.fdebug('%s After conversions, nzbname is : %s' % (module, nzbname)) # if mylar.USE_NZBGET==1: # nzbname=self.nzb_name - self._log("nzbname: " + str(nzbname)) + self._log("nzbname: %s" % nzbname) nzbiss = myDB.selectone("SELECT * from nzblog WHERE nzbname=? 
or altnzbname=?", [nzbname, nzbname]).fetchone() self.oneoff = False if nzbiss is None: self._log("Failure - could not initially locate nzbfile in my database to rename.") - logger.fdebug(module + ' Failure - could not locate nzbfile initially') + logger.fdebug('%s Failure - could not locate nzbfile initially' % module) # if failed on spaces, change it all to decimals and try again. nzbname = re.sub('[\(\)]', '', str(nzbname)) - self._log("trying again with this nzbname: " + str(nzbname)) - logger.fdebug(module + ' Trying to locate nzbfile again with nzbname of : ' + str(nzbname)) + self._log("trying again with this nzbname: %s" % nzbname) + logger.fdebug('%s Trying to locate nzbfile again with nzbname of : %s' % (module, nzbname)) nzbiss = myDB.selectone("SELECT * from nzblog WHERE nzbname=? or altnzbname=?", [nzbname, nzbname]).fetchone() if nzbiss is None: - logger.error(module + ' Unable to locate downloaded file within items I have snatched. Attempting to parse the filename directly and process.') + logger.error('%s Unable to locate downloaded file within items I have snatched. Attempting to parse the filename directly and process.' % module) #set it up to run manual post-processing on self.nzb_folder self._log('Unable to locate downloaded file within items I have snatched. Attempting to parse the filename directly and process.') self.valreturn.append({"self.log": self.log, "mode": 'outside'}) return self.queue.put(self.valreturn) else: - self._log("I corrected and found the nzb as : " + str(nzbname)) - logger.fdebug(module + ' Auto-corrected and found the nzb as : ' + str(nzbname)) + self._log("I corrected and found the nzb as : %s" % nzbname) + logger.fdebug('%s Auto-corrected and found the nzb as : %s' % (module, nzbname)) #issueid = nzbiss['IssueID'] issueid = nzbiss['IssueID'] - logger.fdebug(module + ' Issueid: ' + str(issueid)) + logger.fdebug('%s Issueid: %s' % (module, issueid)) sarc = nzbiss['SARC'] self.oneoff = nzbiss['OneOff'] tmpiss = myDB.selectone('SELECT * FROM issues WHERE IssueID=?', [issueid]).fetchone() @@ -1479,17 +1493,17 @@ class PostProcessor(object): #loop through the hits here. if len(manual_list) == 0 and len(manual_arclist) == 0: if self.nzb_name == 'Manual Run': - logger.info(module + ' No matches for Manual Run ... exiting.') + logger.info('%s No matches for Manual Run ... exiting.' % module) if mylar.APILOCK is True: mylar.APILOCK = False return elif len(manual_arclist) > 0 and len(manual_list) == 0: - logger.info(module + ' Manual post-processing completed for ' + str(len(manual_arclist)) + ' story-arc issues.') + logger.info('%s Manual post-processing completed for %s story-arc issues.' % (module, len(manual_arclist))) if mylar.APILOCK is True: mylar.APILOCK = False return elif len(manual_arclist) > 0: - logger.info(module + ' Manual post-processing completed for ' + str(len(manual_arclist)) + ' story-arc issues.') + logger.info('%s Manual post-processing completed for %s story-arc issues.' % (module, len(manual_arclist))) i = 0 @@ -1523,7 +1537,7 @@ class PostProcessor(object): continue if any([dupthis['action'] == "write", dupthis['action'] == 'dupe_src']): - stat = ' [' + str(i) + '/' + str(len(manual_list)) + ']' + stat = ' [%s/%s]' % (i, len(manual_list)) self.Process_next(comicid, issueid, issuenumOG, ml, stat) dupthis = None @@ -1569,10 +1583,10 @@ class PostProcessor(object): annchk = "no" issuenzb = myDB.selectone("SELECT * from issues WHERE IssueID=? 
AND ComicName NOT NULL", [issueid]).fetchone() if issuenzb is None: - logger.info(module + ' Could not detect as a standard issue - checking against annuals.') + logger.info('%s Could not detect as a standard issue - checking against annuals.' % module) issuenzb = myDB.selectone("SELECT * from annuals WHERE IssueID=? AND ComicName NOT NULL", [issueid]).fetchone() if issuenzb is None: - logger.info(module + ' issuenzb not found.') + logger.info('%s issuenzb not found.' % module) #if it's non-numeric, it contains a 'G' at the beginning indicating it's a multi-volume #using GCD data. Set sandwich to 1 so it will bypass and continue post-processing. if 'S' in issueid: @@ -1590,20 +1604,20 @@ class PostProcessor(object): elif 'G' in issueid or '-' in issueid: sandwich = 1 elif any([oneoff is True, issueid >= '900000', issueid == '1']): - logger.info(module + ' [ONE-OFF POST-PROCESSING] One-off download detected. Post-processing as a non-watchlist item.') + logger.info('%s [ONE-OFF POST-PROCESSING] One-off download detected. Post-processing as a non-watchlist item.' % module) sandwich = None #arbitrarily set it to None just to force one-off downloading below. else: - logger.error(module + ' Unable to locate downloaded file as being initiated via Mylar. Attempting to parse the filename directly and process.') + logger.error('%s Unable to locate downloaded file as being initiated via Mylar. Attempting to parse the filename directly and process.' % module) self._log('Unable to locate downloaded file within items I have snatched. Attempting to parse the filename directly and process.') self.valreturn.append({"self.log": self.log, "mode": 'outside'}) return self.queue.put(self.valreturn) else: - logger.info(module + ' Successfully located issue as an annual. Continuing.') + logger.info('%s Successfully located issue as an annual. Continuing.' % module) annchk = "yes" if issuenzb is not None: - logger.info(module + ' issuenzb found.') + logger.info('%s issuenzb found.' % module) if helpers.is_number(issueid): sandwich = int(issuenzb['IssueID']) if all([sandwich is not None, helpers.is_number(sandwich), sarc is None]): @@ -1640,12 +1654,12 @@ class PostProcessor(object): for filename in filenames: if filename.lower().endswith(self.extensions): ofilename = orig_filename = filename - logger.fdebug(module + ' Valid filename located as : ' + ofilename) + logger.fdebug('%s Valid filename located as : %s' % (module, ofilename)) path, ext = os.path.splitext(ofilename) break if ofilename is None: - logger.error(module + ' Unable to post-process file as it is not in a valid cbr/cbz format or cannot be located in path. PostProcessing aborted.') + logger.error('%s Unable to post-process file as it is not in a valid cbr/cbz format or cannot be located in path. PostProcessing aborted.' % module) self._log('Unable to locate downloaded file to rename. PostProcessing aborted.') self.valreturn.append({"self.log": self.log, "mode": 'stop'}) @@ -1653,10 +1667,10 @@ class PostProcessor(object): if sandwich is not None and 'S' in sandwich: issuearcid = re.sub('S', '', issueid) - logger.fdebug(module + ' issuearcid:' + str(issuearcid)) + logger.fdebug('%s issuearcid:%s' % (module, issuearcid)) arcdata = myDB.selectone("SELECT * FROM storyarcs WHERE IssueArcID=?", [issuearcid]).fetchone() if arcdata is None: - logger.warn(module + ' Unable to locate issue within Story Arcs. 
Cannot post-process at this time - try to Refresh the Arc and manual post-process if necessary.') + logger.warn('%s Unable to locate issue within Story Arcs. Cannot post-process at this time - try to Refresh the Arc and manual post-process if necessary.' % module) self._log('Unable to locate issue within Story Arcs in orde to properly assign metadata. PostProcessing aborted.') self.valreturn.append({"self.log": self.log, "mode": 'stop'}) @@ -1689,24 +1703,24 @@ class PostProcessor(object): import cmtagmylar metaresponse = cmtagmylar.run(location, issueid=issueid, filename=os.path.join(self.nzb_folder, ofilename)) except ImportError: - logger.warn(module + ' comictaggerlib not found on system. Ensure the ENTIRE lib directory is located within mylar/lib/comictaggerlib/') + logger.warn('%s comictaggerlib not found on system. Ensure the ENTIRE lib directory is located within mylar/lib/comictaggerlib/' % module) metaresponse = "fail" if metaresponse == "fail": - logger.fdebug(module + ' Unable to write metadata successfully - check mylar.log file. Attempting to continue without metatagging...') + logger.fdebug('%s Unable to write metadata successfully - check mylar.log file. Attempting to continue without metatagging...' % module) elif any([metaresponse == "unrar error", metaresponse == "corrupt"]): - logger.error(module + ' This is a corrupt archive - whether CRC errors or it is incomplete. Marking as BAD, and retrying it.') + logger.error('%s This is a corrupt archive - whether CRC errors or it is incomplete. Marking as BAD, and retrying it.' %module) #launch failed download handling here. elif metaresponse.startswith('file not found'): filename_in_error = metaresponse.split('||')[1] - self._log("The file cannot be found in the location provided for metatagging [" + filename_in_error + "]. Please verify it exists, and re-run if necessary.") - logger.error(module + ' The file cannot be found in the location provided for metagging [' + filename_in_error + ']. Please verify it exists, and re-run if necessary.') + self._log("The file cannot be found in the location provided for metatagging [%s]. Please verify it exists, and re-run if necessary." % filename_in_error) + logger.error('%s The file cannot be found in the location provided for metagging [%s]. Please verify it exists, and re-run if necessary.' % (module, filename_in_error)) else: odir = os.path.split(metaresponse)[0] ofilename = os.path.split(metaresponse)[1] ext = os.path.splitext(metaresponse)[1] - logger.info(module + ' Sucessfully wrote metadata to .cbz (' + ofilename + ') - Continuing..') - self._log('Sucessfully wrote metadata to .cbz (' + ofilename + ') - proceeding...') + logger.info('%s Sucessfully wrote metadata to .cbz (%s) - Continuing..' % (module, ofilename)) + self._log('Sucessfully wrote metadata to .cbz (%s) - proceeding...' % ofilename) dfilename = ofilename if metaresponse: @@ -1715,12 +1729,12 @@ class PostProcessor(object): src_location = location grab_src = os.path.join(src_location, ofilename) - self._log("Source Path : " + grab_src) - logger.info(module + ' Source Path : ' + grab_src) + self._log("Source Path : %s" % grab_src) + logger.info('%s Source Path : %s' % (module, grab_src)) checkdirectory = filechecker.validateAndCreateDirectory(grdst, True, module=module) if not checkdirectory: - logger.warn(module + ' Error trying to validate/create directory. Aborting this process at this time.') + logger.warn('%s Error trying to validate/create directory. Aborting this process at this time.' 
% module) self.valreturn.append({"self.log": self.log, "mode": 'stop'}) return self.queue.put(self.valreturn) @@ -1730,12 +1744,12 @@ class PostProcessor(object): renamed_file = helpers.rename_param(comicid, comicname, issuenumber, dfilename, issueid=issueid, arc=sarc) if renamed_file: dfilename = renamed_file['nfilename'] - logger.fdebug(module + ' Renaming file to conform to configuration: ' + ofilename) + logger.fdebug('%s Renaming file to conform to configuration: %s' % (module, ofilename)) if sandwich is not None and 'S' in sandwich: #if from a StoryArc, check to see if we're appending the ReadingOrder to the filename if mylar.CONFIG.READ2FILENAME: - logger.fdebug(module + ' readingorder#: ' + str(arcdata['ReadingOrder'])) + logger.fdebug('%s readingorder#: %s' % (module, arcdata['ReadingOrder'])) if int(arcdata['ReadingOrder']) < 10: readord = "00" + str(arcdata['ReadingOrder']) elif int(arcdata['ReadingOrder']) >= 10 and int(arcdata['ReadingOrder']) <= 99: readord = "0" + str(arcdata['ReadingOrder']) else: readord = str(arcdata['ReadingOrder']) @@ -1746,10 +1760,10 @@ class PostProcessor(object): else: grab_dst = os.path.join(grdst, ofilename) - self._log("Destination Path : " + grab_dst) + self._log("Destination Path : %s" % grab_dst) - logger.info(module + ' Destination Path : ' + grab_dst) - logger.info(module + '[' + mylar.CONFIG.FILE_OPTS + '] ' + ofilename + ' into directory : ' + grab_dst) + logger.info('%s Destination Path : %s' % (module, grab_dst)) + logger.info('%s[%s] %s into directory : %s' % (module, mylar.CONFIG.FILE_OPTS, ofilename, grab_dst)) try: checkspace = helpers.get_free_space(grdst) @@ -1773,25 +1787,25 @@ class PostProcessor(object): myDB.action('DELETE from nzblog WHERE issueid=?', [issueid]) if sandwich is not None and 'S' in sandwich: - logger.info(module + ' IssueArcID is : ' + str(issuearcid)) + logger.info('%s IssueArcID is : %s' % (module, issuearcid)) ctrlVal = {"IssueArcID": issuearcid} newVal = {"Status": "Downloaded", "Location": grab_dst} myDB.upsert("storyarcs", newVal, ctrlVal) - logger.info(module + ' Updated status to Downloaded') + logger.info('%s Updated status to Downloaded' % module) - logger.info(module + ' Post-Processing completed for: [' + sarc + '] ' + grab_dst) + logger.info('%s Post-Processing completed for: [%s] %s' % (module, sarc, grab_dst)) self._log(u"Post Processing SUCCESSFUL! ") elif oneoff is True: - logger.info(module + ' IssueID is : ' + str(issueid)) + logger.info('%s IssueID is : %s' % (module, issueid)) ctrlVal = {"IssueID": issueid} newVal = {"Status": "Downloaded"} - logger.info(module + ' Writing to db: ' + str(newVal) + ' -- ' + str(ctrlVal)) + logger.info('%s Writing to db: %s -- %s' % (module, newVal, ctrlVal)) myDB.upsert("weekly", newVal, ctrlVal) - logger.info(module + ' Updated status to Downloaded') + logger.info('%s Updated status to Downloaded' % module) myDB.upsert("oneoffhistory", newVal, ctrlVal) - logger.info(module + ' Updated history for one-off\'s for tracking purposes') - logger.info(module + ' Post-Processing completed for: [ %s #%s ] %s' % (comicname, issuenumber, grab_dst)) + logger.info('%s Updated history for one-off\'s for tracking purposes' % module) + logger.info('%s Post-Processing completed for: [ %s #%s ] %s' % (module, comicname, issuenumber, grab_dst)) self._log(u"Post Processing SUCCESSFUL! ") try: @@ -1839,13 +1853,13 @@ class PostProcessor(object): if self.nzb_name == 'Manual Run': #loop through the hits here. 
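Illustrative sketch (not part of the patch): when READ2FILENAME is enabled, the hunks above prefix the destination filename with the story-arc reading order padded to three digits through three branches; that padding is equivalent to a zfill. The helper name and values below are made up for illustration:

    def reading_order_prefix(reading_order):
        # reading_order is assumed to be a non-negative integer (or numeric string)
        order = int(reading_order)
        if order < 10:
            readord = '00' + str(order)
        elif 10 <= order <= 99:
            readord = '0' + str(order)
        else:
            readord = str(order)
        assert readord == str(order).zfill(3)   # the three branches collapse to a zfill
        return readord

    print(reading_order_prefix(7))     # 007
    print(reading_order_prefix(42))    # 042
    print(reading_order_prefix(120))   # 120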
if len(manual_list) == 0 and len(manual_arclist) == 0: - logger.info(module + ' No matches for Manual Run ... exiting.') + logger.info('%s No matches for Manual Run ... exiting.' % module) return elif len(manual_arclist) > 0 and len(manual_list) == 0: - logger.info(module + ' Manual post-processing completed for ' + str(len(manual_arclist)) + ' story-arc issues.') + logger.info('%s Manual post-processing completed for %s story-arc issues.' % (module, len(manual_arclist))) return elif len(manual_arclist) > 0: - logger.info(module + ' Manual post-processing completed for ' + str(len(manual_arclist)) + ' story-arc issues.') + logger.info('%s Manual post-processing completed for %s story-arc issues.' % (module, len(manual_arclist))) i = 0 for ml in manual_list: @@ -1881,14 +1895,14 @@ class PostProcessor(object): continue if any([dupthis['action'] == "write", dupthis['action'] == 'dupe_src']): - stat = ' [' + str(i) + '/' + str(len(manual_list)) + ']' + stat = ' [%s/%s]' % (i, len(manual_list)) self.Process_next(comicid, issueid, issuenumOG, ml, stat) dupthis = None if self.failed_files == 0: - logger.info(module + ' Manual post-processing completed for ' + str(i) + ' issues.') + logger.info('%s Manual post-processing completed for %s issues.' % (module, i)) else: - logger.info(module + ' Manual post-processing completed for ' + str(i) + ' issues [FAILED: ' + str(self.failed_files) + ']') + logger.info('%s Manual post-processing completed for %s issues [FAILED: %s]' % (module, i, self.failed_files)) return else: @@ -1936,18 +1950,18 @@ class PostProcessor(object): if ml is not None and mylar.CONFIG.SNATCHEDTORRENT_NOTIFY: snatchnzb = myDB.selectone("SELECT * from snatched WHERE IssueID=? AND ComicID=? AND (provider=? OR provider=? OR provider=? OR provider=?) AND Status='Snatched'", [issueid, comicid, 'TPSE', 'DEM', 'WWT', '32P']).fetchone() if snatchnzb is None: - logger.fdebug(module + ' Was not snatched as a torrent. Using manual post-processing.') + logger.fdebug('%s Was not snatched as a torrent. Using manual post-processing.' % module) else: - logger.fdebug(module + ' Was downloaded from ' + snatchnzb['Provider'] + '. Enabling torrent manual post-processing completion notification.') + logger.fdebug('%s Was downloaded from %s. Enabling torrent manual post-processing completion notification.' % (module, snatchnzb['Provider'])) if issuenzb is None: issuenzb = myDB.selectone("SELECT * from annuals WHERE issueid=? and comicid=?", [issueid, comicid]).fetchone() annchk = "yes" if annchk == "no": - logger.info(module + stat + ' Starting Post-Processing for ' + issuenzb['ComicName'] + ' issue: ' + issuenzb['Issue_Number']) + logger.info('%s %s Starting Post-Processing for %s issue: %s' % (module, stat, issuenzb['ComicName'], issuenzb['Issue_Number'])) else: - logger.info(module + stat + ' Starting Post-Processing for ' + issuenzb['ReleaseComicName'] + ' issue: ' + issuenzb['Issue_Number']) - logger.fdebug(module + ' issueid: ' + str(issueid)) - logger.fdebug(module + ' issuenumOG: ' + issuenumOG) + logger.info('%s %s Starting Post-Processing for %s issue: %s' % (module, stat, issuenzb['ReleaseComicName'], issuenzb['Issue_Number'])) + logger.fdebug('%s issueid: %s' % (module, issueid)) + logger.fdebug('%s issuenumOG: %s' % (module, issuenumOG)) #issueno = str(issuenum).split('.')[0] #new CV API - removed all decimals...here we go AGAIN! 
issuenum = issuenzb['Issue_Number'] @@ -2003,8 +2017,8 @@ class PostProcessor(object): iss = iss_b4dec issdec = int(iss_decval) issueno = str(iss) - self._log("Issue Number: " + str(issueno)) - logger.fdebug(module + 'Issue Number: ' + str(issueno)) + self._log("Issue Number: %s" % issueno) + logger.fdebug('%s Issue Number: %s' % (module, issueno)) else: if len(iss_decval) == 1: iss = iss_b4dec + "." + iss_decval @@ -2013,8 +2027,8 @@ class PostProcessor(object): iss = iss_b4dec + "." + iss_decval.rstrip('0') issdec = int(iss_decval.rstrip('0')) * 10 issueno = iss_b4dec - self._log("Issue Number: " + str(iss)) - logger.fdebug(module + ' Issue Number: ' + str(iss)) + self._log("Issue Number: %s" % iss) + logger.fdebug('%s Issue Number: %s' % (module, iss)) else: iss = issuenum issueno = iss @@ -2027,7 +2041,7 @@ class PostProcessor(object): elif mylar.CONFIG.ZERO_LEVEL_N == "0x": zeroadd = "0" elif mylar.CONFIG.ZERO_LEVEL_N == "00x": zeroadd = "00" - logger.fdebug(module + ' Zero Suppression set to : ' + str(mylar.CONFIG.ZERO_LEVEL_N)) + logger.fdebug('%s Zero Suppression set to : %s' % (module, mylar.CONFIG.ZERO_LEVEL_N)) prettycomiss = None @@ -2039,14 +2053,14 @@ class PostProcessor(object): x = float(issueno) #validity check if x < 0: - logger.info('I\'ve encountered a negative issue #: ' + str(issueno) + '. Trying to accomodate.') - prettycomiss = '-' + str(zeroadd) + str(issueno[1:]) + logger.info('%s I\'ve encountered a negative issue #: %s. Trying to accomodate' % (module, issueno)) + prettycomiss = '-%s%s' % (zeroadd, issueno[1:]) elif x >= 0: pass else: raise ValueError except ValueError, e: - logger.warn('Unable to properly determine issue number [' + str(issueno) + '] - you should probably log this on github for help.') + logger.warn('Unable to properly determine issue number [%s] - you should probably log this on github for help.' % issueno) return if prettycomiss is None and len(str(issueno)) > 0: @@ -2065,7 +2079,7 @@ class PostProcessor(object): prettycomiss = str(zeroadd) + str(iss) if issue_except != 'None': prettycomiss = str(prettycomiss) + issue_except - logger.fdebug('Zero level supplement set to ' + str(mylar.CONFIG.ZERO_LEVEL_N) + '. Issue will be set as : ' + str(prettycomiss)) + logger.fdebug('%s Zero level supplement set to %s. Issue will be set as : %s' % (module, mylar.CONFIG.ZERO_LEVEL_N, prettycomiss)) elif int(issueno) >= 10 and int(issueno) < 100: logger.fdebug('issue detected greater than 10, but less than 100') if mylar.CONFIG.ZERO_LEVEL_N == "none": @@ -2082,7 +2096,7 @@ class PostProcessor(object): prettycomiss = str(zeroadd) + str(iss) if issue_except != 'None': prettycomiss = str(prettycomiss) + issue_except - logger.fdebug('Zero level supplement set to ' + str(mylar.CONFIG.ZERO_LEVEL_N) + '.Issue will be set as : ' + str(prettycomiss)) + logger.fdebug('%s Zero level supplement set to %s. Issue will be set as : %s' % (module, mylar.CONFIG.ZERO_LEVEL_N, prettycomiss)) else: logger.fdebug('issue detected greater than 100') if '.' in iss: @@ -2091,43 +2105,43 @@ class PostProcessor(object): prettycomiss = str(issueno) if issue_except != 'None': prettycomiss = str(prettycomiss) + issue_except - logger.fdebug('Zero level supplement set to ' + str(mylar.CONFIG.ZERO_LEVEL_N) + '. Issue will be set as : ' + str(prettycomiss)) + logger.fdebug('%s Zero level supplement set to %s. 
Issue will be set as : %s' % (module, mylar.CONFIG.ZERO_LEVEL_N, prettycomiss)) elif len(str(issueno)) == 0: prettycomiss = str(issueno) - logger.fdebug('issue length error - cannot determine length. Defaulting to None: ' + str(prettycomiss)) + logger.fdebug('issue length error - cannot determine length. Defaulting to None: %s ' % prettycomiss) if annchk == "yes": self._log("Annual detected.") - logger.fdebug(module + ' Pretty Comic Issue is : ' + str(prettycomiss)) + logger.fdebug('%s Pretty Comic Issue is : %s' % (module, prettycomiss)) issueyear = issuenzb['IssueDate'][:4] - self._log("Issue Year: " + str(issueyear)) - logger.fdebug(module + ' Issue Year : ' + str(issueyear)) + self._log("Issue Year: %s" % issueyear) + logger.fdebug('%s Issue Year : %s' % (module, issueyear)) month = issuenzb['IssueDate'][5:7].replace('-', '').strip() month_name = helpers.fullmonth(month) if month_name is None: month_name = 'None' publisher = comicnzb['ComicPublisher'] - self._log("Publisher: " + publisher) - logger.fdebug(module + ' Publisher: ' + publisher) + self._log("Publisher: %s" % publisher) + logger.fdebug('%s Publisher: %s' % (module, publisher)) #we need to un-unicode this to make sure we can write the filenames properly for spec.chars series = comicnzb['ComicName'].encode('ascii', 'ignore').strip() - self._log("Series: " + series) - logger.fdebug(module + ' Series: ' + str(series)) + self._log("Series: %s" % series) + logger.fdebug('%s Series: %s' % (module, series)) if comicnzb['AlternateFileName'] is None or comicnzb['AlternateFileName'] == 'None': seriesfilename = series else: seriesfilename = comicnzb['AlternateFileName'].encode('ascii', 'ignore').strip() - logger.fdebug(module + ' Alternate File Naming has been enabled for this series. Will rename series to : ' + seriesfilename) + logger.fdebug('%s Alternate File Naming has been enabled for this series. 
Will rename series to : %s' % (module, seriesfilename)) seriesyear = comicnzb['ComicYear'] - self._log("Year: " + seriesyear) - logger.fdebug(module + ' Year: ' + str(seriesyear)) + self._log("Year: %s" % seriesyear) + logger.fdebug('%s Year: %s' % (module, seriesyear)) comlocation = comicnzb['ComicLocation'] - self._log("Comic Location: " + comlocation) - logger.fdebug(module + ' Comic Location: ' + str(comlocation)) + self._log("Comic Location: %s" % comlocation) + logger.fdebug('%s Comic Location: %s' % (module, comlocation)) comversion = comicnzb['ComicVersion'] - self._log("Comic Version: " + str(comversion)) - logger.fdebug(module + ' Comic Version: ' + str(comversion)) + self._log("Comic Version: %s" % comversion) + logger.fdebug('%s Comic Version: %s' % (module, comversion)) if comversion is None: comversion = 'None' #if comversion is None, remove it so it doesn't populate with 'None' @@ -2136,7 +2150,7 @@ class PostProcessor(object): chunk_f = re.compile(r'\s+') chunk_file_format = chunk_f.sub(' ', chunk_f_f) self._log("No version # found for series - tag will not be available for renaming.") - logger.fdebug(module + ' No version # found for series, removing from filename') + logger.fdebug('%s No version # found for series, removing from filename' % module) logger.fdebug('%s New format is now: %s' % (module, chunk_file_format)) else: chunk_file_format = mylar.CONFIG.FILE_FORMAT @@ -2145,7 +2159,7 @@ class PostProcessor(object): chunk_f_f = re.sub('\$Annual', '', chunk_file_format) chunk_f = re.compile(r'\s+') chunk_file_format = chunk_f.sub(' ', chunk_f_f) - logger.fdebug(module + ' Not an annual - removing from filename parameters') + logger.fdebug('%s Not an annual - removing from filename parameters' % module) logger.fdebug('%s New format: %s' % (module, chunk_file_format)) else: @@ -2166,16 +2180,16 @@ class PostProcessor(object): for filename in filenames: if filename.lower().endswith(self.extensions): odir = root - logger.fdebug(module + ' odir (root): ' + odir) + logger.fdebug('%s odir (root): %s' % (module, odir)) ofilename = filename - logger.fdebug(module + ' ofilename: ' + ofilename) + logger.fdebug('%s ofilename: %s' % (module, ofilename)) path, ext = os.path.splitext(ofilename) try: if odir is None: - logger.fdebug(module + ' No root folder set.') + logger.fdebug('%s No root folder set.' % module) odir = self.nzb_folder except: - logger.error(module + ' unable to set root folder. Forcing it due to some error above most likely.') + logger.error('%s unable to set root folder. Forcing it due to some error above most likely.' % module) if os.path.isfile(self.nzb_folder) and self.nzb_folder.lower().endswith(self.extensions): import ntpath odir, ofilename = ntpath.split(self.nzb_folder) @@ -2186,13 +2200,13 @@ class PostProcessor(object): if ofilename is None: self._log("Unable to locate a valid cbr/cbz file. Aborting post-processing for this filename.") - logger.error(module + ' unable to locate a valid cbr/cbz file. Aborting post-processing for this filename.') + logger.error('%s unable to locate a valid cbr/cbz file. Aborting post-processing for this filename.' 
% module) self.failed_files +=1 self.valreturn.append({"self.log": self.log, "mode": 'stop'}) return self.queue.put(self.valreturn) - logger.fdebug(module + ' odir: ' + odir) - logger.fdebug(module + ' ofilename: ' + ofilename) + logger.fdebug('%s odir: %s' % (module, odir)) + logger.fdebug('%s ofilename: %s' % (module, ofilename)) #if meta-tagging is not enabled, we need to declare the check as being fail @@ -2213,7 +2227,7 @@ class PostProcessor(object): if mylar.CONFIG.ENABLE_META: self._log("Metatagging enabled - proceeding...") - logger.fdebug(module + ' Metatagging enabled - proceeding...') + logger.fdebug('%s Metatagging enabled - proceeding...' % module) pcheck = "pass" if mylar.CONFIG.CMTAG_START_YEAR_AS_VOLUME: vol_label = seriesyear @@ -2228,26 +2242,26 @@ class PostProcessor(object): pcheck = cmtagmylar.run(self.nzb_folder, issueid=issueid, comversion=vol_label, manual="yes", filename=ml['ComicLocation']) except ImportError: - logger.fdebug(module + ' comictaggerlib not found on system. Ensure the ENTIRE lib directory is located within mylar/lib/comictaggerlib/') - logger.fdebug(module + ' continuing with PostProcessing, but I am not using metadata.') + logger.fdebug('%s comictaggerlib not found on system. Ensure the ENTIRE lib directory is located within mylar/lib/comictaggerlib/' % module) + logger.fdebug('%s continuing with PostProcessing, but I am not using metadata.' % module) pcheck = "fail" if pcheck == "fail": self._log("Unable to write metadata successfully - check mylar.log file. Attempting to continue without tagging...") - logger.fdebug(module + ' Unable to write metadata successfully - check mylar.log file. Attempting to continue without tagging...') + logger.fdebug('%s Unable to write metadata successfully - check mylar.log file. Attempting to continue without tagging...' %module) self.failed_files +=1 #we need to set this to the cbz file since not doing it will result in nothing getting moved. #not sure how to do this atm elif any([pcheck == "unrar error", pcheck == "corrupt"]): if ml is not None: self._log("This is a corrupt archive - whether CRC errors or it's incomplete. Marking as BAD, and not post-processing.") - logger.error(module + ' This is a corrupt archive - whether CRC errors or it is incomplete. Marking as BAD, and not post-processing.') + logger.error('%s This is a corrupt archive - whether CRC errors or it is incomplete. Marking as BAD, and not post-processing.' % module) self.failed_files +=1 self.valreturn.append({"self.log": self.log, "mode": 'stop'}) else: self._log("This is a corrupt archive - whether CRC errors or it's incomplete. Marking as BAD, and retrying a different copy.") - logger.error(module + ' This is a corrupt archive - whether CRC errors or it is incomplete. Marking as BAD, and retrying a different copy.') + logger.error('%s This is a corrupt archive - whether CRC errors or it is incomplete. Marking as BAD, and retrying a different copy.' % module) self.valreturn.append({"self.log": self.log, "mode": 'fail', "issueid": issueid, @@ -2258,8 +2272,8 @@ class PostProcessor(object): return self.queue.put(self.valreturn) elif pcheck.startswith('file not found'): filename_in_error = pcheck.split('||')[1] - self._log("The file cannot be found in the location provided [" + filename_in_error + "]. Please verify it exists, and re-run if necessary. Aborting.") - logger.error(module + ' The file cannot be found in the location provided [' + filename_in_error + ']. Please verify it exists, and re-run if necessary. 
Aborting') + self._log("The file cannot be found in the location provided [%s]. Please verify it exists, and re-run if necessary. Aborting." % filename_in_error) + logger.error('%s The file cannot be found in the location provided [%s]. Please verify it exists, and re-run if necessary. Aborting' % (module, filename_in_error)) self.failed_files +=1 self.valreturn.append({"self.log": self.log, "mode": 'stop'}) @@ -2271,7 +2285,7 @@ class PostProcessor(object): ofilename = os.path.split(pcheck)[1] ext = os.path.splitext(ofilename)[1] self._log("Sucessfully wrote metadata to .cbz - Continuing..") - logger.info(module + ' Sucessfully wrote metadata to .cbz (' + ofilename + ') - Continuing..') + logger.info('%s Sucessfully wrote metadata to .cbz (%s) - Continuing..' % (module, ofilename)) #Run Pre-script if mylar.CONFIG.ENABLE_PRE_SCRIPTS: @@ -2317,21 +2331,21 @@ class PostProcessor(object): pass else: odir, orig_filename = os.path.split(ml['ComicLocation']) - logger.fdebug(module + ' ofilename:' + ofilename) + logger.fdebug('%s ofilename: %s' % (module, ofilename)) if any([ofilename == odir, ofilename == odir[:-1], ofilename == '']): - self._log("There was a problem deciphering the filename/directory - please verify that the filename : [" + ofilename + "] exists in location [" + odir + "]. Aborting.") - logger.error(module + ' There was a problem deciphering the filename/directory - please verify that the filename : [' + ofilename + '] exists in location [' + odir + ']. Aborting.') + self._log("There was a problem deciphering the filename/directory - please verify that the filename : [%s] exists in location [%s]. Aborting." % (ofilename, odir)) + logger.error(module + ' There was a problem deciphering the filename/directory - please verify that the filename : [%s] exists in location [%s]. Aborting.' % (ofilename, odir)) self.failed_files +=1 self.valreturn.append({"self.log": self.log, "mode": 'stop'}) return self.queue.put(self.valreturn) - logger.fdebug(module + ' odir: ' + odir) - logger.fdebug(module + ' ofilename: ' + ofilename) + logger.fdebug('%s odir: %s' % (module, odir)) + logger.fdebug('%s ofilename: %s' % (module, ofilename)) ext = os.path.splitext(ofilename)[1] - logger.fdebug(module + ' ext:' + ext) + logger.fdebug('%s ext: %s' % (module, ext)) if ofilename is None or ofilename == '': - logger.error(module + ' Aborting PostProcessing - the filename does not exist in the location given. Make sure that ' + self.nzb_folder + ' exists and is the correct location.') + logger.error('%s Aborting PostProcessing - the filename does not exist in the location given. Make sure that %s exists and is the correct location.' % (module, self.nzb_folder)) self.failed_files +=1 self.valreturn.append({"self.log": self.log, "mode": 'stop'}) @@ -2342,7 +2356,7 @@ class PostProcessor(object): if mylar.CONFIG.FILE_FORMAT == '' or not mylar.CONFIG.RENAME_FILES: self._log("Rename Files isn't enabled...keeping original filename.") - logger.fdebug(module + ' Rename Files is not enabled - keeping original filename.') + logger.fdebug('%s Rename Files is not enabled - keeping original filename.' 
% module) #check if extension is in nzb_name - will screw up otherwise if ofilename.lower().endswith(self.extensions): nfilename = ofilename[:-4] @@ -2365,13 +2379,13 @@ class PostProcessor(object): nfilename = '%s %s'.strip() % (nfilename[:xyb], nfilename[yyb+3:]) logger.fdebug('issueid information [%s] removed successsfully: %s' % (rem_issueid, nfilename)) - self._log("New Filename: " + nfilename) - logger.fdebug(module + ' New Filename: ' + nfilename) + self._log("New Filename: %s" % nfilename) + logger.fdebug('%s New Filename: %s' % (module, nfilename)) src = os.path.join(odir, ofilename) checkdirectory = filechecker.validateAndCreateDirectory(comlocation, True, module=module) if not checkdirectory: - logger.warn(module + ' Error trying to validate/create directory. Aborting this process at this time.') + logger.warn('%s Error trying to validate/create directory. Aborting this process at this time.' % module) self.failed_files +=1 self.valreturn.append({"self.log": self.log, "mode": 'stop'}) @@ -2381,28 +2395,28 @@ class PostProcessor(object): dst = os.path.join(comlocation, (nfilename + ext).lower()) else: dst = os.path.join(comlocation, (nfilename + ext.lower())) - self._log("Source:" + src) - self._log("Destination:" + dst) - logger.fdebug(module + ' Source: ' + src) - logger.fdebug(module + ' Destination: ' + dst) + self._log("Source: %s" % src) + self._log("Destination: %s" % dst) + logger.fdebug('%s Source: %s' % (module, src)) + logger.fdebug('%s Destination: %s' % (module, dst)) if ml is None: #downtype = for use with updater on history table to set status to 'Downloaded' downtype = 'True' #non-manual run moving/deleting... - logger.fdebug(module + ' self.nzb_folder: ' + self.nzb_folder) - logger.fdebug(module + ' odir: ' + odir) - logger.fdebug(module + ' ofilename:' + ofilename) - logger.fdebug(module + ' nfilename:' + nfilename + ext) + logger.fdebug('%s self.nzb_folder: %s' % (module, self.nzb_folder)) + logger.fdebug('%s odir: %s' % (module, odir)) + logger.fdebug('%s ofilename: %s' % (module, ofilename)) + logger.fdebug('%s nfilename: %s' % (module, nfilename + ext)) if mylar.CONFIG.RENAME_FILES: if ofilename != (nfilename + ext): - logger.fdebug(module + ' Renaming ' + os.path.join(odir, ofilename) + ' ..to.. ' + os.path.join(odir, nfilename + ext)) + logger.fdebug('%s Renaming %s ..to.. %s' % (module, os.path.join(odir, ofilename), os.path.join(odir, nfilename + ext))) else: - logger.fdebug(module + ' Filename is identical as original, not renaming.') + logger.fdebug('%s Filename is identical as original, not renaming.' % module) src = os.path.join(odir, ofilename) try: - self._log("[" + mylar.CONFIG.FILE_OPTS + "] " + src + " - to - " + dst) + self._log("[%s] %s - to - %s" % (mylar.CONFIG.FILE_OPTS, src, dst)) checkspace = helpers.get_free_space(comlocation) if checkspace is False: if all([pcheck is not None, pcheck != 'fail']): # meta was done @@ -2412,10 +2426,10 @@ class PostProcessor(object): if not fileoperation: raise OSError except Exception as e: - self._log("Failed to " + mylar.CONFIG.FILE_OPTS + " " + src + " - check log for exact error.") + self._log("Failed to %s %s - check log for exact error." 
% (mylar.CONFIG.FILE_OPTS, src)) self._log("Post-Processing ABORTED.") logger.error('%s Failed to %s %s: %s' % (module, mylar.CONFIG.FILE_OPTS, src, e)) - logger.error(module + ' Post-Processing ABORTED') + logger.error('%s Post-Processing ABORTED' % module) self.valreturn.append({"self.log": self.log, "mode": 'stop'}) return self.queue.put(self.valreturn) @@ -2431,12 +2445,12 @@ class PostProcessor(object): src = os.path.join(odir, ofilename) if mylar.CONFIG.RENAME_FILES: if ofilename != (nfilename + ext): - logger.fdebug(module + ' Renaming ' + os.path.join(odir, ofilename)) #' ..to.. ' + os.path.join(odir, self.nzb_folder, str(nfilename + ext))) + logger.fdebug('%s Renaming %s ..to.. %s' % (module, os.path.join(odir, ofilename), os.path.join(odir, self.nzb_folder, str(nfilename + ext)))) else: - logger.fdebug(module + ' Filename is identical as original, not renaming.') + logger.fdebug('%s Filename is identical as original, not renaming.' % module) - logger.fdebug(module + ' odir src : ' + src) - logger.fdebug(module + '[' + mylar.CONFIG.FILE_OPTS + '] ' + src + ' ... to ... ' + dst) + logger.fdebug('%s odir src : %s' % (module, src)) + logger.fdebug('%s[%s] %s ... to ... %s' % (module, mylar.CONFIG.FILE_OPTS, src, dst)) try: checkspace = helpers.get_free_space(comlocation) if checkspace is False: @@ -2448,12 +2462,12 @@ class PostProcessor(object): raise OSError except Exception as e: logger.error('%s Failed to %s %s: %s' % (module, mylar.CONFIG.FILE_OPTS, src, e)) - logger.error(module + ' Post-Processing ABORTED.') + logger.error('%s Post-Processing ABORTED.' %module) self.failed_files +=1 self.valreturn.append({"self.log": self.log, "mode": 'stop'}) return self.queue.put(self.valreturn) - logger.info(module + ' ' + mylar.CONFIG.FILE_OPTS + ' successful to : ' + dst) + logger.info('%s %s successful to : %s' % (module, mylar.CONFIG.FILE_OPTS, dst)) if any([mylar.CONFIG.FILE_OPTS == 'move', mylar.CONFIG.FILE_OPTS == 'copy']): self.tidyup(odir, True, subpath, filename=orig_filename) @@ -2468,8 +2482,8 @@ class PostProcessor(object): os.umask(0) os.chmod(dst.rstrip(), permission) except OSError: - logger.error(module + ' Failed to change file permissions. Ensure that the user running Mylar has proper permissions to change permissions in : ' + dst) - logger.fdebug(module + ' Continuing post-processing but unable to change file permissions in ' + dst) + logger.error('%s Failed to change file permissions. Ensure that the user running Mylar has proper permissions to change permissions in : %s' % (module, dst)) + logger.fdebug('%s Continuing post-processing but unable to change file permissions in %s' % (module, dst)) #let's reset the fileop to the original setting just in case it's a manual pp run if mylar.CONFIG.FILE_OPTS == 'copy': @@ -2503,7 +2517,7 @@ class PostProcessor(object): ctrlVal = {"IssueID": issueid} newVal = {"Status": "Downloaded", "Location": os.path.basename(dst)} - logger.fdebug('writing: ' + str(newVal) + ' -- ' + str(ctrlVal)) + logger.fdebug('writing: %s -- %s' % (newVal, ctrlVal)) myDB.upsert(updatetable, newVal, ctrlVal) try: @@ -2524,13 +2538,13 @@ class PostProcessor(object): logger.info('grdst:' + grdst) checkdirectory = filechecker.validateAndCreateDirectory(grdst, True, module=module) if not checkdirectory: - logger.warn(module + ' Error trying to validate/create directory. Aborting this process at this time.') + logger.warn('%s Error trying to validate/create directory. Aborting this process at this time.' 
% module) self.valreturn.append({"self.log": self.log, "mode": 'stop'}) return self.queue.put(self.valreturn) if mylar.CONFIG.READ2FILENAME: - logger.fdebug(module + ' readingorder#: ' + str(arcinfo['ReadingOrder'])) + logger.fdebug('%s readingorder#: %s' % (module, arcinfo['ReadingOrder'])) if int(arcinfo['ReadingOrder']) < 10: readord = "00" + str(arcinfo['ReadingOrder']) elif int(arcinfo['ReadingOrder']) >= 10 and int(arcinfo['ReadingOrder']) <= 99: readord = "0" + str(arcinfo['ReadingOrder']) else: readord = str(arcinfo['ReadingOrder']) @@ -2540,10 +2554,10 @@ class PostProcessor(object): grab_dst = os.path.join(grdst, dfilename) - logger.fdebug(module + ' Destination Path : ' + grab_dst) + logger.fdebug('%s Destination Path : %s' % (module, grab_dst)) grab_src = dst - logger.fdebug(module + ' Source Path : ' + grab_src) - logger.info(module + '[' + mylar.CONFIG.ARC_FILEOPS.upper() + '] ' + str(dst) + ' into directory : ' + str(grab_dst)) + logger.fdebug('%s Source Path : %s' % (module, grab_src)) + logger.info('%s[%s] %s into directory: %s' % (module, mylar.CONFIG.ARC_FILEOPS.upper(), dst, grab_dst)) try: #need to ensure that src is pointing to the series in order to do a soft/hard-link properly @@ -2561,13 +2575,13 @@ class PostProcessor(object): IssArcID = 'S' + str(ml['IssueArcID']) myDB.action('DELETE from nzblog WHERE IssueID=? AND SARC=?', [IssArcID,arcinfo['StoryArc']]) - logger.fdebug(module + ' IssueArcID: ' + str(ml['IssueArcID'])) + logger.fdebug('%s IssueArcID: %s' % (module, ml['IssueArcID'])) ctrlVal = {"IssueArcID": ml['IssueArcID']} newVal = {"Status": "Downloaded", "Location": grab_dst} - logger.fdebug('writing: ' + str(newVal) + ' -- ' + str(ctrlVal)) + logger.fdebug('writing: %s -- %s' % (newVal, ctrlVal)) myDB.upsert("storyarcs", newVal, ctrlVal) - logger.fdebug(module + ' [' + arcinfo['StoryArc'] + '] Post-Processing completed for: ' + grab_dst) + logger.fdebug('%s [%s] Post-Processing completed for: %s' % (module, arcinfo['StoryArc'], grab_dst)) except: pass @@ -2681,9 +2695,9 @@ class FolderCheck(): #junk the queue as it's not needed for folder monitoring, but needed for post-processing to run without error. 
helpers.job_management(write=True, job='Folder Monitor', current_run=helpers.utctimestamp(), status='Running') mylar.MONITOR_STATUS = 'Running' - logger.info(self.module + ' Checking folder ' + mylar.CONFIG.CHECK_FOLDER + ' for newly snatched downloads') + logger.info('%s Checking folder %s for newly snatched downloads' % (self.module, mylar.CONFIG.CHECK_FOLDER)) PostProcess = PostProcessor('Manual Run', mylar.CONFIG.CHECK_FOLDER, queue=self.queue) result = PostProcess.Process() - logger.info(self.module + ' Finished checking for newly snatched downloads') + logger.info('%s Finished checking for newly snatched downloads' % self.module) helpers.job_management(write=True, job='Folder Monitor', last_run_completed=helpers.utctimestamp(), status='Waiting') mylar.MONITOR_STATUS = 'Waiting' diff --git a/mylar/__init__.py b/mylar/__init__.py index 876b9ed9..fa050b3b 100644 --- a/mylar/__init__.py +++ b/mylar/__init__.py @@ -504,7 +504,7 @@ def dbcheck(): c.execute('SELECT ReleaseDate from storyarcs') except sqlite3.OperationalError: try: - c.execute('CREATE TABLE IF NOT EXISTS storyarcs(StoryArcID TEXT, ComicName TEXT, IssueNumber TEXT, SeriesYear TEXT, IssueYEAR TEXT, StoryArc TEXT, TotalIssues TEXT, Status TEXT, inCacheDir TEXT, Location TEXT, IssueArcID TEXT, ReadingOrder INT, IssueID TEXT, ComicID TEXT, ReleaseDate TEXT, IssueDate TEXT, Publisher TEXT, IssuePublisher TEXT, IssueName TEXT, CV_ArcID TEXT, Int_IssueNumber INT, DynamicComicName TEXT, Volume TEXT, Manual TEXT, DateAdded TEXT, DigitalDate TEXT, Type TEXT)') + c.execute('CREATE TABLE IF NOT EXISTS storyarcs(StoryArcID TEXT, ComicName TEXT, IssueNumber TEXT, SeriesYear TEXT, IssueYEAR TEXT, StoryArc TEXT, TotalIssues TEXT, Status TEXT, inCacheDir TEXT, Location TEXT, IssueArcID TEXT, ReadingOrder INT, IssueID TEXT, ComicID TEXT, ReleaseDate TEXT, IssueDate TEXT, Publisher TEXT, IssuePublisher TEXT, IssueName TEXT, CV_ArcID TEXT, Int_IssueNumber INT, DynamicComicName TEXT, Volume TEXT, Manual TEXT, DateAdded TEXT, DigitalDate TEXT, Type TEXT, Aliases TEXT)') c.execute('INSERT INTO storyarcs(StoryArcID, ComicName, IssueNumber, SeriesYear, IssueYEAR, StoryArc, TotalIssues, Status, inCacheDir, Location, IssueArcID, ReadingOrder, IssueID, ComicID, ReleaseDate, IssueDate, Publisher, IssuePublisher, IssueName, CV_ArcID, Int_IssueNumber, DynamicComicName, Volume, Manual) SELECT StoryArcID, ComicName, IssueNumber, SeriesYear, IssueYEAR, StoryArc, TotalIssues, Status, inCacheDir, Location, IssueArcID, ReadingOrder, IssueID, ComicID, StoreDate, IssueDate, Publisher, IssuePublisher, IssueName, CV_ArcID, Int_IssueNumber, DynamicComicName, Volume, Manual FROM readinglist') c.execute('DROP TABLE readinglist') except sqlite3.OperationalError: @@ -527,7 +527,7 @@ def dbcheck(): c.execute('CREATE TABLE IF NOT EXISTS oneoffhistory (ComicName TEXT, IssueNumber TEXT, ComicID TEXT, IssueID TEXT, Status TEXT, weeknumber TEXT, year TEXT)') c.execute('CREATE TABLE IF NOT EXISTS jobhistory (JobName TEXT, prev_run_datetime timestamp, prev_run_timestamp REAL, next_run_datetime timestamp, next_run_timestamp REAL, last_run_completed TEXT, successful_completions TEXT, failed_completions TEXT, status TEXT)') c.execute('CREATE TABLE IF NOT EXISTS manualresults (provider TEXT, id TEXT, kind TEXT, comicname TEXT, volume TEXT, oneoff TEXT, fullprov TEXT, issuenumber TEXT, modcomicname TEXT, name TEXT, link TEXT, size TEXT, pack_numbers TEXT, pack_issuelist TEXT, comicyear TEXT, issuedate TEXT, tmpprov TEXT, pack TEXT, issueid TEXT, comicid TEXT, sarc TEXT, issuearcid 
TEXT)') - c.execute('CREATE TABLE IF NOT EXISTS storyarcs(StoryArcID TEXT, ComicName TEXT, IssueNumber TEXT, SeriesYear TEXT, IssueYEAR TEXT, StoryArc TEXT, TotalIssues TEXT, Status TEXT, inCacheDir TEXT, Location TEXT, IssueArcID TEXT, ReadingOrder INT, IssueID TEXT, ComicID TEXT, ReleaseDate TEXT, IssueDate TEXT, Publisher TEXT, IssuePublisher TEXT, IssueName TEXT, CV_ArcID TEXT, Int_IssueNumber INT, DynamicComicName TEXT, Volume TEXT, Manual TEXT, DateAdded TEXT, DigitalDate TEXT, Type TEXT)') + c.execute('CREATE TABLE IF NOT EXISTS storyarcs(StoryArcID TEXT, ComicName TEXT, IssueNumber TEXT, SeriesYear TEXT, IssueYEAR TEXT, StoryArc TEXT, TotalIssues TEXT, Status TEXT, inCacheDir TEXT, Location TEXT, IssueArcID TEXT, ReadingOrder INT, IssueID TEXT, ComicID TEXT, ReleaseDate TEXT, IssueDate TEXT, Publisher TEXT, IssuePublisher TEXT, IssueName TEXT, CV_ArcID TEXT, Int_IssueNumber INT, DynamicComicName TEXT, Volume TEXT, Manual TEXT, DateAdded TEXT, DigitalDate TEXT, Type TEXT, Aliases TEXT)') c.execute('CREATE TABLE IF NOT EXISTS ddl_info (ID TEXT UNIQUE, series TEXT, year TEXT, filename TEXT, size TEXT, issueid TEXT, comicid TEXT, link TEXT, status TEXT)') conn.commit c.close @@ -1043,6 +1043,11 @@ def dbcheck(): except sqlite3.OperationalError: c.execute('ALTER TABLE storyarcs ADD COLUMN Type TEXT') + try: + c.execute('SELECT Aliases from storyarcs') + except sqlite3.OperationalError: + c.execute('ALTER TABLE storyarcs ADD COLUMN Aliases TEXT') + ## -- searchresults Table -- try: c.execute('SELECT SRID from searchresults') diff --git a/mylar/config.py b/mylar/config.py index 8f9d009c..a48c48a2 100644 --- a/mylar/config.py +++ b/mylar/config.py @@ -3,6 +3,7 @@ from collections import OrderedDict from operator import itemgetter import os +import glob import codecs import shutil import re @@ -74,6 +75,7 @@ _CONFIG_DEFINITIONS = OrderedDict({ 'ALTERNATE_LATEST_SERIES_COVERS': (bool, 'General', False), 'SHOW_ICONS': (bool, 'General', False), 'FORMAT_BOOKTYPE': (bool, 'General', False), + 'CLEANUP_CACHE': (bool, 'General', False), 'RSS_CHECKINTERVAL': (int, 'Scheduler', 20), 'SEARCH_INTERVAL': (int, 'Scheduler', 360), @@ -771,6 +773,26 @@ class Config(object): except OSError: logger.error('[Cache Check] Could not create cache dir. Check permissions of datadir: ' + mylar.DATA_DIR) + if self.CLEANUP_CACHE is True: + logger.fdebug('[Cache Cleanup] Cache Cleanup initiated. Will delete items from cache that are no longer needed.') + cache_types = ['*.nzb', '*.torrent', '*.zip', '*.html', 'mylar_*'] + cntr = 0 + for x in cache_types: + for f in glob.glob(os.path.join(self.CACHE_DIR,x)): + try: + if os.path.isdir(f): + shutil.rmtree(f) + else: + os.remove(f) + except Exception as e: + logger.warn('[ERROR] Unable to remove %s from cache. Could be a possible permissions issue ?' % f) + cntr+=1 + + if cntr > 1: + logger.fdebug('[Cache Cleanup] Cache Cleanup finished. Cleaned %s items') + else: + logger.fdebug('[Cache Cleanup] Cache Cleanup finished. 
Nothing to clean!') + if all([self.GRABBAG_DIR is None, self.DESTINATION_DIR is not None]): self.GRABBAG_DIR = os.path.join(self.DESTINATION_DIR, 'Grabbag') logger.fdebug('[Grabbag Directory] Setting One-Off directory to default location: %s' % self.GRABBAG_DIR) @@ -843,8 +865,10 @@ class Config(object): else: logger.fdebug('Successfully created ComicTagger Settings location.') - if self.DDL_LOCATION is None: + if not self.DDL_LOCATION: self.DDL_LOCATION = self.CACHE_DIR + if self.ENABLE_DDL is True: + logger.info('Setting DDL Location set to : %s' % self.DDL_LOCATION) if self.MODE_32P is False and self.RSSFEED_32P is not None: mylar.KEYS_32P = self.parse_32pfeed(self.RSSFEED_32P) diff --git a/mylar/cv.py b/mylar/cv.py index 788fc9d5..0eb85191 100755 --- a/mylar/cv.py +++ b/mylar/cv.py @@ -72,7 +72,7 @@ def pulldetails(comicid, type, issueid=None, offset=1, arclist=None, comicidlist elif type == 'storyarc': PULLURL = mylar.CVURL + 'story_arcs/?api_key=' + str(comicapi) + '&format=xml&filter=name:' + str(issueid) + '&field_list=cover_date' elif type == 'comicyears': - PULLURL = mylar.CVURL + 'volumes/?api_key=' + str(comicapi) + '&format=xml&filter=id:' + str(comicidlist) + '&field_list=name,id,start_year,publisher,description,deck&offset=' + str(offset) + PULLURL = mylar.CVURL + 'volumes/?api_key=' + str(comicapi) + '&format=xml&filter=id:' + str(comicidlist) + '&field_list=name,id,start_year,publisher,description,deck,aliases&offset=' + str(offset) elif type == 'import': PULLURL = mylar.CVURL + 'issues/?api_key=' + str(comicapi) + '&format=xml&filter=id:' + (comicidlist) + '&field_list=cover_date,id,issue_number,name,date_last_updated,store_date,volume' + '&offset=' + str(offset) elif type == 'update_dates': @@ -340,10 +340,10 @@ def GetComicInfo(comicid, dom, safechk=None): comic['Type'] = 'TPB' elif 'hardcover' in comic_desc[:60].lower() and 'hardcover can be found' not in comic_desc.lower(): comic['Type'] = 'HC' - elif any(['one-shot' in comic_desc[:60].lower(), 'one shot' in comic_desc[:60].lower()]) and 'can be found' not in comic_desc.lower(): + elif any(['one-shot' in comic_desc[:60].lower(), 'one shot' in comic_desc[:60].lower()]) and any(['can be found' not in comic_desc.lower(), 'following the' not in comic_desc.lower()]): i = 0 comic['Type'] = 'One-Shot' - avoidwords = ['preceding', 'after the special'] + avoidwords = ['preceding', 'after the special', 'following the'] while i < 2: if i == 0: cbd = 'one-shot' @@ -718,11 +718,12 @@ def GetSeriesYears(dom): tempseries['SeriesYear'] = tempseries['SeriesYear'][:-1] desdeck = 0 - tempseries['Volume'] = 'None' - #the description field actually holds the Volume# - so let's grab it + desc_soup = None try: descchunk = dm.getElementsByTagName('description')[0].firstChild.wholeText + desc_soup = Soup(descchunk, "html.parser") + desclinks = desc_soup.findAll('a') comic_desc = drophtml(descchunk) desdeck +=1 except: @@ -736,6 +737,139 @@ def GetSeriesYears(dom): except: comic_deck = 'None' + #comic['ComicDescription'] = comic_desc + + try: + tempseries['Aliases'] = dm.getElementsByTagName('aliases')[0].firstChild.wholeText + tempseries['Aliases'] = re.sub('\n', '##', tempseries['Aliases']).strip() + if tempseries['Aliases'][-2:] == '##': + tempseries['Aliases'] = tempseries['Aliases'][:-2] + #logger.fdebug('Aliases: ' + str(aliases)) + except: + tempseries['Aliases'] = 'None' + + tempseries['Volume'] = 'None' #noversion' + + #figure out if it's a print / digital edition. 
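[Reviewer note] The GetSeriesYears additions below classify the series booktype from the ComicVine deck/description, mirroring the existing GetComicInfo logic. As an illustration only (condensed, not the exact guards used in the hunk, and `classify_booktype` is a hypothetical helper name), the decision roughly amounts to:

# Illustrative sketch of the deck/description classification implemented below.
# The real code additionally guards against phrases such as 'can be found' and
# 'following the' so reprint references are not mis-tagged as one-shots.
def classify_booktype(deck, desc):
    deck = (deck or '').lower()
    desc = (desc or '').lower()[:60]
    for text in (deck, desc):
        if not text:
            continue
        if 'print' in text:
            return 'Print'
        if 'digital' in text:
            return 'Digital'
        if 'paperback' in text or 'collects' in text:
            return 'TPB'
        if 'hardcover' in text:
            return 'HC'
        if 'one-shot' in text or 'one shot' in text:
            return 'One-Shot'
    return 'None'
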
+ tempseries['Type'] = 'None' + if comic_deck != 'None': + if any(['print' in comic_deck.lower(), 'digital' in comic_deck.lower(), 'paperback' in comic_deck.lower(), 'one shot' in re.sub('-', '', comic_deck.lower()).strip(), 'hardcover' in comic_deck.lower()]): + if 'print' in comic_deck.lower(): + tempseries['Type'] = 'Print' + elif 'digital' in comic_deck.lower(): + tempseries['Type'] = 'Digital' + elif 'paperback' in comic_deck.lower(): + tempseries['Type'] = 'TPB' + elif 'hardcover' in comic_deck.lower(): + tempseries['Type'] = 'HC' + elif 'oneshot' in re.sub('-', '', comic_deck.lower()).strip(): + tempseries['Type'] = 'One-Shot' + + if comic_desc != 'None' and tempseries['Type'] == 'None': + if 'print' in comic_desc[:60].lower() and 'print edition can be found' not in comic_desc.lower(): + tempseries['Type'] = 'Print' + elif 'digital' in comic_desc[:60].lower() and 'digital edition can be found' not in comic_desc.lower(): + tempseries['Type'] = 'Digital' + elif all(['paperback' in comic_desc[:60].lower(), 'paperback can be found' not in comic_desc.lower()]) or 'collects' in comic_desc[:60].lower(): + tempseries['Type'] = 'TPB' + elif 'hardcover' in comic_desc[:60].lower() and 'hardcover can be found' not in comic_desc.lower(): + tempseries['Type'] = 'HC' + elif any(['one-shot' in comic_desc[:60].lower(), 'one shot' in comic_desc[:60].lower()]) and any(['can be found' not in comic_desc.lower(), 'following the' not in comic_desc.lower()]): + i = 0 + tempseries['Type'] = 'One-Shot' + avoidwords = ['preceding', 'after the special', 'following the'] + while i < 2: + if i == 0: + cbd = 'one-shot' + elif i == 1: + cbd = 'one shot' + tmp1 = comic_desc[:60].lower().find(cbd) + if tmp1 != -1: + for x in avoidwords: + tmp2 = comic_desc[:tmp1].lower().find(x) + if tmp2 != -1: + logger.fdebug('FAKE NEWS: caught incorrect reference to one-shot. Forcing to Print') + tempseries['Type'] = 'Print' + i = 3 + break + i+=1 + else: + tempseries['Type'] = 'Print' + + if all([comic_desc != 'None', 'trade paperback' in comic_desc[:30].lower(), 'collecting' in comic_desc[:40].lower()]): + #ie. Trade paperback collecting Marvel Team-Up #9-11, 48-51, 72, 110 & 145. + first_collect = comic_desc.lower().find('collecting') + #logger.info('first_collect: %s' % first_collect) + #logger.info('comic_desc: %s' % comic_desc) + #logger.info('desclinks: %s' % desclinks) + issue_list = [] + micdrop = [] + if desc_soup is not None: + #if it's point form bullets, ignore it cause it's not the current volume stuff. + test_it = desc_soup.find('ul') + if test_it: + for x in test_it.findAll('li'): + if any(['Next' in x.findNext(text=True), 'Previous' in x.findNext(text=True)]): + mic_check = x.find('a') + micdrop.append(mic_check['data-ref-id']) + + for fc in desclinks: + #logger.info('fc: %s' % fc) + fc_id = fc['data-ref-id'] + #logger.info('fc_id: %s' % fc_id) + if fc_id in micdrop: + continue + fc_name = fc.findNext(text=True) + if fc_id.startswith('4000'): + fc_cid = None + fc_isid = fc_id + iss_start = fc_name.find('#') + issuerun = fc_name[iss_start:].strip() + fc_name = fc_name[:iss_start].strip() + elif fc_id.startswith('4050'): + fc_cid = fc_id + fc_isid = None + issuerun = fc.next_sibling + if issuerun is not None: + lines = re.sub("[^0-9]", ' ', issuerun).strip().split(' ') + if len(lines) > 0: + for x in sorted(lines, reverse=True): + srchline = issuerun.rfind(x) + if srchline != -1: + try: + if issuerun[srchline+len(x)] == ',' or issuerun[srchline+len(x)] == '.' 
or issuerun[srchline+len(x)] == ' ': + issuerun = issuerun[:srchline+len(x)] + break + except Exception as e: + logger.warn('[ERROR] %s' % e) + continue + else: + iss_start = fc_name.find('#') + issuerun = fc_name[iss_start:].strip() + fc_name = fc_name[:iss_start].strip() + + if issuerun.endswith('.') or issuerun.endswith(','): + #logger.fdebug('Changed issuerun from %s to %s' % (issuerun, issuerun[:-1])) + issuerun = issuerun[:-1] + if issuerun.endswith(' and '): + issuerun = issuerun[:-4].strip() + elif issuerun.endswith(' and'): + issuerun = issuerun[:-3].strip() + else: + continue + # except: + # pass + issue_list.append({'series': fc_name, + 'comicid': fc_cid, + 'issueid': fc_isid, + 'issues': issuerun}) + #first_collect = cis + + logger.info('Collected issues in volume: %s' % issue_list) + tempseries['Issue_List'] = issue_list + else: + tempseries['Issue_List'] = 'None' + while (desdeck > 0): if desdeck == 1: if comic_desc == 'None': @@ -760,11 +894,11 @@ def GetSeriesYears(dom): if i == 0: vfind = comicDes[v_find:v_find +15] #if it's volume 5 format basenums = {'zero': '0', 'one': '1', 'two': '2', 'three': '3', 'four': '4', 'five': '5', 'six': '6', 'seven': '7', 'eight': '8', 'nine': '9', 'ten': '10', 'i': '1', 'ii': '2', 'iii': '3', 'iv': '4', 'v': '5'} - logger.fdebug('volume X format - ' + str(i) + ': ' + vfind) + logger.fdebug('volume X format - %s: %s' % (i, vfind)) else: vfind = comicDes[:v_find] # if it's fifth volume format basenums = {'zero': '0', 'first': '1', 'second': '2', 'third': '3', 'fourth': '4', 'fifth': '5', 'sixth': '6', 'seventh': '7', 'eighth': '8', 'nineth': '9', 'tenth': '10', 'i': '1', 'ii': '2', 'iii': '3', 'iv': '4', 'v': '5'} - logger.fdebug('X volume format - ' + str(i) + ': ' + vfind) + logger.fdebug('X volume format - %s: %s' % (i, vfind)) volconv = '' for nums in basenums: if nums in vfind.lower(): @@ -773,6 +907,7 @@ def GetSeriesYears(dom): break #logger.info('volconv: ' + str(volconv)) + #now we attempt to find the character position after the word 'volume' if i == 0: volthis = vfind.lower().find('volume') volthis = volthis + 6 # add on the actual word to the position so that we can grab the subsequent digit @@ -790,7 +925,7 @@ def GetSeriesYears(dom): ledigit = re.sub("[^0-9]", "", vf[0]) if ledigit != '': tempseries['Volume'] = ledigit - logger.fdebug("Volume information found! Adding to series record : volume " + tempseries['Volume']) + logger.fdebug("Volume information found! 
Adding to series record : volume %s" % tempseries['Volume']) break except: pass @@ -800,7 +935,7 @@ def GetSeriesYears(dom): i += 1 if tempseries['Volume'] == 'None': - logger.fdebug('tempseries[Volume]:' + str(tempseries['Volume'])) + logger.fdebug('tempseries[Volume]: %s' % tempseries['Volume']) desdeck -= 1 else: break @@ -810,7 +945,9 @@ def GetSeriesYears(dom): "ComicName": tempseries['Series'], "SeriesYear": tempseries['SeriesYear'], "Publisher": tempseries['Publisher'], - "Volume": tempseries['Volume']}) + "Volume": tempseries['Volume'], + "Aliases": tempseries['Aliases'], + "Type": tempseries['Type']}) return serieslist diff --git a/mylar/db.py b/mylar/db.py index d81d61dd..c9100e7a 100755 --- a/mylar/db.py +++ b/mylar/db.py @@ -26,8 +26,7 @@ import time import Queue import mylar - -from mylar import logger +import logger db_lock = threading.Lock() mylarQueue = Queue.Queue() diff --git a/mylar/filechecker.py b/mylar/filechecker.py index 6c13be35..9eda92d0 100755 --- a/mylar/filechecker.py +++ b/mylar/filechecker.py @@ -234,7 +234,7 @@ class FileChecker(object): ab = len(path) tmppath = subpath[ab:] else: - tmppath = re.sub(path, '', subpath).strip() + tmppath = subpath.replace(path, '').strip() path_list = os.path.normpath(tmppath) if '/' == path_list[0] or '\\' == path_list[0]: diff --git a/mylar/getcomics.py b/mylar/getcomics.py index aaad9302..8aa46f5f 100644 --- a/mylar/getcomics.py +++ b/mylar/getcomics.py @@ -27,8 +27,10 @@ import json from bs4 import BeautifulSoup import requests import cfscrape +import zipfile import logger import mylar +from mylar import db class GC(object): @@ -63,7 +65,8 @@ class GC(object): return self.search_results() - def loadsite(self, title, link): + def loadsite(self, id, link): + title = os.path.join(mylar.CONFIG.CACHE_DIR, 'getcomics-' + id) with cfscrape.create_scraper() as s: self.cf_cookievalue, cf_user_agent = s.get_tokens(link, headers=self.headers) @@ -89,6 +92,31 @@ class GC(object): link = lk['href'] titlefind = f.find("h1", {"class": "post-title"}) title = titlefind.get_text(strip=True) + title = re.sub(u'\u2013', '-', title).strip() + filename = title + issues = None + pack = False + #see if it's a pack type + issfind_st = title.find('#') + issfind_en = title.find('-', issfind_st) + if issfind_en != -1: + if all([title[issfind_en+1] == ' ', title[issfind_en+2].isdigit()]): + iss_en = title.find(' ', issfind_en+2) + if iss_en != -1: + issues = title[issfind_st+1:iss_en] + pack = True + if title[issfind_en+1].isdigit(): + iss_en = title.find(' ', issfind_en+1) + if iss_en != -1: + issues = title[issfind_st+1:iss_en] + pack = True + + # if it's a pack - remove the issue-range and the possible issue years (cause it most likely will span) and pass thru as separate items + if pack is True: + title = re.sub(issues, '', title).strip() + if title.endswith('#'): + title = title[:-1].strip() + option_find = f.find("p", {"style": "text-align: center;"}) i = 0 while i <= 2: @@ -96,6 +124,8 @@ class GC(object): if 'Year' in option_find: year = option_find.findNext(text=True) year = re.sub('\|', '', year).strip() + if pack is True and '-' in year: + title = re.sub('\('+year+'\)', '', title).strip() else: size = option_find.findNext(text=True) if all([re.sub(':', '', size).strip() != 'Size', len(re.sub('[^0-9]', '', size).strip()) > 0]): @@ -114,7 +144,10 @@ class GC(object): datestamp = time.mktime(time.strptime(datefull, "%Y-%m-%d")) resultlist.append({"title": title, "pubdate": datetime.datetime.fromtimestamp(float(datestamp)).strftime('%a, %d %b 
%Y %H:%M:%S'), + "filename": filename, "size": re.sub(' ', '', size).strip(), + "pack": pack, + "issues": issues, "link": link, "year": year, "id": re.sub('post-', '', id).strip(), @@ -126,8 +159,9 @@ class GC(object): return results - def parse_downloadresults(self, title, mainlink): - + def parse_downloadresults(self, id, mainlink): + myDB = db.DBConnection() + title = os.path.join(mylar.CONFIG.CACHE_DIR, 'getcomics-' + id) soup = BeautifulSoup(open(title+'.html'), 'html.parser') orig_find = soup.find("p", {"style": "text-align: center;"}) i = 0 @@ -201,23 +235,35 @@ class GC(object): for x in links: logger.fdebug('[%s] %s - %s' % (x['site'], x['volume'], x['link'])) + ctrlval = {'id': id} + vals = {'series': series, + 'year': year, + 'size': size, + 'issueid': self.issueid, + 'comicid': self.comicid, + 'link': link, + 'status': 'Queued'} + myDB.upsert('ddl_info', vals, ctrlval) + mylar.DDL_QUEUE.put({'link': link, 'mainlink': mainlink, 'series': series, 'year': year, 'size': size, 'comicid': self.comicid, - 'issueid': self.issueid}) + 'issueid': self.issueid, + 'id': id}) return {'success': True} - def downloadit(self, link, mainlink): + def downloadit(self, id, link, mainlink): if mylar.DDL_LOCK is True: logger.fdebug('[DDL] Another item is currently downloading via DDL. Only one item can be downloaded at a time using DDL. Patience.') return else: mylar.DDL_LOCK = True + myDB = db.DBConnection() filename = None try: with cfscrape.create_scraper() as s: @@ -228,6 +274,9 @@ class GC(object): path = os.path.join(mylar.CONFIG.DDL_LOCATION, filename) + #write the filename to the db for tracking purposes... + myDB.upsert('ddl_info', {'filename': filename}, {'id': id}) + if t.headers.get('content-encoding') == 'gzip': #.get('Content-Encoding') == 'gzip': buf = StringIO(t.content) f = gzip.GzipFile(fileobj=buf) @@ -248,9 +297,29 @@ class GC(object): else: mylar.DDL_LOCK = False if os.path.isfile(path): + if path.endswith('.zip'): + new_path = os.path.join(mylar.CONFIG.DDL_LOCATION, re.sub('.zip', '', filename).strip()) + logger.info('Zip file detected. Unzipping into new modified path location: %s' % new_path) + try: + zip_f = zipfile.ZipFile(path, 'r') + zip_f.extractall(new_path) + zip_f.close() + except Exception as e: + logger.warn('[ERROR: %s] Unable to extract zip file: %s' % (e, new_path)) + return ({"success": False, + "filename": filename, + "path": None}) + else: + try: + os.remove(path) + except Exception as e: + logger.warn('[ERROR: %s] Unable to remove zip file from %s after extraction.' 
% (e, path)) + filename = None + else: + new_path = path return ({"success": True, "filename": filename, - "path": mylar.CONFIG.DDL_LOCATION}) + "path": new_path}) def issue_list(self, pack): #packlist = [x.strip() for x in pack.split(',)] diff --git a/mylar/helpers.py b/mylar/helpers.py index 75262506..73ce868b 100755 --- a/mylar/helpers.py +++ b/mylar/helpers.py @@ -21,6 +21,7 @@ from datetime import timedelta, date import subprocess import requests import shlex +import Queue import json import re import sys @@ -37,7 +38,7 @@ from apscheduler.triggers.interval import IntervalTrigger import mylar import logger -from mylar import sabnzbd, nzbget, process, getcomics +from mylar import db, sabnzbd, nzbget, process, getcomics def multikeysort(items, columns): @@ -266,7 +267,7 @@ def decimal_issue(iss): return deciss, dec_except def rename_param(comicid, comicname, issue, ofilename, comicyear=None, issueid=None, annualize=None, arc=False): - import db + #import db myDB = db.DBConnection() comicid = str(comicid) # it's coming in unicoded... @@ -718,7 +719,7 @@ def ComicSort(comicorder=None, sequence=None, imported=None): if sequence: # if it's on startup, load the sql into a tuple for use to avoid record-locking i = 0 - import db + #import db myDB = db.DBConnection() comicsort = myDB.select("SELECT * FROM comics ORDER BY ComicSortName COLLATE NOCASE") comicorderlist = [] @@ -803,7 +804,7 @@ def updateComicLocation(): # - set NEWCOMDIR = new ComicLocation #after running, set ComicLocation to new location in Configuration GUI - import db + #import db myDB = db.DBConnection() if mylar.CONFIG.NEWCOM_DIR is not None: logger.info('Performing a one-time mass update to Comic Location') @@ -935,7 +936,7 @@ def cleanhtml(raw_html): def issuedigits(issnum): - import db + #import db int_issnum = None @@ -1129,7 +1130,7 @@ def issuedigits(issnum): def checkthepub(ComicID): - import db + #import db myDB = db.DBConnection() publishers = ['marvel', 'dc', 'darkhorse'] pubchk = myDB.selectone("SELECT * FROM comics WHERE ComicID=?", [ComicID]).fetchone() @@ -1146,7 +1147,7 @@ def checkthepub(ComicID): return mylar.CONFIG.INDIE_PUB def annual_update(): - import db + #import db myDB = db.DBConnection() annuallist = myDB.select('SELECT * FROM annuals') if annuallist is None: @@ -1202,7 +1203,7 @@ def renamefile_readingorder(readorder): return readord def latestdate_fix(): - import db + #import db datefix = [] cnupdate = [] myDB = db.DBConnection() @@ -1254,7 +1255,7 @@ def latestdate_fix(): return def upgrade_dynamic(): - import db + #import db dynamic_comiclist = [] myDB = db.DBConnection() #update the comicdb to include the Dynamic Names (and any futher changes as required) @@ -1293,7 +1294,6 @@ def upgrade_dynamic(): def checkFolder(folderpath=None): from mylar import PostProcessor - import Queue queue = Queue.Queue() #monitor a selected folder for 'snatched' files that haven't been processed @@ -1339,7 +1339,7 @@ def LoadAlternateSearchNames(seriesname_alt, comicid): return Alternate_Names def havetotals(refreshit=None): - import db + #import db comics = [] myDB = db.DBConnection() @@ -1827,7 +1827,7 @@ def IssueDetails(filelocation, IssueID=None, justinfo=False): return issuedetails def get_issue_title(IssueID=None, ComicID=None, IssueNumber=None, IssueArcID=None): - import db + #import db myDB = db.DBConnection() if IssueID: issue = myDB.selectone('SELECT * FROM issues WHERE IssueID=?', [IssueID]).fetchone() @@ -1859,7 +1859,7 @@ def int_num(s): return float(s) def listPull(weeknumber, year): - import db + 
#import db library = {} myDB = db.DBConnection() # Get individual comics @@ -1869,7 +1869,7 @@ def listPull(weeknumber, year): return library def listLibrary(comicid=None): - import db + #import db library = {} myDB = db.DBConnection() if comicid is None: @@ -1896,7 +1896,7 @@ def listLibrary(comicid=None): return library def listStoryArcs(): - import db + #import db library = {} myDB = db.DBConnection() # Get Distinct Arc IDs @@ -1910,7 +1910,7 @@ def listStoryArcs(): return library def listoneoffs(weeknumber, year): - import db + #import db library = [] myDB = db.DBConnection() # Get Distinct one-off issues from the pullist that have already been downloaded / snatched @@ -1926,7 +1926,7 @@ def listoneoffs(weeknumber, year): return library def manualArc(issueid, reading_order, storyarcid): - import db + #import db if issueid.startswith('4000-'): issueid = issueid[5:] @@ -2062,7 +2062,7 @@ def manualArc(issueid, reading_order, storyarcid): return def listIssues(weeknumber, year): - import db + #import db library = [] myDB = db.DBConnection() # Get individual issues @@ -2107,7 +2107,7 @@ def listIssues(weeknumber, year): return library def incr_snatched(ComicID): - import db + #import db myDB = db.DBConnection() incr_count = myDB.selectone("SELECT Have FROM Comics WHERE ComicID=?", [ComicID]).fetchone() logger.fdebug('Incrementing HAVE count total to : ' + str(incr_count['Have'] + 1)) @@ -2123,7 +2123,7 @@ def duplicate_filecheck(filename, ComicID=None, IssueID=None, StoryArcID=None, r #storyarcid = the storyarcid of the issue that's being checked for duplication. #rtnval = the return value of a previous duplicate_filecheck that's re-running against new values # - import db + #import db myDB = db.DBConnection() logger.info('[DUPECHECK] Duplicate check for ' + filename) @@ -2401,7 +2401,7 @@ def humanize_time(amount, units = 'seconds'): return buf def issue_status(IssueID): - import db + #import db myDB = db.DBConnection() IssueID = str(IssueID) @@ -2435,7 +2435,7 @@ def crc(filename): return hashlib.md5(filename).hexdigest() def issue_find_ids(ComicName, ComicID, pack, IssueNumber): - import db + #import db myDB = db.DBConnection() @@ -2562,7 +2562,7 @@ def cleanHost(host, protocol = True, ssl = False, username = None, password = No return host def checkthe_id(comicid=None, up_vals=None): - import db + #import db myDB = db.DBConnection() if not up_vals: chk = myDB.selectone("SELECT * from ref32p WHERE ComicID=?", [comicid]).fetchone() @@ -2593,7 +2593,7 @@ def checkthe_id(comicid=None, up_vals=None): myDB.upsert("ref32p", newVal, ctrlVal) def updatearc_locs(storyarcid, issues): - import db + #import db myDB = db.DBConnection() issuelist = [] for x in issues: @@ -2683,7 +2683,7 @@ def updatearc_locs(storyarcid, issues): def spantheyears(storyarcid): - import db + #import db myDB = db.DBConnection() totalcnt = myDB.select("SELECT * FROM storyarcs WHERE StoryArcID=?", [storyarcid]) @@ -2747,7 +2747,7 @@ def arcformat(arc, spanyears, publisher): return dstloc def torrentinfo(issueid=None, torrent_hash=None, download=False, monitor=False): - import db + #import db from base64 import b16encode, b32decode #check the status of the issueid to make sure it's in Snatched status and was grabbed via torrent. 
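[Reviewer note] The ddl_downloader hunk further below records each queued item's state in the new ddl_info table before and after the download. A minimal sketch of that bookkeeping, reusing helpers that already exist in this patch (db.DBConnection, getcomics.GC.downloadit); the wrapper name process_ddl_item is hypothetical:

# Sketch of the status tracking added to ddl_downloader below.
from mylar import db, getcomics

def process_ddl_item(item):
    myDB = db.DBConnection()
    ctrlval = {'id': item['id']}
    # mark the ddl_info row as actively downloading before handing off
    myDB.upsert('ddl_info', {'status': 'Downloading'}, ctrlval)
    ddzstat = getcomics.GC().downloadit(item['id'], item['link'], item['mainlink'])
    # record completion so the queue state reflects the finished download
    myDB.upsert('ddl_info', {'status': 'Completed'}, ctrlval)
    return ddzstat
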
@@ -3009,7 +3009,7 @@ def weekly_info(week=None, year=None, current=None): return weekinfo def latestdate_update(): - import db + #import db myDB = db.DBConnection() ccheck = myDB.select('SELECT a.ComicID, b.IssueID, a.LatestDate, b.ReleaseDate, b.Issue_Number from comics as a left join issues as b on a.comicid=b.comicid where a.LatestDate < b.ReleaseDate or a.LatestDate like "%Unknown%" group by a.ComicID') if ccheck is None or len(ccheck) == 0: @@ -3031,6 +3031,7 @@ def latestdate_update(): myDB.upsert("comics", newVal, ctrlVal) def ddl_downloader(queue): + myDB = db.DBConnection() while True: if mylar.DDL_LOCK is True: time.sleep(5) @@ -3042,19 +3043,37 @@ def ddl_downloader(queue): logger.info('Cleaning up workers for shutdown') break + #write this to the table so we have a record of what's going on. + ctrlval = {'id': item['id']} + val = {'status': 'Downloading'} + myDB.upsert('ddl_info', val, ctrlval) + ddz = getcomics.GC() - ddzstat = ddz.downloadit(item['link'], item['mainlink']) + ddzstat = ddz.downloadit(item['id'], item['link'], item['mainlink']) + + nval = {'status': 'Completed'} + myDB.upsert('ddl_info', nval, ctrlval) if all([ddzstat['success'] is True, mylar.CONFIG.POST_PROCESSING is True]): - logger.info('%s successfully downloaded - now initiating post-processing.' % (ddzstat['filename'])) try: - mylar.PP_QUEUE.put({'nzb_name': ddzstat['filename'], - 'nzb_folder': ddzstat['path'], - 'failed': False, - 'issueid': item['issueid'], - 'comicid': item['comicid'], - 'apicall': True, - 'ddl': True}) + if ddzstat['filename'] is None: + logger.info('%s successfully downloaded - now initiating post-processing.' % (os.path.basename(ddzstat['path']))) + mylar.PP_QUEUE.put({'nzb_name': ddzstat['filename'], + 'nzb_folder': ddzstat['path'], + 'failed': False, + 'issueid': None, + 'comicid': item['comicid'], + 'apicall': True, + 'ddl': True}) + else: + logger.info('%s successfully downloaded - now initiating post-processing.' 
% (ddzstat['filename'])) + mylar.PP_QUEUE.put({'nzb_name': ddzstat['filename'], + 'nzb_folder': ddzstat['path'], + 'failed': False, + 'issueid': item['issueid'], + 'comicid': item['comicid'], + 'apicall': True, + 'ddl': True}) except Exception as e: logger.info('process error: %s [%s]' %(e, ddzstat)) elif mylar.CONFIG.POST_PROCESSING is True: @@ -3323,7 +3342,7 @@ def date_conversion(originaldate): def job_management(write=False, job=None, last_run_completed=None, current_run=None, status=None): jobresults = [] - import db + #import db myDB = db.DBConnection() if job is None: @@ -3540,7 +3559,7 @@ def job_management(write=False, job=None, last_run_completed=None, current_run=N def stupidchk(): - import db + #import db myDB = db.DBConnection() CCOMICS = myDB.select("SELECT COUNT(*) FROM comics WHERE Status='Active'") ens = myDB.select("SELECT COUNT(*) FROM comics WHERE Status='Loading' OR Status='Paused'") @@ -3854,7 +3873,7 @@ def publisherImages(publisher): return comicpublisher def lookupthebitches(filelist, folder, nzbname, nzbid, prov, hash, pulldate): - import db + #import db myDB = db.DBConnection() watchlist = listLibrary() matchlist = [] @@ -3894,7 +3913,7 @@ def lookupthebitches(filelist, folder, nzbname, nzbid, prov, hash, pulldate): def DateAddedFix(): - import db + #import db myDB = db.DBConnection() DA_A = datetime.datetime.today() DateAdded = DA_A.strftime('%Y-%m-%d') @@ -3905,8 +3924,6 @@ def DateAddedFix(): for an in annuals: myDB.upsert("annuals", {'DateAdded': DateAdded}, {'IssueID': an[0]}) - - def file_ops(path,dst,arc=False,one_off=False): # # path = source path + filename # # dst = destination path + filename @@ -4051,7 +4068,6 @@ def file_ops(path,dst,arc=False,one_off=False): else: return False - from threading import Thread class ThreadWithReturnValue(Thread): diff --git a/mylar/search.py b/mylar/search.py index 1d783fb6..09d22d97 100755 --- a/mylar/search.py +++ b/mylar/search.py @@ -841,7 +841,7 @@ def NZB_SEARCH(ComicName, IssueNumber, ComicYear, SeriesYear, Publisher, IssueDa pack_warning = True continue - logger.fdebug("checking search result: " + entry['title']) + logger.fdebug("checking search result: %s" % entry['title']) #some nzbsites feel that comics don't deserve a nice regex to strip the crap from the header, the end result is that we're #dealing with the actual raw header which causes incorrect matches below. #this is a temporary cut from the experimental search option (findcomicfeed) as it does this part well usually. @@ -938,20 +938,20 @@ def NZB_SEARCH(ComicName, IssueNumber, ComicYear, SeriesYear, Publisher, IssueDa else: if entry['title'][:17] != '0-Day Comics Pack': comsize_m = helpers.human_size(comsize_b) - logger.fdebug("size given as: " + str(comsize_m)) + logger.fdebug('size given as: %s' % comsize_m) #----size constraints. #if it's not within size constaints - dump it now and save some time. if mylar.CONFIG.USE_MINSIZE: conv_minsize = helpers.human2bytes(mylar.CONFIG.MINSIZE + "M") - logger.fdebug("comparing Min threshold " + str(conv_minsize) + " .. to .. nzb " + str(comsize_b)) + logger.fdebug('comparing Min threshold %s .. to .. nzb %s' % (conv_minsize, comsize_b)) if int(conv_minsize) > int(comsize_b): - logger.fdebug("Failure to meet the Minimum size threshold - skipping") + logger.fdebug('Failure to meet the Minimum size threshold - skipping') continue if mylar.CONFIG.USE_MAXSIZE: conv_maxsize = helpers.human2bytes(mylar.CONFIG.MAXSIZE + "M") - logger.fdebug("comparing Max threshold " + str(conv_maxsize) + " .. to .. 
nzb " + str(comsize_b)) + logger.fdebug('comparing Max threshold %s .. to .. nzb %s' % (conv_maxsize, comsize_b)) if int(comsize_b) > int(conv_maxsize): - logger.fdebug("Failure to meet the Maximium size threshold - skipping") + logger.fdebug('Failure to meet the Maximium size threshold - skipping') continue #---- date constaints. @@ -1014,7 +1014,7 @@ def NZB_SEARCH(ComicName, IssueNumber, ComicYear, SeriesYear, Publisher, IssueDa else: postdate_int = time.mktime(dateconv[:len(dateconv) -1]) except: - logger.warn('Unable to parse posting date from provider result set for :' + entry['title']) + logger.warn('Unable to parse posting date from provider result set for : %s' % entry['title']) continue if all([digitaldate != '0000-00-00', digitaldate is not None]): @@ -1068,23 +1068,23 @@ def NZB_SEARCH(ComicName, IssueNumber, ComicYear, SeriesYear, Publisher, IssueDa #logger.info('dateconv2: %s' % dateconv2.date()) #logger.info('digconv2: %s' % digconv2.date()) if digitaldate != '0000-00-00' and dateconv2.date() >= digconv2.date(): - logger.fdebug(str(pubdate) + ' is after DIGITAL store date of ' + str(digitaldate)) + logger.fdebug('%s is after DIGITAL store date of %s' % (pubdate, digitaldate)) elif dateconv2.date() < issconv2.date(): logger.fdebug('[CONV]pubdate: %s < storedate: %s' % (dateconv2.date(), issconv2.date())) - logger.fdebug(str(pubdate) + ' is before store date of ' + str(stdate) + '. Ignoring search result as this is not the right issue.') + logger.fdebug('%s is before store date of %s. Ignoring search result as this is not the right issue.' % (pubdate, stdate)) continue else: - logger.fdebug(str(pubdate) + ' is after store date of ' + str(stdate)) + logger.fdebug('%s is after store date of %s' % (pubdate, stdate)) except: #if the above fails, drop down to the integer compare method as a failsafe. if digitaldate != '0000-00-00' and postdate_int >= digitaldate_int: - logger.fdebug(str(pubdate) + ' is after DIGITAL store date of ' + str(digitaldate)) + logger.fdebug('%s is after DIGITAL store date of %s' % (pubdate, digitaldate)) elif postdate_int < issuedate_int: logger.fdebug('[INT]pubdate: %s < storedate: %s' % (postdate_int, issuedate_int)) - logger.fdebug(str(pubdate) + ' is before store date of ' + str(stdate) + '. Ignoring search result as this is not the right issue.') + logger.fdebug('%s is before store date of %s. Ignoring search result as this is not the right issue.' % (pubdate, stdate)) continue else: - logger.fdebug(str(pubdate) + ' is after store date of ' + str(stdate)) + logger.fdebug('%s is after store date of %s' % (pubdate, stdate)) # -- end size constaints. 
if '(digital first)' in ComicTitle.lower(): #entry['title'].lower(): @@ -1095,7 +1095,7 @@ def NZB_SEARCH(ComicName, IssueNumber, ComicYear, SeriesYear, Publisher, IssueDa else: thisentry = ComicTitle #entry['title'] - logger.fdebug("Entry: " + thisentry) + logger.fdebug('Entry: %s' % thisentry) cleantitle = thisentry if 'mixed format' in cleantitle.lower(): @@ -1286,7 +1286,7 @@ def NZB_SEARCH(ComicName, IssueNumber, ComicYear, SeriesYear, Publisher, IssueDa if all([nzbprov == '32P', allow_packs == True, RSS == 'no']): logger.fdebug('pack:' + entry['pack']) - if all([nzbprov == '32P', RSS == 'no', allow_packs == True]) and any([entry['pack'] == '1', entry['pack'] == '2']): + if (all([nzbprov == '32P', RSS == 'no', allow_packs == True]) and any([entry['pack'] == '1', entry['pack'] == '2'])) or (all([nzbprov == 'ddl', entry['pack'] is True])): #allow_packs is True if nzbprov == '32P': if entry['pack'] == '2': logger.fdebug('[PACK-QUEUE] Diamond FreeLeech Pack detected.') @@ -1294,21 +1294,26 @@ def NZB_SEARCH(ComicName, IssueNumber, ComicYear, SeriesYear, Publisher, IssueDa logger.fdebug('[PACK-QUEUE] Normal Pack detected. Checking available inkdrops prior to downloading.') else: logger.fdebug('[PACK-QUEUE] Invalid Pack.') + else: + logger.fdebug('[PACK-QUEUE] DDL Pack detected for %s.' % entry['filename']) - #find the pack range. - pack_issuelist = None - issueid_info = None - if not entry['title'].startswith('0-Day Comics Pack'): - pack_issuelist = entry['issues'] - issueid_info = helpers.issue_find_ids(ComicName, ComicID, pack_issuelist, IssueNumber) - if issueid_info['valid'] == True: - logger.info('Issue Number ' + IssueNumber + ' exists within pack. Continuing.') - else: - logger.fdebug('Issue Number ' + IssueNumber + ' does NOT exist within this pack. Skipping') - continue + #find the pack range. + pack_issuelist = None + issueid_info = None + if not entry['title'].startswith('0-Day Comics Pack'): + pack_issuelist = entry['issues'] + issueid_info = helpers.issue_find_ids(ComicName, ComicID, pack_issuelist, IssueNumber) + if issueid_info['valid'] == True: + logger.info('Issue Number %s exists within pack. Continuing.' % IssueNumber) + else: + logger.fdebug('Issue Number %s does NOT exist within this pack. Skipping' % IssueNumber) + continue #pack support. 
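[Reviewer note] The pack handling above (now extended to the DDL provider) only snatches a pack when the wanted issue falls inside the pack's issue range; the real check goes through helpers.issue_find_ids(). A minimal sketch of the underlying range test, assuming a "start - end" issue string as parsed from a GetComics title in the earlier getcomics.py hunk (issue_in_pack is an illustrative name, whole issue numbers only):

def issue_in_pack(issue_number, pack_issues):
    # pack_issues is a string such as "1 - 30" taken from the pack title
    try:
        start, end = [int(x.strip()) for x in pack_issues.split('-')]
        return start <= int(issue_number) <= end
    except ValueError:
        return False  # not a simple whole-number range
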
nowrite = False - nzbid = generate_id(nzbprov, entry['link']) + if all([nzbprov == 'ddl', 'getcomics' in entry['link']]): + nzbid = entry['id'] + else: + nzbid = generate_id(nzbprov, entry['link']) if manual is not True: downloadit = True else: @@ -1382,6 +1387,7 @@ def NZB_SEARCH(ComicName, IssueNumber, ComicYear, SeriesYear, Publisher, IssueDa nzbid = generate_id(nzbprov, entry['id']) elif all([nzbprov == 'ddl', 'getcomics' in entry['link']]): nzbid = entry['id'] + entry['title'] = entry['filename'] else: nzbid = generate_id(nzbprov, entry['link']) if manual is not True: @@ -1516,9 +1522,11 @@ def NZB_SEARCH(ComicName, IssueNumber, ComicYear, SeriesYear, Publisher, IssueDa for isid in issinfo['issues']: updater.nzblog(isid['issueid'], nzbname, ComicName, SARC=SARC, IssueArcID=IssueArcID, id=nzbid, prov=tmpprov, oneoff=oneoff) updater.foundsearch(ComicID, isid['issueid'], mode='series', provider=tmpprov) - notify_snatch(nzbname, sent_to, mylar.COMICINFO[0]['modcomicname'], mylar.COMICINFO[0]['comyear'], mylar.COMICINFO[0]['pack_numbers'], nzbprov) + notify_snatch(sent_to, mylar.COMICINFO[0]['ComicName'], mylar.COMICINFO[0]['comyear'], mylar.COMICINFO[0]['pack_numbers'], nzbprov, True) + #notify_snatch(nzbname, sent_to, mylar.COMICINFO[0]['modcomicname'], mylar.COMICINFO[0]['comyear'], mylar.COMICINFO[0]['pack_numbers'], nzbprov) else: - notify_snatch(nzbname, sent_to, mylar.COMICINFO[0]['modcomicname'], mylar.COMICINFO[0]['comyear'], None, nzbprov) + notify_snatch(sent_to, mylar.COMICINFO[0]['ComicName'], mylar.COMICINFO[0]['comyear'], None, nzbprov, True) + #notify_snatch(nzbname, sent_to, mylar.COMICINFO[0]['modcomicname'], mylar.COMICINFO[0]['comyear'], None, nzbprov) else: if alt_nzbname is None or alt_nzbname == '': @@ -1534,7 +1542,8 @@ def NZB_SEARCH(ComicName, IssueNumber, ComicYear, SeriesYear, Publisher, IssueDa cyear = ComicYear else: cyear = comyear - notify_snatch(nzbname, sent_to, helpers.filesafe(modcomicname), cyear, IssueNumber, nzbprov) + #notify_snatch(nzbname, sent_to, helpers.filesafe(modcomicname), cyear, IssueNumber, nzbprov) + notify_snatch(ComicName, sent_to, cyear, IssueNumber, nzbprov, False) prov_count == 0 mylar.TMP_PROV = nzbprov @@ -2304,8 +2313,8 @@ def searcher(nzbprov, nzbname, comicinfo, link, IssueID, ComicID, tmpprov, direc t_hash = None if mylar.CONFIG.ENABLE_DDL is True and nzbprov == 'ddl': ggc = getcomics.GC(issueid=IssueID, comicid=ComicID) - sendsite = ggc.loadsite(os.path.join(mylar.CONFIG.CACHE_DIR, 'getcomics-' + nzbid), link) - ddl_it = ggc.parse_downloadresults(os.path.join(mylar.CONFIG.CACHE_DIR, 'getcomics-' + nzbid), link) + sendsite = ggc.loadsite(nzbid, link) + ddl_it = ggc.parse_downloadresults(nzbid, link) logger.info("ddl status response: %s" % ddl_it) if ddl_it['success'] is True: logger.info('Successfully snatched %s from DDL site. 
It is currently being queued to download in position %s' % (nzbname, mylar.DDL_QUEUE.qsize())) @@ -2687,37 +2696,43 @@ def searcher(nzbprov, nzbname, comicinfo, link, IssueID, ComicID, tmpprov, direc if '[RSS]' in tmpprov: tmpprov = re.sub('\[RSS\]', '', tmpprov).strip() updater.nzblog(IssueID, nzbname, ComicName, SARC=SARC, IssueArcID=IssueArcID, id=nzbid, prov=tmpprov, alt_nzbname=alt_nzbname, oneoff=oneoff) #send out notifications for on snatch after the updater incase notification fails (it would bugger up the updater/pp scripts) - notify_snatch(nzbname, sent_to, helpers.filesafe(modcomicname), comyear, IssueNumber, nzbprov) + #notify_snatch(nzbname, sent_to, helpers.filesafe(modcomicname), comyear, IssueNumber, nzbprov) + notify_snatch(sent_to, ComicName, comyear, IssueNumber, nzbprov, False) mylar.TMP_PROV = nzbprov return return_val -def notify_snatch(nzbname, sent_to, modcomicname, comyear, IssueNumber, nzbprov): - - if IssueNumber is not None: - snline = '%s (%s) #%s snatched!' % (modcomicname, comyear, IssueNumber) +#def notify_snatch(nzbname, sent_to, modcomicname, comyear, IssueNumber, nzbprov): +def notify_snatch(sent_to, comicname, comyear, IssueNumber, nzbprov, pack): + if pack is False: + snline = 'Issue snatched!' else: - snline = '%s (%s) snatched!' % (modcomicname, comyear) + snline = 'Pack snatched!' + + if IssueNumber is not None: + snatched_name = '%s (%s) #%s' % (comicname, comyear, IssueNumber) + else: + snatched_name= '%s (%s)' % (comicname, comyear) if mylar.CONFIG.PROWL_ENABLED and mylar.CONFIG.PROWL_ONSNATCH: logger.info(u"Sending Prowl notification") prowl = notifiers.PROWL() - prowl.notify(nzbname, "Download started using " + sent_to) + prowl.notify(snatched_name, "Download started using " + sent_to) if mylar.CONFIG.NMA_ENABLED and mylar.CONFIG.NMA_ONSNATCH: logger.info(u"Sending NMA notification") nma = notifiers.NMA() - nma.notify(snline=snline, snatched_nzb=nzbname, sent_to=sent_to, prov=nzbprov) + nma.notify(snline=snline, snatched_nzb=snatched_name, sent_to=sent_to, prov=nzbprov) if mylar.CONFIG.PUSHOVER_ENABLED and mylar.CONFIG.PUSHOVER_ONSNATCH: logger.info(u"Sending Pushover notification") pushover = notifiers.PUSHOVER() - pushover.notify(snline, snatched_nzb=nzbname, sent_to=sent_to, prov=nzbprov) + pushover.notify(snline, snatched_nzb=snatched_name, sent_to=sent_to, prov=nzbprov) if mylar.CONFIG.BOXCAR_ENABLED and mylar.CONFIG.BOXCAR_ONSNATCH: logger.info(u"Sending Boxcar notification") boxcar = notifiers.BOXCAR() - boxcar.notify(snatched_nzb=nzbname, sent_to=sent_to, snline=snline) + boxcar.notify(snatched_nzb=snatched_name, sent_to=sent_to, snline=snline) if mylar.CONFIG.PUSHBULLET_ENABLED and mylar.CONFIG.PUSHBULLET_ONSNATCH: logger.info(u"Sending Pushbullet notification") pushbullet = notifiers.PUSHBULLET() - pushbullet.notify(snline=snline, snatched=nzbname, sent_to=sent_to, prov=nzbprov, method='POST') + pushbullet.notify(snline=snline, snatched=snatched_name, sent_to=sent_to, prov=nzbprov, method='POST') if mylar.CONFIG.TELEGRAM_ENABLED and mylar.CONFIG.TELEGRAM_ONSNATCH: logger.info(u"Sending Telegram notification") telegram = notifiers.TELEGRAM() @@ -2725,7 +2740,7 @@ def notify_snatch(nzbname, sent_to, modcomicname, comyear, IssueNumber, nzbprov) if mylar.CONFIG.SLACK_ENABLED and mylar.CONFIG.SLACK_ONSNATCH: logger.info(u"Sending Slack notification") slack = notifiers.SLACK() - slack.notify("Snatched", snline, snatched_nzb=nzbname, sent_to=sent_to, prov=nzbprov) + slack.notify("Snatched", snline, snatched_nzb=snatched_name, sent_to=sent_to, 
prov=nzbprov) return diff --git a/mylar/updater.py b/mylar/updater.py index cd88e380..256c852d 100755 --- a/mylar/updater.py +++ b/mylar/updater.py @@ -1104,6 +1104,8 @@ def forceRescan(ComicID, archive=None, module=None, recheck=False): temploc = '1' else: temploc = None + logger.warn('The filename [%s] does not have a valid issue number, and the Edition of the series is %s. You might need to Forcibly Mark the Series as TPB/GN and try this again.' % (tmpfc['ComicFilename'], rescan['Type'])) + return if all(['annual' not in temploc.lower(), 'special' not in temploc.lower()]): #remove the extension here diff --git a/mylar/webserve.py b/mylar/webserve.py index 032eee79..5a16da32 100644 --- a/mylar/webserve.py +++ b/mylar/webserve.py @@ -645,6 +645,8 @@ class WebInterface(object): seriesYear = cid['SeriesYear'] issuePublisher = cid['Publisher'] seriesVolume = cid['Volume'] + bookType = cid['Type'] + seriesAliases = cid['Aliases'] if storyarcpublisher is None: #assume that the arc is the same storyarcpublisher = issuePublisher @@ -670,6 +672,8 @@ class WebInterface(object): "IssuePublisher": issuePublisher, "CV_ArcID": arcid, "Int_IssueNumber": AD['Int_IssueNumber'], + "Type": bookType, + "Aliases": seriesAliases, "Manual": AD['Manual']} myDB.upsert("storyarcs", newVals, newCtrl) @@ -2194,6 +2198,41 @@ class WebInterface(object): annualDelete.exposed = True + def queueManage(self): # **args): + myDB = db.DBConnection() + activelist = 'There are currently no items currently downloading via Direct Download (DDL).' + active = myDB.selectone("SELECT * FROM DDL_INFO WHERE STATUS = 'Downloading'").fetchone() + if active is not None: + activelist ={'series': active['series'], + 'year': active['year'], + 'size': active['size'], + 'filename': active['filename'], + 'status': active['status'], + 'id': active['id']} + + resultlist = 'There are currently no items waiting in the Direct Download (DDL) Queue for processing.' 
+ s_info = myDB.select("SELECT a.ComicName, a.ComicVersion, a.ComicID, a.ComicYear, b.Issue_Number, b.IssueID, c.size, c.status, c.id FROM comics as a INNER JOIN issues as b ON a.ComicID = b.ComicID INNER JOIN ddl_info as c ON b.IssueID = c.IssueID WHERE c.status != 'Downloading'") + if s_info: + resultlist = [] + for si in s_info: + issue = si['Issue_Number'] + if issue is not None: + issue = '#%s' % issue + resultlist.append({'series': si['ComicName'], + 'issue': issue, + 'id': si['id'], + 'volume': si['ComicVersion'], + 'year': si['ComicYear'], + 'size': si['size'].strip(), + 'comicid': si['ComicID'], + 'issueid': si['IssueID'], + 'status': si['status']}) + + logger.info('resultlist: %s' % resultlist) + return serve_template(templatename="queue_management.html", title="Queue Management", activelist=activelist, resultlist=resultlist) + queueManage.exposed = True + + def previewRename(self, **args): #comicid=None, comicidlist=None): file_format = mylar.CONFIG.FILE_FORMAT myDB = db.DBConnection() @@ -4104,7 +4143,7 @@ class WebInterface(object): import random SRID = str(random.randint(100000, 999999)) - logger.info('[IMPORT] Issues found with valid ComicID information for : ' + comicinfo['ComicName'] + ' [' + str(comicinfo['ComicID']) + ']') + logger.info('[IMPORT] Issues found with valid ComicID information for : %s [%s]' % (comicinfo['ComicName'], comicinfo['ComicID'])) imported = {'ComicName': comicinfo['ComicName'], 'DynamicName': comicinfo['DynamicName'], 'Volume': comicinfo['Volume'], @@ -4127,7 +4166,7 @@ class WebInterface(object): # "ComicName": comicinfo['ComicName'], # "DynamicName": comicinfo['DynamicName']} # myDB.upsert("importresults", newVal, ctrlVal) - logger.info('[IMPORT] Successfully verified import sequence data for : ' + comicinfo['ComicName'] + '. Currently adding to your watchlist.') + logger.info('[IMPORT] Successfully verified import sequence data for : %s. Currently adding to your watchlist.' % comicinfo['ComicName']) RemoveIDS.append(comicinfo['ComicID']) #we need to remove these items from the comiclist now, so they don't get processed again @@ -4200,9 +4239,10 @@ class WebInterface(object): else: raise cherrypy.HTTPRedirect("importResults") else: - comicstoIMP.append(result['ComicLocation'])#.decode(mylar.SYS_ENCODING, 'replace')) + #logger.fdebug('result: %s' % result) + comicstoIMP.append(result['ComicLocation']) #.decode(mylar.SYS_ENCODING, 'replace')) getiss = result['IssueNumber'] - #logger.info('getiss:' + getiss) + #logger.fdebug('getiss: %s' % getiss) if 'annual' in getiss.lower(): tmpiss = re.sub('[^0-9]','', getiss).strip() if any([tmpiss.startswith('19'), tmpiss.startswith('20')]) and len(tmpiss) == 4: @@ -4217,10 +4257,10 @@ class WebInterface(object): miniss_num = helpers.issuedigits(minISSUE) startiss_num = helpers.issuedigits(startISSUE) if int(getiss_num) > int(miniss_num): - #logger.fdebug('Minimum issue now set to : ' + getiss + ' - it was : ' + minISSUE) + logger.fdebug('Minimum issue now set to : %s - it was %s' % (getiss, minISSUE)) minISSUE = getiss if int(getiss_num) < int(startiss_num): - #logger.fdebug('Start issue now set to : ' + getiss + ' - it was : ' + startISSUE) + logger.fdebug('Start issue now set to : %s - it was %s' % (getiss, startISSUE)) startISSUE = str(getiss) if helpers.issuedigits(startISSUE) == 1000 and result['ComicYear'] is not None: # if it's an issue #1, get the year and assume that's the start. 
startyear = result['ComicYear'] From 5a98e4f4a27bca9a937fdcfc86774ab31457f845 Mon Sep 17 00:00:00 2001 From: evilhero Date: Fri, 1 Feb 2019 17:07:54 -0500 Subject: [PATCH 27/54] FIX: fix for missed variables in last commit --- mylar/PostProcessor.py | 14 +++++++------- 1 file changed, 7 insertions(+), 7 deletions(-) diff --git a/mylar/PostProcessor.py b/mylar/PostProcessor.py index 752c8ce0..a1cef19e 100755 --- a/mylar/PostProcessor.py +++ b/mylar/PostProcessor.py @@ -731,7 +731,7 @@ class PostProcessor(object): datematch = "False" else: if int(isc['IssueDate'][:4]) < int(watchmatch['issue_year']): - logger.fdebug('%s[ISSUE-VERIFY] %s is before the issue year %s that was discovered in the filename' % (isc['IssueDate'], watchmatch['issue_year'])) + logger.fdebug('%s[ISSUE-VERIFY] %s is before the issue year %s that was discovered in the filename' % (module, isc['IssueDate'], watchmatch['issue_year'])) datematch = "False" if int(monthval[5:7]) == 11 or int(monthval[5:7]) == 12: @@ -741,7 +741,7 @@ class PostProcessor(object): issyr = int(monthval[:4]) - 1 if datematch == "False" and issyr is not None: - logger.fdebug('%s[ISSUE-VERIFY] %s comparing to %s : rechecking by month-check versus year.' % (issyr, watchmatch['issue_year'])) + logger.fdebug('%s[ISSUE-VERIFY] %s comparing to %s : rechecking by month-check versus year.' % (module, issyr, watchmatch['issue_year'])) datematch = "True" if int(issyr) != int(watchmatch['issue_year']): logger.fdebug('%s[ISSUE-VERIFY][.:FAIL:.] Issue is before the modified issue year of %s' % (module, issyr)) @@ -1043,11 +1043,11 @@ class PostProcessor(object): #if ReleaseDate doesn't exist, use IssueDate #if no issue date was found, then ignore. issyr = None - logger.fdebug('issuedate:' + str(isc['IssueDate'])) - logger.fdebug('issuechk: ' + str(isc['IssueDate'][5:7])) + logger.fdebug('issuedate: %s' % isc['IssueDate']) + logger.fdebug('issuechk: %s' % isc['IssueDate'][5:7]) - logger.fdebug('StoreDate ' + str(isc['ReleaseDate'])) - logger.fdebug('IssueDate: ' + str(isc['IssueDate'])) + logger.fdebug('StoreDate %s' % isc['ReleaseDate']) + logger.fdebug('IssueDate: %s' % isc['IssueDate']) if all([isc['ReleaseDate'] is not None, isc['ReleaseDate'] != '0000-00-00']) or all([isc['IssueDate'] is not None, isc['IssueDate'] != '0000-00-00']): if isc['ReleaseDate'] == '0000-00-00': datevalue = isc['IssueDate'] @@ -1098,7 +1098,7 @@ class PostProcessor(object): clocation = os.path.join(arcmatch['comiclocation'], arcmatch['sub'], tmpfilename) else: clocation = os.path.join(arcmatch['comiclocation'], tmpfilename) - logger.info('[%s %s#] MATCH: %s / %s / %s' % (k, isc['IssueNumber'], clocation, isc['IssueID'], v[i]['ArcValues']['IssueID'])) + logger.info('[%s #%s] MATCH: %s / %s / %s' % (k, isc['IssueNumber'], clocation, isc['IssueID'], v[i]['ArcValues']['IssueID'])) if v[i]['ArcValues']['Publisher'] is None: arcpublisher = v[i]['ArcValues']['ComicPublisher'] else: From 350a5c204544dfc78e7011ed05a91eace43b86f3 Mon Sep 17 00:00:00 2001 From: evilhero Date: Fri, 1 Feb 2019 17:21:20 -0500 Subject: [PATCH 28/54] FIX: variable fix for post-processing --- mylar/PostProcessor.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/mylar/PostProcessor.py b/mylar/PostProcessor.py index a1cef19e..d1e502af 100755 --- a/mylar/PostProcessor.py +++ b/mylar/PostProcessor.py @@ -535,7 +535,7 @@ class PostProcessor(object): #check for Paused status / #check for Ended status and 100% completion of issues. 
if wv['Status'] == 'Paused' or (wv['Have'] == wv['Total'] and not any(['Present' in wv['ComicPublished'], helpers.now()[:4] in wv['ComicPublished']])): - logger.warn('%s [%s] is either Paused or in an Ended status with 100% completion. Ignoring for match.' % (wv['ComicName'], wv['ComicYear'])) + logger.warn('%s [%s] is either Paused or in an Ended status with 100\% completion. Ignoring for match.' % (wv['ComicName'], wv['ComicYear'])) continue wv_comicname = wv['ComicName'] wv_comicpublisher = wv['ComicPublisher'] From 9683646d8cb4709545221a83099ceb26464c32f6 Mon Sep 17 00:00:00 2001 From: evilhero Date: Fri, 1 Feb 2019 17:30:16 -0500 Subject: [PATCH 29/54] FIX: another variable fix for post-processing --- mylar/PostProcessor.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/mylar/PostProcessor.py b/mylar/PostProcessor.py index d1e502af..2a1d894a 100755 --- a/mylar/PostProcessor.py +++ b/mylar/PostProcessor.py @@ -535,7 +535,7 @@ class PostProcessor(object): #check for Paused status / #check for Ended status and 100% completion of issues. if wv['Status'] == 'Paused' or (wv['Have'] == wv['Total'] and not any(['Present' in wv['ComicPublished'], helpers.now()[:4] in wv['ComicPublished']])): - logger.warn('%s [%s] is either Paused or in an Ended status with 100\% completion. Ignoring for match.' % (wv['ComicName'], wv['ComicYear'])) + logger.warn('%s [%s] is either Paused or in an Ended status with 100%s completion. Ignoring for match.' % (wv['ComicName'], wv['ComicYear'], '%')) continue wv_comicname = wv['ComicName'] wv_comicpublisher = wv['ComicPublisher'] From 92a60760a09827ae0a1229e3c5e9b7176f0be422 Mon Sep 17 00:00:00 2001 From: evilhero Date: Fri, 1 Feb 2019 20:13:58 -0500 Subject: [PATCH 30/54] FIX: Get rid of 'Site' error when searching non-torrent/ddl related sites --- mylar/search.py | 3 +++ 1 file changed, 3 insertions(+) diff --git a/mylar/search.py b/mylar/search.py index 09d22d97..610ebf19 100755 --- a/mylar/search.py +++ b/mylar/search.py @@ -896,6 +896,9 @@ def NZB_SEARCH(ComicName, IssueNumber, ComicYear, SeriesYear, Publisher, IssueDa comsize_b = entry['size'] elif entry['site'] == 'DDL': comsize_b = helpers.human2bytes(entry['size']) + else: + tmpsz = entry.enclosures[0] + comsize_b = tmpsz['length'] except Exception as e: logger.warn('[ERROR] %s [%s]' % (e, entry)) tmpsz = entry.enclosures[0] From cf2c066d2203203f0cacc1dd13049e4fc7f6b48b Mon Sep 17 00:00:00 2001 From: evilhero Date: Fri, 1 Feb 2019 20:46:03 -0500 Subject: [PATCH 31/54] FIX:(#2183) Fix for pack information not being present when searching nzb providers --- mylar/search.py | 7 ++++++- 1 file changed, 6 insertions(+), 1 deletion(-) diff --git a/mylar/search.py b/mylar/search.py index 610ebf19..b9ef04be 100755 --- a/mylar/search.py +++ b/mylar/search.py @@ -1281,6 +1281,11 @@ def NZB_SEARCH(ComicName, IssueNumber, ComicYear, SeriesYear, Publisher, IssueDa downloadit = False #-------------------------------------fix this! 
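# The pack_test guard added just below protects against provider entries that carry no
# 'pack' key at all (typically only 32P/DDL results include one). This is a standalone
# sketch of the same idea, assuming a plain-dict entry - real entries vary by provider,
# which is presumably why the patch plays it safe with a broad try/except.
def pack_flag(entry):
    # return the entry's pack flag, defaulting to False when the key is absent
    try:
        return entry['pack']
    except (KeyError, TypeError):
        return False

# e.g. pack_flag({'title': 'some result'}) -> False, pack_flag({'pack': True}) -> True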
+ try: + pack_test = entry['pack'] + except Exception as e: + pack_test = False + if nzbprov == 'Public Torrents' and any([entry['site'] == 'WWT', entry['site'] == 'DEM']): if entry['site'] == 'WWT': nzbprov = 'WWT' @@ -1289,7 +1294,7 @@ def NZB_SEARCH(ComicName, IssueNumber, ComicYear, SeriesYear, Publisher, IssueDa if all([nzbprov == '32P', allow_packs == True, RSS == 'no']): logger.fdebug('pack:' + entry['pack']) - if (all([nzbprov == '32P', RSS == 'no', allow_packs == True]) and any([entry['pack'] == '1', entry['pack'] == '2'])) or (all([nzbprov == 'ddl', entry['pack'] is True])): #allow_packs is True + if (all([nzbprov == '32P', RSS == 'no', allow_packs == True]) and any([entry['pack'] == '1', entry['pack'] == '2'])) or (all([nzbprov == 'ddl', pack_test is True])): #allow_packs is True if nzbprov == '32P': if entry['pack'] == '2': logger.fdebug('[PACK-QUEUE] Diamond FreeLeech Pack detected.') From 9ca2c3b47dc8437f6dde012d2e27613fdab1daa4 Mon Sep 17 00:00:00 2001 From: evilhero Date: Sat, 2 Feb 2019 00:33:19 -0500 Subject: [PATCH 32/54] FIX: Added more verifications (date/volume/year) when story-arc post-processing is being performed --- mylar/PostProcessor.py | 150 ++++++++++++++++++++++++++++++++--------- 1 file changed, 117 insertions(+), 33 deletions(-) diff --git a/mylar/PostProcessor.py b/mylar/PostProcessor.py index 2a1d894a..e127c4b3 100755 --- a/mylar/PostProcessor.py +++ b/mylar/PostProcessor.py @@ -943,7 +943,7 @@ class PostProcessor(object): "Location": av['Location']}, "WatchValues": {"SeriesYear": av['SeriesYear'], "LatestDate": av['IssueDate'], - "ComicVersion": 'v' + str(av['SeriesYear']), + "ComicVersion": av['Volume'], "ComicID": av['ComicID'], "Publisher": av['IssuePublisher'], "Total": av['TotalIssues'], # this will return the total issues in the arc (not needed for this) @@ -1037,51 +1037,134 @@ class PostProcessor(object): else: for isc in issuechk: datematch = "True" + if isc['ReleaseDate'] is not None and isc['ReleaseDate'] != '0000-00-00': + try: + if isc['DigitalDate'] != '0000-00-00' and int(re.sub('-', '', isc['DigitalDate']).strip()) <= int(re.sub('-', '', isc['ReleaseDate']).strip()): + monthval = isc['DigitalDate'] + arc_issueyear = isc['DigitalDate'][:4] + else: + monthval = isc['ReleaseDate'] + arc_issueyear = isc['ReleaseDate'][:4] + except: + monthval = isc['ReleaseDate'] + arc_issueyear = isc['ReleaseDate'][:4] + + else: + try: + if isc['DigitalDate'] != '0000-00-00' and int(re.sub('-', '', isc['DigitalDate']).strip()) <= int(re.sub('-', '', isc['ReleaseDate']).strip()): + monthval = isc['DigitalDate'] + arc_issueyear = isc['DigitalDate'][:4] + else: + monthval = isc['IssueDate'] + arc_issueyear = isc['IssueDate'][:4] + except: + monthval = isc['IssueDate'] + arc_issueyear = isc['IssueDate'][:4] + + if len(arcmatch) >= 1 and arcmatch['issue_year'] is not None: #if the # of matches is more than 1, we need to make sure we get the right series #compare the ReleaseDate for the issue, to the found issue date in the filename. #if ReleaseDate doesn't exist, use IssueDate #if no issue date was found, then ignore. 
+ logger.fdebug('%s[ARC ISSUE-VERIFY] Now checking against %s - %s' % (module, k, v[i]['WatchValues']['ComicID'])) issyr = None - logger.fdebug('issuedate: %s' % isc['IssueDate']) - logger.fdebug('issuechk: %s' % isc['IssueDate'][5:7]) + #logger.fdebug('issuedate: %s' % isc['IssueDate']) + #logger.fdebug('issuechk: %s' % isc['IssueDate'][5:7]) + #logger.fdebug('StoreDate %s' % isc['ReleaseDate']) + #logger.fdebug('IssueDate: %s' % isc['IssueDate']) + if isc['DigitalDate'] is not None and isc['DigitalDate'] != '0000-00-00': + if int(isc['DigitalDate'][:4]) < int(arcmatch['issue_year']): + logger.fdebug('%s[ARC ISSUE-VERIFY] %s is before the issue year of %s that was discovered in the filename' % (module, isc['DigitalDate'], arcmatch['issue_year'])) + datematch = "False" - logger.fdebug('StoreDate %s' % isc['ReleaseDate']) - logger.fdebug('IssueDate: %s' % isc['IssueDate']) - if all([isc['ReleaseDate'] is not None, isc['ReleaseDate'] != '0000-00-00']) or all([isc['IssueDate'] is not None, isc['IssueDate'] != '0000-00-00']): + elif all([isc['ReleaseDate'] is not None, isc['ReleaseDate'] != '0000-00-00']): if isc['ReleaseDate'] == '0000-00-00': datevalue = isc['IssueDate'] - if int(datevalue[:4]) < int(arcmatch['issue_year']): - logger.fdebug('%s %s is before the issue year %s that was discovered in the filename' % (module, datevalue[:4], arcmatch['issue_year'])) - datematch = "False" else: datevalue = isc['ReleaseDate'] - if int(datevalue[:4]) < int(arcmatch['issue_year']): - logger.fdebug('%s %s is before the issue year of %s that was discovered in the filename' % (module, datevalue[:4], arcmatch['issue_year'])) - datematch = "False" - - monthval = datevalue - - if int(monthval[5:7]) == 11 or int(monthval[5:7]) == 12: - issyr = int(monthval[:4]) + 1 - logger.fdebug('%s IssueYear (issyr) is %s' % (module, issyr)) - elif int(monthval[5:7]) == 1 or int(monthval[5:7]) == 2 or int(monthval[5:7]) == 3: - issyr = int(monthval[:4]) - 1 - - if datematch == "False" and issyr is not None: - logger.fdebug('%s %s comparing to %s : rechecking by month-check versus year.' % (module, issyr, arcmatch['issue_year'])) - datematch = "True" - if int(issyr) != int(arcmatch['issue_year']): - logger.fdebug('%s[.:FAIL:.] 
Issue is before the modified issue year of %s' % (module, issyr)) - datematch = "False" - + if int(datevalue[:4]) < int(arcmatch['issue_year']): + logger.fdebug('%s[ARC ISSUE-VERIFY] %s is before the issue year %s that was discovered in the filename' % (module, datevalue[:4], arcmatch['issue_year'])) + datematch = "False" + elif all([isc['IssueDate'] is not None, isc['IssueDate'] != '0000-00-00']): + if isc['IssueDate'] == '0000-00-00': + datevalue = isc['ReleaseDate'] + else: + datevalue = isc['IssueDate'] + if int(datevalue[:4]) < int(arcmatch['issue_year']): + logger.fdebug('%s[ARC ISSUE-VERIFY] %s is before the issue year of %s that was discovered in the filename' % (module, datevalue[:4], arcmatch['issue_year'])) + datematch = "False" else: - logger.info('%s Found matching issue # %s for ComicID: %s / IssueID: %s' % (module, fcdigit, v[i]['WatchValues']['ComicID'], isc['IssueID'])) + if int(isc['IssueDate'][:4]) < int(arcmatch['issue_year']): + logger.fdebug('%s[ARC ISSUE-VERIFY] %s is before the issue year %s that was discovered in the filename' % (module, isc['IssueDate'], arcmatch['issue_year'])) + datematch = "False" - logger.fdebug('datematch: %s' % datematch) - logger.fdebug('temploc: %s' % helpers.issuedigits(temploc)) - logger.fdebug('arcissue: %s' % helpers.issuedigits(v[i]['ArcValues']['IssueNumber'])) - if datematch == "True" and helpers.issuedigits(temploc) == helpers.issuedigits(v[i]['ArcValues']['IssueNumber']): + if int(monthval[5:7]) == 11 or int(monthval[5:7]) == 12: + issyr = int(monthval[:4]) + 1 + logger.fdebug('%s[ARC ISSUE-VERIFY] IssueYear (issyr) is %s' % (module, issyr)) + elif int(monthval[5:7]) == 1 or int(monthval[5:7]) == 2 or int(monthval[5:7]) == 3: + issyr = int(monthval[:4]) - 1 + + if datematch == "False" and issyr is not None: + logger.fdebug('%s[ARC ISSUE-VERIFY] %s comparing to %s : rechecking by month-check versus year.' % (module, issyr, arcmatch['issue_year'])) + datematch = "True" + if int(issyr) != int(arcmatch['issue_year']): + logger.fdebug('%s[.:FAIL:.] 
Issue is before the modified issue year of %s' % (module, issyr)) + datematch = "False" + + else: + logger.info('%s Found matching issue # %s for ComicID: %s / IssueID: %s' % (module, fcdigit, v[i]['WatchValues']['ComicID'], isc['IssueID'])) + + logger.fdebug('datematch: %s' % datematch) + logger.fdebug('temploc: %s' % helpers.issuedigits(temploc)) + logger.fdebug('arcissue: %s' % helpers.issuedigits(v[i]['ArcValues']['IssueNumber'])) + if datematch == "True" and helpers.issuedigits(temploc) == helpers.issuedigits(v[i]['ArcValues']['IssueNumber']): + + arc_values = v[i]['WatchValues'] + if any([arc_values['ComicVersion'] is None, arc_values['ComicVersion'] == 'None']): + tmp_arclist_vol = '1' + else: + tmp_arclist_vol = re.sub("[^0-9]", "", arc_values['ComicVersion']).strip() + if all([arcmatch['series_volume'] != 'None', arcmatch['series_volume'] is not None]): + tmp_arcmatch_vol = re.sub("[^0-9]","", arcmatch['series_volume']).strip() + if len(tmp_arcmatch_vol) == 4: + if int(tmp_arcmatch_vol) == int(arc_values['SeriesYear']): + logger.fdebug('%s[ARC ISSUE-VERIFY][SeriesYear-Volume MATCH] Series Year of %s matched to volume/year label of %s' % (module, arc_values['SeriesYear'], tmp_arcmatch_vol)) + else: + logger.fdebug('%s[ARC ISSUE-VERIFY][SeriesYear-Volume FAILURE] Series Year of %s DID NOT match to volume/year label of %s' % (module, arc_values['SeriesYear'], tmp_arcmatch_vol)) + datematch = "False" + if len(arcvals) > 1 and int(tmp_arcmatch_vol) > 1: + if int(tmp_arcmatch_vol) == int(tmp_arclist_vol): + logger.fdebug('%s[ARC ISSUE-VERIFY][SeriesYear-Volume MATCH] Volume label of series Year of %s matched to volume label of %s' % (modulue, arc_values['ComicVersion'], arcmatch['series_volume'])) + else: + logger.fdebug('%s[ARC ISSUE-VERIFY][SeriesYear-Volume FAILURE] Volume label of Series Year of %s DID NOT match to volume label of %s' % (module, arc_values['ComicVersion'], arcmatch['series_volume'])) + continue + else: + if any([tmp_arclist_vol is None, tmp_arclist_vol == 'None', tmp_arclist_vol == '']): + logger.fdebug('%s[ARC ISSUE-VERIFY][NO VOLUME PRESENT] No Volume label present for series. Dropping down to Issue Year matching.' % module) + datematch = "False" + elif len(arcvals) == 1 and int(tmp_arclist_vol) == 1: + logger.fdebug('%s[ARC ISSUE-VERIFY][Lone Volume MATCH] Volume label of %s indicates only volume for this series on your watchlist.' % (module, arc_values['ComicVersion'])) + elif int(tmp_arclist_vol) > 1: + logger.fdebug('%s[ARC ISSUE-VERIFY][Lone Volume FAILURE] Volume label of %s indicates that there is more than one volume for this series, but the one on your watchlist has no volume label set.' % (module, arc_values['ComicVersion'])) + datematch = "False" + + if datematch == "False" and all([arcmatch['issue_year'] is not None, arcmatch['issue_year'] != 'None', arc_issueyear is not None]): + #now we see if the issue year matches exactly to what we have within Mylar. + if int(arc_issueyear) == int(arcmatch['issue_year']): + logger.fdebug('%s[ARC ISSUE-VERIFY][Issue Year MATCH] Issue Year of %s is a match to the year found in the filename of : %s' % (module, arc_issueyear, arcmatch['issue_year'])) + datematch = 'True' + else: + logger.fdebug('%s[ARC ISSUE-VERIFY][Issue Year FAILURE] Issue Year of %s does NOT match the year found in the filename of : %s' % (module, arc_issueyear, arcmatch['issue_year'])) + logger.fdebug('%s[ARC ISSUE-VERIFY] Checking against complete date to see if month published could allow for different publication year.' 
% module) + if issyr is not None: + if int(issyr) != int(arcmatch['issue_year']): + logger.fdebug('%s[ARC ISSUE-VERIFY][Issue Year FAILURE] Modified Issue year of %s is before the modified issue year of %s' % (module, issyr, arcmatch['issue_year'])) + else: + logger.fdebug('%s[ARC ISSUE-VERIFY][Issue Year MATCH] Modified Issue Year of %s is a match to the year found in the filename of : %s' % (module, issyr, arcmatch['issue_year'])) + datematch = 'True' + + if datematch == 'True': passit = False if len(manual_list) > 0: if any([ v[i]['ArcValues']['IssueID'] == x['IssueID'] for x in manual_list ]): @@ -1092,6 +1175,7 @@ class PostProcessor(object): a['IssueArcID'] = v[i]['ArcValues']['IssueArcID'] break passit = True + if passit == False: tmpfilename = helpers.conversion(arcmatch['comicfilename']) if arcmatch['sub']: From 039ebc3d755a0d91311d2889bdf55e540312516f Mon Sep 17 00:00:00 2001 From: evilhero Date: Sat, 2 Feb 2019 00:46:14 -0500 Subject: [PATCH 33/54] FIX: Fix for typo in PostProcessor --- mylar/PostProcessor.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/mylar/PostProcessor.py b/mylar/PostProcessor.py index e127c4b3..72d32fe3 100755 --- a/mylar/PostProcessor.py +++ b/mylar/PostProcessor.py @@ -1135,7 +1135,7 @@ class PostProcessor(object): datematch = "False" if len(arcvals) > 1 and int(tmp_arcmatch_vol) > 1: if int(tmp_arcmatch_vol) == int(tmp_arclist_vol): - logger.fdebug('%s[ARC ISSUE-VERIFY][SeriesYear-Volume MATCH] Volume label of series Year of %s matched to volume label of %s' % (modulue, arc_values['ComicVersion'], arcmatch['series_volume'])) + logger.fdebug('%s[ARC ISSUE-VERIFY][SeriesYear-Volume MATCH] Volume label of series Year of %s matched to volume label of %s' % (module, arc_values['ComicVersion'], arcmatch['series_volume'])) else: logger.fdebug('%s[ARC ISSUE-VERIFY][SeriesYear-Volume FAILURE] Volume label of Series Year of %s DID NOT match to volume label of %s' % (module, arc_values['ComicVersion'], arcmatch['series_volume'])) continue From 5df40f07b6c342921ae85d13cfa4af3119d9d761 Mon Sep 17 00:00:00 2001 From: evilhero Date: Sat, 2 Feb 2019 16:17:39 -0500 Subject: [PATCH 34/54] FIX: Fix for logging error during post-processing --- mylar/PostProcessor.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/mylar/PostProcessor.py b/mylar/PostProcessor.py index 72d32fe3..80e466fc 100755 --- a/mylar/PostProcessor.py +++ b/mylar/PostProcessor.py @@ -1814,7 +1814,7 @@ class PostProcessor(object): grab_src = os.path.join(src_location, ofilename) self._log("Source Path : %s" % grab_src) - logger.info('%s Source Path : ' % (module, grab_src)) + logger.info('%s Source Path : %s' % (module, grab_src)) checkdirectory = filechecker.validateAndCreateDirectory(grdst, True, module=module) if not checkdirectory: From 99241f700a880522a4144cfb940690e934743912 Mon Sep 17 00:00:00 2001 From: evilhero Date: Sat, 2 Feb 2019 16:18:57 -0500 Subject: [PATCH 35/54] IMP: Added One-Shot as a type to weekly pull, allows for weekly one-offs to work as expected with one-shot issues, FIX: Fixed dbUpdater start sequence line to be logged so it's not just saying Update Completed --- mylar/search.py | 15 ++++++--------- mylar/updater.py | 4 ++-- 2 files changed, 8 insertions(+), 11 deletions(-) diff --git a/mylar/search.py b/mylar/search.py index b9ef04be..51d59600 100755 --- a/mylar/search.py +++ b/mylar/search.py @@ -896,11 +896,7 @@ def NZB_SEARCH(ComicName, IssueNumber, ComicYear, SeriesYear, Publisher, IssueDa comsize_b = entry['size'] elif entry['site'] == 
'DDL': comsize_b = helpers.human2bytes(entry['size']) - else: - tmpsz = entry.enclosures[0] - comsize_b = tmpsz['length'] except Exception as e: - logger.warn('[ERROR] %s [%s]' % (e, entry)) tmpsz = entry.enclosures[0] comsize_b = tmpsz['length'] @@ -1116,7 +1112,7 @@ def NZB_SEARCH(ComicName, IssueNumber, ComicYear, SeriesYear, Publisher, IssueDa parsed_comic = p_comic.listFiles() logger.fdebug('parsed_info: %s' % parsed_comic) - if parsed_comic['parse_status'] == 'success' and (all([booktype == 'Print', parsed_comic['booktype'] == 'issue']) or booktype == parsed_comic['booktype']): + if parsed_comic['parse_status'] == 'success' and (all([booktype == 'Print', parsed_comic['booktype'] == 'issue']) or all([booktype == 'One-Shot', parsed_comic['booktype'] == 'issue']) or booktype == parsed_comic['booktype']): try: fcomic = filechecker.FileChecker(watchcomic=ComicName) filecomic = fcomic.matchIT(parsed_comic) @@ -1373,7 +1369,9 @@ def NZB_SEARCH(ComicName, IssueNumber, ComicYear, SeriesYear, Publisher, IssueDa logger.fdebug("issue we are looking for is : %s" % findcomiciss) logger.fdebug("integer value of issue we are looking for : %s" % intIss) else: - if intIss is None: + if intIss is None and all([booktype == 'One-Shot', helpers.issuedigits(parsed_comic['issue_number']) == 1000]): + intIss = 1000 + else: intIss = 9999999999 if parsed_comic['issue_number'] is not None: logger.fdebug("issue we found for is : %s" % parsed_comic['issue_number']) @@ -1387,7 +1385,6 @@ def NZB_SEARCH(ComicName, IssueNumber, ComicYear, SeriesYear, Publisher, IssueDa pc_in = None else: pc_in = helpers.issuedigits(parsed_comic['issue_number']) - #issue comparison now as well if int(intIss) == int(comintIss) or all([cmloopit == 4, findcomiciss is None, pc_in is None]) or all([cmloopit == 4, findcomiciss is None, pc_in == 1]): nowrite = False @@ -1801,7 +1798,7 @@ def searchforissue(issueid=None, new=False, rsscheck=None, manual=False): StoreDate = result['ReleaseDate'] DigitalDate = result['DigitalDate'] TorrentID_32p = None - booktype = None + booktype = result['Type'] elif mode == 'pullwant': ComicName = result['COMIC'] Comicname_filesafe = helpers.filesafe(ComicName) @@ -1818,7 +1815,7 @@ def searchforissue(issueid=None, new=False, rsscheck=None, manual=False): IssueDate = result['SHIPDATE'] StoreDate = IssueDate DigitalDate = '0000-00-00' - booktype = None + booktype = result['format'] else: comic = myDB.selectone('SELECT * FROM comics where ComicID=?', [ComicID]).fetchone() if mode == 'want_ann': diff --git a/mylar/updater.py b/mylar/updater.py index 256c852d..fa6c6383 100755 --- a/mylar/updater.py +++ b/mylar/updater.py @@ -114,12 +114,12 @@ def dbUpdate(ComicIDList=None, calledfrom=None, sched=False): #logger.fdebug('%s [%s] Was refreshed less than %s hours ago. Skipping Refresh at this time.' 
% (ComicName, ComicID, cache_hours)) cnt +=1 continue - logger.info('[' + str(cnt) + '/' + str(len(comiclist)) + '] Refreshing :' + ComicName + ' (' + str(dspyear) + ') [' + str(ComicID) + ']') + logger.info('[%s/%s] Refreshing :%s (%s) [%s]' % (cnt, len(comiclist), ComicName, dspyear, ComicID)) else: ComicID = comic['ComicID'] ComicName = comic['ComicName'] - logger.fdebug('Refreshing: ' + ComicName + ' (' + str(dspyear) + ') [' + str(ComicID) + ']') + logger.info('Refreshing/Updating: %s (%s) [%s]' % (ComicName, dspyear, ComicID)) mismatch = "no" if not mylar.CONFIG.CV_ONLY or ComicID[:1] == "G": From 6b6ab8d2858810646842da03df4d32312ef784f2 Mon Sep 17 00:00:00 2001 From: evilhero Date: Sat, 2 Feb 2019 17:11:09 -0500 Subject: [PATCH 36/54] FIX: fix for DDL provider throwing an error when searching and no issue number present (ala one-shot issue) --- mylar/search.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/mylar/search.py b/mylar/search.py index 51d59600..c6679ce7 100755 --- a/mylar/search.py +++ b/mylar/search.py @@ -622,7 +622,7 @@ def NZB_SEARCH(ComicName, IssueNumber, ComicYear, SeriesYear, Publisher, IssueDa if nzbprov == 'ddl': cmname = re.sub("%20", " ", str(comsrc)) logger.fdebug('Sending request to DDL site for : %s %s' % (findcomic, isssearch)) - b = getcomics.GC(query=findcomic + ' ' + isssearch) + b = getcomics.GC(query='%s %s' % (findcomic, isssearch)) bb = b.search() #logger.info('bb returned from DDL: %s' % bb) elif RSS == "yes": From ab4e87363e8de509797f3c684e5fa847a2ef91ac Mon Sep 17 00:00:00 2001 From: evilhero Date: Sun, 3 Feb 2019 14:41:11 -0500 Subject: [PATCH 37/54] FIX: fixed cache folder being publically exposed, FIX: added .secure folder and move cookies into secured location as required --- mylar/auth32p.py | 6 +++--- mylar/config.py | 20 ++++++++++++++++++++ mylar/webstart.py | 4 +--- 3 files changed, 24 insertions(+), 6 deletions(-) diff --git a/mylar/auth32p.py b/mylar/auth32p.py index 95575481..df269035 100644 --- a/mylar/auth32p.py +++ b/mylar/auth32p.py @@ -69,7 +69,7 @@ class info32p(object): try: with cfscrape.create_scraper() as s: s.headers = self.headers - cj = LWPCookieJar(os.path.join(mylar.CONFIG.CACHE_DIR, ".32p_cookies.dat")) + cj = LWPCookieJar(os.path.join(mylar.CONFIG.SECURE_DIR, ".32p_cookies.dat")) cj.load() s.cookies = cj @@ -248,7 +248,7 @@ class info32p(object): with cfscrape.create_scraper() as s: s.headers = self.headers - cj = LWPCookieJar(os.path.join(mylar.CONFIG.CACHE_DIR, ".32p_cookies.dat")) + cj = LWPCookieJar(os.path.join(mylar.CONFIG.SECURE_DIR, ".32p_cookies.dat")) cj.load() s.cookies = cj data = [] @@ -403,7 +403,7 @@ class info32p(object): except Exception as e: logger.error('%s Can\'t create session with cfscrape' % self.module) - self.session_path = session_path if session_path is not None else os.path.join(mylar.CONFIG.CACHE_DIR, ".32p_cookies.dat") + self.session_path = session_path if session_path is not None else os.path.join(mylar.CONFIG.SECURE_DIR, ".32p_cookies.dat") self.ses.cookies = LWPCookieJar(self.session_path) if not os.path.exists(self.session_path): logger.fdebug('%s Session cookie does not exist. Signing in and Creating.' 
% self.module) diff --git a/mylar/config.py b/mylar/config.py index a48c48a2..e5ca4151 100644 --- a/mylar/config.py +++ b/mylar/config.py @@ -76,6 +76,7 @@ _CONFIG_DEFINITIONS = OrderedDict({ 'SHOW_ICONS': (bool, 'General', False), 'FORMAT_BOOKTYPE': (bool, 'General', False), 'CLEANUP_CACHE': (bool, 'General', False), + 'SECURE_DIR': (str, 'General', None), 'RSS_CHECKINTERVAL': (int, 'Scheduler', 20), 'SEARCH_INTERVAL': (int, 'Scheduler', 360), @@ -773,6 +774,25 @@ class Config(object): except OSError: logger.error('[Cache Check] Could not create cache dir. Check permissions of datadir: ' + mylar.DATA_DIR) + + if not self.SECURE_DIR: + self.SECURE_DIR = os.path.join(mylar.DATA_DIR, '.secure') + + if not os.path.exists(self.SECURE_DIR): + try: + os.makedirs(self.SECURE_DIR) + except OSError: + logger.error('[Secure DIR Check] Could not create secure directory. Check permissions of datadir: ' + mylar.DATA_DIR) + + #make sure the cookies.dat file is not in cache + for f in glob.glob(os.path.join(self.CACHE_DIR, '.32p_cookies.dat')): + try: + if os.path.isfile(f): + shutil.move(f, os.path.join(self.SECURE_DIR, '.32p_cookies.dat')) + except Exception as e: + logger.error('SECURE-DIR-MOVE] Unable to move cookies file into secure location. This is a fatal error.') + sys.exit() + if self.CLEANUP_CACHE is True: logger.fdebug('[Cache Cleanup] Cache Cleanup initiated. Will delete items from cache that are no longer needed.') cache_types = ['*.nzb', '*.torrent', '*.zip', '*.html', 'mylar_*'] diff --git a/mylar/webstart.py b/mylar/webstart.py index 3c41d9cb..c6c9d0e3 100755 --- a/mylar/webstart.py +++ b/mylar/webstart.py @@ -99,9 +99,7 @@ def initialize(options): }, '/cache': { 'tools.staticdir.on': True, - 'tools.staticdir.dir': mylar.CONFIG.CACHE_DIR, - 'tools.auth_basic.on': False, - 'tools.auth.on': False + 'tools.staticdir.dir': mylar.CONFIG.CACHE_DIR } } From 5fc5d17a1c60081d70567f9b20f5b0e92315516f Mon Sep 17 00:00:00 2001 From: evilhero Date: Mon, 4 Feb 2019 13:54:37 -0500 Subject: [PATCH 38/54] FIX: Trying to fix O. --- data/interfaces/default/config.html | 1 - 1 file changed, 1 deletion(-) diff --git a/data/interfaces/default/config.html b/data/interfaces/default/config.html index bd6ef60b..44d2eae5 100755 --- a/data/interfaces/default/config.html +++ b/data/interfaces/default/config.html @@ -2011,7 +2011,6 @@ var obj = JSON.parse(data); var versionsab = obj['version']; vsab = numberWithDecimals(versionsab); - alert(vsab); $('#sabstatus').val(obj['status']); $('#sabversion span').text('SABnzbd version: '+versionsab); if ( vsab < "0.8.0" ){ From 1032db735ab587be90aa23f8cf4ea7970bce243d Mon Sep 17 00:00:00 2001 From: evilhero Date: Tue, 5 Feb 2019 10:09:36 -0500 Subject: [PATCH 39/54] FIX: Fix for logging error when matching post-processing issues with no issue number --- mylar/PostProcessor.py | 12 ++++++++++-- 1 file changed, 10 insertions(+), 2 deletions(-) diff --git a/mylar/PostProcessor.py b/mylar/PostProcessor.py index 80e466fc..de9dfc4e 100755 --- a/mylar/PostProcessor.py +++ b/mylar/PostProcessor.py @@ -653,6 +653,7 @@ class PostProcessor(object): fcdigit = helpers.issuedigits(temploc) issuechk = myDB.select("SELECT * from issues WHERE ComicID=? 
AND Int_IssueNumber=?", [cs['ComicID'], fcdigit]) else: + fcdigit = None issuechk = myDB.select("SELECT * from issues WHERE ComicID=?", [cs['ComicID']]) if not issuechk: @@ -748,7 +749,10 @@ class PostProcessor(object): datematch = "False" else: - logger.info('%s[ISSUE-VERIFY] Found matching issue # %s for ComicID: %s / IssueID: %s' % (module, fcdigit, cs['ComicID'], isc['IssueID'])) + if fcdigit is None: + logger.info('%s[ISSUE-VERIFY] Found matching issue for ComicID: %s / IssueID: %s' % (module, cs['ComicID'], isc['IssueID'])) + else: + logger.info('%s[ISSUE-VERIFY] Found matching issue # %s for ComicID: %s / IssueID: %s' % (module, fcdigit, cs['ComicID'], isc['IssueID'])) if datematch == "True": # if we get to here, we need to do some more comparisons just to make sure we have the right volume @@ -1027,6 +1031,7 @@ class PostProcessor(object): fcdigit = helpers.issuedigits(temploc) issuechk = myDB.select("SELECT * from storyarcs WHERE ComicID=? AND Int_IssueNumber=?", [v[i]['WatchValues']['ComicID'], fcdigit]) else: + fcdigit = None issuechk = myDB.select("SELECT * from storyarcs WHERE ComicID=?", [v[i]['WatchValues']['ComicID']]) if issuechk is None: @@ -1113,7 +1118,10 @@ class PostProcessor(object): datematch = "False" else: - logger.info('%s Found matching issue # %s for ComicID: %s / IssueID: %s' % (module, fcdigit, v[i]['WatchValues']['ComicID'], isc['IssueID'])) + if fcdigit is None: + logger.info('%s Found matching issue for ComicID: %s / IssueID: %s' % (module, v[i]['WatchValues']['ComicID'], isc['IssueID'])) + else: + logger.info('%s Found matching issue # %s for ComicID: %s / IssueID: %s' % (module, fcdigit, v[i]['WatchValues']['ComicID'], isc['IssueID'])) logger.fdebug('datematch: %s' % datematch) logger.fdebug('temploc: %s' % helpers.issuedigits(temploc)) From e891aa5e27e0fcb0cffc8aaf1604dc4f808d8363 Mon Sep 17 00:00:00 2001 From: evilhero Date: Tue, 5 Feb 2019 13:12:33 -0500 Subject: [PATCH 40/54] FIX: Fix for 32P logon problems due to security changes --- mylar/auth32p.py | 322 ++++++++++++++++++++++++++-------------------- mylar/rsscheck.py | 90 +++++-------- 2 files changed, 216 insertions(+), 196 deletions(-) diff --git a/mylar/auth32p.py b/mylar/auth32p.py index df269035..8544288f 100644 --- a/mylar/auth32p.py +++ b/mylar/auth32p.py @@ -1,3 +1,18 @@ +# This file is part of Mylar. +# +# Mylar is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Mylar is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Mylar. If not, see . + import urllib2 import json import re @@ -13,7 +28,7 @@ import cfscrape from operator import itemgetter import mylar -from mylar import logger, filechecker, helpers +from mylar import db, logger, filechecker, helpers class info32p(object): @@ -35,7 +50,6 @@ class info32p(object): self.method = None lses = self.LoginSession(mylar.CONFIG.USERNAME_32P, mylar.CONFIG.PASSWORD_32P) - if not lses.login(): if not self.test: logger.error('%s [LOGIN FAILED] Disabling 32P provider until login error(s) can be fixed in order to avoid temporary bans.' 
% self.module) @@ -49,6 +63,7 @@ class info32p(object): logger.fdebug('%s [LOGIN SUCCESS] Now preparing for the use of 32P keyed authentication...' % self.module) self.authkey = lses.authkey self.passkey = lses.passkey + self.session = lses.ses self.uid = lses.uid try: mylar.INKDROPS_32P = int(math.floor(float(lses.inkdrops['results'][0]['inkdrops']))) @@ -67,26 +82,26 @@ class info32p(object): feedinfo = [] try: - with cfscrape.create_scraper() as s: - s.headers = self.headers - cj = LWPCookieJar(os.path.join(mylar.CONFIG.SECURE_DIR, ".32p_cookies.dat")) - cj.load() - s.cookies = cj +# with cfscrape.create_scraper(delay=15) as s: +# s.headers = self.headers +# cj = LWPCookieJar(os.path.join(mylar.CONFIG.SECURE_DIR, ".32p_cookies.dat")) +# cj.load() +# s.cookies = cj if mylar.CONFIG.VERIFY_32P == 1 or mylar.CONFIG.VERIFY_32P == True: verify = True else: verify = False - logger.fdebug('[32P] Verify SSL set to : %s' % verify) +# logger.fdebug('[32P] Verify SSL set to : %s' % verify) if not verify: - #32P throws back an insecure warning because it can't validate against the CA. The below suppresses the message just for 32P instead of being displa$ +# #32P throws back an insecure warning because it can't validate against the CA. The below suppresses the message just for 32P instead of being displa$ from requests.packages.urllib3.exceptions import InsecureRequestWarning requests.packages.urllib3.disable_warnings(InsecureRequestWarning) # post to the login form - r = s.post(self.url, verify=verify, allow_redirects=True) + r = self.session.post(self.url, verify=verify, allow_redirects=True) #logger.debug(self.module + " Content session reply" + r.text) @@ -246,147 +261,177 @@ class info32p(object): logger.warn('No results found for search on 32P.') return "no results" - with cfscrape.create_scraper() as s: - s.headers = self.headers - cj = LWPCookieJar(os.path.join(mylar.CONFIG.SECURE_DIR, ".32p_cookies.dat")) - cj.load() - s.cookies = cj - data = [] - pdata = [] - pubmatch = False +# with cfscrape.create_scraper(delay=15) as s: +# s.headers = self.headers +# cj = LWPCookieJar(os.path.join(mylar.CONFIG.SECURE_DIR, ".32p_cookies.dat")) +# cj.load() +# s.cookies = cj + data = [] + pdata = [] + pubmatch = False - if any([series_search.startswith('0-Day Comics Pack'), torrentid is not None]): - data.append({"id": torrentid, - "series": series_search}) - else: - if any([not chk_id, mylar.CONFIG.DEEP_SEARCH_32P is True]): + if any([series_search.startswith('0-Day Comics Pack'), torrentid is not None]): + data.append({"id": torrentid, + "series": series_search}) + else: + if any([not chk_id, mylar.CONFIG.DEEP_SEARCH_32P is True]): + if mylar.CONFIG.SEARCH_32P is True: + url = 'https://32pag.es/torrents.php' #?action=serieslist&filter=' + series_search #&filter=F + params = {'action': 'serieslist', 'filter': series_search} + time.sleep(1) #just to make sure we don't hammer, 1s pause. + t = self.session.get(url, params=params, verify=True, allow_redirects=True) + soup = BeautifulSoup(t.content, "html.parser") + results = soup.find_all("a", {"class":"object-qtip"},{"data-type":"torrentgroup"}) + + for r in results: if mylar.CONFIG.SEARCH_32P is True: - url = 'https://32pag.es/torrents.php' #?action=serieslist&filter=' + series_search #&filter=F - params = {'action': 'serieslist', 'filter': series_search} - time.sleep(1) #just to make sure we don't hammer, 1s pause. 
- t = s.get(url, params=params, verify=True, allow_redirects=True) - soup = BeautifulSoup(t.content, "html.parser") - results = soup.find_all("a", {"class":"object-qtip"},{"data-type":"torrentgroup"}) + torrentid = r['data-id'] + torrentname = r.findNext(text=True) + torrentname = torrentname.strip() + else: + torrentid = r['id'] + torrentname = r['series'] - for r in results: - if mylar.CONFIG.SEARCH_32P is True: - torrentid = r['data-id'] - torrentname = r.findNext(text=True) - torrentname = torrentname.strip() - else: - torrentid = r['id'] - torrentname = r['series'] - - as_d = filechecker.FileChecker() - as_dinfo = as_d.dynamic_replace(torrentname) - seriesresult = re.sub('\|','', as_dinfo['mod_seriesname']).strip() - logger.fdebug('searchresult: %s --- %s [%s]' % (seriesresult, mod_series, publisher_search)) - if seriesresult.lower() == mod_series.lower(): + as_d = filechecker.FileChecker() + as_dinfo = as_d.dynamic_replace(torrentname) + seriesresult = re.sub('\|','', as_dinfo['mod_seriesname']).strip() + logger.fdebug('searchresult: %s --- %s [%s]' % (seriesresult, mod_series, publisher_search)) + if seriesresult.lower() == mod_series.lower(): + logger.fdebug('[MATCH] %s [%s]' % (torrentname, torrentid)) + data.append({"id": torrentid, + "series": torrentname}) + elif publisher_search.lower() in seriesresult.lower(): + logger.fdebug('[MATCH] Publisher match.') + tmp_torrentname = re.sub(publisher_search.lower(), '', seriesresult.lower()).strip() + as_t = filechecker.FileChecker() + as_tinfo = as_t.dynamic_replace(tmp_torrentname) + if re.sub('\|', '', as_tinfo['mod_seriesname']).strip() == mod_series.lower(): logger.fdebug('[MATCH] %s [%s]' % (torrentname, torrentid)) - data.append({"id": torrentid, - "series": torrentname}) - elif publisher_search.lower() in seriesresult.lower(): - logger.fdebug('[MATCH] Publisher match.') - tmp_torrentname = re.sub(publisher_search.lower(), '', seriesresult.lower()).strip() - as_t = filechecker.FileChecker() - as_tinfo = as_t.dynamic_replace(tmp_torrentname) - if re.sub('\|', '', as_tinfo['mod_seriesname']).strip() == mod_series.lower(): - logger.fdebug('[MATCH] %s [%s]' % (torrentname, torrentid)) - pdata.append({"id": torrentid, - "series": torrentname}) - pubmatch = True + pdata.append({"id": torrentid, + "series": torrentname}) + pubmatch = True - logger.fdebug('%s series listed for searching that match.' % len(data)) - else: - logger.fdebug('Exact series ID already discovered previously. Setting to : %s [%s]' % (chk_id['series'], chk_id['id'])) - pdata.append({"id": chk_id['id'], - "series": chk_id['series']}) - pubmatch = True - - if all([len(data) == 0, len(pdata) == 0]): - return "no results" + logger.fdebug('%s series listed for searching that match.' % len(data)) else: - dataset = [] - if len(data) > 0: - dataset += data - if len(pdata) > 0: - dataset += pdata - logger.fdebug(str(len(dataset)) + ' series match the tile being searched for on 32P...') + logger.fdebug('Exact series ID already discovered previously. 
Setting to : %s [%s]' % (chk_id['series'], chk_id['id'])) + pdata.append({"id": chk_id['id'], + "series": chk_id['series']}) + pubmatch = True - if all([chk_id is None, not series_search.startswith('0-Day Comics Pack'), self.searchterm['torrentid_32p'] is not None, self.searchterm['torrentid_32p'] != 'None']) and any([len(data) == 1, len(pdata) == 1]): - #update the 32p_reference so we avoid doing a url lookup next time - helpers.checkthe_id(comic_id, dataset) + if all([len(data) == 0, len(pdata) == 0]): + return "no results" + else: + dataset = [] + if len(data) > 0: + dataset += data + if len(pdata) > 0: + dataset += pdata + logger.fdebug(str(len(dataset)) + ' series match the tile being searched for on 32P...') + + if all([chk_id is None, not series_search.startswith('0-Day Comics Pack'), self.searchterm['torrentid_32p'] is not None, self.searchterm['torrentid_32p'] != 'None']) and any([len(data) == 1, len(pdata) == 1]): + #update the 32p_reference so we avoid doing a url lookup next time + helpers.checkthe_id(comic_id, dataset) + else: + if all([not series_search.startswith('0-Day Comics Pack'), self.searchterm['torrentid_32p'] is not None, self.searchterm['torrentid_32p'] != 'None']): + pass else: - if all([not series_search.startswith('0-Day Comics Pack'), self.searchterm['torrentid_32p'] is not None, self.searchterm['torrentid_32p'] != 'None']): - pass - else: - logger.debug('Unable to properly verify reference on 32P - will update the 32P reference point once the issue has been successfully matched against.') + logger.debug('Unable to properly verify reference on 32P - will update the 32P reference point once the issue has been successfully matched against.') - results32p = [] - resultlist = {} + results32p = [] + resultlist = {} - for x in dataset: - #for 0-day packs, issue=week#, volume=month, id=0-day year pack (ie.issue=21&volume=2 for feb.21st) - payload = {"action": "groupsearch", - "id": x['id'], #searchid, - "issue": issue_search} - #in order to match up against 0-day stuff, volume has to be none at this point - #when doing other searches tho, this should be allowed to go through - #if all([volume_search != 'None', volume_search is not None]): - # payload.update({'volume': re.sub('v', '', volume_search).strip()}) - if series_search.startswith('0-Day Comics Pack'): - payload.update({"volume": volume_search}) + for x in dataset: + #for 0-day packs, issue=week#, volume=month, id=0-day year pack (ie.issue=21&volume=2 for feb.21st) + payload = {"action": "groupsearch", + "id": x['id'], #searchid, + "issue": issue_search} + #in order to match up against 0-day stuff, volume has to be none at this point + #when doing other searches tho, this should be allowed to go through + #if all([volume_search != 'None', volume_search is not None]): + # payload.update({'volume': re.sub('v', '', volume_search).strip()}) + if series_search.startswith('0-Day Comics Pack'): + payload.update({"volume": volume_search}) - payload = json.dumps(payload) - payload = json.loads(payload) + payload = json.dumps(payload) + payload = json.loads(payload) - logger.fdebug('payload: %s' % payload) - url = 'https://32pag.es/ajax.php' - time.sleep(1) #just to make sure we don't hammer, 1s pause. - try: - d = s.get(url, params=payload, verify=True, allow_redirects=True) - except Exception as e: - logger.error('%s [%s] Could not POST URL %s' % (self.module, e, url)) + logger.fdebug('payload: %s' % payload) + url = 'https://32pag.es/ajax.php' + time.sleep(1) #just to make sure we don't hammer, 1s pause. 
+ try: + d = self.session.get(url, params=payload, verify=True, allow_redirects=True) + except Exception as e: + logger.error('%s [%s] Could not POST URL %s' % (self.module, e, url)) - try: - searchResults = d.json() - except: - searchResults = d.text - logger.debug('%s Search Result did not return valid JSON, falling back on text: %s' % (self.module, searchResults.text)) - return False + try: + searchResults = d.json() + except Exception as e: + searchResults = d.text + logger.debug('[%s] %s Search Result did not return valid JSON, falling back on text: %s' % (e, self.module, searchResults.text)) + return False - if searchResults['status'] == 'success' and searchResults['count'] > 0: - logger.fdebug('successfully retrieved %s search results' % searchResults['count']) - for a in searchResults['details']: - if series_search.startswith('0-Day Comics Pack'): - title = series_search - else: - title = self.searchterm['series'] + ' v' + a['volume'] + ' #' + a['issues'] - results32p.append({'link': a['id'], - 'title': title, - 'filesize': a['size'], - 'issues': a['issues'], - 'pack': a['pack'], - 'format': a['format'], - 'language': a['language'], - 'seeders': a['seeders'], - 'leechers': a['leechers'], - 'scanner': a['scanner'], - 'chkit': {'id': x['id'], 'series': x['series']}, - 'pubdate': datetime.datetime.fromtimestamp(float(a['upload_time'])).strftime('%a, %d %b %Y %H:%M:%S'), - 'int_pubdate': float(a['upload_time'])}) + if searchResults['status'] == 'success' and searchResults['count'] > 0: + logger.fdebug('successfully retrieved %s search results' % searchResults['count']) + for a in searchResults['details']: + if series_search.startswith('0-Day Comics Pack'): + title = series_search + else: + title = self.searchterm['series'] + ' v' + a['volume'] + ' #' + a['issues'] + results32p.append({'link': a['id'], + 'title': title, + 'filesize': a['size'], + 'issues': a['issues'], + 'pack': a['pack'], + 'format': a['format'], + 'language': a['language'], + 'seeders': a['seeders'], + 'leechers': a['leechers'], + 'scanner': a['scanner'], + 'chkit': {'id': x['id'], 'series': x['series']}, + 'pubdate': datetime.datetime.fromtimestamp(float(a['upload_time'])).strftime('%a, %d %b %Y %H:%M:%S'), + 'int_pubdate': float(a['upload_time'])}) - else: - logger.fdebug('32P did not return any valid search results.') - - if len(results32p) > 0: - resultlist['entries'] = sorted(results32p, key=itemgetter('pack','title'), reverse=False) - logger.debug('%s Resultslist: %s' % (self.module, resultlist)) else: - resultlist = 'no results' + logger.fdebug('32P did not return any valid search results.') + + if len(results32p) > 0: + resultlist['entries'] = sorted(results32p, key=itemgetter('pack','title'), reverse=False) + logger.debug('%s Resultslist: %s' % (self.module, resultlist)) + else: + resultlist = 'no results' return resultlist + def downloadfile(self, payload, filepath): + url = 'https://32pag.es/torrents.php' + try: + r = self.session.get(url, params=payload, verify=True, stream=True, allow_redirects=True) + except Exception as e: + logger.error('%s [%s] Could not POST URL %s' % ('[32P-DOWNLOADER]', e, url)) + return False + + if str(r.status_code) != '200': + logger.warn('Unable to download torrent from 32P [Status Code returned: %s]' % r.status_code) + if str(r.status_code) == '404' and site == '32P': + logger.warn('[32P-CACHED_ENTRY] Entry found in 32P cache - incorrect. Torrent has probably been merged into a pack, or another series id. 
Removing from cache.') + helpers.delete_cache_entry(linkit) + else: + logger.info('content: %s' % r.content) + return False + + + with open(filepath, 'wb') as f: + for chunk in r.iter_content(chunk_size=1024): + if chunk: # filter out keep-alive new chunks + f.write(chunk) + f.flush() + + return True + + def delete_cache_entry(self, id): + myDB = db.DBConnection() + myDB.action("DELETE FROM rssdb WHERE link=? AND Site='32P'", [id]) + class LoginSession(object): def __init__(self, un, pw, session_path=None): ''' @@ -399,7 +444,7 @@ class info32p(object): ''' self.module = '[32P-AUTHENTICATION]' try: - self.ses = cfscrape.create_scraper() + self.ses = cfscrape.create_scraper(delay=15) except Exception as e: logger.error('%s Can\'t create session with cfscrape' % self.module) @@ -466,7 +511,7 @@ class info32p(object): if r.status_code != 200: if r.status_code == 302: - newloc = r.headers.get('location', '') + newloc = r.headers.get('Location', '') logger.warn('Got redirect from the POST-ajax action=login GET: %s' % newloc) self.error = {'status':'redirect-error', 'message':'got redirect from POST-ajax login action : ' + newloc} else: @@ -614,16 +659,19 @@ class info32p(object): if (self.test_skey_valid()): logger.fdebug('%s Session key-based login was good.' % self.module) self.method = 'Session Cookie retrieved OK.' - return True + return {'ses': self.ses, + 'status': True} if (self.test_login()): logger.fdebug('%s Credential-based login was good.' % self.module) self.method = 'Credential-based login OK.' - return True + return {'ses': self.ses, + 'status': True} logger.warn('%s Both session key and credential-based logins failed.' % self.module) self.method = 'Both session key & credential login failed.' - return False + return {'ses': self.ses, + 'status': False} #if __name__ == '__main__': diff --git a/mylar/rsscheck.py b/mylar/rsscheck.py index b2170965..3f5b38c3 100755 --- a/mylar/rsscheck.py +++ b/mylar/rsscheck.py @@ -920,33 +920,12 @@ def torsend2client(seriesname, issue, seriesyear, linkit, site, pubhash=None): 'authkey': mylar.AUTHKEY_32P, 'id': linkit} - headers = None #{'Accept-encoding': 'gzip', - # 'User-Agent': str(mylar.USER_AGENT)} - #elif site == 'TPSE': - # pass - #linkit should be the magnet link since it's TPSE - #url = linkit + dfile = auth32p.info32p() + file_download = dfile.downloadfile(payload, filepath) + if file_download is False: + return "fail" - #url = helpers.torrent_create('TPSE', linkit) - - #if url.startswith('https'): - # tpse_referrer = 'https://torrentproject.se/' - #else: - # tpse_referrer = 'http://torrentproject.se/' - - #try: - # scraper = cfscrape.create_scraper() - # cf_cookievalue, cf_user_agent = scraper.get_tokens(url) - # headers = {'Accept-encoding': 'gzip', - # 'User-Agent': cf_user_agent} - - #except Exception, e: - # return "fail" - - #logger.fdebug('Grabbing torrent from url:' + str(url)) - - #payload = None - #verify = False + logger.fdebug('[%s] Saved torrent file to : %s' % (site, filepath)) elif site == 'DEM': url = helpers.torrent_create('DEM', linkit) @@ -991,7 +970,7 @@ def torsend2client(seriesname, issue, seriesyear, linkit, site, pubhash=None): payload = None verify = False - if site != 'Public Torrents': + if site != 'Public Torrents' and site != '32P': if not verify: #32P throws back an insecure warning because it can't validate against the CA. The below suppresses the message just for 32P instead of being displayed. 
#disable SSL warnings - too many 'warning' messages about invalid certificates @@ -1008,6 +987,7 @@ def torsend2client(seriesname, issue, seriesyear, linkit, site, pubhash=None): except ImportError: logger.warn('[EPIC FAILURE] Cannot load the requests module') return "fail" + try: scraper = cfscrape.create_scraper() if site == 'WWT': @@ -1020,31 +1000,31 @@ def torsend2client(seriesname, issue, seriesyear, linkit, site, pubhash=None): #r = requests.get(url, params=payload, verify=verify, stream=True, headers=headers) except Exception, e: logger.warn('Error fetching data from %s (%s): %s' % (site, url, e)) - if site == '32P': - logger.info('[TOR2CLIENT-32P] Retrying with 32P') - if mylar.CONFIG.MODE_32P == 1: + # if site == '32P': + # logger.info('[TOR2CLIENT-32P] Retrying with 32P') + # if mylar.CONFIG.MODE_32P == 1: - logger.info('[TOR2CLIENT-32P] Attempting to re-authenticate against 32P and poll new keys as required.') - feed32p = auth32p.info32p(reauthenticate=True) - feedinfo = feed32p.authenticate() + # logger.info('[TOR2CLIENT-32P] Attempting to re-authenticate against 32P and poll new keys as required.') + # feed32p = auth32p.info32p(reauthenticate=True) + # feedinfo = feed32p.authenticate() - if feedinfo == "disable": - helpers.disable_provider('32P') - return "fail" + # if feedinfo == "disable": + # helpers.disable_provider('32P') + # return "fail" - logger.debug('[TOR2CLIENT-32P] Creating CF Scraper') - scraper = cfscrape.create_scraper() + # logger.debug('[TOR2CLIENT-32P] Creating CF Scraper') + # scraper = cfscrape.create_scraper() - try: - r = scraper.get(url, params=payload, verify=verify, allow_redirects=True) - except Exception, e: - logger.warn('[TOR2CLIENT-32P] Unable to GET %s (%s): %s' % (site, url, e)) - return "fail" - else: - logger.warn('[TOR2CLIENT-32P] Unable to authenticate using existing RSS Feed given. Make sure that you have provided a CURRENT feed from 32P') - return "fail" - else: - return "fail" + # try: + # r = scraper.get(url, params=payload, verify=verify, allow_redirects=True) + # except Exception, e: + # logger.warn('[TOR2CLIENT-32P] Unable to GET %s (%s): %s' % (site, url, e)) + # return "fail" + # else: + # logger.warn('[TOR2CLIENT-32P] Unable to authenticate using existing RSS Feed given. Make sure that you have provided a CURRENT feed from 32P') + # return "fail" + # else: + # return "fail" if any([site == 'DEM', site == 'WWT']) and any([str(r.status_code) == '403', str(r.status_code) == '404', str(r.status_code) == '503']): if str(r.status_code) != '503': @@ -1069,15 +1049,6 @@ def torsend2client(seriesname, issue, seriesyear, linkit, site, pubhash=None): except Exception, e: return "fail" - if str(r.status_code) != '200': - logger.warn('Unable to download torrent from ' + site + ' [Status Code returned: ' + str(r.status_code) + ']') - if str(r.status_code) == '404' and site == '32P': - logger.warn('[32P-CACHED_ENTRY] Entry found in 32P cache - incorrect. Torrent has probably been merged into a pack, or another series id. Removing from cache.') - delete_cache_entry(linkit) - else: - logger.info('content: %s' % r.content) - return "fail" - if any([site == 'DEM', site == 'WWT']): if r.headers.get('Content-Encoding') == 'gzip': buf = StringIO(r.content) @@ -1091,8 +1062,9 @@ def torsend2client(seriesname, issue, seriesyear, linkit, site, pubhash=None): logger.fdebug('[' + site + '] Saved torrent file to : ' + filepath) else: - #tpse is magnet links only... - filepath = linkit + if site != '32P': + #tpse is magnet links only... 
+ filepath = linkit if mylar.USE_UTORRENT: uTC = utorrent.utorrentclient() From 6c487d5c54fb192dfd6c239a78e9f4feaca20798 Mon Sep 17 00:00:00 2001 From: evilhero Date: Tue, 5 Feb 2019 13:14:02 -0500 Subject: [PATCH 41/54] FIX: Added some additional error codes for pull-list retrieval, FIX: Fixed cache-cleanup not indicating how many items were successsfully cleaned --- mylar/config.py | 2 +- mylar/locg.py | 140 +++++++++++++++++++++++--------------------- mylar/weeklypull.py | 4 +- 3 files changed, 78 insertions(+), 68 deletions(-) diff --git a/mylar/config.py b/mylar/config.py index e5ca4151..233205f2 100644 --- a/mylar/config.py +++ b/mylar/config.py @@ -809,7 +809,7 @@ class Config(object): cntr+=1 if cntr > 1: - logger.fdebug('[Cache Cleanup] Cache Cleanup finished. Cleaned %s items') + logger.fdebug('[Cache Cleanup] Cache Cleanup finished. Cleaned %s items' % cntr) else: logger.fdebug('[Cache Cleanup] Cache Cleanup finished. Nothing to clean!') diff --git a/mylar/locg.py b/mylar/locg.py index bd7b046c..05580e69 100755 --- a/mylar/locg.py +++ b/mylar/locg.py @@ -28,7 +28,7 @@ def locg(pulldate=None,weeknumber=None,year=None): if pulldate is None or pulldate == '00000000': weeknumber = todaydate.strftime("%U") elif '-' in pulldate: - #find the week number + #find the week number weektmp = datetime.date(*(int(s) for s in pulldate.split('-'))) weeknumber = weektmp.strftime("%U") #we need to now make sure we default to the correct week @@ -58,82 +58,90 @@ def locg(pulldate=None,weeknumber=None,year=None): logger.warn(e) return {'status': 'failure'} - if r.status_code == '619': + if str(r.status_code) == '619': logger.warn('[' + str(r.status_code) + '] No date supplied, or an invalid date was provided [' + str(pulldate) + ']') - return {'status': 'failure'} - elif r.status_code == '999' or r.status_code == '111': + return {'status': 'failure'} + elif str(r.status_code) == '999' or str(r.status_code) == '111': logger.warn('[' + str(r.status_code) + '] Unable to retrieve data from site - this is a site.specific issue [' + str(pulldate) + ']') - return {'status': 'failure'} + return {'status': 'failure'} + elif str(r.status_code) == '200': + data = r.json() - data = r.json() + logger.info('[WEEKLY-PULL] There are ' + str(len(data)) + ' issues for the week of ' + str(weeknumber) + ', ' + str(year)) + pull = [] - logger.info('[WEEKLY-PULL] There are ' + str(len(data)) + ' issues for the week of ' + str(weeknumber) + ', ' + str(year)) - pull = [] + for x in data: + pull.append({'series': x['series'], + 'alias': x['alias'], + 'issue': x['issue'], + 'publisher': x['publisher'], + 'shipdate': x['shipdate'], + 'coverdate': x['coverdate'], + 'comicid': x['comicid'], + 'issueid': x['issueid'], + 'weeknumber': x['weeknumber'], + 'annuallink': x['link'], + 'year': x['year'], + 'volume': x['volume'], + 'seriesyear': x['seriesyear'], + 'format': x['type']}) + shipdate = x['shipdate'] - for x in data: - pull.append({'series': x['series'], - 'alias': x['alias'], - 'issue': x['issue'], - 'publisher': x['publisher'], - 'shipdate': x['shipdate'], - 'coverdate': x['coverdate'], - 'comicid': x['comicid'], - 'issueid': x['issueid'], - 'weeknumber': x['weeknumber'], - 'annuallink': x['link'], - 'year': x['year'], - 'volume': x['volume'], - 'seriesyear': x['seriesyear'], - 'format': x['type']}) - shipdate = x['shipdate'] + myDB = db.DBConnection() - myDB = db.DBConnection() + myDB.action("CREATE TABLE IF NOT EXISTS weekly (SHIPDATE, PUBLISHER text, ISSUE text, COMIC VARCHAR(150), EXTRA text, STATUS text, ComicID 
text, IssueID text, CV_Last_Update text, DynamicName text, weeknumber text, year text, volume text, seriesyear text, annuallink text, format text, rowid INTEGER PRIMARY KEY)") - myDB.action("CREATE TABLE IF NOT EXISTS weekly (SHIPDATE, PUBLISHER text, ISSUE text, COMIC VARCHAR(150), EXTRA text, STATUS text, ComicID text, IssueID text, CV_Last_Update text, DynamicName text, weeknumber text, year text, volume text, seriesyear text, annuallink text, format text, rowid INTEGER PRIMARY KEY)") + #clear out the upcoming table here so they show the new values properly. + if pulldate == '00000000': + logger.info('Re-creating pullist to ensure everything\'s fresh.') + myDB.action('DELETE FROM weekly WHERE weeknumber=? AND year=?',[int(weeknumber), int(year)]) - #clear out the upcoming table here so they show the new values properly. - if pulldate == '00000000': - logger.info('Re-creating pullist to ensure everything\'s fresh.') - myDB.action('DELETE FROM weekly WHERE weeknumber=? AND year=?',[int(weeknumber), int(year)]) + for x in pull: + comicid = None + issueid = None + comicname = x['series'] + if x['comicid'] is not None: + comicid = x['comicid'] + if x['issueid'] is not None: + issueid= x['issueid'] + if x['alias'] is not None: + comicname = x['alias'] - for x in pull: - comicid = None - issueid = None - comicname = x['series'] - if x['comicid'] is not None: - comicid = x['comicid'] - if x['issueid'] is not None: - issueid= x['issueid'] - if x['alias'] is not None: - comicname = x['alias'] + cl_d = mylar.filechecker.FileChecker() + cl_dyninfo = cl_d.dynamic_replace(comicname) + dynamic_name = re.sub('[\|\s]','', cl_dyninfo['mod_seriesname'].lower()).strip() - cl_d = mylar.filechecker.FileChecker() - cl_dyninfo = cl_d.dynamic_replace(comicname) - dynamic_name = re.sub('[\|\s]','', cl_dyninfo['mod_seriesname'].lower()).strip() + controlValueDict = {'DYNAMICNAME': dynamic_name, + 'ISSUE': re.sub('#', '', x['issue']).strip()} - controlValueDict = {'DYNAMICNAME': dynamic_name, - 'ISSUE': re.sub('#', '', x['issue']).strip()} - - newValueDict = {'SHIPDATE': x['shipdate'], - 'PUBLISHER': x['publisher'], - 'STATUS': 'Skipped', - 'COMIC': comicname, - 'COMICID': comicid, - 'ISSUEID': issueid, - 'WEEKNUMBER': x['weeknumber'], - 'ANNUALLINK': x['annuallink'], - 'YEAR': x['year'], - 'VOLUME': x['volume'], - 'SERIESYEAR': x['seriesyear'], - 'FORMAT': x['format']} - myDB.upsert("weekly", newValueDict, controlValueDict) + newValueDict = {'SHIPDATE': x['shipdate'], + 'PUBLISHER': x['publisher'], + 'STATUS': 'Skipped', + 'COMIC': comicname, + 'COMICID': comicid, + 'ISSUEID': issueid, + 'WEEKNUMBER': x['weeknumber'], + 'ANNUALLINK': x['annuallink'], + 'YEAR': x['year'], + 'VOLUME': x['volume'], + 'SERIESYEAR': x['seriesyear'], + 'FORMAT': x['format']} + myDB.upsert("weekly", newValueDict, controlValueDict) - logger.info('[PULL-LIST] Successfully populated pull-list into Mylar for the week of: ' + str(weeknumber)) - #set the last poll date/time here so that we don't start overwriting stuff too much... - mylar.CONFIG.PULL_REFRESH = todaydate + logger.info('[PULL-LIST] Successfully populated pull-list into Mylar for the week of: ' + str(weeknumber)) + #set the last poll date/time here so that we don't start overwriting stuff too much... 
+ mylar.CONFIG.PULL_REFRESH = todaydate - return {'status': 'success', - 'count': len(data), - 'weeknumber': weeknumber, - 'year': year} + return {'status': 'success', + 'count': len(data), + 'weeknumber': weeknumber, + 'year': year} + + else: + if str(r.status_code) == '666': + logger.warn('[%s] The error returned is: %s' % (r.status_code, r.headers)) + return {'status': 'update_required'} + else: + logger.warn('[%s] The error returned is: %s' % (r.status_code, r.headers)) + return {'status': 'failure'} diff --git a/mylar/weeklypull.py b/mylar/weeklypull.py index 29e50f9b..435be313 100755 --- a/mylar/weeklypull.py +++ b/mylar/weeklypull.py @@ -81,7 +81,9 @@ def pullit(forcecheck=None, weeknumber=None, year=None): elif chk_locg['status'] == 'success': logger.info('[PULL-LIST] Weekly Pull List successfully loaded with ' + str(chk_locg['count']) + ' issues.') return new_pullcheck(chk_locg['weeknumber'],chk_locg['year']) - + elif chk_log['status'] == 'update_required': + logger.warn('[PULL-LIST] Your version of Mylar is not up-to-date. You MUST update before this works') + return else: logger.info('[PULL-LIST] Unable to retrieve weekly pull-list. Dropping down to legacy method of PW-file') mylar.PULLBYFILE = pull_the_file(newrl) From 56706f2ac64ddab6471b0e332948e9e5816d070f Mon Sep 17 00:00:00 2001 From: evilhero Date: Thu, 7 Feb 2019 12:46:46 -0500 Subject: [PATCH 42/54] FIX: Fix for refresh not honouring the alternate_latest_series_covers option properly, FIX: When adding a series, if the series had a start year with a ? in it, would error out, IMP: Added more columns to the ddl_info table --- mylar/__init__.py | 18 ++++++++++++- mylar/cv.py | 2 +- mylar/importer.py | 65 +++++++++++++++++++++++++++++++++-------------- 3 files changed, 64 insertions(+), 21 deletions(-) diff --git a/mylar/__init__.py b/mylar/__init__.py index fa050b3b..7380d200 100644 --- a/mylar/__init__.py +++ b/mylar/__init__.py @@ -528,7 +528,7 @@ def dbcheck(): c.execute('CREATE TABLE IF NOT EXISTS jobhistory (JobName TEXT, prev_run_datetime timestamp, prev_run_timestamp REAL, next_run_datetime timestamp, next_run_timestamp REAL, last_run_completed TEXT, successful_completions TEXT, failed_completions TEXT, status TEXT)') c.execute('CREATE TABLE IF NOT EXISTS manualresults (provider TEXT, id TEXT, kind TEXT, comicname TEXT, volume TEXT, oneoff TEXT, fullprov TEXT, issuenumber TEXT, modcomicname TEXT, name TEXT, link TEXT, size TEXT, pack_numbers TEXT, pack_issuelist TEXT, comicyear TEXT, issuedate TEXT, tmpprov TEXT, pack TEXT, issueid TEXT, comicid TEXT, sarc TEXT, issuearcid TEXT)') c.execute('CREATE TABLE IF NOT EXISTS storyarcs(StoryArcID TEXT, ComicName TEXT, IssueNumber TEXT, SeriesYear TEXT, IssueYEAR TEXT, StoryArc TEXT, TotalIssues TEXT, Status TEXT, inCacheDir TEXT, Location TEXT, IssueArcID TEXT, ReadingOrder INT, IssueID TEXT, ComicID TEXT, ReleaseDate TEXT, IssueDate TEXT, Publisher TEXT, IssuePublisher TEXT, IssueName TEXT, CV_ArcID TEXT, Int_IssueNumber INT, DynamicComicName TEXT, Volume TEXT, Manual TEXT, DateAdded TEXT, DigitalDate TEXT, Type TEXT, Aliases TEXT)') - c.execute('CREATE TABLE IF NOT EXISTS ddl_info (ID TEXT UNIQUE, series TEXT, year TEXT, filename TEXT, size TEXT, issueid TEXT, comicid TEXT, link TEXT, status TEXT)') + c.execute('CREATE TABLE IF NOT EXISTS ddl_info (ID TEXT UNIQUE, series TEXT, year TEXT, filename TEXT, size TEXT, issueid TEXT, comicid TEXT, link TEXT, status TEXT, remote_filesize TEXT, updated_date TEXT, mainlink TEXT)') conn.commit c.close @@ -1099,6 +1099,22 @@ def 
dbcheck(): except sqlite3.OperationalError: c.execute('ALTER TABLE jobhistory ADD COLUMN status TEXT') + ## -- DDL_info Table -- + try: + c.execute('SELECT remote_filesize from ddl_info') + except sqlite3.OperationalError: + c.execute('ALTER TABLE ddl_info ADD COLUMN remote_filesize TEXT') + + try: + c.execute('SELECT updated_date from ddl_info') + except sqlite3.OperationalError: + c.execute('ALTER TABLE ddl_info ADD COLUMN updated_date TEXT') + + try: + c.execute('SELECT mainlink from ddl_info') + except sqlite3.OperationalError: + c.execute('ALTER TABLE ddl_info ADD COLUMN mainlink TEXT') + #if it's prior to Wednesday, the issue counts will be inflated by one as the online db's everywhere #prepare for the next 'new' release of a series. It's caught in updater.py, so let's just store the #value in the sql so we can display it in the details screen for everyone to wonder at. diff --git a/mylar/cv.py b/mylar/cv.py index 0eb85191..a94193dd 100755 --- a/mylar/cv.py +++ b/mylar/cv.py @@ -271,7 +271,7 @@ def GetComicInfo(comicid, dom, safechk=None): comic['ComicYear'] = '0000' #safety check, cause you known, dufus'... - if comic['ComicYear'][-1:] == '-': + if any([comic['ComicYear'][-1:] == '-', comic['ComicYear'][-1:] == '?']): comic['ComicYear'] = comic['ComicYear'][:-1] try: diff --git a/mylar/importer.py b/mylar/importer.py index bdbfcaa4..d599541a 100644 --- a/mylar/importer.py +++ b/mylar/importer.py @@ -240,7 +240,7 @@ def addComictoDB(comicid, mismatch=None, pullupd=None, imported=None, ogcname=No if mylar.CONFIG.ALTERNATE_LATEST_SERIES_COVERS is False: PRComicImage = os.path.join('cache', str(comicid) + ".jpg") ComicImage = helpers.replacetheslash(PRComicImage) - if os.path.isfile(os.path.join(comlocation, 'cover.jpg')) is True: + if os.path.isfile(PRComicImage) is True: logger.fdebug('Cover already exists for series. Not redownloading.') else: covercheck = helpers.getImage(comicid, comic['ComicImage']) @@ -248,18 +248,15 @@ def addComictoDB(comicid, mismatch=None, pullupd=None, imported=None, ogcname=No logger.info('Attempting to retrieve alternate comic image for the series.') covercheck = helpers.getImage(comicid, comic['ComicImageALT']) - PRComicImage = os.path.join('cache', str(comicid) + ".jpg") - ComicImage = helpers.replacetheslash(PRComicImage) - - #if the comic cover local is checked, save a cover.jpg to the series folder. - if all([mylar.CONFIG.COMIC_COVER_LOCAL is True, os.path.isdir(comlocation) is True]): - try: - comiclocal = os.path.join(comlocation, 'cover.jpg') - shutil.copyfile(os.path.join(mylar.CONFIG.CACHE_DIR, str(comicid) + '.jpg'), comiclocal) - if mylar.CONFIG.ENFORCE_PERMS: - filechecker.setperms(comiclocal) - except IOError as e: - logger.error('Unable to save cover (' + str(comiclocal) + ') into series directory (' + str(comlocation) + ') at this time.') + #if the comic cover local is checked, save a cover.jpg to the series folder. 
+ if all([mylar.CONFIG.COMIC_COVER_LOCAL is True, os.path.isdir(comlocation) is True, os.path.isfile(PRComicImage) is False]): + try: + comiclocal = os.path.join(comlocation, 'cover.jpg') + shutil.copyfile(PRComicImage, comiclocal) + if mylar.CONFIG.ENFORCE_PERMS: + filechecker.setperms(comiclocal) + except IOError as e: + logger.error('Unable to save cover (' + str(comiclocal) + ') into series directory (' + str(comlocation) + ') at this time.') else: ComicImage = None @@ -350,8 +347,21 @@ def addComictoDB(comicid, mismatch=None, pullupd=None, imported=None, ogcname=No if anndata: manualAnnual(annchk=anndata) - if all([mylar.CONFIG.ALTERNATE_LATEST_SERIES_COVERS is True, lastissueid != importantdates['LatestIssueID']]): - image_it(comicid, importantdates['LatestIssueID'], comlocation, comic['ComicImage']) + if mylar.CONFIG.ALTERNATE_LATEST_SERIES_COVERS is True: #, lastissueid != importantdates['LatestIssueID']]): + if os.path.join(mylar.CONFIG.CACHE_DIR, comicid + '.jpg') is True: + cover_modtime = datetime.datetime.utcfromtimestamp(os.path.getmtime(os.path.join(mylar.CONFIG.CACHE_DIR, comicid + '.jpg'))) + cover_mtime = datetime.datetime.strftime(cover_modtime, '%Y-%m-%d') + if importantdates['LatestStoreDate'] != '0000-00-00': + lsd = re.sub('-', '', importantdates['LatestStoreDate']).strip() + else: + lsd = re.sub('-', '', importantdates['LatestDate']).strip() + if re.sub('-', '', cover_mtime).strip() < lsd: + logger.info('Attempting to retrieve new issue cover for display') + image_it(comicid, importantdates['LatestIssueID'], comlocation, comic['ComicImage']) + else: + logger.fdebug('no update required - lastissueid [%s] = latestissueid [%s]' % (lastissueid, importantdates['LatestIssueID'])) + else: + image_it(comicid, importantdates['LatestIssueID'], comlocation, comic['ComicImage']) else: logger.fdebug('no update required - lastissueid [%s] = latestissueid [%s]' % (lastissueid, importantdates['LatestIssueID'])) @@ -1070,6 +1080,7 @@ def updateissuedata(comicid, comicname=None, issued=None, comicIssues=None, call #let's start issue #'s at 0 -- thanks to DC for the new 52 reboot! :) latestiss = "0" latestdate = "0000-00-00" + latest_stdate = "0000-00-00" latestissueid = None firstiss = "10000000" firstdate = "2099-00-00" @@ -1241,8 +1252,10 @@ def updateissuedata(comicid, comicname=None, issued=None, comicIssues=None, call latestissueid = issid if firstval['Issue_Date'] != '0000-00-00': latestdate = str(firstval['Issue_Date']) + latest_stdate = storedate else: latestdate = storedate + latest_stdate = storedate if firstval['Issue_Date'] < firstdate and firstval['Issue_Date'] != '0000-00-00': firstiss = issnum @@ -1290,7 +1303,12 @@ def updateissuedata(comicid, comicname=None, issued=None, comicIssues=None, call styear = str(SeriesYear) if firstdate is not None: if SeriesYear != firstdate[:4]: - logger.fdebug('Series start date (%s) crosses over into different year (%s) - assuming store date of first issue (%s) as Start Year (even though CV will say previous year - it\'s all gravy).' % (SeriesYear, firstdate[:4], firstdate)) + if firstdate[:4] == '2099': + logger.fdebug('Series start date (%s) differs from First Issue start date as First Issue date is unknown - assuming Series Year as Start Year (even though CV might say previous year - it\'s all gravy).' 
% (SeriesYear)) + else: + logger.fdebug('Series start date (%s) cannot be properly determined and/or it might cross over into different year (%s) - assuming store date of first issue (%s) as Start Year (even though CV might say previous year - it\'s all gravy).' % (SeriesYear, firstdate[:4], firstdate)) + if firstdate == '2099-00-00': + firstdate = '%s-01-01' % SeriesYear styear = str(firstdate[:4]) if firstdate[5:7] == '00': @@ -1320,7 +1338,15 @@ def updateissuedata(comicid, comicname=None, issued=None, comicIssues=None, call if recentchk <= 55: lastpubdate = 'Present' else: - lastpubdate = str(ltmonth) + ' ' + str(ltyear) + if ltmonth == '?': + if ltyear == '0000': + lastpubdate = '?' + else: + lastpubdate = str(ltyear) + elif ltyear == '0000': + lastpubdate = '?' + else: + lastpubdate = str(ltmonth) + ' ' + str(ltyear) if stmonth == '?' and ('?' in lastpubdate and '0000' in lastpubdate): lastpubdate = 'Present' @@ -1357,6 +1383,7 @@ def updateissuedata(comicid, comicname=None, issued=None, comicIssues=None, call importantdates['LatestIssue'] = latestiss importantdates['LatestIssueID'] = latestissueid importantdates['LatestDate'] = latestdate + importantdates['LatestStoreDate'] = latest_stdate importantdates['LastPubDate'] = lastpubdate importantdates['SeriesStatus'] = 'Active' @@ -1561,10 +1588,10 @@ def image_it(comicid, latestissueid, comlocation, ComicImage): ComicImage = helpers.replacetheslash(PRComicImage) #if the comic cover local is checked, save a cover.jpg to the series folder. - if all([mylar.CONFIG.COMIC_COVER_LOCAL is True, os.path.isdir(comlocation) is True]): + if all([mylar.CONFIG.COMIC_COVER_LOCAL is True, os.path.isdir(comlocation) is True, os.path.isfile(PRComicImage)]): try: comiclocal = os.path.join(comlocation, 'cover.jpg') - shutil.copyfile(os.path.join(mylar.CONFIG.CACHE_DIR, str(comicid) + '.jpg'), comiclocal) + shutil.copyfile(PRComicImage, comiclocal) if mylar.CONFIG.ENFORCE_PERMS: filechecker.setperms(comiclocal) except IOError as e: From 33c09924f67b980d3a3331e3f0c56c5841f5cb9b Mon Sep 17 00:00:00 2001 From: evilhero Date: Thu, 7 Feb 2019 13:11:02 -0500 Subject: [PATCH 43/54] FIX:(#2184) Possible fix for correct issues being rejected from search results due to an invalid booktype --- mylar/search.py | 7 ++++--- 1 file changed, 4 insertions(+), 3 deletions(-) diff --git a/mylar/search.py b/mylar/search.py index c6679ce7..a99717c9 100755 --- a/mylar/search.py +++ b/mylar/search.py @@ -1112,7 +1112,7 @@ def NZB_SEARCH(ComicName, IssueNumber, ComicYear, SeriesYear, Publisher, IssueDa parsed_comic = p_comic.listFiles() logger.fdebug('parsed_info: %s' % parsed_comic) - if parsed_comic['parse_status'] == 'success' and (all([booktype == 'Print', parsed_comic['booktype'] == 'issue']) or all([booktype == 'One-Shot', parsed_comic['booktype'] == 'issue']) or booktype == parsed_comic['booktype']): + if parsed_comic['parse_status'] == 'success' and (all([booktype is None, parsed_comic['booktype'] == 'issue']) or all([booktype == 'Print', parsed_comic['booktype'] == 'issue']) or all([booktype == 'One-Shot', parsed_comic['booktype'] == 'issue']) or booktype == parsed_comic['booktype']): try: fcomic = filechecker.FileChecker(watchcomic=ComicName) filecomic = fcomic.matchIT(parsed_comic) @@ -1704,7 +1704,7 @@ def searchforissue(issueid=None, new=False, rsscheck=None, manual=False): UseFuzzy = None ComicVersion = comic['Volume'] TorrentID_32p = None - booktype = None + booktype = comic['Type'] else: Comicname_filesafe = comic['ComicName_Filesafe'] SeriesYear = 
comic['ComicYear'] @@ -1893,6 +1893,7 @@ def searchIssueIDList(issuelist): UseFuzzy = comic['UseFuzzy'] ComicVersion = comic['ComicVersion'] TorrentID_32p = comic['TorrentID_32P'] + booktype = comic['Type'] if issue['IssueDate'] == None: IssueYear = comic['ComicYear'] else: @@ -1902,7 +1903,7 @@ def searchIssueIDList(issuelist): else: AllowPacks = False - foundNZB, prov = search_init(comic['ComicName'], issue['Issue_Number'], str(IssueYear), comic['ComicYear'], Publisher, issue['IssueDate'], issue['ReleaseDate'], issue['IssueID'], AlternateSearch, UseFuzzy, ComicVersion, SARC=None, IssueArcID=None, mode=mode, ComicID=issue['ComicID'], filesafe=comic['ComicName_Filesafe'], allow_packs=AllowPacks, torrentid_32p=TorrentID_32p, digitaldate=issue['DigitalDate']) + foundNZB, prov = search_init(comic['ComicName'], issue['Issue_Number'], str(IssueYear), comic['ComicYear'], Publisher, issue['IssueDate'], issue['ReleaseDate'], issue['IssueID'], AlternateSearch, UseFuzzy, ComicVersion, SARC=None, IssueArcID=None, mode=mode, ComicID=issue['ComicID'], filesafe=comic['ComicName_Filesafe'], allow_packs=AllowPacks, torrentid_32p=TorrentID_32p, digitaldate=issue['DigitalDate'], booktype=booktype) if foundNZB['status'] is True: updater.foundsearch(ComicID=issue['ComicID'], IssueID=issue['IssueID'], mode=mode, provider=prov, hash=foundNZB['info']['t_hash']) logger.info('Completed search request.') From 63dc7bf0ba0637495d73dd7902e3fd4e26f61fc8 Mon Sep 17 00:00:00 2001 From: evilhero Date: Mon, 11 Feb 2019 11:21:16 -0500 Subject: [PATCH 44/54] FIX: Post-processing queue was not being initialized if the mylar API was not enabled --- mylar/__init__.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/mylar/__init__.py b/mylar/__init__.py index 7380d200..958eca5c 100644 --- a/mylar/__init__.py +++ b/mylar/__init__.py @@ -397,7 +397,7 @@ def start(): SEARCHPOOL = threading.Thread(target=helpers.search_queue, args=(SEARCH_QUEUE,), name="SEARCH-QUEUE") SEARCHPOOL.start() - if all([CONFIG.POST_PROCESSING is True, CONFIG.API_ENABLED is True]): + if CONFIG.POST_PROCESSING is True: logger.info('[POST-PROCESS-QUEUE] Post Process queue enabled & monitoring for api requests....') PPPOOL = threading.Thread(target=helpers.postprocess_main, args=(PP_QUEUE,), name="POST-PROCESS-QUEUE") PPPOOL.start() From d4e930d4a64ada38a0fdb1335c9e1fb334146022 Mon Sep 17 00:00:00 2001 From: evilhero Date: Mon, 11 Feb 2019 17:04:28 -0500 Subject: [PATCH 45/54] FIX:(#2195) When adding a TPB/GN series that is missing a html id within the description field would error when attempting to parse the description properly --- mylar/cv.py | 15 +++++++++++---- 1 file changed, 11 insertions(+), 4 deletions(-) diff --git a/mylar/cv.py b/mylar/cv.py index a94193dd..4922cb80 100755 --- a/mylar/cv.py +++ b/mylar/cv.py @@ -380,12 +380,16 @@ def GetComicInfo(comicid, dom, safechk=None): micdrop.append(mic_check['data-ref-id']) for fc in desclinks: - #logger.info('fc: %s' % fc) - fc_id = fc['data-ref-id'] - #logger.info('fc_id: %s' % fc_id) + try: + fc_id = fc['data-ref-id'] + except: + continue + if fc_id in micdrop: continue + fc_name = fc.findNext(text=True) + if fc_id.startswith('4000'): fc_cid = None fc_isid = fc_id @@ -432,7 +436,10 @@ def GetComicInfo(comicid, dom, safechk=None): #first_collect = cis logger.info('Collected issues in volume: %s' % issue_list) - comic['Issue_List'] = issue_list + if len(issue_list) == 0: + comic['Issue_List'] = 'None' + else: + comic['Issue_List'] = issue_list else: comic['Issue_List'] = 'None' From 
954369f7b03520f144bd0c68ececfd49e750d288 Mon Sep 17 00:00:00 2001 From: Arathen Date: Tue, 5 Feb 2019 18:32:08 +1100 Subject: [PATCH 46/54] IMP: Change qBittorrent start-on-load from two-way force/pause start, to a three-way force/pause/default start. IMP: Add variable to distinguish between old and new WebAPI in qBittorrent --- data/interfaces/default/config.html | 16 ++++++++++++++-- mylar/config.py | 3 ++- mylar/torrent/clients/qbittorrent.py | 17 +++++++++++------ mylar/webserve.py | 7 +++++-- 4 files changed, 32 insertions(+), 11 deletions(-) diff --git a/data/interfaces/default/config.html b/data/interfaces/default/config.html index 44d2eae5..0d301f41 100755 --- a/data/interfaces/default/config.html +++ b/data/interfaces/default/config.html @@ -703,9 +703,21 @@
Folder path where torrents will be assigned to +
+ Add torrent using client default action +
+
+ Add torrent and force start +
+
+ Add torrent paused +
- - Automatically start torrent on successful loading within qBittorrent client + qBittorrent "Add torrent" options +
+
+ + Use the new qBittorrent client WebAPI available since v4.1.0
diff --git a/mylar/config.py b/mylar/config.py index 233205f2..1e73ec27 100644 --- a/mylar/config.py +++ b/mylar/config.py @@ -348,7 +348,8 @@ _CONFIG_DEFINITIONS = OrderedDict({ 'QBITTORRENT_PASSWORD': (str, 'qBittorrent', None), 'QBITTORRENT_LABEL': (str, 'qBittorrent', None), 'QBITTORRENT_FOLDER': (str, 'qBittorrent', None), - 'QBITTORRENT_STARTONLOAD': (bool, 'qBittorrent', False), + 'QBITTORRENT_LOADACTION': (int, 'qBittorrent', 0), #0': client default, #1': force start, #2': add paused + 'QBITTORRENT_NEWAPI': (bool, 'qBittorrent', True), 'OPDS_ENABLE': (bool, 'OPDS', False), 'OPDS_AUTHENTICATION': (bool, 'OPDS', False), diff --git a/mylar/torrent/clients/qbittorrent.py b/mylar/torrent/clients/qbittorrent.py index 6643e023..e1fd4571 100644 --- a/mylar/torrent/clients/qbittorrent.py +++ b/mylar/torrent/clients/qbittorrent.py @@ -111,17 +111,22 @@ class TorrentClient(object): else: logger.debug('Successfully submitted for add via file. Verifying item is now on client.') - if mylar.CONFIG.QBITTORRENT_STARTONLOAD: - logger.info('attempting to start') - startit = self.client.force_start(hash) - logger.info('startit returned:' + str(startit)) - else: - logger.info('attempting to pause torrent incase it starts') + if mylar.CONFIG.QBITTORRENT_LOADACTION == 1: + logger.info('Attempting to force start torrent') + try: + startit = self.client.force_start(hash) + logger.info('startit returned:' + str(startit)) + except: + logger.warn('Unable to force start torrent - please check your client.') + elif mylar.CONFIG.QBITTORRENT_LOADACTION == 2: + logger.info('Attempting to pause torrent after loading') try: startit = self.client.pause(hash) logger.info('startit paused:' + str(startit)) except: logger.warn('Unable to pause torrent - possibly already paused?') + else: + logger.info('Client default add action selected. Doing nothing.') try: time.sleep(5) # wait 5 in case it's not populated yet. 
diff --git a/mylar/webserve.py b/mylar/webserve.py index 5a16da32..d3f36fbc 100644 --- a/mylar/webserve.py +++ b/mylar/webserve.py @@ -4739,7 +4739,10 @@ class WebInterface(object): "qbittorrent_password": mylar.CONFIG.QBITTORRENT_PASSWORD, "qbittorrent_label": mylar.CONFIG.QBITTORRENT_LABEL, "qbittorrent_folder": mylar.CONFIG.QBITTORRENT_FOLDER, - "qbittorrent_startonload": helpers.checked(mylar.CONFIG.QBITTORRENT_STARTONLOAD), + "qbittorrent_clientdefault": helpers.radio(mylar.CONFIG.QBITTORRENT_LOADACTION, 0), + "qbittorrent_forcestart": helpers.radio(mylar.CONFIG.QBITTORRENT_LOADACTION, 1), + "qbittorrent_addpaused": helpers.radio(mylar.CONFIG.QBITTORRENT_LOADACTION, 2), + "qbittorrent_newapi": helpers.checked(mylar.CONFIG.QBITTORRENT_NEWAPI), "blackhole_dir": mylar.CONFIG.BLACKHOLE_DIR, "usenet_retention": mylar.CONFIG.USENET_RETENTION, "nzbsu": helpers.checked(mylar.CONFIG.NZBSU), @@ -5104,7 +5107,7 @@ class WebInterface(object): def configUpdate(self, **kwargs): checked_configs = ['enable_https', 'launch_browser', 'syno_fix', 'auto_update', 'annuals_on', 'api_enabled', 'nzb_startup_search', 'enforce_perms', 'sab_to_mylar', 'torrent_local', 'torrent_seedbox', 'rtorrent_ssl', 'rtorrent_verify', 'rtorrent_startonload', - 'enable_torrents', 'qbittorrent_startonload', 'enable_rss', 'nzbsu', 'nzbsu_verify', + 'enable_torrents', 'qbittorrent_newapi', 'enable_rss', 'nzbsu', 'nzbsu_verify', 'dognzb', 'dognzb_verify', 'experimental', 'enable_torrent_search', 'enable_public', 'enable_32p', 'enable_torznab', 'newznab', 'use_minsize', 'use_maxsize', 'ddump', 'failed_download_handling', 'sab_client_post_processing', 'nzbget_client_post_processing', 'failed_auto', 'post_processing', 'enable_check_folder', 'enable_pre_scripts', 'enable_snatch_script', 'enable_extra_scripts', From 96851510a5d5515ae35d5c7a3e1fdbb5a3676876 Mon Sep 17 00:00:00 2001 From: Arathen Date: Wed, 13 Feb 2019 00:46:12 +1100 Subject: [PATCH 47/54] IMP: Update all qBittorrent WebAPI paths for client v4.1.0+. IMP: New functions to add torrents to qBittorrent by URL and file. IMP: Remove variable to distinguish between old and new WebAPI in qBittorrent --- data/interfaces/default/config.html | 8 +- lib/qbittorrent/client.py | 178 +++++++++++++--------------- mylar/config.py | 1 - mylar/webserve.py | 3 +- 4 files changed, 87 insertions(+), 103 deletions(-) diff --git a/data/interfaces/default/config.html b/data/interfaces/default/config.html index 0d301f41..d80baab9 100755 --- a/data/interfaces/default/config.html +++ b/data/interfaces/default/config.html @@ -704,21 +704,17 @@ Folder path where torrents will be assigned to
- Add torrent using client default action + Add torrent only
Add torrent and force start
- Add torrent paused + Add torrent and pause
qBittorrent "Add torrent" options
-
- - Use the new qBittorrent client WebAPI available since v4.1.0 -
diff --git a/lib/qbittorrent/client.py b/lib/qbittorrent/client.py index 73d8d753..6809144d 100644 --- a/lib/qbittorrent/client.py +++ b/lib/qbittorrent/client.py @@ -1,7 +1,6 @@ import requests import json - class LoginRequired(Exception): def __str__(self): return 'Please login first.' @@ -15,7 +14,7 @@ class Client(object): self.url = url session = requests.Session() - check_prefs = session.get(url+'query/preferences') + check_prefs = session.get(url+'api/v2/app/preferences') if check_prefs.status_code == 200: self._is_authenticated = True @@ -24,9 +23,9 @@ class Client(object): elif check_prefs.status_code == 404: self._is_authenticated = False raise RuntimeError(""" - This wrapper only supports qBittorrent applications - with version higher than 3.1.x. - Please use the latest qBittorrent release. + This wrapper only supports qBittorrent applications with + version higher than 4.1.0 (which implemented Web API v2.0). + Please use the latest qBittorrent release. """) else: @@ -104,7 +103,7 @@ class Client(object): :return: Response to login request to the API. """ self.session = requests.Session() - login = self.session.post(self.url+'login', + login = self.session.post(self.url+'api/v2/auth/login', data={'username': username, 'password': password}) if login.text == 'Ok.': @@ -116,7 +115,7 @@ class Client(object): """ Logout the current session. """ - response = self._get('logout') + response = self._get('api/v2/auth/logout') self._is_authenticated = False return response @@ -125,27 +124,20 @@ class Client(object): """ Get qBittorrent version. """ - return self._get('version/qbittorrent') + return self._get('api/v2/app/version') @property def api_version(self): """ Get WEB API version. """ - return self._get('version/api') - - @property - def api_min_version(self): - """ - Get minimum WEB API version. - """ - return self._get('version/api_min') + return self._get('api/v2/app/webapiVersion') def shutdown(self): """ Shutdown qBittorrent. """ - return self._get('command/shutdown') + return self._get('api/v2/app/shutdown') def torrents(self, **filters): """ @@ -157,6 +149,7 @@ class Client(object): :param reverse: Enable reverse sorting. :param limit: Limit the number of torrents returned. :param offset: Set offset (if less than 0, offset from end). + :param hashes: Filter by hashes. Can contain multiple hashes separated by |. :return: list() of torrent with matching filter. """ @@ -166,7 +159,7 @@ class Client(object): name = 'filter' if name == 'status' else name params[name] = value - return self._get('query/torrents', params=params) + return self._get('api/v2/torrents/info', params=params) def get_torrent(self, infohash): """ @@ -174,7 +167,7 @@ class Client(object): :param infohash: INFO HASH of the torrent. """ - return self._get('query/propertiesGeneral/' + infohash.lower()) + return self._get('api/v2/torrents/properties', params={'hash': infohash.lower()}) def get_torrent_trackers(self, infohash): """ @@ -182,7 +175,7 @@ class Client(object): :param infohash: INFO HASH of the torrent. """ - return self._get('query/propertiesTrackers/' + infohash.lower()) + return self._get('api/v2/torrents/trackers', params={'hash': infohash.lower()}) def get_torrent_webseeds(self, infohash): """ @@ -190,7 +183,7 @@ class Client(object): :param infohash: INFO HASH of the torrent. 
""" - return self._get('query/propertiesWebSeeds/' + infohash.lower()) + return self._get('api/v2/torrents/webseeds', params={'hash': infohash.lower()}) def get_torrent_files(self, infohash): """ @@ -198,14 +191,14 @@ class Client(object): :param infohash: INFO HASH of the torrent. """ - return self._get('query/propertiesFiles/' + infohash.lower()) + return self._get('api/v2/torrents/files', params={'hash': infohash.lower()}) @property def global_transfer_info(self): """ Get JSON data of the global transfer info of qBittorrent. """ - return self._get('query/transferInfo') + return self._get('api/v2/transfer/info') @property def preferences(self): @@ -228,7 +221,7 @@ class Client(object): qb.preferences() """ - prefs = self._get('query/preferences') + prefs = self._get('api/v2/app/preferences') class Proxy(Client): """ @@ -270,11 +263,11 @@ class Client(object): def sync(self, rid=0): """ Sync the torrents by supplied LAST RESPONSE ID. - Read more @ http://git.io/vEgXr + Read more @ https://git.io/fxgB8 :param rid: Response ID of last request. """ - return self._get('sync/maindata', params={'rid': rid}) + return self._get('api/v2/sync/maindata', params={'rid': rid}) def download_from_link(self, link, **kwargs): """ @@ -286,22 +279,20 @@ class Client(object): :return: Empty JSON data. """ - # old:new format - old_arg_map = {'save_path': 'savepath'} # , 'label': 'category'} - - # convert old option names to new option names - options = kwargs.copy() - for old_arg, new_arg in old_arg_map.items(): - if options.get(old_arg) and not options.get(new_arg): - options[new_arg] = options[old_arg] - - options['urls'] = link - - # workaround to send multipart/formdata request - # http://stackoverflow.com/a/23131823/4726598 - dummy_file = {'_dummy': (None, '_dummy')} - - return self._post('command/download', data=options, files=dummy_file) + # qBittorrent requires adds to be done with multipath/form-data + # POST requests for both URLs and .torrent files. Info on this + # can be found here, and here: + # http://docs.python-requests.org/en/master/user/quickstart/#post-a-multipart-encoded-file + # http://docs.python-requests.org/en/master/user/advanced/#post-multiple-multipart-encoded-files + if isinstance(link, list): + links = '\n'.join(link) + else: + links = link + torrent_data = {} + torrent_data['urls'] = (None, links) + for k, v in kwargs.iteritems(): + torrent_data[k] = (None, v) + return self._post('api/v2/torrents/add', data=None, files=torrent_data) def download_from_file(self, file_buffer, **kwargs): """ @@ -313,18 +304,23 @@ class Client(object): :return: Empty JSON data. """ + # qBittorrent requires adds to be done with multipath/form-data + # POST requests for both URLs and .torrent files. 
Info on this + # can be found here, and here: + # http://docs.python-requests.org/en/master/user/quickstart/#post-a-multipart-encoded-file + # http://docs.python-requests.org/en/master/user/advanced/#post-multiple-multipart-encoded-files if isinstance(file_buffer, list): - torrent_files = {} - for i, f in enumerate(file_buffer): - torrent_files.update({'torrents%s' % i: f}) + torrent_data = [] + for f in file_buffer: + fname = f.name + torrent_data.append(('torrents', (fname, f))) else: - torrent_files = {'torrents': file_buffer} + fname = file_buffer.name + torrent_data = [('torrents', (fname, file_buffer))] + for k, v in kwargs.iteritems(): + torrent_data.append((k, (None, v))) - data = kwargs.copy() - - if data.get('save_path'): - data.update({'savepath': data['save_path']}) - return self._post('command/upload', data=data, files=torrent_files) + return self._post('api/v2/torrents/add', data=None, files=torrent_data) def add_trackers(self, infohash, trackers): """ @@ -335,7 +331,7 @@ class Client(object): """ data = {'hash': infohash.lower(), 'urls': trackers} - return self._post('command/addTrackers', data=data) + return self._post('api/v2/torrents/addTrackers', data=data) @staticmethod def _process_infohash_list(infohash_list): @@ -356,13 +352,13 @@ class Client(object): :param infohash: INFO HASH of torrent. """ - return self._post('command/pause', data={'hash': infohash.lower()}) + return self._post('api/v2/torrents/pause', data={'hashes': infohash.lower()}) def pause_all(self): """ Pause all torrents. """ - return self._get('command/pauseAll') + return self._post('api/v2/torrents/pause', data={'hashes': 'all'}) def pause_multiple(self, infohash_list): """ @@ -371,18 +367,7 @@ class Client(object): :param infohash_list: Single or list() of infohashes. """ data = self._process_infohash_list(infohash_list) - return self._post('command/pauseAll', data=data) - - def set_label(self, infohash_list, label): - """ - Set the label on multiple torrents. - IMPORTANT: OLD API method, kept as it is to avoid breaking stuffs. - - :param infohash_list: Single or list() of infohashes. - """ - data = self._process_infohash_list(infohash_list) - data['label'] = label - return self._post('command/setLabel', data=data) + return self._post('api/v2/torrents/pause', data=data) def set_category(self, infohash_list, category): """ @@ -392,7 +377,7 @@ class Client(object): """ data = self._process_infohash_list(infohash_list) data['category'] = category - return self._post('command/setCategory', data=data) + return self._post('api/v2/torrents/setCategory', data=data) def resume(self, infohash): """ @@ -400,13 +385,13 @@ class Client(object): :param infohash: INFO HASH of torrent. """ - return self._post('command/resume', data={'hash': infohash.lower()}) + return self._post('api/v2/torrents/resume', data={'hashes': infohash.lower()}) def resume_all(self): """ Resume all torrents. """ - return self._get('command/resumeAll') + return self._get('api/v2/torrents/resume', data={'hashes': 'all'}) def resume_multiple(self, infohash_list): """ @@ -415,7 +400,7 @@ class Client(object): :param infohash_list: Single or list() of infohashes. """ data = self._process_infohash_list(infohash_list) - return self._post('command/resumeAll', data=data) + return self._post('api/v2/torrents/resume', data=data) def delete(self, infohash_list): """ @@ -424,16 +409,21 @@ class Client(object): :param infohash_list: Single or list() of infohashes. 
""" data = self._process_infohash_list(infohash_list) - return self._post('command/delete', data=data) + data['deleteFiles'] = 'false' + return self._post('api/v2/torrents/delete', data=data) def delete_permanently(self, infohash_list): """ Permanently delete torrents. + *** WARNING : This will instruct qBittorrent to delete files + *** from your hard disk. Use with caution. + :param infohash_list: Single or list() of infohashes. """ data = self._process_infohash_list(infohash_list) - return self._post('command/deletePerm', data=data) + data['deleteFiles'] = 'true' + return self._post('api/v2/torrents/delete', data=data) def recheck(self, infohash_list): """ @@ -442,7 +432,7 @@ class Client(object): :param infohash_list: Single or list() of infohashes. """ data = self._process_infohash_list(infohash_list) - return self._post('command/recheck', data=data) + return self._post('api/v2/torrents/recheck', data=data) def increase_priority(self, infohash_list): """ @@ -451,7 +441,7 @@ class Client(object): :param infohash_list: Single or list() of infohashes. """ data = self._process_infohash_list(infohash_list) - return self._post('command/increasePrio', data=data) + return self._post('api/v2/torrents/increasePrio', data=data) def decrease_priority(self, infohash_list): """ @@ -460,7 +450,7 @@ class Client(object): :param infohash_list: Single or list() of infohashes. """ data = self._process_infohash_list(infohash_list) - return self._post('command/decreasePrio', data=data) + return self._post('api/v2/torrents/decreasePrio', data=data) def set_max_priority(self, infohash_list): """ @@ -469,7 +459,7 @@ class Client(object): :param infohash_list: Single or list() of infohashes. """ data = self._process_infohash_list(infohash_list) - return self._post('command/topPrio', data=data) + return self._post('api/v2/torrents/topPrio', data=data) def set_min_priority(self, infohash_list): """ @@ -478,7 +468,7 @@ class Client(object): :param infohash_list: Single or list() of infohashes. """ data = self._process_infohash_list(infohash_list) - return self._post('command/bottomPrio', data=data) + return self._post('api/v2/torrents/bottomPrio', data=data) def set_file_priority(self, infohash, file_id, priority): """ @@ -488,7 +478,7 @@ class Client(object): :param file_id: ID of the file to set priority. :param priority: Priority level of the file. """ - if priority not in [0, 1, 2, 7]: + if priority not in [0, 1, 6, 7]: raise ValueError("Invalid priority, refer WEB-UI docs for info.") elif not isinstance(file_id, int): raise TypeError("File ID must be an int") @@ -497,7 +487,7 @@ class Client(object): 'id': file_id, 'priority': priority} - return self._post('command/setFilePrio', data=data) + return self._post('api/v2/torrents/filePrio', data=data) # Get-set global download and upload speed limits. @@ -505,7 +495,7 @@ class Client(object): """ Get global download speed limit. """ - return self._get('command/getGlobalDlLimit') + return self._get('api/v2/transfer/downloadLimit') def set_global_download_limit(self, limit): """ @@ -513,7 +503,7 @@ class Client(object): :param limit: Speed limit in bytes. """ - return self._post('command/setGlobalDlLimit', data={'limit': limit}) + return self._post('api/v2/transfer/setDownloadLimit', data={'limit': limit}) global_download_limit = property(get_global_download_limit, set_global_download_limit) @@ -522,7 +512,7 @@ class Client(object): """ Get global upload speed limit. 
""" - return self._get('command/getGlobalUpLimit') + return self._get('api/v2/transfer/uploadLimit') def set_global_upload_limit(self, limit): """ @@ -530,7 +520,7 @@ class Client(object): :param limit: Speed limit in bytes. """ - return self._post('command/setGlobalUpLimit', data={'limit': limit}) + return self._post('api/v2/transfer/setUploadLimit', data={'limit': limit}) global_upload_limit = property(get_global_upload_limit, set_global_upload_limit) @@ -543,7 +533,7 @@ class Client(object): :param infohash_list: Single or list() of infohashes. """ data = self._process_infohash_list(infohash_list) - return self._post('command/getTorrentsDlLimit', data=data) + return self._post('api/v2/torrents/downloadLimit', data=data) def set_torrent_download_limit(self, infohash_list, limit): """ @@ -554,7 +544,7 @@ class Client(object): """ data = self._process_infohash_list(infohash_list) data.update({'limit': limit}) - return self._post('command/setTorrentsDlLimit', data=data) + return self._post('api/v2/torrents/setDownloadLimit', data=data) def get_torrent_upload_limit(self, infohash_list): """ @@ -563,7 +553,7 @@ class Client(object): :param infohash_list: Single or list() of infohashes. """ data = self._process_infohash_list(infohash_list) - return self._post('command/getTorrentsUpLimit', data=data) + return self._post('api/v2/torrents/uploadLimit', data=data) def set_torrent_upload_limit(self, infohash_list, limit): """ @@ -574,26 +564,26 @@ class Client(object): """ data = self._process_infohash_list(infohash_list) data.update({'limit': limit}) - return self._post('command/setTorrentsUpLimit', data=data) + return self._post('api/v2/torrents/setUploadLimit', data=data) # setting preferences def set_preferences(self, **kwargs): """ Set preferences of qBittorrent. - Read all possible preferences @ http://git.io/vEgDQ + Read all possible preferences @ https://git.io/fx2Y9 :param kwargs: set preferences in kwargs form. """ json_data = "json={}".format(json.dumps(kwargs)) headers = {'content-type': 'application/x-www-form-urlencoded'} - return self._post('command/setPreferences', data=json_data, + return self._post('api/v2/app/setPreferences', data=json_data, headers=headers) def get_alternative_speed_status(self): """ Get Alternative speed limits. (1/0) """ - return self._get('command/alternativeSpeedLimitsEnabled') + return self._get('api/v2/transfer/speedLimitsMode') alternative_speed_status = property(get_alternative_speed_status) @@ -601,7 +591,7 @@ class Client(object): """ Toggle alternative speed limits. """ - return self._get('command/toggleAlternativeSpeedLimits') + return self._get('api/v2/transfer/toggleSpeedLimitsMode') def toggle_sequential_download(self, infohash_list): """ @@ -610,7 +600,7 @@ class Client(object): :param infohash_list: Single or list() of infohashes. """ data = self._process_infohash_list(infohash_list) - return self._post('command/toggleSequentialDownload', data=data) + return self._post('api/v2/torrents/toggleSequentialDownload', data=data) def toggle_first_last_piece_priority(self, infohash_list): """ @@ -619,7 +609,7 @@ class Client(object): :param infohash_list: Single or list() of infohashes. 
""" data = self._process_infohash_list(infohash_list) - return self._post('command/toggleFirstLastPiecePrio', data=data) + return self._post('api/v2/torrents/toggleFirstLastPiecePrio', data=data) def force_start(self, infohash_list, value=True): """ @@ -630,4 +620,4 @@ class Client(object): """ data = self._process_infohash_list(infohash_list) data.update({'value': json.dumps(value)}) - return self._post('command/setForceStart', data=data) + return self._post('api/v2/torrents/setForceStart', data=data) diff --git a/mylar/config.py b/mylar/config.py index 1e73ec27..069110bc 100644 --- a/mylar/config.py +++ b/mylar/config.py @@ -349,7 +349,6 @@ _CONFIG_DEFINITIONS = OrderedDict({ 'QBITTORRENT_LABEL': (str, 'qBittorrent', None), 'QBITTORRENT_FOLDER': (str, 'qBittorrent', None), 'QBITTORRENT_LOADACTION': (int, 'qBittorrent', 0), #0': client default, #1': force start, #2': add paused - 'QBITTORRENT_NEWAPI': (bool, 'qBittorrent', True), 'OPDS_ENABLE': (bool, 'OPDS', False), 'OPDS_AUTHENTICATION': (bool, 'OPDS', False), diff --git a/mylar/webserve.py b/mylar/webserve.py index d3f36fbc..9abccf5e 100644 --- a/mylar/webserve.py +++ b/mylar/webserve.py @@ -4742,7 +4742,6 @@ class WebInterface(object): "qbittorrent_clientdefault": helpers.radio(mylar.CONFIG.QBITTORRENT_LOADACTION, 0), "qbittorrent_forcestart": helpers.radio(mylar.CONFIG.QBITTORRENT_LOADACTION, 1), "qbittorrent_addpaused": helpers.radio(mylar.CONFIG.QBITTORRENT_LOADACTION, 2), - "qbittorrent_newapi": helpers.checked(mylar.CONFIG.QBITTORRENT_NEWAPI), "blackhole_dir": mylar.CONFIG.BLACKHOLE_DIR, "usenet_retention": mylar.CONFIG.USENET_RETENTION, "nzbsu": helpers.checked(mylar.CONFIG.NZBSU), @@ -5107,7 +5106,7 @@ class WebInterface(object): def configUpdate(self, **kwargs): checked_configs = ['enable_https', 'launch_browser', 'syno_fix', 'auto_update', 'annuals_on', 'api_enabled', 'nzb_startup_search', 'enforce_perms', 'sab_to_mylar', 'torrent_local', 'torrent_seedbox', 'rtorrent_ssl', 'rtorrent_verify', 'rtorrent_startonload', - 'enable_torrents', 'qbittorrent_newapi', 'enable_rss', 'nzbsu', 'nzbsu_verify', + 'enable_torrents', 'enable_rss', 'nzbsu', 'nzbsu_verify', 'dognzb', 'dognzb_verify', 'experimental', 'enable_torrent_search', 'enable_public', 'enable_32p', 'enable_torznab', 'newznab', 'use_minsize', 'use_maxsize', 'ddump', 'failed_download_handling', 'sab_client_post_processing', 'nzbget_client_post_processing', 'failed_auto', 'post_processing', 'enable_check_folder', 'enable_pre_scripts', 'enable_snatch_script', 'enable_extra_scripts', From 47ea6024229fa1cafc1f98a18575037e6e37ea32 Mon Sep 17 00:00:00 2001 From: Arathen Date: Wed, 13 Feb 2019 00:52:36 +1100 Subject: [PATCH 48/54] FIX: Change qBt resume_all to a POST operation. --- lib/qbittorrent/client.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/lib/qbittorrent/client.py b/lib/qbittorrent/client.py index 6809144d..86ba54bf 100644 --- a/lib/qbittorrent/client.py +++ b/lib/qbittorrent/client.py @@ -391,7 +391,7 @@ class Client(object): """ Resume all torrents. 
""" - return self._get('api/v2/torrents/resume', data={'hashes': 'all'}) + return self._post('api/v2/torrents/resume', data={'hashes': 'all'}) def resume_multiple(self, infohash_list): """ From 1f895fd32dba7caed33f1591f404e875e95f3211 Mon Sep 17 00:00:00 2001 From: evilhero Date: Tue, 12 Feb 2019 10:41:35 -0500 Subject: [PATCH 49/54] FIX: Changed qbitorrent loadaction from radio option to dropdown option --- data/interfaces/default/config.html | 26 ++++++++++++++------------ mylar/config.py | 2 +- mylar/torrent/clients/qbittorrent.py | 4 ++-- mylar/webserve.py | 4 +--- 4 files changed, 18 insertions(+), 18 deletions(-) diff --git a/data/interfaces/default/config.html b/data/interfaces/default/config.html index d80baab9..39aeb506 100755 --- a/data/interfaces/default/config.html +++ b/data/interfaces/default/config.html @@ -703,18 +703,20 @@
Folder path where torrents will be assigned to -
- Add torrent only -
-
- Add torrent and force start -
-
- Add torrent and pause -
-
- qBittorrent "Add torrent" options -
+
+ + +
diff --git a/mylar/config.py b/mylar/config.py index 069110bc..1a72220e 100644 --- a/mylar/config.py +++ b/mylar/config.py @@ -348,7 +348,7 @@ _CONFIG_DEFINITIONS = OrderedDict({ 'QBITTORRENT_PASSWORD': (str, 'qBittorrent', None), 'QBITTORRENT_LABEL': (str, 'qBittorrent', None), 'QBITTORRENT_FOLDER': (str, 'qBittorrent', None), - 'QBITTORRENT_LOADACTION': (int, 'qBittorrent', 0), #0': client default, #1': force start, #2': add paused + 'QBITTORRENT_LOADACTION': (str, 'qBittorrent', 'default'), #default, force_start, paused 'OPDS_ENABLE': (bool, 'OPDS', False), 'OPDS_AUTHENTICATION': (bool, 'OPDS', False), diff --git a/mylar/torrent/clients/qbittorrent.py b/mylar/torrent/clients/qbittorrent.py index e1fd4571..5be911e8 100644 --- a/mylar/torrent/clients/qbittorrent.py +++ b/mylar/torrent/clients/qbittorrent.py @@ -111,14 +111,14 @@ class TorrentClient(object): else: logger.debug('Successfully submitted for add via file. Verifying item is now on client.') - if mylar.CONFIG.QBITTORRENT_LOADACTION == 1: + if mylar.CONFIG.QBITTORRENT_LOADACTION == 'force_start': logger.info('Attempting to force start torrent') try: startit = self.client.force_start(hash) logger.info('startit returned:' + str(startit)) except: logger.warn('Unable to force start torrent - please check your client.') - elif mylar.CONFIG.QBITTORRENT_LOADACTION == 2: + elif mylar.CONFIG.QBITTORRENT_LOADACTION == 'pause': logger.info('Attempting to pause torrent after loading') try: startit = self.client.pause(hash) diff --git a/mylar/webserve.py b/mylar/webserve.py index 9abccf5e..50fb7a31 100644 --- a/mylar/webserve.py +++ b/mylar/webserve.py @@ -4739,9 +4739,7 @@ class WebInterface(object): "qbittorrent_password": mylar.CONFIG.QBITTORRENT_PASSWORD, "qbittorrent_label": mylar.CONFIG.QBITTORRENT_LABEL, "qbittorrent_folder": mylar.CONFIG.QBITTORRENT_FOLDER, - "qbittorrent_clientdefault": helpers.radio(mylar.CONFIG.QBITTORRENT_LOADACTION, 0), - "qbittorrent_forcestart": helpers.radio(mylar.CONFIG.QBITTORRENT_LOADACTION, 1), - "qbittorrent_addpaused": helpers.radio(mylar.CONFIG.QBITTORRENT_LOADACTION, 2), + "qbittorrent_loadaction": mylar.CONFIG.QBITTORRENT_LOADACTION, "blackhole_dir": mylar.CONFIG.BLACKHOLE_DIR, "usenet_retention": mylar.CONFIG.USENET_RETENTION, "nzbsu": helpers.checked(mylar.CONFIG.NZBSU), From 1b2a2f5da3f96a436c7db8c13f7b838da2482341 Mon Sep 17 00:00:00 2001 From: evilhero Date: Tue, 12 Feb 2019 16:28:42 -0500 Subject: [PATCH 50/54] IMP: Added qBitorrrent test connection option and added some additional error logging --- data/interfaces/default/config.html | 64 ++++++++++++++++++++-------- mylar/torrent/clients/qbittorrent.py | 47 ++++++++++---------- mylar/webserve.py | 15 +++++++ 3 files changed, 87 insertions(+), 39 deletions(-) diff --git a/data/interfaces/default/config.html b/data/interfaces/default/config.html index 39aeb506..fb0b58f2 100755 --- a/data/interfaces/default/config.html +++ b/data/interfaces/default/config.html @@ -683,15 +683,15 @@
@@ -703,20 +703,24 @@
 Folder path where torrents will be assigned to
+Test Connection button (id="qbittorrent_test") and status icon (id="qbittorrent_statusicon")
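The button is wired up further down in this patch: a jQuery click handler posts the host, username and password to a new testqbit endpoint, which calls the client's connect() with test=True and reports the qBittorrent version on success. As a rough standalone sketch of what that round trip exercises - using requests directly against qBittorrent's v2 Web API rather than Mylar's client wrapper, with host/username/password as placeholders:

# Standalone sketch (not Mylar code): log in to the qBittorrent v2 Web API and
# read back the application version, returning a status dict similar in shape
# to what connect(..., test=True) produces.
import requests

def test_qbittorrent(host, username, password):
    base = host.rstrip('/')
    s = requests.Session()
    try:
        login = s.post('%s/api/v2/auth/login' % base,
                       data={'username': username, 'password': password},
                       timeout=10)
        if login.text != 'Ok.':
            return {'status': False, 'error': 'login rejected'}
        version = s.get('%s/api/v2/app/version' % base, timeout=10).text
        return {'status': True, 'version': version}
    except Exception as e:
        return {'status': False, 'error': str(e)}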
@@ -2159,6 +2163,32 @@ $("#add_torznab").before(torformfields); }); + $('#qbittorrent_test').click(function () { + var imagechk = document.getElementById("qbittorrent_statusicon"); + var host = document.getElementById("qbittorrent_host").value; + var username = document.getElementById("qbittorrent_username").value; + var password = document.getElementById("qbittorrent_password").value; + $.get("testqbit", + { host: host, username: username, password: password }, + function(data){ + if (data.error != undefined) { + alert(data.error); + return; + } + $('#ajaxMsg').html("
"+data+"
"); + if ( data.indexOf("Successfully") > -1){ + imagechk.src = ""; + imagechk.src = "interfaces/default/images/success.png"; + imagechk.style.visibility = "visible"; + } else { + imagechk.src = ""; + imagechk.src = "interfaces/default/images/fail.png"; + imagechk.style.visibility = "visible"; + } + }); + $('#ajaxMsg').addClass('success').fadeIn().delay(3000).fadeOut(); + }); + function addAction() { $('#autoadd').append(''); }; diff --git a/mylar/torrent/clients/qbittorrent.py b/mylar/torrent/clients/qbittorrent.py index 5be911e8..1b97d714 100644 --- a/mylar/torrent/clients/qbittorrent.py +++ b/mylar/torrent/clients/qbittorrent.py @@ -11,29 +11,33 @@ class TorrentClient(object): def __init__(self): self.conn = None - def connect(self, host, username, password): + def connect(self, host, username, password, test=False): if self.conn is not None: return self.connect if not host: - return {'status': False} + return {'status': False, 'error': 'host not specified'} try: - logger.info(host) self.client = client.Client(host) except Exception as e: - logger.error('Could not create qBittorrent Object' + str(e)) - return {'status': False} + logger.error('Could not create qBittorrent Object %s' % e) + return {'status': False, 'error': e} else: try: self.client.login(username, password) except Exception as e: - logger.error('Could not connect to qBittorrent ' + host) + logger.error('Could not connect to qBittorrent: %s' % host) + return {'status': False, 'error': e} else: - return self.client + if test is True: + version = self.client.qbittorrent_version + return {'status': True, 'version': version} + else: + return self.client def find_torrent(self, hash): - logger.debug('Finding Torrent hash: ' + hash) + logger.debug('Finding Torrent hash: %s' % hash) torrent_info = self.get_torrent(hash) if torrent_info: return True @@ -41,11 +45,11 @@ class TorrentClient(object): return False def get_torrent(self, hash): - logger.debug('Getting Torrent info hash: ' + hash) + logger.debug('Getting Torrent info hash: %s' % hash) try: torrent_info = self.client.get_torrent(hash) except Exception as e: - logger.error('Could not get torrent info for ' + hash) + logger.error('Could not get torrent info for %s' % hash) return False else: logger.info('Successfully located information for torrent') @@ -55,7 +59,7 @@ class TorrentClient(object): def load_torrent(self, filepath): if not filepath.startswith('magnet'): - logger.info('filepath to torrent file set to : ' + filepath) + logger.info('filepath to torrent file set to : %s' % filepath) if self.client._is_authenticated is True: logger.info('Checking if Torrent Exists!') @@ -68,15 +72,15 @@ class TorrentClient(object): logger.debug('Magnet (load_torrent) initiating') else: hash = self.get_the_hash(filepath) - logger.debug('FileName (load_torrent): ' + str(os.path.basename(filepath))) + logger.debug('FileName (load_torrent): %s' % os.path.basename(filepath)) - logger.debug('Torrent Hash (load_torrent): "' + hash + '"') + logger.debug('Torrent Hash (load_torrent): "%s"' % hash) #Check if torrent already added if self.find_torrent(hash): logger.info('load_torrent: Torrent already exists!') - return {'status': False} + return {'status': False, 'error': 'Torrent already exists'} #should set something here to denote that it's already loaded, and then the failed download checker not run so it doesn't download #multiple copies of the same issues that's already downloaded else: @@ -94,8 +98,8 @@ class TorrentClient(object): else: tid = self.client.download_from_link(filepath, 
category=str(mylar.CONFIG.QBITTORRENT_LABEL)) except Exception as e: - logger.debug('Torrent not added') - return {'status': False} + logger.error('Torrent not added') + return {'status': False, 'error': e} else: logger.debug('Successfully submitted for add as a magnet. Verifying item is now on client.') else: @@ -106,8 +110,8 @@ class TorrentClient(object): else: tid = self.client.download_from_file(torrent_content, category=str(mylar.CONFIG.QBITTORRENT_LABEL)) except Exception as e: - logger.debug('Torrent not added') - return {'status': False} + logger.error('Torrent not added') + return {'status': False, 'error': e} else: logger.debug('Successfully submitted for add via file. Verifying item is now on client.') @@ -115,14 +119,14 @@ class TorrentClient(object): logger.info('Attempting to force start torrent') try: startit = self.client.force_start(hash) - logger.info('startit returned:' + str(startit)) + logger.info('startit returned: %s' % startit) except: logger.warn('Unable to force start torrent - please check your client.') elif mylar.CONFIG.QBITTORRENT_LOADACTION == 'pause': logger.info('Attempting to pause torrent after loading') try: startit = self.client.pause(hash) - logger.info('startit paused:' + str(startit)) + logger.info('startit paused: %s' % startit) except: logger.warn('Unable to pause torrent - possibly already paused?') else: @@ -133,7 +137,7 @@ class TorrentClient(object): tinfo = self.get_torrent(hash) except Exception as e: logger.warn('Torrent was not added! Please check logs') - return {'status': False} + return {'status': False, 'error': e} else: logger.info('Torrent successfully added!') filelist = self.client.get_torrent_files(hash) @@ -165,6 +169,5 @@ class TorrentClient(object): metainfo = bencode.decode(torrent_file.read()) info = metainfo['info'] thehash = hashlib.sha1(bencode.encode(info)).hexdigest().upper() - logger.debug('Hash: ' + thehash) return thehash diff --git a/mylar/webserve.py b/mylar/webserve.py index 50fb7a31..8a25a58a 100644 --- a/mylar/webserve.py +++ b/mylar/webserve.py @@ -5686,6 +5686,21 @@ class WebInterface(object): return "Successfully validated connection to %s" % host testrtorrent.exposed = True + def testqbit(self, host, username, password): + import torrent.clients.qbittorrent as QbitClient + qc = QbitClient.TorrentClient() + qclient = qc.connect(host, username, password, True) + if not qclient: + logger.warn('[qBittorrent] Could not establish connection to %s' % host) + return 'Error establishing connection to Qbittorrent' + else: + if qclient['status'] is False: + logger.warn('[qBittorrent] Could not establish connection to %s. 
Error returned:' % (host, qclient['error'])) + return 'Error establishing connection to Qbittorrent' + else: + logger.info('[qBittorrent] Successfully validated connection to %s [%s]' % (host, qclient['version'])) + return 'Successfully validated qBittorrent connection' + testqbit.exposed = True def testnewznab(self, name, host, ssl, apikey): result = helpers.newznab_test(name, host, ssl, apikey) From 40b466c31e0916b67f367cce8ed9effbbd0a6213 Mon Sep 17 00:00:00 2001 From: evilhero Date: Tue, 12 Feb 2019 19:32:33 -0500 Subject: [PATCH 51/54] FIX: fix for on-snatch notification message being messed up --- mylar/search.py | 9 ++------- 1 file changed, 2 insertions(+), 7 deletions(-) diff --git a/mylar/search.py b/mylar/search.py index a99717c9..3432d8e5 100755 --- a/mylar/search.py +++ b/mylar/search.py @@ -1528,10 +1528,8 @@ def NZB_SEARCH(ComicName, IssueNumber, ComicYear, SeriesYear, Publisher, IssueDa updater.nzblog(isid['issueid'], nzbname, ComicName, SARC=SARC, IssueArcID=IssueArcID, id=nzbid, prov=tmpprov, oneoff=oneoff) updater.foundsearch(ComicID, isid['issueid'], mode='series', provider=tmpprov) notify_snatch(sent_to, mylar.COMICINFO[0]['ComicName'], mylar.COMICINFO[0]['comyear'], mylar.COMICINFO[0]['pack_numbers'], nzbprov, True) - #notify_snatch(nzbname, sent_to, mylar.COMICINFO[0]['modcomicname'], mylar.COMICINFO[0]['comyear'], mylar.COMICINFO[0]['pack_numbers'], nzbprov) else: notify_snatch(sent_to, mylar.COMICINFO[0]['ComicName'], mylar.COMICINFO[0]['comyear'], None, nzbprov, True) - #notify_snatch(nzbname, sent_to, mylar.COMICINFO[0]['modcomicname'], mylar.COMICINFO[0]['comyear'], None, nzbprov) else: if alt_nzbname is None or alt_nzbname == '': @@ -1547,8 +1545,7 @@ def NZB_SEARCH(ComicName, IssueNumber, ComicYear, SeriesYear, Publisher, IssueDa cyear = ComicYear else: cyear = comyear - #notify_snatch(nzbname, sent_to, helpers.filesafe(modcomicname), cyear, IssueNumber, nzbprov) - notify_snatch(ComicName, sent_to, cyear, IssueNumber, nzbprov, False) + notify_snatch(sent_to, ComicName, cyear, IssueNumber, nzbprov, False) prov_count == 0 mylar.TMP_PROV = nzbprov @@ -2702,12 +2699,10 @@ def searcher(nzbprov, nzbname, comicinfo, link, IssueID, ComicID, tmpprov, direc if '[RSS]' in tmpprov: tmpprov = re.sub('\[RSS\]', '', tmpprov).strip() updater.nzblog(IssueID, nzbname, ComicName, SARC=SARC, IssueArcID=IssueArcID, id=nzbid, prov=tmpprov, alt_nzbname=alt_nzbname, oneoff=oneoff) #send out notifications for on snatch after the updater incase notification fails (it would bugger up the updater/pp scripts) - #notify_snatch(nzbname, sent_to, helpers.filesafe(modcomicname), comyear, IssueNumber, nzbprov) notify_snatch(sent_to, ComicName, comyear, IssueNumber, nzbprov, False) mylar.TMP_PROV = nzbprov return return_val -#def notify_snatch(nzbname, sent_to, modcomicname, comyear, IssueNumber, nzbprov): def notify_snatch(sent_to, comicname, comyear, IssueNumber, nzbprov, pack): if pack is False: snline = 'Issue snatched!' 
@@ -2730,7 +2725,7 @@ def notify_snatch(sent_to, comicname, comyear, IssueNumber, nzbprov, pack): if mylar.CONFIG.PUSHOVER_ENABLED and mylar.CONFIG.PUSHOVER_ONSNATCH: logger.info(u"Sending Pushover notification") pushover = notifiers.PUSHOVER() - pushover.notify(snline, snatched_nzb=snatched_name, sent_to=sent_to, prov=nzbprov) + pushover.notify(snline, snatched_nzb=snatched_name, prov=nzbprov, sent_to=sent_to) if mylar.CONFIG.BOXCAR_ENABLED and mylar.CONFIG.BOXCAR_ONSNATCH: logger.info(u"Sending Boxcar notification") boxcar = notifiers.BOXCAR() From 041231a74bd28c179ddc1a48b14d5f58e0a4a5e4 Mon Sep 17 00:00:00 2001 From: evilhero Date: Thu, 14 Feb 2019 14:14:47 -0500 Subject: [PATCH 52/54] FIX: Experimental searches would not honour one-shot issues being searched without an issue number --- mylar/findcomicfeed.py | 26 +++++++++++++++++++------- mylar/search.py | 4 ++-- 2 files changed, 21 insertions(+), 9 deletions(-) diff --git a/mylar/findcomicfeed.py b/mylar/findcomicfeed.py index fe2e2cd0..44ca4bb6 100755 --- a/mylar/findcomicfeed.py +++ b/mylar/findcomicfeed.py @@ -10,7 +10,7 @@ import mylar import unicodedata import urllib -def Startit(searchName, searchIssue, searchYear, ComicVersion, IssDateFix): +def Startit(searchName, searchIssue, searchYear, ComicVersion, IssDateFix, booktype=None): cName = searchName #clean up searchName due to webparse/redudant naming that would return too specific of results. @@ -39,7 +39,12 @@ def Startit(searchName, searchIssue, searchYear, ComicVersion, IssDateFix): encodeSearch = urllib.quote_plus(searchName) splitSearch = encodeSearch.split(" ") - if len(searchIssue) == 1: + tmpsearchIssue = searchIssue + + if any([booktype == 'One-Shot', booktype == 'TPB']): + tmpsearchIssue = '1' + loop = 4 + elif len(searchIssue) == 1: loop = 3 elif len(searchIssue) == 2: loop = 2 @@ -71,17 +76,24 @@ def Startit(searchName, searchIssue, searchYear, ComicVersion, IssDateFix): i = 1 while (i <= loop): if i == 1: - searchmethod = searchIssue + searchmethod = tmpsearchIssue elif i == 2: - searchmethod = '0' + searchIssue + searchmethod = '0' + tmpsearchIssue elif i == 3: - searchmethod = '00' + searchIssue + searchmethod = '00' + tmpsearchIssue + elif i == 4: + searchmethod = tmpsearchIssue else: break - joinSearch = "+".join(splitSearch) + "+" +searchmethod + if i == 4: + logger.fdebug('Now searching experimental for %s to try and ensure all the bases are covered' % cName) + joinSearch = "+".join(splitSearch) + else: + logger.fdebug('Now searching experimental for issue number: %s to try and ensure all the bases are covered' % searchmethod) + joinSearch = "+".join(splitSearch) + "+" +searchmethod + - logger.fdebug('Now searching experimental for issue number: %s to try and ensure all the bases are covered' % searchmethod) if mylar.CONFIG.PREFERRED_QUALITY == 1: joinSearch = joinSearch + " .cbr" elif mylar.CONFIG.PREFERRED_QUALITY == 2: joinSearch = joinSearch + " .cbz" diff --git a/mylar/search.py b/mylar/search.py index 3432d8e5..c4ba1b91 100755 --- a/mylar/search.py +++ b/mylar/search.py @@ -651,7 +651,7 @@ def NZB_SEARCH(ComicName, IssueNumber, ComicYear, SeriesYear, Publisher, IssueDa if ComicName[:17] == '0-Day Comics Pack': searchterm = {'series': ComicName, 'issue': StoreDate[8:10], 'volume': StoreDate[5:7], 'torrentid_32p': None} else: - searchterm = {'series': ComicName, 'id': ComicID, 'issue': findcomiciss, 'volume': ComicVersion, 'publisher': Publisher, 'torrentid_32p': torrentid_32p} + searchterm = {'series': ComicName, 'id': ComicID, 'issue': findcomiciss, 
'volume': ComicVersion, 'publisher': Publisher, 'torrentid_32p': torrentid_32p, 'booktype': booktype} #first we find the id on the serieslist of 32P #then we call the ajax against the id and issue# and volume (if exists) a = auth32p.info32p(searchterm=searchterm) @@ -805,7 +805,7 @@ def NZB_SEARCH(ComicName, IssueNumber, ComicYear, SeriesYear, Publisher, IssueDa elif nzbprov == 'experimental': #bb = parseit.MysterBinScrape(comsearch[findloop], comyear) logger.info('sending %s to experimental search' % findcomic) - bb = findcomicfeed.Startit(findcomic, isssearch, comyear, ComicVersion, IssDateFix) + bb = findcomicfeed.Startit(findcomic, isssearch, comyear, ComicVersion, IssDateFix, booktype) # since the regexs in findcomicfeed do the 3 loops, lets force the exit after cmloopit == 1 From 86ef093076dd1e189410d9450bc9fee0b4960aa3 Mon Sep 17 00:00:00 2001 From: barbequesauce Date: Tue, 12 Feb 2019 11:51:32 -0500 Subject: [PATCH 53/54] IMP: Allow StoryArc datatables config to endure long term --- data/interfaces/default/storyarc_detail.html | 1 + 1 file changed, 1 insertion(+) diff --git a/data/interfaces/default/storyarc_detail.html b/data/interfaces/default/storyarc_detail.html index 687c5ca3..66e0eb8f 100755 --- a/data/interfaces/default/storyarc_detail.html +++ b/data/interfaces/default/storyarc_detail.html @@ -319,6 +319,7 @@ "sInfoFiltered":"(filtered from _MAX_ total items)"}, "iDisplayLength": 25, "sPaginationType": "full_numbers", + "stateDuration": 0, "aaSorting": [] }) resetFilters("item"); From a73fe4716606797c3f8902fe2d7f596f269da413 Mon Sep 17 00:00:00 2001 From: Arathen Date: Thu, 14 Feb 2019 17:55:59 +1100 Subject: [PATCH 54/54] FIX: Combine qBittorrent add-paused action with the initial add-torrent to fix pauses occasionally being lost. Revise the way args are passed to the QBT add torrent method. --- mylar/torrent/clients/qbittorrent.py | 34 ++++++++++++---------------- 1 file changed, 14 insertions(+), 20 deletions(-) diff --git a/mylar/torrent/clients/qbittorrent.py b/mylar/torrent/clients/qbittorrent.py index 1b97d714..20203a6b 100644 --- a/mylar/torrent/clients/qbittorrent.py +++ b/mylar/torrent/clients/qbittorrent.py @@ -85,18 +85,22 @@ class TorrentClient(object): #multiple copies of the same issues that's already downloaded else: logger.info('Torrent not added yet, trying to add it now!') - if any([mylar.CONFIG.QBITTORRENT_FOLDER is None, mylar.CONFIG.QBITTORRENT_FOLDER == '', mylar.CONFIG.QBITTORRENT_FOLDER == 'None']): - down_dir = None - else: - down_dir = mylar.CONFIG.QBITTORRENT_FOLDER - logger.info('Forcing Download location to: %s' % down_dir) + # Build an arg dict based on user prefs. 
+ addargs = {} + if not any([mylar.CONFIG.QBITTORRENT_LABEL is None, mylar.CONFIG.QBITTORRENT_LABEL == '', mylar.CONFIG.QBITTORRENT_LABEL == 'None']): + addargs.update( { 'category': str(mylar.CONFIG.QBITTORRENT_LABEL) } ) + logger.info('Setting download label to: %s' % mylar.CONFIG.QBITTORRENT_LABEL) + if not any([mylar.CONFIG.QBITTORRENT_FOLDER is None, mylar.CONFIG.QBITTORRENT_FOLDER == '', mylar.CONFIG.QBITTORRENT_FOLDER == 'None']): + addargs.update( { 'savepath': str(mylar.CONFIG.QBITTORRENT_FOLDER) } ) + logger.info('Forcing download location to: %s' % mylar.CONFIG.QBITTORRENT_FOLDER) + if mylar.CONFIG.QBITTORRENT_LOADACTION == 'pause': + addargs.update( { 'paused': 'true' } ) + logger.info('Attempting to add torrent in paused state') + if filepath.startswith('magnet'): try: - if down_dir is not None: - tid = self.client.download_from_link(filepath, savepath=str(down_dir), category=str(mylar.CONFIG.QBITTORRENT_LABEL)) - else: - tid = self.client.download_from_link(filepath, category=str(mylar.CONFIG.QBITTORRENT_LABEL)) + tid = self.client.download_from_link(filepath, **addargs) except Exception as e: logger.error('Torrent not added') return {'status': False, 'error': e} @@ -105,10 +109,7 @@ class TorrentClient(object): else: try: torrent_content = open(filepath, 'rb') - if down_dir is not None: - tid = self.client.download_from_file(torrent_content, savepath=str(down_dir), category=str(mylar.CONFIG.QBITTORRENT_LABEL)) - else: - tid = self.client.download_from_file(torrent_content, category=str(mylar.CONFIG.QBITTORRENT_LABEL)) + tid = self.client.download_from_file(torrent_content, **addargs) except Exception as e: logger.error('Torrent not added') return {'status': False, 'error': e} @@ -122,13 +123,6 @@ class TorrentClient(object): logger.info('startit returned: %s' % startit) except: logger.warn('Unable to force start torrent - please check your client.') - elif mylar.CONFIG.QBITTORRENT_LOADACTION == 'pause': - logger.info('Attempting to pause torrent after loading') - try: - startit = self.client.pause(hash) - logger.info('startit paused: %s' % startit) - except: - logger.warn('Unable to pause torrent - possibly already paused?') else: logger.info('Client default add action selected. Doing nothing.')
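The net effect of this last change is that the category, save path, and paused state are all supplied in the one add call, so a torrent that should start paused never spends time unpaused first. A condensed sketch of the pattern, assuming `client` is the python-qbittorrent client object Mylar drives (this is not the patch code itself):

# Condensed sketch of the consolidated add-call pattern used above.
def build_add_args(label=None, folder=None, loadaction='default'):
    """Collect optional qBittorrent add-time settings into keyword arguments."""
    addargs = {}
    if label:
        addargs['category'] = str(label)    # assign the configured label/category
    if folder:
        addargs['savepath'] = str(folder)   # force the download location
    if loadaction == 'pause':
        addargs['paused'] = 'true'          # add the torrent in a paused state
    return addargs

# usage (assumed names):
#   addargs = build_add_args(label=mylar.CONFIG.QBITTORRENT_LABEL,
#                            folder=mylar.CONFIG.QBITTORRENT_FOLDER,
#                            loadaction=mylar.CONFIG.QBITTORRENT_LOADACTION)
#   tid = client.download_from_link(link, **addargs)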