From f0305ec1aec7c733bd4455e138a1a8c77fce0d89 Mon Sep 17 00:00:00 2001
From: evilhero
Date: Fri, 8 May 2015 22:39:28 -0400
Subject: [PATCH] IMP: 32P legacy mode (rss feed only) and Auth mode (user/pass) available now. With Auth mode, it will monitor all of your personal notification feeds, IMP: Changed access methods to 32P for retrieving links, FIX: (#1007) Fixed error for one-off downloading via the weekly pull list, FIX: (#1004) Fix for post-processing errors (missing files), FIX: (#1001) Extra scripts & pre-scripts will now accept bash scripts, FIX: Annuals were being incorrectly tallied in the Have total and were not having their status correctly updated, IMP: Fixed some discrepancies with the provider_order generation line, IMP: Made some additional adjustments to the file-checking to account for additional variations in naming, FIX: Fixed traceback errors when the folder monitor was running and couldn't complete post-processing properly, FIX: Cleanup code and some other small fixes....

---
 data/interfaces/default/config.html |  102 +++++++--
 data/interfaces/default/manage.html |   25 +--
 mylar/PostProcessor.py              |   88 ++++++--
 mylar/__init__.py                   |  127 +++++------
 mylar/auth32p.py                    |  123 +++++++++++
 mylar/cmtagmylar.py                 |    4 +-
 mylar/filechecker.py                |   66 +++---
 mylar/importer.py                   |    2 +-
 mylar/rsscheck.py                   |  127 +++++------
 mylar/rsscheckit.py                 |   28 ++-
 mylar/search.py                     |  329 ++++++++++++++++++----------
 mylar/updater.py                    |   32 ++-
 mylar/webserve.py                   |   18 +-
 mylar/weeklypull.py                 |    2 +-
 14 files changed, 715 insertions(+), 358 deletions(-)
 create mode 100644 mylar/auth32p.py

diff --git a/data/interfaces/default/config.html b/data/interfaces/default/config.html
index eb6de81b..2f919918 100755
--- a/data/interfaces/default/config.html
+++ b/data/interfaces/default/config.html
@@ -166,10 +166,6 @@
*Use this if experiencing parsing problems* -
- -
*Use this if CV's URL has changed* -
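[reviewer sketch, not part of the patch] In Legacy mode (MODE_32P == 0) the user/auth/authkey keys are still scraped out of the pasted RSS-feed URL by the string-find() code in the mylar/__init__.py hunk further down. An equivalent parse using urlparse, assuming the feed URL carries the query parameters 32P emits (user, auth, authkey):

from urlparse import urlparse, parse_qs

def keys_from_rssfeed(rssfeed_32p, passkey_32p):
    # pull the credential parameters out of the pasted feed url;
    # parse_qs returns lists, hence the [0] unwrapping
    qs = parse_qs(urlparse(rssfeed_32p).query)
    return {'user':    qs.get('user',    [None])[0],
            'auth':    qs.get('auth',    [None])[0],
            'authkey': qs.get('authkey', [None])[0],
            'passkey': passkey_32p}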
@@ -609,7 +656,7 @@
- +
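[reviewer sketch, not part of the patch] The script block in the next hunk toggles the two new option panes off the #legacymode_32p radio button; server-side, webserve.py (hunk near the end) pre-selects one of the buttons via helpers.radio(mylar.MODE_32P, 0) and helpers.radio(mylar.MODE_32P, 1). The helper itself is not in this diff; a plausible minimal form, assuming it mirrors helpers.checked():

def radio(variable, pos):
    # emit the attribute that pre-selects the radio button whose position
    # matches the stored value (0 = Legacy mode, 1 = Auth mode)
    if variable == pos:
        return 'checked'
    return ''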
@@ -1126,7 +1173,29 @@ { $("#pushbulletoptions").slideUp(); } - + }); + + if ($("#legacymode_32p").is(":checked")) + { + $("#auth32p_options").hide(); + $("#legacy32p_options").show(); + } + else + { + $("#legacy32p_options").hide(); + $("#auth32p_options").show(); + } + + $('input[type=radio]').change(function(){ + if ($("#legacymode_32p").is(":checked")) + { + $("#auth32p_options").fadeOut("fast", function() { $("#legacy32p_options").fadeIn() }); + } + else + { + $("#legacy32p_options").fadeOut("fast", function() { $("#auth32p_options").fadeIn() }); + } + }); if ($("#nzb_downloader_sabnzbd").is(":checked")) { @@ -1143,7 +1212,6 @@ $("#sabnzbd_options,#nzbget_options").hide(); $("#blackhole_options").show(); } - }); $('input[type=radio]').change(function(){ if ($("#nzb_downloader_sabnzbd").is(":checked")) diff --git a/data/interfaces/default/manage.html b/data/interfaces/default/manage.html index 7d590477..5a5e94ff 100755 --- a/data/interfaces/default/manage.html +++ b/data/interfaces/default/manage.html @@ -143,20 +143,19 @@ <%def name="javascriptIncludes()"> - diff --git a/mylar/PostProcessor.py b/mylar/PostProcessor.py index 4f0e59db..75bc715e 100755 --- a/mylar/PostProcessor.py +++ b/mylar/PostProcessor.py @@ -24,6 +24,7 @@ import logging import mylar import subprocess import urllib2 +import sys from xml.dom.minidom import parseString @@ -80,7 +81,7 @@ class PostProcessor(object): def _log(self, message, level=logger.message): #level=logger.MESSAGE): """ - A wrapper for the internal logger which also keeps track of messages and saves them to a string for $ + A wrapper for the internal logger which also keeps track of messages and saves them to a string for sabnzbd post-processing logging functions. message: The string to log (unicode) level: The log level to use (optional) @@ -94,23 +95,39 @@ class PostProcessor(object): ep_obj: The object to use when calling the pre script """ + logger.fdebug("initiating pre script detection.") self._log("initiating pre script detection.") + logger.fdebug("mylar.PRE_SCRIPTS : " + mylar.PRE_SCRIPTS) self._log("mylar.PRE_SCRIPTS : " + mylar.PRE_SCRIPTS) # for currentScriptName in mylar.PRE_SCRIPTS: - currentScriptName = str(mylar.PRE_SCRIPTS).decode("string_escape") - self._log("pre script detected...enabling: " + str(currentScriptName)) + with open(mylar.PRE_SCRIPTS, 'r') as f: + first_line = f.readline() + + if mylar.PRE_SCRIPTS.endswith('.sh'): + shell_cmd = re.sub('#!','', first_line).strip() + if shell_cmd == '' or shell_cmd is None: + shell_cmd = '/bin/bash' + else: + #forces mylar to use the executable that it was run with to run the extra script. 
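+ #non-.sh scripts are assumed to be python; running them through sys.executable also means the script file itself does not need to be executable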
+ shell_cmd = sys.executable + + currentScriptName = shell_cmd + ' ' + str(mylar.PRE_SCRIPTS).decode("string_escape") + logger.fdebug("pre script detected...enabling: " + str(currentScriptName)) # generate a safe command line string to execute the script and provide all the parameters script_cmd = shlex.split(currentScriptName, posix=False) + [str(nzb_name), str(nzb_folder), str(seriesmetadata)] + logger.fdebug("cmd to be executed: " + str(script_cmd)) self._log("cmd to be executed: " + str(script_cmd)) # use subprocess to run the command and capture output - self._log(u"Executing command "+str(script_cmd)) - self._log(u"Absolute path to script: "+script_cmd[0]) + logger.fdebug(u"Executing command "+str(script_cmd)) + logger.fdebug(u"Absolute path to script: "+script_cmd[0]) try: p = subprocess.Popen(script_cmd, stdout=subprocess.PIPE, stderr=subprocess.STDOUT, cwd=mylar.PROG_DIR) out, err = p.communicate() #@UnusedVariable + logger.fdebug(u"Script result: " + out) self._log(u"Script result: " + out) except OSError, e: + logger.warn(u"Unable to run pre_script: " + str(script_cmd)) self._log(u"Unable to run pre_script: " + str(script_cmd)) def _run_extra_scripts(self, nzb_name, nzb_folder, filen, folderp, seriesmetadata): @@ -119,23 +136,39 @@ class PostProcessor(object): ep_obj: The object to use when calling the extra script """ + logger.fdebug("initiating extra script detection.") self._log("initiating extra script detection.") + logger.fdebug("mylar.EXTRA_SCRIPTS : " + mylar.EXTRA_SCRIPTS) self._log("mylar.EXTRA_SCRIPTS : " + mylar.EXTRA_SCRIPTS) # for curScriptName in mylar.EXTRA_SCRIPTS: - curScriptName = str(mylar.EXTRA_SCRIPTS).decode("string_escape") - self._log("extra script detected...enabling: " + str(curScriptName)) + with open(mylar.EXTRA_SCRIPTS, 'r') as f: + first_line = f.readline() + + if mylar.EXTRA_SCRIPTS.endswith('.sh'): + shell_cmd = re.sub('#!','', first_line) + if shell_cmd == '' or shell_cmd is None: + shell_cmd = '/bin/bash' + else: + #forces mylar to use the executable that it was run with to run the extra script. 
+ shell_cmd = sys.executable + + curScriptName = shell_cmd + ' ' + str(mylar.EXTRA_SCRIPTS).decode("string_escape") + logger.fdebug("extra script detected...enabling: " + str(curScriptName)) # generate a safe command line string to execute the script and provide all the parameters script_cmd = shlex.split(curScriptName) + [str(nzb_name), str(nzb_folder), str(filen), str(folderp), str(seriesmetadata)] + logger.fdebug("cmd to be executed: " + str(script_cmd)) self._log("cmd to be executed: " + str(script_cmd)) # use subprocess to run the command and capture output - self._log(u"Executing command "+str(script_cmd)) - self._log(u"Absolute path to script: "+script_cmd[0]) + logger.fdebug(u"Executing command "+str(script_cmd)) + logger.fdebug(u"Absolute path to script: "+script_cmd[0]) try: p = subprocess.Popen(script_cmd, stdout=subprocess.PIPE, stderr=subprocess.STDOUT, cwd=mylar.PROG_DIR) out, err = p.communicate() #@UnusedVariable + logger.fdebug(u"Script result: " + out) self._log(u"Script result: " + out) except OSError, e: + logger.warn(u"Unable to run extra_script: " + str(script_cmd)) self._log(u"Unable to run extra_script: " + str(script_cmd)) @@ -339,10 +372,19 @@ class PostProcessor(object): nzbname = re.sub(str(ext), '', str(nzbname)) #replace spaces - nzbname = re.sub(' ', '.', str(nzbname)) - nzbname = re.sub('[\,\:\?\'\(\)]', '', str(nzbname)) - nzbname = re.sub('[\&]', 'and', str(nzbname)) - nzbname = re.sub('_', '.', str(nzbname)) + # let's change all space to decimals for simplicity + logger.fdebug('[NZBNAME]: ' + nzbname) + #gotta replace & or escape it + nzbname = re.sub("\&", 'and', nzbname) + nzbname = re.sub('[\,\:\?\']', '', nzbname) + nzbname = re.sub('[\(\)]', ' ', nzbname) + logger.fdebug('[NZBNAME] nzbname (remove chars): ' + nzbname) + nzbname = re.sub('.cbr', '', nzbname).strip() + nzbname = re.sub('.cbz', '', nzbname).strip() + nzbname = re.sub('\s+',' ', nzbname) #make sure we remove the extra spaces. + logger.fdebug('[NZBNAME] nzbname (remove extensions, double spaces): ' + nzbname) + nzbname = re.sub('[\s\_]', '.', nzbname) + logger.fdebug('[NZBNAME] end nzbname (spaces to dots): ' + nzbname) logger.fdebug(module + ' After conversions, nzbname is : ' + str(nzbname)) # if mylar.USE_NZBGET==1: @@ -1122,12 +1164,18 @@ class PostProcessor(object): class FolderCheck(): - def run(self): - module = '[FOLDER-CHECK]' + def __init__(self): + import Queue import PostProcessor, logger - #monitor a selected folder for 'snatched' files that haven't been processed - logger.info(module + ' Checking folder ' + mylar.CHECK_FOLDER + ' for newly snatched downloads') - PostProcess = PostProcessor.PostProcessor('Manual Run', mylar.CHECK_FOLDER) - result = PostProcess.Process() - logger.info(module + ' Finished checking for newly snatched downloads') + + self.module = '[FOLDER-CHECK]' + self.queue = Queue.Queue() + + def run(self): + #monitor a selected folder for 'snatched' files that haven't been processed + #junk the queue as it's not needed for folder monitoring, but needed for post-processing to run without error. 
+ logger.info(self.module + ' Checking folder ' + mylar.CHECK_FOLDER + ' for newly snatched downloads') + PostProcess = PostProcessor('Manual Run', mylar.CHECK_FOLDER, queue=self.queue) + result = PostProcess.Process() + logger.info(self.module + ' Finished checking for newly snatched downloads') diff --git a/mylar/__init__.py b/mylar/__init__.py index 2c853b59..ec07d5f5 100755 --- a/mylar/__init__.py +++ b/mylar/__init__.py @@ -287,7 +287,6 @@ TAB_DIRECTORY = None STORYARCDIR = 0 COPY2ARCDIR = 0 -CVAPIFIX = 0 CVURL = None WEEKFOLDER = 0 LOCMOVE = 0 @@ -335,12 +334,14 @@ ENABLE_KAT = 0 KAT_PROXY = None ENABLE_32P = 0 -PASSKEY_32P = None +MODE_32P = None #0 = legacymode, #1 = authmode +KEYS_32P = None RSSFEED_32P = None -USERID_32P = None -AUTH_32P = None +PASSKEY_32P = None +USERNAME_32P = None +PASSWORD_32P = None AUTHKEY_32P = None - +FEEDINFO_32P = None SNATCHEDTORRENT_NOTIFY = 0 def CheckSection(sec): @@ -403,10 +404,10 @@ def initialize(): ENABLE_META, CMTAGGER_PATH, CT_TAG_CR, CT_TAG_CBL, CT_CBZ_OVERWRITE, UNRAR_CMD, UPDATE_ENDED, INDIE_PUB, BIGGIE_PUB, IGNORE_HAVETOTAL, SNATCHED_HAVETOTAL, PROVIDER_ORDER, \ dbUpdateScheduler, searchScheduler, RSSScheduler, WeeklyScheduler, VersionScheduler, FolderMonitorScheduler, \ ENABLE_TORRENTS, MINSEEDS, TORRENT_LOCAL, LOCAL_WATCHDIR, TORRENT_SEEDBOX, SEEDBOX_HOST, SEEDBOX_PORT, SEEDBOX_USER, SEEDBOX_PASS, SEEDBOX_WATCHDIR, \ - ENABLE_RSS, RSS_CHECKINTERVAL, RSS_LASTRUN, FAILED_DOWNLOAD_HANDLING, FAILED_AUTO, ENABLE_TORRENT_SEARCH, ENABLE_KAT, KAT_PROXY, ENABLE_32P, PASSKEY_32P, RSSFEED_32P, AUTHKEY_32P, USERID_32P, AUTH_32P, SNATCHEDTORRENT_NOTIFY, \ + ENABLE_RSS, RSS_CHECKINTERVAL, RSS_LASTRUN, FAILED_DOWNLOAD_HANDLING, FAILED_AUTO, ENABLE_TORRENT_SEARCH, ENABLE_KAT, KAT_PROXY, ENABLE_32P, MODE_32P, KEYS_32P, RSSFEED_32P, USERNAME_32P, PASSWORD_32P, AUTHKEY_32P, PASSKEY_32P, FEEDINFO_32P, SNATCHEDTORRENT_NOTIFY, \ PROWL_ENABLED, PROWL_PRIORITY, PROWL_KEYS, PROWL_ONSNATCH, NMA_ENABLED, NMA_APIKEY, NMA_PRIORITY, NMA_ONSNATCH, PUSHOVER_ENABLED, PUSHOVER_PRIORITY, PUSHOVER_APIKEY, PUSHOVER_USERKEY, PUSHOVER_ONSNATCH, BOXCAR_ENABLED, BOXCAR_ONSNATCH, BOXCAR_TOKEN, \ PUSHBULLET_ENABLED, PUSHBULLET_APIKEY, PUSHBULLET_DEVICEID, PUSHBULLET_ONSNATCH, LOCMOVE, NEWCOM_DIR, FFTONEWCOM_DIR, \ - PREFERRED_QUALITY, MOVE_FILES, RENAME_FILES, LOWERCASE_FILENAMES, USE_MINSIZE, MINSIZE, USE_MAXSIZE, MAXSIZE, CORRECT_METADATA, FOLDER_FORMAT, FILE_FORMAT, REPLACE_CHAR, REPLACE_SPACES, ADD_TO_CSV, CVINFO, LOG_LEVEL, POST_PROCESSING, POST_PROCESSING_SCRIPT, SEARCH_DELAY, GRABBAG_DIR, READ2FILENAME, SEND2READ, TAB_ENABLE, TAB_HOST, TAB_USER, TAB_PASS, TAB_DIRECTORY, STORYARCDIR, COPY2ARCDIR, CVURL, CVAPIFIX, CHECK_FOLDER, ENABLE_CHECK_FOLDER, \ + PREFERRED_QUALITY, MOVE_FILES, RENAME_FILES, LOWERCASE_FILENAMES, USE_MINSIZE, MINSIZE, USE_MAXSIZE, MAXSIZE, CORRECT_METADATA, FOLDER_FORMAT, FILE_FORMAT, REPLACE_CHAR, REPLACE_SPACES, ADD_TO_CSV, CVINFO, LOG_LEVEL, POST_PROCESSING, POST_PROCESSING_SCRIPT, SEARCH_DELAY, GRABBAG_DIR, READ2FILENAME, SEND2READ, TAB_ENABLE, TAB_HOST, TAB_USER, TAB_PASS, TAB_DIRECTORY, STORYARCDIR, COPY2ARCDIR, CVURL, CHECK_FOLDER, ENABLE_CHECK_FOLDER, \ COMIC_LOCATION, QUAL_ALTVERS, QUAL_SCANNER, QUAL_TYPE, QUAL_QUALITY, ENABLE_EXTRA_SCRIPTS, EXTRA_SCRIPTS, ENABLE_PRE_SCRIPTS, PRE_SCRIPTS, PULLNEW, ALT_PULL, COUNT_ISSUES, COUNT_HAVES, COUNT_COMICS, SYNO_FIX, CHMOD_FILE, CHMOD_DIR, ANNUALS_ON, CV_ONLY, CV_ONETIMER, WEEKFOLDER, UMASK if __INITIALIZED__: @@ -523,9 +524,6 @@ def initialize(): #default to ComicLocation GRABBAG_DIR = DESTINATION_DIR 
WEEKFOLDER = bool(check_setting_int(CFG, 'General', 'weekfolder', 0)) - CVAPIFIX = bool(check_setting_int(CFG, 'General', 'cvapifix', 0)) - if CVAPIFIX is None: - CVAPIFIX = 0 LOCMOVE = bool(check_setting_int(CFG, 'General', 'locmove', 0)) if LOCMOVE is None: LOCMOVE = 0 @@ -634,38 +632,52 @@ def initialize(): print 'Converting CBT settings to 32P - ENABLE_32P: ' + str(ENABLE_32P) else: ENABLE_32P = bool(check_setting_int(CFG, 'Torrents', 'enable_32p', 0)) - - CBT_PASSKEY = check_setting_str(CFG, 'Torrents', 'cbt_passkey', '-1') - if CBT_PASSKEY != '-1': - PASSKEY_32P = CBT_PASSKEY - print 'Converting CBT settings to 32P - PASSKEY_32P: ' + str(PASSKEY_32P) - else: - PASSKEY_32P = check_setting_str(CFG, 'Torrents', 'passkey_32p', '') + + MODE_32P = check_setting_int(CFG, 'Torrents', 'mode_32p', 0) + #legacy support of older config - reload into old values for consistency. + try: + if MODE_32P != 0 and MODE_32P != 1: + #default to Legacy mode + MODE_32P = 0 + except: + MODE_32P = 0 RSSFEED_32P = check_setting_str(CFG, 'Torrents', 'rssfeed_32p', '') + PASSKEY_32P = check_setting_str(CFG, 'Torrents', 'passkey_32p', '') - #parse out the keys. - if ENABLE_32P and len(RSSFEED_32P) > 1: - userid_st = RSSFEED_32P.find('&user') - userid_en = RSSFEED_32P.find('&',userid_st+1) - if userid_en == -1: - USERID_32P = RSSFEED_32P[userid_st+6:] - else: - USERID_32P = RSSFEED_32P[userid_st+6:userid_en] + if MODE_32P == 0 and RSSFEED_32P is not None: - auth_st = RSSFEED_32P.find('&auth') - auth_en = RSSFEED_32P.find('&',auth_st+1) - if auth_en == -1: - AUTH_32P = RSSFEED_32P[auth_st+6:] - else: - AUTH_32P = RSSFEED_32P[auth_st+6:auth_en] + #parse out the keys. + if ENABLE_32P and len(RSSFEED_32P) > 1: + userid_st = RSSFEED_32P.find('&user') + userid_en = RSSFEED_32P.find('&',userid_st+1) + if userid_en == -1: + USERID_32P = RSSFEED_32P[userid_st+6:] + else: + USERID_32P = RSSFEED_32P[userid_st+6:userid_en] - authkey_st = RSSFEED_32P.find('&authkey') - authkey_en = RSSFEED_32P.find('&',authkey_st+1) - if authkey_en == -1: - AUTHKEY_32P = RSSFEED_32P[authkey_st+9:] - else: - AUTHKEY_32P = RSSFEED_32P[authkey_st+9:authkey_en] + auth_st = RSSFEED_32P.find('&auth') + auth_en = RSSFEED_32P.find('&',auth_st+1) + if auth_en == -1: + AUTH_32P = RSSFEED_32P[auth_st+6:] + else: + AUTH_32P = RSSFEED_32P[auth_st+6:auth_en] + + authkey_st = RSSFEED_32P.find('&authkey') + authkey_en = RSSFEED_32P.find('&',authkey_st+1) + if authkey_en == -1: + AUTHKEY_32P = RSSFEED_32P[authkey_st+9:] + else: + AUTHKEY_32P = RSSFEED_32P[authkey_st+9:authkey_en] + + KEYS_32P = {} + KEYS_32P = {"user": USERID_32P, + "auth": AUTH_32P, + "authkey": AUTHKEY_32P, + "passkey": PASSKEY_32P} + + USERNAME_32P = check_setting_str(CFG, 'Torrents', 'username_32p', '') + PASSWORD_32P = check_setting_str(CFG, 'Torrents', 'password_32p', '') SNATCHEDTORRENT_NOTIFY = bool(check_setting_int(CFG, 'Torrents', 'snatchedtorrent_notify', 0)) @@ -856,7 +868,6 @@ def initialize(): TMPPR_NUM +=1 - #this isn't ready for primetime just yet... 
#print 'Provider Order is:' + str(PROV_ORDER) if PROV_ORDER is None: @@ -864,17 +875,14 @@ def initialize(): else: flatt_providers = [] for pro in PROV_ORDER: - #print pro - for key, value in pro.items(): - #print key, value - try: - flatt_providers.append(re.sub('cbt','32p',value)) - except TypeError: - #if the value is None (no Name specified for Newznab entry), break out now - continue + try: + provider_seq = re.sub('cbt','32p', pro['provider']) + flatt_providers.extend([pro['order_seq'], provider_seq]) + except TypeError: + #if the value is None (no Name specified for Newznab entry), break out now + continue PROVIDER_ORDER = list(itertools.izip(*[itertools.islice(flatt_providers, i, None, 2) for i in range(2)])) - #print 'text provider order is: ' + str(PROVIDER_ORDER) config_write() # update folder formats in the config & bump up config version @@ -971,18 +979,6 @@ def initialize(): except Exception, e: logger.error('Cannot connect to the database: %s' % e) - # With the addition of NZBGet, it's possible that both SAB and NZBget are unchecked initially. - # let's force default SAB. - #if NZB_DOWNLOADER == None: - # logger.info('No Download Option selected - default to SABnzbd.') - # NZB_DOWNLOADER = 0 - # USE_SABNZBD = 1 - #else: - # logger.info('nzb_downloader is set to : ' + str(NZB_DOWNLOADER)) - #if USE_NZBGET == 0 and USE_SABNZBD == 0 : - # logger.info('No Download Server option given - defaulting to SABnzbd.') - # USE_SABNZBD = 1 - # Get the currently installed version - returns None, 'win32' or the git hash # Also sets INSTALL_TYPE variable to 'win', 'git' or 'source' CURRENT_VERSION = versioncheck.getVersion() @@ -1027,13 +1023,8 @@ def initialize(): else: logger.info('Synology Parsing Fix already implemented. No changes required at this time.') - #CV sometimes points to the incorrect DNS - here's the fix. - if CVAPIFIX == 1: - CVURL = 'http://beta.comicvine.com/api/' - logger.info('CVAPIFIX enabled: ComicVine set to beta API site') - else: - CVURL = 'http://api.comicvine.com/' - logger.info('CVAPIFIX disabled: Comicvine set to normal API site') + #set the default URL for ComicVine API here. 
+ CVURL = 'http://api.comicvine.com/' if LOCMOVE: helpers.updateComicLocation() @@ -1202,7 +1193,6 @@ def config_write(): new_config['General']['annuals_on'] = int(ANNUALS_ON) new_config['General']['cv_only'] = int(CV_ONLY) new_config['General']['cv_onetimer'] = int(CV_ONETIMER) - new_config['General']['cvapifix'] = int(CVAPIFIX) new_config['General']['check_github'] = int(CHECK_GITHUB) new_config['General']['check_github_on_startup'] = int(CHECK_GITHUB_ON_STARTUP) new_config['General']['check_github_interval'] = CHECK_GITHUB_INTERVAL @@ -1322,8 +1312,11 @@ def config_write(): new_config['Torrents']['enable_kat'] = int(ENABLE_KAT) new_config['Torrents']['kat_proxy'] = KAT_PROXY new_config['Torrents']['enable_32p'] = int(ENABLE_32P) + new_config['Torrents']['mode_32p'] = int(MODE_32P) new_config['Torrents']['passkey_32p'] = PASSKEY_32P new_config['Torrents']['rssfeed_32p'] = RSSFEED_32P + new_config['Torrents']['username_32p'] = USERNAME_32P + new_config['Torrents']['password_32p'] = PASSWORD_32P new_config['Torrents']['snatchedtorrent_notify'] = int(SNATCHEDTORRENT_NOTIFY) new_config['SABnzbd'] = {} #new_config['SABnzbd']['use_sabnzbd'] = int(USE_SABNZBD) diff --git a/mylar/auth32p.py b/mylar/auth32p.py new file mode 100644 index 00000000..d43557a5 --- /dev/null +++ b/mylar/auth32p.py @@ -0,0 +1,123 @@ +import urllib2 +import lib.requests as requests +import re +from bs4 import BeautifulSoup +import mylar +from mylar import logger + +class info32p(object): + + def __init__(self, reauthenticate=False, searchterm=None): + + self.module = '[32P-AUTHENTICATION]' + self.url = 'https://32pag.es/login.php' + self.payload = {'username': mylar.USERNAME_32P, + 'password': mylar.PASSWORD_32P} + self.headers = {'Content-type': 'application/x-www-form-urlencoded', + 'Accept-Charset': 'utf-8', + 'User-Agent': 'Mozilla/5.0'} + self.reauthenticate = reauthenticate + self.searchterm = searchterm + + def authenticate(self): + + feedinfo = [] + + with requests.session() as s: + verify = True + + if not verify: + #32P throws back an insecure warning because it can't validate against the CA. The below suppresses the message just for 32P instead of being displa$ + from lib.requests.packages.urllib3.exceptions import InsecureRequestWarning + requests.packages.urllib3.disable_warnings(InsecureRequestWarning) + + + # fetch the login page + + s.headers = self.headers + try: + s.get(self.url, verify=verify) + except requests.exceptions.SSLError as e: + logger.error(self.module + ' Unable to establish connection to 32P: ' + e) + + # post to the login form + r = s.post(self.url, data=self.payload, verify=verify) + + #need a way to find response code (200=OK), but returns 200 for everything even failed signons (returns a blank page) + #logger.info('[32P] response: ' + str(r.content)) + + if self.searchterm: + logger.info('[32P] Successfully authenticated. 
Initiating search for : ' + self.searchterm) + return self.search32p(s) + soup = BeautifulSoup(r.content) + all_script = soup.find_all("script", {"src":False}) + all_script2 = soup.find_all("link", {"rel":"alternate"}) + + for ind_s in all_script: + all_value = str(ind_s) + all_items = all_value.split() + auth_found = False + user_found = False + for al in all_items: + if al == 'authkey': + auth_found = True + elif auth_found == True and al != '=': + authkey = re.sub('["/;]','', al).strip() + auth_found = False + logger.fdebug(self.module + ' Authkey found: ' + str(authkey)) + if al == 'userid': + user_found = True + elif user_found == True and al != '=': + userid = re.sub('["/;]','', al).strip() + user_found = False + logger.fdebug(self.module + ' Userid found: ' + str(userid)) + + authfound = False + logger.info(self.module + ' Atttempting to integrate with all of your 32P Notification feeds.') + + for al in all_script2: + alurl = al['href'] + if 'auth=' in alurl and 'torrents_notify' in alurl and not authfound: + f1 = alurl.find('auth=') + f2 = alurl.find('&',f1+1) + auth = alurl[f1+5:f2] + logger.fdebug(self.module + ' Auth:' + str(auth)) + authfound = True + p1 = alurl.find('passkey=') + p2 = alurl.find('&',p1+1) + passkey = alurl[p1+8:p2] + logger.fdebug(self.module + ' Passkey:' + str(passkey)) + if self.reauthenticate: break + + if 'torrents_notify' in alurl and ('torrents_notify_' + str(passkey)) not in alurl: + notifyname_st = alurl.find('name=') + notifyname_en = alurl.find('&',notifyname_st+1) + if notifyname_en == -1: notifyname_en = len(alurl) + notifyname = alurl[notifyname_st+5:notifyname_en] + notifynumber_st = alurl.find('torrents_notify_') + notifynumber_en = alurl.find('_', notifynumber_st+17) + notifynumber = alurl[notifynumber_st:notifynumber_en] + logger.fdebug(self.module + ' [NOTIFICATION: ' + str(notifyname) + '] Notification ID: ' + str(notifynumber)) + + #generate the rss-url here + feedinfo.append({'feed': notifynumber + '_' + str(passkey), + 'feedname': notifyname, + 'user': userid, + 'auth': auth, + 'passkey': passkey, + 'authkey': authkey}) + + #set the keys here that will be used to download. + mylar.PASSKEY_32P = passkey + mylar.AUTHKEY_32P = authkey # probably not needed here. + mylar.KEYS_32P = {} + mylar.KEYS_32P = {"user": userid, + "auth": auth, + "passkey": passkey, + "authkey": authkey} + + if self.reauthenticate: + return + else: + mylar.FEEDINFO_32P = feedinfo + return feedinfo diff --git a/mylar/cmtagmylar.py b/mylar/cmtagmylar.py index 051d8f5b..035717f3 100644 --- a/mylar/cmtagmylar.py +++ b/mylar/cmtagmylar.py @@ -452,9 +452,9 @@ def run (dirName, nzbName=None, issueid=None, comversion=None, manual=None, file #shutil.move( nfilename, os.path.join(os.path.abspath(dirName),file_n)) logger.fdebug(module + ' Sucessfully moved file from temporary path.') except: - logger.error(module + ' Unable to move file from temporary path. Deletion of temporary path halted.') + logger.error(module + ' Unable to move file from temporary path [' + os.path.join(comicpath, nfilename) + ']. 
Deletion of temporary path halted.') logger.error(module + ' attempt to move: ' + os.path.join(comicpath, nfilename) + ' to ' + os.path.join(os.path.abspath(file_dir), file_n)) - return os.path.join(comicpath, nfilename) + return os.path.join(os.path.abspath(file_dir), file_n) #os.path.join(comicpath, nfilename) i = 0 diff --git a/mylar/filechecker.py b/mylar/filechecker.py index 57e1dcb9..1557a31a 100755 --- a/mylar/filechecker.py +++ b/mylar/filechecker.py @@ -147,13 +147,18 @@ def listFiles(dir,watchcomic,Publisher,AlternateSearch=None,manual=None,sarc=Non vers4vol = volrem break elif subit.lower()[:3] == 'vol': - #if in format vol.2013 etc - #because the '.' in Vol. gets removed, let's loop thru again after the Vol hit to remove it entirely - logger.fdebug('[FILECHECKER] volume indicator detected as version #:' + str(subit)) - subname = re.sub(subit, '', subname) - volrem = subit - vers4year = "yes" - + tsubit = re.sub('vol','', subit.lower()) + try: + if any( [ tsubit.isdigit(), len(tsubit) > 5 ] ): + #if in format vol.2013 etc + #because the '.' in Vol. gets removed, let's loop thru again after the Vol hit to remove it entirely + logger.fdebug('[FILECHECKER] volume indicator detected as version #:' + str(subit)) + subname = re.sub(subit, '', subname) + volrem = subit + vers4year = "yes" + except: + continue + #check if a year is present in series title (ie. spider-man 2099) #also check if decimal present in series title (ie. batman beyond 2.0) #- check if brackets present in series title @@ -192,22 +197,28 @@ def listFiles(dir,watchcomic,Publisher,AlternateSearch=None,manual=None,sarc=Non bracket_length_st = watchcomic.find('(') bracket_length_en = watchcomic.find(')', bracket_length_st) bracket_length = bracket_length_en - bracket_length_st - bracket_word = watchcomic[bracket_length_st:bracket_length_en] + bracket_word = watchcomic[bracket_length_st:bracket_length_en+1] logger.fdebug('[FILECHECKER] bracketinseries: ' + str(bracket_word)) logger.fdebug('[FILECHECKER] numberinseries: ' + str(numberinseries)) logger.fdebug('[FILECHECKER] decimalinseries: ' + str(decimalinseries)) logger.fdebug('[FILECHECKER] bracketinseries: ' + str(bracketsinseries)) + #iniitate the alternate list here so we can add in the different flavours based on above + AS_Alt = [] + #remove the brackets.. 
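+ #e.g. for a series titled 'Batman Beyond (2012)', the bracketed '2012' is blanked out of the filename here so the bracket split below does not treat it as part of the issue data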
if bracketsinseries == 'True': logger.fdebug('[FILECHECKER] modifying subname to accomodate brackets within series title.') - subnm_mod2 = re.findall('[^()]+', subname[bracket_length_en:]) - #logger.fdebug('[FILECHECKER] subnm_mod : ' + subnm_mod2) - - subnm_mod = re.sub('[\(\)]',' ', subname[:bracket_length_en]) + str(subname[bracket_length_en+1:]) - #logger.fdebug('[FILECHECKER] modified subname is now : ' + subnm_mod) - subname = subnm_mod + #subnm_mod2 = re.findall('[^()]+', subname[bracket_length_en:]) + #logger.fdebug('[FILECHECKER] subnm_mod : ' + str(subnm_mod2)) + #subnm_mod = re.sub('[\(\)]',' ', subname[:bracket_length_st]) + str(subname[bracket_length_en:]) + #logger.fdebug('[FILECHECKER] subnm_mod_st: ' + str(subname[:bracket_length_st])) + #logger.fdebug('[FILECHECKER] subnm_mod_en: ' + str(subname[bracket_length_en:])) + #logger.fdebug('[FILECHECKER] modified subname is now : ' + str(subnm_mod)) + if bracket_word in subname: + nobrackets_word = re.sub('[\(\)]','', bracket_word).strip() + subname = re.sub(nobrackets_word, '', subname).strip() subnm = re.findall('[^()]+', subname) logger.fdebug('[FILECHECKER] subnm len : ' + str(len(subnm))) @@ -514,7 +525,7 @@ def listFiles(dir,watchcomic,Publisher,AlternateSearch=None,manual=None,sarc=Non detectthe_sub = True subname = re.sub('\s+', ' ', subname).strip() - AS_Alt = [] + #AS_Alt = [] AS_Tuple = [] if AlternateSearch is not None: chkthealt = AlternateSearch.split('##') @@ -758,7 +769,7 @@ def listFiles(dir,watchcomic,Publisher,AlternateSearch=None,manual=None,sarc=Non justthedigits = justthedigits_1.split(' ', 1)[0] digitsvalid = "false" - if not justthedigits.isdigit(): + if not justthedigits.isdigit() and 'annual' not in justthedigits.lower(): logger.fdebug('[FILECHECKER] Invalid character found in filename after item removal - cannot find issue # with this present. 
Temporarily removing it from the comparison to be able to proceed.') try: justthedigits = justthedigits_1.split(' ', 1)[1] @@ -767,17 +778,18 @@ def listFiles(dir,watchcomic,Publisher,AlternateSearch=None,manual=None,sarc=Non except: pass - if not digitsvalid: - for jdc in list(justthedigits): - if not jdc.isdigit(): - jdc_start = justthedigits.find(jdc) - alpha_isschk = justthedigits[jdc_start:] - for issexcept in issue_exceptions: - if issexcept.lower() in alpha_isschk.lower() and len(alpha_isschk) <= len(issexcept): - logger.fdebug('[FILECHECKER] ALPHANUMERIC EXCEPTION : [' + justthedigits + ']') - digitsvalid = "true" - break - if digitsvalid == "true": break + if digitsvalid == "false": + if 'annual' not in justthedigits.lower(): + for jdc in list(justthedigits): + if not jdc.isdigit(): + jdc_start = justthedigits.find(jdc) + alpha_isschk = justthedigits[jdc_start:] + for issexcept in issue_exceptions: + if issexcept.lower() in alpha_isschk.lower() and len(alpha_isschk) <= len(issexcept): + logger.fdebug('[FILECHECKER] ALPHANUMERIC EXCEPTION : [' + justthedigits + ']') + digitsvalid = "true" + break + if digitsvalid == "true": break try: tmpthedigits = justthedigits_1.split(' ', 1)[1] diff --git a/mylar/importer.py b/mylar/importer.py index 503ae469..5247c208 100755 --- a/mylar/importer.py +++ b/mylar/importer.py @@ -30,7 +30,7 @@ import sqlite3 import cherrypy import mylar -from mylar import logger, helpers, db, mb, albumart, cv, parseit, filechecker, search, updater, moveit, comicbookdb +from mylar import logger, helpers, db, mb, cv, parseit, filechecker, search, updater, moveit, comicbookdb def is_exists(comicid): diff --git a/mylar/rsscheck.py b/mylar/rsscheck.py index 297b903a..bed3791f 100755 --- a/mylar/rsscheck.py +++ b/mylar/rsscheck.py @@ -10,7 +10,7 @@ import gzip from StringIO import StringIO import mylar -from mylar import db, logger, ftpsshup, helpers +from mylar import db, logger, ftpsshup, helpers, auth32p def _start_newznab_attr(self, attrsD): @@ -29,54 +29,10 @@ def _start_newznab_attr(self, attrsD): feedparser._FeedParserMixin._start_newznab_attr = _start_newznab_attr -def tehMain(forcerss=None): - logger.info('RSS Feed Check was last run at : ' + str(mylar.RSS_LASTRUN)) - firstrun = "no" - #check the last run of rss to make sure it's not hammering. - if mylar.RSS_LASTRUN is None or mylar.RSS_LASTRUN == '' or mylar.RSS_LASTRUN == '0' or forcerss == True: - logger.info('RSS Feed Check First Ever Run.') - firstrun = "yes" - mins = 0 - else: - c_obj_date = datetime.datetime.strptime(mylar.RSS_LASTRUN, "%Y-%m-%d %H:%M:%S") - n_date = datetime.datetime.now() - absdiff = abs(n_date - c_obj_date) - mins = (absdiff.days * 24 * 60 * 60 + absdiff.seconds) / 60.0 #3600 is for hours. 
- - if firstrun == "no" and mins < int(mylar.RSS_CHECKINTERVAL): - logger.fdebug('RSS Check has taken place less than the threshold - not initiating at this time.') - return - - mylar.RSS_LASTRUN = helpers.now() - logger.fdebug('Updating RSS Run time to : ' + str(mylar.RSS_LASTRUN)) - mylar.config_write() - - #function for looping through nzbs/torrent feed - if mylar.ENABLE_TORRENT_SEARCH: #and mylar.ENABLE_TORRENTS: - logger.info('[RSS] Initiating Torrent RSS Check.') - if mylar.ENABLE_KAT: - logger.info('[RSS] Initiating Torrent RSS Feed Check on KAT.') - torrents(pickfeed='3') - torrents(pickfeed='6') - if mylar.ENABLE_CBT: - logger.info('[RSS] Initiating Torrent RSS Feed Check on CBT.') - torrents(pickfeed='1') - #torrents(pickfeed='4') - logger.info('[RSS] Initiating RSS Feed Check for NZB Providers.') - nzbs(forcerss=forcerss) - logger.info('[RSS] RSS Feed Check/Update Complete') - logger.info('[RSS] Watchlist Check for new Releases') - mylar.search.searchforissue(rsscheck='yes') - logger.info('[RSS] Watchlist Check complete.') - if forcerss: - logger.info('Successfully ran RSS Force Check.') - return - -def torrents(pickfeed=None,seriesname=None,issue=None): +def torrents(pickfeed=None,seriesname=None,issue=None,feedinfo=None): if pickfeed is None: return - passkey = mylar.PASSKEY_32P srchterm = None if seriesname: @@ -121,46 +77,53 @@ def torrents(pickfeed=None,seriesname=None,issue=None): feedtype = None if pickfeed == "1" and mylar.ENABLE_32P: # 32pages new releases feed. - if mylar.RSSFEED_32P is None or mylar.RSSFEED_32P == 'None' or mylar.RSSFEED_32P == '': - logger.error('[RSS] Warning - you NEED to enter in an RSS Feed URL for 32P in order to use the rss feeds (it can be any feed, it just needs ALL of your keys in the link)') + if any( [mylar.USERNAME_32P is None, mylar.USERNAME_32P == '', mylar.PASSWORD_32P is None, mylar.PASSWORD_32P == ''] ): + logger.error('[RSS] Warning - you NEED to enter in your 32P Username and Password to use this option.') lp=+1 continue - feed = 'http://32pag.es/feeds.php?feed=torrents_all&user=' + mylar.USERID_32P + '&auth=' + mylar.AUTH_32P + '&passkey=' + mylar.PASSKEY_32P + '&authkey=' + mylar.AUTHKEY_32P + feed = 'https://32pag.es/feeds.php?feed=torrents_all&user=' + feedinfo['user'] + '&auth=' + feedinfo['auth'] + '&passkey=' + feedinfo['passkey'] + '&authkey=' + feedinfo['authkey'] feedtype = ' from the New Releases RSS Feed for comics' elif pickfeed == "2" and srchterm is not None: # kat.ph search feed = kat_url + "usearch/" + str(srchterm) + "%20category%3Acomics%20seeds%3A" + str(mylar.MINSEEDS) + "/?rss=1" elif pickfeed == "3": # kat.ph rss feed feed = kat_url + "usearch/category%3Acomics%20seeds%3A" + str(mylar.MINSEEDS) + "/?rss=1" feedtype = ' from the New Releases RSS Feed for comics' - elif pickfeed == "4": #cbt follow link - feed = "http://comicbt.com/rss.php?action=follow&passkey=" + str(passkey) + "&type=dl" - feedtype = ' from your 32P Followlist RSS Feed' + elif pickfeed == "4": #32p search + if any( [mylar.USERNAME_32P is None, mylar.USERNAME_32P == '', mylar.PASSWORD_32P is None, mylar.PASSWORD_32P == ''] ): + logger.error('[RSS] Warning - you NEED to enter in your 32P Username and Password to use this option.') + lp=+1 + continue + if mylar.MODE_32P == 0: + logger.warn('[32P] Searching is not available in 32p Legacy mode. 
Switch to Auth mode to use the search functionality.') + lp=+1 + continue + return elif pickfeed == "5" and srchterm is not None: # kat.ph search (category:other since some 0-day comics initially get thrown there until categorized) feed = kat_url + "usearch/" + str(srchterm) + "%20category%3Aother%20seeds%3A1/?rss=1" elif pickfeed == "6": # kat.ph rss feed (category:other so that we can get them quicker if need-be) feed = kat_url + "usearch/.cbr%20category%3Aother%20seeds%3A" + str(mylar.MINSEEDS) + "/?rss=1" feedtype = ' from the New Releases for category Other RSS Feed that contain comics' - elif pickfeed == "7": # 32p series link -# seriespage = "http://comicbt.com/series.php?passkey=" + str(passkey) - feed = "http://comicbt.com/rss.php?action=series&series=" + str(seriesno) + "&passkey=" + str(passkey) + elif int(pickfeed) >=7 and feedinfo is not None: + #personal 32P notification feeds. + #get the info here + feed = 'https://32pag.es/feeds.php?feed=' + feedinfo['feed'] + '&user=' + feedinfo['user'] + '&auth=' + feedinfo['auth'] + '&passkey=' + feedinfo['passkey'] + '&authkey=' + feedinfo['authkey'] + '&name=' + feedinfo['feedname'] + feedtype = ' from your Personal Notification Feed : ' + feedinfo['feedname'] + else: logger.error('invalid pickfeed denoted...') return - #print 'feed URL: ' + str(feed) + #logger.info('feed URL: ' + str(feed)) - if pickfeed == "7": # we need to get the series # first - seriesSearch(seriespage, seriesname) - feedme = feedparser.parse(feed) if pickfeed == "3" or pickfeed == "6" or pickfeed == "2" or pickfeed == "5": picksite = 'KAT' - elif pickfeed == "1" or pickfeed == "4": + elif pickfeed == "1" or pickfeed == "4" or int(pickfeed) > 7: picksite = '32P' i = 0 - + logger.fdebug('results: ' + str(feedme)) for entry in feedme['entries']: if pickfeed == "3" or pickfeed == "6": tmpsz = feedme.entries[i].enclosures[0] @@ -182,8 +145,8 @@ def torrents(pickfeed=None,seriesname=None,issue=None): 'size': tmpsz['length'] }) - elif pickfeed == "1" or pickfeed == "4": - if pickfeed == "1": + elif pickfeed == "1" or pickfeed == "4" or int(pickfeed) > 7: + if pickfeed == "1" or int(pickfeed) > 7: tmpdesc = feedme.entries[i].description st_pub = feedme.entries[i].title.find('(') st_end = feedme.entries[i].title.find(')') @@ -577,8 +540,9 @@ def torrentdbsearch(seriesname,issue,comicid=None,nzbprov=None): if tor['Site'] == '32P': st_pub = rebuiltline.find('(') - st_end = rebuiltline.find(')') - rebuiltline = rebuiltline[st_end+1:] + if st_pub < 2 and st_pub != -1: + st_end = rebuiltline.find(')') + rebuiltline = rebuiltline[st_end+1:] tortheinfo.append({ 'title': rebuiltline, #cttitle, @@ -731,7 +695,20 @@ def torsend2client(seriesname, issue, seriesyear, linkit, site): if site == '32P': url = 'https://32pag.es/torrents.php' - verify = False + verify = True + + if any( [mylar.USERNAME_32P is None, mylar.USERNAME_32P == '', mylar.PASSWORD_32P is None, mylar.PASSWORD_32P == ''] ): + logger.error('[RSS] Unable to sign-on to 32P to validate settings and initiate download sequence. Please enter/check your username password in the configuration.') + return "fail" + elif mylar.PASSKEY_32P is None or mylar.AUTHKEY_32P is None or mylar.KEYS_32P is None: + if mylar.MODE_32P == 1: + feed32p = auth32p.info32p(reauthenticate=True) + feedinfo = feed32p.authenticate() + else: + logger.warn('[32P] Unavailable to retrieve keys from provided RSS Feed. 
Make sure you have provided a CURRENT RSS Feed from 32P') + return "fail" + else: + logger.fdebug('[32P-AUTHENTICATION] 32P Authentication already done. Attempting to use existing keys.') payload = {'action': 'download', 'torrent_pass': mylar.PASSKEY_32P, @@ -758,8 +735,6 @@ def torsend2client(seriesname, issue, seriesyear, linkit, site): payload = None verify = False - #logger.info('payload set to : ' + str(payload)) - if not verify: #32P throws back an insecure warning because it can't validate against the CA. The below suppresses the message just for 32P instead of being displayed. #disable SSL warnings - too many 'warning' messages about invalid certificates @@ -782,7 +757,21 @@ def torsend2client(seriesname, issue, seriesyear, linkit, site): except Exception, e: logger.warn('Error fetching data from %s: %s' % (site, e)) - return "fail" + if site == '32P': + if mylar.MODE_32P == 1: + logger.info('Attempting to re-authenticate against 32P and poll new keys as required.') + feed32p = auth32p.info32p(reauthenticate=True) + feedinfo = feed32p.authenticate() + try: + r = requests.get(url, params=payload, verify=verify, stream=True, headers=headers) + except Exception, e: + logger.warn('Error fetching data from %s: %s' % (site, e)) + return "fail" + else: + logger.warn('[32P] Unable to authenticate using existing RSS Feed given. Make sure that you have provided a CURRENT feed from 32P') + return "fail" + else: + return "fail" if site == 'KAT': if r.headers.get('Content-Encoding') == 'gzip': diff --git a/mylar/rsscheckit.py b/mylar/rsscheckit.py index 14bfc09e..df934ea4 100755 --- a/mylar/rsscheckit.py +++ b/mylar/rsscheckit.py @@ -18,7 +18,7 @@ from __future__ import with_statement import datetime import threading import mylar -from mylar import logger, rsscheck, helpers +from mylar import logger, rsscheck, helpers, auth32p rss_lock = threading.Lock() @@ -62,8 +62,30 @@ class tehMain(): rsscheck.torrents(pickfeed='6') if mylar.ENABLE_32P: logger.info('[RSS] Initiating Torrent RSS Feed Check on 32P.') - rsscheck.torrents(pickfeed='1') - #rsscheck.torrents(pickfeed='4') + if mylar.MODE_32P == 0: + if any( [mylar.PASSKEY_32P is None, mylar.PASSKEY_32P == '', mylar.RSSFEED_32P is None, mylar.RSSFEED_32P == ''] ): + logger.error('[RSS] Unable to validate information from provided RSS Feed. Verify that the feed provided is a current one.') + else: + logger.fdebug('[RSS] 32P mode set to Legacy mode. Monitoring New Releases feed only.') + rsscheck.torrents(pickfeed='1', feedinfo=mylar.KEYS_32P) + else: + logger.fdebug('[RSS] 32P mode set to Auth mode. Monitoring all personal notification feeds & New Releases feed') + if any( [mylar.USERNAME_32P is None, mylar.USERNAME_32P == '', mylar.PASSWORD_32P is None] ): + logger.error('[RSS] Unable to sign-on to 32P to validate settings. Please enter/check your username password in the configuration.') + else: + if mylar.KEYS_32P is None: + feed32p = auth32p.info32p() + feedinfo = feed32p.authenticate() + if len(feedinfo) >0: + rsscheck.torrents(pickfeed='1', feedinfo=feedinfo[0]) + x = 0 + #assign personal feeds for 32p > +8 + for fi in feedinfo: + x+=1 + pfeed_32p = str(7 + x) + rsscheck.torrents(pickfeed=pfeed_32p, feedinfo=fi) + else: + logger.error('[RSS] Unable to retrieve any information from 32P for RSS Feeds. 
Skipping for now.') logger.info('[RSS] Initiating RSS Feed Check for NZB Providers.') rsscheck.nzbs(forcerss=self.forcerss) logger.info('[RSS] RSS Feed Check/Update Complete') diff --git a/mylar/search.py b/mylar/search.py index 3edad87d..2f719077 100755 --- a/mylar/search.py +++ b/mylar/search.py @@ -27,6 +27,7 @@ import getopt import re import time import urlparse +from urlparse import urljoin from xml.dom.minidom import parseString import urllib2 import email.utils @@ -51,7 +52,10 @@ def search_init(ComicName, IssueNumber, ComicYear, SeriesYear, Publisher, IssueD logger.fdebug('Publisher is : ' + Publisher) issuetitle = helpers.get_issue_title(IssueID) - logger.info('Issue Title given as : ' + issuetitle) + if issuetitle: + logger.info('Issue Title given as : ' + issuetitle) + else: + logger.fdebug('Issue Title not found. Setting to None.') if mode == 'want_ann': logger.info("Annual issue search detected. Appending to issue #") @@ -63,14 +67,15 @@ def search_init(ComicName, IssueNumber, ComicYear, SeriesYear, Publisher, IssueD if IssueID is None: #one-off the download. - print ("ComicName: " + ComicName) - print ("Issue: " + str(IssueNumber)) - print ("Year: " + str(ComicYear)) - print ("IssueDate:" + str(IssueDate)) + logger.fdebug('One-Off Search parameters:') + logger.fdebug("ComicName: " + ComicName) + logger.fdebug("Issue: " + str(IssueNumber)) + logger.fdebug("Year: " + str(ComicYear)) + logger.fdebug("IssueDate:" + str(IssueDate)) if SARC: - print ("Story-ARC issue!") - print ("Story-ARC: " + str(SARC)) - print ("IssueArcID: " + str(IssueArcID)) + logger.fdebug("Story-ARC Search parameters:") + logger.fdebug("Story-ARC: " + str(SARC)) + logger.fdebug("IssueArcID: " + str(IssueArcID)) torprovider = [] torp = 0 @@ -683,8 +688,13 @@ def NZB_SEARCH(ComicName, IssueNumber, ComicYear, SeriesYear, Publisher, IssueDa # -- end size constaints. - - thisentry = entry['title'] + if '(digital first)' in entry['title'].lower(): + dig_moving = re.sub('\(digital first\)', '', entry['title'].lower()).strip() + dig_moving = re.sub('[\s+]', ' ', dig_moving) + dig_mov_end = dig_moving + ' (Digital First)' + thisentry = dig_mov_end + else: + thisentry = entry['title'] logger.fdebug("Entry: " + thisentry) cleantitle = thisentry @@ -1282,7 +1292,7 @@ def NZB_SEARCH(ComicName, IssueNumber, ComicYear, SeriesYear, Publisher, IssueDa nzbname = nzbname_create(nzbprov, info=comicinfo, title=entry['title']) #generate the send-to and actually send the nzb / torrent. - searchresult = searcher(nzbprov, nzbname, comicinfo, entry['link'], IssueID, ComicID, tmpprov) + searchresult = searcher(nzbprov, nzbname, comicinfo, entry['link'], IssueID, ComicID, tmpprov, newznab=newznab_host) if searchresult == 'downloadchk-fail': continue @@ -1576,24 +1586,23 @@ def nzbname_create(provider, title=None, info=None): else: # let's change all space to decimals for simplicity - nzbname = re.sub('\s+',' ', title) #make sure we remove the extra spaces. logger.fdebug('[SEARCHER] entry[title]: ' + title) + #gotta replace & or escape it + nzbname = re.sub("\&", 'and', title) + nzbname = re.sub('[\,\:\?\']', '', nzbname) + nzbname = re.sub('[\(\)]', ' ', nzbname) + logger.fdebug('[SEARCHER] nzbname (remove chars): ' + nzbname) + nzbname = re.sub('.cbr', '', nzbname).strip() + nzbname = re.sub('.cbz', '', nzbname).strip() + nzbname = re.sub('\s+',' ', nzbname) #make sure we remove the extra spaces. 
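+ #e.g. 'Batman & Robin (2011) 005.cbz' is now 'Batman and Robin 2011 005', and becomes 'Batman.and.Robin.2011.005' after the space-to-dot pass below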
logger.fdebug('[SEARCHER] nzbname (\s): ' + nzbname) nzbname = re.sub(' ', '.', nzbname) - logger.fdebug('[SEARCHER] nzbname (space to .): ' + nzbname) - #gotta replace & or escape it - nzbname = re.sub("\&", 'and', nzbname) - nzbname = re.sub('[\,\:\?\'\(\)]', '', nzbname) logger.fdebug('[SEARCHER] end nzbname: ' + nzbname) - - nzbname = re.sub('.cbr', '', nzbname).strip() - nzbname = re.sub('.cbz', '', nzbname).strip() - logger.fdebug("nzbname used for post-processing:" + nzbname) return nzbname -def searcher(nzbprov, nzbname, comicinfo, link, IssueID, ComicID, tmpprov, directsend=None): +def searcher(nzbprov, nzbname, comicinfo, link, IssueID, ComicID, tmpprov, directsend=None, newznab=None): alt_nzbname = None #load in the details of the issue from the tuple. @@ -1624,113 +1633,111 @@ def searcher(nzbprov, nzbname, comicinfo, link, IssueID, ComicID, tmpprov, direc #if sab priority isn't selected, default to Normal (0) nzbgetpriority = "0" + nzbid = generate_id(nzbprov, link) + if link and (nzbprov != 'KAT' and nzbprov != '32P'): - opener = urllib.FancyURLopener({}) - opener.addheaders = [] - opener.addheader('User-Agent', str(mylar.USER_AGENT)) + + #generate nzbid here. + nzo_info = {} filen = None + + payload = None + headers = {'User-Agent': str(mylar.USER_AGENT)} + #link doesn't have the apikey - add it and use ?t=get for newznab based. + + if nzbprov == 'newznab': + #need to basename the link so it just has the id/hash. + #rss doesn't store apikey, have to put it back. + name_newznab = newznab[0].rstrip() + host_newznab = newznab[1].rstrip() + if host_newznab[len(host_newznab)-1:len(host_newznab)] != '/': + host_newznab_fix = str(host_newznab) + "/" + else: + host_newznab_fix = host_newznab + + apikey = newznab[2].rstrip() + down_url = host_newznab_fix + 'api' + + payload = {'t': 'get', + 'id': str(nzbid), + 'apikey': str(apikey)} + + logger.info('payload:' + str(payload)) + + elif nzbprov == 'dognzb': + #dognzb - need to add back in the dog apikey + down_url = urljoin(link, str(mylar.DOGNZB_APIKEY)) + else: + #experimental - direct link. 
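+ #the experimental provider returns a ready-to-fetch nzb url, so it is used verbatim with no api key parameters or custom headers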
+ down_url = link + headers = None + + logger.info('download url:' + down_url) + + import lib.requests as requests + try: - fn, header = opener.retrieve(link) - except: - fn = None - for tup in header.items(): - try: - item = tup[0].lower() - value = tup[1].strip() - except: - continue - if item in ('category_id', 'x-dnzb-category'): - category = value - elif item in ('x-dnzb-moreinfo',): - nzo_info['more_info'] = value - elif item in ('x-dnzb-name',): - filen = value - nzo_info['filename'] = filen - elif item == 'x-dnzb-propername': - nzo_info['propername'] = value - elif item == 'x-dnzb-episodename': - nzo_info['episodename'] = value - elif item == 'x-dnzb-year': - nzo_info['year'] = value - elif item == 'x-dnzb-failure': - nzo_info['failure'] = value - elif item == 'x-dnzb-details': - nzo_info['details'] = value - elif item in ('content-length',): - try: - ivalue = int(value) - except: - ivalue = 0 - length = ivalue - nzo_info['length'] = length + r = requests.get(down_url, params=payload, headers=headers) - if not filen: - for item in tup: - if "filename=" in item: - filen = item[item.index("filename=") + 9:].strip(';').strip('"') - logger.fdebug('nzo_info:' + str(nzo_info)) + except Exception, e: + logger.warn('Error fetching data from %s: %s' % (tmpprov, e)) + return "sab-fail" - if filen is None and mylar.FAILED_DOWNLOAD_HANDLING: - logger.fdebug('[FAILED_DOWNLOAD] [' + str(tmpprov) + '] Marked as a bad download : ' + str(link)) - return "downloadchk-fail" + try: + nzo_info['filename'] = r.headers['x-dnzb-name'] + filen = r.headers['x-dnzb-name'] + except KeyError: + filen = None + try: + nzo_info['propername'] = r.headers['x-dnzb-propername'] + except KeyError: + pass + try: + nzo_info['failure'] = r.headers['x-dnzb-failure'] + except KeyError: + pass + try: + nzo_info['details'] = r.headers['x-dnzb-details'] + except KeyError: + pass - #convert to a generic type of format to help with post-processing. - filen = re.sub('.cbr', '', filen).strip() - filen = re.sub('.cbz', '', filen).strip() - filen = re.sub("\&", 'and', filen) - filen = re.sub('[\,\:\?\'\(\)]', '', filen) + if filen is None: + try: + filen = r.headers['content-disposition'][r.headers['content-disposition'].index("filename=") + 9:].strip(';').strip('"') + except: + pass - - if re.sub('.nzb','', filen.lower()).strip() != re.sub('.nzb','', nzbname.lower()).strip(): - alt_nzbname = re.sub('.nzb','', filen).strip() - alt_nzbname = re.sub('[\s+]', ' ', alt_nzbname) - alt_nzbname = re.sub('[\s\_]', '.', alt_nzbname) - logger.info('filen: ' + alt_nzbname + ' -- nzbname: ' + nzbname + ' are not identical. Storing extra value as : ' + alt_nzbname) - - - #check if nzb is in do not download list - if nzbprov == 'experimental': - #id is located after the /download/ portion - url_parts = urlparse.urlparse(link) - path_parts = url_parts[2].rpartition('/') - nzbtempid = path_parts[0].rpartition('/') - nzblen = len(nzbtempid) - nzbid = nzbtempid[nzblen-1] - elif nzbprov == '32P': - #32P just has the torrent id stored. - nzbid = link - elif nzbprov == 'KAT': - if 'http' not in link: - nzbid = link + if filen is None: + logger.error('Unable to download nzb from link: ' + str(link)) else: - #for users that already have the cache in place. 
- url_parts = urlparse.urlparse(link) - path_parts = url_parts[2].rpartition('/') - nzbtempid = path_parts[2] - nzbid = re.sub('.torrent', '', nzbtempid).rstrip() - elif nzbprov == 'nzb.su': - url_parts = urlparse.urlparse(link) - path_parts = url_parts[2].rpartition('/') - nzbid = re.sub('.nzb&','', path_parts[2]).strip() - elif nzbprov == 'dognzb': - url_parts = urlparse.urlparse(link) - path_parts = url_parts[2].rpartition('/') - nzbid = path_parts[0].rsplit('/',1)[1] - elif nzbprov == 'newznab': - #if in format of http://newznab/getnzb/.nzb&i=1&r=apikey - tmpid = urlparse.urlparse(link)[4] #param 4 is the query string from the url. - if tmpid == '' or tmpid is None: - nzbid = os.path.splitext(link)[0].rsplit('/', 1)[1] - else: - # for the geek in all of us... - st = tmpid.find('&id') - end = tmpid.find('&',st+1) - nzbid = re.sub('&id=','', tmpid[st:end]).strip() + #convert to a generic type of format to help with post-processing. + filen = re.sub("\&", 'and', filen) + filen = re.sub('[\,\:\?\']', '', filen) + filen = re.sub('[\(\)]', ' ', filen) + logger.fdebug('[FILENAME] filename (remove chars): ' + filen) + filen = re.sub('.cbr', '', filen).strip() + filen = re.sub('.cbz', '', filen).strip() + filen = re.sub('\s+',' ', filen) #make sure we remove the extra spaces. + logger.fdebug('[FILENAME] nzbname (\s): ' + filen) + filen = re.sub(' ', '.', filen) + logger.fdebug('[FILENAME] end nzbname: ' + filen) + + if re.sub('.nzb','', filen.lower()).strip() != re.sub('.nzb','', nzbname.lower()).strip(): + alt_nzbname = re.sub('.nzb','', filen).strip() + alt_nzbname = re.sub('[\s+]', ' ', alt_nzbname) + alt_nzbname = re.sub('[\s\_]', '.', alt_nzbname) + logger.info('filen: ' + alt_nzbname + ' -- nzbname: ' + nzbname + ' are not identical. Storing extra value as : ' + alt_nzbname) if mylar.FAILED_DOWNLOAD_HANDLING: if nzbid is not None: + try: + # only nzb providers will have a filen, try it and pass exception + if filen is None: + return FailedMark(ComicID=ComicID, IssueID=IssueID, id=nzbid, nzbname=nzbname, prov=nzbprov) + except: + pass call_the_fail = Failed.FailedProcessor(nzb_name=nzbname, id=nzbid, issueid=IssueID, comicid=ComicID, prov=tmpprov) check_the_fail = call_the_fail.failed_check() if check_the_fail == 'Failed': @@ -1739,6 +1746,13 @@ def searcher(nzbprov, nzbname, comicinfo, link, IssueID, ComicID, tmpprov, direc #continue elif check_the_fail == 'Good': logger.fdebug('[FAILED_DOWNLOAD_CHECKER] This is not in the failed downloads list. 
Will continue with the download.') + else: + try: + # only nzb providers will have a filen, try it and pass exception + if filen is None: + return "sab-fail" + except: + pass logger.fdebug('issues match!') logger.info(u"Found " + ComicName + " (" + str(comyear) + ") issue: " + IssueNumber + " using " + str(tmpprov) ) @@ -1749,12 +1763,27 @@ def searcher(nzbprov, nzbname, comicinfo, link, IssueID, ComicID, tmpprov, direc #if mylar.USE_SABNZBD: # linkit = linkit.replace("&", "%26") linkapi = linkstart + linkit + if nzbprov == 'newznab': + apikey = newznab[2].rstrip() + if '#' in newznab[3].rstrip(): + uidstart = newznab[3].find('#') + uid = newznab[3][:uidstart] + else: + uid = newznab[3].strip() + + fileURL = urllib.quote_plus(linkapi + '&i=' + uid + '&r=' + apikey) + else: + fileURL = urllib.quote_plus(linkapi + '&r=' + apikey) # + '&i=' + uid + '&r=' + apikey) + elif nzbprov == 'dognzb': + linkapi = down_url + fileURL = urllib.quote_plus(down_url) else: # this should work for every other provider #linkstart = linkstart.replace("&", "%26") linkapi = linkstart - fileURL = urllib.quote_plus(linkapi) + fileURL = urllib.quote_plus(linkapi) + logger.fdebug("link given by: " + str(nzbprov)) #logger.fdebug("link: " + str(linkstart)) #logger.fdebug("linkforapi: " + str(linkapi)) @@ -1875,12 +1904,29 @@ def searcher(nzbprov, nzbname, comicinfo, link, IssueID, ComicID, tmpprov, direc #final build of send-to-SAB logger.fdebug("Completed send-to-SAB link: " + str(helpers.apiremove(tmpapi,'&'))) + logger.info('sab-to-send:' + str(tmpapi)) + try: - urllib2.urlopen(tmpapi) - except urllib2.URLError: - logger.error(u"Unable to send nzb file to SABnzbd") + from lib.requests.packages.urllib3 import disable_warnings + disable_warnings() + except: + logger.warn('Unable to disable https warnings. Expect some spam if using https nzb providers.') + + import lib.requests as requests + + try: + requests.put(tmpapi, verify=False) + except: + logger.error('Unable to send nzb file to SABnzbd') return "sab-fail" +# this works for non-http sends to sab (when both sab AND provider are non-https) +# try: +# urllib2.urlopen(tmpapi) +# except urllib2.URLError: +# logger.error(u"Unable to send nzb file to SABnzbd") +# return "sab-fail" + sent_to = "SABnzbd+" logger.info(u"Successfully sent nzb file to SABnzbd") @@ -1944,7 +1990,8 @@ def FailedMark(IssueID, ComicID, id, nzbname, prov): FailProcess = Failed.FailedProcessor(issueid=IssueID, comicid=ComicID, id=id, nzb_name=nzbname, prov=prov) Markit = FailProcess.markFailed() - return "torrent-fail" + if prov == '32P' or prov == 'KAT': return "torrent-fail" + else: return "downloadchk-fail" def IssueTitleCheck(issuetitle, watchcomic_split, splitit, splitst, issue_firstword, hyphensplit, orignzb=None): vals = [] @@ -2074,3 +2121,45 @@ def IssueTitleCheck(issuetitle, watchcomic_split, splitit, splitst, issue_firstw "status": 'ok'}) return vals return + +def generate_id(nzbprov, link): + logger.fdebug('[' + nzbprov + '] link: ' + str(link)) + if nzbprov == 'experimental': + #id is located after the /download/ portion + url_parts = urlparse.urlparse(link) + path_parts = url_parts[2].rpartition('/') + nzbtempid = path_parts[0].rpartition('/') + nzblen = len(nzbtempid) + nzbid = nzbtempid[nzblen-1] + elif nzbprov == '32P': + #32P just has the torrent id stored. + nzbid = link + elif nzbprov == 'KAT': + if 'http' not in link: + nzbid = link + else: + #for users that already have the cache in place. 
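+ #a cached KAT entry is a full http url ending in .torrent; the id is its basename with the extension stripped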
+ url_parts = urlparse.urlparse(link) + path_parts = url_parts[2].rpartition('/') + nzbtempid = path_parts[2] + nzbid = re.sub('.torrent', '', nzbtempid).rstrip() + elif nzbprov == 'nzb.su': + url_parts = urlparse.urlparse(link) + path_parts = url_parts[2].rpartition('/') + nzbid = re.sub('.nzb&','', path_parts[2]).strip() + elif nzbprov == 'dognzb': + url_parts = urlparse.urlparse(link) + path_parts = url_parts[2].rpartition('/') + nzbid = path_parts[0].rsplit('/',1)[1] + elif nzbprov == 'newznab': + #if in format of http://newznab/getnzb/.nzb&i=1&r=apikey + tmpid = urlparse.urlparse(link)[4] #param 4 is the query string from the url. + if tmpid == '' or tmpid is None: + nzbid = os.path.splitext(link)[0].rsplit('/', 1)[1] + else: + # for the geek in all of us... + st = tmpid.find('&id') + end = tmpid.find('&',st+1) + nzbid = re.sub('&id=','', tmpid[st:end]).strip() + + return nzbid diff --git a/mylar/updater.py b/mylar/updater.py index 825b261b..56092048 100755 --- a/mylar/updater.py +++ b/mylar/updater.py @@ -228,23 +228,26 @@ def dbUpdate(ComicIDList=None, calledfrom=None): logger.info("In the process of converting the data to CV, I changed the status of " + str(icount) + " issues.") issuesnew = myDB.select('SELECT * FROM issues WHERE ComicID=? AND Status is NULL', [ComicID]) - if mylar.ANNUALS_ON: - annualsnew = myDB.select('SELECT * FROM annuals WHERE ComicID=? AND Status is NULL', [ComicID]) - newiss = [] if mylar.AUTOWANT_UPCOMING: newstatus = "Wanted" else: newstatus = "Skipped" + newiss = [] + for iss in issuesnew: newiss.append({"IssueID": iss['IssueID'], "Status": newstatus, "Annual": False}) - for ann in annualsnew: - newiss.append({"IssueID": iss['IssueID'], - "Status": newstatus, - "Annual": True}) + + if mylar.ANNUALS_ON: + annualsnew = myDB.select('SELECT * FROM annuals WHERE ComicID=? AND Status is NULL', [ComicID]) + + for ann in annualsnew: + newiss.append({"IssueID": iss['IssueID'], + "Status": newstatus, + "Annual": True}) if len(newiss) > 0: for newi in newiss: @@ -408,10 +411,13 @@ def upcoming_update(ComicID, ComicName, IssueNumber, IssueDate, forcecheck=None, if mylar.FAILED_DOWNLOAD_HANDLING: if mylar.FAILED_AUTO: values = { "Status": "Wanted" } + newValue['Status'] = "Wanted" else: values = { "Status": "Failed" } + newValue['Status'] = "Failed" else: values = { "Status": "Skipped" } + newValue['Status'] = "Skipped" else: values = { "Status": "Skipped"} newValue['Status'] = "Skipped" @@ -466,7 +472,7 @@ def upcoming_update(ComicID, ComicName, IssueNumber, IssueDate, forcecheck=None, else: myDB.upsert("issues", values, control) - if any( [og_status == 'Downloaded', og_status == 'Archived', og_status == 'Snatched', og_status == 'Wanted'] ): + if any( [og_status == 'Downloaded', og_status == 'Archived', og_status == 'Snatched', og_status == 'Wanted', newValue['Status'] == 'Wanted'] ): logger.fdebug('updating Pull-list to reflect status.') downstats = {"Status": og_status, "ComicID": issuechk['ComicID'], @@ -828,6 +834,7 @@ def forceRescan(ComicID,archive=None,module=None): issID_to_ignore = [] issID_to_ignore.append(str(ComicID)) issID_to_write = [] + ANNComicID = None while (fn < fccnt): haveissue = "no" @@ -1048,7 +1055,7 @@ def forceRescan(ComicID,archive=None,module=None): #this will detect duplicate filenames within the same directory. 
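#an annual is only flagged as a duplicate when both its issue digits and its issueid match an already-seen file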
                        for di in annualdupechk:
-                            if di['fcdigit'] == fcdigit:
+                            if di['fcdigit'] == fcdigit and di['issueid'] == reann['IssueID']:
                                 #base off of config - base duplication keep on filesize or file-type (or both)
                                 logger.fdebug('[DUPECHECK] Duplicate issue detected [' + di['filename'] + '] [' + tmpfc['ComicFilename'] + ']')
                                 # mylar.DUPECONSTRAINT = 'filesize' / 'filetype-cbr' / 'filetype-cbz'
@@ -1174,11 +1181,13 @@ def forceRescan(ComicID,archive=None,module=None):
 
                 issID_to_ignore.append(str(iss_id))
 
-                if 'annual' in temploc.lower():
+                if ANNComicID:
+#                if 'annual' in temploc.lower():
                     #issID_to_write.append({"tableName": "annuals",
                     #                       "newValueDict": newValueDict,
                     #                       "controlValueDict": controlValueDict})
                     myDB.upsert("annuals", newValueDict, controlValueDict)
+                    ANNComicID = None
                 else:
                     #issID_to_write.append({"tableName": "issues",
                     #                       "valueDict": newValueDict,
@@ -1315,10 +1324,11 @@ def forceRescan(ComicID,archive=None,module=None):
     logger.fdebug(module + ' havefiles: ' + str(havefiles))
     logger.fdebug(module + ' I have changed the status of ' + str(archivedissues) + ' issues to a status of Archived, as I now cannot locate them in the series directory.')
 
-
+    #combined total, for display purposes only.
     combined_total = iscnt + anncnt #(rescan['Total'] + anncnt)
 
     #let's update the total count of comics that was found.
+    #store just the total of issues, since annuals get tracked separately.
     controlValueStat = {"ComicID":    rescan['ComicID']}
     newValueStat = {"Have":            havefiles,
                     "Total":           iscnt}
diff --git a/mylar/webserve.py b/mylar/webserve.py
index fa8870db..7f0a0ca8 100755
--- a/mylar/webserve.py
+++ b/mylar/webserve.py
@@ -3222,8 +3222,12 @@ class WebInterface(object):
                     "enable_torrent_search" : helpers.checked(mylar.ENABLE_TORRENT_SEARCH),
                     "enable_kat" : helpers.checked(mylar.ENABLE_KAT),
                     "enable_32p" : helpers.checked(mylar.ENABLE_32P),
-                    "passkey_32p" : mylar.PASSKEY_32P,
+                    "legacymode_32p" : helpers.radio(mylar.MODE_32P, 0),
+                    "authmode_32p" : helpers.radio(mylar.MODE_32P, 1),
                     "rssfeed_32p" : mylar.RSSFEED_32P,
+                    "passkey_32p" : mylar.PASSKEY_32P,
+                    "username_32p" : mylar.USERNAME_32P,
+                    "password_32p" : mylar.PASSWORD_32P,
                     "snatchedtorrent_notify" : helpers.checked(mylar.SNATCHEDTORRENT_NOTIFY),
                     "destination_dir" : mylar.DESTINATION_DIR,
                     "create_folders" : helpers.checked(mylar.CREATE_FOLDERS),
@@ -3253,7 +3257,6 @@
                     "cvinfo" : helpers.checked(mylar.CVINFO),
                     "lowercase_filenames" : helpers.checked(mylar.LOWERCASE_FILENAMES),
                     "syno_fix" : helpers.checked(mylar.SYNO_FIX),
-                    "cvapifix" : helpers.checked(mylar.CVAPIFIX),
                     "prowl_enabled": helpers.checked(mylar.PROWL_ENABLED),
                     "prowl_onsnatch": helpers.checked(mylar.PROWL_ONSNATCH),
                     "prowl_keys": mylar.PROWL_KEYS,
@@ -3489,11 +3492,11 @@
                        nzbget_host=None, nzbget_port=None, nzbget_username=None, nzbget_password=None, nzbget_category=None, nzbget_priority=None, nzbget_directory=None, usenet_retention=None, nzbsu=0, nzbsu_uid=None, nzbsu_apikey=None, dognzb=0, dognzb_apikey=None, newznab=0, newznab_host=None, newznab_name=None, newznab_apikey=None, newznab_uid=None, newznab_enabled=0, raw=0, raw_provider=None, raw_username=None, raw_password=None, raw_groups=None, experimental=0, check_folder=None, enable_check_folder=0,
-                       enable_meta=0, cmtagger_path=None, ct_tag_cr=0, ct_tag_cbl=0, ct_cbz_overwrite=0, unrar_cmd=None, enable_rss=0, rss_checkinterval=None, failed_download_handling=0, failed_auto=0, enable_torrent_search=0, enable_kat=0, enable_32p=0, passkey_32p=None, rssfeed_32p=None, 
snatchedtorrent_notify=0, + enable_meta=0, cmtagger_path=None, ct_tag_cr=0, ct_tag_cbl=0, ct_cbz_overwrite=0, unrar_cmd=None, enable_rss=0, rss_checkinterval=None, failed_download_handling=0, failed_auto=0, enable_torrent_search=0, enable_kat=0, enable_32p=0, mode_32p=0, rssfeed_32p=None, passkey_32p=None, username_32p=None, password_32p=None, snatchedtorrent_notify=0, enable_torrents=0, minseeds=0, torrent_local=0, local_watchdir=None, torrent_seedbox=0, seedbox_watchdir=None, seedbox_user=None, seedbox_pass=None, seedbox_host=None, seedbox_port=None, prowl_enabled=0, prowl_onsnatch=0, prowl_keys=None, prowl_priority=None, nma_enabled=0, nma_apikey=None, nma_priority=0, nma_onsnatch=0, pushover_enabled=0, pushover_onsnatch=0, pushover_apikey=None, pushover_userkey=None, pushover_priority=None, boxcar_enabled=0, boxcar_onsnatch=0, boxcar_token=None, pushbullet_enabled=0, pushbullet_apikey=None, pushbullet_deviceid=None, pushbullet_onsnatch=0, - preferred_quality=0, move_files=0, rename_files=0, add_to_csv=1, cvinfo=0, lowercase_filenames=0, folder_format=None, file_format=None, enable_extra_scripts=0, extra_scripts=None, enable_pre_scripts=0, pre_scripts=None, post_processing=0, syno_fix=0, search_delay=None, chmod_dir=0777, chmod_file=0660, cvapifix=0, + preferred_quality=0, move_files=0, rename_files=0, add_to_csv=1, cvinfo=0, lowercase_filenames=0, folder_format=None, file_format=None, enable_extra_scripts=0, extra_scripts=None, enable_pre_scripts=0, pre_scripts=None, post_processing=0, syno_fix=0, search_delay=None, chmod_dir=0777, chmod_file=0660, tsab=None, destination_dir=None, create_folders=1, replace_spaces=0, replace_char=None, use_minsize=0, minsize=None, use_maxsize=0, maxsize=None, autowant_all=0, autowant_upcoming=0, comic_cover_local=0, zero_level=0, zero_level_n=None, interface=None, dupeconstraint=None, **kwargs): mylar.COMICVINE_API = comicvine_api mylar.HTTP_HOST = http_host @@ -3569,8 +3572,11 @@ class WebInterface(object): mylar.ENABLE_TORRENT_SEARCH = int(enable_torrent_search) mylar.ENABLE_KAT = int(enable_kat) mylar.ENABLE_32P = int(enable_32p) - mylar.PASSKEY_32P = passkey_32p + mylar.MODE_32P = int(mode_32p) mylar.RSSFEED_32P = rssfeed_32p + mylar.PASSKEY_32P = passkey_32p + mylar.USERNAME_32P = username_32p + mylar.PASSWORD_32P = password_32p mylar.SNATCHEDTORRENT_NOTIFY = int(snatchedtorrent_notify) mylar.PREFERRED_QUALITY = int(preferred_quality) mylar.MOVE_FILES = move_files @@ -3583,7 +3589,6 @@ class WebInterface(object): mylar.CVINFO = cvinfo mylar.LOWERCASE_FILENAMES = lowercase_filenames mylar.SYNO_FIX = syno_fix - mylar.CVAPIFIX = cvapifix mylar.PROWL_ENABLED = prowl_enabled mylar.PROWL_ONSNATCH = prowl_onsnatch mylar.PROWL_KEYS = prowl_keys @@ -3968,4 +3973,3 @@ class WebInterface(object): read = readinglist.Readinglist() threading.Thread(target=read.syncreading).start() syncfiles.exposed = True - diff --git a/mylar/weeklypull.py b/mylar/weeklypull.py index 63546a2b..b9ee5cbb 100755 --- a/mylar/weeklypull.py +++ b/mylar/weeklypull.py @@ -725,7 +725,7 @@ def pullitcheck(comic1off_name=None, comic1off_id=None, forcecheck=None, futurep altvalues = loaditup(watchcomic, comicid[cnt], altissuenum, chktype) if altvalues == 'no results': logger.fdebug('No alternate Issue numbering - something is probably wrong somewhere.') - pass + break validcheck = checkthis(altvalues[0]['issuedate'], altvalues[0]['status'], usedate) if validcheck == False:
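
A note on the id handling introduced above: every provider that generate_id() covers encodes the id either in the link's path or in its query string. The standalone sketch below distills that mapping; the provider names mirror the patch, but extract_id() and the example URL are hypothetical and not part of Mylar itself.

    # Sketch of the per-provider id extraction that generate_id() performs.
    # extract_id() and the example URL are made up for illustration.
    try:
        from urlparse import urlparse, parse_qs       # Python 2, as the patch uses
    except ImportError:
        from urllib.parse import urlparse, parse_qs   # Python 3
    import os
    import re

    def extract_id(provider, link):
        parts = urlparse(link)
        if provider == 'nzb.su':
            # id is the last path segment, minus any trailing '.nzb&' noise
            return re.sub(r'\.nzb&', '', parts[2].rpartition('/')[2]).strip()
        elif provider == 'newznab':
            qs = parse_qs(parts[4])    # parts[4] is the query string
            if 'id' in qs:
                return qs['id'][0]     # .../api?t=get&id=12345&apikey=... style
            # .../getnzb/12345.nzb style: drop the extension, keep the last segment
            return os.path.splitext(parts[2])[0].rpartition('/')[2]
        # torrent providers (32P/KAT) already store the bare id
        return link

    print(extract_id('newznab', 'http://indexer.example/api?t=get&id=12345&apikey=abc'))
    # -> 12345

For the newznab example this prints 12345, the same value the patch's own '&id' scan would pull out of that query string.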