From c510860c466b4df7adafe8e3db53f65abc6dc0d9 Mon Sep 17 00:00:00 2001 From: evilhero Date: Wed, 18 Jun 2014 15:58:19 -0400 Subject: [PATCH] FIX: (#746) updated autoProcessComics.py / ComicRN.py, which will now send proper completion messages to clients, FIX: (#752) refresh series will now test if a series is 'out of whack' with its numerical issue count (ie. 5/4) or has no issue data due to a bad refresh / api maxing out, and will adjust its processing to accommodate either, IMP: (#750) Added ComicVine API Checker which will check API counts at regular intervals to inform/warn users of usage, as well as adding an on-screen display of API hits / mins used at the bottom of every page (refreshing/reloading pages will update counts), FIX: (#747) EOL normalization (dos2unix) on search.py - removed classes & exceptions modules as they are not being used, IMP: (#747) Skip processing issues with an invalid store date & issue date (thanks rupaschomaker), FIX: Removed str() conversions when searching/logging torrents, as they were causing ASCII errors especially with KAT, IMP: Added [META-TAGGING] to logging for the meta-tagging module, IMP: Added ability in GUI to select CR or Cbl tags (or both) when writing metadata to cbz files, IMP: Improved support/usage with ComicTagger v1.1.15, which allows for personal CV API Key usage - if supplied to Mylar, it will be used when tagging with ComicTagger, IMP: Added Manual Search option to allow for individual searches of issues without changing initial status. --- data/css/style.css | 2 + data/interfaces/default/base.html | 3 + data/interfaces/default/comicdetails.html | 15 +- data/interfaces/default/config.html | 13 +- data/interfaces/default/images/search.png | Bin 0 -> 321 bytes mylar/__init__.py | 25 +- mylar/classes.py | 130 - mylar/cmtagmylar.py | 147 +- mylar/cv.py | 8 +- mylar/exceptions.py | 41 - mylar/helpers.py | 61 +- mylar/mb.py | 7 +- mylar/rsscheck.py | 22 +- mylar/search.py | 3053 +++++++++++---------- mylar/updater.py | 155 +- mylar/webserve.py | 204 +- post-processing/autoProcessComics.py | 4 +- post-processing/nzbget/ComicRN.py | 4 +- post-processing/sabnzbd/ComicRN.py | 4 +- 19 files changed, 1957 insertions(+), 1941 deletions(-) create mode 100755 data/interfaces/default/images/search.png delete mode 100755 mylar/classes.py delete mode 100755 mylar/exceptions.py diff --git a/data/css/style.css b/data/css/style.css index 9cd53507..fad3dca8 100755 --- a/data/css/style.css +++ b/data/css/style.css @@ -37,6 +37,7 @@ body { font:13px/1.231 sans-serif; *font-size:small; } select, input, textarea, button { font:99% sans-serif; } pre, code, kbd, samp { font-family: monospace, sans-serif; } + html { overflow-y: scroll; } a:hover, a:active { outline: none; } ul, ol { margin-left: 2em; } @@ -160,6 +161,7 @@ table#artist_table td#album { vertical-align: middle; text-align: left; min-widt table#artist_table td#have { vertical-align: middle; } div#paddingheader { padding-top: 48px; font-size: 24px; font-weight: bold; text-align: center; } +div#paddingheadertitle { padding-top: 24px; font-size: 24px; font-weight: bold; text-align: center; } div#nopaddingheader { font-size: 24px; font-weight: bold; text-align: center; } table#issue_table { background-color: grey; width: 100%; padding: 10px; } diff --git a/data/interfaces/default/base.html b/data/interfaces/default/base.html index 462cdce1..8a427ac4 100755 --- a/data/interfaces/default/base.html +++ b/data/interfaces/default/base.html @@ -1,6 +1,7 @@ <% import mylar from mylar import version + from mylar.helpers import cvapi_check %> @@ -95,6
+96,8 @@ %if version.MYLAR_VERSION != 'master': (${version.MYLAR_VERSION}) %endif +
+ API Usage: ${cvapi_check(True)} Back to top diff --git a/data/interfaces/default/comicdetails.html b/data/interfaces/default/comicdetails.html index c71938ee..246be201 100644 --- a/data/interfaces/default/comicdetails.html +++ b/data/interfaces/default/comicdetails.html @@ -33,13 +33,12 @@ loading %endif
- - ${comic['ComicName']} (${comic['ComicYear']}) + ${comic['ComicName']} (${comic['ComicYear']}) %if comic['Status'] == 'Loading':

(Comic information is currently being loaded)

%endif
- +
@@ -363,12 +362,13 @@ %endif + %if issue['Status'] == 'Skipped' or issue['Status'] == 'Ignored': - + %elif (issue['Status'] == 'Wanted'): %elif (issue['Status'] == 'Snatched'): - + %elif (issue['Status'] == 'Downloaded'): <% @@ -384,7 +384,7 @@ %endif %else: - + %endif @@ -479,6 +479,7 @@ <% amode = 'want_ann' %> + @@ -543,7 +544,7 @@ data: { resources: "volume", format: "json", - api_key: "583939a3df0a25fc4e8b7a29934a13078002dc27", + api_key: "", query: request.term }, success: function( data ) { diff --git a/data/interfaces/default/config.html b/data/interfaces/default/config.html index 85489675..31a0c89d 100755 --- a/data/interfaces/default/config.html +++ b/data/interfaces/default/config.html @@ -609,10 +609,9 @@
- Metadata Tagging + Metadata Tagging ComicTagger and configparser are required
- You need to have ComicTagger and configparser installed
@@ -620,6 +619,16 @@ If left blank, will assume it's in root of mylar
+
+ +
+
+ +
+
+
If a ComicVine API Key is specified, it will be used with ComicTagger
+
Writing each type of metadata will increase the API count accordingly
+
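The config.html hunk above adds the CR / Cbl tag checkboxes and warns that each metadata type written costs an extra ComicVine API hit. For context, here is a minimal standalone sketch of the 15-minute usage window this patch introduces as cvapi_check() in mylar/helpers.py and polls before each ComicVine request (see the cv.py, mb.py and cmtagmylar.py hunks later in this patch). The module-level globals and print() stand in for Mylar's config and logger, so read it as an illustration of the technique rather than the verbatim patch code.

import datetime

CVAPI_COUNT = 0      # requests made inside the current window
CVAPI_TIME = None    # start of the current 15-minute window
CVAPI_MAX = 200      # ComicVine's per-15-minute request limit

def cvapi_check(web=None):
    # Reset the counter once the window expires; warn when over the limit.
    # With web=True (the page-footer call), return the display string instead.
    global CVAPI_COUNT, CVAPI_TIME
    right_now = datetime.datetime.now()
    if CVAPI_TIME is None:
        CVAPI_TIME = right_now
    mins = (right_now - CVAPI_TIME).total_seconds() / 60.0
    if mins > 15:
        CVAPI_COUNT = 0          # window expired - restart the count
        CVAPI_TIME = right_now
    elif CVAPI_COUNT > CVAPI_MAX:
        print('API limit hit with %.2f minutes left in the window' % (15 - mins))
    if web:
        return '%s hits / %.2f minutes' % (CVAPI_COUNT, mins)

# Call pattern used around every ComicVine request in the hunks below:
#   if CVAPI_COUNT == 0 or CVAPI_COUNT >= 200: cvapi_check()
#   ...perform the request...
#   CVAPI_COUNT += 1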
diff --git a/data/interfaces/default/images/search.png b/data/interfaces/default/images/search.png new file mode 100755 index 0000000000000000000000000000000000000000..38046cd6798de09edf78005c05183b05e4b7a7aa GIT binary patch literal 321 [321-byte PNG binary data omitted] diff --git a/mylar/__init__.py b/mylar/__init__.py - logger.info('Setting monitor on folder : ' + str(CHECK_FOLDER)) + logger.info('Enabling folder monitor for : ' + str(CHECK_FOLDER) + ' every ' + str(DOWNLOAD_SCAN_INTERVAL) + ' minutes.') #FolderMonitorScheduler.thread.start() SCHED.add_interval_job(helpers.checkFolder, minutes=int(DOWNLOAD_SCAN_INTERVAL)) else: diff --git a/mylar/classes.py b/mylar/classes.py deleted file mode 100755 index 85bd1736..00000000 --- a/mylar/classes.py +++ /dev/null @@ -1,130 +0,0 @@ -# This file is part of Mylar. -# -# Mylar is free software: you can redistribute it and/or modify -# it under the terms of the GNU General Public License as published by -# the Free Software Foundation, either version 3 of the License, or -# (at your option) any later version. -# -# Mylar is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU General Public License for more details. -# -# You should have received a copy of the GNU General Public License -# along with Mylar. If not, see . - -######################################### -## Stolen from Sick-Beard's classes.py ## -######################################### - -import mylar - -import urllib -import datetime - -from common import USER_AGENT - -class mylarURLopener(urllib.FancyURLopener): - version = USER_AGENT - -class AuthURLOpener(mylarURLopener): - """ - URLOpener class that supports http auth without needing interactive password entry. - If the provided username/password don't work it simply fails. - - user: username to use for HTTP auth - pw: password to use for HTTP auth - """ - def __init__(self, user, pw): - self.username = user - self.password = pw - - # remember if we've tried the username/password before - self.numTries = 0 - - # call the base class - urllib.FancyURLopener.__init__(self) - - def prompt_user_passwd(self, host, realm): - """ - Override this function and instead of prompting just give the - username/password that were provided when the class was instantiated. - """ - - # if this is the first try then provide a username/password - if self.numTries == 0: - self.numTries = 1 - return (self.username, self.password) - - # if we've tried before then return blank which cancels the request - else: - return ('', '') - - # this is pretty much just a hack for convenience - def openit(self, url): - self.numTries = 0 - return mylarURLopener.open(self, url) - -class SearchResult: - """ - Represents a search result from an indexer.
- """ - - def __init__(self): - self.provider = -1 - - # URL to the NZB/torrent file - self.url = "" - - # used by some providers to store extra info associated with the result - self.extraInfo = [] - - # quality of the release - self.quality = -1 - - # release name - self.name = "" - - def __str__(self): - - if self.provider == None: - return "Invalid provider, unable to print self" - - myString = self.provider.name + " @ " + self.url + "\n" - myString += "Extra Info:\n" - for extra in self.extraInfo: - myString += " " + extra + "\n" - return myString - -class NZBSearchResult(SearchResult): - """ - Regular NZB result with an URL to the NZB - """ - resultType = "nzb" - -class NZBDataSearchResult(SearchResult): - """ - NZB result where the actual NZB XML data is stored in the extraInfo - """ - resultType = "nzbdata" - -class TorrentSearchResult(SearchResult): - """ - Torrent result with an URL to the torrent - """ - resultType = "torrent" - -class Proper: - def __init__(self, name, url, date): - self.name = name - self.url = url - self.date = date - self.provider = None - self.quality = -1 - - self.tvdbid = -1 - self.season = -1 - self.episode = -1 - - def __str__(self): - return str(self.date)+" "+self.name+" "+str(self.season)+"x"+str(self.episode)+" of "+str(self.tvdbid) diff --git a/mylar/cmtagmylar.py b/mylar/cmtagmylar.py index a74b7f04..7269634b 100644 --- a/mylar/cmtagmylar.py +++ b/mylar/cmtagmylar.py @@ -15,9 +15,10 @@ from subprocess import CalledProcessError, check_output import mylar from mylar import logger +from mylar.helpers import cvapi_check def run (dirName, nzbName=None, issueid=None, manual=None, filename=None): - logger.fdebug('dirName:' + dirName) + logger.fdebug('[META-TAGGING] dirName:' + dirName) ## Set the directory in which comictagger and other external commands are located - IMPORTANT - ## # ( User may have to modify, depending on their setup, but these are some guesses for now ) @@ -40,8 +41,8 @@ def run (dirName, nzbName=None, issueid=None, manual=None, filename=None): if not os.path.isfile(unrar_cmd): unrar_cmd = "C:\Program Files (x86)\WinRAR\UnRAR.exe" if not os.path.isfile(unrar_cmd): - logger.fdebug('Unable to locate UnRAR.exe - make sure it is installed.') - logger.fdebug('Aborting meta-tagging.') + logger.fdebug('[META-TAGGING] Unable to locate UnRAR.exe - make sure it is installed.') + logger.fdebug('[META-TAGGING] Aborting meta-tagging.') return "fail" @@ -55,8 +56,8 @@ def run (dirName, nzbName=None, issueid=None, manual=None, filename=None): try: import configparser except ImportError: - logger.fdebug('configparser not found on system. Please install manually in order to write metadata') - logger.fdebug('continuing with PostProcessing, but I am not using metadata.') + logger.fdebug('[META-TAGGING] configparser not found on system. Please install manually in order to write metadata') + logger.fdebug('[META-TAGGING] continuing with PostProcessing, but I am not using metadata.') return "fail" #set this to the lib path (ie. 
'/lib') @@ -71,9 +72,9 @@ def run (dirName, nzbName=None, issueid=None, manual=None, filename=None): file_conversion = True file_extension_fixing = True if not os.path.exists( unrar_cmd ): - logger.fdebug('WARNING: cannot find the unrar command.') - logger.fdebug('File conversion and extension fixing not available') - logger.fdebug('You probably need to edit this script, or install the missing tool, or both!') + logger.fdebug('[META-TAGGING] WARNING: cannot find the unrar command.') + logger.fdebug('[META-TAGGING] File conversion and extension fixing not available') + logger.fdebug('[META-TAGGING] You probably need to edit this script, or install the missing tool, or both!') file_conversion = False file_extension_fixing = False @@ -88,32 +89,32 @@ def run (dirName, nzbName=None, issueid=None, manual=None, filename=None): comicpath = os.path.join( downloadpath, issueid ) unrar_folder = os.path.join( comicpath , "unrard" ) - logger.fdebug('---directory settings.') - logger.fdebug('scriptname : ' + scriptname) - logger.fdebug('downloadpath : ' + downloadpath) - logger.fdebug('sabnzbdscriptpath : ' + sabnzbdscriptpath) - logger.fdebug('comicpath : ' + comicpath) - logger.fdebug('unrar_folder : ' + unrar_folder) - logger.fdebug('Running the Post-SabNZBd/Mylar script') + logger.fdebug('[META-TAGGING] ---directory settings.') + logger.fdebug('[META-TAGGING] scriptname : ' + scriptname) + logger.fdebug('[META-TAGGING] downloadpath : ' + downloadpath) + logger.fdebug('[META-TAGGING] sabnzbdscriptpath : ' + sabnzbdscriptpath) + logger.fdebug('[META-TAGGING] comicpath : ' + comicpath) + logger.fdebug('[META-TAGGING] unrar_folder : ' + unrar_folder) + logger.fdebug('[META-TAGGING] Running the Post-SabNZBd/Mylar script') if os.path.exists( comicpath ): shutil.rmtree( comicpath ) - logger.fdebug('attempting to create directory @: ' + str(comicpath)) + logger.fdebug('[META-TAGGING] Attempting to create directory @: ' + str(comicpath)) try: os.makedirs(comicpath) except OSError: raise - logger.fdebug('created directory @ : ' + str(comicpath)) - logger.fdebug('filename is : ' + str(filename)) + logger.fdebug('[META-TAGGING] Created directory @ : ' + str(comicpath)) + logger.fdebug('[META-TAGGING] Filename is : ' + str(filename)) if filename is None: filename_list = glob.glob( os.path.join( downloadpath, "*.cbz" ) ) filename_list.extend( glob.glob( os.path.join( downloadpath, "*.cbr" ) ) ) fcount = 1 for f in filename_list: if fcount > 1: - logger.fdebug('More than one cbr/cbz within path, performing Post-Process on first file detected: ' + f) + logger.fdebug('[META-TAGGING] More than one cbr/cbz within path, performing Post-Process on first file detected: ' + f) break shutil.move( f, comicpath ) filename = f #just the filename itself @@ -128,10 +129,10 @@ def run (dirName, nzbName=None, issueid=None, manual=None, filename=None): if filename.endswith('.cbr'): f = os.path.join( comicpath, filename ) if zipfile.is_zipfile( f ): - logger.fdebug('zipfile detected') + logger.fdebug('[META-TAGGING] zipfile detected') base = os.path.splitext( f )[0] shutil.move( f, base + ".cbz" ) - logger.fdebug('{0}: renaming {1} to be a cbz'.format( scriptname, os.path.basename( f ) )) + logger.fdebug('[META-TAGGING] {0}: renaming {1} to be a cbz'.format( scriptname, os.path.basename( f ) )) if file_extension_fixing: if filename.endswith('.cbz'): @@ -146,7 +147,7 @@ def run (dirName, nzbName=None, issueid=None, manual=None, filename=None): if not "is not RAR archive" in rar_test_cmd_output: base = os.path.splitext( f )[0] 
shutil.move( f, base + ".cbr" ) - logger.fdebug('{0}: renaming {1} to be a cbr'.format( scriptname, os.path.basename( f ) )) + logger.fdebug('[META-TAGGING] {0}: renaming {1} to be a cbr'.format( scriptname, os.path.basename( f ) )) # Now rename all CBR files to RAR if filename.endswith('.cbr'): @@ -159,7 +160,7 @@ def run (dirName, nzbName=None, issueid=None, manual=None, filename=None): ## Changes any cbr files to cbz files for insertion of metadata ## if file_conversion: f = os.path.join( comicpath, filename ) - logger.fdebug('{0}: converting {1} to be zip format'.format( scriptname, os.path.basename( f ) )) + logger.fdebug('[META-TAGGING] {0}: converting {1} to be zip format'.format( scriptname, os.path.basename( f ) )) basename = os.path.splitext( f )[0] zipname = basename + ".cbz" @@ -168,17 +169,17 @@ def run (dirName, nzbName=None, issueid=None, manual=None, filename=None): os.chdir( unrar_folder ) # Extract and zip up - logger.fdebug('{0}: Comicpath is ' + baserar) #os.path.join(comicpath,basename)) - logger.fdebug('{0}: Unrar is ' + unrar_folder ) + logger.fdebug('[META-TAGGING] {0}: Comicpath is ' + baserar) #os.path.join(comicpath,basename)) + logger.fdebug('[META-TAGGING] {0}: Unrar is ' + unrar_folder ) try: #subprocess.Popen( [ unrar_cmd, "x", os.path.join(comicpath,basename) ] ).communicate() output = subprocess.check_output( [ unrar_cmd, 'x', baserar ] ) #os.path.join(comicpath,basename) ] ) except CalledProcessError as e: if e.returncode == 3: - logger.fdebug('[Unrar Error 3] - Broken Archive.') + logger.fdebug('[META-TAGGING] [Unrar Error 3] - Broken Archive.') elif e.returncode == 1: - logger.fdebug('[Unrar Error 1] - No files to extract.') - logger.fdebug('Marking this as an incomplete download.') + logger.fdebug('[META-TAGGING] [Unrar Error 1] - No files to extract.') + logger.fdebug('[META-TAGGING] Marking this as an incomplete download.') return "unrar error" shutil.make_archive( basename, "zip", unrar_folder ) @@ -194,27 +195,27 @@ def run (dirName, nzbName=None, issueid=None, manual=None, filename=None): try: with open(f): pass except: - logger.fdebug('No zip file present') + logger.warn('[META-TAGGING] No zip file present') return "fail" base = os.path.splitext( f )[0] shutil.move( f, base + ".cbz" ) nfilename = base + ".cbz" else: - logger.fdebug('filename:' + filename) + logger.fdebug('[META-TAGGING] Filename:' + filename) nfilename = filename if os.path.isfile( nfilename ): - logger.fdebug('file exists in given location already.') + logger.fdebug('[META-TAGGING] File exists in given location already.') file_dir, file_n = os.path.split(nfilename) else: #remove the IssueID from the path file_dir = re.sub(issueid, '', comicpath) file_n = os.path.split(nfilename)[1] - logger.fdebug('converted directory: ' + str(file_dir)) - logger.fdebug('converted filename: ' + str(file_n)) - logger.fdebug('destination path: ' + os.path.join(dirName,file_n)) - logger.fdebug('dirName: ' + dirName) - logger.fdebug('absDirName: ' + os.path.abspath(dirName)) + logger.fdebug('[META-TAGGING] Converted directory: ' + str(file_dir)) + logger.fdebug('[META-TAGGING] Converted filename: ' + str(file_n)) + logger.fdebug('[META-TAGGING] Destination path: ' + os.path.join(dirName,file_n)) + logger.fdebug('[META-TAGGING] dirName: ' + dirName) + logger.fdebug('[META-TAGGING] absDirName: ' + os.path.abspath(dirName)) ## check comictagger version - less than 1.15.beta - take your chances. 
ctversion = subprocess.check_output( [ comictagger_cmd, "--version" ] ) ctend = ctversion.find(':') @@ -222,45 +223,75 @@ def run (dirName, nzbName=None, issueid=None, manual=None, filename=None): ctcheck = re.sub('\.', '', ctcheck).strip() if int(ctcheck) >= int('1115'): #(v1.1.15) if mylar.COMICVINE_API == mylar.DEFAULT_CVAPI: - logger.fdebug(ctversion[:ctend] + ' being used - no personal ComicVine API Key supplied. Take your chances.') + logger.fdebug('[META-TAGGING] ' + ctversion[:ctend] + ' being used - no personal ComicVine API Key supplied. Take your chances.') use_cvapi = "False" else: - logger.fdebug(ctversion[:ctend] + ' being used - using personal ComicVine API key supplied via mylar.') + logger.fdebug('[META-TAGGING] ' + ctversion[:ctend] + ' being used - using personal ComicVine API key supplied via mylar.') use_cvapi = "True" else: - logger.fdebug(ctversion[:ctend] + ' being used - personal ComicVine API key not supported in this version. Good luck.') + logger.fdebug('[META-TAGGING] ' + ctversion[:ctend] + ' being used - personal ComicVine API key not supported in this version. Good luck.') use_cvapi = "False" - if use_cvapi == "True": - ## Tag each CBZ, and move it back to original directory ## - if issueid is None: - subprocess.Popen( [ comictagger_cmd, "-s", "-t", "cr", "--cv-api-key", mylar.COMICVINE_API, "-f", "-o", "--verbose", "--nooverwrite", nfilename ] ).communicate() - subprocess.Popen( [ comictagger_cmd, "-s", "-t", "cbl", "--cv-api-key", mylar.COMICVINE_API, "-f", "-o", "--verbose", "--nooverwrite", nfilename ] ).communicate() - else: - subprocess.Popen( [ comictagger_cmd, "-s", "-t", "cr", "--cv-api-key", mylar.COMICVINE_API, "-o", "--id", issueid, "--verbose", "--nooverwrite", nfilename ] ).communicate() - subprocess.Popen( [ comictagger_cmd, "-s", "-t", "cbl", "--cv-api-key", mylar.COMICVINE_API, "-o", "--id", issueid, "--verbose", "--nooverwrite", nfilename ] ).communicate() - else: - if issueid is None: - subprocess.Popen( [ comictagger_cmd, "-s", "-t", "cr", "-f", "-o", "--verbose", "--nooverwrite", nfilename ] ).communicate() - subprocess.Popen( [ comictagger_cmd, "-s", "-t", "cbl", "-f", "-o", "--verbose", "--nooverwrite", nfilename ] ).communicate() - else: - subprocess.Popen( [ comictagger_cmd, "-s", "-t", "cr", "-o", "--id", issueid, "--verbose", "--nooverwrite", nfilename ] ).communicate() - subprocess.Popen( [ comictagger_cmd, "-s", "-t", "cbl", "-o", "--id", issueid, "--verbose", "--nooverwrite", nfilename ] ).communicate() + i = 1 + tagcnt = 0 + if mylar.CT_TAG_CR: + tagcnt = 1 + logger.info('[META-TAGGING] CR Tagging enabled.') + + if mylar.CT_TAG_CBL: + if not mylar.CT_TAG_CR: i = 2 #set the tag to start at cbl and end without doing another tagging. + tagcnt = 2 + logger.info('[META-TAGGING] CBL Tagging enabled.') + + if tagcnt == 0: + logger.warn('[META-TAGGING] You have metatagging enabled, but you have not selected the type(s) of metadata to write. Please fix and re-run manually') + return "fail" + + while ( i <= tagcnt ): + if i == 1: + tagtype = "cr" # CR meta-tagging cycle. + tagdisp = 'ComicRack tagging' + elif i == 2: + tagtype = "cbl" #Cbl meta-tagging cycle + tagdisp = 'Comicbooklover tagging' + logger.info('[META-TAGGING] ' + tagdisp + ' meta-tagging processing started.') + + #CV API Check here. 
+ if mylar.CVAPI_COUNT == 0 or mylar.CVAPI_COUNT >= 200: + cvapi_check() + + ## Tag each CBZ, and move it back to original directory ## + if use_cvapi == "True": + if issueid is None: + subprocess.Popen( [ comictagger_cmd, "-s", "-t", tagtype, "--cv-api-key", mylar.COMICVINE_API, "-f", "-o", "--verbose", "--nooverwrite", nfilename ] ).communicate() + else: + subprocess.Popen( [ comictagger_cmd, "-s", "-t", tagtype, "--cv-api-key", mylar.COMICVINE_API, "-o", "--id", issueid, "--verbose", "--nooverwrite", nfilename ] ).communicate() + logger.info('[META-TAGGING] ' + tagtype + ' meta-tagging complete') + #increment CV API counter. + mylar.CVAPI_COUNT +=1 + else: + if issueid is None: + subprocess.Popen( [ comictagger_cmd, "-s", "-t", tagtype, "-f", "-o", "--verbose", "--nooverwrite", nfilename ] ).communicate() + else: + subprocess.Popen( [ comictagger_cmd, "-s", "-t", tagtype, "-o", "--id", issueid, "--verbose", "--nooverwrite", nfilename ] ).communicate() + #increment CV API counter. + mylar.CVAPI_COUNT +=1 + i+=1 if os.path.exists(os.path.join(os.path.abspath(dirName),file_n)): - logger.fdebug('Unable to move - file already exists.') + logger.fdebug('[META-TAGGING] Unable to move - file already exists.') else: shutil.move( os.path.join(comicpath, nfilename), os.path.join(os.path.abspath(dirName),file_n)) #shutil.move( nfilename, os.path.join(os.path.abspath(dirName),file_n)) - logger.fdebug('Sucessfully moved file from temporary path.') + logger.fdebug('[META-TAGGING] Successfully moved file from temporary path.') i = 0 os.chdir( mylar.PROG_DIR ) while i < 10: try: - logger.fdebug('Attempting to remove: ' + comicpath) + logger.fdebug('[META-TAGGING] Attempting to remove: ' + comicpath) shutil.rmtree( comicpath ) except: time.sleep(.1) @@ -268,7 +299,7 @@ def run (dirName, nzbName=None, issueid=None, manual=None, filename=None): return os.path.join(os.path.abspath(dirName), file_n) i+=1 - logger.fdebug('Failed to remove temporary path : ' + str(comicpath)) + logger.fdebug('[META-TAGGING] Failed to remove temporary path : ' + str(comicpath)) return os.path.join(os.path.abspath(dirName),file_n) diff --git a/mylar/cv.py b/mylar/cv.py index 14421a4e..fad5e8a4 100755 --- a/mylar/cv.py +++ b/mylar/cv.py @@ -21,6 +21,8 @@ import string import urllib import lib.feedparser import mylar +from mylar.helpers import cvapi_check + from bs4 import BeautifulSoup as Soup def pulldetails(comicid,type,issueid=None,offset=1): @@ -52,9 +54,13 @@ def pulldetails(comicid,type,issueid=None,offset=1): elif type == 'storyarc': PULLURL = mylar.CVURL + 'story_arc/?api_key=' + str(comicapi) + '&format=xml&filter=id:' + str(issueid) + '&field_list=cover_date' - + #CV API Check here. + if mylar.CVAPI_COUNT == 0 or mylar.CVAPI_COUNT >= 200: + cvapi_check() #download the file: file = urllib2.urlopen(PULLURL) + #increment CV API counter. + mylar.CVAPI_COUNT +=1 #convert to string: data = file.read() #close file because we dont need it anymore: diff --git a/mylar/exceptions.py b/mylar/exceptions.py deleted file mode 100755 index 5e61f83c..00000000 --- a/mylar/exceptions.py +++ /dev/null @@ -1,41 +0,0 @@ -# This file is part of Mylar. -# -# Mylar is free software: you can redistribute it and/or modify -# it under the terms of the GNU General Public License as published by -# the Free Software Foundation, either version 3 of the License, or -# (at your option) any later version.
-# -# Mylar is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU General Public License for more details. -# -# You should have received a copy of the GNU General Public License -# along with Mylar. If not, see . - -def ex(e): - """ - Returns a string from the exception text if it exists. - """ - - # sanity check - if not e.args or not e.args[0]: - return "" - - e_message = e.args[0] - - # if fixStupidEncodings doesn't fix it then maybe it's not a string, in which case we'll try printing it anyway - if not e_message: - try: - e_message = str(e.args[0]) - except: - e_message = "" - - return e_message - - -class mylarException(Exception): - "Generic mylar Exception - should never be thrown, only subclassed" - -class NewzbinAPIThrottled(mylarException): - "Newzbin has throttled us, deal with it" diff --git a/mylar/helpers.py b/mylar/helpers.py index fc421e57..9c86068f 100755 --- a/mylar/helpers.py +++ b/mylar/helpers.py @@ -978,15 +978,30 @@ def LoadAlternateSearchNames(seriesname_alt, comicid): return Alternate_Names -def havetotals(): +def havetotals(refreshit=None): import db, logger comics = [] myDB = db.DBConnection() - comiclist = myDB.select('SELECT * from comics order by ComicSortName COLLATE NOCASE') + + if refreshit is None: + comiclist = myDB.select('SELECT * from comics order by ComicSortName COLLATE NOCASE') + else: + comiclist = [] + comicref = myDB.selectone("SELECT * from comics WHERE ComicID=?", [refreshit]).fetchone() + #refreshit is the ComicID passed from the Refresh Series to force/check numerical have totals + comiclist.append({"ComicID": comicref[0], + "Have": comicref[7], + "Total": comicref[8]}) for comic in comiclist: - issue = myDB.select("SELECT * FROM issues WHERE ComicID=?", [comic['ComicID']]) + issue = myDB.selectone("SELECT COUNT(*) as count FROM issues WHERE ComicID=?", [comic['ComicID']]).fetchone() + if issue is None: + if refreshit is not None: + logger.fdebug(str(comic['ComicID']) + ' has no issuedata available. 
Forcing complete Refresh/Rescan') + return True + else: + continue if mylar.ANNUALS_ON: annuals_on = True annual = myDB.selectone("SELECT COUNT(*) as count FROM annuals WHERE ComicID=?", [comic['ComicID']]).fetchone() @@ -1007,7 +1022,13 @@ continue if not haveissues: - havetracks = 0 + havetracks = 0 + + if refreshit is not None: + if haveissues > totalissues: + return True # if it's 5/4, send back to updater and don't restore previous status' + else: + return False # if it's 5/5 or 4/5, send back to updater and restore previous status' try: percent = (haveissues*100.0)/totalissues @@ -1051,6 +1072,38 @@ return comics +def cvapi_check(web=None): + import logger + if web is None: logger.fdebug('[ComicVine API] ComicVine API Check Running...') + if mylar.CVAPI_TIME is None: + c_date = now() + c_obj_date = datetime.datetime.strptime(c_date,"%Y-%m-%d %H:%M:%S") + mylar.CVAPI_TIME = c_obj_date + else: + c_obj_date = mylar.CVAPI_TIME + if web is None: logger.fdebug('[ComicVine API] API Start Monitoring Time (~15mins): ' + str(mylar.CVAPI_TIME)) + now_date = now() + n_date = datetime.datetime.strptime(now_date,"%Y-%m-%d %H:%M:%S") + if web is None: logger.fdebug('[ComicVine API] Time now: ' + str(n_date)) + absdiff = abs(n_date - c_obj_date) + mins = round(((absdiff.days * 24 * 60 * 60 + absdiff.seconds) / 60.0),2) + if mins < 15: + if web is None: logger.info('[ComicVine API] Comicvine API count now at : ' + str(mylar.CVAPI_COUNT) + ' in ' + str(mins) + ' minutes.') + if mylar.CVAPI_COUNT > 200: + cvleft = 15 - mins + if web is None: logger.warn('[ComicVine API] You have already hit your API limit with ' + str(cvleft) + ' minutes remaining. Best be slowing down, cowboy.') + elif mins > 15: + mylar.CVAPI_COUNT = 0 + c_date = now() + mylar.CVAPI_TIME = datetime.datetime.strptime(c_date,"%Y-%m-%d %H:%M:%S") + if web is None: logger.info('[ComicVine API] 15 minute API interval resetting [' + str(mylar.CVAPI_TIME) + ']. Resetting API count to : ' + str(mylar.CVAPI_COUNT)) + + if web is None: + return + else: + line = str(mylar.CVAPI_COUNT) + ' hits / ' + str(mins) + ' minutes' + return line + from threading import Thread class ThreadWithReturnValue(Thread): diff --git a/mylar/mb.py b/mylar/mb.py index defc5fcd..35f7d4b2 100755 --- a/mylar/mb.py +++ b/mylar/mb.py @@ -22,7 +22,7 @@ from xml.dom.minidom import parseString, Element import mylar from mylar import logger, db, cv -from mylar.helpers import multikeysort, replace_all, cleanName +from mylar.helpers import multikeysort, replace_all, cleanName, cvapi_check mb_lock = threading.Lock() @@ -39,6 +39,9 @@ def pullsearch(comicapi,comicquery,offset,explicit): PULLURL = mylar.CVURL + 'volumes?api_key=' + str(comicapi) + '&filter=name:' + u_comicquery + '&field_list=id,name,start_year,site_detail_url,count_of_issues,image,publisher,description&format=xml&offset=' + str(offset) # 2012/22/02 - CVAPI flipped back to offset instead of page #all these imports are standard on most modern python implementations +#CV API Check here. + if mylar.CVAPI_COUNT == 0 or mylar.CVAPI_COUNT >= 200: + cvapi_check() #download the file: try: file = urllib2.urlopen(PULLURL) @@ -46,6 +49,8 @@ logger.error('err : ' + str(err)) logger.error("There was a major problem retrieving data from ComicVine - on their end. You'll have to try again later most likely.") return + #increment CV API counter.
+ mylar.CVAPI_COUNT +=1 #convert to string: data = file.read() #close file because we dont need it anymore: diff --git a/mylar/rsscheck.py b/mylar/rsscheck.py index fc4634cf..4b5317cc 100755 --- a/mylar/rsscheck.py +++ b/mylar/rsscheck.py @@ -531,20 +531,20 @@ def torrentdbsearch(seriesname,issue,comicid=None,nzbprov=None): #0 holds the title/issue and format-type. while (i < len(torsplit)): #we'll rebuild the string here so that it's formatted accordingly to be passed back to the parser. - logger.fdebug('section(' + str(i) + '): ' + str(torsplit[i])) + logger.fdebug('section(' + str(i) + '): ' + torsplit[i]) #remove extensions titletemp = torsplit[i] - titletemp = re.sub('cbr', '', str(titletemp)) - titletemp = re.sub('cbz', '', str(titletemp)) - titletemp = re.sub('none', '', str(titletemp)) + titletemp = re.sub('cbr', '', titletemp) + titletemp = re.sub('cbz', '', titletemp) + titletemp = re.sub('none', '', titletemp) if i == 0: - rebuiltline = str(titletemp) + rebuiltline = titletemp else: - rebuiltline = rebuiltline + ' (' + str(titletemp) + ')' + rebuiltline = rebuiltline + ' (' + titletemp + ')' i+=1 - logger.fdebug('rebuiltline is :' + str(rebuiltline)) + logger.fdebug('rebuiltline is :' + rebuiltline) seriesname_mod = seriesname foundname_mod = torsplit[0] @@ -575,10 +575,10 @@ def torrentdbsearch(seriesname,issue,comicid=None,nzbprov=None): titleend = formatrem_torsplit[len(formatrem_seriesname):] titleend = re.sub('\-', '', titleend) #remove the '-' which is unnecessary #remove extensions - titleend = re.sub('cbr', '', str(titleend)) - titleend = re.sub('cbz', '', str(titleend)) - titleend = re.sub('none', '', str(titleend)) - logger.fdebug('titleend: ' + str(titleend)) + titleend = re.sub('cbr', '', titleend) + titleend = re.sub('cbz', '', titleend) + titleend = re.sub('none', '', titleend) + logger.fdebug('titleend: ' + titleend) sptitle = titleend.split() extra = '' diff --git a/mylar/search.py b/mylar/search.py index ee2578dc..04b8bca4 100755 --- a/mylar/search.py +++ b/mylar/search.py @@ -1,1523 +1,1530 @@ -# This file is part of Mylar. -# -# Mylar is free software: you can redistribute it and/or modify -# it under the terms of the GNU General Public License as published by -# the Free Software Foundation, either version 3 of the License, or -# (at your option) any later version. -# -# Mylar is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU General Public License for more details. -# -# You should have received a copy of the GNU General Public License -# along with Mylar. If not, see . 
- -from __future__ import division - -import mylar -from mylar import logger, db, updater, helpers, parseit, findcomicfeed, notifiers, rsscheck - -import lib.feedparser as feedparser -import urllib -import os, errno -import string -import sqlite3 as lite -import sys -import getopt -import re -import time -import urlparse -from xml.dom.minidom import parseString -import urllib2 -import email.utils -import datetime -from wsgiref.handlers import format_date_time - -def search_init(ComicName, IssueNumber, ComicYear, SeriesYear, Publisher, IssueDate, StoreDate, IssueID, AlternateSearch=None, UseFuzzy=None, ComicVersion=None, SARC=None, IssueArcID=None, mode=None, rsscheck=None, ComicID=None): - if ComicYear == None: ComicYear = '2014' - else: ComicYear = str(ComicYear)[:4] - if Publisher == 'IDW Publishing': Publisher = 'IDW' - logger.fdebug('Publisher is : ' + str(Publisher)) - if mode == 'want_ann': - logger.info("Annual issue search detected. Appending to issue #") - #anything for mode other than None indicates an annual. - ComicName = ComicName + " annual" - if AlternateSearch is not None and AlternateSearch != "None": - AlternateSearch = AlternateSearch + " annual" - - if IssueID is None: - #one-off the download. - print ("ComicName: " + ComicName) - print ("Issue: " + str(IssueNumber)) - print ("Year: " + str(ComicYear)) - print ("IssueDate:" + str(IssueDate)) - if SARC: - print ("Story-ARC issue!") - print ("Story-ARC: " + str(SARC)) - print ("IssueArcID: " + str(IssueArcID)) - - torprovider = [] - torp = 0 - logger.fdebug("Checking for torrent enabled.") - if mylar.ENABLE_TORRENT_SEARCH: #and mylar.ENABLE_TORRENTS: - if mylar.ENABLE_CBT: - torprovider.append('cbt') - torp+=1 - #print torprovider[0] - if mylar.ENABLE_KAT: - torprovider.append('kat') - torp+=1 - ##nzb provider selection## - ##'dognzb' or 'nzb.su' or 'experimental' - nzbprovider = [] - nzbp = 0 - if mylar.NZBSU == 1: - nzbprovider.append('nzb.su') - nzbp+=1 - if mylar.DOGNZB == 1: - nzbprovider.append('dognzb') - nzbp+=1 - # -------- - # Xperimental - if mylar.EXPERIMENTAL == 1: - nzbprovider.append('experimental') - nzbp+=1 - - newznabs = 0 - - newznab_hosts = [] - - if mylar.NEWZNAB == 1: - #if len(mylar.EXTRA_NEWZNABS > 0): - for newznab_host in mylar.EXTRA_NEWZNABS: - if newznab_host[4] == '1' or newznab_host[4] == 1: - newznab_hosts.append(newznab_host) - #if newznab_host[0] == newznab_host[1]: - # nzbprovider.append('newznab') - #else: - nzbprovider.append('newznab:' + str(newznab_host[0])) - newznabs+=1 - logger.fdebug("newznab name:" + str(newznab_host[0]) + " @ " + str(newznab_host[1])) - - logger.fdebug('newznab hosts: ' + str(newznab_hosts)) - logger.fdebug('nzbprovider: ' + str(nzbprovider)) - # -------- - logger.fdebug("there are : " + str(torp) + " torrent providers you have selected.") - torpr = torp - 1 - if torpr < 0: - torpr = -1 - providercount = int(nzbp + newznabs) - logger.fdebug("there are : " + str(providercount) + " nzb providers you have selected.") - logger.fdebug("Usenet Retention : " + str(mylar.USENET_RETENTION) + " days") - #nzbpr = providercount - 1 - #if nzbpr < 0: - # nzbpr == 0 - findit = 'no' - - totalproviders = providercount + torp - - if totalproviders == 0: - logger.error('[WARNING] You have ' + str(totalproviders) + ' search providers enabled. I need at least ONE provider to work. 
Aborting search.') - findit = "no" - nzbprov = None - return findit, nzbprov - - prov_order,newznab_info = provider_sequence(nzbprovider,torprovider,newznab_hosts) - # end provider order sequencing - logger.info('search provider order is ' + str(prov_order)) - - #fix for issue dates between Nov-Dec/(Jan-Feb-Mar) - IssDt = str(IssueDate)[5:7] - if IssDt == "12" or IssDt == "11" or IssDt == "01" or IssDt == "02" or IssDt == "03": - IssDateFix = IssDt - else: - IssDateFix = "no" - - searchcnt = 0 - i = 1 - - if rsscheck: - if mylar.ENABLE_RSS: - searchcnt = 1 # rss-only - else: - searchcnt = 0 # if it's not enabled, don't even bother. - else: - if mylar.ENABLE_RSS: - searchcnt = 2 # rss first, then api on non-matches - else: - searchcnt = 2 #set the searchcnt to 2 (api) - i = 2 #start the counter at api, so it will exit without running RSS - - while ( i <= searchcnt ): - #searchmodes: - # rss - will run through the built-cached db of entries - # api - will run through the providers via api (or non-api in the case of Experimental) - # the trick is if the search is done during an rss compare, it needs to exit when done. - # otherwise, the order of operations is rss feed check first, followed by api on non-results. - - if i == 1: searchmode = 'rss' #order of ops - this will be used first. - elif i == 2: searchmode = 'api' - - if findit == 'yes': - logger.fdebug('Found result on first run, exiting search module now.') - break - - logger.fdebug("Initiating Search via : " + str(searchmode)) - - #torprtmp = 0 # torprtmp = torpr - prov_count = 0 - - while (prov_count <= len(prov_order)-1): - #while (torprtmp <= torpr): #(torprtmp >=0 ): - newznab_host = None - if prov_order[prov_count] == 'cbt': - searchprov = 'CBT' - elif prov_order[prov_count] == 'kat': - searchprov = 'KAT' - elif 'newznab' in prov_order[prov_count]: - #this is for newznab - searchprov = 'newznab' - for nninfo in newznab_info: - if nninfo['provider'] == prov_order[prov_count]: - newznab_host = nninfo['info'] - if newznab_host is None: - logger.fdebug('there was an error - newznab information was blank and it should not be.') - else: - newznab_host = None - searchprov = prov_order[prov_count].lower() - - if searchmode == 'rss': - findit = NZB_SEARCH(ComicName, IssueNumber, ComicYear, SeriesYear, Publisher, IssueDate, StoreDate, searchprov, prov_count, IssDateFix, IssueID, UseFuzzy, newznab_host, ComicVersion=ComicVersion, SARC=SARC, IssueArcID=IssueArcID, RSS="yes", ComicID=ComicID) - if findit == 'yes': - logger.fdebug("findit = found!") - break - else: - if AlternateSearch is not None and AlternateSearch != "None": - chkthealt = AlternateSearch.split('##') - if chkthealt == 0: - AS_Alternate = AlternateSearch - loopit = len(chkthealt) - for calt in chkthealt: - AS_Alternate = re.sub('##','',calt) - logger.info(u"Alternate Search pattern detected...re-adjusting to : " + str(AS_Alternate) + " " + str(ComicYear)) - findit = NZB_SEARCH(AS_Alternate, IssueNumber, ComicYear, SeriesYear, Publisher, IssueDate, StoreDate, searchprov, prov_count, IssDateFix, IssueID, UseFuzzy, newznab_host, ComicVersion=ComicVersion, SARC=SARC, IssueArcID=IssueArcID, RSS="yes", ComicID=ComicID) - if findit == 'yes': - break - if findit == 'yes': break - - else: - findit = NZB_SEARCH(ComicName, IssueNumber, ComicYear, SeriesYear, Publisher, IssueDate, StoreDate, searchprov, prov_count, IssDateFix, IssueID, UseFuzzy, newznab_host, ComicVersion=ComicVersion, SARC=SARC, IssueArcID=IssueArcID, ComicID=ComicID) - if findit == 'yes': - logger.fdebug("findit = 
found!") - break - else: - if AlternateSearch is not None and AlternateSearch != "None": - chkthealt = AlternateSearch.split('##') - if chkthealt == 0: - AS_Alternate = AlternateSearch - loopit = len(chkthealt) - for calt in chkthealt: - AS_Alternate = re.sub('##','',calt) - logger.info(u"Alternate Search pattern detected...re-adjusting to : " + str(AS_Alternate) + " " + str(ComicYear)) - findit = NZB_SEARCH(AS_Alternate, IssueNumber, ComicYear, SeriesYear, Publisher, IssueDate, StoreDate, searchprov, prov_count, IssDateFix, IssueID, UseFuzzy, newznab_host, ComicVersion=ComicVersion, SARC=SARC, IssueArcID=IssueArcID, ComicID=ComicID) - if findit == 'yes': - break - if findit == 'yes': break - - if searchprov == 'newznab': - searchprov = newznab_host[0].rstrip() - logger.info('Could not find Issue ' + str(IssueNumber) + ' of ' + ComicName + '(' + str(SeriesYear) + ') using ' + str(searchprov)) - prov_count+=1 - #torprtmp+=1 #torprtmp-=1 - - if findit == 'yes': - return findit, searchprov - else: - logger.fdebug("Finished searching via : " + str(searchmode)) - i+=1 - - if findit == 'no': - logger.info('Issue not found. Status kept as Wanted.') - - return findit, 'None' - -def NZB_SEARCH(ComicName, IssueNumber, ComicYear, SeriesYear, Publisher, IssueDate, StoreDate, nzbprov, prov_count, IssDateFix, IssueID, UseFuzzy, newznab_host=None, ComicVersion=None, SARC=None, IssueArcID=None, RSS=None, ComicID=None): - - if nzbprov == 'nzb.su': - apikey = mylar.NZBSU_APIKEY - elif nzbprov == 'dognzb': - apikey = mylar.DOGNZB_APIKEY - elif nzbprov == 'experimental': - apikey = 'none' - elif nzbprov == 'newznab': - #updated to include Newznab Name now - name_newznab = newznab_host[0].rstrip() - host_newznab = newznab_host[1].rstrip() - apikey = newznab_host[2].rstrip() - if '#' in newznab_host[3].rstrip(): - catstart = newznab_host[3].find('#') - category_newznab = newznab_host[3][catstart+1:] - logger.fdebug('non-default Newznab category set to :' + str(category_newznab)) - else: - category_newznab = '7030' - logger.fdebug("using Newznab host of : " + str(name_newznab)) - - if RSS == "yes": - if 'newznab' in nzbprov: - tmpprov = name_newznab + '(' + nzbprov + ')' + ' [RSS]' - else: - tmpprov = str(nzbprov) + " [RSS]" - else: - if 'newznab' in nzbprov: - tmpprov = name_newznab + ' (' + nzbprov + ')' - else: - tmpprov = nzbprov - logger.info(u"Shhh be very quiet...I'm looking for " + ComicName + " issue: " + str(IssueNumber) + " (" + str(ComicYear) + ") using " + str(tmpprov)) - - #load in do not download db here for given series - #myDB = db.DBConnection() - #nodown = myDB.action('SELECT * FROM nzblog') - - if mylar.PREFERRED_QUALITY == 0: filetype = "" - elif mylar.PREFERRED_QUALITY == 1: filetype = ".cbr" - elif mylar.PREFERRED_QUALITY == 2: filetype = ".cbz" - - if mylar.SAB_PRIORITY: - if mylar.SAB_PRIORITY == "Default": sabpriority = "-100" - elif mylar.SAB_PRIORITY == "Low": sabpriority = "-1" - elif mylar.SAB_PRIORITY == "Normal": sabpriority = "0" - elif mylar.SAB_PRIORITY == "High": sabpriority = "1" - elif mylar.SAB_PRIORITY == "Paused": sabpriority = "-2" - else: - #if sab priority isn't selected, default to Normal (0) - sabpriority = "0" - - if mylar.NZBGET_PRIORITY: - if mylar.NZBGET_PRIORITY == "Default": nzbgetpriority = "0" - elif mylar.NZBGET_PRIORITY == "Low": nzbgetpriority = "-50" - elif mylar.NZBGET_PRIORITY == "Normal": nzbgetpriority = "0" - elif mylar.NZBGET_PRIORITY == "High": nzbgetpriority = "50" - #there's no priority for "paused", so set "Very Low" and deal with that later... 
- elif mylar.NZBGET_PRIORITY == "Paused": nzbgetpriority = "-100" - else: - #if sab priority isn't selected, default to Normal (0) - nzbgetpriority = "0" - - #UseFuzzy == 0: Normal - #UseFuzzy == 1: Remove Year - #UseFuzzy == 2: Fuzzy Year - # figure out what was missed via rss feeds and do a manual search via api - #tsc = int(tot-1) - -# findcomic = [] -# findcomiciss = [] -# findcount = 0 - ci = "" - comsearch = [] - isssearch = [] - comyear = str(ComicYear) - - #print ("-------SEARCH FOR MISSING------------------") - #ComicName is unicode - let's unicode and ascii it cause we'll be comparing filenames against it. - u_ComicName = ComicName.encode('ascii', 'ignore').strip() - findcomic = u_ComicName - # this should be called elsewhere..redudant code. - -# elif 'au' in IssueNumber.lower(): -# iss = re.sub("[^0-9]", "", IssueNumber) # get just the digits -# intIss = int(iss) * 1000 -# issue_except = 'AU' # if it contains AU, mark it as an exception (future dict possibly) -# elif 'ai' in IssueNumber.lower(): -# iss = re.sub("[^0-9]", "", IssueNumber) # get just the digits -# intIss = int(iss) * 1000 -# issue_except = 'AI' # if it contains AI, mark it as an exception (future dict possibly) -# else: -# iss = IssueNumber -# intIss = int(iss) * 1000 -# #issue_decimal = re.compile(r'[^\d.]+') -# #issue = issue_decimal.sub('', str(IssueNumber)) - #NEW --- - intIss = helpers.issuedigits(IssueNumber) - iss = IssueNumber - findcomiciss = iss - - #print ("we need : " + str(findcomic[findcount]) + " issue: #" + str(findcomiciss[findcount])) - cm1 = re.sub("[\/]", " ", findcomic) - # replace whitespace in comic name with %20 for api search - #cm = re.sub("\&", "%26", str(cm1)) - cm = re.sub("\\band\\b", "", cm1.lower()) # remove 'and' & '&' from the search pattern entirely (broader results, will filter out later) - cm = re.sub("\\bthe\\b", "", cm.lower()) # remove 'the' from the search pattern to accomodate naming differences - cm = re.sub(" ", "%20", str(cm)) - cm = re.sub("[\&\:\?\,]", "", str(cm)) - - #determine the amount of loops here - i = 0 - c_alpha = None - dsp_c_alpha = None - c_number = None - c_num_a4 = None - while i < len(findcomiciss): - #take first occurance of alpha in string and carry it through - if findcomiciss[i].isalpha(): - c_alpha = findcomiciss[i:].rstrip() - c_number = findcomiciss[:i].rstrip() - break - elif '.' in findcomiciss[i]: - c_number = findcomiciss[:i].rstrip() - c_num_a4 = findcomiciss[i+1:].rstrip() - #if decimal seperates numeric from alpha (ie - 7.INH) - #don't give calpha a value or else will seperate with a space further down - #assign it to dsp_c_alpha so that it can be displayed for debugging. - if not c_num_a4.isdigit(): - dsp_c_alpha = c_num_a4 - else: - c_number = str(c_number) + '.' + str(c_num_a4) - break - i+=1 - logger.fdebug("calpha/cnumber: " + str(dsp_c_alpha) + " / " + str(c_number)) - - if c_number is None: - c_number = findcomiciss # if it's None, means no special alphas or decimals - - if len(c_number) == 1: - cmloopit = 3 - elif len(c_number) == 2: - cmloopit = 2 - else: - cmloopit = 1 - - isssearch = str(findcomiciss) - comsearch = cm - origcmloopit = cmloopit - findcount = 1 # this could be a loop in the future possibly - - # ---- - - #print ("------RESULTS OF SEARCH-------------------") - findloop = 0 - foundcomic = [] - done = False - seperatealpha = "no" - #---issue problem - # if issue is '011' instead of '11' in nzb search results, will not have same - # results. '011' will return different than '11', as will '009' and '09'. 
- - while (findloop < findcount ): - comsrc = comsearch - while (cmloopit >= 1 ): - #if issue_except is None: issue_exc = '' - #else: issue_exc = issue_except - if done is True and seperatealpha == "no": - logger.fdebug("we should break out now - sucessful search previous") - findloop == 99 - break - # here we account for issue pattern variations - if seperatealpha == "yes": - isssearch = str(c_number) + "%20" + str(c_alpha) - - if cmloopit == 3: - comsearch = comsrc + "%2000" + str(isssearch) + "%20" + str(filetype) - issdig = '00' - elif cmloopit == 2: - comsearch = comsrc + "%200" + str(isssearch) + "%20" + str(filetype) - issdig = '0' - elif cmloopit == 1: - comsearch = comsrc + "%20" + str(isssearch) + "%20" + str(filetype) - issdig = '' - - mod_isssearch = str(issdig) + str(isssearch) - - #--- this is basically for RSS Feeds --- - logger.fdebug('RSS Check: ' + str(RSS)) - logger.fdebug('nzbprov: ' + str(nzbprov)) - logger.fdebug('comicid: ' + str(ComicID)) - if RSS == "yes" or nzbprov == 'CBT': - if nzbprov == 'CBT' or nzbprov == 'KAT': - cmname = re.sub("%20", " ", str(comsrc)) - logger.fdebug("Sending request to [" + str(nzbprov) + "] RSS for " + str(findcomic) + " : " + str(mod_isssearch)) - bb = rsscheck.torrentdbsearch(findcomic,mod_isssearch,ComicID,nzbprov) - rss = "yes" - if bb is not None: logger.fdebug("bb results: " + str(bb)) - else: - cmname = re.sub("%20", " ", str(comsrc)) - logger.fdebug("Sending request to RSS for " + str(findcomic) + " : " + str(mod_isssearch) + " (" + str(ComicYear) + ")") - bb = rsscheck.nzbdbsearch(findcomic,mod_isssearch,ComicID,nzbprov,ComicYear,ComicVersion) - rss = "yes" - if bb is not None: logger.fdebug("bb results: " + str(bb)) - #this is the API calls - else: - #CBT is redudant now since only RSS works - # - just getting it ready for when it's not redudant :) - if nzbprov == 'CBT': - # cmname = re.sub("%20", " ", str(comsrc)) - # logger.fdebug("Sending request to [CBT] RSS for " + str(cmname) + " : " + str(mod_isssearch)) - # bb = rsscheck.torrentdbsearch(cmname,mod_isssearch,ComicID) - # rss = "yes" - # if bb is not None: logger.fdebug("results: " + str(bb)) - bb = "no results" - elif nzbprov == 'KAT': - cmname = re.sub("%20", " ", str(comsrc)) - logger.fdebug("Sending request to [KAT] for " + str(cmname) + " : " + str(mod_isssearch)) - bb = rsscheck.torrents(pickfeed='KAT',seriesname=cmname,issue=mod_isssearch) - rss = "no" - #if bb is not None: logger.fdebug("results: " + str(bb)) - elif nzbprov != 'experimental': - if nzbprov == 'dognzb': - findurl = "https://api.dognzb.cr/api?t=search&q=" + str(comsearch) + "&o=xml&cat=7030" - elif nzbprov == 'nzb.su': - findurl = "https://api.nzb.su/api?t=search&q=" + str(comsearch) + "&o=xml&cat=7030" - elif nzbprov == 'newznab': - #let's make sure the host has a '/' at the end, if not add it. 
- if host_newznab[len(host_newznab)-1:len(host_newznab)] != '/': - host_newznab_fix = str(host_newznab) + "/" - else: host_newznab_fix = host_newznab - findurl = str(host_newznab_fix) + "api?t=search&q=" + str(comsearch) + "&o=xml&cat=" + str(category_newznab) - if nzbprov != 'nzbx': - # helper function to replace apikey here so we avoid logging it ;) - findurl = findurl + "&apikey=" + str(apikey) - logsearch = helpers.apiremove(str(findurl),'nzb') - logger.fdebug("search-url: " + str(logsearch)) - - ### IF USENET_RETENTION is set, honour it - ### For newznab sites, that means appending "&maxage=" on the URL - if mylar.USENET_RETENTION != None: - findurl = findurl + "&maxage=" + str(mylar.USENET_RETENTION) - - # Add a user-agent - #print ("user-agent:" + str(mylar.USER_AGENT)) - request = urllib2.Request(findurl) - request.add_header('User-Agent', str(mylar.USER_AGENT)) - opener = urllib2.build_opener() - - #set a delay between searches here. Default is for 30 seconds... - if mylar.SEARCH_DELAY == 'None' or mylar.SEARCH_DELAY is None: - pause_the_search = 1 * 60 # (it's in seconds) - elif str(mylar.SEARCH_DELAY).isdigit(): - pause_the_search = mylar.SEARCH_DELAY * 60 - else: - logger.info("Check Search Delay - invalid numerical given. Force-setting to 1 minute.") - pause_the_search = 1 * 60 - - #bypass for local newznabs - #remove the protocol string (http/https) - localbypass = False - if nzbprov == 'newznab': - if host_newznab_fix.startswith('http'): - hnc = host_newznab_fix.replace('http://', '') - elif host_newznab_fix.startswith('https'): - hnc = host_newznab_fix.replace('https://', '') - else: - hnc = host_newznab_fix - - if hnc[:3] == '10.' or hnc[:4] == '172.' or hnc[:4] == '192.' or hnc.startswith('localhost'): - localbypass = True - - if localbypass == False: - logger.info("pausing for " + str(pause_the_search) + " seconds before continuing to avoid hammering") - time.sleep(pause_the_search) - - try: - data = opener.open(request).read() - except Exception, e: - logger.warn('Error fetching data from %s: %s' % (nzbprov, e)) - data = False - #logger.info('data: ' + data) - if data: - bb = feedparser.parse(data) - else: - bb = "no results" - #logger.info('Search results:' + str(bb)) - try: - if bb['feed']['error']: - logger.error('[ERROR CODE: ' + str(bb['feed']['error']['code']) + '] ' + str(bb['feed']['error']['description'])) - bb = "no results" - except: - #logger.info('no errors on data retrieval...proceeding') - pass - elif nzbprov == 'experimental': - #bb = parseit.MysterBinScrape(comsearch[findloop], comyear) - bb = findcomicfeed.Startit(u_ComicName, isssearch, comyear, ComicVersion, IssDateFix) - # since the regexs in findcomicfeed do the 3 loops, lets force the exit after - #cmloopit == 1 - - done = False - foundc = "no" - log2file = "" - if bb == "no results": - pass - foundc = "no" - else: - for entry in bb['entries']: - logger.fdebug("checking search result: " + entry['title']) - if nzbprov != "experimental" and nzbprov != "dognzb": - if RSS == "yes": - comsize_b = entry['length'] - else: - #Experimental already has size constraints done. - if nzbprov == 'CBT': - comsize_b = entry['length'] - elif nzbprov == 'KAT': - comsize_b = entry['size'] - else: - tmpsz = entry.enclosures[0] - comsize_b = tmpsz['length'] - if comsize_b is None: - logger.fdebug('Size of file cannot be retrieved. Ignoring size-comparison and continuing.') - #comsize_b = 0 - else: - comsize_m = helpers.human_size(comsize_b) - logger.fdebug("size given as: " + str(comsize_m)) - #----size constraints. 
- #if it's not within size constaints - dump it now and save some time. - if mylar.USE_MINSIZE: - conv_minsize = helpers.human2bytes(mylar.MINSIZE + "M") - logger.fdebug("comparing Min threshold " + str(conv_minsize) + " .. to .. nzb " + str(comsize_b)) - if int(conv_minsize) > int(comsize_b): - logger.fdebug("Failure to meet the Minimum size threshold - skipping") - continue - if mylar.USE_MAXSIZE: - conv_maxsize = helpers.human2bytes(mylar.MAXSIZE + "M") - logger.fdebug("comparing Max threshold " + str(conv_maxsize) + " .. to .. nzb " + str(comsize_b)) - if int(comsize_b) > int(conv_maxsize): - logger.fdebug("Failure to meet the Maximium size threshold - skipping") - continue - -#---- date constaints. - # if the posting date is prior to the publication date, dump it and save the time. - #logger.info('entry' + str(entry)) - if nzbprov == 'experimental' or nzbprov =='CBT': - pubdate = entry['pubdate'] - else: - try: - pubdate = entry['updated'] - except: - try: - pubdate = entry['pubdate'] - except: - logger.fdebug('invalid date found. Unable to continue - skipping result.') - continue - #use store date instead of publication date for comparisons since publication date is usually +2 months - if StoreDate is None or StoreDate == '0000-00-00': - stdate = IssueDate - else: - stdate = StoreDate - #logger.fdebug('Posting date of : ' + str(pubdate)) - # convert it to a tuple - dateconv = email.utils.parsedate_tz(pubdate) - #logger.fdebug('dateconv of : ' + str(dateconv)) - # convert it to a numeric time, then subtract the timezone difference (+/- GMT) - if dateconv[-1] is not None: - postdate_int = time.mktime(dateconv[:len(dateconv)-1]) - dateconv[-1] - else: - postdate_int = time.mktime(dateconv[:len(dateconv)-1]) - #logger.fdebug('postdate_int of : ' + str(postdate_int)) - #logger.fdebug('Issue date of : ' + str(stdate)) - #convert it to a Thu, 06 Feb 2014 00:00:00 format - issue_convert = datetime.datetime.strptime(stdate.rstrip(), '%Y-%m-%d') - #logger.fdebug('issue_convert:' + str(issue_convert)) - #issconv = issue_convert.strftime('%a, %d %b %Y %H:%M:%S') - # to get past different locale's os-dependent dates, let's convert it to a generic datetime format - stamp = time.mktime(issue_convert.timetuple()) - #logger.fdebug('stamp: ' + str(stamp)) - issconv = format_date_time(stamp) - #logger.fdebug('issue date is :' + str(issconv)) - #convert it to a tuple - econv = email.utils.parsedate_tz(issconv) - #logger.fdebug('econv:' + str(econv)) - #convert it to a numeric and drop the GMT/Timezone - issuedate_int = time.mktime(econv[:len(econv)-1]) - #logger.fdebug('issuedate_int:' + str(issuedate_int)) - if postdate_int < issuedate_int: - logger.fdebug(str(pubdate) + ' is before store date of ' + str(stdate) + '. Ignoring search result as this is not the right issue.') - continue - else: - logger.fdebug(str(pubdate) + ' is after store date of ' + str(stdate)) - -# -- end size constaints. - - - thisentry = entry['title'] - logger.fdebug("Entry: " + thisentry) - cleantitle = thisentry - - #remove the extension. 
- extensions = ('.cbr', '.cbz') - if cleantitle.lower().endswith(extensions): - fd, ext = os.path.splitext(cleantitle) - logger.fdebug("Removed extension from filename: " + ext) - #name = re.sub(str(ext), '', str(subname)) - cleantitle = fd - - if 'mixed format' in cleantitle.lower(): - cleantitle = re.sub('mixed format', '', cleantitle).strip() - logger.fdebug('removed extra information after issue # that is not necessary: ' + str(cleantitle)) - - cleantitle = re.sub('[\_\.]', ' ', cleantitle) - cleantitle = helpers.cleanName(cleantitle) - # this is new - if title contains a '&' in the title it will assume the filename has ended at that point - # which causes false positives (ie. wolverine & the x-men becomes the x-men, which matches on x-men. - # 'the' is removed for comparisons later on - if '&' in cleantitle: cleantitle = re.sub('[\&]','and', cleantitle) - - nzbname = cleantitle - - # if it's coming from CBT, remove the ' -' at the end as it screws it up. - if nzbprov == 'CBT': - if cleantitle.endswith(' - '): - cleantitle = cleantitle[:-3] - logger.fdebug("cleaned up title to : " + str(cleantitle)) - - #adjust for covers only by removing them entirely... - logger.fdebug("Cleantitle: " + str(cleantitle)) - vers4year = "no" - vers4vol = "no" - - if 'cover only' in cleantitle.lower(): - logger.fdebug("Ignoring title as Cover Only detected.") - cleantitle = "abcdefghijk 0 (1901).cbz" - continue - - if ComicVersion: - ComVersChk = re.sub("[^0-9]", "", ComicVersion) - if ComVersChk == '' or ComVersChk == '1': - ComVersChk = 0 - else: - ComVersChk = 0 - - ctchk = cleantitle.split() - for ct in ctchk: - if ct.lower().startswith('v') and ct[1:].isdigit(): - logger.fdebug("possible versioning..checking") - #we hit a versioning # - account for it - if ct[1:].isdigit(): - if len(ct[1:]) == 4: #v2013 - logger.fdebug("Version detected as " + str(ct)) - vers4year = "yes" #re.sub("[^0-9]", " ", str(ct)) #remove the v - #cleantitle = re.sub(ct, "(" + str(vers4year) + ")", cleantitle) - #logger.fdebug("volumized cleantitle : " + cleantitle) - break - else: - if len(ct) < 4: - logger.fdebug("Version detected as " + str(ct)) - vers4vol = str(ct) - break - logger.fdebug("false version detection..ignoring.") - - - if len(re.findall('[^()]+', cleantitle)) == 1 or 'cover only' in cleantitle.lower(): - #some sites don't have (2013) or whatever..just v2 / v2013. Let's adjust: - #this handles when there is NO YEAR present in the title, otherwise versioning is way below. - if vers4year == "no" and vers4vol == "no": - # if the series is a v1, let's remove the requirements for year and volume label - # even if it's a v1, the nzbname might not contain a valid year format (20xx) or v3, - # and since it's already known that there is no (year) or vYEAR given - # let's push it through (and edit out the following if constraint)... - - #if ComVersChk != 0: - # if there are no () in the string, try to add them if it looks like a year (19xx or 20xx) - if len(re.findall('[^()]+', cleantitle)): - logger.fdebug("detected invalid nzb filename - attempting to detect year to continue") - cleantitle = re.sub('(.*)\s+(19\d{2}|20\d{2})(.*)', '\\1 (\\2) \\3', cleantitle) - else: - logger.fdebug("invalid nzb and/or cover only - skipping.") - cleantitle = "abcdefghijk 0 (1901).cbz" - continue - - #adjust for covers only by removing them entirely... 
- logger.fdebug("Cleantitle: " + str(cleantitle)) - - - if done: - break - #let's narrow search down - take out year (2010), (2011), etc - #let's check for first occurance of '(' as generally indicates - #that the 'title' has ended - - ripperlist=['digital-', - 'empire', - 'dcp'] - #this takes care of the brackets :) - m = re.findall('[^()]+', cleantitle) - lenm = len(m) - - #print ("there are " + str(lenm) + " words.") - cnt = 0 - yearmatch = "false" - pub_removed = None - - while (cnt < lenm): - if m[cnt] is None: break - if m[cnt] == ' ': - pass - else: - logger.fdebug(str(cnt) + ". Bracket Word: " + str(m[cnt])) - if cnt == 0: - comic_andiss = m[cnt] - if 'mixed format' in comic_andiss.lower(): - comic_andiss = re.sub('mixed format', '', comic_andiss).strip() - logger.fdebug('removed extra information after issue # that is not necessary: ' + str(comic_andiss)) - logger.fdebug("Comic: " + str(comic_andiss)) - logger.fdebug("UseFuzzy is : " + str(UseFuzzy)) - logger.fdebug('ComVersChk : ' + str(ComVersChk)) - if vers4vol != "no" or vers4year != "no": - logger.fdebug("Year not given properly formatted but Version detected.Bypassing Year Match.") - yearmatch = "true" - elif ComVersChk == 0: - logger.fdebug("Series version detected as V1 (only series in existance with that title). Bypassing Year/Volume check") - yearmatch = "true" - elif UseFuzzy == "0" or UseFuzzy == "2" or UseFuzzy is None or IssDateFix != "no": - if m[cnt][:-2] == '19' or m[cnt][:-2] == '20': - logger.fdebug('year detected: ' + str(m[cnt])) - result_comyear = m[cnt] - logger.fdebug('year looking for: ' + str(comyear)) - if str(comyear) in result_comyear: - logger.fdebug(str(comyear) + " - right years match baby!") - yearmatch = "true" - else: - logger.fdebug(str(comyear) + " - not right - years do not match") - yearmatch = "false" - if UseFuzzy == "2": - #Fuzzy the year +1 and -1 - ComUp = int(ComicYear) + 1 - ComDwn = int(ComicYear) - 1 - if str(ComUp) in result_comyear or str(ComDwn) in result_comyear: - logger.fdebug("Fuzzy Logic'd the Year and got a match with a year of " + str(result_comyear)) - yearmatch = "true" - else: - logger.fdebug(str(comyear) + "Fuzzy logic'd the Year and year still didn't match.") - #let's do this here and save a few extra loops ;) - #fix for issue dates between Nov-Dec/Jan - if IssDateFix != "no" and UseFuzzy is not "2": - if IssDateFix == "01" or IssDateFix == "02" or IssDateFix == "03": ComicYearFix = int(ComicYear) - 1 - else: ComicYearFix = int(ComicYear) + 1 - if str(ComicYearFix) in result_comyear: - logger.fdebug("further analysis reveals this was published inbetween Nov-Jan, incrementing year to " + str(ComicYearFix) + " has resulted in a match!") - yearmatch = "true" - else: - logger.fdebug(str(comyear) + " - not the right year.") - - elif UseFuzzy == "1": yearmatch = "true" - - if Publisher.lower() in m[cnt].lower() and cnt >= 1: - #if the Publisher is given within the title or filename even (for some reason, some people - #have this to distinguish different titles), let's remove it entirely. 
- logger.fdebug('Publisher detected within title : ' + str(m[cnt])) - logger.fdebug('cnt is : ' + str(cnt) + ' --- Publisher is: ' + Publisher) - pub_removed = m[cnt] - #-strip publisher if exists here- - logger.fdebug('removing publisher from title') - cleantitle_pubremoved = re.sub(pub_removed, '', cleantitle) - logger.fdebug('pubremoved : ' + str(cleantitle_pubremoved)) - cleantitle_pubremoved = re.sub('\(\)', '', cleantitle_pubremoved) #remove empty brackets - cleantitle_pubremoved = re.sub('\s+', ' ', cleantitle_pubremoved) #remove spaces > 1 - logger.fdebug('blank brackets removed: ' + str(cleantitle_pubremoved)) - #reset the values to initial without the publisher in the title - m = re.findall('[^()]+', cleantitle_pubremoved) - lenm = len(m) - cnt = 0 - yearmatch = "false" - continue - if 'digital' in m[cnt] and len(m[cnt]) == 7: - logger.fdebug("digital edition detected") - pass - if ' of ' in m[cnt]: - logger.fdebug("mini-series detected : " + str(m[cnt])) - result_of = m[cnt] - if 'cover' in m[cnt]: - logger.fdebug("covers detected: " + str(m[cnt])) - result_comcovers = m[cnt] - for ripper in ripperlist: - if ripper in m[cnt]: - logger.fdebug("Scanner detected: " + str(m[cnt])) - result_comscanner = m[cnt] - cnt+=1 - - if yearmatch == "false": continue - - splitit = [] - watchcomic_split = [] - logger.fdebug("original nzb comic and issue: " + str(comic_andiss)) - #changed this from '' to ' ' - comic_iss_b4 = re.sub('[\-\:\,\?]', ' ', str(comic_andiss)) - comic_iss = comic_iss_b4.replace('.',' ') - #if issue_except: comic_iss = re.sub(issue_except.lower(), '', comic_iss) - logger.fdebug("adjusted nzb comic and issue: " + str(comic_iss)) - splitit = comic_iss.split(None) - #something happened to dognzb searches or results...added a '.' in place of spaces - #screwed up most search results with dognzb. Let's try to adjust. - #watchcomic_split = findcomic[findloop].split(None) - - if splitit[(len(splitit)-1)].isdigit(): - #compares - if the last digit and second last digit are #'s seperated by spaces assume decimal - comic_iss = splitit[(len(splitit)-1)] - splitst = len(splitit) - 1 - if splitit[(len(splitit)-2)].isdigit(): - # for series that have a digit at the end, it screws up the logistics. - i = 1 - chg_comic = splitit[0] - while (i < (len(splitit)-1)): - chg_comic = chg_comic + " " + splitit[i] - i+=1 - logger.fdebug("chg_comic:" + str(chg_comic)) - findcomic_chksplit = re.sub('[\-\:\,\.\?]', ' ', findcomic) - findcomic_chksplit = re.sub('[\&]', 'and', findcomic_chksplit) - findcomic_chksplit = re.sub('[\s]', '', findcomic_chksplit) - chg_comic = re.sub('[\-\:\,\.\?]', ' ', chg_comic) - chg_comic = re.sub('[\&]', 'and', chg_comic) - chg_comic = re.sub('[\s]', '', chg_comic) - logger.fdebug('chg_comic: ' + chg_comic.upper()) - logger.fdebug('findcomic_chksplit: ' + findcomic_chksplit.upper()) - if chg_comic.upper() in findcomic_chksplit.upper(): - logger.fdebug("series contains numerics...adjusting..") - else: - changeup = "." + splitit[(len(splitit)-1)] - logger.fdebug("changeup to decimal: " + str(changeup)) - comic_iss = splitit[(len(splitit)-2)] + "." + comic_iss - splitst = len(splitit) - 2 - else: - #if the issue is alphanumeric (ie. 15AU, 12A) it'll error. 
- tmpiss = splitit[(len(splitit)-1)] - i = 0 - alphas = None - a_issno = None - while (i < len(tmpiss)): - if tmpiss[i].isalpha(): - #take first occurance of alpha in string and carry it through - alphas = tmpiss[i:].rstrip() - a_issno = tmpiss[:i].rstrip() - break - i+=1 - logger.fdebug("alphas: " + str(alphas)) - logger.fdebug("a_issno: " + str(a_issno)) - if alphas is None: - # if the nzb name doesn't follow the series-issue-year format even closely..ignore nzb - logger.fdebug("invalid naming format of nzb detected - cannot properly determine issue") - continue - else: - if a_issno == '' and alphas is not None: - #print 'issno & alphas blank' - #print 'splitit: ' + splitit[(len(splitit)-2)] - #print 'splitit: ' + splitit[(len(splitit)-1)] - #if there' a space between the issue & alpha, join them. - findstart = thisentry.find(splitit[(len(splitit)-1)]) - #print 'thisentry : ' + thisentry - #print 'decimal location : ' + str(findstart) - if thisentry[findstart-1] == '.': - comic_iss = splitit[(len(splitit)-2)] + '.' + splitit[(len(splitit)-1)] - else: - comic_iss = splitit[(len(splitit)-2)] + splitit[(len(splitit)-1)] - logger.fdebug('comic_iss is : ' + str(comic_iss)) - splitst = len(splitit) - 2 - else: - comic_iss = tmpiss - splitst = len(splitit) - 1 - logger.fdebug("adjusting from: " + str(comic_iss_b4) + " to: " + str(comic_iss)) - #bmm = re.findall('v\d', comic_iss) - #if len(bmm) > 0: splitst = len(splitit) - 2 - #else: splitst = len(splitit) - 1 - - # make sure that things like - in watchcomic are accounted for when comparing to nzb. - findcomic = re.sub('[\/]', ' ', findcomic) - watchcomic_split = helpers.cleanName(str(findcomic)) - if '&' in watchcomic_split: watchcomic_split = re.sub('[/&]','and', watchcomic_split) - watchcomic_nonsplit = re.sub('[\-\:\,\.\?]', ' ', watchcomic_split) - watchcomic_split = watchcomic_nonsplit.split(None) - - logger.fdebug(str(splitit) + " nzb series word count: " + str(splitst)) - logger.fdebug(str(watchcomic_split) + " watchlist word count: " + str(len(watchcomic_split))) - #account for possible version inclusion here and annual inclusions. - cvers = "false" - annualize = "false" - if 'annual' in ComicName.lower(): - logger.fdebug("IssueID of : " + str(IssueID) + " - This is an annual...let's adjust.") - annualize = "true" - #splitst = splitst - 1 - - for tstsplit in splitit: - if tstsplit.lower().startswith('v') and tstsplit[1:].isdigit(): - logger.fdebug("this has a version #...let's adjust") - if len(tstsplit[1:]) == 4: #v2013 - logger.fdebug("Version detected as " + str(tstsplit)) - vers4year = "yes" #re.sub("[^0-9]", " ", str(ct)) #remove the v - elif len(tstsplit[1:]) == 1: #v2 - logger.fdebug("Version detected as " + str(tstsplit)) - vers4vol = str(tstsplit) - elif tstsplit[1:].isdigit() and len(tstsplit) < 4: - logger.fdebug('Version detected as ' +str(tstsplit)) - vers4vol = str(tstsplit) - else: - logger.fdebug("error - unknown length for : " + str(tstsplit)) - logger.fdebug("volume detection commencing - adjusting length.") - cvers = "true" - splitst = splitst - 1 - break - - #do an initial check - initialchk = 'ok' - if (splitst) != len(watchcomic_split): - logger.fdebug("incorrect comic lengths...not a match") - #because the word 'the' can appear anywhere and really mess up matches... 
-# if str(splitit[0]).lower() == "the" or str(watchcomic_split[0]).lower() == "the": -# if str(splitit[0]).lower() == "the": - for tstsplit in splitit: - if tstsplit.lower() == 'the': - logger.fdebug("THE word detected in found comic...attempting to adjust pattern matching") - #print comic_iss_b4 - #print comic_iss_b4[4:] - #splitit = comic_iss_b4[4:].split(None) - cissb4this = re.sub("\\bthe\\b", "", comic_iss_b4) - splitit = cissb4this.split(None) - splitst = splitst - 1 #remove 'the' from start - logger.fdebug("comic is now : " + str(splitit))#str(comic_iss[4:])) - #if str(watchcomic_split[0]).lower() == "the": - for tstsplit in watchcomic_split: - if tstsplit.lower() == 'the': - logger.fdebug("THE word detected in watchcomic - attempting to adjust match.") - #wtstart = watchcomic_nonsplit[4:] - #watchcomic_split = wtstart.split(None) - wtstart = re.sub("\\bthe\\b", "", watchcomic_nonsplit) - watchcomic_split = wtstart.split(None) - logger.fdebug("new watchcomic string:" + str(watchcomic_split)) - initialchk = 'no' - else: - initialchk = 'ok' - - logger.fdebug("splitst : " + str(splitst)) - logger.fdebug("len-watchcomic : " + str(len(watchcomic_split))) - if (splitst) != len(watchcomic_split) and initialchk == 'no': - logger.fdebug("incorrect comic lengths after removal...not a match.") - else: - logger.fdebug("length match..proceeding") - n = 0 - scount = 0 - logger.fdebug("search-length: " + str(splitst)) - logger.fdebug("Watchlist-length: " + str(len(watchcomic_split))) - if cvers == "true": splitst = splitst + 1 - while ( n <= (splitst)-1 ): - logger.fdebug("splitit: " + str(splitit[n])) - logger.fdebug("scount : " + str(scount)) - if n < (splitst) and n < len(watchcomic_split): - logger.fdebug(str(n) + " Comparing: " + str(watchcomic_split[n]) + " .to. " + str(splitit[n])) - if '+' in watchcomic_split[n]: - watchcomic_split[n] = re.sub('+', '', str(watchcomic_split[n])) - if str(watchcomic_split[n].lower()) in str(splitit[n].lower()) and len(watchcomic_split[n]) >= len(splitit[n]): - logger.fdebug("word matched on : " + str(splitit[n])) - scount+=1 - #elif ':' in splitit[n] or '-' in splitit[n]: - # splitrep = splitit[n].replace('-', '') - # print ("non-character keyword...skipped on " + splitit[n]) - elif str(splitit[n].lower()).startswith('v'): - logger.fdebug("possible versioning..checking") - #we hit a versioning # - account for it - if splitit[n][1:].isdigit(): - logger.fdebug("watch comicversion is " + str(ComicVersion)) - fndcomicversion = str(splitit[n]) - logger.fdebug("version found: " + str(fndcomicversion)) - logger.fdebug("vers4year: " + str(vers4year)) - logger.fdebug("vers4vol: " + str(vers4vol)) - if vers4year is not "no" or vers4vol is not "no": - - #if the volume is None, assume it's a V1 to increase % hits - if ComVersChk == 0: - D_ComicVersion = 1 - else: - D_ComicVersion = ComVersChk - - F_ComicVersion = re.sub("[^0-9]", "", fndcomicversion) - #if this is a one-off, SeriesYear will be None and cause errors. 
- if SeriesYear is None: - S_ComicVersion = 0 - else: - S_ComicVersion = str(SeriesYear) - logger.fdebug("FCVersion: " + str(F_ComicVersion)) - logger.fdebug("DCVersion: " + str(D_ComicVersion)) - logger.fdebug("SCVersion: " + str(S_ComicVersion)) - - #here's the catch, sometimes annuals get posted as the Pub Year - # instead of the Series they belong to (V2012 vs V2013) - if annualize == "true" and int(ComicYear) == int(F_ComicVersion): - logger.fdebug("We matched on versions for annuals " + str(fndcomicversion)) - scount+=1 - - elif int(F_ComicVersion) == int(D_ComicVersion) or int(F_ComicVersion) == int(S_ComicVersion): - logger.fdebug("We matched on versions..." + str(fndcomicversion)) - scount+=1 - else: - logger.fdebug("Versions wrong. Ignoring possible match.") - scount = 0 - else: - logger.fdebug("Comic / Issue section") - if splitit[n].isdigit(): - logger.fdebug("issue detected") - #comiss = splitit[n] - comicNAMER = n - 1 - comNAME = splitit[0] - cmnam = 1 - while (cmnam <= comicNAMER): - comNAME = str(comNAME) + " " + str(splitit[cmnam]) - cmnam+=1 - logger.fdebug("comic: " + str(comNAME)) - else: - logger.fdebug("non-match for: "+ str(splitit[n])) - pass - n+=1 - #set the match threshold to 80% (for now) - # if it's less than 80% consider it a non-match and discard. - #splitit has to splitit-1 because last position is issue. - wordcnt = int(scount) - logger.fdebug("scount:" + str(wordcnt)) - totalcnt = int(splitst) - logger.fdebug("splitit-len:" + str(totalcnt)) - try: - spercent = (wordcnt/totalcnt) * 100 - except ZeroDivisionError: - spercent = 0 - logger.fdebug("we got " + str(spercent) + " percent.") - if int(spercent) >= 80: - logger.fdebug("it's a go captain... - we matched " + str(spercent) + "%!") - if int(spercent) < 80: - logger.fdebug("failure - we only got " + str(spercent) + "% right!") - continue - logger.fdebug("this should be a match!") - logger.fdebug("issue we are looking for is : " + str(findcomiciss)) - logger.fdebug("integer value of issue we are looking for : " + str(intIss)) - - fnd_iss_except = None - logger.fdebug("issue we found for is : " + str(comic_iss)) - comintIss = helpers.issuedigits(comic_iss) - logger.fdebug("integer value of issue we are found : " + str(comintIss)) - - #issue comparison now as well - if int(intIss) == int(comintIss): - #check if nzb is in do not download list ;) - if nzbprov == 'experimental': - #id is located after the /download/ portion - url_parts = urlparse.urlparse(entry['link']) - path_parts = url_parts[2].rpartition('/') - nzbtempid = path_parts[0].rpartition('/') - nzblen = len(nzbtempid) - nzbid = nzbtempid[nzblen-1] - elif nzbprov == 'CBT': - url_parts = urlparse.urlparse(entry['link']) - nzbtemp = url_parts[4] # get the query paramater string - nzbtemp = re.sub('torrent=', '', nzbtemp).rstrip() - nzbid = re.sub('.torrent', '', nzbtemp).rstrip() - elif nzbprov == 'KAT': - url_parts = urlparse.urlparse(entry['link']) - path_parts = url_parts[2].rpartition('/') - nzbtempid = path_parts[2] - nzbid = re.sub('.torrent', '', nzbtempid).rstrip() - elif nzbprov == 'nzb.su': - pass - elif nzbprov == 'dognzb': - pass - elif nzbprov == 'newznab': - #if in format of http://newznab/getnzb/.nzb&i=1&r=apikey - nzbid = os.path.splitext(entry['link'])[0].rsplit('/', 1)[1] - - logger.fdebug('issues match!') - logger.info(u"Found " + ComicName + " (" + str(comyear) + ") issue: " + str(IssueNumber) + " using " + str(tmpprov) ) - ## -- inherit issue. Comic year is non-standard. 
nzb year is the year - ## -- comic was printed, not the start year of the comic series and - ## -- thus the deciding component if matches are correct or not - linkstart = os.path.splitext(entry['link'])[0] - #following is JUST for nzb.su - if nzbprov == 'nzb.su' or nzbprov == 'newznab': - linkit = os.path.splitext(entry['link'])[1] - if mylar.USE_SABNZBD: - linkit = linkit.replace("&", "%26") - logger.fdebug('new linkit:' + linkit) - linkapi = str(linkstart) + str(linkit) - else: - # this should work for every other provider - linkstart = linkstart.replace("&", "%26") - linkapi = str(linkstart) - logger.fdebug("link given by: " + str(nzbprov)) - #logger.fdebug("link: " + str(linkstart)) - #logger.fdebug("linkforapi: " + str(linkapi)) - #here we distinguish between rename and not. - #blackhole functinality--- - #let's download the file to a temporary cache. - sent_to = None - if mylar.USE_BLACKHOLE and nzbprov != 'CBT' and nzbprov != 'KAT': - logger.fdebug("using blackhole directory at : " + str(mylar.BLACKHOLE_DIR)) - if os.path.exists(mylar.BLACKHOLE_DIR): - #pretty this biatch up. - BComicName = re.sub('[\:\,\/\?]', '', str(ComicName)) - Bl_ComicName = re.sub('[\&]', 'and', str(BComicName)) - filenamenzb = str(re.sub(" ", ".", str(Bl_ComicName))) + "." + str(IssueNumber) + ".(" + str(comyear) + ").nzb" - # Add a user-agent - request = urllib2.Request(linkapi) #(str(mylar.BLACKHOLE_DIR) + str(filenamenzb)) - request.add_header('User-Agent', str(mylar.USER_AGENT)) - try: - opener = helpers.urlretrieve(urllib2.urlopen(request), str(mylar.BLACKHOLE_DIR) + str(filenamenzb)) - except Exception, e: - logger.warn('Error fetching data from %s: %s' % (nzbprov, e)) - return - logger.fdebug("filename saved to your blackhole as : " + str(filenamenzb)) - logger.info(u"Successfully sent .nzb to your Blackhole directory : " + str(mylar.BLACKHOLE_DIR) + str(filenamenzb) ) - extensions = ('.cbr', '.cbz') - - if filenamenzb.lower().endswith(extensions): - fd, ext = os.path.splitext(filenamenzb) - logger.fdebug("Removed extension from nzb: " + ext) - nzbname = re.sub(str(ext), '', str(filenamenzb)) - logger.fdebug("nzb name to be used for post-processing is : " + str(nzbname)) - sent_to = "your Blackhole Directory" - #end blackhole - elif nzbprov == 'CBT' or nzbprov == 'KAT': - logger.fdebug("sending .torrent to watchdir.") - logger.fdebug("ComicName:" + ComicName) - logger.fdebug("link:" + entry['link']) - logger.fdebug("Torrent Provider:" + nzbprov) - foundc = "yes" - - #let's change all space to decimals for simplicity - nzbname = re.sub(" ", ".", str(entry['title'])) - #gotta replace & or escape it - nzbname = re.sub("\&", 'and', str(nzbname)) - nzbname = re.sub('[\,\:\?]', '', str(nzbname)) - if nzbname.lower().endswith('.torrent'): - nzbname = re.sub('.torrent', '', nzbname) - rcheck = rsscheck.torsend2client(ComicName, IssueNumber, comyear, entry['link'], nzbprov) - if rcheck == "fail": - logger.error("Unable to send torrent - check logs and settings.") - return - if mylar.TORRENT_LOCAL: - sent_to = "your local Watch folder" - else: - sent_to = "your seedbox Watch folder" - else: - tmppath = mylar.CACHE_DIR - if os.path.exists(tmppath): - logger.fdebug("cache directory successfully found at : " + str(tmppath)) - pass - else: - #let's make the dir. 
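Worth pausing on the 80% threshold computed a little further up: the new file's `from __future__ import division` is what keeps `wordcnt/totalcnt` a true division on Python 2. Without it, a sketch like this (hypothetical counts) would truncate to 0 and fail every match:

from __future__ import division

wordcnt = 4      # hypothetical matched words
totalcnt = 5     # hypothetical comparable words
try:
    spercent = (wordcnt/totalcnt) * 100
except ZeroDivisionError:
    spercent = 0
print(int(spercent) >= 80)   # True (80.0); integer division would give 0
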
- logger.fdebug("couldn't locate cache directory, attempting to create at : " + str(mylar.CACHE_DIR)) - try: - os.makedirs(str(mylar.CACHE_DIR)) - logger.info(u"Cache Directory successfully created at: " + str(mylar.CACHE_DIR)) - - except OSError.e: - if e.errno != errno.EEXIST: - raise - logger.fdebug("link to retrieve via api:" + str(helpers.apiremove(linkapi,'$'))) - - #let's change all space to decimals for simplicity - nzbname = re.sub(" ", ".", str(entry['title'])) - #gotta replace & or escape it - nzbname = re.sub("\&", 'and', str(nzbname)) - nzbname = re.sub('[\,\:\?]', '', str(nzbname)) - extensions = ('.cbr', '.cbz') - - if nzbname.lower().endswith(extensions): - fd, ext = os.path.splitext(nzbname) - logger.fdebug("Removed extension from nzb: " + ext) - nzbname = re.sub(str(ext), '', str(nzbname)) - - logger.fdebug("nzbname used for post-processing:" + str(nzbname)) - -# #test nzb.get - if mylar.USE_NZBGET: - from xmlrpclib import ServerProxy - if mylar.NZBGET_HOST[:4] == 'http': - tmpapi = "http://" - nzbget_host = mylar.NZBGET_HOST[7:] - elif mylar.NZBGET_HOST[:5] == 'https': - tmpapi = "https://" - nzbget_host = mylar.NZBGET_HOST[8:] - else: - logger.error("You have an invalid nzbget hostname specified. Exiting") - return - tmpapi = str(tmpapi) + str(mylar.NZBGET_USERNAME) + ":" + str(mylar.NZBGET_PASSWORD) - tmpapi = str(tmpapi) + "@" + str(nzbget_host) + ":" + str(mylar.NZBGET_PORT) + "/xmlrpc" - server = ServerProxy(tmpapi) - send_to_nzbget = server.appendurl(nzbname + ".nzb", str(mylar.NZBGET_CATEGORY), int(nzbgetpriority), True, linkapi) - sent_to = "NZBGet" - if send_to_nzbget is True: - logger.info("Successfully sent nzb to NZBGet!") - else: - logger.info("Unable to send nzb to NZBGet - check your configs.") -# #end nzb.get test - - elif mylar.USE_SABNZBD: - # let's build the send-to-SAB string now: - tmpapi = str(mylar.SAB_HOST) - logger.fdebug("send-to-SAB host string: " + str(tmpapi)) - # changed to just work with direct links now... 
- SABtype = "/api?mode=addurl&name=" - fileURL = str(linkapi) - tmpapi = tmpapi + str(SABtype) - logger.fdebug("...selecting API type: " + str(tmpapi)) - tmpapi = tmpapi + str(fileURL) - - logger.fdebug("...attaching nzb provider link: " + str(helpers.apiremove(tmpapi,'$'))) - # determine SAB priority - if mylar.SAB_PRIORITY: - tmpapi = tmpapi + "&priority=" + str(sabpriority) - logger.fdebug("...setting priority: " + str(helpers.apiremove(tmpapi,'&'))) - # if category is blank, let's adjust - if mylar.SAB_CATEGORY: - tmpapi = tmpapi + "&cat=" + str(mylar.SAB_CATEGORY) - logger.fdebug("...attaching category: " + str(helpers.apiremove(tmpapi,'&'))) - if mylar.RENAME_FILES or mylar.POST_PROCESSING: - tmpapi = tmpapi + "&script=ComicRN.py" - logger.fdebug("...attaching rename script: " + str(helpers.apiremove(tmpapi,'&'))) - #final build of send-to-SAB - tmpapi = tmpapi + "&apikey=" + str(mylar.SAB_APIKEY) - - logger.fdebug("Completed send-to-SAB link: " + str(helpers.apiremove(tmpapi,'&'))) - - try: - urllib2.urlopen(tmpapi) - except urllib2.URLError: - logger.error(u"Unable to send nzb file to SABnzbd") - return - - sent_to = "SABnzbd+" - logger.info(u"Successfully sent nzb file to SABnzbd") - - if annualize == True: - modcomicname = ComicName + ' Annual' - else: - modcomicname = ComicName - if mylar.PROWL_ENABLED and mylar.PROWL_ONSNATCH: - logger.info(u"Sending Prowl notification") - prowl = notifiers.PROWL() - prowl.notify(nzbname,"Download started using " + sent_to) - if mylar.NMA_ENABLED and mylar.NMA_ONSNATCH: - logger.info(u"Sending NMA notification") - nma = notifiers.NMA() - snline = modcomicname + ' (' + comyear + ') - Issue #' + IssueNumber + ' snatched!' - nma.notify(snline=snline,snatched_nzb=nzbname,sent_to=sent_to,prov=nzbprov) - if mylar.PUSHOVER_ENABLED and mylar.PUSHOVER_ONSNATCH: - logger.info(u"Sending Pushover notification") - pushover = notifiers.PUSHOVER() - pushover.notify(nzbname,"Download started using " + sent_to) - if mylar.BOXCAR_ENABLED and mylar.BOXCAR_ONSNATCH: - logger.info(u"Sending Boxcar notification") - boxcar = notifiers.BOXCAR() - boxcar.notify(snatched_nzb=nzbname,sent_to=sent_to) - if mylar.PUSHBULLET_ENABLED and mylar.PUSHBULLET_ONSNATCH: - logger.info(u"Sending Pushbullet notification") - pushbullet = notifiers.PUSHBULLET() - snline = modcomicname + ' (' + comyear + ') - Issue #' + IssueNumber + ' snatched!' - pushbullet.notify(snline=snline,snatched=nzbname,sent_to=sent_to,prov=nzbprov) - - foundc = "yes" - done = True - break - else: - log2file = log2file + "issues don't match.." + "\n" - foundc = "no" - if done == True: - cmloopit == 1 #let's make sure it STOPS searching after a sucessful match. - break - cmloopit-=1 - if cmloopit < 1 and c_alpha is not None and seperatealpha == "no" and foundc == "no": - logger.info("Alphanumerics detected within IssueNumber. Seperating from Issue # and re-trying.") - cmloopit = origcmloopit - seperatealpha = "yes" - findloop+=1 - if foundc == "yes": - foundcomic.append("yes") - logger.fdebug("Found matching comic...preparing to send to Updater with IssueID: " + str(IssueID) + " and nzbname: " + str(nzbname)) - updater.nzblog(IssueID, nzbname, ComicName, SARC, IssueArcID) - prov_count == 0 - #break - return foundc - elif foundc == "no" and prov_count == 0: - foundcomic.append("no") - #logger.fdebug('Could not find a matching comic using ' + str(tmpprov)) - if IssDateFix == "no": - #logger.info('Could not find Issue ' + str(IssueNumber) + ' of ' + ComicName + '(' + str(comyear) + ') using ' + str(tmpprov) + '. 
Status kept as wanted.' ) - break - return foundc - -def searchforissue(issueid=None, new=False, rsscheck=None): - myDB = db.DBConnection() - - if not issueid or rsscheck: - - if rsscheck: - logger.info(u"Initiating RSS Search Scan at scheduled interval of " + str(mylar.RSS_CHECKINTERVAL) + " minutes.") - else: - logger.info(u"Initiating NZB Search scan at requested interval of " + str(mylar.SEARCH_INTERVAL) + " minutes.") - - myDB = db.DBConnection() - - stloop = 1 - results = [] - - if mylar.ANNUALS_ON: - stloop+=1 - while (stloop > 0): - if stloop == 1: - issues_1 = myDB.select('SELECT * from issues WHERE Status="Wanted"') - for iss in issues_1: - results.append({'ComicID': iss['ComicID'], - 'IssueID': iss['IssueID'], - 'Issue_Number': iss['Issue_Number'], - 'IssueDate': iss['IssueDate'], - 'StoreDate': iss['ReleaseDate'], - 'mode': 'want' - }) - elif stloop == 2: - issues_2 = myDB.select('SELECT * from annuals WHERE Status="Wanted"') - for iss in issues_2: - results.append({'ComicID': iss['ComicID'], - 'IssueID': iss['IssueID'], - 'Issue_Number': iss['Issue_Number'], - 'IssueDate': iss['IssueDate'], - 'StoreDate': iss['ReleaseDate'], #need to replace with Store date - 'mode': 'want_ann' - }) - stloop-=1 - - new = True - - for result in results: - comic = myDB.selectone("SELECT * from comics WHERE ComicID=? AND ComicName != 'None'", [result['ComicID']]).fetchone() - if comic is None: - logger.fdebug(str(result['ComicID']) + ' has no associated comic information. Skipping searching for this series.') - continue - foundNZB = "none" - SeriesYear = comic['ComicYear'] - Publisher = comic['ComicPublisher'] - AlternateSearch = comic['AlternateSearch'] - IssueDate = result['IssueDate'] - StoreDate = result['StoreDate'] - UseFuzzy = comic['UseFuzzy'] - ComicVersion = comic['ComicVersion'] - if result['IssueDate'] == None: - ComicYear = comic['ComicYear'] - else: - ComicYear = str(result['IssueDate'])[:4] - mode = result['mode'] - if (mylar.NZBSU or mylar.DOGNZB or mylar.EXPERIMENTAL or mylar.NEWZNAB or mylar.ENABLE_KAT or mylar.ENABLE_CBT) and (mylar.USE_SABNZBD or mylar.USE_NZBGET or mylar.ENABLE_TORRENTS or mylar.USE_BLACKHOLE): - foundNZB, prov = search_init(comic['ComicName'], result['Issue_Number'], str(ComicYear), comic['ComicYear'], Publisher, IssueDate, StoreDate, result['IssueID'], AlternateSearch, UseFuzzy, ComicVersion, SARC=None, IssueArcID=None, mode=mode, rsscheck=rsscheck, ComicID=result['ComicID']) - if foundNZB == "yes": - #print ("found!") - updater.foundsearch(result['ComicID'], result['IssueID'], mode=mode, provider=prov) - else: - pass - #print ("not found!") - - if rsscheck: - logger.info('Completed RSS Search scan') - else: - logger.info('Completed NZB Search scan') - - - else: - result = myDB.selectone('SELECT * FROM issues where IssueID=?', [issueid]).fetchone() - mode = 'want' - if result is None: - result = myDB.selectone('SELECT * FROM annuals where IssueID=?', [issueid]).fetchone() - mode = 'want_ann' - if result is None: - logger.info("Unable to locate IssueID - you probably should delete/refresh the series.") - return - ComicID = result['ComicID'] - comic = myDB.selectone('SELECT * FROM comics where ComicID=?', [ComicID]).fetchone() - SeriesYear = comic['ComicYear'] - Publisher = comic['ComicPublisher'] - AlternateSearch = comic['AlternateSearch'] - IssueDate = result['IssueDate'] - StoreDate = result['ReleaseDate'] - UseFuzzy = comic['UseFuzzy'] - ComicVersion = comic['ComicVersion'] - if result['IssueDate'] == None: - IssueYear = comic['ComicYear'] - else: - 
IssueYear = str(result['IssueDate'])[:4] - - foundNZB = "none" - if (mylar.NZBSU or mylar.DOGNZB or mylar.EXPERIMENTAL or mylar.NEWZNAB or mylar.ENABLE_KAT or mylar.ENABLE_CBT) and (mylar.USE_SABNZBD or mylar.USE_NZBGET or mylar.ENABLE_TORRENTS or mylar.USE_BLACKHOLE): - foundNZB, prov = search_init(comic['ComicName'], result['Issue_Number'], str(IssueYear), comic['ComicYear'], Publisher, IssueDate, StoreDate, result['IssueID'], AlternateSearch, UseFuzzy, ComicVersion, SARC=None, IssueArcID=None, mode=mode, rsscheck=rsscheck, ComicID=result['ComicID']) - if foundNZB == "yes": - logger.fdebug("I found " + comic['ComicName'] + ' #:' + str(result['Issue_Number'])) - updater.foundsearch(ComicID=result['ComicID'], IssueID=result['IssueID'], mode=mode, provider=prov) - else: - pass - #print ("not found!") - return - -def searchIssueIDList(issuelist): - myDB = db.DBConnection() - for issueid in issuelist: - issue = myDB.selectone('SELECT * from issues WHERE IssueID=?', [issueid]).fetchone() - mode = 'want' - if issue is None: - issue = myDB.selectone('SELECT * from annuals WHERE IssueID=?', [issueid]).fetchone() - mode = 'want_ann' - if issue is None: - logger.info("unable to determine IssueID - perhaps you need to delete/refresh series?") - break - comic = myDB.selectone('SELECT * from comics WHERE ComicID=?', [issue['ComicID']]).fetchone() - print ("Checking for issue: " + str(issue['Issue_Number'])) - foundNZB = "none" - SeriesYear = comic['ComicYear'] - AlternateSearch = comic['AlternateSearch'] - Publisher = comic['ComicPublisher'] - UseFuzzy = comic['UseFuzzy'] - ComicVersion = comic['ComicVersion'] - if issue['IssueDate'] == None: - IssueYear = comic['ComicYear'] - else: - IssueYear = str(issue['IssueDate'])[:4] - if (mylar.NZBSU or mylar.DOGNZB or mylar.EXPERIMENTAL or mylar.NEWZNAB or mylar.ENABLE_CBT or mylar.ENABLE_KAT) and (mylar.USE_SABNZBD or mylar.USE_NZBGET or mylar.ENABLE_TORRENTS or mylar.USE_BLACKHOLE): - foundNZB, prov = search_init(comic['ComicName'], issue['Issue_Number'], str(IssueYear), comic['ComicYear'], Publisher, issue['IssueDate'], issue['ReleaseDate'], issue['IssueID'], AlternateSearch, UseFuzzy, ComicVersion, SARC=None, IssueArcID=None, mode=mode, ComicID=issue['ComicID']) - if foundNZB == "yes": - #print ("found!") - updater.foundsearch(ComicID=issue['ComicID'], IssueID=issue['IssueID'], mode=mode, provider=prov) - else: - pass - #print ("not found!") - - - -def provider_sequence(nzbprovider, torprovider, newznab_hosts): - #provider order sequencing here. 
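The gate used by each search entry point above, in one line: at least one indexer and at least one download client must be enabled before search_init is ever called (flag lists hypothetical):

def can_search(indexers, clients):
    # mirrors: (NZBSU or DOGNZB or ...) and (USE_SABNZBD or USE_NZBGET or ...)
    return any(indexers) and any(clients)

print(can_search([True, False, False], [False, True]))   # True
print(can_search([True], [False]))                       # False
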
- newznab_info = [] - prov_order = [] - - nzbproviders_lower = [x.lower() for x in nzbprovider] - - if len(mylar.PROVIDER_ORDER) > 0: - for pr_order in mylar.PROVIDER_ORDER: - logger.fdebug('looking for ' + str(pr_order[1]).lower()) - logger.fdebug('nzbproviders ' + str(nzbproviders_lower)) - logger.fdebug('torproviders ' + str(torprovider)) - if (pr_order[1].lower() in torprovider) or any(pr_order[1].lower() in x for x in nzbproviders_lower): - logger.fdebug('found provider in existing enabled providers.') - if any(pr_order[1].lower() in x for x in nzbproviders_lower): - # this is for nzb providers - for np in nzbprovider: - logger.fdebug('checking against nzb provider: ' + str(np)) - if all( [ 'newznab' in np, pr_order[1].lower() in np.lower() ] ): - logger.fdebug('newznab match against: ' + str(np)) - for newznab_host in newznab_hosts: - logger.fdebug('comparing ' + str(pr_order[1]).lower() + ' against: ' + str(newznab_host[0]).lower()) - if newznab_host[0].lower() == pr_order[1].lower(): - logger.fdebug('sucessfully matched - appending to provider.order sequence') - prov_order.append(np) #newznab_host) - newznab_info.append({"provider": np, - "info": newznab_host}) - break - elif pr_order[1].lower() in np.lower(): - prov_order.append(pr_order[1]) - break - else: - for tp in torprovider: - logger.fdebug('checking against torrent provider: ' + str(tp)) - if (pr_order[1].lower() in tp.lower()): - logger.fdebug('torrent match against: ' + str(tp)) - prov_order.append(tp) #torrent provider - break - - logger.fdebug('sequence is now to start with ' + pr_order[1] + ' at spot #' + str(pr_order[0])) - - return prov_order,newznab_info +# This file is part of Mylar. +# +# Mylar is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Mylar is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Mylar. If not, see . + +from __future__ import division + +import mylar +from mylar import logger, db, updater, helpers, parseit, findcomicfeed, notifiers, rsscheck + +import lib.feedparser as feedparser +import urllib +import os, errno +import string +import sqlite3 as lite +import sys +import getopt +import re +import time +import urlparse +from xml.dom.minidom import parseString +import urllib2 +import email.utils +import datetime +from wsgiref.handlers import format_date_time + +def search_init(ComicName, IssueNumber, ComicYear, SeriesYear, Publisher, IssueDate, StoreDate, IssueID, AlternateSearch=None, UseFuzzy=None, ComicVersion=None, SARC=None, IssueArcID=None, mode=None, rsscheck=None, ComicID=None): + if ComicYear == None: ComicYear = '2014' + else: ComicYear = str(ComicYear)[:4] + if Publisher == 'IDW Publishing': Publisher = 'IDW' + logger.fdebug('Publisher is : ' + str(Publisher)) + if mode == 'want_ann': + logger.info("Annual issue search detected. Appending to issue #") + #anything for mode other than None indicates an annual. + ComicName = ComicName + " annual" + if AlternateSearch is not None and AlternateSearch != "None": + AlternateSearch = AlternateSearch + " annual" + + if IssueID is None: + #one-off the download. 
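A much-simplified sketch of what provider_sequence above produces, assuming PROVIDER_ORDER is already sorted by its position field and ignoring the newznab host resolution the real function performs:

PROVIDER_ORDER = [(0, 'dognzb'), (1, 'kat'), (2, 'nzb.su')]   # hypothetical (position, name) pairs
enabled = ['nzb.su', 'dognzb', 'kat']                         # hypothetical enabled providers

prov_order = [name for _, name in PROVIDER_ORDER if name in enabled]
print(prov_order)   # ['dognzb', 'kat', 'nzb.su']
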
+ print ("ComicName: " + ComicName) + print ("Issue: " + str(IssueNumber)) + print ("Year: " + str(ComicYear)) + print ("IssueDate:" + str(IssueDate)) + if SARC: + print ("Story-ARC issue!") + print ("Story-ARC: " + str(SARC)) + print ("IssueArcID: " + str(IssueArcID)) + + torprovider = [] + torp = 0 + logger.fdebug("Checking for torrent enabled.") + if mylar.ENABLE_TORRENT_SEARCH: #and mylar.ENABLE_TORRENTS: + if mylar.ENABLE_CBT: + torprovider.append('cbt') + torp+=1 + #print torprovider[0] + if mylar.ENABLE_KAT: + torprovider.append('kat') + torp+=1 + ##nzb provider selection## + ##'dognzb' or 'nzb.su' or 'experimental' + nzbprovider = [] + nzbp = 0 + if mylar.NZBSU == 1: + nzbprovider.append('nzb.su') + nzbp+=1 + if mylar.DOGNZB == 1: + nzbprovider.append('dognzb') + nzbp+=1 + # -------- + # Xperimental + if mylar.EXPERIMENTAL == 1: + nzbprovider.append('experimental') + nzbp+=1 + + newznabs = 0 + + newznab_hosts = [] + + if mylar.NEWZNAB == 1: + #if len(mylar.EXTRA_NEWZNABS > 0): + for newznab_host in mylar.EXTRA_NEWZNABS: + if newznab_host[4] == '1' or newznab_host[4] == 1: + newznab_hosts.append(newznab_host) + #if newznab_host[0] == newznab_host[1]: + # nzbprovider.append('newznab') + #else: + nzbprovider.append('newznab:' + str(newznab_host[0])) + newznabs+=1 + logger.fdebug("newznab name:" + str(newznab_host[0]) + " @ " + str(newznab_host[1])) + + logger.fdebug('newznab hosts: ' + str(newznab_hosts)) + logger.fdebug('nzbprovider: ' + str(nzbprovider)) + # -------- + logger.fdebug("there are : " + str(torp) + " torrent providers you have selected.") + torpr = torp - 1 + if torpr < 0: + torpr = -1 + providercount = int(nzbp + newznabs) + logger.fdebug("there are : " + str(providercount) + " nzb providers you have selected.") + logger.fdebug("Usenet Retention : " + str(mylar.USENET_RETENTION) + " days") + #nzbpr = providercount - 1 + #if nzbpr < 0: + # nzbpr == 0 + findit = 'no' + + totalproviders = providercount + torp + + if totalproviders == 0: + logger.error('[WARNING] You have ' + str(totalproviders) + ' search providers enabled. I need at least ONE provider to work. Aborting search.') + findit = "no" + nzbprov = None + return findit, nzbprov + + prov_order,newznab_info = provider_sequence(nzbprovider,torprovider,newznab_hosts) + # end provider order sequencing + logger.info('search provider order is ' + str(prov_order)) + + #fix for issue dates between Nov-Dec/(Jan-Feb-Mar) + IssDt = str(IssueDate)[5:7] + if IssDt == "12" or IssDt == "11" or IssDt == "01" or IssDt == "02" or IssDt == "03": + IssDateFix = IssDt + else: + IssDateFix = "no" + + searchcnt = 0 + i = 1 + + if rsscheck: + if mylar.ENABLE_RSS: + searchcnt = 1 # rss-only + else: + searchcnt = 0 # if it's not enabled, don't even bother. + else: + if mylar.ENABLE_RSS: + searchcnt = 2 # rss first, then api on non-matches + else: + searchcnt = 2 #set the searchcnt to 2 (api) + i = 2 #start the counter at api, so it will exit without running RSS + + while ( i <= searchcnt ): + #searchmodes: + # rss - will run through the built-cached db of entries + # api - will run through the providers via api (or non-api in the case of Experimental) + # the trick is if the search is done during an rss compare, it needs to exit when done. + # otherwise, the order of operations is rss feed check first, followed by api on non-results. + + if i == 1: searchmode = 'rss' #order of ops - this will be used first. 
+ elif i == 2: searchmode = 'api' + + if findit == 'yes': + logger.fdebug('Found result on first run, exiting search module now.') + break + + logger.fdebug("Initiating Search via : " + str(searchmode)) + + #torprtmp = 0 # torprtmp = torpr + prov_count = 0 + + while (prov_count <= len(prov_order)-1): + #while (torprtmp <= torpr): #(torprtmp >=0 ): + newznab_host = None + if prov_order[prov_count] == 'cbt': + searchprov = 'CBT' + elif prov_order[prov_count] == 'kat': + searchprov = 'KAT' + elif 'newznab' in prov_order[prov_count]: + #this is for newznab + searchprov = 'newznab' + for nninfo in newznab_info: + if nninfo['provider'] == prov_order[prov_count]: + newznab_host = nninfo['info'] + if newznab_host is None: + logger.fdebug('there was an error - newznab information was blank and it should not be.') + else: + newznab_host = None + searchprov = prov_order[prov_count].lower() + + if searchmode == 'rss': + findit = NZB_SEARCH(ComicName, IssueNumber, ComicYear, SeriesYear, Publisher, IssueDate, StoreDate, searchprov, prov_count, IssDateFix, IssueID, UseFuzzy, newznab_host, ComicVersion=ComicVersion, SARC=SARC, IssueArcID=IssueArcID, RSS="yes", ComicID=ComicID) + if findit == 'yes': + logger.fdebug("findit = found!") + break + else: + if AlternateSearch is not None and AlternateSearch != "None": + chkthealt = AlternateSearch.split('##') + if chkthealt == 0: + AS_Alternate = AlternateSearch + loopit = len(chkthealt) + for calt in chkthealt: + AS_Alternate = re.sub('##','',calt) + logger.info(u"Alternate Search pattern detected...re-adjusting to : " + str(AS_Alternate) + " " + str(ComicYear)) + findit = NZB_SEARCH(AS_Alternate, IssueNumber, ComicYear, SeriesYear, Publisher, IssueDate, StoreDate, searchprov, prov_count, IssDateFix, IssueID, UseFuzzy, newznab_host, ComicVersion=ComicVersion, SARC=SARC, IssueArcID=IssueArcID, RSS="yes", ComicID=ComicID) + if findit == 'yes': + break + if findit == 'yes': break + + else: + findit = NZB_SEARCH(ComicName, IssueNumber, ComicYear, SeriesYear, Publisher, IssueDate, StoreDate, searchprov, prov_count, IssDateFix, IssueID, UseFuzzy, newznab_host, ComicVersion=ComicVersion, SARC=SARC, IssueArcID=IssueArcID, ComicID=ComicID) + if findit == 'yes': + logger.fdebug("findit = found!") + break + else: + if AlternateSearch is not None and AlternateSearch != "None": + chkthealt = AlternateSearch.split('##') + if chkthealt == 0: + AS_Alternate = AlternateSearch + loopit = len(chkthealt) + for calt in chkthealt: + AS_Alternate = re.sub('##','',calt) + logger.info(u"Alternate Search pattern detected...re-adjusting to : " + str(AS_Alternate) + " " + str(ComicYear)) + findit = NZB_SEARCH(AS_Alternate, IssueNumber, ComicYear, SeriesYear, Publisher, IssueDate, StoreDate, searchprov, prov_count, IssDateFix, IssueID, UseFuzzy, newznab_host, ComicVersion=ComicVersion, SARC=SARC, IssueArcID=IssueArcID, ComicID=ComicID) + if findit == 'yes': + break + if findit == 'yes': break + + if searchprov == 'newznab': + searchprov = newznab_host[0].rstrip() + logger.info('Could not find Issue ' + str(IssueNumber) + ' of ' + ComicName + '(' + str(SeriesYear) + ') using ' + str(searchprov)) + prov_count+=1 + #torprtmp+=1 #torprtmp-=1 + + if findit == 'yes': + return findit, searchprov + else: + logger.fdebug("Finished searching via : " + str(searchmode)) + i+=1 + + if findit == 'no': + logger.info('Issue not found. 
Status kept as Wanted.') + + return findit, 'None' + +def NZB_SEARCH(ComicName, IssueNumber, ComicYear, SeriesYear, Publisher, IssueDate, StoreDate, nzbprov, prov_count, IssDateFix, IssueID, UseFuzzy, newznab_host=None, ComicVersion=None, SARC=None, IssueArcID=None, RSS=None, ComicID=None): + + if nzbprov == 'nzb.su': + apikey = mylar.NZBSU_APIKEY + elif nzbprov == 'dognzb': + apikey = mylar.DOGNZB_APIKEY + elif nzbprov == 'experimental': + apikey = 'none' + elif nzbprov == 'newznab': + #updated to include Newznab Name now + name_newznab = newznab_host[0].rstrip() + host_newznab = newznab_host[1].rstrip() + apikey = newznab_host[2].rstrip() + if '#' in newznab_host[3].rstrip(): + catstart = newznab_host[3].find('#') + category_newznab = newznab_host[3][catstart+1:] + logger.fdebug('non-default Newznab category set to :' + str(category_newznab)) + else: + category_newznab = '7030' + logger.fdebug("using Newznab host of : " + str(name_newznab)) + + if RSS == "yes": + if 'newznab' in nzbprov: + tmpprov = name_newznab + '(' + nzbprov + ')' + ' [RSS]' + else: + tmpprov = str(nzbprov) + " [RSS]" + else: + if 'newznab' in nzbprov: + tmpprov = name_newznab + ' (' + nzbprov + ')' + else: + tmpprov = nzbprov + logger.info(u"Shhh be very quiet...I'm looking for " + ComicName + " issue: " + str(IssueNumber) + " (" + str(ComicYear) + ") using " + str(tmpprov)) + + #load in do not download db here for given series + #myDB = db.DBConnection() + #nodown = myDB.action('SELECT * FROM nzblog') + + if mylar.PREFERRED_QUALITY == 0: filetype = "" + elif mylar.PREFERRED_QUALITY == 1: filetype = ".cbr" + elif mylar.PREFERRED_QUALITY == 2: filetype = ".cbz" + + if mylar.SAB_PRIORITY: + if mylar.SAB_PRIORITY == "Default": sabpriority = "-100" + elif mylar.SAB_PRIORITY == "Low": sabpriority = "-1" + elif mylar.SAB_PRIORITY == "Normal": sabpriority = "0" + elif mylar.SAB_PRIORITY == "High": sabpriority = "1" + elif mylar.SAB_PRIORITY == "Paused": sabpriority = "-2" + else: + #if sab priority isn't selected, default to Normal (0) + sabpriority = "0" + + if mylar.NZBGET_PRIORITY: + if mylar.NZBGET_PRIORITY == "Default": nzbgetpriority = "0" + elif mylar.NZBGET_PRIORITY == "Low": nzbgetpriority = "-50" + elif mylar.NZBGET_PRIORITY == "Normal": nzbgetpriority = "0" + elif mylar.NZBGET_PRIORITY == "High": nzbgetpriority = "50" + #there's no priority for "paused", so set "Very Low" and deal with that later... + elif mylar.NZBGET_PRIORITY == "Paused": nzbgetpriority = "-100" + else: + #if sab priority isn't selected, default to Normal (0) + nzbgetpriority = "0" + + #UseFuzzy == 0: Normal + #UseFuzzy == 1: Remove Year + #UseFuzzy == 2: Fuzzy Year + # figure out what was missed via rss feeds and do a manual search via api + #tsc = int(tot-1) + +# findcomic = [] +# findcomiciss = [] +# findcount = 0 + ci = "" + comsearch = [] + isssearch = [] + comyear = str(ComicYear) + + #print ("-------SEARCH FOR MISSING------------------") + #ComicName is unicode - let's unicode and ascii it cause we'll be comparing filenames against it. + u_ComicName = ComicName.encode('ascii', 'ignore').strip() + findcomic = u_ComicName + # this should be called elsewhere..redudant code. 
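The optional newznab category override above, isolated (host tuple hypothetical): anything after a '#' in the fourth field replaces the default comics category 7030:

newznab_host = ('myindexer', 'https://indexer.example/', 'APIKEY', '1#7030,7040', '1')   # hypothetical

if '#' in newznab_host[3].rstrip():
    catstart = newznab_host[3].find('#')
    category_newznab = newznab_host[3][catstart+1:]
else:
    category_newznab = '7030'
print(category_newznab)   # 7030,7040
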
+ +# elif 'au' in IssueNumber.lower(): +# iss = re.sub("[^0-9]", "", IssueNumber) # get just the digits +# intIss = int(iss) * 1000 +# issue_except = 'AU' # if it contains AU, mark it as an exception (future dict possibly) +# elif 'ai' in IssueNumber.lower(): +# iss = re.sub("[^0-9]", "", IssueNumber) # get just the digits +# intIss = int(iss) * 1000 +# issue_except = 'AI' # if it contains AI, mark it as an exception (future dict possibly) +# else: +# iss = IssueNumber +# intIss = int(iss) * 1000 +# #issue_decimal = re.compile(r'[^\d.]+') +# #issue = issue_decimal.sub('', str(IssueNumber)) + #NEW --- + intIss = helpers.issuedigits(IssueNumber) + iss = IssueNumber + findcomiciss = iss + + #print ("we need : " + str(findcomic[findcount]) + " issue: #" + str(findcomiciss[findcount])) + cm1 = re.sub("[\/]", " ", findcomic) + # replace whitespace in comic name with %20 for api search + #cm = re.sub("\&", "%26", str(cm1)) + cm = re.sub("\\band\\b", "", cm1.lower()) # remove 'and' & '&' from the search pattern entirely (broader results, will filter out later) + cm = re.sub("\\bthe\\b", "", cm.lower()) # remove 'the' from the search pattern to accomodate naming differences + cm = re.sub(" ", "%20", str(cm)) + cm = re.sub("[\&\:\?\,]", "", str(cm)) + + #determine the amount of loops here + i = 0 + c_alpha = None + dsp_c_alpha = None + c_number = None + c_num_a4 = None + while i < len(findcomiciss): + #take first occurance of alpha in string and carry it through + if findcomiciss[i].isalpha(): + c_alpha = findcomiciss[i:].rstrip() + c_number = findcomiciss[:i].rstrip() + break + elif '.' in findcomiciss[i]: + c_number = findcomiciss[:i].rstrip() + c_num_a4 = findcomiciss[i+1:].rstrip() + #if decimal seperates numeric from alpha (ie - 7.INH) + #don't give calpha a value or else will seperate with a space further down + #assign it to dsp_c_alpha so that it can be displayed for debugging. + if not c_num_a4.isdigit(): + dsp_c_alpha = c_num_a4 + else: + c_number = str(c_number) + '.' + str(c_num_a4) + break + i+=1 + logger.fdebug("calpha/cnumber: " + str(dsp_c_alpha) + " / " + str(c_number)) + + if c_number is None: + c_number = findcomiciss # if it's None, means no special alphas or decimals + + if len(c_number) == 1: + cmloopit = 3 + elif len(c_number) == 2: + cmloopit = 2 + else: + cmloopit = 1 + + isssearch = str(findcomiciss) + comsearch = cm + origcmloopit = cmloopit + findcount = 1 # this could be a loop in the future possibly + + # ---- + + #print ("------RESULTS OF SEARCH-------------------") + findloop = 0 + foundcomic = [] + done = False + seperatealpha = "no" + #---issue problem + # if issue is '011' instead of '11' in nzb search results, will not have same + # results. '011' will return different than '11', as will '009' and '09'. 
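The padding problem flagged in the comment above, and how the cmloopit counter set earlier answers it, as one small sketch: short issue numbers get retried as '00N', '0N', then 'N' so that '011'-style postings still match:

def padded_searches(issue):
    # 1-digit issues: 3 passes; 2-digit: 2 passes; longer: 1 pass
    count = 3 if len(issue) == 1 else 2 if len(issue) == 2 else 1
    pads = ['00', '0', ''][3-count:]
    return [pad + issue for pad in pads]

print(padded_searches('5'))     # ['005', '05', '5']
print(padded_searches('15'))    # ['015', '15']
print(padded_searches('115'))   # ['115']
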
+ + while (findloop < findcount ): + comsrc = comsearch + while (cmloopit >= 1 ): + #if issue_except is None: issue_exc = '' + #else: issue_exc = issue_except + if done is True and seperatealpha == "no": + logger.fdebug("we should break out now - sucessful search previous") + findloop == 99 + break + # here we account for issue pattern variations + if seperatealpha == "yes": + isssearch = str(c_number) + "%20" + str(c_alpha) + + if cmloopit == 3: + comsearch = comsrc + "%2000" + str(isssearch) + "%20" + str(filetype) + issdig = '00' + elif cmloopit == 2: + comsearch = comsrc + "%200" + str(isssearch) + "%20" + str(filetype) + issdig = '0' + elif cmloopit == 1: + comsearch = comsrc + "%20" + str(isssearch) + "%20" + str(filetype) + issdig = '' + + mod_isssearch = str(issdig) + str(isssearch) + + #--- this is basically for RSS Feeds --- + logger.fdebug('RSS Check: ' + str(RSS)) + logger.fdebug('nzbprov: ' + str(nzbprov)) + logger.fdebug('comicid: ' + str(ComicID)) + if RSS == "yes" or nzbprov == 'CBT': + if nzbprov == 'CBT' or nzbprov == 'KAT': + cmname = re.sub("%20", " ", str(comsrc)) + logger.fdebug("Sending request to [" + str(nzbprov) + "] RSS for " + str(findcomic) + " : " + str(mod_isssearch)) + bb = rsscheck.torrentdbsearch(findcomic,mod_isssearch,ComicID,nzbprov) + rss = "yes" + if bb is not None: logger.fdebug("bb results: " + str(bb)) + else: + cmname = re.sub("%20", " ", str(comsrc)) + logger.fdebug("Sending request to RSS for " + str(findcomic) + " : " + str(mod_isssearch) + " (" + str(ComicYear) + ")") + bb = rsscheck.nzbdbsearch(findcomic,mod_isssearch,ComicID,nzbprov,ComicYear,ComicVersion) + rss = "yes" + if bb is not None: logger.fdebug("bb results: " + str(bb)) + #this is the API calls + else: + #CBT is redudant now since only RSS works + # - just getting it ready for when it's not redudant :) + if nzbprov == 'CBT': + # cmname = re.sub("%20", " ", str(comsrc)) + # logger.fdebug("Sending request to [CBT] RSS for " + str(cmname) + " : " + str(mod_isssearch)) + # bb = rsscheck.torrentdbsearch(cmname,mod_isssearch,ComicID) + # rss = "yes" + # if bb is not None: logger.fdebug("results: " + str(bb)) + bb = "no results" + elif nzbprov == 'KAT': + cmname = re.sub("%20", " ", str(comsrc)) + logger.fdebug("Sending request to [KAT] for " + str(cmname) + " : " + str(mod_isssearch)) + bb = rsscheck.torrents(pickfeed='KAT',seriesname=cmname,issue=mod_isssearch) + rss = "no" + #if bb is not None: logger.fdebug("results: " + str(bb)) + elif nzbprov != 'experimental': + if nzbprov == 'dognzb': + findurl = "https://api.dognzb.cr/api?t=search&q=" + str(comsearch) + "&o=xml&cat=7030" + elif nzbprov == 'nzb.su': + findurl = "https://api.nzb.su/api?t=search&q=" + str(comsearch) + "&o=xml&cat=7030" + elif nzbprov == 'newznab': + #let's make sure the host has a '/' at the end, if not add it. 
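The URL assembly the comment above introduces (and the next hunk performs), with hypothetical host and key: normalize the trailing slash, then chain query, category and apikey:

host_newznab = 'https://indexer.example'   # hypothetical newznab host
comsearch = 'batman%20005'                 # hypothetical pre-encoded query
category_newznab = '7030'

if host_newznab[-1:] != '/':
    host_newznab_fix = host_newznab + '/'
else:
    host_newznab_fix = host_newznab
findurl = host_newznab_fix + 'api?t=search&q=' + comsearch + '&o=xml&cat=' + category_newznab
findurl = findurl + '&apikey=' + 'APIKEY'   # scrubbed before logging via helpers.apiremove
print(findurl)
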
+ if host_newznab[len(host_newznab)-1:len(host_newznab)] != '/': + host_newznab_fix = str(host_newznab) + "/" + else: host_newznab_fix = host_newznab + findurl = str(host_newznab_fix) + "api?t=search&q=" + str(comsearch) + "&o=xml&cat=" + str(category_newznab) + if nzbprov != 'nzbx': + # helper function to replace apikey here so we avoid logging it ;) + findurl = findurl + "&apikey=" + str(apikey) + logsearch = helpers.apiremove(str(findurl),'nzb') + logger.fdebug("search-url: " + str(logsearch)) + + ### IF USENET_RETENTION is set, honour it + ### For newznab sites, that means appending "&maxage=" on the URL + if mylar.USENET_RETENTION != None: + findurl = findurl + "&maxage=" + str(mylar.USENET_RETENTION) + + # Add a user-agent + #print ("user-agent:" + str(mylar.USER_AGENT)) + request = urllib2.Request(findurl) + request.add_header('User-Agent', str(mylar.USER_AGENT)) + opener = urllib2.build_opener() + + #set a delay between searches here. Default is for 30 seconds... + if mylar.SEARCH_DELAY == 'None' or mylar.SEARCH_DELAY is None: + pause_the_search = 1 * 60 # (it's in seconds) + elif str(mylar.SEARCH_DELAY).isdigit(): + pause_the_search = mylar.SEARCH_DELAY * 60 + else: + logger.info("Check Search Delay - invalid numerical given. Force-setting to 1 minute.") + pause_the_search = 1 * 60 + + #bypass for local newznabs + #remove the protocol string (http/https) + localbypass = False + if nzbprov == 'newznab': + if host_newznab_fix.startswith('http'): + hnc = host_newznab_fix.replace('http://', '') + elif host_newznab_fix.startswith('https'): + hnc = host_newznab_fix.replace('https://', '') + else: + hnc = host_newznab_fix + + if hnc[:3] == '10.' or hnc[:4] == '172.' or hnc[:4] == '192.' or hnc.startswith('localhost'): + localbypass = True + + if localbypass == False: + logger.info("pausing for " + str(pause_the_search) + " seconds before continuing to avoid hammering") + time.sleep(pause_the_search) + + try: + data = opener.open(request).read() + except Exception, e: + logger.warn('Error fetching data from %s: %s' % (nzbprov, e)) + data = False + #logger.info('data: ' + data) + if data: + bb = feedparser.parse(data) + else: + bb = "no results" + #logger.info('Search results:' + str(bb)) + try: + if bb['feed']['error']: + logger.error('[ERROR CODE: ' + str(bb['feed']['error']['code']) + '] ' + str(bb['feed']['error']['description'])) + bb = "no results" + except: + #logger.info('no errors on data retrieval...proceeding') + pass + elif nzbprov == 'experimental': + #bb = parseit.MysterBinScrape(comsearch[findloop], comyear) + bb = findcomicfeed.Startit(u_ComicName, isssearch, comyear, ComicVersion, IssDateFix) + # since the regexs in findcomicfeed do the 3 loops, lets force the exit after + #cmloopit == 1 + + done = False + foundc = "no" + log2file = "" + if bb == "no results": + pass + foundc = "no" + else: + for entry in bb['entries']: + logger.fdebug("checking search result: " + entry['title']) + if nzbprov != "experimental" and nzbprov != "dognzb": + if RSS == "yes": + comsize_b = entry['length'] + else: + #Experimental already has size constraints done. + if nzbprov == 'CBT': + comsize_b = entry['length'] + elif nzbprov == 'KAT': + comsize_b = entry['size'] + else: + tmpsz = entry.enclosures[0] + comsize_b = tmpsz['length'] + if comsize_b is None: + logger.fdebug('Size of file cannot be retrieved. Ignoring size-comparison and continuing.') + #comsize_b = 0 + else: + comsize_m = helpers.human_size(comsize_b) + logger.fdebug("size given as: " + str(comsize_m)) + #----size constraints. 
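The local-indexer bypass above, extracted (hosts hypothetical). Two caveats worth noting: the code's startswith('http') branch also catches https URLs, so the 'https://' strip never runs and https-hosted local indexers miss the bypass; and the bare '172.' prefix matches more than the RFC 1918 172.16-31 range:

def is_local(host):
    hnc = host.replace('https://', '').replace('http://', '')
    return (hnc[:3] == '10.' or hnc[:4] == '172.' or
            hnc[:4] == '192.' or hnc.startswith('localhost'))

print(is_local('http://192.168.1.50:8080/'))   # True -> skip the search delay
print(is_local('https://api.nzb.su/'))         # False -> pause before hammering
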
+ #if it's not within size constaints - dump it now and save some time. + if mylar.USE_MINSIZE: + conv_minsize = helpers.human2bytes(mylar.MINSIZE + "M") + logger.fdebug("comparing Min threshold " + str(conv_minsize) + " .. to .. nzb " + str(comsize_b)) + if int(conv_minsize) > int(comsize_b): + logger.fdebug("Failure to meet the Minimum size threshold - skipping") + continue + if mylar.USE_MAXSIZE: + conv_maxsize = helpers.human2bytes(mylar.MAXSIZE + "M") + logger.fdebug("comparing Max threshold " + str(conv_maxsize) + " .. to .. nzb " + str(comsize_b)) + if int(comsize_b) > int(conv_maxsize): + logger.fdebug("Failure to meet the Maximium size threshold - skipping") + continue + +#---- date constaints. + # if the posting date is prior to the publication date, dump it and save the time. + #logger.info('entry' + str(entry)) + if nzbprov == 'experimental' or nzbprov =='CBT': + pubdate = entry['pubdate'] + else: + try: + pubdate = entry['updated'] + except: + try: + pubdate = entry['pubdate'] + except: + logger.fdebug('invalid date found. Unable to continue - skipping result.') + continue + #use store date instead of publication date for comparisons since publication date is usually +2 months + if StoreDate is None or StoreDate == '0000-00-00': + if IssueDate is None or IssueDate == '0000-00-00': + logger.fdebug('Invalid store date & issue date detected - you probably should refresh the series or wait for CV to correct the data') + continue + else: + stdate = IssueDate + else: + stdate = StoreDate + #logger.fdebug('Posting date of : ' + str(pubdate)) + # convert it to a tuple + dateconv = email.utils.parsedate_tz(pubdate) + #logger.fdebug('dateconv of : ' + str(dateconv)) + # convert it to a numeric time, then subtract the timezone difference (+/- GMT) + if dateconv[-1] is not None: + postdate_int = time.mktime(dateconv[:len(dateconv)-1]) - dateconv[-1] + else: + postdate_int = time.mktime(dateconv[:len(dateconv)-1]) + #logger.fdebug('postdate_int of : ' + str(postdate_int)) + #logger.fdebug('Issue date of : ' + str(stdate)) + #convert it to a Thu, 06 Feb 2014 00:00:00 format + issue_convert = datetime.datetime.strptime(stdate.rstrip(), '%Y-%m-%d') + #logger.fdebug('issue_convert:' + str(issue_convert)) + #issconv = issue_convert.strftime('%a, %d %b %Y %H:%M:%S') + # to get past different locale's os-dependent dates, let's convert it to a generic datetime format + stamp = time.mktime(issue_convert.timetuple()) + #logger.fdebug('stamp: ' + str(stamp)) + issconv = format_date_time(stamp) + #logger.fdebug('issue date is :' + str(issconv)) + #convert it to a tuple + econv = email.utils.parsedate_tz(issconv) + #logger.fdebug('econv:' + str(econv)) + #convert it to a numeric and drop the GMT/Timezone + issuedate_int = time.mktime(econv[:len(econv)-1]) + #logger.fdebug('issuedate_int:' + str(issuedate_int)) + if postdate_int < issuedate_int: + logger.fdebug(str(pubdate) + ' is before store date of ' + str(stdate) + '. Ignoring search result as this is not the right issue.') + continue + else: + logger.fdebug(str(pubdate) + ' is after store date of ' + str(stdate)) + +# -- end size constaints. + + + thisentry = entry['title'] + logger.fdebug("Entry: " + thisentry) + cleantitle = thisentry + + #remove the extension. 
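The new guard in this hunk, condensed: prefer the store date, fall back to the issue date, and skip the result entirely when both are invalid (values hypothetical):

def pick_compare_date(StoreDate, IssueDate):
    if StoreDate is None or StoreDate == '0000-00-00':
        if IssueDate is None or IssueDate == '0000-00-00':
            return None            # caller skips this search result
        return IssueDate
    return StoreDate

print(pick_compare_date('0000-00-00', '2014-02-05'))   # 2014-02-05
print(pick_compare_date(None, '0000-00-00'))           # None
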
+ extensions = ('.cbr', '.cbz') + if cleantitle.lower().endswith(extensions): + fd, ext = os.path.splitext(cleantitle) + logger.fdebug("Removed extension from filename: " + ext) + #name = re.sub(str(ext), '', str(subname)) + cleantitle = fd + + if 'mixed format' in cleantitle.lower(): + cleantitle = re.sub('mixed format', '', cleantitle).strip() + logger.fdebug('removed extra information after issue # that is not necessary: ' + str(cleantitle)) + + cleantitle = re.sub('[\_\.]', ' ', cleantitle) + cleantitle = helpers.cleanName(cleantitle) + # this is new - if title contains a '&' in the title it will assume the filename has ended at that point + # which causes false positives (ie. wolverine & the x-men becomes the x-men, which matches on x-men. + # 'the' is removed for comparisons later on + if '&' in cleantitle: cleantitle = re.sub('[\&]','and', cleantitle) + + nzbname = cleantitle + + # if it's coming from CBT, remove the ' -' at the end as it screws it up. + if nzbprov == 'CBT': + if cleantitle.endswith(' - '): + cleantitle = cleantitle[:-3] + logger.fdebug("cleaned up title to : " + str(cleantitle)) + + #adjust for covers only by removing them entirely... + logger.fdebug("Cleantitle: " + str(cleantitle)) + vers4year = "no" + vers4vol = "no" + + if 'cover only' in cleantitle.lower(): + logger.fdebug("Ignoring title as Cover Only detected.") + cleantitle = "abcdefghijk 0 (1901).cbz" + continue + + if ComicVersion: + ComVersChk = re.sub("[^0-9]", "", ComicVersion) + if ComVersChk == '' or ComVersChk == '1': + ComVersChk = 0 + else: + ComVersChk = 0 + + ctchk = cleantitle.split() + for ct in ctchk: + if ct.lower().startswith('v') and ct[1:].isdigit(): + logger.fdebug("possible versioning..checking") + #we hit a versioning # - account for it + if ct[1:].isdigit(): + if len(ct[1:]) == 4: #v2013 + logger.fdebug("Version detected as " + str(ct)) + vers4year = "yes" #re.sub("[^0-9]", " ", str(ct)) #remove the v + #cleantitle = re.sub(ct, "(" + str(vers4year) + ")", cleantitle) + #logger.fdebug("volumized cleantitle : " + cleantitle) + break + else: + if len(ct) < 4: + logger.fdebug("Version detected as " + str(ct)) + vers4vol = str(ct) + break + logger.fdebug("false version detection..ignoring.") + + + if len(re.findall('[^()]+', cleantitle)) == 1 or 'cover only' in cleantitle.lower(): + #some sites don't have (2013) or whatever..just v2 / v2013. Let's adjust: + #this handles when there is NO YEAR present in the title, otherwise versioning is way below. + if vers4year == "no" and vers4vol == "no": + # if the series is a v1, let's remove the requirements for year and volume label + # even if it's a v1, the nzbname might not contain a valid year format (20xx) or v3, + # and since it's already known that there is no (year) or vYEAR given + # let's push it through (and edit out the following if constraint)... + + #if ComVersChk != 0: + # if there are no () in the string, try to add them if it looks like a year (19xx or 20xx) + if len(re.findall('[^()]+', cleantitle)): + logger.fdebug("detected invalid nzb filename - attempting to detect year to continue") + cleantitle = re.sub('(.*)\s+(19\d{2}|20\d{2})(.*)', '\\1 (\\2) \\3', cleantitle) + else: + logger.fdebug("invalid nzb and/or cover only - skipping.") + cleantitle = "abcdefghijk 0 (1901).cbz" + continue + + #adjust for covers only by removing them entirely... 
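
The version scan above treats a vNNNN token as a year and a short vN token as a volume label. The same classification, reduced to a sketch:

def classify_version_token(token):
    # 'v2013' -> ('year', '2013'); 'v2' -> ('volume', 'v2'); anything else -> None
    if token.lower().startswith('v') and token[1:].isdigit():
        if len(token[1:]) == 4:
            return ('year', token[1:])
        if len(token) < 4:
            return ('volume', token)
    return None

for tok in ['v2013', 'v2', 'Vol', 'x-men']:
    print tok, '->', classify_version_token(tok)
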
+ logger.fdebug("Cleantitle: " + str(cleantitle)) + + + if done: + break + #let's narrow search down - take out year (2010), (2011), etc + #let's check for first occurance of '(' as generally indicates + #that the 'title' has ended + + ripperlist=['digital-', + 'empire', + 'dcp'] + #this takes care of the brackets :) + m = re.findall('[^()]+', cleantitle) + lenm = len(m) + + #print ("there are " + str(lenm) + " words.") + cnt = 0 + yearmatch = "false" + pub_removed = None + + while (cnt < lenm): + if m[cnt] is None: break + if m[cnt] == ' ': + pass + else: + logger.fdebug(str(cnt) + ". Bracket Word: " + str(m[cnt])) + if cnt == 0: + comic_andiss = m[cnt] + if 'mixed format' in comic_andiss.lower(): + comic_andiss = re.sub('mixed format', '', comic_andiss).strip() + logger.fdebug('removed extra information after issue # that is not necessary: ' + str(comic_andiss)) + logger.fdebug("Comic: " + str(comic_andiss)) + logger.fdebug("UseFuzzy is : " + str(UseFuzzy)) + logger.fdebug('ComVersChk : ' + str(ComVersChk)) + if vers4vol != "no" or vers4year != "no": + logger.fdebug("Year not given properly formatted but Version detected.Bypassing Year Match.") + yearmatch = "true" + elif ComVersChk == 0: + logger.fdebug("Series version detected as V1 (only series in existance with that title). Bypassing Year/Volume check") + yearmatch = "true" + elif UseFuzzy == "0" or UseFuzzy == "2" or UseFuzzy is None or IssDateFix != "no": + if m[cnt][:-2] == '19' or m[cnt][:-2] == '20': + logger.fdebug('year detected: ' + str(m[cnt])) + result_comyear = m[cnt] + logger.fdebug('year looking for: ' + str(comyear)) + if str(comyear) in result_comyear: + logger.fdebug(str(comyear) + " - right years match baby!") + yearmatch = "true" + else: + logger.fdebug(str(comyear) + " - not right - years do not match") + yearmatch = "false" + if UseFuzzy == "2": + #Fuzzy the year +1 and -1 + ComUp = int(ComicYear) + 1 + ComDwn = int(ComicYear) - 1 + if str(ComUp) in result_comyear or str(ComDwn) in result_comyear: + logger.fdebug("Fuzzy Logic'd the Year and got a match with a year of " + str(result_comyear)) + yearmatch = "true" + else: + logger.fdebug(str(comyear) + "Fuzzy logic'd the Year and year still didn't match.") + #let's do this here and save a few extra loops ;) + #fix for issue dates between Nov-Dec/Jan + if IssDateFix != "no" and UseFuzzy is not "2": + if IssDateFix == "01" or IssDateFix == "02" or IssDateFix == "03": ComicYearFix = int(ComicYear) - 1 + else: ComicYearFix = int(ComicYear) + 1 + if str(ComicYearFix) in result_comyear: + logger.fdebug("further analysis reveals this was published inbetween Nov-Jan, incrementing year to " + str(ComicYearFix) + " has resulted in a match!") + yearmatch = "true" + else: + logger.fdebug(str(comyear) + " - not the right year.") + + elif UseFuzzy == "1": yearmatch = "true" + + if Publisher.lower() in m[cnt].lower() and cnt >= 1: + #if the Publisher is given within the title or filename even (for some reason, some people + #have this to distinguish different titles), let's remove it entirely. 
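
The year matching above allows three modes: exact, fuzzy by one year (UseFuzzy '2'), and the Nov-Jan cover-date shift (IssDateFix). A sketch of that decision with hypothetical arguments:

def year_matches(found_year, want_year, use_fuzzy='0', iss_date_fix='no'):
    # exact match first
    if str(want_year) == str(found_year):
        return True
    if use_fuzzy == '1':
        return True   # fuzzy mode 1: ignore the year entirely
    if use_fuzzy == '2':
        # fuzzy mode 2: accept one year either way
        if str(found_year) in (str(int(want_year) + 1), str(int(want_year) - 1)):
            return True
    if iss_date_fix != 'no':
        # issues cover-dated Jan-Mar are often posted late in the prior year
        shift = -1 if iss_date_fix in ('01', '02', '03') else 1
        return str(int(want_year) + shift) == str(found_year)
    return False

print year_matches('2013', 2014, iss_date_fix='01')   # True: Nov-Jan wrap
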
+ logger.fdebug('Publisher detected within title : ' + str(m[cnt])) + logger.fdebug('cnt is : ' + str(cnt) + ' --- Publisher is: ' + Publisher) + pub_removed = m[cnt] + #-strip publisher if exists here- + logger.fdebug('removing publisher from title') + cleantitle_pubremoved = re.sub(pub_removed, '', cleantitle) + logger.fdebug('pubremoved : ' + str(cleantitle_pubremoved)) + cleantitle_pubremoved = re.sub('\(\)', '', cleantitle_pubremoved) #remove empty brackets + cleantitle_pubremoved = re.sub('\s+', ' ', cleantitle_pubremoved) #remove spaces > 1 + logger.fdebug('blank brackets removed: ' + str(cleantitle_pubremoved)) + #reset the values to initial without the publisher in the title + m = re.findall('[^()]+', cleantitle_pubremoved) + lenm = len(m) + cnt = 0 + yearmatch = "false" + continue + if 'digital' in m[cnt] and len(m[cnt]) == 7: + logger.fdebug("digital edition detected") + pass + if ' of ' in m[cnt]: + logger.fdebug("mini-series detected : " + str(m[cnt])) + result_of = m[cnt] + if 'cover' in m[cnt]: + logger.fdebug("covers detected: " + str(m[cnt])) + result_comcovers = m[cnt] + for ripper in ripperlist: + if ripper in m[cnt]: + logger.fdebug("Scanner detected: " + str(m[cnt])) + result_comscanner = m[cnt] + cnt+=1 + + if yearmatch == "false": continue + + splitit = [] + watchcomic_split = [] + logger.fdebug("original nzb comic and issue: " + str(comic_andiss)) + #changed this from '' to ' ' + comic_iss_b4 = re.sub('[\-\:\,\?]', ' ', str(comic_andiss)) + comic_iss = comic_iss_b4.replace('.',' ') + #if issue_except: comic_iss = re.sub(issue_except.lower(), '', comic_iss) + logger.fdebug("adjusted nzb comic and issue: " + str(comic_iss)) + splitit = comic_iss.split(None) + #something happened to dognzb searches or results...added a '.' in place of spaces + #screwed up most search results with dognzb. Let's try to adjust. + #watchcomic_split = findcomic[findloop].split(None) + + if splitit[(len(splitit)-1)].isdigit(): + #compares - if the last digit and second last digit are #'s seperated by spaces assume decimal + comic_iss = splitit[(len(splitit)-1)] + splitst = len(splitit) - 1 + if splitit[(len(splitit)-2)].isdigit(): + # for series that have a digit at the end, it screws up the logistics. + i = 1 + chg_comic = splitit[0] + while (i < (len(splitit)-1)): + chg_comic = chg_comic + " " + splitit[i] + i+=1 + logger.fdebug("chg_comic:" + str(chg_comic)) + findcomic_chksplit = re.sub('[\-\:\,\.\?]', ' ', findcomic) + findcomic_chksplit = re.sub('[\&]', 'and', findcomic_chksplit) + findcomic_chksplit = re.sub('[\s]', '', findcomic_chksplit) + chg_comic = re.sub('[\-\:\,\.\?]', ' ', chg_comic) + chg_comic = re.sub('[\&]', 'and', chg_comic) + chg_comic = re.sub('[\s]', '', chg_comic) + logger.fdebug('chg_comic: ' + chg_comic.upper()) + logger.fdebug('findcomic_chksplit: ' + findcomic_chksplit.upper()) + if chg_comic.upper() in findcomic_chksplit.upper(): + logger.fdebug("series contains numerics...adjusting..") + else: + changeup = "." + splitit[(len(splitit)-1)] + logger.fdebug("changeup to decimal: " + str(changeup)) + comic_iss = splitit[(len(splitit)-2)] + "." + comic_iss + splitst = len(splitit) - 2 + else: + #if the issue is alphanumeric (ie. 15AU, 12A) it'll error. 
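
The block that follows splits an alphanumeric issue like '15AU' at the first alpha character so the numeric part and the suffix can be handled separately; a compact sketch of the split:

def split_alpha_issue(token):
    # '15AU' -> ('15', 'AU'); '12A' -> ('12', 'A'); '22' -> ('22', None)
    for i, ch in enumerate(token):
        if ch.isalpha():
            return (token[:i], token[i:])
    return (token, None)

print split_alpha_issue('15AU')
print split_alpha_issue('22')
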
+ tmpiss = splitit[(len(splitit)-1)] + i = 0 + alphas = None + a_issno = None + while (i < len(tmpiss)): + if tmpiss[i].isalpha(): + #take first occurance of alpha in string and carry it through + alphas = tmpiss[i:].rstrip() + a_issno = tmpiss[:i].rstrip() + break + i+=1 + logger.fdebug("alphas: " + str(alphas)) + logger.fdebug("a_issno: " + str(a_issno)) + if alphas is None: + # if the nzb name doesn't follow the series-issue-year format even closely..ignore nzb + logger.fdebug("invalid naming format of nzb detected - cannot properly determine issue") + continue + else: + if a_issno == '' and alphas is not None: + #print 'issno & alphas blank' + #print 'splitit: ' + splitit[(len(splitit)-2)] + #print 'splitit: ' + splitit[(len(splitit)-1)] + #if there' a space between the issue & alpha, join them. + findstart = thisentry.find(splitit[(len(splitit)-1)]) + #print 'thisentry : ' + thisentry + #print 'decimal location : ' + str(findstart) + if thisentry[findstart-1] == '.': + comic_iss = splitit[(len(splitit)-2)] + '.' + splitit[(len(splitit)-1)] + else: + comic_iss = splitit[(len(splitit)-2)] + splitit[(len(splitit)-1)] + logger.fdebug('comic_iss is : ' + str(comic_iss)) + splitst = len(splitit) - 2 + else: + comic_iss = tmpiss + splitst = len(splitit) - 1 + logger.fdebug("adjusting from: " + str(comic_iss_b4) + " to: " + str(comic_iss)) + #bmm = re.findall('v\d', comic_iss) + #if len(bmm) > 0: splitst = len(splitit) - 2 + #else: splitst = len(splitit) - 1 + + # make sure that things like - in watchcomic are accounted for when comparing to nzb. + findcomic = re.sub('[\/]', ' ', findcomic) + watchcomic_split = helpers.cleanName(str(findcomic)) + if '&' in watchcomic_split: watchcomic_split = re.sub('[/&]','and', watchcomic_split) + watchcomic_nonsplit = re.sub('[\-\:\,\.\?]', ' ', watchcomic_split) + watchcomic_split = watchcomic_nonsplit.split(None) + + logger.fdebug(str(splitit) + " nzb series word count: " + str(splitst)) + logger.fdebug(str(watchcomic_split) + " watchlist word count: " + str(len(watchcomic_split))) + #account for possible version inclusion here and annual inclusions. + cvers = "false" + annualize = "false" + if 'annual' in ComicName.lower(): + logger.fdebug("IssueID of : " + str(IssueID) + " - This is an annual...let's adjust.") + annualize = "true" + #splitst = splitst - 1 + + for tstsplit in splitit: + if tstsplit.lower().startswith('v') and tstsplit[1:].isdigit(): + logger.fdebug("this has a version #...let's adjust") + if len(tstsplit[1:]) == 4: #v2013 + logger.fdebug("Version detected as " + str(tstsplit)) + vers4year = "yes" #re.sub("[^0-9]", " ", str(ct)) #remove the v + elif len(tstsplit[1:]) == 1: #v2 + logger.fdebug("Version detected as " + str(tstsplit)) + vers4vol = str(tstsplit) + elif tstsplit[1:].isdigit() and len(tstsplit) < 4: + logger.fdebug('Version detected as ' +str(tstsplit)) + vers4vol = str(tstsplit) + else: + logger.fdebug("error - unknown length for : " + str(tstsplit)) + logger.fdebug("volume detection commencing - adjusting length.") + cvers = "true" + splitst = splitst - 1 + break + + #do an initial check + initialchk = 'ok' + if (splitst) != len(watchcomic_split): + logger.fdebug("incorrect comic lengths...not a match") + #because the word 'the' can appear anywhere and really mess up matches... 
+# if str(splitit[0]).lower() == "the" or str(watchcomic_split[0]).lower() == "the":
+# if str(splitit[0]).lower() == "the":
+ for tstsplit in splitit:
+ if tstsplit.lower() == 'the':
+ logger.fdebug("THE word detected in found comic...attempting to adjust pattern matching")
+ #print comic_iss_b4
+ #print comic_iss_b4[4:]
+ #splitit = comic_iss_b4[4:].split(None)
+ cissb4this = re.sub("\\bthe\\b", "", comic_iss_b4)
+ splitit = cissb4this.split(None)
+ splitst = splitst - 1 #remove 'the' from start
+ logger.fdebug("comic is now : " + str(splitit))#str(comic_iss[4:]))
+ #if str(watchcomic_split[0]).lower() == "the":
+ for tstsplit in watchcomic_split:
+ if tstsplit.lower() == 'the':
+ logger.fdebug("THE word detected in watchcomic - attempting to adjust match.")
+ #wtstart = watchcomic_nonsplit[4:]
+ #watchcomic_split = wtstart.split(None)
+ wtstart = re.sub("\\bthe\\b", "", watchcomic_nonsplit)
+ watchcomic_split = wtstart.split(None)
+ logger.fdebug("new watchcomic string:" + str(watchcomic_split))
+ initialchk = 'no'
+ else:
+ initialchk = 'ok'
+
+ logger.fdebug("splitst : " + str(splitst))
+ logger.fdebug("len-watchcomic : " + str(len(watchcomic_split)))
+ if (splitst) != len(watchcomic_split) and initialchk == 'no':
+ logger.fdebug("incorrect comic lengths after removal...not a match.")
+ else:
+ logger.fdebug("length match..proceeding")
+ n = 0
+ scount = 0
+ logger.fdebug("search-length: " + str(splitst))
+ logger.fdebug("Watchlist-length: " + str(len(watchcomic_split)))
+ if cvers == "true": splitst = splitst + 1
+ while ( n <= (splitst)-1 ):
+ logger.fdebug("splitit: " + str(splitit[n]))
+ logger.fdebug("scount : " + str(scount))
+ if n < (splitst) and n < len(watchcomic_split):
+ logger.fdebug(str(n) + " Comparing: " + str(watchcomic_split[n]) + " .to. " + str(splitit[n]))
+ if '+' in watchcomic_split[n]:
+ watchcomic_split[n] = re.sub('\+', '', str(watchcomic_split[n])) #'+' must be escaped - a bare '+' is an invalid regex
+ if str(watchcomic_split[n].lower()) in str(splitit[n].lower()) and len(watchcomic_split[n]) >= len(splitit[n]):
+ logger.fdebug("word matched on : " + str(splitit[n]))
+ scount+=1
+ #elif ':' in splitit[n] or '-' in splitit[n]:
+ # splitrep = splitit[n].replace('-', '')
+ # print ("non-character keyword...skipped on " + splitit[n])
+ elif str(splitit[n].lower()).startswith('v'):
+ logger.fdebug("possible versioning..checking")
+ #we hit a versioning # - account for it
+ if splitit[n][1:].isdigit():
+ logger.fdebug("watch comicversion is " + str(ComicVersion))
+ fndcomicversion = str(splitit[n])
+ logger.fdebug("version found: " + str(fndcomicversion))
+ logger.fdebug("vers4year: " + str(vers4year))
+ logger.fdebug("vers4vol: " + str(vers4vol))
+ if vers4year != "no" or vers4vol != "no":
+
+ #if the volume is None, assume it's a V1 to increase % hits
+ if ComVersChk == 0:
+ D_ComicVersion = 1
+ else:
+ D_ComicVersion = ComVersChk
+
+ F_ComicVersion = re.sub("[^0-9]", "", fndcomicversion)
+ #if this is a one-off, SeriesYear will be None and cause errors. 
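
The comparison loop above counts positional word matches, and the code below turns the count into a percentage with an 80% threshold. The percentage has to multiply before dividing (or use floats): in Python 2, wordcnt/totalcnt on plain ints truncates to 0 or 1. A compact sketch of the scoring with that pitfall avoided:

def match_percent(found_words, watch_words):
    # count positionally-matching words, then score as a float percentage
    matched = 0
    for found, watch in zip(found_words, watch_words):
        if watch.lower() in found.lower() and len(watch) >= len(found):
            matched += 1
    total = len(watch_words)
    return (matched * 100.0) / total if total else 0.0

print match_percent(['amazing', 'spider-man'], ['amazing', 'spider-man']) >= 80   # True -> a match
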
+ if SeriesYear is None:
+ S_ComicVersion = 0
+ else:
+ S_ComicVersion = str(SeriesYear)
+ logger.fdebug("FCVersion: " + str(F_ComicVersion))
+ logger.fdebug("DCVersion: " + str(D_ComicVersion))
+ logger.fdebug("SCVersion: " + str(S_ComicVersion))
+
+ #here's the catch, sometimes annuals get posted as the Pub Year
+ # instead of the Series they belong to (V2012 vs V2013)
+ if annualize == "true" and int(ComicYear) == int(F_ComicVersion):
+ logger.fdebug("We matched on versions for annuals " + str(fndcomicversion))
+ scount+=1
+
+ elif int(F_ComicVersion) == int(D_ComicVersion) or int(F_ComicVersion) == int(S_ComicVersion):
+ logger.fdebug("We matched on versions..." + str(fndcomicversion))
+ scount+=1
+ else:
+ logger.fdebug("Versions wrong. Ignoring possible match.")
+ scount = 0
+ else:
+ logger.fdebug("Comic / Issue section")
+ if splitit[n].isdigit():
+ logger.fdebug("issue detected")
+ #comiss = splitit[n]
+ comicNAMER = n - 1
+ comNAME = splitit[0]
+ cmnam = 1
+ while (cmnam <= comicNAMER):
+ comNAME = str(comNAME) + " " + str(splitit[cmnam])
+ cmnam+=1
+ logger.fdebug("comic: " + str(comNAME))
+ else:
+ logger.fdebug("non-match for: "+ str(splitit[n]))
+ pass
+ n+=1
+ #set the match threshold to 80% (for now)
+ # if it's less than 80% consider it a non-match and discard.
+ #splitit has to splitit-1 because last position is issue.
+ wordcnt = int(scount)
+ logger.fdebug("scount:" + str(wordcnt))
+ totalcnt = int(splitst)
+ logger.fdebug("splitit-len:" + str(totalcnt))
+ try:
+ spercent = (wordcnt * 100) / totalcnt #multiply before dividing - integer division would truncate to 0 or 100
+ except ZeroDivisionError:
+ spercent = 0
+ logger.fdebug("we got " + str(spercent) + " percent.")
+ if int(spercent) >= 80:
+ logger.fdebug("it's a go captain... - we matched " + str(spercent) + "%!")
+ if int(spercent) < 80:
+ logger.fdebug("failure - we only got " + str(spercent) + "% right!")
+ continue
+ logger.fdebug("this should be a match!")
+ logger.fdebug("issue we are looking for is : " + str(findcomiciss))
+ logger.fdebug("integer value of issue we are looking for : " + str(intIss))
+
+ fnd_iss_except = None
+ logger.fdebug("issue we found is : " + str(comic_iss))
+ comintIss = helpers.issuedigits(comic_iss)
+ logger.fdebug("integer value of issue we found : " + str(comintIss))
+
+ #issue comparison now as well
+ if int(intIss) == int(comintIss):
+ #check if nzb is in do not download list ;)
+ if nzbprov == 'experimental':
+ #id is located after the /download/ portion
+ url_parts = urlparse.urlparse(entry['link'])
+ path_parts = url_parts[2].rpartition('/')
+ nzbtempid = path_parts[0].rpartition('/')
+ nzblen = len(nzbtempid)
+ nzbid = nzbtempid[nzblen-1]
+ elif nzbprov == 'CBT':
+ url_parts = urlparse.urlparse(entry['link'])
+ nzbtemp = url_parts[4] # get the query parameter string
+ nzbtemp = re.sub('torrent=', '', nzbtemp).rstrip()
+ nzbid = re.sub('.torrent', '', nzbtemp).rstrip()
+ elif nzbprov == 'KAT':
+ url_parts = urlparse.urlparse(entry['link'])
+ path_parts = url_parts[2].rpartition('/')
+ nzbtempid = path_parts[2]
+ nzbid = re.sub('.torrent', '', nzbtempid).rstrip()
+ elif nzbprov == 'nzb.su':
+ pass
+ elif nzbprov == 'dognzb':
+ pass
+ elif nzbprov == 'newznab':
+ #if in format of http://newznab/getnzb/.nzb&i=1&r=apikey
+ nzbid = os.path.splitext(entry['link'])[0].rsplit('/', 1)[1]
+
+ logger.fdebug('issues match!')
+ logger.info(u"Found " + ComicName + " (" + str(comyear) + ") issue: " + str(IssueNumber) + " using " + str(tmpprov) )
+ ## -- inherit issue. Comic year is non-standard. 
nzb year is the year + ## -- comic was printed, not the start year of the comic series and + ## -- thus the deciding component if matches are correct or not + linkstart = os.path.splitext(entry['link'])[0] + #following is JUST for nzb.su + if nzbprov == 'nzb.su' or nzbprov == 'newznab': + linkit = os.path.splitext(entry['link'])[1] + if mylar.USE_SABNZBD: + linkit = linkit.replace("&", "%26") + logger.fdebug('new linkit:' + linkit) + linkapi = str(linkstart) + str(linkit) + else: + # this should work for every other provider + linkstart = linkstart.replace("&", "%26") + linkapi = str(linkstart) + logger.fdebug("link given by: " + str(nzbprov)) + #logger.fdebug("link: " + str(linkstart)) + #logger.fdebug("linkforapi: " + str(linkapi)) + #here we distinguish between rename and not. + #blackhole functinality--- + #let's download the file to a temporary cache. + sent_to = None + if mylar.USE_BLACKHOLE and nzbprov != 'CBT' and nzbprov != 'KAT': + logger.fdebug("using blackhole directory at : " + str(mylar.BLACKHOLE_DIR)) + if os.path.exists(mylar.BLACKHOLE_DIR): + #pretty this biatch up. + BComicName = re.sub('[\:\,\/\?]', '', str(ComicName)) + Bl_ComicName = re.sub('[\&]', 'and', str(BComicName)) + filenamenzb = str(re.sub(" ", ".", str(Bl_ComicName))) + "." + str(IssueNumber) + ".(" + str(comyear) + ").nzb" + # Add a user-agent + request = urllib2.Request(linkapi) #(str(mylar.BLACKHOLE_DIR) + str(filenamenzb)) + request.add_header('User-Agent', str(mylar.USER_AGENT)) + try: + opener = helpers.urlretrieve(urllib2.urlopen(request), str(mylar.BLACKHOLE_DIR) + str(filenamenzb)) + except Exception, e: + logger.warn('Error fetching data from %s: %s' % (nzbprov, e)) + return + logger.fdebug("filename saved to your blackhole as : " + str(filenamenzb)) + logger.info(u"Successfully sent .nzb to your Blackhole directory : " + str(mylar.BLACKHOLE_DIR) + str(filenamenzb) ) + extensions = ('.cbr', '.cbz') + + if filenamenzb.lower().endswith(extensions): + fd, ext = os.path.splitext(filenamenzb) + logger.fdebug("Removed extension from nzb: " + ext) + nzbname = re.sub(str(ext), '', str(filenamenzb)) + logger.fdebug("nzb name to be used for post-processing is : " + str(nzbname)) + sent_to = "your Blackhole Directory" + #end blackhole + elif nzbprov == 'CBT' or nzbprov == 'KAT': + logger.fdebug("sending .torrent to watchdir.") + logger.fdebug("ComicName:" + ComicName) + logger.fdebug("link:" + entry['link']) + logger.fdebug("Torrent Provider:" + nzbprov) + foundc = "yes" + + #let's change all space to decimals for simplicity + nzbname = re.sub(" ", ".", str(entry['title'])) + #gotta replace & or escape it + nzbname = re.sub("\&", 'and', str(nzbname)) + nzbname = re.sub('[\,\:\?]', '', str(nzbname)) + if nzbname.lower().endswith('.torrent'): + nzbname = re.sub('.torrent', '', nzbname) + rcheck = rsscheck.torsend2client(ComicName, IssueNumber, comyear, entry['link'], nzbprov) + if rcheck == "fail": + logger.error("Unable to send torrent - check logs and settings.") + return + if mylar.TORRENT_LOCAL: + sent_to = "your local Watch folder" + else: + sent_to = "your seedbox Watch folder" + else: + tmppath = mylar.CACHE_DIR + if os.path.exists(tmppath): + logger.fdebug("cache directory successfully found at : " + str(tmppath)) + pass + else: + #let's make the dir. 
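
Blackhole mode above amounts to fetching the .nzb with the configured User-Agent and dropping it into the watch directory. A sketch with a placeholder URL and directory:

import os
import urllib2

def save_to_blackhole(link, blackhole_dir, filename, user_agent='Mylar'):
    # fetch with a custom User-Agent, then write the payload to the watch dir
    request = urllib2.Request(link)
    request.add_header('User-Agent', user_agent)
    payload = urllib2.urlopen(request).read()
    target = os.path.join(blackhole_dir, filename)
    with open(target, 'wb') as fh:
        fh.write(payload)
    return target

# save_to_blackhole('http://indexer.example/getnzb/abc.nzb', '/tmp/blackhole',
#                   'Invincible.110.(2014).nzb')
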
+ logger.fdebug("couldn't locate cache directory, attempting to create at : " + str(mylar.CACHE_DIR))
+ try:
+ os.makedirs(str(mylar.CACHE_DIR))
+ logger.info(u"Cache Directory successfully created at: " + str(mylar.CACHE_DIR))
+
+ except OSError, e:
+ if e.errno != errno.EEXIST:
+ raise
+ logger.fdebug("link to retrieve via api:" + str(helpers.apiremove(linkapi,'$')))
+
+ #let's change all space to decimals for simplicity
+ nzbname = re.sub(" ", ".", str(entry['title']))
+ #gotta replace & or escape it
+ nzbname = re.sub("\&", 'and', str(nzbname))
+ nzbname = re.sub('[\,\:\?]', '', str(nzbname))
+ extensions = ('.cbr', '.cbz')
+
+ if nzbname.lower().endswith(extensions):
+ fd, ext = os.path.splitext(nzbname)
+ logger.fdebug("Removed extension from nzb: " + ext)
+ nzbname = re.sub(str(ext), '', str(nzbname))
+
+ logger.fdebug("nzbname used for post-processing:" + str(nzbname))
+
+# #test nzb.get
+ if mylar.USE_NZBGET:
+ from xmlrpclib import ServerProxy
+ #check https before http - 'https'[:4] is also 'http', so the order matters
+ if mylar.NZBGET_HOST[:5] == 'https':
+ tmpapi = "https://"
+ nzbget_host = mylar.NZBGET_HOST[8:]
+ elif mylar.NZBGET_HOST[:4] == 'http':
+ tmpapi = "http://"
+ nzbget_host = mylar.NZBGET_HOST[7:]
+ else:
+ logger.error("You have an invalid nzbget hostname specified. Exiting")
+ return
+ tmpapi = str(tmpapi) + str(mylar.NZBGET_USERNAME) + ":" + str(mylar.NZBGET_PASSWORD)
+ tmpapi = str(tmpapi) + "@" + str(nzbget_host) + ":" + str(mylar.NZBGET_PORT) + "/xmlrpc"
+ server = ServerProxy(tmpapi)
+ send_to_nzbget = server.appendurl(nzbname + ".nzb", str(mylar.NZBGET_CATEGORY), int(nzbgetpriority), True, linkapi)
+ sent_to = "NZBGet"
+ if send_to_nzbget is True:
+ logger.info("Successfully sent nzb to NZBGet!")
+ else:
+ logger.info("Unable to send nzb to NZBGet - check your configs.")
+# #end nzb.get test
+
+ elif mylar.USE_SABNZBD:
+ # let's build the send-to-SAB string now:
+ tmpapi = str(mylar.SAB_HOST)
+ logger.fdebug("send-to-SAB host string: " + str(tmpapi))
+ # changed to just work with direct links now... 
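
This branch hands a snatch to one of two downloaders: NZBGet (above) over XML-RPC, or SABnzbd (below) over its HTTP API. Two standalone sketches with placeholder hosts and credentials; the appendurl argument order mirrors the call above, and the SAB string appends the apikey last so it can be scrubbed from logs:

from xmlrpclib import ServerProxy
import urllib

def send_to_nzbget(nzb_url, nzbname, host='localhost', port=6789,
                   username='nzbget', password='tegbzn6789',
                   category='comics', priority=0):
    # NZBGet's XML-RPC endpoint lives at /xmlrpc, basic-auth in the URL;
    # appendurl(filename, category, priority, add_to_top, url) returns True on success
    rpc = ServerProxy('http://%s:%s@%s:%d/xmlrpc' % (username, password, host, port))
    return rpc.appendurl(nzbname + '.nzb', category, priority, True, nzb_url)

def build_sab_addurl(sab_host, nzb_link, apikey,
                     priority=None, category=None, script=None):
    # host + /api?mode=addurl + link + optional knobs, apikey appended last
    url = sab_host.rstrip('/') + '/api?mode=addurl&name=' + urllib.quote_plus(nzb_link)
    if priority is not None:
        url += '&priority=' + str(priority)
    if category:
        url += '&cat=' + category
    if script:
        url += '&script=' + script
    return url + '&apikey=' + apikey

api = build_sab_addurl('http://localhost:8080/sabnzbd',
                       'http://indexer.example/get/abc.nzb', 'XXXX',
                       priority=1, category='comics', script='ComicRN.py')
print api.replace('XXXX', '[scrubbed]')   # log only the scrubbed form
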
+ SABtype = "/api?mode=addurl&name="
+ fileURL = str(linkapi)
+ tmpapi = tmpapi + str(SABtype)
+ logger.fdebug("...selecting API type: " + str(tmpapi))
+ tmpapi = tmpapi + str(fileURL)
+
+ logger.fdebug("...attaching nzb provider link: " + str(helpers.apiremove(tmpapi,'$')))
+ # determine SAB priority
+ if mylar.SAB_PRIORITY:
+ tmpapi = tmpapi + "&priority=" + str(sabpriority)
+ logger.fdebug("...setting priority: " + str(helpers.apiremove(tmpapi,'&')))
+ # if category is blank, let's adjust
+ if mylar.SAB_CATEGORY:
+ tmpapi = tmpapi + "&cat=" + str(mylar.SAB_CATEGORY)
+ logger.fdebug("...attaching category: " + str(helpers.apiremove(tmpapi,'&')))
+ if mylar.RENAME_FILES or mylar.POST_PROCESSING:
+ tmpapi = tmpapi + "&script=ComicRN.py"
+ logger.fdebug("...attaching rename script: " + str(helpers.apiremove(tmpapi,'&')))
+ #final build of send-to-SAB
+ tmpapi = tmpapi + "&apikey=" + str(mylar.SAB_APIKEY)
+
+ logger.fdebug("Completed send-to-SAB link: " + str(helpers.apiremove(tmpapi,'&')))
+
+ try:
+ urllib2.urlopen(tmpapi)
+ except urllib2.URLError:
+ logger.error(u"Unable to send nzb file to SABnzbd")
+ return
+
+ sent_to = "SABnzbd+"
+ logger.info(u"Successfully sent nzb file to SABnzbd")
+
+ #annualize holds the string "true"/"false" - compare against that, not the boolean True
+ if annualize == "true":
+ modcomicname = ComicName + ' Annual'
+ else:
+ modcomicname = ComicName
+ if mylar.PROWL_ENABLED and mylar.PROWL_ONSNATCH:
+ logger.info(u"Sending Prowl notification")
+ prowl = notifiers.PROWL()
+ prowl.notify(nzbname,"Download started using " + sent_to)
+ if mylar.NMA_ENABLED and mylar.NMA_ONSNATCH:
+ logger.info(u"Sending NMA notification")
+ nma = notifiers.NMA()
+ snline = modcomicname + ' (' + comyear + ') - Issue #' + IssueNumber + ' snatched!'
+ nma.notify(snline=snline,snatched_nzb=nzbname,sent_to=sent_to,prov=nzbprov)
+ if mylar.PUSHOVER_ENABLED and mylar.PUSHOVER_ONSNATCH:
+ logger.info(u"Sending Pushover notification")
+ pushover = notifiers.PUSHOVER()
+ pushover.notify(nzbname,"Download started using " + sent_to)
+ if mylar.BOXCAR_ENABLED and mylar.BOXCAR_ONSNATCH:
+ logger.info(u"Sending Boxcar notification")
+ boxcar = notifiers.BOXCAR()
+ boxcar.notify(snatched_nzb=nzbname,sent_to=sent_to)
+ if mylar.PUSHBULLET_ENABLED and mylar.PUSHBULLET_ONSNATCH:
+ logger.info(u"Sending Pushbullet notification")
+ pushbullet = notifiers.PUSHBULLET()
+ snline = modcomicname + ' (' + comyear + ') - Issue #' + IssueNumber + ' snatched!'
+ pushbullet.notify(snline=snline,snatched=nzbname,sent_to=sent_to,prov=nzbprov)
+
+ foundc = "yes"
+ done = True
+ break
+ else:
+ log2file = log2file + "issues don't match.." + "\n"
+ foundc = "no"
+ if done == True:
+ cmloopit = 1 #let's make sure it STOPS searching after a successful match.
+ break
+ cmloopit-=1
+ if cmloopit < 1 and c_alpha is not None and seperatealpha == "no" and foundc == "no":
+ logger.info("Alphanumerics detected within IssueNumber. Separating from Issue # and re-trying.")
+ cmloopit = origcmloopit
+ seperatealpha = "yes"
+ findloop+=1
+ if foundc == "yes":
+ foundcomic.append("yes")
+ logger.fdebug("Found matching comic...preparing to send to Updater with IssueID: " + str(IssueID) + " and nzbname: " + str(nzbname))
+ updater.nzblog(IssueID, nzbname, ComicName, SARC, IssueArcID)
+ prov_count = 0
+ #break
+ return foundc
+ elif foundc == "no" and prov_count == 0:
+ foundcomic.append("no")
+ #logger.fdebug('Could not find a matching comic using ' + str(tmpprov))
+ if IssDateFix == "no":
+ #logger.info('Could not find Issue ' + str(IssueNumber) + ' of ' + ComicName + '(' + str(comyear) + ') using ' + str(tmpprov) + '. 
Status kept as wanted.' ) + break + return foundc + +def searchforissue(issueid=None, new=False, rsscheck=None): + myDB = db.DBConnection() + + if not issueid or rsscheck: + + if rsscheck: + logger.info(u"Initiating RSS Search Scan at scheduled interval of " + str(mylar.RSS_CHECKINTERVAL) + " minutes.") + else: + logger.info(u"Initiating NZB Search scan at requested interval of " + str(mylar.SEARCH_INTERVAL) + " minutes.") + + myDB = db.DBConnection() + + stloop = 1 + results = [] + + if mylar.ANNUALS_ON: + stloop+=1 + while (stloop > 0): + if stloop == 1: + issues_1 = myDB.select('SELECT * from issues WHERE Status="Wanted"') + for iss in issues_1: + results.append({'ComicID': iss['ComicID'], + 'IssueID': iss['IssueID'], + 'Issue_Number': iss['Issue_Number'], + 'IssueDate': iss['IssueDate'], + 'StoreDate': iss['ReleaseDate'], + 'mode': 'want' + }) + elif stloop == 2: + issues_2 = myDB.select('SELECT * from annuals WHERE Status="Wanted"') + for iss in issues_2: + results.append({'ComicID': iss['ComicID'], + 'IssueID': iss['IssueID'], + 'Issue_Number': iss['Issue_Number'], + 'IssueDate': iss['IssueDate'], + 'StoreDate': iss['ReleaseDate'], #need to replace with Store date + 'mode': 'want_ann' + }) + stloop-=1 + + new = True + + for result in results: + comic = myDB.selectone("SELECT * from comics WHERE ComicID=? AND ComicName != 'None'", [result['ComicID']]).fetchone() + if comic is None: + logger.fdebug(str(result['ComicID']) + ' has no associated comic information. Skipping searching for this series.') + continue + if result['StoreDate'] == '0000-00-00': + logger.fdebug(str(result['ComicID']) + ' has an invalid Store Date. Skipping searching for this series.') + continue + foundNZB = "none" + SeriesYear = comic['ComicYear'] + Publisher = comic['ComicPublisher'] + AlternateSearch = comic['AlternateSearch'] + IssueDate = result['IssueDate'] + StoreDate = result['StoreDate'] + UseFuzzy = comic['UseFuzzy'] + ComicVersion = comic['ComicVersion'] + if result['IssueDate'] == None: + ComicYear = comic['ComicYear'] + else: + ComicYear = str(result['IssueDate'])[:4] + mode = result['mode'] + if (mylar.NZBSU or mylar.DOGNZB or mylar.EXPERIMENTAL or mylar.NEWZNAB or mylar.ENABLE_KAT or mylar.ENABLE_CBT) and (mylar.USE_SABNZBD or mylar.USE_NZBGET or mylar.ENABLE_TORRENTS or mylar.USE_BLACKHOLE): + foundNZB, prov = search_init(comic['ComicName'], result['Issue_Number'], str(ComicYear), comic['ComicYear'], Publisher, IssueDate, StoreDate, result['IssueID'], AlternateSearch, UseFuzzy, ComicVersion, SARC=None, IssueArcID=None, mode=mode, rsscheck=rsscheck, ComicID=result['ComicID']) + if foundNZB == "yes": + #print ("found!") + updater.foundsearch(result['ComicID'], result['IssueID'], mode=mode, provider=prov) + else: + pass + #print ("not found!") + + if rsscheck: + logger.info('Completed RSS Search scan') + else: + logger.info('Completed NZB Search scan') + + + else: + result = myDB.selectone('SELECT * FROM issues where IssueID=?', [issueid]).fetchone() + mode = 'want' + if result is None: + result = myDB.selectone('SELECT * FROM annuals where IssueID=?', [issueid]).fetchone() + mode = 'want_ann' + if result is None: + logger.info("Unable to locate IssueID - you probably should delete/refresh the series.") + return + ComicID = result['ComicID'] + comic = myDB.selectone('SELECT * FROM comics where ComicID=?', [ComicID]).fetchone() + SeriesYear = comic['ComicYear'] + Publisher = comic['ComicPublisher'] + AlternateSearch = comic['AlternateSearch'] + IssueDate = result['IssueDate'] + StoreDate = 
result['ReleaseDate'] + UseFuzzy = comic['UseFuzzy'] + ComicVersion = comic['ComicVersion'] + if result['IssueDate'] == None: + IssueYear = comic['ComicYear'] + else: + IssueYear = str(result['IssueDate'])[:4] + + foundNZB = "none" + if (mylar.NZBSU or mylar.DOGNZB or mylar.EXPERIMENTAL or mylar.NEWZNAB or mylar.ENABLE_KAT or mylar.ENABLE_CBT) and (mylar.USE_SABNZBD or mylar.USE_NZBGET or mylar.ENABLE_TORRENTS or mylar.USE_BLACKHOLE): + foundNZB, prov = search_init(comic['ComicName'], result['Issue_Number'], str(IssueYear), comic['ComicYear'], Publisher, IssueDate, StoreDate, result['IssueID'], AlternateSearch, UseFuzzy, ComicVersion, SARC=None, IssueArcID=None, mode=mode, rsscheck=rsscheck, ComicID=result['ComicID']) + if foundNZB == "yes": + logger.fdebug("I found " + comic['ComicName'] + ' #:' + str(result['Issue_Number'])) + updater.foundsearch(ComicID=result['ComicID'], IssueID=result['IssueID'], mode=mode, provider=prov) + else: + pass + #print ("not found!") + return + +def searchIssueIDList(issuelist): + myDB = db.DBConnection() + for issueid in issuelist: + issue = myDB.selectone('SELECT * from issues WHERE IssueID=?', [issueid]).fetchone() + mode = 'want' + if issue is None: + issue = myDB.selectone('SELECT * from annuals WHERE IssueID=?', [issueid]).fetchone() + mode = 'want_ann' + if issue is None: + logger.info("unable to determine IssueID - perhaps you need to delete/refresh series?") + break + comic = myDB.selectone('SELECT * from comics WHERE ComicID=?', [issue['ComicID']]).fetchone() + print ("Checking for issue: " + str(issue['Issue_Number'])) + foundNZB = "none" + SeriesYear = comic['ComicYear'] + AlternateSearch = comic['AlternateSearch'] + Publisher = comic['ComicPublisher'] + UseFuzzy = comic['UseFuzzy'] + ComicVersion = comic['ComicVersion'] + if issue['IssueDate'] == None: + IssueYear = comic['ComicYear'] + else: + IssueYear = str(issue['IssueDate'])[:4] + if (mylar.NZBSU or mylar.DOGNZB or mylar.EXPERIMENTAL or mylar.NEWZNAB or mylar.ENABLE_CBT or mylar.ENABLE_KAT) and (mylar.USE_SABNZBD or mylar.USE_NZBGET or mylar.ENABLE_TORRENTS or mylar.USE_BLACKHOLE): + foundNZB, prov = search_init(comic['ComicName'], issue['Issue_Number'], str(IssueYear), comic['ComicYear'], Publisher, issue['IssueDate'], issue['ReleaseDate'], issue['IssueID'], AlternateSearch, UseFuzzy, ComicVersion, SARC=None, IssueArcID=None, mode=mode, ComicID=issue['ComicID']) + if foundNZB == "yes": + #print ("found!") + updater.foundsearch(ComicID=issue['ComicID'], IssueID=issue['IssueID'], mode=mode, provider=prov) + else: + pass + #print ("not found!") + + + +def provider_sequence(nzbprovider, torprovider, newznab_hosts): + #provider order sequencing here. 
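
provider_sequence (whose body follows) walks mylar.PROVIDER_ORDER - a list of (position, name) pairs - and emits the enabled providers in that order, pairing newznab entries with their host details. The core idea, reduced to a sketch with made-up provider names:

def order_providers(provider_order, enabled):
    # provider_order: [(0, 'dognzb'), (1, 'KAT'), ...]; enabled: set of active names
    sequence = []
    for position, name in sorted(provider_order):
        if name.lower() in enabled:
            sequence.append(name.lower())
    return sequence

print order_providers([(1, 'KAT'), (0, 'dognzb')], set(['dognzb', 'kat']))
# -> ['dognzb', 'kat']
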
+ newznab_info = [] + prov_order = [] + + nzbproviders_lower = [x.lower() for x in nzbprovider] + + if len(mylar.PROVIDER_ORDER) > 0: + for pr_order in mylar.PROVIDER_ORDER: + logger.fdebug('looking for ' + str(pr_order[1]).lower()) + logger.fdebug('nzbproviders ' + str(nzbproviders_lower)) + logger.fdebug('torproviders ' + str(torprovider)) + if (pr_order[1].lower() in torprovider) or any(pr_order[1].lower() in x for x in nzbproviders_lower): + logger.fdebug('found provider in existing enabled providers.') + if any(pr_order[1].lower() in x for x in nzbproviders_lower): + # this is for nzb providers + for np in nzbprovider: + logger.fdebug('checking against nzb provider: ' + str(np)) + if all( [ 'newznab' in np, pr_order[1].lower() in np.lower() ] ): + logger.fdebug('newznab match against: ' + str(np)) + for newznab_host in newznab_hosts: + logger.fdebug('comparing ' + str(pr_order[1]).lower() + ' against: ' + str(newznab_host[0]).lower()) + if newznab_host[0].lower() == pr_order[1].lower(): + logger.fdebug('sucessfully matched - appending to provider.order sequence') + prov_order.append(np) #newznab_host) + newznab_info.append({"provider": np, + "info": newznab_host}) + break + elif pr_order[1].lower() in np.lower(): + prov_order.append(pr_order[1]) + break + else: + for tp in torprovider: + logger.fdebug('checking against torrent provider: ' + str(tp)) + if (pr_order[1].lower() in tp.lower()): + logger.fdebug('torrent match against: ' + str(tp)) + prov_order.append(tp) #torrent provider + break + + logger.fdebug('sequence is now to start with ' + pr_order[1] + ' at spot #' + str(pr_order[0])) + + return prov_order,newznab_info diff --git a/mylar/updater.py b/mylar/updater.py index 05112488..ee4986f4 100755 --- a/mylar/updater.py +++ b/mylar/updater.py @@ -59,6 +59,16 @@ def dbUpdate(ComicIDList=None): logger.fdebug("Gathering the status of all issues for the series.") issues = myDB.select('SELECT * FROM issues WHERE ComicID=?', [ComicID]) + if not issues: + #if issues are None it's probably a bad refresh/maxed out API that resulted in the issue data + #getting wiped out and not refreshed. Setting whack=True will force a complete refresh. + logger.info('No issue data available. This is Whack.') + whack = True + else: + #check for series that are numerically out of whack (ie. 5/4) + logger.info('Checking how out of whack the series is.') + whack = helpers.havetotals(refreshit=ComicID) + annload = [] #initiate the list here so we don't error out below. if mylar.ANNUALS_ON: @@ -82,85 +92,90 @@ def dbUpdate(ComicIDList=None): myDB.action('DELETE FROM issues WHERE ComicID=?', [ComicID]) myDB.action('DELETE FROM annuals WHERE ComicID=?', [ComicID]) logger.fdebug("Refreshing the series and pulling in new data using only CV.") - mylar.importer.addComictoDB(ComicID,mismatch,calledfrom='dbupdate',annload=annload) - #reload the annuals here. - issues_new = myDB.select('SELECT * FROM issues WHERE ComicID=?', [ComicID]) - annuals = [] - ann_list = [] - if mylar.ANNUALS_ON: - annuals_list = myDB.select('SELECT * FROM annuals WHERE ComicID=?', [ComicID]) - ann_list += annuals_list - issues_new += annuals_list + if whack == False: + mylar.importer.addComictoDB(ComicID,mismatch,calledfrom='dbupdate',annload=annload) + #reload the annuals here. - logger.fdebug("Attempting to put the Status' back how they were.") - icount = 0 - #the problem - the loop below will not match on NEW issues that have been refreshed that weren't present in the - #db before (ie. 
you left Mylar off for abit, and when you started it up it pulled down new issue information) - #need to test if issuenew['Status'] is None, but in a seperate loop below. - fndissue = [] - for issue in issues: - for issuenew in issues_new: - #logger.fdebug(str(issue['Issue_Number']) + ' - issuenew:' + str(issuenew['IssueID']) + ' : ' + str(issuenew['Status'])) - #logger.fdebug(str(issue['Issue_Number']) + ' - issue:' + str(issue['IssueID']) + ' : ' + str(issue['Status'])) - if issuenew['IssueID'] == issue['IssueID'] and issuenew['Status'] != issue['Status']: - ctrlVAL = {"IssueID": issue['IssueID']} - #if the status is None and the original status is either Downloaded / Archived, keep status & stats - if issuenew['Status'] == None and (issue['Status'] == 'Downloaded' or issue['Status'] == 'Archived'): - newVAL = {"Location": issue['Location'], - "ComicSize": issue['ComicSize'], - "Status": issue['Status']} - #if the status is now Downloaded/Snatched, keep status & stats (downloaded only) - elif issuenew['Status'] == 'Downloaded' or issue['Status'] == 'Snatched': - newVAL = {"Location": issue['Location'], - "ComicSize": issue['ComicSize']} - if issuenew['Status'] == 'Downloaded': - newVAL['Status'] = issuenew['Status'] + issues_new = myDB.select('SELECT * FROM issues WHERE ComicID=?', [ComicID]) + annuals = [] + ann_list = [] + if mylar.ANNUALS_ON: + annuals_list = myDB.select('SELECT * FROM annuals WHERE ComicID=?', [ComicID]) + ann_list += annuals_list + issues_new += annuals_list + + logger.fdebug("Attempting to put the Status' back how they were.") + icount = 0 + #the problem - the loop below will not match on NEW issues that have been refreshed that weren't present in the + #db before (ie. you left Mylar off for abit, and when you started it up it pulled down new issue information) + #need to test if issuenew['Status'] is None, but in a seperate loop below. 
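
The hunk above forces whack=True when a series has no issue rows at all (a bad refresh or maxed-out API) and otherwise defers to helpers.havetotals to flag numerically impossible counts like 5/4. The shape of that check, as a sketch (havetotals' real signature and return live in helpers.py):

def is_whack(have, total):
    # no issue rows at all, or more issues on hand than the reported total
    # (ie. 5/4), both force a full re-pull instead of a delta refresh
    if total is None or int(total) == 0:
        return True
    return int(have) > int(total)

print is_whack(5, 4)    # True
print is_whack(3, 12)   # False
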
+ fndissue = [] + for issue in issues: + for issuenew in issues_new: + #logger.fdebug(str(issue['Issue_Number']) + ' - issuenew:' + str(issuenew['IssueID']) + ' : ' + str(issuenew['Status'])) + #logger.fdebug(str(issue['Issue_Number']) + ' - issue:' + str(issue['IssueID']) + ' : ' + str(issue['Status'])) + if issuenew['IssueID'] == issue['IssueID'] and issuenew['Status'] != issue['Status']: + ctrlVAL = {"IssueID": issue['IssueID']} + #if the status is None and the original status is either Downloaded / Archived, keep status & stats + if issuenew['Status'] == None and (issue['Status'] == 'Downloaded' or issue['Status'] == 'Archived'): + newVAL = {"Location": issue['Location'], + "ComicSize": issue['ComicSize'], + "Status": issue['Status']} + #if the status is now Downloaded/Snatched, keep status & stats (downloaded only) + elif issuenew['Status'] == 'Downloaded' or issue['Status'] == 'Snatched': + newVAL = {"Location": issue['Location'], + "ComicSize": issue['ComicSize']} + if issuenew['Status'] == 'Downloaded': + newVAL['Status'] = issuenew['Status'] + else: + newVAL['Status'] = issue['Status'] + + elif issue['Status'] == 'Archived': + newVAL = {"Status": issue['Status'], + "Location": issue['Location'], + "ComicSize": issue['ComicSize']} else: - newVAL['Status'] = issue['Status'] + #change the status to the previous status + newVAL = {"Status": issue['Status']} - elif issue['Status'] == 'Archived': - newVAL = {"Status": issue['Status'], - "Location": issue['Location'], - "ComicSize": issue['ComicSize']} - else: - #change the status to the previous status - newVAL = {"Status": issue['Status']} + if newVAL['Status'] == None: + newVAL = {"Status": "Skipped"} - if newVAL['Status'] == None: - newVAL = {"Status": "Skipped"} + if any(d['IssueID'] == str(issue['IssueID']) for d in ann_list): + #logger.fdebug("annual detected for " + str(issue['IssueID']) + " #: " + str(issue['Issue_Number'])) + myDB.upsert("Annuals", newVAL, ctrlVAL) + else: + #logger.fdebug('#' + str(issue['Issue_Number']) + ' writing issuedata: ' + str(newVAL)) + myDB.upsert("Issues", newVAL, ctrlVAL) + fndissue.append({"IssueID": issue['IssueID']}) + icount+=1 + break + logger.info("In the process of converting the data to CV, I changed the status of " + str(icount) + " issues.") - if any(d['IssueID'] == str(issue['IssueID']) for d in ann_list): - #logger.fdebug("annual detected for " + str(issue['IssueID']) + " #: " + str(issue['Issue_Number'])) - myDB.upsert("Annuals", newVAL, ctrlVAL) - else: - #logger.fdebug('#' + str(issue['Issue_Number']) + ' writing issuedata: ' + str(newVAL)) - myDB.upsert("Issues", newVAL, ctrlVAL) - fndissue.append({"IssueID": issue['IssueID']}) - icount+=1 - break - logger.info("In the process of converting the data to CV, I changed the status of " + str(icount) + " issues.") + issues_new = myDB.select('SELECT * FROM issues WHERE ComicID=? AND Status is NULL', [ComicID]) + if mylar.ANNUALS_ON: + issues_new += myDB.select('SELECT * FROM annuals WHERE ComicID=? AND Status is NULL', [ComicID]) - issues_new = myDB.select('SELECT * FROM issues WHERE ComicID=? AND Status is NULL', [ComicID]) - if mylar.ANNUALS_ON: - issues_new += myDB.select('SELECT * FROM annuals WHERE ComicID=? 
AND Status is NULL', [ComicID]) - - newiss = [] - if mylar.AUTOWANT_UPCOMING: - newstatus = "Wanted" - else: - newstatus = "Skipped" - for iss in issues_new: - newiss.append({"IssueID": iss['IssueID'], + newiss = [] + if mylar.AUTOWANT_UPCOMING: + newstatus = "Wanted" + else: + newstatus = "Skipped" + for iss in issues_new: + newiss.append({"IssueID": iss['IssueID'], "Status": newstatus}) - if len(newiss) > 0: - for newi in newiss: - ctrlVAL = {"IssueID": newi['IssueID']} - newVAL = {"Status": newi['Status']} - #logger.fdebug('writing issuedata: ' + str(newVAL)) - myDB.upsert("Issues", newVAL, ctrlVAL) + if len(newiss) > 0: + for newi in newiss: + ctrlVAL = {"IssueID": newi['IssueID']} + newVAL = {"Status": newi['Status']} + #logger.fdebug('writing issuedata: ' + str(newVAL)) + myDB.upsert("Issues", newVAL, ctrlVAL) - logger.info('I have added ' + str(len(newiss)) + ' new issues for this series that were not present before.') + logger.info('I have added ' + str(len(newiss)) + ' new issues for this series that were not present before.') + + else: + mylar.importer.addComictoDB(ComicID,mismatch,annload=annload) else: mylar.importer.addComictoDB(ComicID,mismatch) diff --git a/mylar/webserve.py b/mylar/webserve.py index 2f9b1268..ab7febfb 100755 --- a/mylar/webserve.py +++ b/mylar/webserve.py @@ -52,6 +52,16 @@ def serve_template(templatename, **kwargs): class WebInterface(object): +# def filter_request(): +# request = cherrypy.request + +# if mylar.HTTPS_FORCE_ON: +# request.base = request.base.replace('http://', 'https://') + +# cherrypy.tools.filter_request = cherrypy.Tool('before_request_body', filter_request) + +# _cp_config = { 'tools.filter_reqeust_on': True } + def index(self): if mylar.SAFESTART: raise cherrypy.HTTPRedirect("manageComics") @@ -473,9 +483,21 @@ class WebInterface(object): else: if mylar.CV_ONETIMER == 1: logger.fdebug("CV_OneTimer option enabled...") - #in order to update to JUST CV_ONLY, we need to delete the issues for a given series so it's a clea$ + #in order to update to JUST CV_ONLY, we need to delete the issues for a given series so it's a clean grab. logger.fdebug("Gathering the status of all issues for the series.") + issues = myDB.select('SELECT * FROM issues WHERE ComicID=?', [ComicID]) + + if not issues: + #if issues are None it's probably a bad refresh/maxed out API that resulted in the issue data + #getting wiped out and not refreshed. Setting whack=True will force a complete refresh. + logger.info('No issue data available. This is Whack.') + whack = True + else: + #check for series that are numerically out of whack (ie. 5/4) + logger.info('Checking how out of whack the series is.') + whack = helpers.havetotals(refreshit=ComicID) + annload = [] #initiate the list here so we don't error out below. @@ -500,86 +522,90 @@ class WebInterface(object): myDB.action('DELETE FROM issues WHERE ComicID=?', [ComicID]) myDB.action('DELETE FROM annuals WHERE ComicID=?', [ComicID]) logger.fdebug("Refreshing the series and pulling in new data using only CV.") - mylar.importer.addComictoDB(ComicID,mismatch,calledfrom='dbupdate',annload=annload) - #reload the annuals here. + if whack == False: + mylar.importer.addComictoDB(ComicID,mismatch,calledfrom='dbupdate',annload=annload) + #reload the annuals here. 
- issues_new = myDB.select('SELECT * FROM issues WHERE ComicID=?', [ComicID]) - annuals = [] - ann_list = [] - if mylar.ANNUALS_ON: - annuals_list = myDB.select('SELECT * FROM annuals WHERE ComicID=?', [ComicID]) - ann_list += annuals_list - issues_new += annuals_list + issues_new = myDB.select('SELECT * FROM issues WHERE ComicID=?', [ComicID]) + annuals = [] + ann_list = [] + if mylar.ANNUALS_ON: + annuals_list = myDB.select('SELECT * FROM annuals WHERE ComicID=?', [ComicID]) + ann_list += annuals_list + issues_new += annuals_list - logger.fdebug("Attempting to put the Status' back how they were.") - icount = 0 - #the problem - the loop below will not match on NEW issues that have been refreshed that weren't present in the - #db before (ie. you left Mylar off for abit, and when you started it up it pulled down new issue information) - #need to test if issuenew['Status'] is None, but in a seperate loop below. - fndissue = [] - for issue in issues: - for issuenew in issues_new: - #logger.fdebug(str(issue['Issue_Number']) + ' - issuenew:' + str(issuenew['IssueID']) + ' : ' + str(issuenew['Status'])) - #logger.fdebug(str(issue['Issue_Number']) + ' - issue:' + str(issue['IssueID']) + ' : ' + str(issue['Status'])) - if issuenew['IssueID'] == issue['IssueID'] and issuenew['Status'] != issue['Status']: - ctrlVAL = {"IssueID": issue['IssueID']} - #if the status is None and the original status is either Downloaded / Archived, keep status & stats - if issuenew['Status'] == None and (issue['Status'] == 'Downloaded' or issue['Status'] == 'Archived'): - newVAL = {"Location": issue['Location'], - "ComicSize": issue['ComicSize'], - "Status": issue['Status']} - #if the status is now Downloaded/Snatched, keep status & stats (downloaded only) - elif issuenew['Status'] == 'Downloaded' or issue['Status'] == 'Snatched': - newVAL = {"Location": issue['Location'], - "ComicSize": issue['ComicSize']} - if issuenew['Status'] == 'Downloaded': - newVAL['Status'] = issuenew['Status'] - else: - newVAL['Status'] = issue['Status'] + logger.fdebug("Attempting to put the Status' back how they were.") + icount = 0 + #the problem - the loop below will not match on NEW issues that have been refreshed that weren't present in the + #db before (ie. you left Mylar off for abit, and when you started it up it pulled down new issue information) + #need to test if issuenew['Status'] is None, but in a seperate loop below. 
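
The restore loop that follows re-applies pre-refresh statuses: a NULL status against a previously Downloaded/Archived row keeps everything, a fresh Downloaded wins over an old Snatched, Archived always keeps its stats, and whatever remains NULL later falls back to Skipped. A sketch of that precedence over simplified dict rows:

def restored_status(old, new):
    # old/new: {'Status': ..., 'Location': ..., 'ComicSize': ...}
    if new['Status'] is None and old['Status'] in ('Downloaded', 'Archived'):
        return dict(old)   # keep everything we knew
    if new['Status'] == 'Downloaded' or old['Status'] == 'Snatched':
        keep = {'Location': old['Location'], 'ComicSize': old['ComicSize']}
        keep['Status'] = new['Status'] if new['Status'] == 'Downloaded' else old['Status']
        return keep
    if old['Status'] == 'Archived':
        return dict(old)
    return {'Status': old['Status'] or 'Skipped'}

print restored_status({'Status': 'Downloaded', 'Location': '/comics/x.cbz', 'ComicSize': 123},
                      {'Status': None, 'Location': None, 'ComicSize': None})
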
+ fndissue = [] + for issue in issues: + for issuenew in issues_new: + #logger.fdebug(str(issue['Issue_Number']) + ' - issuenew:' + str(issuenew['IssueID']) + ' : ' + str(issuenew['Status'])) + #logger.fdebug(str(issue['Issue_Number']) + ' - issue:' + str(issue['IssueID']) + ' : ' + str(issue['Status'])) + if issuenew['IssueID'] == issue['IssueID'] and issuenew['Status'] != issue['Status']: + ctrlVAL = {"IssueID": issue['IssueID']} + #if the status is None and the original status is either Downloaded / Archived, keep status & stats + if issuenew['Status'] == None and (issue['Status'] == 'Downloaded' or issue['Status'] == 'Archived'): + newVAL = {"Location": issue['Location'], + "ComicSize": issue['ComicSize'], + "Status": issue['Status']} + #if the status is now Downloaded/Snatched, keep status & stats (downloaded only) + elif issuenew['Status'] == 'Downloaded' or issue['Status'] == 'Snatched': + newVAL = {"Location": issue['Location'], + "ComicSize": issue['ComicSize']} + if issuenew['Status'] == 'Downloaded': + newVAL['Status'] = issuenew['Status'] + else: + newVAL['Status'] = issue['Status'] - elif issue['Status'] == 'Archived': - newVAL = {"Status": issue['Status'], - "Location": issue['Location'], - "ComicSize": issue['ComicSize']} - else: - #change the status to the previous status - newVAL = {"Status": issue['Status']} + elif issue['Status'] == 'Archived': + newVAL = {"Status": issue['Status'], + "Location": issue['Location'], + "ComicSize": issue['ComicSize']} + else: + #change the status to the previous status + newVAL = {"Status": issue['Status']} + + if newVAL['Status'] == None: + newVAL = {"Status": "Skipped"} - if newVAL['Status'] == None: - newVAL = {"Status": "Skipped"} + if any(d['IssueID'] == str(issue['IssueID']) for d in ann_list): + logger.fdebug("annual detected for " + str(issue['IssueID']) + " #: " + str(issue['Issue_Number'])) + myDB.upsert("Annuals", newVAL, ctrlVAL) + else: + #logger.fdebug('#' + str(issue['Issue_Number']) + ' writing issuedata: ' + str(newVAL)) + myDB.upsert("Issues", newVAL, ctrlVAL) + fndissue.append({"IssueID": issue['IssueID']}) + icount+=1 + break + logger.info("In the process of converting the data to CV, I changed the status of " + str(icount) + " issues.") - if any(d['IssueID'] == str(issue['IssueID']) for d in ann_list): - logger.fdebug("annual detected for " + str(issue['IssueID']) + " #: " + str(issue['Issue_Number'])) - myDB.upsert("Annuals", newVAL, ctrlVAL) - else: - logger.fdebug('#' + str(issue['Issue_Number']) + ' writing issuedata: ' + str(newVAL)) - myDB.upsert("Issues", newVAL, ctrlVAL) - fndissue.append({"IssueID": issue['IssueID']}) - icount+=1 - break - logger.info("In the process of converting the data to CV, I changed the status of " + str(icount) + " issues.") + issues_new = myDB.select('SELECT * FROM issues WHERE ComicID=? AND Status is NULL', [ComicID]) + if mylar.ANNUALS_ON: + issues_new += myDB.select('SELECT * FROM annuals WHERE ComicID=? AND Status is NULL', [ComicID]) - issues_new = myDB.select('SELECT * FROM issues WHERE ComicID=? AND Status is NULL', [ComicID]) - if mylar.ANNUALS_ON: - issues_new += myDB.select('SELECT * FROM annuals WHERE ComicID=? AND Status is NULL', [ComicID]) + newiss = [] + if mylar.AUTOWANT_UPCOMING: + #only mark store date >= current date as Wanted. 
+ newstatus = "Wanted" + else: + newstatus = "Skipped" + for iss in issues_new: + newiss.append({"IssueID": iss['IssueID'], + "Status": newstatus}) + if len(newiss) > 0: + for newi in newiss: + ctrlVAL = {"IssueID": newi['IssueID']} + newVAL = {"Status": newi['Status']} + #logger.info('writing issuedata: ' + str(newVAL)) + myDB.upsert("Issues", newVAL, ctrlVAL) - newiss = [] - if mylar.AUTOWANT_UPCOMING: - newstatus = "Wanted" + logger.info('I have added ' + str(len(newiss)) + ' new issues for this series that were not present before.') else: - newstatus = "Skipped" - for iss in issues_new: - newiss.append({"IssueID": iss['IssueID'], - "Status": newstatus}) - if len(newiss) > 0: - for newi in newiss: - ctrlVAL = {"IssueID": newi['IssueID']} - newVAL = {"Status": newi['Status']} - logger.info('writing issuedata: ' + str(newVAL)) - myDB.upsert("Issues", newVAL, ctrlVAL) - - logger.info('I have added ' + str(len(newiss)) + ' new issues for this series that were not present before.') - + mylar.importer.addComictoDB(ComicID,mismatch,annload=annload) + else: mylar.importer.addComictoDB(ComicID,mismatch) @@ -682,9 +708,13 @@ class WebInterface(object): raise cherrypy.HTTPRedirect("home") addArtists.exposed = True - def queueissue(self, mode, ComicName=None, ComicID=None, ComicYear=None, ComicIssue=None, IssueID=None, new=False, redirect=None, SeriesYear=None, SARC=None, IssueArcID=None): - #logger.fdebug('ComicID:' + str(ComicID)) - #logger.fdebug('mode:' + str(mode)) + def queueit(self, **kwargs): + threading.Thread(target=self.queueissue, kwargs=kwargs).start() + queueit.exposed = True + + def queueissue(self, mode, ComicName=None, ComicID=None, ComicYear=None, ComicIssue=None, IssueID=None, new=False, redirect=None, SeriesYear=None, SARC=None, IssueArcID=None, manualsearch=None): + logger.fdebug('ComicID:' + str(ComicID)) + logger.fdebug('mode:' + str(mode)) now = datetime.datetime.now() myDB = db.DBConnection() #mode dictates type of queue - either 'want' for individual comics, or 'series' for series watchlist. 
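
queueit, added above, is a fire-and-forget wrapper so a manual search does not block the web request: it passes every kwarg straight through to queueissue on a worker thread. The pattern, with a stand-in worker:

import threading

def queueissue(mode, ComicID=None, IssueID=None, manualsearch=None, **kwargs):
    # stand-in for the real worker; a manual search leaves the Status untouched
    print 'searching', ComicID, IssueID, 'manual:', bool(manualsearch)

def queueit(**kwargs):
    # detach so the HTTP handler returns immediately
    threading.Thread(target=queueissue, kwargs=kwargs).start()

queueit(mode='want', ComicID='1234', IssueID='5678', manualsearch=True)
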
@@ -730,17 +760,23 @@ class WebInterface(object): logger.info(u"Downloaded " + ComicName + " " + ComicIssue ) raise cherrypy.HTTPRedirect("pullist") #return - elif mode == 'want' or mode == 'want_ann': + elif mode == 'want' or mode == 'want_ann' or manualsearch: cdname = myDB.selectone("SELECT ComicName from comics where ComicID=?", [ComicID]).fetchone() ComicName = cdname['ComicName'] controlValueDict = {"IssueID": IssueID} newStatus = {"Status": "Wanted"} if mode == 'want': - logger.info(u"Marking " + ComicName + " issue: " + ComicIssue + " as wanted...") - myDB.upsert("issues", newStatus, controlValueDict) + if manualsearch: + logger.info('Initiating manual search for ' + ComicName + ' issue: ' + ComicIssue) + else: + logger.info(u"Marking " + ComicName + " issue: " + ComicIssue + " as wanted...") + myDB.upsert("issues", newStatus, controlValueDict) else: - logger.info(u"Marking " + ComicName + " Annual: " + ComicIssue + " as wanted...") - myDB.upsert("annuals", newStatus, controlValueDict) + if manualsearch: + logger.info('Initiating manual search for ' + ComicName + ' Annual: ' + ComicIssue) + else: + logger.info(u"Marking " + ComicName + " Annual: " + ComicIssue + " as wanted...") + myDB.upsert("annuals", newStatus, controlValueDict) #--- #this should be on it's own somewhere #if IssueID is not None: @@ -2399,6 +2435,8 @@ class WebInterface(object): "post_processing" : helpers.checked(mylar.POST_PROCESSING), "enable_meta" : helpers.checked(mylar.ENABLE_META), "cmtagger_path" : mylar.CMTAGGER_PATH, + "ct_tag_cr" : helpers.checked(mylar.CT_TAG_CR), + "ct_tag_cbl" : helpers.checked(mylar.CT_TAG_CBL), "branch" : version.MYLAR_VERSION, "br_type" : mylar.INSTALL_TYPE, "br_version" : mylar.versioncheck.getVersion(), @@ -2580,7 +2618,7 @@ class WebInterface(object): nzbget_host=None, nzbget_port=None, nzbget_username=None, nzbget_password=None, nzbget_category=None, nzbget_priority=None, nzbget_directory=None, usenet_retention=None, nzbsu=0, nzbsu_uid=None, nzbsu_apikey=None, dognzb=0, dognzb_uid=None, dognzb_apikey=None, newznab=0, newznab_host=None, newznab_name=None, newznab_apikey=None, newznab_uid=None, newznab_enabled=0, raw=0, raw_provider=None, raw_username=None, raw_password=None, raw_groups=None, experimental=0, - enable_meta=0, cmtagger_path=None, enable_rss=0, rss_checkinterval=None, enable_torrent_search=0, enable_kat=0, enable_cbt=0, cbt_passkey=None, snatchedtorrent_notify=0, + enable_meta=0, cmtagger_path=None, ct_tag_cr=0, ct_tag_cbl=0, enable_rss=0, rss_checkinterval=None, enable_torrent_search=0, enable_kat=0, enable_cbt=0, cbt_passkey=None, snatchedtorrent_notify=0, enable_torrents=0, minseeds=0, torrent_local=0, local_watchdir=None, torrent_seedbox=0, seedbox_watchdir=None, seedbox_user=None, seedbox_pass=None, seedbox_host=None, seedbox_port=None, prowl_enabled=0, prowl_onsnatch=0, prowl_keys=None, prowl_priority=None, nma_enabled=0, nma_apikey=None, nma_priority=0, nma_onsnatch=0, pushover_enabled=0, pushover_onsnatch=0, pushover_apikey=None, pushover_userkey=None, pushover_priority=None, boxcar_enabled=0, boxcar_onsnatch=0, boxcar_token=None, pushbullet_enabled=0, pushbullet_apikey=None, pushbullet_deviceid=None, pushbullet_onsnatch=0, @@ -2706,6 +2744,8 @@ class WebInterface(object): mylar.PRE_SCRIPTS = pre_scripts mylar.ENABLE_META = enable_meta mylar.CMTAGGER_PATH = cmtagger_path + mylar.CT_TAG_CR = ct_tag_cr + mylar.CT_TAG_CBL = ct_tag_cbl mylar.LOG_DIR = log_dir mylar.LOG_LEVEL = log_level mylar.CHMOD_DIR = chmod_dir diff --git 
a/post-processing/autoProcessComics.py b/post-processing/autoProcessComics.py index e669adbf..be6aba79 100644 --- a/post-processing/autoProcessComics.py +++ b/post-processing/autoProcessComics.py @@ -22,7 +22,7 @@ class AuthURLOpener(urllib.FancyURLopener): return urllib.FancyURLopener.open(self, url) -def processEpisode(dirName, nzbName=None): +def processIssue(dirName, nzbName=None): config = ConfigParser.ConfigParser() configFilename = os.path.join(os.path.dirname(sys.argv[0]), "autoProcessComics.cfg") @@ -81,7 +81,7 @@ def processEpisode(dirName, nzbName=None): for line in result: print line - if any("Post Processing SUCCESSFULL" in s for s in result): + if any("Post Processing SUCCESSFUL" in s for s in result): return 0 else: return 1 diff --git a/post-processing/nzbget/ComicRN.py b/post-processing/nzbget/ComicRN.py index 6b999835..81231db6 100755 --- a/post-processing/nzbget/ComicRN.py +++ b/post-processing/nzbget/ComicRN.py @@ -26,11 +26,11 @@ if os.environ.has_key('NZBOP_SCRIPTDIR') and not os.environ['NZBOP_VERSION'][0:5 POSTPROCESS_NONE=95 #Start script - result = autoProcessComics.processEpisode(os.environ['NZBPP_DIRECTORY'], os.environ['NZBPP_NZBNAME']) + result = autoProcessComics.processIssue(os.environ['NZBPP_DIRECTORY'], os.environ['NZBPP_NZBNAME']) elif len(sys.argv) == NZBGET_NO_OF_ARGUMENTS: - result = autoProcessComics.processEpisode(sys.argv[1], sys.argv[2], sys.argv[3]) + result = autoProcessComics.processIssue(sys.argv[1], sys.argv[2], sys.argv[3]) if result == 0: if os.environ.has_key('NZBOP_SCRIPTDIR'): # log success for nzbget diff --git a/post-processing/sabnzbd/ComicRN.py b/post-processing/sabnzbd/ComicRN.py index c8e94812..5a47ee15 100755 --- a/post-processing/sabnzbd/ComicRN.py +++ b/post-processing/sabnzbd/ComicRN.py @@ -17,6 +17,6 @@ if len(sys.argv) < 2: print "No folder supplied - is this being called from SABnzbd or NZBGet?" sys.exit() elif len(sys.argv) >= 3: - sys.exit(autoProcessComics.processEpisode(sys.argv[1], sys.argv[3])) + sys.exit(autoProcessComics.processIssue(sys.argv[1], sys.argv[3])) else: - sys.exit(autoProcessComics.processEpisode(sys.argv[1])) + sys.exit(autoProcessComics.processIssue(sys.argv[1]))
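
Both ComicRN.py variants above reduce to the same contract: call autoProcessComics.processIssue with the download directory (plus the nzb name when available) and exit 0 on success, 1 on failure. A simplified caller, assuming autoProcessComics is importable (argument positions condensed relative to the SABnzbd script):

import sys

import autoProcessComics

if __name__ == '__main__':
    if len(sys.argv) < 2:
        print "No folder supplied - is this being called from SABnzbd or NZBGet?"
        sys.exit(1)
    # argv[1] = download directory, argv[2] = nzb name (optional)
    if len(sys.argv) >= 3:
        sys.exit(autoProcessComics.processIssue(sys.argv[1], sys.argv[2]))
    sys.exit(autoProcessComics.processIssue(sys.argv[1]))
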