Whitespace cleanup

Author: Adrian Moisey, 2015-05-22 10:32:51 +02:00 (committed by evilhero)
Parent: d21cc0d80e
Commit: 9a4a6b4bc5
37 changed files with 1769 additions and 1771 deletions

View File

@ -32,9 +32,11 @@ try:
except ImportError:
import lib.argparse as argparse
def handler_sigterm(signum, frame):
mylar.SIGNAL = 'shutdown'
def main():
# Fixed paths to mylar
@ -42,10 +44,10 @@ def main():
mylar.FULL_PATH = os.path.abspath(sys.executable)
else:
mylar.FULL_PATH = os.path.abspath(__file__)
mylar.PROG_DIR = os.path.dirname(mylar.FULL_PATH)
mylar.ARGS = sys.argv[1:]
# From sickbeard
mylar.SYS_ENCODING = None
@ -58,7 +60,7 @@ def main():
# for OSes that are poorly configured I'll just force UTF-8
if not mylar.SYS_ENCODING or mylar.SYS_ENCODING in ('ANSI_X3.4-1968', 'US-ASCII', 'ASCII'):
mylar.SYS_ENCODING = 'UTF-8'
# Set up and gather command line arguments
parser = argparse.ArgumentParser(description='Comic Book add-on for SABnzbd+')
@ -73,14 +75,14 @@ def main():
parser.add_argument('--pidfile', help='Create a pid file (only relevant when running as a daemon)')
parser.add_argument('--safe', action='store_true', help='redirect the startup page to point to the Manage Comics screen on startup')
#parser.add_argument('-u', '--update', action='store_true', help='force mylar to perform an update as if in GUI')
args = parser.parse_args()
if args.verbose:
mylar.VERBOSE = 2
elif args.quiet:
mylar.VERBOSE = 0
#if args.update:
# print('Attempting to update Mylar so things can work again...')
# try:
@ -92,10 +94,10 @@ def main():
if sys.platform == 'win32':
print "Daemonize not supported under Windows, starting normally"
else:
-mylar.DAEMON=True
-mylar.VERBOSE=0
+mylar.DAEMON = True
+mylar.VERBOSE = 0
-if args.pidfile :
+if args.pidfile:
mylar.PIDFILE = str(args.pidfile)
# If the pidfile already exists, mylar may still be running, so exit
@ -112,17 +114,16 @@ def main():
else:
logger.warn("Not running in daemon mode. PID file creation disabled.")
if args.datadir:
mylar.DATA_DIR = args.datadir
else:
mylar.DATA_DIR = mylar.PROG_DIR
if args.config:
mylar.CONFIG_FILE = args.config
else:
mylar.CONFIG_FILE = os.path.join(mylar.DATA_DIR, 'config.ini')
if args.safe:
mylar.SAFESTART = True
else:
@ -136,14 +137,14 @@ def main():
# raise SystemExit('Could not create data directory: ' + mylar.DATA_DIR + '. Exiting....')
filechecker.validateAndCreateDirectory(mylar.DATA_DIR, True)
# Make sure the DATA_DIR is writeable
if not os.access(mylar.DATA_DIR, os.W_OK):
raise SystemExit('Cannot write to the data directory: ' + mylar.DATA_DIR + '. Exiting...')
# Put the database in the DATA_DIR
mylar.DB_FILE = os.path.join(mylar.DATA_DIR, 'mylar.db')
# backup the db and configs before they load.
if args.backup:
print '[AUTO-BACKUP] Backing up .db and config.ini files for safety.'
@ -177,22 +178,22 @@ def main():
print '[AUTO-BACKUP] Now renaming ' + back + ' to ' + back_1
shutil.move(back, back_1)
print '[AUTO-BACKUP] Now copying db file to ' + back
-shutil.copy(ogfile, back)
+shutil.copy(ogfile, back)
except OSError as exception:
if exception.errno != errno.EXIST:
raise
-i+=1
+i += 1
mylar.CFG = ConfigObj(mylar.CONFIG_FILE, encoding='utf-8')
# Rename the main thread
threading.currentThread().name = "MAIN"
# Read config & start logging
mylar.initialize()
if mylar.DAEMON:
mylar.daemonize()
@ -202,7 +203,7 @@ def main():
logger.info('Starting Mylar on foced port: %i' % http_port)
else:
http_port = int(mylar.HTTP_PORT)
# Check if pyOpenSSL is installed. It is required for certificate generation
# and for CherryPy.
if mylar.ENABLE_HTTPS:
@ -225,19 +226,19 @@ def main():
'http_password': mylar.HTTP_PASSWORD,
}
-# Try to start the server.
+# Try to start the server.
webstart.initialize(web_config)
#logger.info('Starting Mylar on port: %i' % http_port)
if mylar.LAUNCH_BROWSER and not args.nolaunch:
mylar.launch_browser(mylar.HTTP_HOST, http_port, mylar.HTTP_ROOT)
# Start the background threads
mylar.start()
signal.signal(signal.SIGTERM, handler_sigterm)
while True:
if not mylar.SIGNAL:
try:
@ -252,9 +253,9 @@ def main():
mylar.shutdown(restart=True)
else:
mylar.shutdown(restart=True, update=True)
mylar.SIGNAL = None
return
if __name__ == "__main__":

View File

@ -2,4 +2,4 @@
from lib.comictaggerlib.main import ctmain
if __name__ == '__main__':
-ctmain()
+ctmain()

View File

@ -66,7 +66,7 @@ class FailedProcessor(object):
module = '[FAILED-DOWNLOAD]'
myDB = db.DBConnection()
if self.nzb_name and self.nzb_folder:
self._log('Failed download has been detected: ' + self.nzb_name + ' in ' + self.nzb_folder)
@ -105,8 +105,8 @@ class FailedProcessor(object):
if nzbiss is None:
logger.error(module + ' Unable to locate downloaded file to rename. PostProcessing aborted.')
self._log('Unable to locate downloaded file to rename. PostProcessing aborted.')
self.valreturn.append({"self.log" : self.log,
"mode" : 'stop'})
self.valreturn.append({"self.log": self.log,
"mode": 'stop'})
return self.queue.put(self.valreturn)
else:
@ -124,9 +124,9 @@ class FailedProcessor(object):
nzbiss = myDB.selectone("SELECT * from nzblog WHERE IssueID=?", [issueid]).fetchone()
if nzbiss is None:
logger.info(module + ' Cannot locate corresponding record in download history. This will be implemented soon.')
self.valreturn.append({"self.log" : self.log,
"mode" : 'stop'})
return self.queue.put(self.valreturn)
self.valreturn.append({"self.log": self.log,
"mode": 'stop'})
return self.queue.put(self.valreturn)
nzbname = nzbiss['NZBName']
@ -145,7 +145,7 @@ class FailedProcessor(object):
issuenzb = myDB.selectone("SELECT * from annuals WHERE IssueID=? AND ComicName NOT NULL", [issueid]).fetchone()
else:
issuenzb = myDB.selectone("SELECT * from issues WHERE IssueID=? AND ComicName NOT NULL", [issueid]).fetchone()
if issuenzb is not None:
logger.info(module + ' issuenzb found.')
if helpers.is_number(issueid):
@ -165,8 +165,8 @@ class FailedProcessor(object):
else:
logger.info('Failed download handling for story-arcs and one-off\'s are not supported yet. Be patient!')
self._log(' Unable to locate downloaded file to rename. PostProcessing aborted.')
self.valreturn.append({"self.log" : self.log,
"mode" : 'stop'})
self.valreturn.append({"self.log": self.log,
"mode": 'stop'})
return self.queue.put(self.valreturn)
@ -208,23 +208,23 @@ class FailedProcessor(object):
else:
logger.info(module + ' Stopping search here as automatic handling of failed downloads is not enabled *hint*')
self._log('Stopping search here as automatic handling of failed downloads is not enabled *hint*')
self.valreturn.append({"self.log" : self.log,
"mode" : 'stop'})
self.valreturn.append({"self.log": self.log,
"mode": 'stop'})
return self.queue.put(self.valreturn)
def failed_check(self):
#issueid = self.issueid
-#comicid = self.comicid
+#comicid = self.comicid
# ID = ID passed by search upon a match upon preparing to send it to client to download.
# ID is provider dependent, so the same file should be different for every provider.
module = '[FAILED_DOWNLOAD_CHECKER]'
myDB = db.DBConnection()
# Querying on NZBName alone will result in all downloads regardless of provider.
# This will make sure that the files being downloaded are different regardless of provider.
-# Perhaps later improvement might be to break it down by provider so that Mylar will attempt to
+# Perhaps later improvement might be to break it down by provider so that Mylar will attempt to
# download same issues on different providers (albeit it shouldn't matter, if it's broke it's broke).
logger.info('prov : ' + str(self.prov) + '[' + str(self.id) + ']')
chk_fail = myDB.selectone('SELECT * FROM failed WHERE ID=?', [self.id]).fetchone()
@ -233,7 +233,7 @@ class FailedProcessor(object):
return 'Good'
else:
if chk_fail['status'] == 'Good':
-logger.info(module + ' result has a status of GOOD - which means it does not currently exist in the failed download list.')
+logger.info(module + ' result has a status of GOOD - which means it does not currently exist in the failed download list.')
return chk_fail['status']
elif chk_fail['status'] == 'Failed':
logger.info(module + ' result has a status of FAIL which indicates it is not a good choice to download.')
@ -284,4 +284,4 @@ class FailedProcessor(object):
myDB.upsert("failed", Vals, ctrlVal)
logger.info(module + ' Successfully marked as Failed.')

View File

@ -104,7 +104,7 @@ class PostProcessor(object):
first_line = f.readline()
if mylar.PRE_SCRIPTS.endswith('.sh'):
-shell_cmd = re.sub('#!','', first_line).strip()
+shell_cmd = re.sub('#!', '', first_line).strip()
if shell_cmd == '' or shell_cmd is None:
shell_cmd = '/bin/bash'
else:
@ -119,8 +119,8 @@ class PostProcessor(object):
self._log("cmd to be executed: " + str(script_cmd))
# use subprocess to run the command and capture output
logger.fdebug(u"Executing command "+str(script_cmd))
logger.fdebug(u"Absolute path to script: "+script_cmd[0])
logger.fdebug(u"Executing command " +str(script_cmd))
logger.fdebug(u"Absolute path to script: " +script_cmd[0])
try:
p = subprocess.Popen(script_cmd, stdout=subprocess.PIPE, stderr=subprocess.STDOUT, cwd=mylar.PROG_DIR)
out, err = p.communicate() #@UnusedVariable
@ -145,7 +145,7 @@ class PostProcessor(object):
first_line = f.readline()
if mylar.EXTRA_SCRIPTS.endswith('.sh'):
-shell_cmd = re.sub('#!','', first_line)
+shell_cmd = re.sub('#!', '', first_line)
if shell_cmd == '' or shell_cmd is None:
shell_cmd = '/bin/bash'
else:
@ -160,8 +160,8 @@ class PostProcessor(object):
self._log("cmd to be executed: " + str(script_cmd))
# use subprocess to run the command and capture output
logger.fdebug(u"Executing command "+str(script_cmd))
logger.fdebug(u"Absolute path to script: "+script_cmd[0])
logger.fdebug(u"Executing command " +str(script_cmd))
logger.fdebug(u"Absolute path to script: " +script_cmd[0])
try:
p = subprocess.Popen(script_cmd, stdout=subprocess.PIPE, stderr=subprocess.STDOUT, cwd=mylar.PROG_DIR)
out, err = p.communicate() #@UnusedVariable
@ -173,7 +173,7 @@ class PostProcessor(object):
def Process(self):
-module = self.module
+module = self.module
self._log("nzb name: " + str(self.nzb_name))
self._log("nzb folder: " + str(self.nzb_folder))
logger.fdebug(module + ' nzb name: ' + str(self.nzb_name))
@ -210,7 +210,7 @@ class PostProcessor(object):
# -- end. not used.
if mylar.USE_NZBGET==1:
-if self.nzb_name != 'Manual Run':
+if self.nzb_name != 'Manual Run':
logger.fdebug(module + ' Using NZBGET')
logger.fdebug(module + ' NZB name as passed from NZBGet: ' + self.nzb_name)
# if the NZBGet Directory option is enabled, let's use that folder name and append the jobname.
@ -231,7 +231,7 @@ class PostProcessor(object):
#when all done, iterate over the tuple until completion...
comicseries = myDB.select("SELECT * FROM comics")
manual_list = []
-if comicseries is None:
+if comicseries is None:
logger.error(module + ' No Series in Watchlist - aborting Manual Post Processing. Maybe you should be running Import?')
return
else:
@ -261,7 +261,7 @@ class PostProcessor(object):
"ComicPublisher": wv_comicpublisher,
"AlternateSearch": wv_alternatesearch,
"ComicID": wv_comicid,
"WatchValues" : {"SeriesYear": wv_seriesyear,
"WatchValues": {"SeriesYear": wv_seriesyear,
"LatestDate": latestdate,
"ComicVersion": wv_comicversion,
"Publisher": wv_publisher,
@ -272,7 +272,7 @@ class PostProcessor(object):
ccnt=0
nm=0
for cs in watchvals:
-watchmatch = filechecker.listFiles(self.nzb_folder,cs['ComicName'],cs['ComicPublisher'],cs['AlternateSearch'], manual=cs['WatchValues'])
+watchmatch = filechecker.listFiles(self.nzb_folder, cs['ComicName'], cs['ComicPublisher'], cs['AlternateSearch'], manual=cs['WatchValues'])
if watchmatch['comiccount'] == 0: # is None:
nm+=1
continue
@ -283,7 +283,7 @@ class PostProcessor(object):
while (fn < fccnt):
try:
tmpfc = watchmatch['comiclist'][fn]
-except IndexError,KeyError:
+except IndexError, KeyError:
break
temploc= tmpfc['JusttheDigits'].replace('_', ' ')
temploc = re.sub('[\#\']', '', temploc)
@ -297,10 +297,10 @@ class PostProcessor(object):
logger.fdebug(module + ' Annual detected.')
fcdigit = helpers.issuedigits(re.sub('annual', '', str(temploc.lower())).strip())
annchk = "yes"
issuechk = myDB.selectone("SELECT * from annuals WHERE ComicID=? AND Int_IssueNumber=?", [cs['ComicID'],fcdigit]).fetchone()
issuechk = myDB.selectone("SELECT * from annuals WHERE ComicID=? AND Int_IssueNumber=?", [cs['ComicID'], fcdigit]).fetchone()
else:
fcdigit = helpers.issuedigits(temploc)
issuechk = myDB.selectone("SELECT * from issues WHERE ComicID=? AND Int_IssueNumber=?", [cs['ComicID'],fcdigit]).fetchone()
issuechk = myDB.selectone("SELECT * from issues WHERE ComicID=? AND Int_IssueNumber=?", [cs['ComicID'], fcdigit]).fetchone()
if issuechk is None:
logger.fdebug(module + ' No corresponding issue # found for ' + str(cs['ComicID']))
@ -322,7 +322,7 @@ class PostProcessor(object):
if int(issuechk['ReleaseDate'][:4]) < int(tmpfc['ComicYear']):
logger.fdebug(module + ' ' + str(issuechk['ReleaseDate']) + ' is before the issue year of ' + str(tmpfc['ComicYear']) + ' that was discovered in the filename')
datematch = "False"
else:
monthval = issuechk['IssueDate']
if int(issuechk['IssueDate'][:4]) < int(tmpfc['ComicYear']):
@ -343,10 +343,10 @@ class PostProcessor(object):
if int(issyr) != int(tmpfc['ComicYear']):
logger.fdebug(module + '[.:FAIL:.] Issue is before the modified issue year of ' + str(issyr))
datematch = "False"
else:
logger.info(module + ' Found matching issue # ' + str(fcdigit) + ' for ComicID: ' + str(cs['ComicID']) + ' / IssueID: ' + str(issuechk['IssueID']))
if datematch == "True":
manual_list.append({"ComicLocation": tmpfc['ComicLocation'],
"ComicID": cs['ComicID'],
@ -359,7 +359,7 @@ class PostProcessor(object):
fn+=1
logger.fdebug(module + ' There are ' + str(len(manual_list)) + ' files found that match on your watchlist, ' + str(nm) + ' do not match anything and will be ignored.')
else:
nzbname = self.nzb_name
@ -382,7 +382,7 @@ class PostProcessor(object):
nzbname = re.sub('.cbr', '', nzbname).strip()
nzbname = re.sub('.cbz', '', nzbname).strip()
nzbname = re.sub('[\.\_]', ' ', nzbname).strip()
-nzbname = re.sub('\s+',' ', nzbname) #make sure we remove the extra spaces.
+nzbname = re.sub('\s+', ' ', nzbname) #make sure we remove the extra spaces.
logger.fdebug('[NZBNAME] nzbname (remove extensions, double spaces, convert underscores to spaces): ' + nzbname)
nzbname = re.sub('\s', '.', nzbname)
@ -390,8 +390,8 @@ class PostProcessor(object):
# if mylar.USE_NZBGET==1:
# nzbname=self.nzb_name
self._log("nzbname: " + str(nzbname))
-nzbiss = myDB.selectone("SELECT * from nzblog WHERE nzbname=? or altnzbname=?", [nzbname,nzbname]).fetchone()
+nzbiss = myDB.selectone("SELECT * from nzblog WHERE nzbname=? or altnzbname=?", [nzbname, nzbname]).fetchone()
if nzbiss is None:
self._log("Failure - could not initially locate nzbfile in my database to rename.")
@ -400,18 +400,18 @@ class PostProcessor(object):
nzbname = re.sub('[\(\)]', '', str(nzbname))
self._log("trying again with this nzbname: " + str(nzbname))
logger.fdebug(module + ' Trying to locate nzbfile again with nzbname of : ' + str(nzbname))
-nzbiss = myDB.selectone("SELECT * from nzblog WHERE nzbname=? or altnzbname=?", [nzbname,nzbname]).fetchone()
+nzbiss = myDB.selectone("SELECT * from nzblog WHERE nzbname=? or altnzbname=?", [nzbname, nzbname]).fetchone()
if nzbiss is None:
logger.error(module + ' Unable to locate downloaded file to rename. PostProcessing aborted.')
self._log('Unable to locate downloaded file to rename. PostProcessing aborted.')
self.valreturn.append({"self.log" : self.log,
"mode" : 'stop'})
self.valreturn.append({"self.log": self.log,
"mode": 'stop'})
return self.queue.put(self.valreturn)
else:
self._log("I corrected and found the nzb as : " + str(nzbname))
logger.fdebug(module + ' Auto-corrected and found the nzb as : ' + str(nzbname))
issueid = nzbiss['IssueID']
-else:
+else:
issueid = nzbiss['IssueID']
logger.fdebug(module + ' Issueid: ' + str(issueid))
sarc = nzbiss['SARC']
@ -426,10 +426,10 @@ class PostProcessor(object):
# issuenzb = myDB.selectone("SELECT * from issues WHERE IssueID=? AND ComicName NOT NULL", [issueid]).fetchone()
issuenzb = myDB.selectone("SELECT * from issues WHERE IssueID=? AND ComicName NOT NULL", [issueid]).fetchone()
-if issuenzb is None:
+if issuenzb is None:
logger.info(module + ' Could not detect as a standard issue - checking against annuals.')
issuenzb = myDB.selectone("SELECT * from annuals WHERE IssueID=? AND ComicName NOT NULL", [issueid]).fetchone()
-if issuenzb is None:
+if issuenzb is None:
logger.info(module + ' issuenzb not found.')
#if it's non-numeric, it contains a 'G' at the beginning indicating it's a multi-volume
#using GCD data. Set sandwich to 1 so it will bypass and continue post-processing.
@ -451,7 +451,7 @@ class PostProcessor(object):
# #using GCD data. Set sandwich to 1 so it will bypass and continue post-processing.
# if 'S' in issueid:
# sandwich = issueid
-# elif 'G' in issueid or '-' in issueid:
+# elif 'G' in issueid or '-' in issueid:
# sandwich = 1
if helpers.is_number(sandwich):
if sandwich < 900000:
@ -471,7 +471,7 @@ class PostProcessor(object):
else:
self._log("Grab-Bag Directory set to : " + mylar.GRABBAG_DIR)
logger.info(module + ' Story Arc Directory set to : ' + mylar.GRABBAG_DIR)
else:
self._log("One-off mode enabled for Post-Processing. All I'm doing is moving the file untouched into the Grab-bag directory.")
logger.info(module + ' One-off mode enabled for Post-Processing. Will move into Grab-bag directory.')
@ -486,13 +486,13 @@ class PostProcessor(object):
path, ext = os.path.splitext(ofilename)
if odir is None:
-odir = self.nzb_folder
+odir = self.nzb_folder
issuearcid = re.sub('S', '', issueid)
logger.fdebug(module + ' issuearcid:' + str(issuearcid))
-arcdata = myDB.selectone("SELECT * FROM readinglist WHERE IssueArcID=?",[issuearcid]).fetchone()
+arcdata = myDB.selectone("SELECT * FROM readinglist WHERE IssueArcID=?", [issuearcid]).fetchone()
-issueid = arcdata['IssueID']
+issueid = arcdata['IssueID']
#tag the meta.
if mylar.ENABLE_META:
self._log("Metatagging enabled - proceeding...")
@ -523,9 +523,9 @@ class PostProcessor(object):
grdst = mylar.GRABBAG_DIR
else:
grdst = mylar.DESTINATION_DIR
filechecker.validateAndCreateDirectory(grdst, True, module=module)
if 'S' in sandwich:
#if from a StoryArc, check to see if we're appending the ReadingOrder to the filename
if mylar.READ2FILENAME:
@ -582,8 +582,8 @@ class PostProcessor(object):
logger.info(module + ' Post-Processing completed for: [' + sarc + '] ' + grab_dst)
self._log(u"Post Processing SUCCESSFUL! ")
self.valreturn.append({"self.log" : self.log,
"mode" : 'stop'})
self.valreturn.append({"self.log": self.log,
"mode": 'stop'})
return self.queue.put(self.valreturn)
@ -608,11 +608,11 @@ class PostProcessor(object):
waiting = True
else:
break
dupthis = helpers.duplicate_filecheck(ml['ComicLocation'], ComicID=comicid, IssueID=issueid)
if dupthis == "write":
stat = ' [' + str(i) + '/' + str(len(manual_list)) + ']'
-self.Process_next(comicid,issueid,issuenumOG,ml,stat)
+self.Process_next(comicid, issueid, issuenumOG, ml, stat)
dupthis = None
logger.info(module + ' Manual post-processing completed for ' + str(i) + ' issues.')
return
@ -622,17 +622,17 @@ class PostProcessor(object):
#the self.nzb_folder should contain only the existing filename
dupthis = helpers.duplicate_filecheck(self.nzb_folder, ComicID=comicid, IssueID=issueid)
if dupthis == "write":
-return self.Process_next(comicid,issueid,issuenumOG)
+return self.Process_next(comicid, issueid, issuenumOG)
else:
self.valreturn.append({"self.log" : self.log,
"mode" : 'stop',
"issueid" : issueid,
"comicid" : comicid})
self.valreturn.append({"self.log": self.log,
"mode": 'stop',
"issueid": issueid,
"comicid": comicid})
return self.queue.put(self.valreturn)
-def Process_next(self,comicid,issueid,issuenumOG,ml=None,stat=None):
+def Process_next(self, comicid, issueid, issuenumOG, ml=None, stat=None):
if stat is None: stat = ' [1/1]'
module = self.module
annchk = "no"
@ -640,9 +640,9 @@ class PostProcessor(object):
snatchedtorrent = False
myDB = db.DBConnection()
comicnzb = myDB.selectone("SELECT * from comics WHERE comicid=?", [comicid]).fetchone()
issuenzb = myDB.selectone("SELECT * from issues WHERE issueid=? AND comicid=? AND ComicName NOT NULL", [issueid,comicid]).fetchone()
issuenzb = myDB.selectone("SELECT * from issues WHERE issueid=? AND comicid=? AND ComicName NOT NULL", [issueid, comicid]).fetchone()
if ml is not None and mylar.SNATCHEDTORRENT_NOTIFY:
-snatchnzb = myDB.selectone("SELECT * from snatched WHERE IssueID=? AND ComicID=? AND (provider=? OR provider=?) AND Status='Snatched'", [issueid,comicid,'KAT','32P']).fetchone()
+snatchnzb = myDB.selectone("SELECT * from snatched WHERE IssueID=? AND ComicID=? AND (provider=? OR provider=?) AND Status='Snatched'", [issueid, comicid, 'KAT', '32P']).fetchone()
if snatchnzb is None:
logger.fdebug(module + ' Was not downloaded with Mylar and the usage of torrents. Disabling torrent manual post-processing completion notification.')
else:
@ -650,7 +650,7 @@ class PostProcessor(object):
snatchedtorrent = True
if issuenzb is None:
issuenzb = myDB.selectone("SELECT * from annuals WHERE issueid=? and comicid=?", [issueid,comicid]).fetchone()
issuenzb = myDB.selectone("SELECT * from annuals WHERE issueid=? and comicid=?", [issueid, comicid]).fetchone()
annchk = "yes"
if annchk == "no":
logger.info(module + stat + ' Starting Post-Processing for ' + issuenzb['ComicName'] + ' issue: ' + issuenzb['Issue_Number'])
@ -690,7 +690,7 @@ class PostProcessor(object):
if '.' in issuenum:
iss_find = issuenum.find('.')
iss_b4dec = issuenum[:iss_find]
-iss_decval = issuenum[iss_find+1:]
+iss_decval = issuenum[iss_find +1:]
if iss_decval.endswith('.'): iss_decval = iss_decval[:-1]
if int(iss_decval) == 0:
iss = iss_b4dec
@ -713,7 +713,7 @@ class PostProcessor(object):
issueno = iss
# issue zero-suppression here
if mylar.ZERO_LEVEL == "0":
if mylar.ZERO_LEVEL == "0":
zeroadd = ""
else:
if mylar.ZERO_LEVEL_N == "none": zeroadd = ""
@ -739,7 +739,7 @@ class PostProcessor(object):
prettycomiss = str(zeroadd) + str(int(issueno))
else:
prettycomiss = str(zeroadd) + str(iss)
-if issue_except != 'None':
+if issue_except != 'None':
prettycomiss = str(prettycomiss) + issue_except
self._log("Zero level supplement set to " + str(mylar.ZERO_LEVEL_N) + ". Issue will be set as : " + str(prettycomiss))
elif int(issueno) >= 10 and int(issueno) < 100:
@ -778,7 +778,7 @@ class PostProcessor(object):
issueyear = issuenzb['IssueDate'][:4]
self._log("Issue Year: " + str(issueyear))
logger.fdebug(module + ' Issue Year : ' + str(issueyear))
-month = issuenzb['IssueDate'][5:7].replace('-','').strip()
+month = issuenzb['IssueDate'][5:7].replace('-', '').strip()
month_name = helpers.fullmonth(month)
# comicnzb= myDB.action("SELECT * from comics WHERE comicid=?", [comicid]).fetchone()
publisher = comicnzb['ComicPublisher']
@ -806,7 +806,7 @@ class PostProcessor(object):
comversion = 'None'
#if comversion is None, remove it so it doesn't populate with 'None'
if comversion == 'None':
-chunk_f_f = re.sub('\$VolumeN','',mylar.FILE_FORMAT)
+chunk_f_f = re.sub('\$VolumeN', '', mylar.FILE_FORMAT)
chunk_f = re.compile(r'\s+')
chunk_file_format = chunk_f.sub(' ', chunk_f_f)
self._log("No version # found for series - tag will not be available for renaming.")
@ -816,7 +816,7 @@ class PostProcessor(object):
chunk_file_format = mylar.FILE_FORMAT
if annchk == "no":
-chunk_f_f = re.sub('\$Annual','',chunk_file_format)
+chunk_f_f = re.sub('\$Annual', '', chunk_file_format)
chunk_f = re.compile(r'\s+')
chunk_file_format = chunk_f.sub(' ', chunk_f_f)
logger.fdebug(module + ' Not an annual - removing from filename parameters')
@ -853,7 +853,7 @@ class PostProcessor(object):
logger.fdebug(module + ' comictaggerlib not found on system. Ensure the ENTIRE lib directory is located within mylar/lib/comictaggerlib/')
logger.fdebug(module + ' continuing with PostProcessing, but I am not using metadata.')
pcheck = "fail"
if pcheck == "fail":
self._log("Unable to write metadata successfully - check mylar.log file. Attempting to continue without tagging...")
logger.fdebug(module + ' Unable to write metadata successfully - check mylar.log file. Attempting to continue without tagging...')
@ -893,7 +893,7 @@ class PostProcessor(object):
'publisher': publisher
})
seriesmetadata['seriesmeta'] = seriesmeta
-self._run_pre_scripts(nzbn, nzbf, seriesmetadata )
+self._run_pre_scripts(nzbn, nzbf, seriesmetadata)
#rename file and move to new path
#nfilename = series + " " + issueno + " (" + seriesyear + ")"
@ -945,8 +945,8 @@ class PostProcessor(object):
if ofilename is None:
logger.error(module + ' Aborting PostProcessing - the filename does not exist in the location given. Make sure that ' + str(self.nzb_folder) + ' exists and is the correct location.')
self.valreturn.append({"self.log" : self.log,
"mode" : 'stop'})
self.valreturn.append({"self.log": self.log,
"mode": 'stop'})
return self.queue.put(self.valreturn)
self._log("Original Filename: " + ofilename)
self._log("Original Extension: " + ext)
@ -994,8 +994,8 @@ class PostProcessor(object):
logger.fdebug(module + ' nfilename:' + nfilename + ext)
if mylar.RENAME_FILES:
if str(ofilename) != str(nfilename + ext):
-logger.fdebug(module + ' Renaming ' + os.path.join(odir, ofilename) + ' ..to.. ' + os.path.join(odir,nfilename + ext))
-os.rename(os.path.join(odir, ofilename), os.path.join(odir,nfilename + ext))
+logger.fdebug(module + ' Renaming ' + os.path.join(odir, ofilename) + ' ..to.. ' + os.path.join(odir, nfilename + ext))
+os.rename(os.path.join(odir, ofilename), os.path.join(odir, nfilename + ext))
else:
logger.fdebug(module + ' Filename is identical as original, not renaming.')
@ -1008,8 +1008,8 @@ class PostProcessor(object):
self._log("Post-Processing ABORTED.")
logger.warn(module + ' Failed to move directory : ' + src + ' to ' + dst + ' - check directory and manually re-run')
logger.warn(module + ' Post-Processing ABORTED')
self.valreturn.append({"self.log" : self.log,
"mode" : 'stop'})
self.valreturn.append({"self.log": self.log,
"mode": 'stop'})
return self.queue.put(self.valreturn)
#tidyup old path
@ -1020,8 +1020,8 @@ class PostProcessor(object):
self._log("Post-Processing ABORTED.")
logger.warn(module + ' Failed to remove temporary directory : ' + self.nzb_folder)
logger.warn(module + ' Post-Processing ABORTED')
self.valreturn.append({"self.log" : self.log,
"mode" : 'stop'})
self.valreturn.append({"self.log": self.log,
"mode": 'stop'})
return self.queue.put(self.valreturn)
self._log("Removed temporary directory : " + str(self.nzb_folder))
logger.fdebug(module + ' Removed temporary directory : ' + self.nzb_folder)
@ -1032,8 +1032,8 @@ class PostProcessor(object):
src = os.path.join(odir, ofilename)
if mylar.RENAME_FILES:
if str(ofilename) != str(nfilename + ext):
-logger.fdebug(module + ' Renaming ' + os.path.join(odir, str(ofilename)) + ' ..to.. ' + os.path.join(odir, self.nzb_folder,str(nfilename + ext)))
-os.rename(os.path.join(odir, str(ofilename)), os.path.join(odir ,str(nfilename + ext)))
+logger.fdebug(module + ' Renaming ' + os.path.join(odir, str(ofilename)) + ' ..to.. ' + os.path.join(odir, self.nzb_folder, str(nfilename + ext)))
+os.rename(os.path.join(odir, str(ofilename)), os.path.join(odir, str(nfilename + ext)))
src = os.path.join(odir, str(nfilename + ext))
else:
logger.fdebug(module + ' Filename is identical as original, not renaming.')
@ -1046,8 +1046,8 @@ class PostProcessor(object):
logger.fdebug(module + ' Failed to move directory - check directories and manually re-run.')
logger.fdebug(module + ' Post-Processing ABORTED.')
self.valreturn.append({"self.log" : self.log,
"mode" : 'stop'})
self.valreturn.append({"self.log": self.log,
"mode": 'stop'})
return self.queue.put(self.valreturn)
logger.fdebug(module + ' Successfully moved to : ' + dst)
@ -1061,7 +1061,7 @@ class PostProcessor(object):
else:
raise OSError(module + ' ' + odir + ' not empty. Skipping removal of directory - this will either be caught in further post-processing or it will have to be removed manually.')
else:
-raise OSError(module + ' ' + odir + ' unable to remove at this time.')
+raise OSError(module + ' ' + odir + ' unable to remove at this time.')
except (OSError, IOError):
logger.fdebug(module + ' Failed to remove temporary directory (' + odir + ') - Processing will continue, but manual removal is necessary')
@ -1076,7 +1076,7 @@ class PostProcessor(object):
#delete entry from nzblog table
myDB.action('DELETE from nzblog WHERE issueid=?', [issueid])
#update snatched table to change status to Downloaded
if annchk == "no":
updater.foundsearch(comicid, issueid, down=downtype, module=module)
dispiss = 'issue: ' + issuenumOG
@ -1088,11 +1088,11 @@ class PostProcessor(object):
dispiss = issuenumOG
#force rescan of files
-updater.forceRescan(comicid,module=module)
+updater.forceRescan(comicid, module=module)
if mylar.WEEKFOLDER:
#if enabled, will *copy* the post-processed file to the weeklypull list folder for the given week.
-weeklypull.weekly_singlecopy(comicid,issuenum,str(nfilename+ext),dst,module=module,issueid=issueid)
+weeklypull.weekly_singlecopy(comicid, issuenum, str(nfilename +ext), dst, module=module, issueid=issueid)
# retrieve/create the corresponding comic objects
if mylar.ENABLE_EXTRA_SCRIPTS:
@ -1113,21 +1113,21 @@ class PostProcessor(object):
'publisher': publisher
})
seriesmetadata['seriesmeta'] = seriesmeta
-self._run_extra_scripts(nzbn, self.nzb_folder, filen, folderp, seriesmetadata )
+self._run_extra_scripts(nzbn, self.nzb_folder, filen, folderp, seriesmetadata)
if ml is not None:
#we only need to return self.log if it's a manual run and it's not a snatched torrent
-if snatchedtorrent:
+if snatchedtorrent:
#manual run + snatched torrent
pass
else:
#manual run + not snatched torrent (or normal manual-run)
-logger.info(module + ' Post-Processing completed for: ' + series + ' ' + dispiss )
+logger.info(module + ' Post-Processing completed for: ' + series + ' ' + dispiss)
self._log(u"Post Processing SUCCESSFUL! ")
self.valreturn.append({"self.log" : self.log,
"mode" : 'stop',
"issueid" : issueid,
"comicid" : comicid})
self.valreturn.append({"self.log": self.log,
"mode": 'stop',
"issueid": issueid,
"comicid": comicid})
return self.queue.put(self.valreturn)
@ -1144,8 +1144,8 @@ class PostProcessor(object):
if mylar.PROWL_ENABLED:
pushmessage = prline
prowl = notifiers.PROWL()
prowl.notify(pushmessage,"Download and Postprocessing completed", module=module)
prowl.notify(pushmessage, "Download and Postprocessing completed", module=module)
if mylar.NMA_ENABLED:
nma = notifiers.NMA()
nma.notify(prline=prline, prline2=prline2, module=module)
@ -1161,14 +1161,14 @@ class PostProcessor(object):
if mylar.PUSHBULLET_ENABLED:
pushbullet = notifiers.PUSHBULLET()
pushbullet.notify(prline=prline, prline2=prline2, module=module)
-logger.info(module + ' Post-Processing completed for: ' + series + ' ' + dispiss )
+logger.info(module + ' Post-Processing completed for: ' + series + ' ' + dispiss)
self._log(u"Post Processing SUCCESSFUL! ")
self.valreturn.append({"self.log" : self.log,
"mode" : 'stop',
"issueid" : issueid,
"comicid" : comicid})
self.valreturn.append({"self.log": self.log,
"mode": 'stop',
"issueid": issueid,
"comicid": comicid})
return self.queue.put(self.valreturn)

View File

@ -599,7 +599,7 @@ def initialize():
CT_TAG_CBL = bool(check_setting_int(CFG, 'General', 'ct_tag_cbl', 1))
CT_CBZ_OVERWRITE = bool(check_setting_int(CFG, 'General', 'ct_cbz_overwrite', 0))
UNRAR_CMD = check_setting_str(CFG, 'General', 'unrar_cmd', '')
UPCOMING_SNATCHED = bool(check_setting_int(CFG, 'General', 'upcoming_snatched', 1))
UPDATE_ENDED = bool(check_setting_int(CFG, 'General', 'update_ended', 0))
INDIE_PUB = check_setting_str(CFG, 'General', 'indie_pub', '75')
@ -648,7 +648,7 @@ def initialize():
if MODE_32P == 0 and RSSFEED_32P is not None:
#parse out the keys.
KEYS_32P = helpers.parse_32pfeed(RSSFEED_32P)
USERNAME_32P = check_setting_str(CFG, 'Torrents', 'username_32p', '')
PASSWORD_32P = check_setting_str(CFG, 'Torrents', 'password_32p', '')
@ -808,7 +808,7 @@ def initialize():
#print('creating provider sequence order now...')
TMPPR_NUM = 0
PROV_ORDER = []
-while TMPPR_NUM < PR_NUM :
+while TMPPR_NUM < PR_NUM:
PROV_ORDER.append({"order_seq": TMPPR_NUM,
"provider": str(PR[TMPPR_NUM])})
TMPPR_NUM +=1
@ -831,7 +831,7 @@ def initialize():
TMPPR_NUM = 0
while (TMPPR_NUM < PR_NUM):
#print 'checking entry #' + str(TMPPR_NUM) + ': ' + str(PR[TMPPR_NUM])
if not any(d.get("provider",None) == str(PR[TMPPR_NUM]) for d in PROV_ORDER):
if not any(d.get("provider", None) == str(PR[TMPPR_NUM]) for d in PROV_ORDER):
new_order_seqnum = len(PROV_ORDER)
#print 'new provider should be : ' + str(new_order_seqnum) + ' -- ' + str(PR[TMPPR_NUM])
PROV_ORDER.append({"order_seq": str(new_order_seqnum),
@ -849,11 +849,11 @@ def initialize():
flatt_providers = []
for pro in PROV_ORDER:
try:
-provider_seq = re.sub('cbt','32p', pro['provider'])
+provider_seq = re.sub('cbt', '32p', pro['provider'])
flatt_providers.extend([pro['order_seq'], provider_seq])
except TypeError:
#if the value is None (no Name specified for Newznab entry), break out now
-continue
+continue
PROVIDER_ORDER = list(itertools.izip(*[itertools.islice(flatt_providers, i, None, 2) for i in range(2)]))
config_write()
@ -861,8 +861,8 @@ def initialize():
# update folder formats in the config & bump up config version
if CONFIG_VERSION == '0':
from mylar.helpers import replace_all
-file_values = { 'issue': 'Issue', 'title': 'Title', 'series' : 'Series', 'year' : 'Year' }
-folder_values = { 'series' : 'Series', 'publisher':'Publisher', 'year' : 'Year', 'first' : 'First', 'lowerfirst' : 'first' }
+file_values = {'issue': 'Issue', 'title': 'Title', 'series': 'Series', 'year': 'Year'}
+folder_values = {'series': 'Series', 'publisher': 'Publisher', 'year': 'Year', 'first': 'First', 'lowerfirst': 'first'}
FILE_FORMAT = replace_all(FILE_FORMAT, file_values)
FOLDER_FORMAT = replace_all(FOLDER_FORMAT, folder_values)
@ -872,7 +872,7 @@ def initialize():
from mylar.helpers import replace_all
-file_values = { 'Issue': '$Issue',
+file_values = {'Issue': '$Issue',
'Title': '$Title',
'Series': '$Series',
'Year': '$Year',
@ -880,7 +880,7 @@ def initialize():
'series': '$series',
'year': '$year'
}
-folder_values = { 'Series': '$Series',
+folder_values = {'Series': '$Series',
'Publisher': '$Publisher',
'Year': '$Year',
'First': '$First',
@ -965,7 +965,7 @@ def initialize():
else:
vers = 'D'
-USER_AGENT = 'Mylar/'+str(hash)+'('+vers+') +http://www.github.com/evilhero/mylar/'
+USER_AGENT = 'Mylar/' +str(hash) +'(' +vers +') +http://www.github.com/evilhero/mylar/'
# Check for new versions
if CHECK_GITHUB_ON_STARTUP:
@ -1418,7 +1418,7 @@ def start():
#now the scheduler (check every 24 hours)
#SCHED.add_interval_job(weeklypull.pullit, hours=24)
WeeklyScheduler.thread.start()
#let's do a run at the Wanted issues here (on startup) if enabled.
#if NZB_STARTUP_SEARCH:
# threading.Thread(target=search.searchforissue).start()
@ -1878,12 +1878,12 @@ def csv_load():
if not os.path.exists(EXCEPTIONS_FILE):
try:
csvfile = open(str(EXCEPTIONS_FILE), "rb")
-except (OSError,IOError):
+except (OSError, IOError):
if i == 1:
logger.info('No Custom Exceptions found - Using base exceptions only. Creating blank custom_exceptions for your personal use.')
try:
shutil.copy(os.path.join(DATA_DIR,"custom_exceptions_sample.csv"), EXCEPTIONS_FILE)
except (OSError,IOError):
shutil.copy(os.path.join(DATA_DIR, "custom_exceptions_sample.csv"), EXCEPTIONS_FILE)
except (OSError, IOError):
logger.error('Cannot create custom_exceptions.csv in ' + str(DATA_DIR) + '. Make sure _sample.csv is present and/or check permissions.')
return
else:

View File

@ -15,17 +15,18 @@
from mylar import db
def getCachedArt(albumid):
from mylar import cache
c = cache.Cache()
artwork_path = c.get_artwork_from_cache(ComicID=comicid)
if not artwork_path:
return None
if artwork_path.startswith('http://'):
artwork = urllib.urlopen(artwork_path).read()
return artwork

View File

@ -46,8 +46,7 @@ class Api(object):
self.data = None
self.callback = None
-def checkParams(self,*args,**kwargs):
+def checkParams(self, *args, **kwargs):
if not mylar.API_ENABLED:
self.data = 'API not enabled'

View File

@ -5,13 +5,14 @@ from bs4 import BeautifulSoup
import mylar
from mylar import logger
class info32p(object):
def __init__(self, reauthenticate=False, searchterm=None):
self.module = '[32P-AUTHENTICATION]'
self.url = 'https://32pag.es/login.php'
-self.payload = {'username': mylar.USERNAME_32P,
+self.payload = {'username': mylar.USERNAME_32P,
'password': mylar.PASSWORD_32P}
self.headers = {'Content-type': 'application/x-www-form-urlencoded',
'Accept-Charset': 'utf-8',
@ -50,8 +51,8 @@ class info32p(object):
logger.info('[32P] Successfully authenticated. Initiating search for : ' + self.searchterm)
return self.search32p(s)
soup = BeautifulSoup(r.content)
all_script = soup.find_all("script", {"src":False})
all_script2 = soup.find_all("link", {"rel":"alternate"})
all_script = soup.find_all("script", {"src": False})
all_script2 = soup.find_all("link", {"rel": "alternate"})
for ind_s in all_script:
all_value = str(ind_s)
@ -62,13 +63,13 @@ class info32p(object):
if al == 'authkey':
auth_found = True
elif auth_found == True and al != '=':
authkey = re.sub('["/;]','', al).strip()
authkey = re.sub('["/;]', '', al).strip()
auth_found = False
logger.fdebug(self.module + ' Authkey found: ' + str(authkey))
if al == 'userid':
user_found = True
elif user_found == True and al != '=':
userid = re.sub('["/;]','', al).strip()
userid = re.sub('["/;]', '', al).strip()
user_found = False
logger.fdebug(self.module + ' Userid found: ' + str(userid))
@ -79,23 +80,23 @@ class info32p(object):
alurl = al['href']
if 'auth=' in alurl and 'torrents_notify' in alurl and not authfound:
f1 = alurl.find('auth=')
f2 = alurl.find('&',f1+1)
auth = alurl[f1+5:f2]
f2 = alurl.find('&', f1 + 1)
auth = alurl[f1 +5:f2]
logger.fdebug(self.module + ' Auth:' + str(auth))
authfound = True
p1 = alurl.find('passkey=')
p2 = alurl.find('&',p1+1)
passkey = alurl[p1+8:p2]
p2 = alurl.find('&', p1 + 1)
passkey = alurl[p1 +8:p2]
logger.fdebug(self.module + ' Passkey:' + str(passkey))
if self.reauthenticate: break
if 'torrents_notify' in alurl and ('torrents_notify_' + str(passkey)) not in alurl:
notifyname_st = alurl.find('name=')
notifyname_en = alurl.find('&',notifyname_st+1)
notifyname_en = alurl.find('&', notifyname_st +1)
if notifyname_en == -1: notifyname_en = len(alurl)
-notifyname = alurl[notifyname_st+5:notifyname_en]
+notifyname = alurl[notifyname_st +5:notifyname_en]
notifynumber_st = alurl.find('torrents_notify_')
-notifynumber_en = alurl.find('_', notifynumber_st+17)
+notifynumber_en = alurl.find('_', notifynumber_st +17)
notifynumber = alurl[notifynumber_st:notifynumber_en]
logger.fdebug(self.module + ' [NOTIFICATION: ' + str(notifyname) + '] Notification ID: ' + str(notifynumber))

View File

@ -21,44 +21,45 @@ import lib.simplejson as simplejson
import mylar
from mylar import db, helpers, logger
class Cache(object):
"""
-This class deals with getting, storing and serving up artwork (album
+This class deals with getting, storing and serving up artwork (album
art, artist images, etc) and info/descriptions (album info, artist descrptions)
-to and from the cache folder. This can be called from within a web interface,
+to and from the cache folder. This can be called from within a web interface,
for example, using the helper functions getInfo(id) and getArtwork(id), to utilize the cached
images rather than having to retrieve them every time the page is reloaded.
So you can call cache.getArtwork(id) which will return an absolute path
to the image file on the local machine, or if the cache directory
doesn't exist, or can not be written to, it will return a url to the image.
Call cache.getInfo(id) to grab the artist/album info; will return the text description
The basic format for art in the cache is <musicbrainzid>.<date>.<ext>
and for info it is <musicbrainzid>.<date>.txt
"""
mylar.CACHE_DIR = os.path.join(str(mylar.PROG_DIR), 'cache/')
path_to_art_cache = os.path.join(mylar.CACHE_DIR, 'artwork')
id = None
-id_type = None # 'comic' or 'issue' - set automatically depending on whether ComicID or IssueID is passed
-query_type = None # 'artwork','thumb' or 'info' - set automatically
+id_type = None # 'comic' or 'issue' - set automatically depending on whether ComicID or IssueID is passed
+query_type = None # 'artwork','thumb' or 'info' - set automatically
artwork_files = []
thumb_files = []
artwork_errors = False
artwork_url = None
thumb_errors = False
thumb_url = None
def __init__(self):
pass
def _exists(self, type):
self.artwork_files = glob.glob(os.path.join(self.path_to_art_cache, self.id + '*'))
@ -70,9 +71,9 @@ class Cache(object):
return True
else:
return False
elif type == 'thumb':
if self.thumb_files:
return True
else:
@ -81,38 +82,38 @@ class Cache(object):
def _get_age(self, date):
# There's probably a better way to do this
split_date = date.split('-')
-days_old = int(split_date[0])*365 + int(split_date[1])*30 + int(split_date[2])
+days_old = int(split_date[0]) *365 + int(split_date[1]) *30 + int(split_date[2])
return days_old
def _is_current(self, filename=None, date=None):
if filename:
base_filename = os.path.basename(filename)
date = base_filename.split('.')[1]
# Calculate how old the cached file is based on todays date & file date stamp
# helpers.today() returns todays date in yyyy-mm-dd format
if self._get_age(helpers.today()) - self._get_age(date) < 30:
return True
else:
return False
def get_artwork_from_cache(self, ComicID=None, imageURL=None):
'''
Pass a comicvine id to this function (either ComicID or IssueID)
'''
self.query_type = 'artwork'
if ComicID:
self.id = ComicID
self.id_type = 'comic'
else:
self.id = IssueID
self.id_type = 'issue'
if self._exists('artwork') and self._is_current(filename=self.artwork_files[0]):
return self.artwork_files[0]
else:
@ -155,18 +156,16 @@ class Cache(object):
self.artwork_url = image_url
def getArtwork(ComicID=None, imageURL=None):
c = Cache()
-artwork_path = c.get_artwork_from_cache(ComicID,imageURL)
-logger.info('artwork path at : ' + str(artwork_path))
+artwork_path = c.get_artwork_from_cache(ComicID, imageURL)
+logger.info('artwork path at : ' + str(artwork_path))
if not artwork_path:
return None
if artwork_path.startswith('http://'):
return artwork_path
else:
artwork_file = os.path.basename(artwork_path)
return "cache/artwork/" + artwork_file

View File

@ -18,7 +18,8 @@ import mylar
from mylar import logger
from mylar.helpers import cvapi_check
-def run (dirName, nzbName=None, issueid=None, comversion=None, manual=None, filename=None, module=None):
+def run(dirName, nzbName=None, issueid=None, comversion=None, manual=None, filename=None, module=None):
if module is None:
module = ''
module += '[META-TAGGER]'
@ -28,7 +29,6 @@ def run (dirName, nzbName=None, issueid=None, comversion=None, manual=None, file
## Set the directory in which comictagger and other external commands are located - IMPORTANT - ##
# ( User may have to modify, depending on their setup, but these are some guesses for now )
if platform.system() == "Windows":
#if it's a source install.
sys_type = 'windows'
@ -57,7 +57,6 @@ def run (dirName, nzbName=None, issueid=None, comversion=None, manual=None, file
logger.fdebug(module + ' UNRAR path set to : ' + unrar_cmd)
elif platform.system() == "Darwin":
#Mac OS X
sys_type = 'mac'
@ -68,7 +67,7 @@ def run (dirName, nzbName=None, issueid=None, comversion=None, manual=None, file
unrar_cmd = mylar.UNRAR_CMD.strip()
logger.fdebug(module + ' UNRAR path set to : ' + unrar_cmd)
else:
#for the 'nix
sys_type = 'linux'
@ -100,7 +99,7 @@ def run (dirName, nzbName=None, issueid=None, comversion=None, manual=None, file
file_conversion = True
file_extension_fixing = True
-if not os.path.exists( unrar_cmd ):
+if not os.path.exists(unrar_cmd):
logger.fdebug(module + ' WARNING: cannot find the unrar command.')
logger.fdebug(module + ' File conversion and extension fixing not available')
logger.fdebug(module + ' You probably need to edit this script, or install the missing tool, or both!')
@ -108,13 +107,12 @@ def run (dirName, nzbName=None, issueid=None, comversion=None, manual=None, file
#file_conversion = False
#file_extension_fixing = False
## Sets up other directories ##
-scriptname = os.path.basename( sys.argv[0] )
-downloadpath = os.path.abspath( dirName )
-sabnzbdscriptpath = os.path.dirname( sys.argv[0] )
+scriptname = os.path.basename(sys.argv[0])
+downloadpath = os.path.abspath(dirName)
+sabnzbdscriptpath = os.path.dirname(sys.argv[0])
if manual is None:
comicpath = os.path.join( downloadpath , "temp" )
comicpath = os.path.join(downloadpath, "temp")
else:
chkpath, chkfile = os.path.split(filename)
logger.fdebug(module + ' chkpath: ' + chkpath)
@ -123,8 +121,8 @@ def run (dirName, nzbName=None, issueid=None, comversion=None, manual=None, file
if os.path.isdir(chkpath) and chkpath != downloadpath:
logger.fdebug(module + ' Changing ' + downloadpath + ' location to ' + chkpath + ' as it is a directory.')
downloadpath = chkpath
-comicpath = os.path.join( downloadpath, issueid )
-unrar_folder = os.path.join( comicpath , "unrard" )
+comicpath = os.path.join(downloadpath, issueid)
+unrar_folder = os.path.join(comicpath, "unrard")
logger.fdebug(module + ' Paths / Locations:')
logger.fdebug(module + ' scriptname : ' + scriptname)
@ -134,8 +132,8 @@ def run (dirName, nzbName=None, issueid=None, comversion=None, manual=None, file
logger.fdebug(module + ' unrar_folder : ' + unrar_folder)
logger.fdebug(module + ' Running the ComicTagger Add-on for Mylar')
-if os.path.exists( comicpath ):
-shutil.rmtree( comicpath )
+if os.path.exists(comicpath):
+shutil.rmtree(comicpath)
logger.fdebug(module + ' Attempting to create directory @: ' + str(comicpath))
try:
@ -147,27 +145,27 @@ def run (dirName, nzbName=None, issueid=None, comversion=None, manual=None, file
logger.fdebug(module + ' Filename is : ' + str(filename))
if filename is None:
filename_list = glob.glob( os.path.join( downloadpath, "*.cbz" ) )
filename_list.extend( glob.glob( os.path.join( downloadpath, "*.cbr" ) ) )
filename_list = glob.glob(os.path.join(downloadpath, "*.cbz"))
filename_list.extend(glob.glob(os.path.join(downloadpath, "*.cbr")))
fcount = 1
for f in filename_list:
-if fcount > 1:
+if fcount > 1:
logger.fdebug(module + ' More than one cbr/cbz within path, performing Post-Process on first file detected: ' + f)
break
if f.endswith('.cbz'):
logger.fdebug(module + ' .cbz file detected. Excluding from temporary directory move at this time.')
comicpath = downloadpath
else:
-shutil.move( f, comicpath )
-filename = f #just the filename itself
-fcount+=1
+shutil.move(f, comicpath)
+filename = f # just the filename itself
+fcount += 1
else:
# if the filename is identical to the parent folder, the entire subfolder gets copied since it's the first match, instead of just the file
#if os.path.isfile(filename):
#if the filename doesn't exist - force the path assuming it's the 'download path'
filename = os.path.join(downloadpath, filename)
logger.fdebug(module + ' The path where the file is that I was provided is probably wrong - modifying it to : ' + filename)
-shutil.move( filename, os.path.join(comicpath, os.path.split(filename)[1]) )
+shutil.move(filename, os.path.join(comicpath, os.path.split(filename)[1]))
logger.fdebug(module + ' moving : ' + filename + ' to ' + os.path.join(comicpath, os.path.split(filename)[1]))
try:
@ -176,33 +174,33 @@ def run (dirName, nzbName=None, issueid=None, comversion=None, manual=None, file
logger.warn('Unable to detect filename within directory - I am aborting the tagging. You best check things out.')
return "fail"
#print comicpath
-#print os.path.join( comicpath, filename )
+#print os.path.join(comicpath, filename)
if filename.endswith('.cbr'):
-f = os.path.join( comicpath, filename )
-if zipfile.is_zipfile( f ):
+f = os.path.join(comicpath, filename)
+if zipfile.is_zipfile(f):
logger.fdebug(module + ' zipfile detected')
-base = os.path.splitext( f )[0]
-shutil.move( f, base + ".cbz" )
-logger.fdebug(module + ' {0}: renaming {1} to be a cbz'.format( scriptname, os.path.basename( f ) ))
+base = os.path.splitext(f)[0]
+shutil.move(f, base + ".cbz")
+logger.fdebug(module + ' {0}: renaming {1} to be a cbz'.format(scriptname, os.path.basename(f)))
filename = base + '.cbz'
if file_extension_fixing:
if filename.endswith('.cbz'):
logger.info(module + ' Filename detected as a .cbz file.')
-f = os.path.join( comicpath, filename )
+f = os.path.join(comicpath, filename)
logger.fdebug(module + ' filename : ' + f)
-if os.path.isfile( f ):
+if os.path.isfile(f):
try:
rar_test_cmd_output = "is not RAR archive" #default, in case of error
rar_test_cmd_output = subprocess.check_output( [ unrar_cmd, "t", f ] )
rar_test_cmd_output = "is not RAR archive" # default, in case of error
rar_test_cmd_output = subprocess.check_output([unrar_cmd, "t", f])
except:
logger.fdebug(module + ' This is a zipfile. Unable to test rar.')
if not "is not RAR archive" in rar_test_cmd_output:
-base = os.path.splitext( f )[0]
-shutil.move( f, base + ".cbr" )
-logger.fdebug(module + ' {0}: renaming {1} to be a cbr'.format( scriptname, os.path.basename( f ) ))
+base = os.path.splitext(f)[0]
+shutil.move(f, base + ".cbr")
+logger.fdebug(module + ' {0}: renaming {1} to be a cbr'.format(scriptname, os.path.basename(f)))
else:
try:
with open(f): pass
@ -210,7 +208,6 @@ def run (dirName, nzbName=None, issueid=None, comversion=None, manual=None, file
logger.warn(module + ' No zip file present')
return "fail"
#if the temp directory is the LAST directory in the path, it's part of the CT logic path above
#and can be removed to allow a copy back to the original path to work.
if 'temp' in os.path.basename(os.path.normpath(comicpath)):
@ -220,7 +217,7 @@ def run (dirName, nzbName=None, issueid=None, comversion=None, manual=None, file
base = os.path.join(re.sub(issueid, '', comicpath), filename) #extension is already .cbz
logger.fdebug(module + ' Base set to : ' + base)
logger.fdebug(module + ' Moving : ' + f + ' - to - ' + base)
-shutil.move( f, base)
+shutil.move(f, base)
try:
with open(base):
logger.fdebug(module + ' Verified file exists in location: ' + base)
@ -231,7 +228,7 @@ def run (dirName, nzbName=None, issueid=None, comversion=None, manual=None, file
if removetemp == True:
if comicpath != downloadpath:
-shutil.rmtree( comicpath )
+shutil.rmtree(comicpath)
logger.fdebug(module + ' Successfully removed temporary directory: ' + comicpath)
else:
logger.fdebug(module + ' Unable to remove temporary directory since it is identical to the download location : ' + comicpath)
@ -241,28 +238,28 @@ def run (dirName, nzbName=None, issueid=None, comversion=None, manual=None, file
# Now rename all CBR files to RAR
if filename.endswith('.cbr'):
#logger.fdebug('renaming .cbr to .rar')
-f = os.path.join( comicpath, filename)
-base = os.path.splitext( f )[0]
+f = os.path.join(comicpath, filename)
+base = os.path.splitext(f)[0]
baserar = base + ".rar"
-shutil.move( f, baserar )
+shutil.move(f, baserar)
## Changes any cbr files to cbz files for insertion of metadata ##
if file_conversion:
-f = os.path.join( comicpath, filename )
-logger.fdebug(module + ' {0}: converting {1} to be zip format'.format( scriptname, os.path.basename( f ) ))
-basename = os.path.splitext( f )[0]
+f = os.path.join(comicpath, filename)
+logger.fdebug(module + ' {0}: converting {1} to be zip format'.format(scriptname, os.path.basename(f)))
+basename = os.path.splitext(f)[0]
zipname = basename + ".cbz"
# Move into the folder where we will be unrar-ing things
-os.makedirs( unrar_folder )
-os.chdir( unrar_folder )
+os.makedirs(unrar_folder)
+os.chdir(unrar_folder)
# Extract and zip up
-logger.fdebug(module + ' {0}: Comicpath is ' + baserar) #os.path.join(comicpath,basename))
-logger.fdebug(module + ' {0}: Unrar is ' + unrar_folder )
+logger.fdebug(module + ' {0}: Comicpath is ' + baserar) # os.path.join(comicpath,basename))
+logger.fdebug(module + ' {0}: Unrar is ' + unrar_folder)
try:
#subprocess.Popen( [ unrar_cmd, "x", os.path.join(comicpath,basename) ] ).communicate()
-output = subprocess.check_output( [ unrar_cmd, 'x', baserar ] )
+output = subprocess.check_output([unrar_cmd, 'x', baserar])
except CalledProcessError as e:
if e.returncode == 3:
logger.warn(module + ' [Unrar Error 3] - Broken Archive.')
@ -271,26 +268,26 @@ def run (dirName, nzbName=None, issueid=None, comversion=None, manual=None, file
logger.warn(module + ' Marking this as an incomplete download.')
return "unrar error"
shutil.make_archive( basename, "zip", unrar_folder )
shutil.make_archive(basename, "zip", unrar_folder)
# get out of unrar folder and clean up
-os.chdir( comicpath )
-shutil.rmtree( unrar_folder )
+os.chdir(comicpath)
+shutil.rmtree(unrar_folder)
## Changes zip to cbz
-f = os.path.join( comicpath, os.path.splitext(filename)[0] + ".zip" )
+f = os.path.join(comicpath, os.path.splitext(filename)[0] + ".zip")
#print "zipfile" + f
try:
with open(f): pass
except:
logger.warn(module + ' No zip file present:' + f)
return "fail"
base = os.path.splitext( f )[0]
shutil.move( f, base + ".cbz" )
return "fail"
base = os.path.splitext(f)[0]
shutil.move(f, base + ".cbz")
nfilename = base + ".cbz"
#else:
-# logger.fdebug(module + ' Filename:' + filename)
+# logger.fdebug(module + ' Filename:' + filename)
# nfilename = filename
#if os.path.isfile( nfilename ):
@ -313,7 +310,7 @@ def run (dirName, nzbName=None, issueid=None, comversion=None, manual=None, file
logger.fdebug(module + ' Converted directory: ' + str(file_dir))
logger.fdebug(module + ' Converted filename: ' + str(file_n))
-logger.fdebug(module + ' Destination path: ' + os.path.join(file_dir,file_n)) #dirName,file_n))
+logger.fdebug(module + ' Destination path: ' + os.path.join(file_dir, file_n)) #dirName,file_n))
logger.fdebug(module + ' dirName: ' + dirName)
logger.fdebug(module + ' absDirName: ' + os.path.abspath(dirName))
@ -322,25 +319,26 @@ def run (dirName, nzbName=None, issueid=None, comversion=None, manual=None, file
comversion = '1'
comversion = re.sub('[^0-9]', '', comversion).strip()
cvers = 'volume=' + str(comversion)
tagoptions = [ "-s", "--verbose", "-m", cvers ]
tagoptions = ["-s", "--verbose", "-m", cvers]
## check comictagger version - less than 1.15.beta - take your chances.
if sys_type == 'windows':
-ctversion = subprocess.check_output( [ comictagger_cmd, "--version" ] )
+ctversion = subprocess.check_output([comictagger_cmd, "--version"])
else:
-ctversion = subprocess.check_output( [ sys.executable, comictagger_cmd, "--version" ] )
+ctversion = subprocess.check_output([sys.executable, comictagger_cmd, "--version"])
ctend = ctversion.find(':')
ctcheck = re.sub("[^0-9]", "", ctversion[:ctend])
ctcheck = re.sub('\.', '', ctcheck).strip()
-if int(ctcheck) >= int('1115'): #(v1.1.15)
+if int(ctcheck) >= int('1115'): # (v1.1.15)
if mylar.COMICVINE_API == mylar.DEFAULT_CVAPI:
logger.fdebug(module + ' ' + ctversion[:ctend] + ' being used - no personal ComicVine API Key supplied. Take your chances.')
use_cvapi = "False"
else:
logger.fdebug(module + ' ' + ctversion[:ctend] + ' being used - using personal ComicVine API key supplied via mylar.')
use_cvapi = "True"
tagoptions.extend( [ "--cv-api-key", mylar.COMICVINE_API ] )
tagoptions.extend(["--cv-api-key", mylar.COMICVINE_API])
else:
logger.fdebug(module + ' ' + ctversion[:ctend] + ' being used - personal ComicVine API key not supported in this version. Good luck.')
use_cvapi = "False"
@ -360,45 +358,44 @@ def run (dirName, nzbName=None, issueid=None, comversion=None, manual=None, file
if tagcnt == 0:
logger.warn(module + ' You have metatagging enabled, but you have not selected the type(s) of metadata to write. Please fix and re-run manually')
return "fail"
#if it's a cbz file - check if no-overwrite existing tags is enabled / disabled in config.
if nfilename.endswith('.cbz'):
if mylar.CT_CBZ_OVERWRITE:
logger.fdebug(module + ' Will modify existing tag blocks even if it exists.')
else:
logger.fdebug(module + ' Will NOT modify existing tag blocks even if they exist already.')
tagoptions.extend( [ "--nooverwrite" ] )
tagoptions.extend(["--nooverwrite"])
if issueid is None:
tagoptions.extend( [ "-f", "-o" ] )
tagoptions.extend(["-f", "-o"])
else:
tagoptions.extend( [ "-o", "--id", issueid ] )
tagoptions.extend(["-o", "--id", issueid])
original_tagoptions = tagoptions
og_tagtype = None
-while ( i <= tagcnt ):
-if i == 1:
+while (i <= tagcnt):
+if i == 1:
tagtype = 'cr' # CR meta-tagging cycle.
tagdisp = 'ComicRack tagging'
-elif i == 2:
-tagtype = 'cbl' #Cbl meta-tagging cycle
+elif i == 2:
+tagtype = 'cbl' # Cbl meta-tagging cycle
tagdisp = 'Comicbooklover tagging'
f_tagoptions = original_tagoptions
-if og_tagtype is not None:
+if og_tagtype is not None:
for index, item in enumerate(f_tagoptions):
if item == og_tagtype:
f_tagoptions[index] = tagtype
else:
f_tagoptions.extend( [ "--type", tagtype, nfilename ] )
f_tagoptions.extend(["--type", tagtype, nfilename])
og_tagtype = tagtype
logger.info(module + ' ' + tagdisp + ' meta-tagging processing started.')
#CV API Check here.
if mylar.CVAPI_COUNT == 0 or mylar.CVAPI_COUNT >= 200:
cvapi_check()
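One subtlety the reformatted loop above preserves: `f_tagoptions = original_tagoptions` binds a second name to the same list, so replacing the old tag type by index on the second pass rewrites the shared list in place. The pass structure, with hypothetical values:

tagoptions = ["-s", "--verbose", "-m", "volume=1", "-o", "--id", "12345"]
for tagtype in ('cr', 'cbl'):
    if "--type" in tagoptions:
        tagoptions[tagoptions.index("--type") + 1] = tagtype   # pass 2: swap type in place
    else:
        tagoptions.extend(["--type", tagtype, "issue.cbz"])    # pass 1: append type + file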
@ -411,19 +408,18 @@ def run (dirName, nzbName=None, issueid=None, comversion=None, manual=None, file
script_cmd = shlex.split(currentScriptName, posix=False) + f_tagoptions
# use subprocess to run the command and capture output
logger.fdebug(module + ' Executing command: '+str(script_cmd))
logger.fdebug(module + ' Absolute path to script: '+script_cmd[0])
logger.fdebug(module + ' Executing command: ' +str(script_cmd))
logger.fdebug(module + ' Absolute path to script: ' +script_cmd[0])
try:
p = subprocess.Popen(script_cmd)
out, err = p.communicate() #@UnusedVariable
logger.fdebug(module + '[COMIC-TAGGER] : '+str(out))
out, err = p.communicate() # @UnusedVariable
logger.fdebug(module + '[COMIC-TAGGER] : ' +str(out))
logger.info(module + '[COMIC-TAGGER] Successfully wrote ' + tagdisp)
except OSError, e:
logger.warn(module + '[COMIC-TAGGER] Unable to run comictagger with the options provided: ' + str(script_cmd))
#increment CV API counter.
mylar.CVAPI_COUNT +=1
mylar.CVAPI_COUNT += 1
## Tag each CBZ, and move it back to original directory ##
@ -444,33 +440,32 @@ def run (dirName, nzbName=None, issueid=None, comversion=None, manual=None, file
# mylar.CVAPI_COUNT +=1
i+=1
if os.path.exists(os.path.join(os.path.abspath(file_dir),file_n)): #(os.path.abspath(dirName),file_n)):
logger.fdebug(module + ' Unable to move from temporary directory - file already exists in destination: ' + os.path.join(os.path.abspath(file_dir),file_n))
if os.path.exists(os.path.join(os.path.abspath(file_dir), file_n)): # (os.path.abspath(dirName),file_n)):
logger.fdebug(module + ' Unable to move from temporary directory - file already exists in destination: ' + os.path.join(os.path.abspath(file_dir), file_n))
else:
try:
shutil.move( os.path.join(comicpath, nfilename), os.path.join(os.path.abspath(file_dir),file_n)) #os.path.abspath(dirName),file_n))
shutil.move(os.path.join(comicpath, nfilename), os.path.join(os.path.abspath(file_dir), file_n)) #os.path.abspath(dirName),file_n))
#shutil.move( nfilename, os.path.join(os.path.abspath(dirName),file_n))
logger.fdebug(module + ' Successfully moved file from temporary path.')
except:
logger.error(module + ' Unable to move file from temporary path [' + os.path.join(comicpath, nfilename) + ']. Deletion of temporary path halted.')
logger.error(module + ' attempt to move: ' + os.path.join(comicpath, nfilename) + ' to ' + os.path.join(os.path.abspath(file_dir), file_n))
return os.path.join(os.path.abspath(file_dir), file_n) #os.path.join(comicpath, nfilename)
return os.path.join(os.path.abspath(file_dir), file_n) # os.path.join(comicpath, nfilename)
i = 0
os.chdir( mylar.PROG_DIR )
os.chdir(mylar.PROG_DIR)
while i < 10:
try:
logger.fdebug(module + ' Attempting to remove: ' + comicpath)
shutil.rmtree( comicpath )
shutil.rmtree(comicpath)
except:
time.sleep(.1)
else:
return os.path.join(os.path.abspath(file_dir), file_n) #dirName), file_n)
i+=1
return os.path.join(os.path.abspath(file_dir), file_n) # dirName), file_n)
i += 1
logger.fdebug(module + ' Failed to remove temporary path : ' + str(comicpath))
return os.path.join(os.path.abspath(file_dir),file_n) #dirName),file_n)
return os.path.join(os.path.abspath(file_dir), file_n) # dirName),file_n)
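The cleanup loop above retries `shutil.rmtree` up to ten times with a short sleep, since the tagger may still be releasing the temporary directory. The same pattern in isolation:

import shutil
import time

def remove_with_retry(path, attempts=10, delay=0.1):
    # rmtree can fail transiently while another process still holds the directory
    for _ in range(attempts):
        try:
            shutil.rmtree(path)
            return True
        except OSError:
            time.sleep(delay)
    return False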
View File
@ -10,17 +10,18 @@ from decimal import Decimal
from HTMLParser import HTMLParseError
from time import strptime
def cbdb(comicnm, ComicYear):
#comicnm = 'Animal Man'
#print ( "comicname: " + str(comicnm) )
#print ( "comicyear: " + str(comicyr) )
comicnm = re.sub(' ', '+', comicnm)
input = "http://mobile.comicbookdb.com/search.php?form_search=" + str(comicnm) + "&form_searchtype=Title&x=0&y=0"
response = urllib2.urlopen ( input )
soup = BeautifulSoup ( response)
response = urllib2.urlopen(input)
soup = BeautifulSoup(response)
abc = soup.findAll('a', href=True)
lenabc = len(abc)
i=0
i = 0
resultName = []
resultID = []
resultYear = []
@ -29,7 +30,7 @@ def cbdb(comicnm, ComicYear):
matched = "no"
while (i < lenabc):
titlet = abc[i] #iterate through the href's, pulling out only results.
titlet = abc[i] # iterate through the href's, pulling out only results.
print ("titlet: " + str(titlet))
if "title.php" in str(titlet):
print ("found title")
@ -38,10 +39,10 @@ def cbdb(comicnm, ComicYear):
resultName = tempName[:tempName.find("(")]
print ("ComicName: " + resultName)
resultYear = tempName[tempName.find("(")+1:tempName.find(")")]
resultYear = tempName[tempName.find("(") +1:tempName.find(")")]
if resultYear.isdigit(): pass
else:
i+=1
else:
i += 1
continue
print "ComicYear: " + resultYear
@ -50,11 +51,10 @@ def cbdb(comicnm, ComicYear):
print "CBDB URL: " + resultURL
IDst = ID_som.find('?ID=')
resultID = ID_som[(IDst+4):]
resultID = ID_som[(IDst +4):]
print "CBDB ID: " + resultID
print ("resultname: " + resultName)
CleanComicName = re.sub('[\,\.\:\;\'\[\]\(\)\!\@\#\$\%\^\&\*\-\_\+\=\?\/]', '', comicnm)
CleanComicName = re.sub(' ', '', CleanComicName).lower()
@ -67,7 +67,7 @@ def cbdb(comicnm, ComicYear):
print ("i:" + str(i) + "...matched by name to Mylar!")
print ("ComicYear: " + str(ComicYear) + ".. to ResultYear: " + str(resultYear))
if resultYear.isdigit():
if int(resultYear) == int(ComicYear) or int(resultYear) == int(ComicYear)+1:
if int(resultYear) == int(ComicYear) or int(resultYear) == int(ComicYear) +1:
resultID = str(resultID)
print ("Matchumundo!")
matched = "yes"
@ -75,7 +75,7 @@ def cbdb(comicnm, ComicYear):
continue
if matched == "yes":
break
i+=1
i += 1
return IssueDetails(resultID)
@ -84,15 +84,15 @@ def IssueDetails(cbdb_id):
annualslist = []
gcount = 0
pagethis = 'http://comicbookdb.com/title.php?ID=' + str(cbdb_id)
response = urllib2.urlopen(pagethis)
soup = BeautifulSoup(response)
resultp = soup.findAll("table")
total = len(resultp) # -- number of tables
#get details here
startit = resultp[0].find("table", {"width" : "884" })
startit = resultp[0].find("table", {"width": "884"})
i = 0
pubchk = 0
@ -111,7 +111,7 @@ def IssueDetails(cbdb_id):
noi = boop[i].nextSibling
print ("number of issues: " + noi)
i+=1
i += 1
if i > len(boop): break
@ -121,19 +121,19 @@ def IssueDetails(cbdb_id):
# totalIssues = str(noi)
# print ("Publication Dates : " + str(resultPublished))
# print ("Total Issues: " + str(totalIssues))
ti = 1 # start at one as 0 is the ENTIRE soup structure
ti = 1 # start at one as 0 is the ENTIRE soup structure
while (ti < total):
#print result
if resultp[ti].find("a", {"class" : "page_link" }):
if resultp[ti].find("a", {"class": "page_link"}):
#print "matcheroso"
tableno = resultp[ti].findAll('tr') #7th table, all the tr's
tableno = resultp[ti].findAll('tr') # 7th table, all the tr's
#print ti, total
break
ti+=1
ti += 1
noresults = len(tableno)
#print ("tableno: " + str(tableno))
print ("there are " + str(noresults) + " issues total (cover variations, et all).")
i=1 # start at 1 so we don't grab the table headers ;)
i = 1 # start at 1 so we don't grab the table headers ;)
issue = []
storyarc = []
pubdate = []
@ -143,27 +143,27 @@ def IssueDetails(cbdb_id):
while (i < noresults):
resultit = tableno[i] # 7th table, 1st set of tr (which indicates an issue).
print ("resultit: " + str(resultit))
issuet = resultit.find("a", {"class" : "page_link" }) # gets the issue # portion
issuet = resultit.find("a", {"class": "page_link"}) # gets the issue # portion
try:
issue = issuet.findNext(text=True)
except:
print ("blank space - skipping")
i+=1
i += 1
continue
if 'annual' not in issue.lower():
i+=1
if 'annual' not in issue.lower():
i += 1
continue
lent = resultit('a',href=True) #gathers all the a href's within this particular tr
lent = resultit('a', href=True) #gathers all the a href's within this particular tr
#print ("lent: " + str(lent))
lengtht = len(lent) #returns the # of ahref's within this particular tr
lengtht = len(lent) # returns the # of ahref's within this particular tr
#print ("lengtht: " + str(lengtht))
#since we don't know which one contains the story arc, we need to iterate through to find it
#we need to know story arc, because the following td is the Publication Date
n=0
n = 0
issuetitle = 'None'
while (n < lengtht):
storyt = lent[n] #
storyt = lent[n] #
print ("storyt: " + str(storyt))
if 'issue.php' in storyt:
issuetitle = storyt.findNext(text=True)
@ -173,21 +173,21 @@ def IssueDetails(cbdb_id):
storyarc = storyt.findNext(text=True)
#print ("Story Arc: " + str(storyarc))
break
n+=1
n += 1
pubd = resultit('td') # find all the <td>'s within this tr
publen = len(pubd) # find the # of <td>'s
pubs = pubd[publen-1] #take the last <td> which will always contain the publication date
pdaters = pubs.findNext(text=True) #get the actual date :)
basmonths = {'january':'01','february':'02','march':'03','april':'04','may':'05','june':'06','july':'07','august':'09','september':'10','october':'11','december':'12','annual':''}
publen = len(pubd) # find the # of <td>'s
pubs = pubd[publen -1] # take the last <td> which will always contain the publication date
pdaters = pubs.findNext(text=True) # get the actual date :)
basmonths = {'january': '01', 'february': '02', 'march': '03', 'april': '04', 'may': '05', 'june': '06', 'july': '07', 'august': '09', 'september': '10', 'october': '11', 'december': '12', 'annual': ''}
for numbs in basmonths:
if numbs in pdaters.lower():
pconv = basmonths[numbs]
ParseYear = re.sub('/s','',pdaters[-5:])
ParseYear = re.sub('/s', '', pdaters[-5:])
if basmonths[numbs] == '':
pubdate = str(ParseYear)
else:
pubdate= str(ParseYear) + "-" + str(pconv)
#logger.fdebug("!success - Publication date: " + str(ParseDate))
pubdate = str(ParseYear) + "-" + str(pconv)
# logger.fdebug("!success - Publication date: " + str(ParseDate))
#pubdate = re.sub("[^0-9]", "", pdaters)
issuetmp = re.sub("[^0-9]", '', issue)
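Since this commit only touches whitespace, two pre-existing slips in this hunk survive in both columns: `basmonths` skips 'november' and shifts the codes for august through december by one, and the year cleanup strips the literal text '/s' rather than whitespace. A corrected sketch, assuming standard two-digit month codes were the intent:

basmonths = {'january': '01', 'february': '02', 'march': '03', 'april': '04',
             'may': '05', 'june': '06', 'july': '07', 'august': '08',
             'september': '09', 'october': '10', 'november': '11',
             'december': '12', 'annual': ''}
ParseYear = re.sub('\s', '', pdaters[-5:])   # '\s' matches whitespace; '/s' only matches a literal '/s'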
@ -200,9 +200,9 @@ def IssueDetails(cbdb_id):
'AnnualDate': pubdate.strip(),
'AnnualYear': ParseYear.strip()
})
gcount+=1
gcount += 1
print("annualslist appended...")
i+=1
i += 1
annuals['annualslist'] = annualslist
View File
@ -5,8 +5,8 @@
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Mylar is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the
# implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public
# Mylar is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the
# implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public
# License for more details.
#
# You should have received a copy of the GNU General Public License
@ -27,6 +27,7 @@ from mylar.helpers import cvapi_check
from bs4 import BeautifulSoup as Soup
import httplib
def patch_http_response_read(func):
def inner(*args):
try:
@ -41,7 +42,8 @@ if platform.python_version() == '2.7.6':
httplib.HTTPConnection._http_vsn = 10
httplib.HTTPConnection._http_vsn_str = 'HTTP/1.0'
def pulldetails(comicid,type,issueid=None,offset=1,arclist=None,comicidlist=None):
def pulldetails(comicid, type, issueid=None, offset=1, arclist=None, comicidlist=None):
#import easy to use xml parser called minidom:
from xml.dom.minidom import parseString
@ -53,7 +55,7 @@ def pulldetails(comicid,type,issueid=None,offset=1,arclist=None,comicidlist=None
if type == 'comic':
if not comicid.startswith('4050-'): comicid = '4050-' + comicid
PULLURL= mylar.CVURL + 'volume/' + str(comicid) + '/?api_key=' + str(comicapi) + '&format=xml&field_list=name,count_of_issues,issues,start_year,site_detail_url,image,publisher,description,first_issue,deck,aliases'
PULLURL = mylar.CVURL + 'volume/' + str(comicid) + '/?api_key=' + str(comicapi) + '&format=xml&field_list=name,count_of_issues,issues,start_year,site_detail_url,image,publisher,description,first_issue,deck,aliases'
elif type == 'issue':
if mylar.CV_ONLY:
cv_type = 'issues'
@ -80,7 +82,7 @@ def pulldetails(comicid,type,issueid=None,offset=1,arclist=None,comicidlist=None
#download the file:
file = urllib2.urlopen(PULLURL)
#increment CV API counter.
mylar.CVAPI_COUNT +=1
mylar.CVAPI_COUNT += 1
#convert to string:
data = file.read()
#close file because we don't need it anymore:
@ -91,8 +93,8 @@ def pulldetails(comicid,type,issueid=None,offset=1,arclist=None,comicidlist=None
return dom
def getComic(comicid,type,issueid=None,arc=None,arcid=None,arclist=None,comicidlist=None):
if type == 'issue':
def getComic(comicid, type, issueid=None, arc=None, arcid=None, arclist=None, comicidlist=None):
if type == 'issue':
offset = 1
issue = {}
ndic = []
@ -107,7 +109,7 @@ def getComic(comicid,type,issueid=None,arc=None,arcid=None,arclist=None,comicidl
else:
id = comicid
islist = None
searched = pulldetails(id,'issue',None,0,islist)
searched = pulldetails(id, 'issue', None, 0, islist)
if searched is None: return False
totalResults = searched.getElementsByTagName('number_of_total_results')[0].firstChild.wholeText
logger.fdebug("there are " + str(totalResults) + " search results...")
@ -119,8 +121,8 @@ def getComic(comicid,type,issueid=None,arc=None,arcid=None,arclist=None,comicidl
if countResults > 0:
#new api - have to change to page # instead of offset count
offsetcount = countResults
searched = pulldetails(id,'issue',None,offsetcount,islist)
issuechoice,tmpdate = GetIssuesInfo(id,searched,arcid)
searched = pulldetails(id, 'issue', None, offsetcount, islist)
issuechoice, tmpdate = GetIssuesInfo(id, searched, arcid)
if tmpdate < firstdate:
firstdate = tmpdate
ndic = ndic + issuechoice
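The surrounding loop pages through ComicVine by bumping the offset by the number of results already fetched until `number_of_total_results` is reached. The pattern in isolation, with `fetch_page` standing in for `pulldetails`:

def fetch_all(fetch_page, page_size=100):
    # fetch_page(offset) is assumed to return a list of up to page_size items
    items = []
    offset = 0
    while True:
        page = fetch_page(offset)
        items.extend(page)
        if len(page) < page_size:
            break
        offset += len(page)
    return items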
@ -133,22 +135,22 @@ def getComic(comicid,type,issueid=None,arc=None,arcid=None,arclist=None,comicidl
return issue
elif type == 'comic':
dom = pulldetails(comicid,'comic',None,1)
return GetComicInfo(comicid,dom)
elif type == 'firstissue':
dom = pulldetails(comicid,'firstissue',issueid,1)
return GetFirstIssue(issueid,dom)
dom = pulldetails(comicid, 'comic', None, 1)
return GetComicInfo(comicid, dom)
elif type == 'firstissue':
dom = pulldetails(comicid, 'firstissue', issueid, 1)
return GetFirstIssue(issueid, dom)
elif type == 'storyarc':
dom = pulldetails(arc,'storyarc',None,1)
return GetComicInfo(issueid,dom)
dom = pulldetails(arc, 'storyarc', None, 1)
return GetComicInfo(issueid, dom)
elif type == 'comicyears':
#used by the story arc searcher when adding a given arc to poll each ComicID in order to populate the Series Year.
#this grabs each issue based on issueid, and then subsets the comicid for each to be used later.
#set the offset to 0, since we're doing a filter.
dom = pulldetails(arcid,'comicyears',offset=0,comicidlist=comicidlist)
dom = pulldetails(arcid, 'comicyears', offset=0, comicidlist=comicidlist)
return GetSeriesYears(dom)
def GetComicInfo(comicid,dom,safechk=None):
def GetComicInfo(comicid, dom, safechk=None):
if safechk is None:
#safetycheck when checking comicvine. If it times out, increment the chk on retry attempts up until 5 tries then abort.
safechk = 1
@ -182,13 +184,13 @@ def GetComicInfo(comicid,dom,safechk=None):
# where [0] denotes the number of the name field(s)
# where nodeName denotes the parentNode : ComicName = results, publisher = publisher, issues = issue
try:
names = len( dom.getElementsByTagName('name') )
names = len(dom.getElementsByTagName('name'))
n = 0
while ( n < names ):
while (n < names):
if dom.getElementsByTagName('name')[n].parentNode.nodeName == 'results':
try:
comic['ComicName'] = dom.getElementsByTagName('name')[n].firstChild.wholeText
comic['ComicName'] = comic['ComicName'].rstrip()
comic['ComicName'] = comic['ComicName'].rstrip()
except:
logger.error('There was a problem retrieving the given data from ComicVine. Ensure that www.comicvine.com is accessible AND that you have provided your OWN ComicVine API key.')
return
@ -199,7 +201,7 @@ def GetComicInfo(comicid,dom,safechk=None):
except:
comic['ComicPublisher'] = "Unknown"
n+=1
n += 1
except:
logger.warn('Something went wrong retrieving from ComicVine. Ensure your API is up-to-date and that comicvine is accessible')
return
@ -217,7 +219,7 @@ def GetComicInfo(comicid,dom,safechk=None):
time.sleep(10)
safechk +=1
GetComicInfo(comicid, dom, safechk)
desdeck = 0
#the description field actually holds the Volume# - so let's grab it
try:
@ -269,29 +271,29 @@ def GetComicInfo(comicid,dom,safechk=None):
#increased to 10 to allow for text numbering (+5 max)
#sometimes it's volume 5 and occasionally it's fifth volume.
if i == 0:
vfind = comicDes[v_find:v_find+15] #if it's volume 5 format
basenums = {'zero':'0','one':'1','two':'2','three':'3','four':'4','five':'5','six':'6','seven':'7','eight':'8','nine':'9','ten':'10','i':'1','ii':'2','iii':'3','iv':'4','v':'5'}
vfind = comicDes[v_find:v_find +15] #if it's volume 5 format
basenums = {'zero': '0', 'one': '1', 'two': '2', 'three': '3', 'four': '4', 'five': '5', 'six': '6', 'seven': '7', 'eight': '8', 'nine': '9', 'ten': '10', 'i': '1', 'ii': '2', 'iii': '3', 'iv': '4', 'v': '5'}
logger.fdebug('volume X format - ' + str(i) + ': ' + vfind)
else:
vfind = comicDes[:v_find] # if it's fifth volume format
basenums = {'zero':'0','first':'1','second':'2','third':'3','fourth':'4','fifth':'5','sixth':'6','seventh':'7','eighth':'8','nineth':'9','tenth':'10','i':'1','ii':'2','iii':'3','iv':'4','v':'5'}
basenums = {'zero': '0', 'first': '1', 'second': '2', 'third': '3', 'fourth': '4', 'fifth': '5', 'sixth': '6', 'seventh': '7', 'eighth': '8', 'nineth': '9', 'tenth': '10', 'i': '1', 'ii': '2', 'iii': '3', 'iv': '4', 'v': '5'}
logger.fdebug('X volume format - ' + str(i) + ': ' + vfind)
volconv = ''
for nums in basenums:
if nums in vfind.lower():
sconv = basenums[nums]
vfind = re.sub(nums, sconv, vfind.lower())
break
break
#logger.info('volconv: ' + str(volconv))
#now we attempt to find the character position after the word 'volume'
if i == 0:
volthis = vfind.lower().find('volume')
volthis = volthis + 6 # add on the actual word to the position so that we can grab the subsequent digit
vfind = vfind[volthis:volthis+4] #grab the next 4 characters ;)
volthis = volthis + 6 # add on the actual word to the position so that we can grab the subsequent digit
vfind = vfind[volthis:volthis + 4] # grab the next 4 characters ;)
elif i == 1:
volthis = vfind.lower().find('volume')
vfind = vfind[volthis-4:volthis] #grab the next 4 characters ;)
vfind = vfind[volthis - 4:volthis] # grab the next 4 characters ;)
if '(' in vfind:
#bracket detected in versioning
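These branches slide a text window around the word 'volume' and translate number words via `basenums` (note that 'nineth' in the word list can never match the correct spelling 'ninth'). A regex sketch of the same idea, for comparison:

import re

NUMBER_WORDS = {'first': '1', 'second': '2', 'third': '3', 'fourth': '4',
                'fifth': '5', 'sixth': '6', 'seventh': '7', 'eighth': '8',
                'ninth': '9', 'tenth': '10'}

def volume_from_description(desc):
    m = re.search('volume\s+(\d{1,2})', desc, re.IGNORECASE)   # 'volume 5' form
    if m:
        return m.group(1)
    m = re.search('(\w+)\s+volume', desc, re.IGNORECASE)       # 'fifth volume' form
    if m and m.group(1).lower() in NUMBER_WORDS:
        return NUMBER_WORDS[m.group(1).lower()]
    return None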
@ -303,17 +305,17 @@ def GetComicInfo(comicid,dom,safechk=None):
comic['ComicVersion'] = ledigit
logger.fdebug("Volume information found! Adding to series record : volume " + comic['ComicVersion'])
break
i+=1
i += 1
else:
i+=1
i += 1
if comic['ComicVersion'] == 'noversion':
logger.fdebug('comic[ComicVersion]:' + str(comic['ComicVersion']))
desdeck -=1
desdeck -= 1
else:
break
if vari == "yes":
if vari == "yes":
comic['ComicIssues'] = str(cntit)
else:
comic['ComicIssues'] = dom.getElementsByTagName('count_of_issues')[0].firstChild.wholeText
@ -338,7 +340,7 @@ def GetComicInfo(comicid,dom,safechk=None):
# comic['comicchoice'] = comicchoice
return comic
def GetIssuesInfo(comicid,dom,arcid=None):
def GetIssuesInfo(comicid, dom, arcid=None):
subtracks = dom.getElementsByTagName('issue')
if not mylar.CV_ONLY:
cntiss = dom.getElementsByTagName('count_of_issues')[0].firstChild.wholeText
@ -349,7 +351,7 @@ def GetIssuesInfo(comicid,dom,arcid=None):
logger.fdebug("CV's count is wrong, I counted different...going with my count for physicals" + str(len(subtracks)))
cntiss = len(subtracks) # assume count of issues is wrong, go with ACTUAL physical api count
cntiss = int(cntiss)
n = cntiss-1
n = cntiss -1
else:
n = int(len(subtracks))
tempissue = {}
@ -364,7 +366,7 @@ def GetIssuesInfo(comicid,dom,arcid=None):
issue['Issue_ID'] = dom.getElementsByTagName('id')[n].firstChild.wholeText
issue['Issue_Number'] = dom.getElementsByTagName('issue_number')[n].firstChild.wholeText
issuech.append({
'Issue_ID': issue['Issue_ID'],
'Issue_Number': issue['Issue_Number'],
@ -372,7 +374,7 @@ def GetIssuesInfo(comicid,dom,arcid=None):
})
else:
try:
totnames = len( subtrack.getElementsByTagName('name') )
totnames = len(subtrack.getElementsByTagName('name'))
tot = 0
while (tot < totnames):
if subtrack.getElementsByTagName('name')[tot].parentNode.nodeName == 'volume':
@ -382,19 +384,19 @@ def GetIssuesInfo(comicid,dom,arcid=None):
tempissue['Issue_Name'] = subtrack.getElementsByTagName('name')[tot].firstChild.wholeText
except:
tempissue['Issue_Name'] = None
tot+=1
tot += 1
except:
tempissue['ComicName'] = 'None'
try:
totids = len( subtrack.getElementsByTagName('id') )
totids = len(subtrack.getElementsByTagName('id'))
idt = 0
while (idt < totids):
if subtrack.getElementsByTagName('id')[idt].parentNode.nodeName == 'volume':
tempissue['Comic_ID'] = subtrack.getElementsByTagName('id')[idt].firstChild.wholeText
elif subtrack.getElementsByTagName('id')[idt].parentNode.nodeName == 'issue':
tempissue['Issue_ID'] = subtrack.getElementsByTagName('id')[idt].firstChild.wholeText
idt+=1
idt += 1
except:
tempissue['Issue_Name'] = 'None'
@ -435,12 +437,12 @@ def GetIssuesInfo(comicid,dom,arcid=None):
if tempissue['CoverDate'] < firstdate and tempissue['CoverDate'] != '0000-00-00':
firstdate = tempissue['CoverDate']
n-=1
n-= 1
#issue['firstdate'] = firstdate
return issuech, firstdate
def GetFirstIssue(issueid,dom):
def GetFirstIssue(issueid, dom):
#if the Series Year doesn't exist, get the first issue and take the date from that
try:
first_year = dom.getElementsByTagName('cover_date')[0].firstChild.wholeText
@ -462,7 +464,7 @@ def GetSeriesYears(dom):
serieslist = []
for dm in series:
try:
totids = len( dm.getElementsByTagName('id') )
totids = len(dm.getElementsByTagName('id'))
idc = 0
while (idc < totids):
if dm.getElementsByTagName('id')[idc].parentNode.nodeName == 'volume':
@ -475,7 +477,7 @@ def GetSeriesYears(dom):
tempseries['Series'] = 'None'
tempseries['Publisher'] = 'None'
try:
totnames = len( dm.getElementsByTagName('name') )
totnames = len(dm.getElementsByTagName('name'))
namesc = 0
while (namesc < totnames):
if dm.getElementsByTagName('name')[namesc].parentNode.nodeName == 'volume':
@ -492,7 +494,6 @@ def GetSeriesYears(dom):
logger.warn('There was a problem retrieving the start year for a particular series within the story arc.')
tempseries['SeriesYear'] = '0000'
serieslist.append({"ComicID": tempseries['ComicID'],
"ComicName": tempseries['Series'],
"SeriesYear": tempseries['SeriesYear'],
@ -500,6 +501,7 @@ def GetSeriesYears(dom):
return serieslist
def drophtml(html):
from bs4 import BeautifulSoup
soup = BeautifulSoup(html)
View File
@ -47,7 +47,7 @@ class WriteOnly:
def worker(self):
myDB = DBConnection()
#this should be in its own thread somewhere, constantly polling the queue and sending items to the writer.
logger.fdebug('worker started.')
logger.fdebug('worker started.')
while True:
thisthread = threading.currentThread().name
if not mylarQueue.empty():
@ -58,7 +58,7 @@ class WriteOnly:
sqlResult = myDB.upsert(QtableName, QvalueDict, QkeyDict)
if sqlResult:
mylarQueue.task_done()
return sqlResult
return sqlResult
else:
time.sleep(1)
#logger.fdebug('[' + str(thisthread) + '] sleeping until active.')
@ -66,12 +66,12 @@ class WriteOnly:
class DBConnection:
def __init__(self, filename="mylar.db"):
self.filename = filename
self.connection = sqlite3.connect(dbFilename(filename), timeout=20)
self.connection.row_factory = sqlite3.Row
self.queue = mylarQueue
def fetch(self, query, args=None):
with db_lock:
@ -115,10 +115,10 @@ class DBConnection:
with db_lock:
if query == None:
return
sqlResult = None
attempt = 0
while attempt < 5:
try:
if args == None:
@ -141,14 +141,14 @@ class DBConnection:
return sqlResult
def select(self, query, args=None):
sqlResults = self.fetch(query, args).fetchall()
if sqlResults == None:
return []
return sqlResults
def selectone(self, query, args=None):
sqlResults = self.fetch(query, args)
@ -162,15 +162,15 @@ class DBConnection:
thisthread = threading.currentThread().name
changesBefore = self.connection.total_changes
genParams = lambda myDict : [x + " = ?" for x in myDict.keys()]
genParams = lambda myDict: [x + " = ?" for x in myDict.keys()]
query = "UPDATE " + tableName + " SET " + ", ".join(genParams(valueDict)) + " WHERE " + " AND ".join(genParams(keyDict))
self.action(query, valueDict.values() + keyDict.values())
if self.connection.total_changes == changesBefore:
query = "INSERT INTO "+tableName+" (" + ", ".join(valueDict.keys() + keyDict.keys()) + ")" + \
query = "INSERT INTO " +tableName +" (" + ", ".join(valueDict.keys() + keyDict.keys()) + ")" + \
" VALUES (" + ", ".join(["?"] * len(valueDict.keys() + keyDict.keys())) + ")"
self.action(query, valueDict.values() + keyDict.values())
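The upsert above is the classic update-then-insert-if-nothing-changed pattern. A self-contained sketch against sqlite3 (table and column names are assumed trusted, as they are in the original):

import sqlite3

def upsert(conn, table, value_dict, key_dict):
    before = conn.total_changes
    sets = ", ".join(k + " = ?" for k in value_dict)
    wheres = " AND ".join(k + " = ?" for k in key_dict)
    params = list(value_dict.values()) + list(key_dict.values())
    conn.execute("UPDATE %s SET %s WHERE %s" % (table, sets, wheres), params)
    if conn.total_changes == before:   # no row matched the keys, so insert
        cols = list(value_dict.keys()) + list(key_dict.keys())
        marks = ", ".join("?" * len(cols))
        conn.execute("INSERT INTO %s (%s) VALUES (%s)" % (table, ", ".join(cols), marks), params)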
View File
@ -21,22 +21,22 @@ import pprint
import subprocess
import re
#import logger
import mylar
from mylar import logger, helpers
import unicodedata
import sys
import mylar
from mylar import logger, helpers
import unicodedata
import sys
import platform
def file2comicmatch(watchmatch):
#print ("match: " + str(watchmatch))
pass
def listFiles(dir,watchcomic,Publisher,AlternateSearch=None,manual=None,sarc=None):
def listFiles(dir, watchcomic, Publisher, AlternateSearch=None, manual=None, sarc=None):
# use AlternateSearch to check for filenames that follow that naming pattern
# ie. Star Trek TNG Doctor Who Assimilation won't get hits as the
# ie. Star Trek TNG Doctor Who Assimilation won't get hits as the
# checker looks for Star Trek TNG Doctor Who Assimilation2 (according to CV)
# we need to convert to ascii, as watchcomic is utf-8 and special chars f'it up
u_watchcomic = unicodedata.normalize('NFKD', watchcomic).encode('ASCII', 'ignore') #watchcomic.encode('ascii', 'ignore').strip()
logger.fdebug('[FILECHECKER] comic: ' + u_watchcomic)
@ -63,7 +63,7 @@ def listFiles(dir,watchcomic,Publisher,AlternateSearch=None,manual=None,sarc=Non
issue_exceptions = ['AU',
'.INH',
'.NOW',
'AI',
'AI',
'A',
'B',
'C',
@ -86,7 +86,7 @@ def listFiles(dir,watchcomic,Publisher,AlternateSearch=None,manual=None,sarc=Non
basedir = fname['directory']
#if it's a subdir, strip out the main dir and retain the remainder for the filechecker to find it.
#start at position 1 so the initial slash is removed since it's a sub, and os.path.join will choke.
moddir = basedir.replace(dir,'')[1:].rstrip()
moddir = basedir.replace(dir, '')[1:].rstrip()
item = fname['filename']
@ -94,7 +94,7 @@ def listFiles(dir,watchcomic,Publisher,AlternateSearch=None,manual=None,sarc=Non
if item.startswith('._'):
logger.info('ignoring os metadata for ' + item)
continue
if item == 'cover.jpg' or item == 'cvinfo': continue
if not item.lower().endswith(extensions):
#logger.fdebug('[FILECHECKER] filename not a valid cbr/cbz - ignoring: ' + item)
@ -122,7 +122,7 @@ def listFiles(dir,watchcomic,Publisher,AlternateSearch=None,manual=None,sarc=Non
logger.fdebug('[SARC] subname: ' + subname)
removest = subname.find('-') # the - gets removed above so we test for the first blank space...
logger.fdebug('[SARC] Checking filename for Reading Order sequence - removest: ' + str(removest))
logger.fdebug('removestdig: ' + subname[:removest-1])
logger.fdebug('removestdig: ' + subname[:removest -1])
if subname[:removest].isdigit() and removest == 3:
subname = subname[4:]
logger.fdebug('[SARC] Removed Reading Order sequence from subname. Now set to : ' + subname)
@ -147,9 +147,9 @@ def listFiles(dir,watchcomic,Publisher,AlternateSearch=None,manual=None,sarc=Non
vers4vol = volrem
break
elif subit.lower()[:3] == 'vol':
tsubit = re.sub('vol','', subit.lower())
tsubit = re.sub('vol', '', subit.lower())
try:
if any( [ tsubit.isdigit(), len(tsubit) > 5 ] ):
if any([tsubit.isdigit(), len(tsubit) > 5]):
#if in format vol.2013 etc
#because the '.' in Vol. gets removed, let's loop thru again after the Vol hit to remove it entirely
logger.fdebug('[FILECHECKER] volume indicator detected as version #:' + str(subit))
@ -157,7 +157,7 @@ def listFiles(dir,watchcomic,Publisher,AlternateSearch=None,manual=None,sarc=Non
volrem = subit
vers4year = "yes"
except:
continue
continue
#check if a year is present in series title (ie. spider-man 2099)
#also check if decimal present in series title (ie. batman beyond 2.0)
@ -197,7 +197,7 @@ def listFiles(dir,watchcomic,Publisher,AlternateSearch=None,manual=None,sarc=Non
bracket_length_st = watchcomic.find('(')
bracket_length_en = watchcomic.find(')', bracket_length_st)
bracket_length = bracket_length_en - bracket_length_st
bracket_word = watchcomic[bracket_length_st:bracket_length_en+1]
bracket_word = watchcomic[bracket_length_st:bracket_length_en +1]
logger.fdebug('[FILECHECKER] bracketinseries: ' + str(bracket_word))
logger.fdebug('[FILECHECKER] numberinseries: ' + str(numberinseries))
@ -217,7 +217,7 @@ def listFiles(dir,watchcomic,Publisher,AlternateSearch=None,manual=None,sarc=Non
#logger.fdebug('[FILECHECKER] subnm_mod_en: ' + str(subname[bracket_length_en:]))
#logger.fdebug('[FILECHECKER] modified subname is now : ' + str(subnm_mod))
if bracket_word in subname:
nobrackets_word = re.sub('[\(\)]','', bracket_word).strip()
nobrackets_word = re.sub('[\(\)]', '', bracket_word).strip()
subname = re.sub(nobrackets_word, '', subname).strip()
subnm = re.findall('[^()]+', subname)
@ -232,9 +232,9 @@ def listFiles(dir,watchcomic,Publisher,AlternateSearch=None,manual=None,sarc=Non
subthis = re.sub('.cbr', '', subname)
subthis = re.sub('.cbz', '', subthis)
subthis = re.sub('[\:\;\!\'\/\?\+\=\_\%\.\-]', '', subthis)
subthis = re.sub('\s+',' ', subthis)
subthis = re.sub('\s+', ' ', subthis)
logger.fdebug('[FILECHECKER] sub-cleaned: ' + subthis)
#we need to make sure the file is part of the correct series or else will match falsely
#we need to make sure the file is part of the correct series or else will match falsely
if watchname.lower() not in subthis.lower():
logger.fdebug('[FILECHECKER] ' + watchname + ' this is a false match to ' + subthis + ' - Ignoring this result.')
continue
@ -245,7 +245,7 @@ def listFiles(dir,watchcomic,Publisher,AlternateSearch=None,manual=None,sarc=Non
if subthis.startswith('('):
# if it startswith a bracket, then it's probably a year - let's check.
for i in subthis.split():
tmpi = re.sub('[\(\)]','',i).strip()
tmpi = re.sub('[\(\)]', '', i).strip()
if tmpi.isdigit():
if (tmpi.startswith('19') or tmpi.startswith('20')) and len(tmpi) == 4:
logger.fdebug('[FILECHECKER] year detected: ' + str(tmpi))
@ -275,7 +275,7 @@ def listFiles(dir,watchcomic,Publisher,AlternateSearch=None,manual=None,sarc=Non
else:
#unable to find year in filename
logger.fdebug('[FILECHECKER] Unable to detect year within filename. Continuing as is and assuming this is a volume 1 and will work itself out later.')
subname = ogsubthis
subname = ogsubthis
subnm = re.findall('[^()]+', subname)
else:
@ -290,14 +290,14 @@ def listFiles(dir,watchcomic,Publisher,AlternateSearch=None,manual=None,sarc=Non
else:
logger.fdebug('[FILECHECKER] more than one decimal detected, and the series does not have decimals - assuming in place of spaces.')
subname = re.sub('[\.]', '', subname)
subnm = re.findall('[^()]+', subname)
else:
if numberinseries == 'True' or decimalinseries == 'True':
#we need to remove the series from the subname and then search the remainder.
subthis = re.sub('.cbr', '', subname)
subthis = re.sub('.cbz', '', subthis)
if decimalinseries == 'True':
if decimalinseries == 'True':
watchname = re.sub('[\:\;\!\'\/\?\+\=\_\%\-]', '', watchcomic) #remove spec chars for watchcomic match.
subthis = re.sub('[\:\;\!\'\/\?\+\=\_\%\-]', '', subthis)
else:
@ -305,7 +305,7 @@ def listFiles(dir,watchcomic,Publisher,AlternateSearch=None,manual=None,sarc=Non
watchname = re.sub('[\:\;\!\'\/\?\+\=\_\%\-]', '', watchcomic) #remove spec chars for watchcomic match.
subthis = re.sub('[\:\;\!\'\/\?\+\=\_\%\-]', '', subthis)
logger.fdebug('[FILECHECKER] watch-cleaned: ' + watchname)
subthis = re.sub('\s+',' ', subthis)
subthis = re.sub('\s+', ' ', subthis)
logger.fdebug('[FILECHECKER] sub-cleaned: ' + subthis)
#we need to make sure the file is part of the correct series or else will match falsely
if watchname.lower() not in subthis.lower():
@ -317,7 +317,7 @@ def listFiles(dir,watchcomic,Publisher,AlternateSearch=None,manual=None,sarc=Non
if subthis.startswith('('):
# if it startswith a bracket, then it's probably a year and the format is incorrect to continue - let's check.
for i in subthis.split():
tmpi = re.sub('[\(\)]','',i).strip()
tmpi = re.sub('[\(\)]', '', i).strip()
if tmpi.isdigit():
if (tmpi.startswith('19') or tmpi.startswith('20')) and len(tmpi) == 4:
logger.fdebug('[FILECHECKER] Year detected: ' + str(tmpi))
@ -325,7 +325,7 @@ def listFiles(dir,watchcomic,Publisher,AlternateSearch=None,manual=None,sarc=Non
subname = re.sub('\(\)', '', subname).strip()
logger.fdebug('[FILECHECKER] Flipping the issue with the year: ' + subname)
break
else:
else:
numcheck = re.findall('[19\d{2}|20\d{2}]', subthis)
if len(numcheck) == 1:
subname = re.sub('(19\d{2}|20\d{2})(.*)', '\\2 (\\1)', subthis)
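Both year-handling paths end the same way: a lone 19xx/20xx token found at the front of the name is flipped to a trailing '(year)'. Condensed:

import re

def flip_leading_year(name):
    # '2013 Batman 012' -> 'Batman 012 (2013)'
    return re.sub('^\(?((?:19|20)\d{2})\)?\s*(.*)', '\\2 (\\1)', name).strip()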
@ -375,7 +375,7 @@ def listFiles(dir,watchcomic,Publisher,AlternateSearch=None,manual=None,sarc=Non
cnt+=1
#If the Year comes before the Issue # the subname is passed with no Issue number.
#This logic checks for numbers before the extension in the format of 1 01 001
#This logic checks for numbers before the extension in the format of 1 01 001
#and adds to the subname. (Cases where comic name is $Series_$Year_$Issue)
# if len(subnm) > 1:
@ -407,7 +407,7 @@ def listFiles(dir,watchcomic,Publisher,AlternateSearch=None,manual=None,sarc=Non
subname = subnm[0]
if len(subnm) == 1:
# if it still has no year (brackets), check setting and either assume no year needed.
subname = subname
subname = subname
logger.fdebug('[FILECHECKER] subname no brackets: ' + subname)
nonocount = 0
charpos = 0
@ -420,31 +420,31 @@ def listFiles(dir,watchcomic,Publisher,AlternateSearch=None,manual=None,sarc=Non
for nono in not_these:
if nono in subname:
subcnt = subname.count(nono)
charpos = indices(subname,nono) # will return a list of char positions in subname
charpos = indices(subname, nono) # will return a list of char positions in subname
logger.fdebug("[" + str(nono) + "] charpos: " + str(charpos))
if nono == '-':
i=0
while (i < len(charpos)):
for i,j in enumerate(charpos):
if j+2 > len(subname):
sublimit = subname[j+1:]
for i, j in enumerate(charpos):
if j +2 > len(subname):
sublimit = subname[j +1:]
else:
sublimit = subname[j+1:j+2]
sublimit = subname[j +1:j +2]
if sublimit.isdigit():
logger.fdebug('[FILECHECKER] possible negative issue detected.')
nonocount = nonocount + subcnt - 1
detneg = "yes"
detneg = "yes"
elif '-' in watchcomic and j < lenwatch:
lenwatch -=1
logger.fdebug('[FILECHECKER] - appears in series title.')
logger.fdebug('[FILECHECKER] up to - :' + subname[:j+1].replace('-', ' '))
logger.fdebug('[FILECHECKER] after - :' + subname[j+1:])
subname = subname[:j+1].replace('-', '') + subname[j+1:]
logger.fdebug('[FILECHECKER] up to - :' + subname[:j +1].replace('-', ' '))
logger.fdebug('[FILECHECKER] after - :' + subname[j +1:])
subname = subname[:j +1].replace('-', '') + subname[j +1:]
logger.fdebug('[FILECHECKER] new subname is : ' + subname)
should_restart = True
leavehyphen = True
i+=1
if detneg == "no" and leavehyphen == False:
if detneg == "no" and leavehyphen == False:
subname = re.sub(str(nono), ' ', subname)
nonocount = nonocount + subcnt
#logger.fdebug('[FILECHECKER] (str(nono) + " detected " + str(subcnt) + " times.")
@ -455,21 +455,21 @@ def listFiles(dir,watchcomic,Publisher,AlternateSearch=None,manual=None,sarc=Non
fndit = 0
dcspace = 0
while (x < len(charpos)):
for x,j in enumerate(charpos):
for x, j in enumerate(charpos):
fndit = j
logger.fdebug('fndit: ' + str(fndit))
logger.fdebug('isdigit1: ' + subname[fndit-1:fndit])
logger.fdebug('isdigit2: ' + subname[fndit+1:fndit+2])
if subname[fndit-1:fndit].isdigit() and subname[fndit+1:fndit+2].isdigit():
logger.fdebug('isdigit1: ' + subname[fndit -1:fndit])
logger.fdebug('isdigit2: ' + subname[fndit +1:fndit +2])
if subname[fndit -1:fndit].isdigit() and subname[fndit +1:fndit +2].isdigit():
logger.fdebug('[FILECHECKER] decimal issue detected.')
dcspace+=1
else:
subname = subname[:fndit] + ' ' + subname[fndit+1:]
nonocount+=1
subname = subname[:fndit] + ' ' + subname[fndit +1:]
nonocount+=1
x+=1
nonocount += (subcnt + dcspace)
#if dcspace == 1:
# nonocount = nonocount + subcnt + dcspace
# nonocount = nonocount + subcnt + dcspace
#else:
# subname = re.sub('\.', ' ', subname)
# nonocount = nonocount + subcnt - 1 #(remove the extension from the length)
@ -480,15 +480,15 @@ def listFiles(dir,watchcomic,Publisher,AlternateSearch=None,manual=None,sarc=Non
blspc = 0
if nono == '#':
fndit = subname.find(nono)
if subname[fndit+1].isdigit():
subname = re.sub('#','',subname)
if subname[fndit +1].isdigit():
subname = re.sub('#', '', subname)
continue
while x < subcnt:
fndit = subname.find(nono, fndit)
#print ("space before check: " + str(subname[fndit-1:fndit]))
#print ("space after check: " + str(subname[fndit+1:fndit+2]))
if subname[fndit-1:fndit] == ' ' and subname[fndit+1:fndit+2] == ' ':
if subname[fndit -1:fndit] == ' ' and subname[fndit +1:fndit +2] == ' ':
logger.fdebug('[FILECHECKER] blankspace detected before and after ' + str(nono))
blspc+=1
x+=1
@ -518,7 +518,7 @@ def listFiles(dir,watchcomic,Publisher,AlternateSearch=None,manual=None,sarc=Non
modwatchcomic = re.sub('\s+', ' ', modwatchcomic).strip()
if '&' in subname:
logger.fdebug('[FILECHECKER] detected & in subname')
subname = re.sub('\&', ' and ', subname)
subname = re.sub('\&', ' and ', subname)
detectand = True
if ' the ' in subname.lower() or subname.lower().startswith('the '):
subname = re.sub("\\bthe\\b", "", subname.lower())
@ -533,7 +533,7 @@ def listFiles(dir,watchcomic,Publisher,AlternateSearch=None,manual=None,sarc=Non
AS_Alternate = AlternateSearch
for calt in chkthealt:
AS_tupled = False
AS_Alternate = re.sub('##','',calt)
AS_Alternate = re.sub('##', '', calt)
if '!!' in AS_Alternate:
# if it's !! present, it's the comicid associated with the series as an added annual.
# extract the !!, store it and then remove it so things will continue.
@ -542,7 +542,7 @@ def listFiles(dir,watchcomic,Publisher,AlternateSearch=None,manual=None,sarc=Non
as_end = AS_Alternate.find('##', as_start)
if as_end == -1: as_end = len(AS_Alternate)
logger.fdebug('as_start: ' + str(as_end) + ' --- ' + str(AS_Alternate[as_start:as_end]))
AS_ComicID = AS_Alternate[as_start+2:as_end]
AS_ComicID = AS_Alternate[as_start +2:as_end]
logger.fdebug('[FILECHECKER] Extracted comicid for given annual : ' + str(AS_ComicID))
AS_Alternate = re.sub('!!' + str(AS_ComicID), '', AS_Alternate)
AS_tupled = True
@ -554,7 +554,7 @@ def listFiles(dir,watchcomic,Publisher,AlternateSearch=None,manual=None,sarc=Non
if detectthe_sub == True:
altsearchcomic = re.sub("\\bthe\\b", "", altsearchcomic.lower())
altsearchcomic = re.sub('\s+', ' ', str(altsearchcomic)).strip()
if AS_tupled:
AS_Tuple.append({"ComicID": AS_ComicID,
"AS_Alternate": altsearchcomic})
@ -625,35 +625,35 @@ def listFiles(dir,watchcomic,Publisher,AlternateSearch=None,manual=None,sarc=Non
# cchk = cchk_ls[0]
logger.fdebug('[FILECHECKER] cchk is : ' + str(cchk))
logger.fdebug('[FILECHECKER] we should remove ' + str(nonocount) + ' characters')
logger.fdebug('[FILECHECKER] we should remove ' + str(nonocount) + ' characters')
findtitlepos = subname.find('-')
if charpos != 0:
logger.fdebug('[FILECHECKER] detected ' + str(len(charpos)) + ' special characters')
for i,j in enumerate(charpos):
for i, j in enumerate(charpos):
logger.fdebug('i,j:' + str(i) + ',' + str(j))
logger.fdebug(str(len(subname)) + ' - subname: ' + subname)
logger.fdebug("digitchk: " + subname[j-1:])
logger.fdebug("digitchk: " + subname[j -1:])
if j >= len(subname):
logger.fdebug('[FILECHECKER] ' + str(j) + ' is >= ' + str(len(subname)) + ' .End reached. ignoring remainder.')
break
elif subname[j:] == '-':
try:
if j <= len(subname) and subname[j+1].isdigit():
if j <= len(subname) and subname[j +1].isdigit():
logger.fdebug('[FILECHECKER] negative issue detected.')
#detneg = "yes"
except IndexError:
logger.fdebug('[FILECHECKER] There was a problem parsing the information from this filename: ' + comicpath)
elif j > findtitlepos:
if subname[j:] == '#':
if subname[j+1].isdigit():
if subname[j +1].isdigit():
logger.fdebug('[FILECHECKER] # detected denoting issue#, ignoring.')
else:
else:
nonocount-=1
elif ('-' in watchcomic or '.' in watchcomic) and j < len(watchcomic):
logger.fdebug('[FILECHECKER] - appears in series title, ignoring.')
else:
digitchk = re.sub('#','', subname[j-1:]).strip()
else:
digitchk = re.sub('#', '', subname[j -1:]).strip()
logger.fdebug('[FILECHECKER] special character appears outside of title - ignoring @ position: ' + str(charpos[i]))
nonocount-=1
@ -666,7 +666,7 @@ def listFiles(dir,watchcomic,Publisher,AlternateSearch=None,manual=None,sarc=Non
# if sarc and mylar.READ2FILENAME:
# removest = subname.find(' ') # the - gets removed above so we test for the first blank space...
# if subname[:removest].isdigit():
# jtd_len += removest + 1 # +1 to account for space in place of -
# jtd_len += removest + 1 # +1 to account for space in place of -
# logger.fdebug('[FILECHECKER] adjusted jtd_len to : ' + str(removest) + ' because of story-arc reading order tags')
logger.fdebug('[FILECHECKER] nonocount [' + str(nonocount) + '] cchk [' + cchk + '] length [' + str(len(cchk)) + ']')
@ -683,7 +683,7 @@ def listFiles(dir,watchcomic,Publisher,AlternateSearch=None,manual=None,sarc=Non
logger.fdebug('[FILECHECKER] after title removed from FILENAME [' + str(item[jtd_len:]) + ']')
logger.fdebug('[FILECHECKER] creating just the digits using SUBNAME, pruning first [' + str(jtd_len) + '] chars from [' + subname + ']')
justthedigits_1 = re.sub('#','', subname[jtd_len:]).strip()
justthedigits_1 = re.sub('#', '', subname[jtd_len:]).strip()
if enable_annual:
logger.fdebug('enable annual is on')
@ -710,7 +710,7 @@ def listFiles(dir,watchcomic,Publisher,AlternateSearch=None,manual=None,sarc=Non
logger.fdebug('st:' + str(st))
st_d = digitchk[:st]
logger.fdebug('st_d:' + str(st_d))
st_e = digitchk[st+1:]
st_e = digitchk[st +1:]
logger.fdebug('st_e:' + str(st_e))
#x = int(float(st_d))
#logger.fdebug('x:' + str(x))
@ -727,7 +727,7 @@ def listFiles(dir,watchcomic,Publisher,AlternateSearch=None,manual=None,sarc=Non
else:
if len(justthedigits_1) >= len(digitchk) and len(digitchk) > 3:
logger.fdebug('[FILECHECKER] Removing issue title.')
justthedigits_1 = re.sub(digitchk,'', justthedigits_1).strip()
justthedigits_1 = re.sub(digitchk, '', justthedigits_1).strip()
logger.fdebug('[FILECHECKER] After issue title removed [' + justthedigits_1 + ']')
titlechk = True
hyphensplit = digitchk
@ -738,16 +738,16 @@ def listFiles(dir,watchcomic,Publisher,AlternateSearch=None,manual=None,sarc=Non
logger.fdebug('[FILECHECKER] splitst :' + str(len(splitit)))
orignzb = item
except:
#test this out for manual post-processing items like original sin 003.3 - thor and loki 002...
#test this out for manual post-processing items like original sin 003.3 - thor and loki 002...
#***************************************************************************************
# need to assign digitchk here for issues that don't have a title and fail the above try.
#***************************************************************************************
#***************************************************************************************
try:
logger.fdebug('[FILECHECKER] justthedigits_1 len : ' + str(len(justthedigits_1)))
logger.fdebug('[FILECHECKER] digitchk len : ' + str(len(digitchk)))
if len(justthedigits_1) >= len(digitchk) and len(digitchk) > 3:
logger.fdebug('[FILECHECKER] Removing issue title.')
justthedigits_1 = re.sub(digitchk,'', justthedigits_1).strip()
justthedigits_1 = re.sub(digitchk, '', justthedigits_1).strip()
logger.fdebug('[FILECHECKER] After issue title removed [' + justthedigits_1 + ']')
titlechk = True
hyphensplit = digitchk
@ -768,7 +768,7 @@ def listFiles(dir,watchcomic,Publisher,AlternateSearch=None,manual=None,sarc=Non
justthedigits = justthedigits_1.split(' ', 1)[0]
digitsvalid = "false"
if not justthedigits.isdigit() and 'annual' not in justthedigits.lower():
logger.fdebug('[FILECHECKER] Invalid character found in filename after item removal - cannot find issue # with this present. Temporarily removing it from the comparison to be able to proceed.')
try:
@ -823,9 +823,9 @@ def listFiles(dir,watchcomic,Publisher,AlternateSearch=None,manual=None,sarc=Non
tmpthedigits = None
# justthedigits = justthedigits.split(' ', 1)[0]
#if the issue has an alphanumeric (issue_exceptions, join it and push it through)
logger.fdebug('[FILECHECKER] JUSTTHEDIGITS [' + justthedigits + ']' )
logger.fdebug('[FILECHECKER] JUSTTHEDIGITS [' + justthedigits + ']')
if digitsvalid == "true":
pass
else:
@ -835,7 +835,7 @@ def listFiles(dir,watchcomic,Publisher,AlternateSearch=None,manual=None,sarc=Non
if '.' in justthedigits:
tmpdec = justthedigits.find('.')
b4dec = justthedigits[:tmpdec]
a4dec = justthedigits[tmpdec+1:]
a4dec = justthedigits[tmpdec +1:]
if a4dec.isdigit() and b4dec.isdigit():
logger.fdebug('[FILECHECKER] DECIMAL ISSUE DETECTED')
digitsvalid = "true"
@ -852,11 +852,11 @@ def listFiles(dir,watchcomic,Publisher,AlternateSearch=None,manual=None,sarc=Non
logger.fdebug('[FILECHECKER] final justthedigits [' + justthedigits + ']')
if digitsvalid == "false":
if digitsvalid == "false":
logger.fdebug('[FILECHECKER] Issue number not properly detected...ignoring.')
comiccnt -=1 # remove the entry from the list count as it was incorrectly tallied.
continue
continue
if manual is not None:
#this is needed for Manual Run to determine matches
@ -884,8 +884,8 @@ def listFiles(dir,watchcomic,Publisher,AlternateSearch=None,manual=None,sarc=Non
# even if it's a V1, we need to pull the date for the given issue ID and get the publication year
# for the issue. Because even if it's a V1, if there are additional Volumes then it's possible that
# it will take the incorrect series. (ie. Detective Comics (1937) & Detective Comics (2011).
# If issue #28 (2013) is found, it exists in both series, and because DC 1937 is a V1, it will bypass
# it will take the incorrect series. (ie. Detective Comics (1937) & Detective Comics (2011).
# If issue #28 (2013) is found, it exists in both series, and because DC 1937 is a V1, it will bypass
# the year check which will result in the incorrect series being picked (1937)
@ -948,7 +948,7 @@ def listFiles(dir,watchcomic,Publisher,AlternateSearch=None,manual=None,sarc=Non
if (subnm[cnt].startswith('19') or subnm[cnt].startswith('20')) and len(subnm[cnt]) == 4:
logger.fdebug('[FILECHECKER] year detected: ' + subnm[cnt])
result_comyear = subnm[cnt]
##### - checking to see what removing this does for the masses
##### - checking to see what removing this does for the masses
if int(result_comyear) <= int(maxyear) and int(result_comyear) >= int(comyear):
logger.fdebug('[FILECHECKER] ' + str(result_comyear) + ' is within the series range of ' + str(comyear) + '-' + str(maxyear))
#still possible for incorrect match if multiple reboots of series end/start in same year
@ -1009,7 +1009,7 @@ def listFiles(dir,watchcomic,Publisher,AlternateSearch=None,manual=None,sarc=Non
#if the issue title was present and it contained a numeric, it will pull that as the issue incorrectly
if isstitle_chk == True:
justthedigits = possibleissue_num
subname = re.sub(' '.join(vals[0]['isstitle_removal']),'',subname).strip()
subname = re.sub(' '.join(vals[0]['isstitle_removal']), '', subname).strip()
else:
logger.fdebug('No issue title.')
@ -1033,24 +1033,24 @@ def listFiles(dir,watchcomic,Publisher,AlternateSearch=None,manual=None,sarc=Non
else:
sub_removed = subname.replace('_', ' ')
logger.fdebug('[FILECHECKER] sub_removed: ' + sub_removed)
split_sub = sub_removed.rsplit(' ',1)[0].split(' ') #removes last word (assuming it's the issue#)
split_sub = sub_removed.rsplit(' ', 1)[0].split(' ') #removes last word (assuming it's the issue#)
split_mod = modwatchcomic.replace('_', ' ').split() #batman
i = 0
newc = ''
while (i < len(split_mod)):
newc += split_sub[i] + ' '
newc += split_sub[i] + ' '
i+=1
if newc:
split_sub = newc.strip().split()
logger.fdebug('[FILECHECKER] split_sub: ' + str(split_sub))
logger.fdebug('[FILECHECKER] split_mod: ' + str(split_mod))
x = len(split_sub)-1
x = len(split_sub) -1
scnt = 0
if x > len(split_mod)-1:
if x > len(split_mod) -1:
logger.fdebug('[FILECHECKER] number of words do not match...aborting.')
else:
while ( x > -1 ):
while (x > -1):
logger.fdebug(str(split_sub[x]) + ' comparing to ' + str(split_mod[x]))
if str(split_sub[x]).lower() == str(split_mod[x]).lower():
scnt+=1
@ -1062,7 +1062,7 @@ def listFiles(dir,watchcomic,Publisher,AlternateSearch=None,manual=None,sarc=Non
totalcnt = int(len(split_mod))
logger.fdebug('[FILECHECKER] split_mod length:' + str(totalcnt))
try:
spercent = (wordcnt/totalcnt) * 100
spercent = (wordcnt /totalcnt) * 100
except ZeroDivisionError:
spercent = 0
logger.fdebug('[FILECHECKER] we got ' + str(spercent) + ' percent.')
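The block above walks both word lists from the right and scores the overlap; note that if `wordcnt` is an int like `totalcnt`, `wordcnt /totalcnt` floors under Python 2, so `spercent` can only land on multiples of 100. A floating-point sketch of the same scoring:

def match_percent(file_words, title_words):
    if len(file_words) > len(title_words):
        return 0.0   # more words left in the filename than in the title: abort, as above
    hits = sum(1 for a, b in zip(reversed(file_words), reversed(title_words))
               if a.lower() == b.lower())
    return (float(hits) / len(title_words)) * 100 if title_words else 0.0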
@ -1129,7 +1129,7 @@ def validateAndCreateDirectory(dir, create=False, module=None):
try:
permission = int(mylar.CHMOD_DIR, 8)
os.umask(0) # this is probably redundant, but it doesn't hurt to clear the umask here.
os.makedirs(dir.rstrip(), permission )
os.makedirs(dir.rstrip(), permission)
except OSError:
raise SystemExit(module + ' Could not create directory: ' + dir + '. Exiting....')
return True
@ -1140,7 +1140,7 @@ def validateAndCreateDirectory(dir, create=False, module=None):
def indices(string, char):
return [ i for i,c in enumerate(string) if c == char ]
return [i for i, c in enumerate(string) if c == char]
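For example, the helper reformatted above returns every position of a character in a string:

print(indices('spider-man - 001', '-'))   # -> [6, 11]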
def traverse_directories(dir):
filelist = []
@ -1172,4 +1172,4 @@ def crc(filename):
#return "%X"%(prev & 0xFFFFFFFF)
#speed in lieu of memory (file into memory entirely)
return "%X" % (zlib.crc32(open(filename,"rb").read()) & 0xFFFFFFFF)
return "%X" % (zlib.crc32(open(filename, "rb").read()) & 0xFFFFFFFF)
View File
@ -27,9 +27,9 @@ def Startit(searchName, searchIssue, searchYear, ComicVersion, IssDateFix):
encodeSearch = urllib.quote_plus(searchName)
splitSearch = encodeSearch.split(" ")
joinSearch = "+".join(splitSearch)+"+"+searchIssue
searchIsOne = "0"+searchIssue
searchIsTwo = "00"+searchIssue
joinSearch = "+".join(splitSearch) +"+" +searchIssue
searchIsOne = "0" +searchIssue
searchIsTwo = "00" +searchIssue
if mylar.PREFERRED_QUALITY == 1: joinSearch = joinSearch + " .cbr"
elif mylar.PREFERRED_QUALITY == 2: joinSearch = joinSearch + " .cbz"
@ -39,7 +39,7 @@ def Startit(searchName, searchIssue, searchYear, ComicVersion, IssDateFix):
regexName = searchName.replace(" ", '((\\s)?[-:])?(\\s)?')
#logger.fdebug('searchName:' + searchName)
#logger.fdebug('regexName:' + regexName)
@ -82,7 +82,7 @@ def Startit(searchName, searchIssue, searchYear, ComicVersion, IssDateFix):
"link": urlParse["href"],
"length": urlParse["length"],
"pubdate": feed.entries[countUp].updated})
countUp=countUp+1
countUp=countUp +1
logger.fdebug('keypair: ' + str(keyPair))
@ -94,10 +94,10 @@ def Startit(searchName, searchIssue, searchYear, ComicVersion, IssDateFix):
regExOne = "(%s\\s*(0)?(0)?%s\\s*\\(.*?\\)\\s*\\(%s\\))" %(regexName, searchIssue, searchYear)
#Sometimes comics aren't actually published the same year comicVine says - trying to adjust for these cases
regExTwo = "(%s\\s*(0)?(0)?%s\\s*\\(%s\\))" %(regexName, searchIssue, int(searchYear)+1)
regExThree = "(%s\\s*(0)?(0)?%s\\s*\\(%s\\))" %(regexName, searchIssue, int(searchYear)-1)
regExFour = "(%s\\s*(0)?(0)?%s\\s*\\(.*?\\)\\s*\\(%s\\))" %(regexName, searchIssue, int(searchYear)+1)
regExFive = "(%s\\s*(0)?(0)?%s\\s*\\(.*?\\)\\s*\\(%s\\))" %(regexName, searchIssue, int(searchYear)-1)
regExTwo = "(%s\\s*(0)?(0)?%s\\s*\\(%s\\))" %(regexName, searchIssue, int(searchYear) +1)
regExThree = "(%s\\s*(0)?(0)?%s\\s*\\(%s\\))" %(regexName, searchIssue, int(searchYear) -1)
regExFour = "(%s\\s*(0)?(0)?%s\\s*\\(.*?\\)\\s*\\(%s\\))" %(regexName, searchIssue, int(searchYear) +1)
regExFive = "(%s\\s*(0)?(0)?%s\\s*\\(.*?\\)\\s*\\(%s\\))" %(regexName, searchIssue, int(searchYear) -1)
regexList=[regEx, regExOne, regExTwo, regExThree, regExFour, regExFive]
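The six patterns differ only in optional zero-padding, an optional bracketed scanner tag, and a publication year that may be off by one from ComicVine. They could be generated instead of spelled out; a sketch (`build_pattern` is hypothetical, not part of the code):

def build_pattern(search_name, issue, year):
    name_rx = search_name.replace(" ", '((\\s)?[-:])?(\\s)?')   # same trick as regexName above
    years = '|'.join(str(y) for y in (year - 1, year, year + 1))
    return "(%s\\s*(0)?(0)?%s\\s*(\\(.*?\\)\\s*)?\\((%s)\\))" % (name_rx, issue, years)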
@ -118,7 +118,7 @@ def Startit(searchName, searchIssue, searchYear, ComicVersion, IssDateFix):
# while regExCount < len(regexList):
# regExTest = re.findall(regexList[regExCount], subs, flags=re.IGNORECASE)
# regExCount = regExCount +1
# if regExTest:
# if regExTest:
# logger.fdebug(title)
# entries.append({
# 'title': subs,
@ -146,11 +146,11 @@ def Startit(searchName, searchIssue, searchYear, ComicVersion, IssDateFix):
if (searchYear in subs or ComicYearFix in subs) and noYear == 'True':
#this would occur on the next check in the line, if year exists and
#the noYear check in the first check came back valid append it
subs = noYearline + ' (' + searchYear + ')'
subs = noYearline + ' (' + searchYear + ')'
noYear = 'False'
if noYear == 'False':
entries.append({
'title': subs,
'link': entry['link'],
@ -158,11 +158,11 @@ def Startit(searchName, searchIssue, searchYear, ComicVersion, IssDateFix):
'length': entry['length']
})
break # break out so we don't write more shit.
# if len(entries) >= 1:
if tallycount >= 1:
mres['entries'] = entries
return mres
return mres
else:
logger.fdebug("No Results Found")
return "no results"
View File
@ -6,7 +6,7 @@ import time
import mylar
from mylar import logger
def putfile(localpath,file): #localpath=full path to .torrent (including filename), file=filename of torrent
def putfile(localpath, file): #localpath=full path to .torrent (including filename), file=filename of torrent
try:
import paramiko
@ -17,7 +17,7 @@ def putfile(localpath,file): #localpath=full path to .torrent (including file
logger.fdebug('aborting send.')
return "fail"
host = mylar.SEEDBOX_HOST
host = mylar.SEEDBOX_HOST
port = int(mylar.SEEDBOX_PORT) #this is usually 22
transport = paramiko.Transport((host, port))
@ -45,7 +45,7 @@ def putfile(localpath,file): #localpath=full path to .torrent (including file
return "fail"
sendcheck = False
while sendcheck == False:
try:
sftp.put(localpath, rempath)
@ -79,23 +79,23 @@ def sendfiles(filelist):
return
fhost = mylar.TAB_HOST.find(':')
host = mylar.TAB_HOST[:fhost]
port = int(mylar.TAB_HOST[fhost+1:])
host = mylar.TAB_HOST[:fhost]
port = int(mylar.TAB_HOST[fhost +1:])
logger.fdebug('Destination: ' + host)
logger.fdebug('Using SSH port : ' + str(port))
transport = paramiko.Transport((host, port))
password = mylar.TAB_PASS
username = mylar.TAB_USER
password = mylar.TAB_PASS
username = mylar.TAB_USER
transport.connect(username = username, password = password)
sftp = paramiko.SFTPClient.from_transport(transport)
remotepath = mylar.TAB_DIRECTORY
logger.fdebug('remote path set to ' + remotepath)
if len(filelist) > 0:
logger.info('Initiating send for ' + str(len(filelist)) + ' files...')
return sendtohome(sftp, remotepath, filelist, transport)
@ -104,7 +104,7 @@ def sendfiles(filelist):
def sendtohome(sftp, remotepath, filelist, transport):
fhost = mylar.TAB_HOST.find(':')
host = mylar.TAB_HOST[:fhost]
port = int(mylar.TAB_HOST[fhost+1:])
port = int(mylar.TAB_HOST[fhost +1:])
successlist = []
filestotal = len(filelist)
@ -126,13 +126,13 @@ def sendtohome(sftp, remotepath, filelist, transport):
filename = tempfile.replace('\0ff1a', '-')
#now we encode the structure to ascii so we can write directories/filenames without error.
filename = tempfile.encode('ascii','ignore')
filename = tempfile.encode('ascii', 'ignore')
remdir = remotepath
localsend = files['filepath']
logger.info('Sending : ' + localsend)
remotesend = os.path.join(remdir,filename)
remotesend = os.path.join(remdir, filename)
logger.info('To : ' + remotesend)
try:
@ -146,13 +146,13 @@ def sendtohome(sftp, remotepath, filelist, transport):
if not filechk:
sendcheck = False
count = 1
while sendcheck == False:
try:
sftp.put(localsend, remotesend)#, callback=printTotals)
sendcheck = True
except Exception, e:
logger.info('Attempt #' + str(count) + ': ERROR Sending issue to seedbox *** Caught exception: %s: %s' % (e.__class__,e))
logger.info('Attempt #' + str(count) + ': ERROR Sending issue to seedbox *** Caught exception: %s: %s' % (e.__class__, e))
logger.info('Forcibly closing connection and attempting to reconnect')
sftp.close()
transport.close()
@ -181,7 +181,7 @@ def sendtohome(sftp, remotepath, filelist, transport):
sftp.put(localsend, remotesend)
sendcheck = True
except Exception, e:
logger.info('Attempt #' + str(count) + ': ERROR Sending issue to seedbox *** Caught exception: %s: %s' % (e.__class__,e))
logger.info('Attempt #' + str(count) + ': ERROR Sending issue to seedbox *** Caught exception: %s: %s' % (e.__class__, e))
logger.info('Forcibly closing connection and attempting to reconnect')
sftp.close()
transport.close()
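Both retry branches do the same dance: on any exception, close the SFTP session and transport, reconnect, and try the put again. Condensed, with `connect()` standing in for whatever returns a fresh paramiko transport/SFTP pair. (Separately, the '\0ff1a' literal earlier in this file is presumably meant to be u'\uff1a', the fullwidth colon; as written it is a NUL byte followed by the text 'ff1a' and can never match.)

import time

def put_with_retry(connect, localpath, remotepath, attempts=5):
    transport, sftp = connect()
    for attempt in range(1, attempts + 1):
        try:
            sftp.put(localpath, remotepath)
            return True
        except Exception as e:
            print('Attempt #%d failed: %s' % (attempt, e))
            sftp.close()                    # force the dead connection down
            transport.close()
            time.sleep(5)
            transport, sftp = connect()     # and bring up a fresh one
    return False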
View File
@ -24,8 +24,8 @@ import mylar
def multikeysort(items, columns):
comparers = [ ((itemgetter(col[1:].strip()), -1) if col.startswith('-') else (itemgetter(col.strip()), 1)) for col in columns]
comparers = [((itemgetter(col[1:].strip()), -1) if col.startswith('-') else (itemgetter(col.strip()), 1)) for col in columns]
def comparer(left, right):
for fn, mult in comparers:
result = cmp(fn(left), fn(right))
@ -33,51 +33,51 @@ def multikeysort(items, columns):
return mult * result
else:
return 0
return sorted(items, cmp=comparer)
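
multikeysort sorts a list of dicts on several columns, with a leading '-' marking a descending key; note that sorted(cmp=...) is Python 2 only (Python 3 would need functools.cmp_to_key). A usage sketch with made-up rows:

rows = [{'ComicName': 'Saga', 'ComicYear': '2012'},
        {'ComicName': 'Akira', 'ComicYear': '1988'},
        {'ComicName': 'Bone', 'ComicYear': '1991'}]

# newest year first, name ascending as the tie-breaker
ordered = multikeysort(rows, ['-ComicYear', 'ComicName'])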
def checked(variable):
if variable:
return 'Checked'
else:
return ''
def radio(variable, pos):
if variable == pos:
return 'Checked'
else:
return ''
def latinToAscii(unicrap):
"""
From couch potato
"""
xlate = {0xc0:'A', 0xc1:'A', 0xc2:'A', 0xc3:'A', 0xc4:'A', 0xc5:'A',
0xc6:'Ae', 0xc7:'C',
0xc8:'E', 0xc9:'E', 0xca:'E', 0xcb:'E', 0x86:'e',
0xcc:'I', 0xcd:'I', 0xce:'I', 0xcf:'I',
0xd0:'Th', 0xd1:'N',
0xd2:'O', 0xd3:'O', 0xd4:'O', 0xd5:'O', 0xd6:'O', 0xd8:'O',
0xd9:'U', 0xda:'U', 0xdb:'U', 0xdc:'U',
0xdd:'Y', 0xde:'th', 0xdf:'ss',
0xe0:'a', 0xe1:'a', 0xe2:'a', 0xe3:'a', 0xe4:'a', 0xe5:'a',
0xe6:'ae', 0xe7:'c',
0xe8:'e', 0xe9:'e', 0xea:'e', 0xeb:'e', 0x0259:'e',
0xec:'i', 0xed:'i', 0xee:'i', 0xef:'i',
0xf0:'th', 0xf1:'n',
0xf2:'o', 0xf3:'o', 0xf4:'o', 0xf5:'o', 0xf6:'o', 0xf8:'o',
0xf9:'u', 0xfa:'u', 0xfb:'u', 0xfc:'u',
0xfd:'y', 0xfe:'th', 0xff:'y',
0xa1:'!', 0xa2:'{cent}', 0xa3:'{pound}', 0xa4:'{currency}',
0xa5:'{yen}', 0xa6:'|', 0xa7:'{section}', 0xa8:'{umlaut}',
0xa9:'{C}', 0xaa:'{^a}', 0xab:'<<', 0xac:'{not}',
0xad:'-', 0xae:'{R}', 0xaf:'_', 0xb0:'{degrees}',
0xb1:'{+/-}', 0xb2:'{^2}', 0xb3:'{^3}', 0xb4:"'",
0xb5:'{micro}', 0xb6:'{paragraph}', 0xb7:'*', 0xb8:'{cedilla}',
0xb9:'{^1}', 0xba:'{^o}', 0xbb:'>>',
0xbc:'{1/4}', 0xbd:'{1/2}', 0xbe:'{3/4}', 0xbf:'?',
0xd7:'*', 0xf7:'/'
xlate = {0xc0: 'A', 0xc1: 'A', 0xc2: 'A', 0xc3: 'A', 0xc4: 'A', 0xc5: 'A',
0xc6: 'Ae', 0xc7: 'C',
0xc8: 'E', 0xc9: 'E', 0xca: 'E', 0xcb: 'E', 0x86: 'e',
0xcc: 'I', 0xcd: 'I', 0xce: 'I', 0xcf: 'I',
0xd0: 'Th', 0xd1: 'N',
0xd2: 'O', 0xd3: 'O', 0xd4: 'O', 0xd5: 'O', 0xd6: 'O', 0xd8: 'O',
0xd9: 'U', 0xda: 'U', 0xdb: 'U', 0xdc: 'U',
0xdd: 'Y', 0xde: 'th', 0xdf: 'ss',
0xe0: 'a', 0xe1: 'a', 0xe2: 'a', 0xe3: 'a', 0xe4: 'a', 0xe5: 'a',
0xe6: 'ae', 0xe7: 'c',
0xe8: 'e', 0xe9: 'e', 0xea: 'e', 0xeb: 'e', 0x0259: 'e',
0xec: 'i', 0xed: 'i', 0xee: 'i', 0xef: 'i',
0xf0: 'th', 0xf1: 'n',
0xf2: 'o', 0xf3: 'o', 0xf4: 'o', 0xf5: 'o', 0xf6: 'o', 0xf8: 'o',
0xf9: 'u', 0xfa: 'u', 0xfb: 'u', 0xfc: 'u',
0xfd: 'y', 0xfe: 'th', 0xff: 'y',
0xa1: '!', 0xa2: '{cent}', 0xa3: '{pound}', 0xa4: '{currency}',
0xa5: '{yen}', 0xa6: '|', 0xa7: '{section}', 0xa8: '{umlaut}',
0xa9: '{C}', 0xaa: '{^a}', 0xab: '<<', 0xac: '{not}',
0xad: '-', 0xae: '{R}', 0xaf: '_', 0xb0: '{degrees}',
0xb1: '{+/-}', 0xb2: '{^2}', 0xb3: '{^3}', 0xb4: "'",
0xb5: '{micro}', 0xb6: '{paragraph}', 0xb7: '*', 0xb8: '{cedilla}',
0xb9: '{^1}', 0xba: '{^o}', 0xbb: '>>',
0xbc: '{1/4}', 0xbd: '{1/2}', 0xbe: '{3/4}', 0xbf: '?',
0xd7: '*', 0xf7: '/'
}
r = ''
@ -89,10 +89,10 @@ def latinToAscii(unicrap):
else:
r += str(i)
return r
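
The xlate table simply maps Latin-1 (and a couple of stray) code points onto plain-ASCII stand-ins, character by character; anything else above 0x80 is dropped. An illustrative call:

print(latinToAscii(u'Café Münchën'))   # -> 'Cafe Munchen'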
def convert_milliseconds(ms):
seconds = ms/1000
seconds = ms /1000
gmtime = time.gmtime(seconds)
if seconds > 3600:
minutes = time.strftime("%H:%M:%S", gmtime)
@ -100,7 +100,7 @@ def convert_milliseconds(ms):
minutes = time.strftime("%M:%S", gmtime)
return minutes
def convert_seconds(s):
gmtime = time.gmtime(s)
@ -110,19 +110,19 @@ def convert_seconds(s):
minutes = time.strftime("%M:%S", gmtime)
return minutes
def today():
today = datetime.date.today()
yyyymmdd = datetime.date.isoformat(today)
return yyyymmdd
def now():
now = datetime.datetime.now()
return now.strftime("%Y-%m-%d %H:%M:%S")
def bytes_to_mb(bytes):
mb = int(bytes)/1048576
mb = int(bytes) /1048576
size = '%.1f MB' % mb
return size
@ -136,7 +136,7 @@ def human_size(size_bytes):
# because I really hate unnecessary plurals
return "1 byte"
suffixes_table = [('bytes',0),('KB',0),('MB',1),('GB',2),('TB',2), ('PB',2)]
suffixes_table = [('bytes', 0), ('KB', 0), ('MB', 1), ('GB', 2), ('TB', 2), ('PB', 2)]
num = float(0 if size_bytes is None else size_bytes)
for suffix, precision in suffixes_table:
@ -163,34 +163,34 @@ def human2bytes(s):
num = s[:-1]
assert num.isdigit() and letter in symbols
num = float(num)
prefix = {symbols[0]:1}
prefix = {symbols[0]: 1}
for i, s in enumerate(symbols[1:]):
prefix[s] = 1 << (i+1)*10
prefix[s] = 1 << (i +1) *10
return int(num * prefix[letter])
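
The shift in human2bytes builds power-of-1024 multipliers: 1 << 10 is 1024 for 'K', 1 << 20 is 1048576 for 'M', and so on. A worked sketch of the same table, assuming the symbols tuple starts at 'B' (the tuple itself sits outside this hunk):

symbols = ('B', 'K', 'M', 'G', 'T', 'P')
prefix = {symbols[0]: 1}
for i, s in enumerate(symbols[1:]):
    prefix[s] = 1 << (i + 1) * 10

assert prefix['K'] == 1024
assert prefix['M'] == 1024 ** 2   # 1048576
assert int(1.5 * prefix['G']) == 1610612736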
def replace_all(text, dic):
for i, j in dic.iteritems():
text = text.replace(i, j)
return text.rstrip()
def cleanName(string):
pass1 = latinToAscii(string).lower()
out_string = re.sub('[\/\@\#\$\%\^\*\+\"\[\]\{\}\<\>\=\_]', '', pass1).encode('utf-8')
return out_string
def cleanTitle(title):
title = re.sub('[\.\-\/\_]', ' ', title).lower()
# Strip out extra whitespace
title = ' '.join(title.split())
title = title.title()
return title
def extract_logline(s):
# Default log format
pattern = re.compile(r'(?P<timestamp>.*?)\s\-\s(?P<level>.*?)\s*\:\:\s(?P<thread>.*?)\s\:\s(?P<message>.*)', re.VERBOSE)
@ -203,7 +203,7 @@ def extract_logline(s):
return (timestamp, level, thread, message)
else:
return None
def is_number(s):
try:
float(s)
@ -225,7 +225,7 @@ def decimal_issue(iss):
deciss = int(iss) * 1000
else:
iss_b4dec = iss[:iss_find]
iss_decval = iss[iss_find+1:]
iss_decval = iss[iss_find +1:]
if int(iss_decval) == 0:
iss = iss_b4dec
issdec = int(iss_decval)
@ -280,16 +280,16 @@ def rename_param(comicid, comicname, issue, ofilename, comicyear=None, issueid=N
return
else:
logger.info('Int Issue_number compare found. continuing...')
issueid = chkissue['IssueID']
issueid = chkissue['IssueID']
else:
issueid = chkissue['IssueID']
#use issueid to get publisher, series, year, issue number
logger.fdebug('issueid is now : ' + str(issueid))
issuenzb = myDB.selectone("SELECT * from issues WHERE ComicID=? AND IssueID=?", [comicid,issueid]).fetchone()
issuenzb = myDB.selectone("SELECT * from issues WHERE ComicID=? AND IssueID=?", [comicid, issueid]).fetchone()
if issuenzb is None:
logger.fdebug('not an issue, checking against annuals')
issuenzb = myDB.selectone("SELECT * from annuals WHERE ComicID=? AND IssueID=?", [comicid,issueid]).fetchone()
issuenzb = myDB.selectone("SELECT * from annuals WHERE ComicID=? AND IssueID=?", [comicid, issueid]).fetchone()
if issuenzb is None:
logger.fdebug('Unable to rename - cannot locate issue id within db')
return
@ -308,7 +308,7 @@ def rename_param(comicid, comicname, issue, ofilename, comicyear=None, issueid=N
'C',
'X',
'O']
valid_spaces = ('.','-')
valid_spaces = ('.', '-')
for issexcept in issue_exceptions:
if issexcept.lower() in issuenum.lower():
logger.fdebug('ALPHANUMERIC EXCEPTION : [' + issexcept + ']')
@ -316,8 +316,8 @@ def rename_param(comicid, comicname, issue, ofilename, comicyear=None, issueid=N
logger.fdebug('character space denoted as : ' + iss_space)
else:
logger.fdebug('character space not denoted.')
iss_space = ''
# if issexcept == 'INH':
iss_space = ''
# if issexcept == 'INH':
# issue_except = '.INH'
if issexcept == 'NOW':
if '!' in issuenum: issuenum = re.sub('\!', '', issuenum)
@ -340,12 +340,12 @@ def rename_param(comicid, comicname, issue, ofilename, comicyear=None, issueid=N
# if '!' in issuenum: issuenum = re.sub('\!', '', issuenum)
# issuenum = re.sub("[^0-9]", "", issuenum)
# issue_except = '.NOW'
if '.' in issuenum:
iss_find = issuenum.find('.')
iss_b4dec = issuenum[:iss_find]
iss_decval = issuenum[iss_find+1:]
if iss_decval.endswith('.'):
iss_decval = issuenum[iss_find +1:]
if iss_decval.endswith('.'):
iss_decval = iss_decval[:-1]
if int(iss_decval) == 0:
iss = iss_b4dec
@ -373,7 +373,7 @@ def rename_param(comicid, comicname, issue, ofilename, comicyear=None, issueid=N
if mylar.ZERO_LEVEL_N == "none": zeroadd = ""
elif mylar.ZERO_LEVEL_N == "0x": zeroadd = "0"
elif mylar.ZERO_LEVEL_N == "00x": zeroadd = "00"
logger.fdebug('Zero Suppression set to : ' + str(mylar.ZERO_LEVEL_N))
prettycomiss = None
@ -444,7 +444,7 @@ def rename_param(comicid, comicname, issue, ofilename, comicyear=None, issueid=N
logger.fdebug('Pretty Comic Issue is : ' + str(prettycomiss))
issueyear = issuenzb['IssueDate'][:4]
month = issuenzb['IssueDate'][5:7].replace('-','').strip()
month = issuenzb['IssueDate'][5:7].replace('-', '').strip()
month_name = fullmonth(month)
logger.fdebug('Issue Year : ' + str(issueyear))
comicnzb= myDB.selectone("SELECT * from comics WHERE comicid=?", [comicid]).fetchone()
@ -466,7 +466,7 @@ def rename_param(comicid, comicname, issue, ofilename, comicyear=None, issueid=N
comversion = 'None'
#if comversion is None, remove it so it doesn't populate with 'None'
if comversion == 'None':
chunk_f_f = re.sub('\$VolumeN','',mylar.FILE_FORMAT)
chunk_f_f = re.sub('\$VolumeN', '', mylar.FILE_FORMAT)
chunk_f = re.compile(r'\s+')
chunk_file_format = chunk_f.sub(' ', chunk_f_f)
logger.fdebug('No version # found for series, removing from filename')
@ -475,7 +475,7 @@ def rename_param(comicid, comicname, issue, ofilename, comicyear=None, issueid=N
chunk_file_format = mylar.FILE_FORMAT
if annualize is None:
chunk_f_f = re.sub('\$Annual','',chunk_file_format)
chunk_f_f = re.sub('\$Annual', '', chunk_file_format)
chunk_f = re.compile(r'\s+')
chunk_file_format = chunk_f.sub(' ', chunk_f_f)
logger.fdebug('not an annual - removing from filename parameters')
@ -492,7 +492,7 @@ def rename_param(comicid, comicname, issue, ofilename, comicyear=None, issueid=N
logger.fdebug('[' + series + '][ANNUALS-ON][ANNUAL IN SERIES][NOT $ANNUAL] prettycomiss: ' + str(prettycomiss))
else:
#because it exists within title, strip it then use formatting tag for placement of wording.
chunk_f_f = re.sub('\$Annual','',chunk_file_format)
chunk_f_f = re.sub('\$Annual', '', chunk_file_format)
chunk_f = re.compile(r'\s+')
chunk_file_format = chunk_f.sub(' ', chunk_f_f)
logger.fdebug('[' + series + '][ANNUALS-ON][ANNUAL IN SERIES][$ANNUAL] prettycomiss: ' + str(prettycomiss))
@ -516,7 +516,7 @@ def rename_param(comicid, comicname, issue, ofilename, comicyear=None, issueid=N
logger.fdebug('[' + series + '][ANNUALS-OFF][ANNUAL IN SERIES][NOT $ANNUAL] prettycomiss: ' + str(prettycomiss))
else:
#because it exists within title, strip it then use formatting tag for placement of wording.
chunk_f_f = re.sub('\$Annual','',chunk_file_format)
chunk_f_f = re.sub('\$Annual', '', chunk_file_format)
chunk_f = re.compile(r'\s+')
chunk_file_format = chunk_f.sub(' ', chunk_f_f)
logger.fdebug('[' + series + '][ANNUALS-OFF][ANNUAL IN SERIES][$ANNUAL] prettycomiss: ' + str(prettycomiss))
@ -533,15 +533,15 @@ def rename_param(comicid, comicname, issue, ofilename, comicyear=None, issueid=N
logger.fdebug('Annual detected within series title of ' + series + '. Not auto-correcting issue #')
seriesfilename = seriesfilename.encode('ascii', 'ignore').strip()
filebad = [':',',','/','?','!','\''] #in u_comicname or '/' in u_comicname or ',' in u_comicname or '?' in u_comicname:
filebad = [':', ',', '/', '?', '!', '\''] #in u_comicname or '/' in u_comicname or ',' in u_comicname or '?' in u_comicname:
for dbd in filebad:
if dbd in seriesfilename:
if dbd == '/': repthechar = '-'
else: repthechar = ''
seriesfilename = seriesfilename.replace(dbd,repthechar)
seriesfilename = seriesfilename.replace(dbd, repthechar)
logger.fdebug('Altering series name due to filenaming restrictions: ' + seriesfilename)
publisher = re.sub('!','', publisher)
publisher = re.sub('!', '', publisher)
file_values = {'$Series': seriesfilename,
'$Issue': prettycomiss,
@ -585,10 +585,10 @@ def rename_param(comicid, comicname, issue, ofilename, comicyear=None, issueid=N
logger.fdebug('Source: ' + str(ofilename))
logger.fdebug('Destination: ' + str(dst))
rename_this = { "destination_dir" : dst,
"nfilename" : nfilename,
"issueid" : issueid,
"comicid" : comicid }
rename_this = {"destination_dir": dst,
"nfilename": nfilename,
"issueid": issueid,
"comicid": comicid}
return rename_this
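
rename_param ultimately pushes file_values (the $Series / $Issue style tokens) through the user's mylar.FILE_FORMAT template to build nfilename. A minimal sketch of that substitution step, with a made-up format string and a helper name of my own:

def fill_template(file_format, values):
    # plain token replacement, in the spirit of helpers.replace_all()
    out = file_format
    for token, replacement in values.items():
        out = out.replace(token, str(replacement))
    return out.strip()

file_values = {'$Series': 'Invincible', '$Issue': '001', '$Year': '2003'}
fill_template('$Series $Issue ($Year)', file_values)   # 'Invincible 001 (2003)'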
@ -603,12 +603,12 @@ def apiremove(apistring, type):
#type = & to denote up until next api variable
value_regex = re.compile("(?<=%26i=1%26r=)(?P<value>.*?)(?=" + str(type) +")")
#match = value_regex.search(apistring)
apiremoved = value_regex.sub("xUDONTNEEDTOKNOWTHISx", apistring)
apiremoved = value_regex.sub("xUDONTNEEDTOKNOWTHISx", apistring)
#need to remove the urlencoded-portions as well in future
return apiremoved
def ComicSort(comicorder=None,sequence=None,imported=None):
def ComicSort(comicorder=None, sequence=None, imported=None):
if sequence:
# if it's on startup, load the sql into a tuple for use to avoid record-locking
i = 0
@ -641,16 +641,16 @@ def ComicSort(comicorder=None,sequence=None,imported=None):
comicidlist.append(csort['ComicID'])
i+=1
if sequence == 'startup':
if i == 0:
comicorder['SortOrder'] = ({'ComicID':'99999','ComicOrder':1})
if i == 0:
comicorder['SortOrder'] = ({'ComicID': '99999', 'ComicOrder': 1})
comicorder['LastOrderNo'] = 1
comicorder['LastOrderID'] = 99999
else:
else:
comicorder['SortOrder'] = comicorderlist
comicorder['LastOrderNo'] = i-1
comicorder['LastOrderID'] = comicorder['SortOrder'][i-1]['ComicID']
comicorder['LastOrderNo'] = i -1
comicorder['LastOrderID'] = comicorder['SortOrder'][i -1]['ComicID']
if i < 0: i = 0
logger.info('Successfully ordered ' + str(i-1) + ' series in your watchlist.')
logger.info('Successfully ordered ' + str(i -1) + ' series in your watchlist.')
return comicorder
elif sequence == 'update':
mylar.COMICSORT['SortOrder'] = comicorderlist
@ -658,10 +658,10 @@ def ComicSort(comicorder=None,sequence=None,imported=None):
if i == 0:
placemnt = 1
else:
placemnt = int(i-1)
placemnt = int(i -1)
mylar.COMICSORT['LastOrderNo'] = placemnt
mylar.COMICSORT['LastOrderID'] = mylar.COMICSORT['SortOrder'][placemnt]['ComicID']
return
return
else:
# for new series adds, we already know the comicid, so we set the sortorder to an abnormally high #
# we DO NOT write to the db to avoid record-locking.
@ -679,10 +679,10 @@ def ComicSort(comicorder=None,sequence=None,imported=None):
mylar.COMICSORT['LastOrderNo'] = lastorderval
mylar.COMICSORT['LastOrderID'] = imported
return
def fullmonth(monthno):
#simple numerical to worded month conversion....
basmonths = {'1':'January','2':'February','3':'March','4':'April','5':'May','6':'June','7':'July','8':'August','9':'September','10':'October','11':'November','12':'December'}
basmonths = {'1': 'January', '2': 'February', '3': 'March', '4': 'April', '5': 'May', '6': 'June', '7': 'July', '8': 'August', '9': 'September', '10': 'October', '11': 'November', '12': 'December'}
monthconv = None
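
basmonths is a hand-rolled month-number-to-name map. For reference, the stdlib carries the same data, so an equivalent lookup (an alternative sketch, not what this code does) would be:

import calendar

def fullmonth(monthno):
    # calendar.month_name[0] is ''; indices 1-12 are January..December (locale-dependent)
    return calendar.month_name[int(monthno)]

fullmonth('7')   # -> 'July' under an English locale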
@ -710,19 +710,19 @@ def updateComicLocation():
if dirlist is not None:
for dl in dirlist:
u_comicnm = dl['ComicName']
# let's remove the non-standard characters here that will break filenaming / searching.
comicname_folder = filesafe(u_comicnm)
publisher = re.sub('!','',dl['ComicPublisher']) # thanks Boom!
publisher = re.sub('!', '', dl['ComicPublisher']) # thanks Boom!
year = dl['ComicYear']
comversion = dl['ComicVersion']
if comversion is None:
comversion = 'None'
#if comversion is None, remove it so it doesn't populate with 'None'
if comversion == 'None':
chunk_f_f = re.sub('\$VolumeN','',mylar.FOLDER_FORMAT)
chunk_f_f = re.sub('\$VolumeN', '', mylar.FOLDER_FORMAT)
chunk_f = re.compile(r'\s+')
folderformat = chunk_f.sub(' ', chunk_f_f)
else:
@ -746,11 +746,11 @@ def updateComicLocation():
if mylar.FOLDER_FORMAT == '':
comlocation = re.sub(mylar.DESTINATION_DIR, mylar.NEWCOM_DIR, dl['ComicLocation']).strip()
else:
first = replace_all(folderformat, values)
first = replace_all(folderformat, values)
if mylar.REPLACE_SPACES:
#mylar.REPLACE_CHAR ...determines what to replace spaces with underscore or dot
first = first.replace(' ', mylar.REPLACE_CHAR)
comlocation = os.path.join(mylar.NEWCOM_DIR,first).strip()
comlocation = os.path.join(mylar.NEWCOM_DIR, first).strip()
else:
#DESTINATION_DIR = /mnt/mediavg/Comics
@ -809,7 +809,7 @@ def cleanhtml(raw_html):
def issuedigits(issnum):
import db, logger
int_issnum = None
try:
@ -818,7 +818,7 @@ def issuedigits(issnum):
return 9999999999
if issnum.isdigit():
int_issnum = int( issnum ) * 1000
int_issnum = int(issnum) * 1000
else:
#count = 0
#for char in issnum:
@ -845,7 +845,7 @@ def issuedigits(issnum):
if '!' in issnum: issnum = re.sub('\!', '', issnum)
remdec = issnum.find('.') #find the decimal position.
if remdec == -1:
#if no decimal, it's all one string
#if no decimal, it's all one string
#remove the last 3 characters from the issue # (NOW)
int_issnum = (int(issnum[:-3]) * 1000) + ord('n') + ord('o') + ord('w')
else:
@ -869,13 +869,13 @@ def issuedigits(issnum):
int_issnum = 9999999999 * 1000 # set 9999999999 for integer value of issue
elif '.' in issnum or ',' in issnum:
#logger.fdebug('decimal detected.')
if ',' in issnum: issnum = re.sub(',','.', issnum)
if ',' in issnum: issnum = re.sub(',', '.', issnum)
issst = str(issnum).find('.')
if issst == 0:
issb4dec = 0
else:
issb4dec = str(issnum)[:issst]
decis = str(issnum)[issst+1:]
decis = str(issnum)[issst +1:]
if len(decis) == 1:
decisval = int(decis) * 10
issaftdec = str(decisval)
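
issuedigits normalizes issue numbers into sortable integers by scaling the whole part by 1000; a single decimal digit is multiplied by 10 (shown above) so '.1' and '.10' encode identically. The combining step falls outside this hunk, but the scheme amounts to this reconstruction (names are mine):

def encode_issue(whole, dec):
    # 5 -> 5000, 5.1 -> 5100, 5.12 -> 5120, 6 -> 6000, so decimals sort in between
    dec = dec.ljust(2, '0')[:2]   # '1' and '10' both become '10'
    return int(whole) * 1000 + int(dec) * 10

assert encode_issue('5', '1') == 5100
assert encode_issue('5', '12') == 5120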
@ -899,7 +899,7 @@ def issuedigits(issnum):
#validity check
if x < 0:
#logger.info("I've encountered a negative issue #: " + str(issnum) + ". Trying to accomodate.")
int_issnum = (int(x)*1000) - 1
int_issnum = (int(x) *1000) - 1
else: raise ValueError
except ValueError, e:
#this will account for any alpha in a issue#, so long as it doesn't have decimals.
@ -911,9 +911,9 @@ def issuedigits(issnum):
if issnum[x].isalpha():
#take first occurrence of alpha in string and carry it through
tstord = issnum[x:].rstrip()
tstord = re.sub('[\-\,\.\+]','', tstord).rstrip()
tstord = re.sub('[\-\,\.\+]', '', tstord).rstrip()
issno = issnum[:x].rstrip()
issno = re.sub('[\-\,\.\+]','', issno).rstrip()
issno = re.sub('[\-\,\.\+]', '', issno).rstrip()
try:
isschk = float(issno)
except ValueError, e:
@ -989,12 +989,12 @@ def annual_update():
i+=1
logger.info(str(i) + ' series have been updated in the annuals table.')
return
return
def replacetheslash(data):
# this is necessary for the cache directory to display properly in IE/FF.
# os.path.join will pipe in the '\' in windows, which won't resolve
# when viewing through cherrypy - so convert it and voila.
# os.path.join will pipe in the '\' in windows, which won't resolve
# when viewing through cherrypy - so convert it and voila.
if platform.system() == "Windows":
slashreplaced = data.replace('\\', '/')
else:
@ -1031,7 +1031,7 @@ def latestdate_fix():
logger.fdebug('No Series in watchlist to correct latest date')
return
for cl in comiclist:
if cl['ComicName_Filesafe'] is None:
if cl['ComicName_Filesafe'] is None:
cnupdate.append({"comicid": cl['ComicID'],
"comicname_filesafe": filesafe(cl['ComicName'])})
latestdate = cl['LatestDate']
@ -1043,9 +1043,9 @@ def latestdate_fix():
#logger.info('dash found at position ' + str(finddash))
if finddash != 4: #format of mm-yyyy
lat_month = latestdate[:finddash]
lat_year = latestdate[finddash+1:]
lat_year = latestdate[finddash +1:]
else: #format of yyyy-mm
lat_month = latestdate[finddash+1:]
lat_month = latestdate[finddash +1:]
lat_year = latestdate[:finddash]
latestdate = (lat_year) + '-' + str(lat_month) + '-01'
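
latestdate_fix repairs dates stored as either mm-yyyy or yyyy-mm by checking where the dash falls, then pins the day to 01. Compactly, the same normalization (helper name is mine):

def normalize_latest(latestdate):
    finddash = latestdate.find('-')
    if finddash != 4:   # 'mm-yyyy'
        lat_month, lat_year = latestdate[:finddash], latestdate[finddash + 1:]
    else:               # 'yyyy-mm'
        lat_year, lat_month = latestdate[:finddash], latestdate[finddash + 1:]
    return '%s-%s-01' % (lat_year, lat_month)

normalize_latest('06-2014')   # -> '2014-06-01'
normalize_latest('2014-06')   # -> '2014-06-01'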
@ -1081,7 +1081,7 @@ def checkFolder():
return
def LoadAlternateSearchNames(seriesname_alt, comicid):
import logger
import logger
#seriesname_alt = db.comics['AlternateSearch']
AS_Alt = []
Alternate_Names = {}
@ -1097,7 +1097,7 @@ def LoadAlternateSearchNames(seriesname_alt, comicid):
AS_Alternate = seriesname_alt
AS_Alt.append(seriesname_alt)
for calt in chkthealt:
AS_Alter = re.sub('##','',calt)
AS_Alter = re.sub('##', '', calt)
u_altsearchcomic = AS_Alter.encode('ascii', 'ignore').strip()
AS_formatrem_seriesname = re.sub('\s+', ' ', u_altsearchcomic)
if AS_formatrem_seriesname[:1] == ' ': AS_formatrem_seriesname = AS_formatrem_seriesname[1:]
@ -1132,7 +1132,7 @@ def havetotals(refreshit=None):
if issue is None:
if refreshit is not None:
logger.fdebug(str(comic['ComicID']) + ' has no issuedata available. Forcing complete Refresh/Rescan')
return True
return True
else:
continue
if mylar.ANNUALS_ON:
@ -1164,7 +1164,7 @@ def havetotals(refreshit=None):
return False # if it's 5/5 or 4/5, send back to updater and restore previous status'
try:
percent = (haveissues*100.0)/totalissues
percent = (haveissues *100.0) /totalissues
if percent > 100:
percent = 101
except (ZeroDivisionError, TypeError):
@ -1178,9 +1178,9 @@ def havetotals(refreshit=None):
recentstatus = 'Unknown'
elif comic['ForceContinuing'] == 1:
recentstatus = 'Continuing'
elif 'present' in comic['ComicPublished'].lower() or ( today()[:4] in comic['LatestDate']):
elif 'present' in comic['ComicPublished'].lower() or (today()[:4] in comic['LatestDate']):
latestdate = comic['LatestDate']
c_date = datetime.date(int(latestdate[:4]),int(latestdate[5:7]),1)
c_date = datetime.date(int(latestdate[:4]), int(latestdate[5:7]), 1)
n_date = datetime.date.today()
recentchk = (n_date - c_date).days
if comic['NewPublish']:
@ -1201,7 +1201,7 @@ def havetotals(refreshit=None):
"ComicImage": comic['ComicImage'],
"LatestIssue": comic['LatestIssue'],
"LatestDate": comic['LatestDate'],
"ComicPublished": re.sub('(N)','',comic['ComicPublished']).strip(),
"ComicPublished": re.sub('(N)', '', comic['ComicPublished']).strip(),
"Status": comic['Status'],
"recentstatus": recentstatus,
"percent": percent,
@ -1213,23 +1213,23 @@ def havetotals(refreshit=None):
def cvapi_check(web=None):
import logger
#if web is None:
#if web is None:
# logger.fdebug('[ComicVine API] ComicVine API Check Running...')
if mylar.CVAPI_TIME is None or mylar.CVAPI_TIME == '':
c_date = now()
c_obj_date = datetime.datetime.strptime(c_date,"%Y-%m-%d %H:%M:%S")
c_obj_date = datetime.datetime.strptime(c_date, "%Y-%m-%d %H:%M:%S")
mylar.CVAPI_TIME = c_obj_date
else:
if isinstance(mylar.CVAPI_TIME, unicode):
c_obj_date = datetime.datetime.strptime(mylar.CVAPI_TIME,"%Y-%m-%d %H:%M:%S")
c_obj_date = datetime.datetime.strptime(mylar.CVAPI_TIME, "%Y-%m-%d %H:%M:%S")
else:
c_obj_date = mylar.CVAPI_TIME
#if web is None: logger.fdebug('[ComicVine API] API Start Monitoring Time (~15mins): ' + str(mylar.CVAPI_TIME))
now_date = now()
n_date = datetime.datetime.strptime(now_date,"%Y-%m-%d %H:%M:%S")
n_date = datetime.datetime.strptime(now_date, "%Y-%m-%d %H:%M:%S")
#if web is None: logger.fdebug('[ComicVine API] Time now: ' + str(n_date))
absdiff = abs(n_date - c_obj_date)
mins = round(((absdiff.days * 24 * 60 * 60 + absdiff.seconds) / 60.0),2)
mins = round(((absdiff.days * 24 * 60 * 60 + absdiff.seconds) / 60.0), 2)
if mins < 15:
#if web is None: logger.info('[ComicVine API] Comicvine API count now at : ' + str(mylar.CVAPI_COUNT) + ' / ' + str(mylar.CVAPI_MAX) + ' in ' + str(mins) + ' minutes.')
if mylar.CVAPI_COUNT > mylar.CVAPI_MAX:
@ -1238,11 +1238,11 @@ def cvapi_check(web=None):
elif mins > 15:
mylar.CVAPI_COUNT = 0
c_date = now()
mylar.CVAPI_TIME = datetime.datetime.strptime(c_date,"%Y-%m-%d %H:%M:%S")
mylar.CVAPI_TIME = datetime.datetime.strptime(c_date, "%Y-%m-%d %H:%M:%S")
#if web is None: logger.info('[ComicVine API] 15 minute API interval resetting [' + str(mylar.CVAPI_TIME) + ']. Resetting API count to : ' + str(mylar.CVAPI_COUNT))
if web is None:
return
return
else:
line = str(mylar.CVAPI_COUNT) + ' hits / ' + str(mins) + ' minutes'
return line
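
cvapi_check keeps a rolling 15-minute ceiling on ComicVine API hits: when the stored window start is older than 15 minutes the counter resets, otherwise the running count is checked against the maximum. The same bookkeeping as a self-contained sketch (the class name and the 200-hit cap are illustrative):

import datetime

class ApiWindow(object):
    def __init__(self, max_hits=200, window_mins=15):
        self.max_hits = max_hits
        self.window_mins = window_mins
        self.count = 0
        self.started = datetime.datetime.now()

    def hit(self):
        elapsed = datetime.datetime.now() - self.started
        mins = (elapsed.days * 24 * 60 * 60 + elapsed.seconds) / 60.0
        if mins > self.window_mins:
            # window expired: restart the clock and the counter
            self.count = 0
            self.started = datetime.datetime.now()
        self.count += 1
        return self.count <= self.max_hits   # False means back off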
@ -1252,7 +1252,7 @@ def filesafe(comic):
u_comic = unicodedata.normalize('NFKD', comic).encode('ASCII', 'ignore').strip()
comicname_filesafe = re.sub('[\:\'\,\?\!\\\]', '', u_comic)
comicname_filesafe = re.sub('[\/]','-', comicname_filesafe)
comicname_filesafe = re.sub('[\/]', '-', comicname_filesafe)
return comicname_filesafe
@ -1266,7 +1266,7 @@ def IssueDetails(filelocation, IssueID=None):
if filelocation.endswith('.cbz'):
logger.fdebug('CBZ file detected. Checking for .xml within file')
shutil.copy( filelocation, dstlocation )
shutil.copy(filelocation, dstlocation)
else:
logger.fdebug('filename is not a cbz : ' + filelocation)
return
@ -1287,16 +1287,16 @@ def IssueDetails(filelocation, IssueID=None):
#looks for the first page and assumes it's the cover. (Alternate covers handled later on)
elif '000.jpg' in infile or '000.png' in infile or '00.jpg' in infile or '00.png' in infile:
logger.fdebug('Extracting primary image ' + infile + ' as coverfile for display.')
local_file = open(os.path.join(mylar.CACHE_DIR,'temp.jpg'), "wb")
local_file = open(os.path.join(mylar.CACHE_DIR, 'temp.jpg'), "wb")
local_file.write(inzipfile.read(infile))
local_file.close()
cover = "found"
elif any( [ '00a' in infile, '00b' in infile, '00c' in infile, '00d' in infile, '00e' in infile ]):
elif any(['00a' in infile, '00b' in infile, '00c' in infile, '00d' in infile, '00e' in infile]):
logger.fdebug('Found Alternate cover - ' + infile + ' . Extracting.')
altlist = ('00a', '00b', '00c', '00d', '00e')
for alt in altlist:
if alt in infile:
local_file = open(os.path.join(mylar.CACHE_DIR,'temp.jpg'), "wb")
local_file = open(os.path.join(mylar.CACHE_DIR, 'temp.jpg'), "wb")
local_file.write(inzipfile.read(infile))
local_file.close()
cover = "found"
@ -1304,12 +1304,12 @@ def IssueDetails(filelocation, IssueID=None):
elif ('001.jpg' in infile or '001.png' in infile) and cover == "notfound":
logger.fdebug('Extracting primary image ' + infile + ' as coverfile for display.')
local_file = open(os.path.join(mylar.CACHE_DIR,'temp.jpg'), "wb")
local_file = open(os.path.join(mylar.CACHE_DIR, 'temp.jpg'), "wb")
local_file.write(inzipfile.read(infile))
local_file.close()
cover = "found"
ComicImage = os.path.join('cache', 'temp.jpg?'+str(modtime))
ComicImage = os.path.join('cache', 'temp.jpg?' +str(modtime))
IssueImage = replacetheslash(ComicImage)
@ -1319,12 +1319,12 @@ def IssueDetails(filelocation, IssueID=None):
unzip_cmd = "/usr/bin/unzip"
try:
#unzip -z will extract the zip comment field.
data = subprocess.check_output( [ unzip_cmd, '-z', dstlocation ] )
data = subprocess.check_output([unzip_cmd, '-z', dstlocation])
# return data is encoded in bytes, not unicode. Need to figure out how to run check_output returning utf-8
issuetag = 'comment'
except CalledProcessError as e:
logger.warn('Unable to extract comment field from zipfile.')
#logger.info('data:' + str(data))
if issuetag == 'xml':
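
Shelling out to /usr/bin/unzip -z above reads the zip archive comment (where a 'comment'-style tag lives). The stdlib exposes the same field directly, so an alternative sketch without the subprocess would be:

import zipfile

def read_zip_comment(cbzpath):
    # ZipFile.comment holds the archive comment; bytes on Python 3, str on 2
    with zipfile.ZipFile(cbzpath, 'r') as zf:
        return zf.comment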
@ -1350,7 +1350,7 @@ def IssueDetails(filelocation, IssueID=None):
except:
summary = "None"
if '*List' in summary:
if '*List' in summary:
summary_cut = summary.find('*List')
summary = summary[:summary_cut]
#check here to see if Covers exist as they will probably be misnamed when trying to determine the actual cover
@ -1410,7 +1410,7 @@ def IssueDetails(filelocation, IssueID=None):
try:
pagecount = result.getElementsByTagName('PageCount')[0].firstChild.wholeText
except:
pagecount = 0
pagecount = 0
logger.fdebug("number of pages I counted: " + str(pagecount))
i = 0
while (i < int(pagecount)):
@ -1452,7 +1452,7 @@ def IssueDetails(filelocation, IssueID=None):
cover_artist = "None"
penciller = "None"
inker = "None"
for cl in dt['credits']:
for cl in dt['credits']:
if cl['role'] == 'Editor':
if editor == "None": editor = cl['person']
else: editor += ', ' + cl['person']
@ -1530,7 +1530,7 @@ def get_issue_title(IssueID=None, ComicID=None, IssueNumber=None):
if issue is None:
logger.fdebug('Unable to locate given IssueID within the db. Assuming Issue Title is None.')
return None
return issue['IssueName']
def int_num(s):
@ -1538,7 +1538,7 @@ def int_num(s):
return int(s)
except ValueError:
return float(s)
def listLibrary():
import db
library = {}
@ -1558,11 +1558,11 @@ def incr_snatched(ComicID):
import db, logger
myDB = db.DBConnection()
incr_count = myDB.selectone("SELECT Have FROM Comics WHERE ComicID=?", [ComicID]).fetchone()
logger.fdebug('Incrementing HAVE count total to : ' + str( incr_count['Have'] + 1 ))
logger.fdebug('Incrementing HAVE count total to : ' + str(incr_count['Have'] + 1))
newCtrl = {"ComicID": ComicID}
newVal = {"Have": incr_count['Have'] + 1}
myDB.upsert("comics", newVal, newCtrl)
return
return
def duplicate_filecheck(filename, ComicID=None, IssueID=None, StoryArcID=None):
#filename = the filename in question that's being checked against
@ -1597,10 +1597,10 @@ def duplicate_filecheck(filename, ComicID=None, IssueID=None, StoryArcID=None):
logger.info('[DUPECHECK] Series has invalid issue totals [' + str(havechk['Have']) + '/' + str(havechk['Total']) + '] Attempting to Refresh & continue post-processing this issue.')
cid.append(ComicID)
logger.fdebug('[DUPECHECK] ComicID: ' + str(ComicID))
mylar.updater.dbUpdate(ComicIDList=cid,calledfrom='dupechk')
mylar.updater.dbUpdate(ComicIDList=cid, calledfrom='dupechk')
return duplicate_filecheck(filename, ComicID, IssueID, StoryArcID)
else:
rtnval = "dupe"
rtnval = "dupe"
else:
rtnval = "dupe"
else:
@ -1705,25 +1705,25 @@ def parse_32pfeed(rssfeedline):
KEYS_32P = {}
if mylar.ENABLE_32P and len(rssfeedline) > 1:
userid_st = rssfeedline.find('&user')
userid_en = rssfeedline.find('&',userid_st+1)
userid_en = rssfeedline.find('&', userid_st +1)
if userid_en == -1:
USERID_32P = rssfeedline[userid_st+6:]
USERID_32P = rssfeedline[userid_st +6:]
else:
USERID_32P = rssfeedline[userid_st+6:userid_en]
USERID_32P = rssfeedline[userid_st +6:userid_en]
auth_st = rssfeedline.find('&auth')
auth_en = rssfeedline.find('&',auth_st+1)
auth_en = rssfeedline.find('&', auth_st +1)
if auth_en == -1:
AUTH_32P = rssfeedline[auth_st+6:]
AUTH_32P = rssfeedline[auth_st +6:]
else:
AUTH_32P = rssfeedline[auth_st+6:auth_en]
AUTH_32P = rssfeedline[auth_st +6:auth_en]
authkey_st = rssfeedline.find('&authkey')
authkey_en = rssfeedline.find('&',authkey_st+1)
authkey_en = rssfeedline.find('&', authkey_st +1)
if authkey_en == -1:
AUTHKEY_32P = rssfeedline[authkey_st+9:]
AUTHKEY_32P = rssfeedline[authkey_st +9:]
else:
AUTHKEY_32P = rssfeedline[authkey_st+9:authkey_en]
AUTHKEY_32P = rssfeedline[authkey_st +9:authkey_en]
KEYS_32P = {"user": USERID_32P,
"auth": AUTH_32P,


@ -47,7 +47,7 @@ def is_exists(comicid):
return False
def addComictoDB(comicid,mismatch=None,pullupd=None,imported=None,ogcname=None,calledfrom=None,annload=None,chkwant=None,issuechk=None,issuetype=None,latestissueinfo=None):
def addComictoDB(comicid, mismatch=None, pullupd=None, imported=None, ogcname=None, calledfrom=None, annload=None, chkwant=None, issuechk=None, issuetype=None, latestissueinfo=None):
# Putting this here to get around the circular import. Will try to use this to update images at later date.
# from mylar import cache
@ -92,7 +92,7 @@ def addComictoDB(comicid,mismatch=None,pullupd=None,imported=None,ogcname=None,c
helpers.ComicSort(comicorder=mylar.COMICSORT, imported=comicid)
# we need to lookup the info for the requested ComicID in full now
comic = cv.getComic(comicid,'comic')
comic = cv.getComic(comicid, 'comic')
logger.fdebug(comic)
if not comic:
@ -124,7 +124,7 @@ def addComictoDB(comicid,mismatch=None,pullupd=None,imported=None,ogcname=None,c
if gcdinfo == "No Match":
updater.no_searchresults(comicid)
nomatch = "true"
logger.info('There was an error when trying to add ' + comic['ComicName'] + ' (' + comic['ComicYear'] + ')' )
logger.info('There was an error when trying to add ' + comic['ComicName'] + ' (' + comic['ComicYear'] + ')')
return nomatch
else:
mismatch_com = "yes"
@ -150,18 +150,18 @@ def addComictoDB(comicid,mismatch=None,pullupd=None,imported=None,ogcname=None,c
if mylar.CV_ONLY:
#we'll defer this until later when we grab all the issues and then figure it out
logger.info('Uh-oh. I cannot find a Series Year for this series. I am going to try analyzing deeper.')
SeriesYear = cv.getComic(comicid,'firstissue',comic['FirstIssueID'])
SeriesYear = cv.getComic(comicid, 'firstissue', comic['FirstIssueID'])
if SeriesYear == '0000':
logger.info('Ok - I could not find a Series Year at all. Loading in the issue data now and will figure out the Series Year.')
CV_NoYearGiven = "yes"
issued = cv.getComic(comicid,'issue')
issued = cv.getComic(comicid, 'issue')
SeriesYear = issued['firstdate'][:4]
else:
SeriesYear = gcdinfo['SeriesYear']
else:
SeriesYear = comic['ComicYear']
logger.info('Successfully retrieved details for ' + comic['ComicName'] )
logger.info('Successfully retrieved details for ' + comic['ComicName'])
#since the weekly issue check could return either annuals or issues, let's initialize it here so it carries through properly.
weeklyissue_check = []
@ -237,7 +237,7 @@ def addComictoDB(comicid,mismatch=None,pullupd=None,imported=None,ogcname=None,c
logger.fdebug('[IMPORTER-ANNUAL] - ' + str(issueid) + ' already exists & was refreshed.')
num_res+=1 # need to manually increment since not a for-next loop
continue
issued = cv.getComic(issueid,'issue')
issued = cv.getComic(issueid, 'issue')
if issued is None or len(issued) == 0:
logger.fdebug('[IMPORTER-ANNUAL] - Could not find any annual information...')
pass
@ -267,7 +267,7 @@ def addComictoDB(comicid,mismatch=None,pullupd=None,imported=None,ogcname=None,c
"IssueName": issname,
"ComicID": comicid,
"ComicName": comic['ComicName'],
"ReleaseComicID": re.sub('4050-','',firstval['Comic_ID']).strip(),
"ReleaseComicID": re.sub('4050-', '', firstval['Comic_ID']).strip(),
"ReleaseComicName": sr['name'],
"Status": "Skipped"}
myDB.upsert("annuals", newVals, newCtrl)
@ -319,7 +319,7 @@ def addComictoDB(comicid,mismatch=None,pullupd=None,imported=None,ogcname=None,c
if comlocation is None:
comicdir = comicname_filesafe
series = comicdir
publisher = re.sub('!','',comic['ComicPublisher']) # thanks Boom!
publisher = re.sub('!', '', comic['ComicPublisher']) # thanks Boom!
publisher = helpers.filesafe(publisher)
year = SeriesYear
comversion = comic['ComicVersion']
@ -327,7 +327,7 @@ def addComictoDB(comicid,mismatch=None,pullupd=None,imported=None,ogcname=None,c
comversion = 'None'
#if comversion is None, remove it so it doesn't populate with 'None'
if comversion == 'None':
chunk_f_f = re.sub('\$VolumeN','',mylar.FILE_FORMAT)
chunk_f_f = re.sub('\$VolumeN', '', mylar.FILE_FORMAT)
chunk_f = re.compile(r'\s+')
mylar.FILE_FORMAT = chunk_f.sub(' ', chunk_f_f)
@ -386,7 +386,7 @@ def addComictoDB(comicid,mismatch=None,pullupd=None,imported=None,ogcname=None,c
comicIssues = str(int(comic['ComicIssues']) + 1)
#let's download the image...
if os.path.exists(mylar.CACHE_DIR):pass
if os.path.exists(mylar.CACHE_DIR): pass
else:
#let's make the dir.
try:
@ -404,7 +404,7 @@ def addComictoDB(comicid,mismatch=None,pullupd=None,imported=None,ogcname=None,c
#urllib.urlretrieve(str(thisci), str(coverfile))
try:
cimage = re.sub('[\+]','%20', comic['ComicImage'])
cimage = re.sub('[\+]', '%20', comic['ComicImage'])
request = urllib2.Request(cimage)#, headers={'Content-Type': 'application/x-www-form-urlencoded'})
#request.add_header('User-Agent', str(mylar.USER_AGENT))
@ -426,7 +426,7 @@ def addComictoDB(comicid,mismatch=None,pullupd=None,imported=None,ogcname=None,c
logger.warn('[%s] Error fetching data using : %s' % (e, comic['ComicImage']))
logger.info('Attempting to use alternate image size to get cover.')
try:
cimage = re.sub('[\+]','%20', comic['ComicImageALT'])
cimage = re.sub('[\+]', '%20', comic['ComicImageALT'])
request = urllib2.Request(cimage)
response = urllib2.urlopen(request)
com_image = response.read()
@ -437,7 +437,7 @@ def addComictoDB(comicid,mismatch=None,pullupd=None,imported=None,ogcname=None,c
except Exception, e:
logger.warn('[%s] Error fetching data using : %s' % (e, comic['ComicImageALT']))
PRComicImage = os.path.join('cache',str(comicid) + ".jpg")
PRComicImage = os.path.join('cache', str(comicid) + ".jpg")
ComicImage = helpers.replacetheslash(PRComicImage)
#this is for Firefox when outside the LAN...it works, but I don't know how to implement it
@ -447,8 +447,8 @@ def addComictoDB(comicid,mismatch=None,pullupd=None,imported=None,ogcname=None,c
#if the comic cover local is checked, save a cover.jpg to the series folder.
if mylar.COMIC_COVER_LOCAL:
try:
comiclocal = os.path.join(comlocation,'cover.jpg')
shutil.copy(coverfile,comiclocal)
comiclocal = os.path.join(comlocation, 'cover.jpg')
shutil.copy(coverfile, comiclocal)
except IOError as e:
logger.error('Unable to save cover (' + str(coverfile) + ') into series directory (' + str(comiclocal) + ') at this time.')
@ -496,11 +496,11 @@ def addComictoDB(comicid,mismatch=None,pullupd=None,imported=None,ogcname=None,c
if CV_NoYearGiven == 'no':
#if set to 'no' then we haven't pulled down the issues, otherwise we did it already
issued = cv.getComic(comicid,'issue')
issued = cv.getComic(comicid, 'issue')
if issued is None:
logger.warn('Unable to retrieve data from ComicVine. Get your own API key already!')
return
logger.info('Successfully retrieved issue details for ' + comic['ComicName'] )
logger.info('Successfully retrieved issue details for ' + comic['ComicName'])
#move to own function so can call independently to only refresh issue data
#issued is from cv.getComic, comic['ComicName'] & comicid would both be already known to do independent call.
@ -510,8 +510,8 @@ def addComictoDB(comicid,mismatch=None,pullupd=None,imported=None,ogcname=None,c
return
if mylar.CVINFO or (mylar.CV_ONLY and mylar.CVINFO):
if not os.path.exists(os.path.join(comlocation,"cvinfo")) or mylar.CV_ONETIMER:
with open(os.path.join(comlocation,"cvinfo"),"w") as text_file:
if not os.path.exists(os.path.join(comlocation, "cvinfo")) or mylar.CV_ONETIMER:
with open(os.path.join(comlocation, "cvinfo"), "w") as text_file:
text_file.write(str(comic['ComicURL']))
logger.info('Updating complete for: ' + comic['ComicName'])
@ -542,16 +542,16 @@ def addComictoDB(comicid,mismatch=None,pullupd=None,imported=None,ogcname=None,c
else:
if mylar.IMP_MOVE:
logger.info('Mass import - Move files')
moveit.movefiles(comicid,comlocation,ogcname)
moveit.movefiles(comicid, comlocation, ogcname)
else:
logger.info('Mass import - Moving not Enabled. Setting Archived Status for import.')
moveit.archivefiles(comicid,ogcname)
moveit.archivefiles(comicid, ogcname)
#check for existing files...
statbefore = myDB.selectone("SELECT * FROM issues WHERE ComicID=? AND Issue_Number=?", [comicid,str(latestiss)]).fetchone()
statbefore = myDB.selectone("SELECT * FROM issues WHERE ComicID=? AND Issue_Number=?", [comicid, str(latestiss)]).fetchone()
logger.fdebug('issue: ' + str(latestiss) + ' status before chk :' + str(statbefore['Status']))
updater.forceRescan(comicid)
statafter = myDB.selectone("SELECT * FROM issues WHERE ComicID=? AND Issue_Number=?", [comicid,str(latestiss)]).fetchone()
statafter = myDB.selectone("SELECT * FROM issues WHERE ComicID=? AND Issue_Number=?", [comicid, str(latestiss)]).fetchone()
logger.fdebug('issue: ' + str(latestiss) + ' status after chk :' + str(statafter['Status']))
logger.fdebug('pullupd: ' + str(pullupd))
@ -562,7 +562,7 @@ def addComictoDB(comicid,mismatch=None,pullupd=None,imported=None,ogcname=None,c
# do this for only Present comics....
if mylar.AUTOWANT_UPCOMING and lastpubdate == 'Present' and series_status == 'Active': #and 'Present' in gcdinfo['resultPublished']:
logger.fdebug('latestissue: #' + str(latestiss))
chkstats = myDB.selectone("SELECT * FROM issues WHERE ComicID=? AND Issue_Number=?", [comicid,str(latestiss)]).fetchone()
chkstats = myDB.selectone("SELECT * FROM issues WHERE ComicID=? AND Issue_Number=?", [comicid, str(latestiss)]).fetchone()
if chkstats is None:
if mylar.ANNUALS_ON:
chkstats = myDB.selectone("SELECT * FROM annuals WHERE ComicID=? AND Issue_Number=?", [comicid, latestiss]).fetchone()
@ -575,7 +575,7 @@ def addComictoDB(comicid,mismatch=None,pullupd=None,imported=None,ogcname=None,c
cn_pull = comicname_filesafe
else:
cn_pull = comic['ComicName']
updater.newpullcheck(ComicName=cn_pull,ComicID=comicid,issue=latestiss)
updater.newpullcheck(ComicName=cn_pull, ComicID=comicid, issue=latestiss)
#here we grab issues that have been marked as wanted above...
results = []
@ -647,7 +647,7 @@ def addComictoDB(comicid,mismatch=None,pullupd=None,imported=None,ogcname=None,c
logger.info('Successfully added ' + comic['ComicName'] + ' (' + str(SeriesYear) + ') by directly using the ComicVine ID')
return
def GCDimport(gcomicid, pullupd=None,imported=None,ogcname=None):
def GCDimport(gcomicid, pullupd=None, imported=None, ogcname=None):
# this is for importing via GCD only and not using CV.
# used when volume spanning is discovered for a Comic (and can't be added using CV).
# Issue Counts are wrong (and can't be added).
@ -707,11 +707,11 @@ def GCDimport(gcomicid, pullupd=None,imported=None,ogcname=None):
resultURL = "/series/" + str(comicid) + "/"
gcdinfo=parseit.GCDdetails(comseries=None, resultURL=resultURL, vari_loop=0, ComicID=gcdcomicid, TotalIssues=ComicIssues, issvariation=None, resultPublished=None)
if gcdinfo == "No Match":
logger.warn("No matching result found for " + ComicName + " (" + ComicYear + ")" )
logger.warn("No matching result found for " + ComicName + " (" + ComicYear + ")")
updater.no_searchresults(gcomicid)
nomatch = "true"
return nomatch
logger.info(u"Sucessfully retrieved details for " + ComicName )
logger.info(u"Sucessfully retrieved details for " + ComicName)
# print ("Series Published" + parseit.resultPublished)
#--End
@ -726,11 +726,11 @@ def GCDimport(gcomicid, pullupd=None,imported=None,ogcname=None):
if ':' in u_comicname or '/' in u_comicname or ',' in u_comicname:
comicdir = u_comicname
if ':' in comicdir:
comicdir = comicdir.replace(':','')
comicdir = comicdir.replace(':', '')
if '/' in comicdir:
comicdir = comicdir.replace('/','-')
comicdir = comicdir.replace('/', '-')
if ',' in comicdir:
comicdir = comicdir.replace(',','')
comicdir = comicdir.replace(',', '')
else: comicdir = u_comicname
series = comicdir
@ -774,7 +774,7 @@ def GCDimport(gcomicid, pullupd=None,imported=None,ogcname=None):
comicIssues = gcdinfo['totalissues']
#let's download the image...
if os.path.exists(mylar.CACHE_DIR):pass
if os.path.exists(mylar.CACHE_DIR): pass
else:
#let's make the dir.
try:
@ -790,7 +790,7 @@ def GCDimport(gcomicid, pullupd=None,imported=None,ogcname=None):
urllib.urlretrieve(str(ComicImage), str(coverfile))
try:
with open(str(coverfile)) as f:
ComicImage = os.path.join('cache',str(gcomicid) + ".jpg")
ComicImage = os.path.join('cache', str(gcomicid) + ".jpg")
#this is for Firefox when outside the LAN...it works, but I don't know how to implement it
#without breaking the normal flow for inside the LAN (above)
@ -800,7 +800,7 @@ def GCDimport(gcomicid, pullupd=None,imported=None,ogcname=None):
#if the comic cover local is checked, save a cover.jpg to the series folder.
if mylar.COMIC_COVER_LOCAL:
comiclocal = os.path.join(comlocation + "/cover.jpg")
shutil.copy(ComicImage,comiclocal)
shutil.copy(ComicImage, comiclocal)
except IOError as e:
logger.error(u"Unable to save cover locally at this time.")
@ -832,7 +832,7 @@ def GCDimport(gcomicid, pullupd=None,imported=None,ogcname=None):
if pullupd is None:
helpers.ComicSort(sequence='update')
logger.info(u"Sucessfully retrieved issue details for " + ComicName )
logger.info(u"Sucessfully retrieved issue details for " + ComicName)
n = 0
iscnt = int(comicIssues)
issnum = []
@ -856,7 +856,7 @@ def GCDimport(gcomicid, pullupd=None,imported=None,ogcname=None):
if gcdinfo['gcdvariation'] == 'gcd':
#print ("gcd-variation accounted for.")
issdate = '0000-00-00'
int_issnum = int ( issis / 1000 )
int_issnum = int (issis / 1000)
break
if 'nn' in str(gcdval['GCDIssue']):
#no number detected - GN, TP or the like
@ -867,7 +867,7 @@ def GCDimport(gcomicid, pullupd=None,imported=None,ogcname=None):
issst = str(gcdval['GCDIssue']).find('.')
issb4dec = str(gcdval['GCDIssue'])[:issst]
#if the length of decimal is only 1 digit, assume it's a tenth
decis = str(gcdval['GCDIssue'])[issst+1:]
decis = str(gcdval['GCDIssue'])[issst +1:]
if len(decis) == 1:
decisval = int(decis) * 10
issaftdec = str(decisval)
@ -881,7 +881,7 @@ def GCDimport(gcomicid, pullupd=None,imported=None,ogcname=None):
gcdis = int(str(gcdval['GCDIssue'])) * 1000
gcd_issue = str(gcdval['GCDIssue'])
#get the latest issue / date using the date.
int_issnum = int( gcdis / 1000 )
int_issnum = int(gcdis / 1000)
issdate = str(gcdval['GCDDate'])
issid = "G" + str(gcdval['IssueID'])
if gcdval['GCDDate'] > latestdate:
@ -946,7 +946,7 @@ def GCDimport(gcomicid, pullupd=None,imported=None,ogcname=None):
if mylar.CVINFO:
if not os.path.exists(comlocation + "/cvinfo"):
with open(comlocation + "/cvinfo","w") as text_file:
with open(comlocation + "/cvinfo", "w") as text_file:
text_file.write("http://www.comicvine.com/volume/49-" + str(comicid))
logger.info(u"Updating complete for: " + ComicName)
@ -957,10 +957,10 @@ def GCDimport(gcomicid, pullupd=None,imported=None,ogcname=None):
else:
if mylar.IMP_MOVE:
logger.info("Mass import - Move files")
moveit.movefiles(gcomicid,comlocation,ogcname)
moveit.movefiles(gcomicid, comlocation, ogcname)
else:
logger.info("Mass import - Moving not Enabled. Setting Archived Status for import.")
moveit.archivefiles(gcomicid,ogcname)
moveit.archivefiles(gcomicid, ogcname)
#check for existing files...
updater.forceRescan(gcomicid)
@ -989,7 +989,7 @@ def GCDimport(gcomicid, pullupd=None,imported=None,ogcname=None):
logger.info(u"Finished grabbing what I could.")
def issue_collection(issuedata,nostatus):
def issue_collection(issuedata, nostatus):
myDB = db.DBConnection()
nowdate = datetime.datetime.now()
@ -1024,7 +1024,7 @@ def issue_collection(issuedata,nostatus):
# Only change the status & add DateAdded if the issue is already in the database
if iss_exists is None:
newValueDict['DateAdded'] = helpers.today()
datechk = re.sub('-','', issue['ReleaseDate']).strip() # converts date to 20140718 format
datechk = re.sub('-', '', issue['ReleaseDate']).strip() # converts date to 20140718 format
#logger.fdebug('issue #' + str(issue['Issue_Number']) + 'does not exist in db.')
if mylar.AUTOWANT_ALL:
newValueDict['Status'] = "Wanted"
@ -1066,7 +1066,7 @@ def manualAnnual(manual_comicid, comicname, comicyear, comicid):
n = 0
noissues = sr['ComicIssues']
logger.fdebug('there are ' + str(noissues) + ' annuals within this series.')
issued = cv.getComic(re.sub('4050-','',manual_comicid).strip(),'issue')
issued = cv.getComic(re.sub('4050-', '', manual_comicid).strip(), 'issue')
while (n < int(noissues)):
try:
firstval = issued['issuechoice'][n]
@ -1095,9 +1095,9 @@ def manualAnnual(manual_comicid, comicname, comicyear, comicid):
"ReleaseDate": stdate,
"IssueName": issname,
"ComicID": comicid, #this is the series ID
"ReleaseComicID": re.sub('4050-','',manual_comicid).strip(), #this is the series ID for the annual(s)
"ReleaseComicID": re.sub('4050-', '', manual_comicid).strip(), #this is the series ID for the annual(s)
"ComicName": comicname, #series ComicName
"ReleaseComicName" :sr['ComicName'], #series ComicName for the manual_comicid
"ReleaseComicName": sr['ComicName'], #series ComicName for the manual_comicid
"Status": "Skipped"}
#need to add in the values for the new series to be added.
#"M_ComicName": sr['ComicName'],
@ -1121,7 +1121,7 @@ def updateissuedata(comicid, comicname=None, issued=None, comicIssues=None, call
#to facilitate independent calls to updateissuedata ONLY, account for data not available and get it.
#chkType comes from the weeklypulllist - either 'annual' or not to distinguish annuals vs. issues
if comicIssues is None:
comic = cv.getComic(comicid,'comic')
comic = cv.getComic(comicid, 'comic')
if comic is None:
logger.warn('Error retrieving from ComicVine - either the site is down or you are not using your own CV API key')
return
@ -1132,7 +1132,7 @@ def updateissuedata(comicid, comicname=None, issued=None, comicIssues=None, call
if comicname is None:
comicname = comic['ComicName']
if issued is None:
issued = cv.getComic(comicid,'issue')
issued = cv.getComic(comicid, 'issue')
if issued is None:
logger.warn('Error retrieving from ComicVine - either the site is down or you are not using your own CV API key')
return
@ -1177,7 +1177,7 @@ def updateissuedata(comicid, comicname=None, issued=None, comicIssues=None, call
issdate = str(firstval['Issue_Date'])
storedate = str(firstval['Store_Date'])
if issnum.isdigit():
int_issnum = int( issnum ) * 1000
int_issnum = int(issnum) * 1000
else:
if 'a.i.' in issnum.lower() or 'ai' in issnum.lower():
issnum = re.sub('\.', '', issnum)
@ -1199,7 +1199,7 @@ def updateissuedata(comicid, comicname=None, issued=None, comicIssues=None, call
#issnum = utf-8 will encode the infinity symbol without any help
int_issnum = 9999999999 * 1000 # set 9999999999 for integer value of issue
elif '.' in issnum or ',' in issnum:
if ',' in issnum: issnum = re.sub(',','.', issnum)
if ',' in issnum: issnum = re.sub(',', '.', issnum)
issst = str(issnum).find('.')
#logger.fdebug("issst:" + str(issst))
if issst == 0:
@ -1208,7 +1208,7 @@ def updateissuedata(comicid, comicname=None, issued=None, comicIssues=None, call
issb4dec = str(issnum)[:issst]
#logger.fdebug("issb4dec:" + str(issb4dec))
#if the length of decimal is only 1 digit, assume it's a tenth
decis = str(issnum)[issst+1:]
decis = str(issnum)[issst +1:]
#logger.fdebug("decis:" + str(decis))
if len(decis) == 1:
decisval = int(decis) * 10
@ -1237,7 +1237,7 @@ def updateissuedata(comicid, comicname=None, issued=None, comicIssues=None, call
if x < 0:
logger.info('I have encountered a negative issue #: ' + str(issnum) + '. Trying to accommodate.')
logger.fdebug('value of x is : ' + str(x))
int_issnum = (int(x)*1000) - 1
int_issnum = (int(x) *1000) - 1
else: raise ValueError
except ValueError, e:
x = 0
@ -1248,9 +1248,9 @@ def updateissuedata(comicid, comicname=None, issued=None, comicIssues=None, call
if issnum[x].isalpha():
#take first occurrence of alpha in string and carry it through
tstord = issnum[x:].rstrip()
tstord = re.sub('[\-\,\.\+]','', tstord).rstrip()
tstord = re.sub('[\-\,\.\+]', '', tstord).rstrip()
issno = issnum[:x].rstrip()
issno = re.sub('[\-\,\.\+]','', issno).rstrip()
issno = re.sub('[\-\,\.\+]', '', issno).rstrip()
try:
isschk = float(issno)
except ValueError, e:
@ -1325,10 +1325,10 @@ def updateissuedata(comicid, comicname=None, issued=None, comicIssues=None, call
else:
if len(issuedata) >= 1 and not calledfrom == 'dbupdate':
logger.fdebug('initiating issue updating - info & status')
issue_collection(issuedata,nostatus='False')
issue_collection(issuedata, nostatus='False')
else:
logger.fdebug('initiating issue updating - just the info')
issue_collection(issuedata,nostatus='True')
issue_collection(issuedata, nostatus='True')
styear = str(SeriesYear)
@ -1337,7 +1337,7 @@ def updateissuedata(comicid, comicname=None, issued=None, comicIssues=None, call
else:
stmonth = helpers.fullmonth(firstdate[5:7])
ltyear = re.sub('\s','', latestdate[:4])
ltyear = re.sub('\s', '', latestdate[:4])
if latestdate[5:7] == '00':
ltmonth = "?"
else:
@ -1346,12 +1346,12 @@ def updateissuedata(comicid, comicname=None, issued=None, comicIssues=None, call
#try to determine if it's an 'actively' published comic from above dates
#threshold is if it's within a month (<55 days) let's assume it's recent.
try:
c_date = datetime.date(int(latestdate[:4]),int(latestdate[5:7]),1)
c_date = datetime.date(int(latestdate[:4]), int(latestdate[5:7]), 1)
except:
logger.error('Cannot determine Latest Date for given series. This is most likely due to an issue having a date of : 0000-00-00')
latestdate = str(SeriesYear) + '-01-01'
logger.error('Setting Latest Date to be ' + str(latestdate) + '. You should inform CV that the issue data is stale.')
c_date = datetime.date(int(latestdate[:4]),int(latestdate[5:7]),1)
c_date = datetime.date(int(latestdate[:4]), int(latestdate[5:7]), 1)
n_date = datetime.date.today()
recentchk = (n_date - c_date).days
@ -1470,7 +1470,7 @@ def annual_check(ComicName, SeriesYear, comicid, issuetype, issuechk, weeklyissu
logger.fdebug('[IMPORTER-ANNUAL] - ' + str(issueid) + ' already exists & was refreshed.')
num_res+=1 # need to manually increment since not a for-next loop
continue
issued = cv.getComic(issueid,'issue')
issued = cv.getComic(issueid, 'issue')
if issued is None or len(issued) == 0:
logger.fdebug('[IMPORTER-ANNUAL] - Could not find any annual information...')
pass
@ -1500,12 +1500,12 @@ def annual_check(ComicName, SeriesYear, comicid, issuetype, issuechk, weeklyissu
"IssueName": issname,
"ComicID": comicid,
"ComicName": ComicName,
"ReleaseComicID": re.sub('4050-','',firstval['Comic_ID']).strip(),
"ReleaseComicID": re.sub('4050-', '', firstval['Comic_ID']).strip(),
"ReleaseComicName": sr['name']}
iss_exists = myDB.selectone('SELECT * from annuals WHERE IssueID=?', [issid]).fetchone()
if iss_exists is None:
datechk = re.sub('-','', issdate).strip() # converts date to 20140718 format
datechk = re.sub('-', '', issdate).strip() # converts date to 20140718 format
if mylar.AUTOWANT_ALL:
newVals['Status'] = "Wanted"
elif int(datechk) >= int(nowtime) and mylar.AUTOWANT_UPCOMING:


@ -17,7 +17,7 @@ from __future__ import with_statement
import os
import glob
import re
import re
import shutil
import mylar
@ -28,28 +28,28 @@ def libraryScan(dir=None, append=False, ComicID=None, ComicName=None, cron=None)
if cron and not mylar.LIBRARYSCAN:
return
if not dir:
dir = mylar.COMIC_DIR
# If we're appending a dir, it's coming from the post processor which is
# already bytestring
if not append:
dir = dir.encode(mylar.SYS_ENCODING)
if not os.path.isdir(dir):
logger.warn('Cannot find directory: %s. Not scanning' % dir.decode(mylar.SYS_ENCODING, 'replace'))
return
logger.info('Scanning comic directory: %s' % dir.decode(mylar.SYS_ENCODING, 'replace'))
basedir = dir
comic_list = []
comiccnt = 0
extensions = ('cbr','cbz')
for r,d,f in os.walk(dir):
extensions = ('cbr', 'cbz')
for r, d, f in os.walk(dir):
#for directory in d[:]:
# if directory.startswith("."):
# d.remove(directory)
@ -66,10 +66,10 @@ def libraryScan(dir=None, append=False, ComicID=None, ComicName=None, cron=None)
unicode_comic_path = comicpath.decode(mylar.SYS_ENCODING, 'replace')
comiccnt+=1
comic_dict = { 'ComicFilename': comic,
comic_dict = {'ComicFilename': comic,
'ComicLocation': comicpath,
'ComicSize': comicsize,
'Unicode_ComicLocation': unicode_comic_path }
'Unicode_ComicLocation': unicode_comic_path}
comic_list.append(comic_dict)
logger.info("I've found a total of " + str(comiccnt) + " comics....analyzing now")
@ -107,7 +107,7 @@ def libraryScan(dir=None, append=False, ComicID=None, ComicName=None, cron=None)
watchcomic = watchcomic[-4:]
alt_chk = "no" # alt-checker flag (default to no)
# account for alternate names as well
if watch['AlternateSearch'] is not None and watch['AlternateSearch'] != 'None':
altcomic = re.sub('[\_\#\,\/\:\;\.\-\!\$\%\&\+\'\?\@]', '', watch['AlternateSearch']).encode('utf-8').strip()
@ -132,7 +132,7 @@ def libraryScan(dir=None, append=False, ComicID=None, ComicName=None, cron=None)
watchfound = 0
datelist = ['jan','feb','mar','apr','may','jun','jul','aug','sep','oct','nov','dec']
datelist = ['jan', 'feb', 'mar', 'apr', 'may', 'jun', 'jul', 'aug', 'sep', 'oct', 'nov', 'dec']
# datemonth = {'one':1,'two':2,'three':3,'four':4,'five':5,'six':6,'seven':7,'eight':8,'nine':9,'ten':10,'eleven':$
# #search for number as text, and change to numeric
# for numbs in basnumbs:
@ -166,7 +166,7 @@ def libraryScan(dir=None, append=False, ComicID=None, ComicName=None, cron=None)
# # if used by ComicTagger, Notes field will have the IssueID.
# issuenotes = issueinfo[0]['notes']
# logger.fdebug('Notes: ' + issuenotes)
comfilename = i['ComicFilename']
comlocation = i['ComicLocation']
@ -248,7 +248,7 @@ def libraryScan(dir=None, append=False, ComicID=None, ComicName=None, cron=None)
#start at the end.
logger.fdebug("word: " + str(cs[i]))
#assume once we find issue - everything prior is the actual title
#idetected = no will ignore everything so it will assume all title
#idetected = no will ignore everything so it will assume all title
if (cs[i][:-2] == '19' or cs[i][:-2] == '20') and idetected == 'no':
logger.fdebug("year detected: " + str(cs[i]))
ydetected = 'yes'
@ -258,7 +258,7 @@ def libraryScan(dir=None, append=False, ComicID=None, ComicName=None, cron=None)
#make sure it's a number on either side of decimal and assume decimal issue.
decst = cs[i].find('.')
dec_st = cs[i][:decst]
dec_en = cs[i][decst+1:]
dec_en = cs[i][decst +1:]
logger.fdebug("st: " + str(dec_st))
logger.fdebug("en: " + str(dec_en))
if dec_st.isdigit() and dec_en.isdigit():
@ -294,7 +294,7 @@ def libraryScan(dir=None, append=False, ComicID=None, ComicName=None, cron=None)
while (findcn < len(cnsplit)):
cname = cname + cs[findcn] + " "
findcn+=1
cname = cname[:len(cname)-1] # drop the end space...
cname = cname[:len(cname) -1] # drop the end space...
print ("assuming name is : " + cname)
com_NAME = cname
print ("com_NAME : " + com_NAME)
@ -323,7 +323,7 @@ def libraryScan(dir=None, append=False, ComicID=None, ComicName=None, cron=None)
logger.fdebug('charcount is : ' + str(charcount))
if charcount > 0:
logger.fdebug('entering loop')
for i,m in enumerate(re.finditer('\#', d_filename)):
for i, m in enumerate(re.finditer('\#', d_filename)):
if m.end() <= displength:
logger.fdebug(comfilename[m.start():m.end()])
# find occurance in c_filename, then replace into d_filname so special characters are brought across
@ -341,12 +341,12 @@ def libraryScan(dir=None, append=False, ComicID=None, ComicName=None, cron=None)
#changed this from '' to ' '
comic_iss_b4 = re.sub('[\-\:\,]', ' ', comic_andiss)
comic_iss = comic_iss_b4.replace('.',' ')
comic_iss = comic_iss_b4.replace('.', ' ')
comic_iss = re.sub('[\s+]', ' ', comic_iss).strip()
logger.fdebug("adjusted comic and issue: " + str(comic_iss))
#remove 'the' from here for proper comparisons.
if ' the ' in comic_iss.lower():
comic_iss = re.sub('\\bthe\\b','', comic_iss).strip()
comic_iss = re.sub('\\bthe\\b', '', comic_iss).strip()
splitit = comic_iss.split(None)
logger.fdebug("adjusting from: " + str(comic_iss_b4) + " to: " + str(comic_iss))
#here we cycle through the Watchlist looking for a match.
@ -377,7 +377,7 @@ def libraryScan(dir=None, append=False, ComicID=None, ComicName=None, cron=None)
scount = 0
logger.fdebug("search-length: " + str(splitst))
logger.fdebug("Watchlist-length: " + str(len(watchcomic_split)))
while ( n <= (splitst)-1 ):
while (n <= (splitst) -1):
logger.fdebug("splitit: " + str(splitit[n]))
if n < (splitst) and n < len(watchcomic_split):
logger.fdebug(str(n) + " Comparing: " + str(watchcomic_split[n]) + " .to. " + str(splitit[n]))
@ -410,7 +410,7 @@ def libraryScan(dir=None, append=False, ComicID=None, ComicName=None, cron=None)
logger.fdebug("scount:" + str(wordcnt))
totalcnt = int(splitst)
logger.fdebug("splitit-len:" + str(totalcnt))
spercent = (wordcnt/totalcnt) * 100
spercent = (wordcnt /totalcnt) * 100
logger.fdebug("we got " + str(spercent) + " percent.")
if int(spercent) >= 80:
logger.fdebug("it's a go captain... - we matched " + str(spercent) + "%!")
@ -434,7 +434,7 @@ def libraryScan(dir=None, append=False, ComicID=None, ComicName=None, cron=None)
n = 0
if volyr is None:
if result_comyear is None:
if result_comyear is None:
result_comyear = '0000' #no year in filename basically.
else:
if result_comyear is None:
@ -450,15 +450,15 @@ def libraryScan(dir=None, append=False, ComicID=None, ComicName=None, cron=None)
logger.fdebug("adding " + com_NAME + " to the import-queue!")
impid = dispname + '-' + str(result_comyear) + '-' + str(comiss) #com_NAME + "-" + str(result_comyear) + "-" + str(comiss)
logger.fdebug("impid: " + str(impid))
import_by_comicids.append({
"impid" : impid,
"watchmatch" : watchmatch,
"displayname" : dispname,
"comicname" : dispname, #com_NAME,
"comicyear" : result_comyear,
"volume" : vol_label,
"comfilename" : comfilename,
"comlocation" : comlocation.decode(mylar.SYS_ENCODING)
import_by_comicids.append({
"impid": impid,
"watchmatch": watchmatch,
"displayname": dispname,
"comicname": dispname, #com_NAME,
"comicyear": result_comyear,
"volume": vol_label,
"comfilename": comfilename,
"comlocation": comlocation.decode(mylar.SYS_ENCODING)
})
logger.fdebug('import_by_ids: ' + str(import_by_comicids))
@ -472,11 +472,11 @@ def libraryScan(dir=None, append=False, ComicID=None, ComicName=None, cron=None)
if watchfound > 0:
if mylar.IMP_MOVE:
logger.info("You checked off Move Files...so that's what I'm going to do")
logger.info("You checked off Move Files...so that's what I'm going to do")
#check to see if Move Files is enabled.
#if not being moved, set the archive bit.
print("Moving files into appropriate directory")
while (wat < watchfound):
while (wat < watchfound):
watch_the_list = watchchoice['watchlist'][wat]
watch_comlocation = watch_the_list['ComicLocation']
watch_comicid = watch_the_list['ComicID']
@ -485,7 +485,7 @@ def libraryScan(dir=None, append=False, ComicID=None, ComicName=None, cron=None)
watch_comiciss = watch_the_list['ComicIssue']
print ("ComicLocation: " + str(watch_comlocation))
orig_comlocation = watch_the_list['OriginalLocation']
orig_filename = watch_the_list['OriginalFilename']
orig_filename = watch_the_list['OriginalFilename']
print ("Orig. Location: " + str(orig_comlocation))
print ("Orig. Filename: " + str(orig_filename))
#before moving check to see if Rename to Mylar structure is enabled.
@ -493,13 +493,13 @@ def libraryScan(dir=None, append=False, ComicID=None, ComicName=None, cron=None)
print("Renaming files according to configuration details : " + str(mylar.FILE_FORMAT))
renameit = helpers.rename_param(watch_comicid, watch_comicname, watch_comicyear, watch_comiciss)
nfilename = renameit['nfilename']
dst_path = os.path.join(watch_comlocation,nfilename)
dst_path = os.path.join(watch_comlocation, nfilename)
if str(watch_comicid) not in comicids:
comicids.append(watch_comicid)
else:
print("Renaming files not enabled, keeping original filename(s)")
dst_path = os.path.join(watch_comlocation,orig_filename)
dst_path = os.path.join(watch_comlocation, orig_filename)
#os.rename(os.path.join(self.nzb_folder, str(ofilename)), os.path.join(self.nzb_folder,str(nfilename + ext)))
#src = os.path.join(, str(nfilename + ext))
@ -523,19 +523,19 @@ def libraryScan(dir=None, append=False, ComicID=None, ComicName=None, cron=None)
else:
print("...Existing status: " + str(issuechk['Status']))
control = {"IssueID": issuechk['IssueID']}
values = { "Status": "Archived"}
values = {"Status": "Archived"}
print ("...changing status of " + str(issuechk['Issue_Number']) + " to Archived ")
myDB.upsert("issues", values, control)
if str(watch_comicid) not in comicids:
comicids.append(watch_comicid)
comicids.append(watch_comicid)
wat+=1
if comicids is None: pass
else:
c_upd = len(comicids)
c = 0
while (c < c_upd ):
while (c < c_upd):
print ("Rescanning.. " + str(c))
updater.forceRescan(c)
updater.forceRescan(c)
if not len(import_by_comicids):
return "Completed"
if len(import_by_comicids) > 0:
@ -548,7 +548,7 @@ def scanLibrary(scan=None, queue=None):
valreturn = []
if scan:
try:
soma,noids = libraryScan()
soma, noids = libraryScan()
except Exception, e:
logger.error('Unable to complete the scan: %s' % e)
return
@ -593,8 +593,8 @@ def scanLibrary(scan=None, queue=None):
# unzip -z filename.cbz will show the comment field of the zip which contains the metadata.
#self.importResults()
valreturn.append({"somevalue" : 'self.ie',
"result" : 'success'})
valreturn.append({"somevalue": 'self.ie',
"result": 'success'})
return queue.put(valreturn)
#raise cherrypy.HTTPRedirect("importResults")
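The matching pass above scores a filename against a watchlist title word by word and accepts at 80%; note that wordcnt/totalcnt is integer division under Python 2, so any partial match floors to 0 unless one operand is coerced to float. A minimal sketch of the same scoring idea (the helper name match_percent is illustrative, not from the source):

def match_percent(split_filename, split_watchcomic):
    # compare token-by-token, the way the scan walks both split lists
    matched = 0
    for n, word in enumerate(split_filename):
        if n < len(split_watchcomic) and word.lower() == split_watchcomic[n].lower():
            matched += 1
    # float() keeps Python 2 from flooring the division to 0
    return (float(matched) / len(split_filename)) * 100

# match_percent(['walking', 'dead'], ['walking', 'dead']) -> 100.0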
View File
@ -106,7 +106,7 @@ def initLogger(verbose=1):
console_formatter = Formatter('%(asctime)s - %(levelname)s :: %(threadName)s : %(message)s', '%d-%b-%Y %H:%M:%S')
console_handler = StreamHandler()
console_handler.setFormatter(console_formatter)
#print 'verbose is ' + str(verbose)
#print 'verbose is ' + str(verbose)
#if verbose == 2:
# console_handler.setLevel(logging.DEBUG)
#else:
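The handler wiring above uses only the standard library; a self-contained sketch of the same console setup (the level choices are assumptions, since the verbose branch is commented out in this hunk):

import logging
from logging import Formatter, StreamHandler

console_formatter = Formatter('%(asctime)s - %(levelname)s :: %(threadName)s : %(message)s', '%d-%b-%Y %H:%M:%S')
console_handler = StreamHandler()
console_handler.setFormatter(console_formatter)
console_handler.setLevel(logging.INFO)   # DEBUG when verbose == 2, per the commented branch
logging.getLogger().addHandler(console_handler)
logging.getLogger().setLevel(logging.DEBUG)
logging.info('console logging ready')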
View File
@ -43,7 +43,7 @@ if platform.python_version() == '2.7.6':
httplib.HTTPConnection._http_vsn = 10
httplib.HTTPConnection._http_vsn_str = 'HTTP/1.0'
def pullsearch(comicapi,comicquery,offset,explicit,type):
def pullsearch(comicapi, comicquery, offset, explicit, type):
u_comicquery = urllib.quote(comicquery.encode('utf-8').strip())
u_comicquery = u_comicquery.replace(" ", "%20")
@ -67,7 +67,7 @@ def pullsearch(comicapi,comicquery,offset,explicit,type):
except urllib2.HTTPError, err:
logger.error('err : ' + str(err))
logger.error("There was a major problem retrieving data from ComicVine - on their end. You'll have to try again later most likely.")
return
return
#increment CV API counter.
mylar.CVAPI_COUNT +=1
#convert to string:
@ -80,18 +80,18 @@ def pullsearch(comicapi,comicquery,offset,explicit,type):
def findComic(name, mode, issue, limityear=None, explicit=None, type=None):
#with mb_lock:
#with mb_lock:
comiclist = []
comicResults = None
comicLibrary = listLibrary()
chars = set('!?*')
if any((c in chars) for c in name):
name = '"'+name+'"'
name = '"' +name +'"'
#print ("limityear: " + str(limityear))
#print ("limityear: " + str(limityear))
if limityear is None: limityear = 'None'
comicquery = name
#comicquery=name.replace(" ", "%20")
@ -122,8 +122,8 @@ def findComic(name, mode, issue, limityear=None, explicit=None, type=None):
if type is None:
type = 'volume'
#let's find out how many results we get from the query...
searched = pullsearch(comicapi,comicquery,0,explicit,type)
#let's find out how many results we get from the query...
searched = pullsearch(comicapi, comicquery, 0, explicit, type)
if searched is None: return False
totalResults = searched.getElementsByTagName('number_of_total_results')[0].firstChild.wholeText
logger.fdebug("there are " + str(totalResults) + " search results...")
@ -133,20 +133,20 @@ def findComic(name, mode, issue, limityear=None, explicit=None, type=None):
while (countResults < int(totalResults)):
#logger.fdebug("querying " + str(countResults))
if countResults > 0:
#2012/22/02 - CV API flipped back to offset usage instead of page
#2012/22/02 - CV API flipped back to offset usage instead of page
if explicit == 'all' or explicit == 'loose':
#all / loose uses page for offset
offsetcount = (countResults/100) + 1
offsetcount = (countResults /100) + 1
else:
#explicit uses offset
offsetcount = countResults
searched = pullsearch(comicapi,comicquery,offsetcount,explicit,type)
searched = pullsearch(comicapi, comicquery, offsetcount, explicit, type)
comicResults = searched.getElementsByTagName(type) #('volume')
body = ''
n = 0
n = 0
if not comicResults:
break
break
for result in comicResults:
#retrieve the first xml tag (<tag>data</tag>)
#that the parser finds with name tagName:
@ -155,11 +155,11 @@ def findComic(name, mode, issue, limityear=None, explicit=None, type=None):
#call cv.py here to find out issue count in story arc
try:
logger.fdebug('story_arc ascension')
names = len( result.getElementsByTagName('name') )
names = len(result.getElementsByTagName('name'))
n = 0
logger.fdebug('length: ' + str(names))
xmlpub = None #set this in case the publisher field isn't populated in the xml
while ( n < names ):
while (n < names):
logger.fdebug(result.getElementsByTagName('name')[n].parentNode.nodeName)
if result.getElementsByTagName('name')[n].parentNode.nodeName == 'story_arc':
logger.fdebug('yes')
@ -180,11 +180,11 @@ def findComic(name, mode, issue, limityear=None, explicit=None, type=None):
logger.warn('error retrieving story arc search results.')
return
siteurl = len( result.getElementsByTagName('site_detail_url') )
siteurl = len(result.getElementsByTagName('site_detail_url'))
s = 0
logger.fdebug('length: ' + str(names))
xmlurl = None
while ( s < siteurl ):
while (s < siteurl):
logger.fdebug(result.getElementsByTagName('site_detail_url')[s].parentNode.nodeName)
if result.getElementsByTagName('site_detail_url')[s].parentNode.nodeName == 'story_arc':
try:
@ -216,15 +216,15 @@ def findComic(name, mode, issue, limityear=None, explicit=None, type=None):
try:
logger.fdebug('story_arc ascension')
issuecount = len( arcdom.getElementsByTagName('issue') )
issuecount = len(arcdom.getElementsByTagName('issue'))
issuedom = arcdom.getElementsByTagName('issue')
isc = 0
isc = 0
arclist = ''
for isd in issuedom:
zeline = isd.getElementsByTagName('id')
isdlen = len( zeline )
isdlen = len(zeline)
isb = 0
while ( isb < isdlen):
while (isb < isdlen):
if isc == 0:
arclist = str(zeline[isb].firstChild.wholeText).strip()
else:
@ -240,7 +240,7 @@ def findComic(name, mode, issue, limityear=None, explicit=None, type=None):
try:
firstid = None
arcyear = None
fid = len ( arcdom.getElementsByTagName('id') )
fid = len (arcdom.getElementsByTagName('id'))
fi = 0
while (fi < fid):
if arcdom.getElementsByTagName('id')[fi].parentNode.nodeName == 'first_appeared_in_issue':
@ -253,7 +253,7 @@ def findComic(name, mode, issue, limityear=None, explicit=None, type=None):
if firstid is not None:
firstdom = cv.pulldetails(comicid=None, type='firstissue', issueid=firstid)
logger.fdebug('success')
arcyear = cv.GetFirstIssue(firstid,firstdom)
arcyear = cv.GetFirstIssue(firstid, firstdom)
except:
logger.fdebug('Unable to retrieve first issue details. Not calculating at this time.')
@ -271,7 +271,7 @@ def findComic(name, mode, issue, limityear=None, explicit=None, type=None):
xmldeck = arcdom.getElementsByTagName('deck')[0].firstChild.wholeText
except:
xmldeck = "None"
if xmlid in comicLibrary:
haveit = comicLibrary[xmlid]
else:
@ -292,7 +292,7 @@ def findComic(name, mode, issue, limityear=None, explicit=None, type=None):
})
logger.fdebug('IssueID\'s that are a part of ' + xmlTag + ' : ' + str(arclist))
else:
xmlcnt = result.getElementsByTagName('count_of_issues')[0].firstChild.wholeText
#here we can determine what called us, and either start gathering all issues or just limited ones.
@ -311,12 +311,12 @@ def findComic(name, mode, issue, limityear=None, explicit=None, type=None):
#logger.info('There are : ' + str(xmlcnt) + ' issues in this series.')
#logger.info('The first issue started at # ' + str(xmlfirst))
cnt_numerical = int(xmlcnt) + int(xmlfirst) # (of issues + start of first issue = numerical range)
#logger.info('The maximum issue number should be roughly # ' + str(cnt_numerical))
#logger.info('The limiter (issue max that we know of) is # ' + str(limiter))
if cnt_numerical >= limiter:
cnl = len ( result.getElementsByTagName('name') )
cnl = len (result.getElementsByTagName('name'))
cl = 0
xmlTag = 'None'
xmlimage = "cache/blankcover.jpg"
@ -336,7 +336,7 @@ def findComic(name, mode, issue, limityear=None, explicit=None, type=None):
#logger.info('name:' + str(xmlTag) + ' -- ' + str(xmlYr))
if xmlYr in limityear or limityear == 'None':
xmlurl = result.getElementsByTagName('site_detail_url')[0].firstChild.wholeText
idl = len ( result.getElementsByTagName('id') )
idl = len (result.getElementsByTagName('id'))
idt = 0
xmlid = None
while (idt < idl):
@ -347,7 +347,7 @@ def findComic(name, mode, issue, limityear=None, explicit=None, type=None):
if xmlid is None:
logger.error('Unable to figure out the comicid - skipping this : ' + str(xmlurl))
continue
continue
#logger.info('xmlid: ' + str(xmlid))
publishers = result.getElementsByTagName('publisher')
if len(publishers) > 0:
@ -390,9 +390,9 @@ def findComic(name, mode, issue, limityear=None, explicit=None, type=None):
#logger.fdebug('year: ' + str(xmlYr) + ' - constraint met: ' + str(xmlTag) + '[' + str(xmlYr) + '] --- 4050-' + str(xmlid))
else:
pass
#logger.fdebug('year: ' + str(xmlYr) + ' - constraint not met. Has to be within ' + str(limityear))
n+=1
#logger.fdebug('year: ' + str(xmlYr) + ' - constraint not met. Has to be within ' + str(limityear))
n+=1
#search results are limited to 100 and by pagination now...let's account for this.
countResults = countResults + 100
return comiclist, explicit
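findComic() above pages through ComicVine 100 results at a time, with 'all'/'loose' modes passing a page number and explicit mode passing a raw offset. A condensed sketch of that loop, assuming a pullsearch-style callable that returns the parsed page and the total result count (both names are illustrative):

def fetch_all_results(pull, comicquery, explicit, type):
    # the first call always uses offset 0; later calls depend on the mode
    results = []
    page, total = pull(comicquery, 0, explicit, type)
    results.extend(page)
    fetched = 100                          # ComicVine caps each response at 100
    while fetched < total:
        if explicit in ('all', 'loose'):
            offset = (fetched / 100) + 1   # these modes page by page number
        else:
            offset = fetched               # explicit mode uses a raw offset
        page, total = pull(comicquery, offset, explicit, type)
        results.extend(page)
        fetched += 100
    return results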
View File
@ -4,7 +4,7 @@ import os
import shutil
def movefiles(comicid,comlocation,ogcname,imported=None):
def movefiles(comicid, comlocation, ogcname, imported=None):
myDB = db.DBConnection()
logger.fdebug('comlocation is : ' + str(comlocation))
logger.fdebug('original comicname is : ' + str(ogcname))
@ -16,17 +16,17 @@ def movefiles(comicid,comlocation,ogcname,imported=None):
srcimp = impr['ComicLocation']
orig_filename = impr['ComicFilename']
orig_iss = impr['impID'].rfind('-')
orig_iss = impr['impID'][orig_iss+1:]
orig_iss = impr['impID'][orig_iss +1:]
logger.fdebug("Issue :" + str(orig_iss))
#before moving check to see if Rename to Mylar structure is enabled.
if mylar.IMP_RENAME and mylar.FILE_FORMAT != '':
logger.fdebug("Renaming files according to configuration details : " + str(mylar.FILE_FORMAT))
renameit = helpers.rename_param(comicid, impr['ComicName'], orig_iss, orig_filename)
nfilename = renameit['nfilename']
dstimp = os.path.join(comlocation,nfilename)
dstimp = os.path.join(comlocation, nfilename)
else:
logger.fdebug("Renaming files not enabled, keeping original filename(s)")
dstimp = os.path.join(comlocation,orig_filename)
dstimp = os.path.join(comlocation, orig_filename)
logger.info("moving " + str(srcimp) + " ... to " + str(dstimp))
try:
@ -40,11 +40,11 @@ def movefiles(comicid,comlocation,ogcname,imported=None):
if results is not None:
for result in results:
controlValue = {"impID": result['impid']}
newValue = {"Status": "Imported" }
newValue = {"Status": "Imported"}
myDB.upsert("importresults", newValue, controlValue)
return
def archivefiles(comicid,ogcname):
def archivefiles(comicid, ogcname):
myDB = db.DBConnection()
# if move files isn't enabled, let's set all found comics to Archive status :)
result = myDB.select("SELECT * FROM importresults WHERE ComicName=?", [ogcname])
@ -53,5 +53,5 @@ def archivefiles(comicid,ogcname):
ogdir = result['Location']
origdir = os.path.join(os.path.dirname(ogdir))
updater.forceRescan(comicid,archive=origdir) #send to rescanner with archive mode turned on
updater.forceRescan(comicid, archive=origdir) #send to rescanner with archive mode turned on
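movefiles() above recovers the issue number from an impID built as '<name>-<year>-<issue>' during the library scan, slicing after the last hyphen. The same step in isolation (the sample value is illustrative):

imp_id = 'Walking Dead-2003-10'             # illustrative impID
orig_iss = imp_id[imp_id.rfind('-') + 1:]   # -> '10'
# rsplit gives the same result in one step:
orig_iss = imp_id.rsplit('-', 1)[1]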
View File
@ -19,7 +19,7 @@ from mylar import logger
def newpull():
pagelinks = "http://www.previewsworld.com/Home/1/1/71/952"
pageresponse = urllib2.urlopen ( pagelinks )
pageresponse = urllib2.urlopen (pagelinks)
soup = BeautifulSoup (pageresponse)
getthedate = soup.findAll("div", {"class": "Headline"})[0]
#the date will be in the FIRST ahref
@ -42,7 +42,7 @@ def newpull():
endthis = False
pull_list = []
publishers = {'914':'DARK HORSE COMICS', '915':'DC COMICS', '916':'IDW PUBLISHING', '917':'IMAGE COMICS', '918':'MARVEL COMICS', '952':'COMICS & GRAPHIC NOVELS'}
publishers = {'914': 'DARK HORSE COMICS', '915': 'DC COMICS', '916': 'IDW PUBLISHING', '917': 'IMAGE COMICS', '918': 'MARVEL COMICS', '952': 'COMICS & GRAPHIC NOVELS'}
while (x < lenlinks):
headt = cntlinks[x] #iterate through the hrefs pulling out only results.
@ -64,7 +64,7 @@ def newpull():
if issue_lk == -1:
continue
#headName = headt.findNext(text=True)
publisher_id = issue_link[issue_lk-3:issue_lk]
publisher_id = issue_link[issue_lk -3:issue_lk]
for pub in publishers:
if pub == publisher_id:
isspublisher = publishers[pub]
@ -85,7 +85,7 @@ def newpull():
"name": found_iss[1].findNext(text=True),
"price": found_iss[2],
"publisher": isspublisher,
"ID" : urlID})
"ID": urlID})
if endthis == True: break
x+=1
@ -95,7 +95,7 @@ def newpull():
try:
csvfile = open(str(except_file), 'rb')
csvfile.close()
except (OSError,IOError):
except (OSError, IOError):
logger.fdebug('file does not exist - continuing.')
else:
logger.fdebug('file exists - removing.')
@ -112,10 +112,10 @@ def newpull():
exceptln = pl['publisher'] + "\n" + str(pl['ID']) + "\t" + str(pl['name']) + "\t" + str(pl['price'])
for lb in breakhtml:
exceptln = re.sub(lb,'', exceptln).strip()
exceptln = re.sub(lb, '', exceptln).strip()
exceptline = exceptln.decode('utf-8','ignore')
f.write('%s\n' % (exceptline.encode('ascii','replace').strip()))
exceptline = exceptln.decode('utf-8', 'ignore')
f.write('%s\n' % (exceptline.encode('ascii', 'replace').strip()))
oldpub = pl['publisher']
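newpull() above maps a three-digit id, sliced out of each href just before a marker position, to a publisher name by scanning the publishers dict in a for-loop; a plain .get() performs the same lookup. A small sketch (the href and marker position are illustrative):

publishers = {'914': 'DARK HORSE COMICS', '915': 'DC COMICS', '916': 'IDW PUBLISHING',
              '917': 'IMAGE COMICS', '918': 'MARVEL COMICS', '952': 'COMICS & GRAPHIC NOVELS'}

issue_link = '.../Catalog/915123'                  # illustrative href
issue_lk = 15                                      # marker position, located earlier in the code
publisher_id = issue_link[issue_lk - 3:issue_lk]   # three digits before the marker -> '915'
isspublisher = publishers.get(publisher_id)        # 'DC COMICS'; None for unknown ids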
View File
@ -37,7 +37,7 @@ class PROWL:
def __init__(self):
self.enabled = mylar.PROWL_ENABLED
self.keys = mylar.PROWL_KEYS
self.priority = mylar.PROWL_PRIORITY
self.priority = mylar.PROWL_PRIORITY
pass
def conf(self, options):
@ -52,12 +52,12 @@ class PROWL:
module += '[NOTIFIER]'
http_handler = HTTPSConnection("api.prowlapp.com")
data = {'apikey': mylar.PROWL_KEYS,
'application': 'Mylar',
'event': event,
'description': message.encode("utf-8"),
'priority': mylar.PROWL_PRIORITY }
'priority': mylar.PROWL_PRIORITY}
http_handler.request("POST",
"/publicapi/add",
@ -69,7 +69,7 @@ class PROWL:
if request_status == 200:
logger.info(module + ' Prowl notifications sent.')
return True
elif request_status == 401:
elif request_status == 401:
logger.info(module + ' Prowl auth failed: %s' % response.reason)
return False
else:
@ -87,19 +87,19 @@ class PROWL:
self.priority = priority
self.notify('ZOMG Lazors Pewpewpew!', 'Test Message')
class NMA:
def __init__(self):
self.apikey = mylar.NMA_APIKEY
self.priority = mylar.NMA_PRIORITY
def _send(self, data, module):
url_data = urllib.urlencode(data)
url = 'https://www.notifymyandroid.com/publicapi/notify'
req = urllib2.Request(url, url_data)
try:
@ -109,18 +109,18 @@ class NMA:
return
response = handle.read().decode(mylar.SYS_ENCODING)
return response
return response
def notify(self, snline=None, prline=None, prline2=None, snatched_nzb=None, sent_to=None, prov=None, module=None):
if module is None:
module = ''
module += '[NOTIFIER]'
module += '[NOTIFIER]'
apikey = self.apikey
priority = self.priority
if snatched_nzb:
if snatched_nzb[-1] == '.': snatched_nzb = snatched_nzb[:-1]
event = snline
@ -128,14 +128,14 @@ class NMA:
else:
event = prline
description = prline2
data = { 'apikey': apikey, 'application':'Mylar', 'event': event, 'description': description, 'priority': priority}
data = {'apikey': apikey, 'application': 'Mylar', 'event': event, 'description': description, 'priority': priority}
logger.info(module + ' Sending notification request to NotifyMyAndroid')
request = self._send(data,module)
request = self._send(data, module)
if not request:
logger.warn(module + ' Error sending notification request to NotifyMyAndroid')
logger.warn(module + ' Error sending notification request to NotifyMyAndroid')
# 2013-04-01 Added Pushover.net notifications, based on copy of Prowl class above.
# No extra care has been put into API friendliness at the moment (read: https://pushover.net/api#friendly)
@ -154,9 +154,9 @@ class PUSHOVER:
# device - option for specifying which of your registered devices Mylar should send to. No option given, it sends to all devices on Pushover (default)
# URL / URL_TITLE (both for use with the COPS/OPDS server I'm building maybe?)
# Sound - name of soundfile to override default sound choice
# not sure if this is needed for Pushover
#def conf(self, options):
# return cherrypy.config['config'].get('Pushover', options)
@ -168,12 +168,12 @@ class PUSHOVER:
module += '[NOTIFIER]'
http_handler = HTTPSConnection("api.pushover.net:443")
data = {'token': mylar.PUSHOVER_APIKEY,
'user': mylar.PUSHOVER_USERKEY,
'message': message.encode("utf-8"),
'title': event,
'priority': mylar.PUSHOVER_PRIORITY }
'priority': mylar.PUSHOVER_PRIORITY}
http_handler.request("POST",
"/1/messages.json",
@ -210,7 +210,7 @@ class PUSHOVER:
class BOXCAR:
#new BoxCar2 API
#new BoxCar2 API
def __init__(self):
self.url = 'https://new.boxcar.io/api/notifications'
@ -299,7 +299,7 @@ class PUSHBULLET:
if module is None:
module = ''
module += '[NOTIFIER]'
http_handler = HTTPSConnection("api.pushbullet.com")
if method == 'GET':
@ -323,7 +323,7 @@ class PUSHBULLET:
data = {'type': "note", #'device_iden': self.deviceid,
'title': event.encode('utf-8'), #"mylar",
'body': message.encode('utf-8') }
'body': message.encode('utf-8')}
http_handler.request("POST",
"/v2/pushes",
View File
@ -14,14 +14,14 @@
# along with Mylar. If not, see <http://www.gnu.org/licenses/>.
from bs4 import BeautifulSoup, UnicodeDammit
import urllib2
import re
import helpers
import logger
import datetime
from bs4 import BeautifulSoup, UnicodeDammit
import urllib2
import re
import helpers
import logger
import datetime
import sys
from decimal import Decimal
from decimal import Decimal
from HTMLParser import HTMLParseError
from time import strptime
import mylar
@ -42,10 +42,10 @@ def GCDScraper(ComicName, ComicYear, Total, ComicID, quickmatch=None):
comicnm_1 = re.sub('\+', '%2B', comicnm)
comicnm = re.sub(' ', '+', comicnm_1)
input = 'http://www.comics.org/search/advanced/process/?target=series&method=icontains&logic=False&order2=date&order3=&start_date=' + str(comicyr) + '-01-01&end_date=' + str(NOWyr) + '-12-31&series=' + str(comicnm) + '&is_indexed=None'
response = urllib2.urlopen ( input )
soup = BeautifulSoup ( response)
cnt1 = len(soup.findAll("tr", {"class" : "listing_even"}))
cnt2 = len(soup.findAll("tr", {"class" : "listing_odd"}))
response = urllib2.urlopen (input)
soup = BeautifulSoup (response)
cnt1 = len(soup.findAll("tr", {"class": "listing_even"}))
cnt2 = len(soup.findAll("tr", {"class": "listing_odd"}))
cnt = int(cnt1 + cnt2)
@ -59,46 +59,46 @@ def GCDScraper(ComicName, ComicYear, Total, ComicID, quickmatch=None):
n_odd = -1
n_even = -1
n = 0
while ( n < cnt ):
while (n < cnt):
if n%2==0:
n_even+=1
resultp = soup.findAll("tr", {"class" : "listing_even"})[n_even]
resultp = soup.findAll("tr", {"class": "listing_even"})[n_even]
else:
n_odd+=1
resultp = soup.findAll("tr", {"class" : "listing_odd"})[n_odd]
resultp = soup.findAll("tr", {"class": "listing_odd"})[n_odd]
rtp = resultp('a')[1]
resultName.append(helpers.cleanName(rtp.findNext(text=True)))
#print ( "Comic Name: " + str(resultName[n]) )
fip = resultp('a',href=True)[1]
fip = resultp('a', href=True)[1]
resultID.append(fip['href'])
#print ( "ID: " + str(resultID[n]) )
subtxt3 = resultp('td')[3]
resultYear.append(subtxt3.findNext(text=True))
resultYear[n] = resultYear[n].replace(' ','')
resultYear[n] = resultYear[n].replace(' ', '')
subtxt4 = resultp('td')[4]
resultIssues.append(helpers.cleanName(subtxt4.findNext(text=True)))
resiss = resultIssues[n].find('issue')
resiss = int(resiss)
resultIssues[n] = resultIssues[n].replace('','')[:resiss]
resultIssues[n] = resultIssues[n].replace(' ','')
resultIssues[n] = resultIssues[n].replace('', '')[:resiss]
resultIssues[n] = resultIssues[n].replace(' ', '')
#print ( "Year: " + str(resultYear[n]) )
#print ( "Issues: " + str(resultIssues[n]) )
CleanComicName = re.sub('[\,\.\:\;\'\[\]\(\)\!\@\#\$\%\^\&\*\-\_\+\=\?\/]', '', comicnm)
CleanComicName = re.sub(' ', '', CleanComicName).lower()
CleanResultName = re.sub('[\,\.\:\;\'\[\]\(\)\!\@\#\$\%\^\&\*\-\_\+\=\?\/]', '', resultName[n])
CleanResultName = re.sub('[\,\.\:\;\'\[\]\(\)\!\@\#\$\%\^\&\*\-\_\+\=\?\/]', '', resultName[n])
CleanResultName = re.sub(' ', '', CleanResultName).lower()
#print ("CleanComicName: " + str(CleanComicName))
#print ("CleanResultName: " + str(CleanResultName))
if CleanResultName == CleanComicName or CleanResultName[3:] == CleanComicName:
#if resultName[n].lower() == helpers.cleanName(str(ComicName)).lower():
#if resultName[n].lower() == helpers.cleanName(str(ComicName)).lower():
#print ("n:" + str(n) + "...matched by name to Mylar!")
#this has been seen in a few instances already, so trying to adjust.
#when the series year is 2011, in gcd it might be 2012 due to publication
#dates overlapping between Dec/11 and Jan/12. Let's accept a match with a
#dates overlapping between Dec/11 and Jan/12. Let's accept a match with a
#1 year grace space, and then pull in the first issue to see the actual pub
# date and if coincides with the other date..match it.
if resultYear[n] == ComicYear or resultYear[n] == str(int(ComicYear)+1):
if resultYear[n] == ComicYear or resultYear[n] == str(int(ComicYear) +1):
#print ("n:" + str(n) + "...matched by year to Mylar!")
#print ( "Year: " + str(resultYear[n]) )
#Occasionally there are discrepancies in comic count between
@ -106,11 +106,11 @@ def GCDScraper(ComicName, ComicYear, Total, ComicID, quickmatch=None):
#as GCD does. Therefore, let's increase the CV count by 1 to get it
#to match, any more variation could cause incorrect matching.
#ie. witchblade on GCD says 159 issues, CV states 161.
if int(resultIssues[n]) == int(Total) or int(resultIssues[n]) == int(Total)+1 or (int(resultIssues[n])+1) == int(Total):
if int(resultIssues[n]) == int(Total) or int(resultIssues[n]) == int(Total) +1 or (int(resultIssues[n]) +1) == int(Total):
#print ("initial issue match..continuing.")
if int(resultIssues[n]) == int(Total)+1:
if int(resultIssues[n]) == int(Total) +1:
issvariation = "cv"
elif int(resultIssues[n])+1 == int(Total):
elif int(resultIssues[n]) +1 == int(Total):
issvariation = "gcd"
else:
issvariation = "no"
@ -122,13 +122,13 @@ def GCDScraper(ComicName, ComicYear, Total, ComicID, quickmatch=None):
resultPublished = rptxt.findNext(text=True)
#print ("Series Published: " + str(resultPublished))
break
n+=1
# it's possible that comicvine would return a comic name incorrectly, or gcd
# has the wrong title and won't match 100%...
# (ie. The Flash-2011 on comicvine is Flash-2011 on gcd)
# this section is to account for variations in spelling, punctuation, etc.
basnumbs = {'one':1,'two':2,'three':3,'four':4,'five':5,'six':6,'seven':7,'eight':8,'nine':9,'ten':10,'eleven':11,'twelve':12}
basnumbs = {'one': 1, 'two': 2, 'three': 3, 'four': 4, 'five': 5, 'six': 6, 'seven': 7, 'eight': 8, 'nine': 9, 'ten': 10, 'eleven': 11, 'twelve': 12}
if resultURL is None:
#search for number as text, and change to numeric
for numbs in basnumbs:
@ -142,8 +142,8 @@ def GCDScraper(ComicName, ComicYear, Total, ComicID, quickmatch=None):
break
if ComicName.lower().startswith('the '):
ComicName = ComicName[4:]
return GCDScraper(ComicName, ComicYear, Total, ComicID)
if ':' in ComicName:
return GCDScraper(ComicName, ComicYear, Total, ComicID)
if ':' in ComicName:
ComicName = re.sub(':', '', ComicName)
return GCDScraper(ComicName, ComicYear, Total, ComicID)
if '-' in ComicName:
@ -151,7 +151,7 @@ def GCDScraper(ComicName, ComicYear, Total, ComicID, quickmatch=None):
return GCDScraper(ComicName, ComicYear, Total, ComicID)
if 'and' in ComicName.lower():
ComicName = ComicName.replace('and', '&')
return GCDScraper(ComicName, ComicYear, Total, ComicID)
return GCDScraper(ComicName, ComicYear, Total, ComicID)
if not quickmatch: return 'No Match'
#vari_loop = 0
if quickmatch == "yes":
@ -189,12 +189,12 @@ def GCDdetails(comseries, resultURL, vari_loop, ComicID, TotalIssues, issvariati
resultURL = boong['comseriesID']
ComicID = boong['comicid']
TotalIssues+= int(boong['comseriesIssues'])
else:
else:
resultURL = resultURL
# if we're here - it means it's a mismatched name.
# let's pull down the publication date as it'll be blank otherwise
inputMIS = 'http://www.comics.org' + str(resultURL)
resp = urllib2.urlopen ( inputMIS )
resp = urllib2.urlopen (inputMIS)
# soup = BeautifulSoup ( resp )
try:
soup = BeautifulSoup(urllib2.urlopen(inputMIS))
@ -207,22 +207,22 @@ def GCDdetails(comseries, resultURL, vari_loop, ComicID, TotalIssues, issvariati
logger.info("not working...aborting. Tell Evilhero.")
return
#If CV doesn't have the Series Year (Stupid)...Let's store the Comics.org stated year just in case.
pyearit = soup.find("div", {"class" : "item_data"})
pyearit = soup.find("div", {"class": "item_data"})
pyeartxt = pyearit.find(text=re.compile(r"Series"))
pyearst = pyeartxt.index('Series')
ParseYear = pyeartxt[int(pyearst)-5:int(pyearst)]
ParseYear = pyeartxt[int(pyearst) -5:int(pyearst)]
parsed = soup.find("div", {"id" : "series_data"})
parsed = soup.find("div", {"id": "series_data"})
#recent structure changes - need to adjust now
subtxt3 = parsed.find("dd", {"id" : "publication_dates"})
subtxt3 = parsed.find("dd", {"id": "publication_dates"})
resultPublished = subtxt3.findNext(text=True).rstrip()
#print ("pubdate:" + str(resultPublished))
parsfind = parsed.findAll("dt", {"class" : "long"})
parsfind = parsed.findAll("dt", {"class": "long"})
seriesloop = len(parsfind)
resultFormat = ''
for pf in parsfind:
if 'Publishing Format:' in pf.findNext(text=True):
subtxt9 = pf.find("dd", {"id" : "series_format"})
subtxt9 = pf.find("dd", {"id": "series_format"})
resultFormat = subtxt9.findNext(text=True).rstrip()
continue
# the caveat - if a series is ongoing but only has 1 issue published at a particular point in time,
@ -233,11 +233,11 @@ def GCDdetails(comseries, resultURL, vari_loop, ComicID, TotalIssues, issvariati
resultPublished = resultPublished + " - Present"
if 'limited series' in resultFormat.lower() and '?' in resultPublished:
resultPublished = resultPublished + " (Limited Series)"
coverst = soup.find("div", {"id" : "series_cover"})
if coverst < 0:
coverst = soup.find("div", {"id": "series_cover"})
if coverst < 0:
gcdcover = "None"
else:
subcoverst = coverst('img',src=True)[0]
subcoverst = coverst('img', src=True)[0]
gcdcover = subcoverst['src']
#print ("resultURL:" + str(resultURL))
@ -258,8 +258,8 @@ def GCDdetails(comseries, resultURL, vari_loop, ComicID, TotalIssues, issvariati
#print ("pub date defaulting")
datetype = "on-sale"
cnt1 = len(soup.findAll("tr", {"class" : "row_even_False"}))
cnt2 = len(soup.findAll("tr", {"class" : "row_even_True"}))
cnt1 = len(soup.findAll("tr", {"class": "row_even_False"}))
cnt2 = len(soup.findAll("tr", {"class": "row_even_True"}))
cnt = int(cnt1 + cnt2)
@ -271,19 +271,19 @@ def GCDdetails(comseries, resultURL, vari_loop, ComicID, TotalIssues, issvariati
PI = "1.00"
altcount = 0
PrevYRMO = "0000-00"
while ( n < cnt ):
while (n < cnt):
if n%2==0:
n_odd+=1
parsed = soup.findAll("tr", {"class" : "row_even_False"})[n_odd]
parsed = soup.findAll("tr", {"class": "row_even_False"})[n_odd]
ntype = "odd"
else:
n_even+=1
ntype = "even"
parsed = soup.findAll("tr", {"class" : "row_even_True"})[n_even]
parsed = soup.findAll("tr", {"class": "row_even_True"})[n_even]
subtxt3 = parsed.find("a")
ParseIssue = subtxt3.findNext(text=True)
fid = parsed('a',href=True)[0]
fid = parsed('a', href=True)[0]
resultGID = fid['href']
resultID = resultGID[7:-1]
@ -291,14 +291,14 @@ def GCDdetails(comseries, resultURL, vari_loop, ComicID, TotalIssues, issvariati
variant="no"
if 'Vol' in ParseIssue or '[' in ParseIssue or 'a' in ParseIssue or 'b' in ParseIssue or 'c' in ParseIssue:
m = re.findall('[^\[\]]+', ParseIssue)
# ^^ takes care of []
# ^^ takes care of []
# if it's a decimal - variant ...whoo-boy is messed.
if '.' in m[0]:
dec_chk = m[0]
#if it's a digit before and after decimal, assume decimal issue
dec_st = dec_chk.find('.')
dec_b4 = dec_chk[:dec_st]
dec_ad = dec_chk[dec_st+1:]
dec_ad = dec_chk[dec_st +1:]
dec_ad = re.sub("\s", "", dec_ad)
if dec_b4.isdigit() and dec_ad.isdigit():
#logger.fdebug("Alternate decimal issue...*Whew* glad I caught that")
@ -307,10 +307,10 @@ def GCDdetails(comseries, resultURL, vari_loop, ComicID, TotalIssues, issvariati
#logger.fdebug("it's a decimal, but there's no digits before or after decimal")
#not a decimal issue, drop it down to the regex below.
ParseIssue = re.sub("[^0-9]", " ", dec_chk)
else:
else:
ParseIssue = re.sub("[^0-9]", " ", m[0])
# ^^ removes everything but the digits from the remaining non-brackets
logger.fdebug("variant cover detected : " + str(ParseIssue))
variant="yes"
altcount = 1
@ -334,7 +334,7 @@ def GCDdetails(comseries, resultURL, vari_loop, ComicID, TotalIssues, issvariati
if '.' in isschk:
isschk_find = isschk.find('.')
isschk_b4dec = isschk[:isschk_find]
isschk_decval = isschk[isschk_find+1:]
isschk_decval = isschk[isschk_find +1:]
#logger.fdebug("decimal detected for " + str(isschk))
#logger.fdebug("isschk_decval is " + str(isschk_decval))
if len(isschk_decval) == 1:
@ -350,10 +350,10 @@ def GCDdetails(comseries, resultURL, vari_loop, ComicID, TotalIssues, issvariati
if variant == "yes":
#logger.fdebug("alternate cover detected - skipping/ignoring.")
altcount = 1
# in order to get the compare right, let's decimalize the string to '.00'.
# if halfchk == "yes": pass
# else:
# else:
# ParseIssue = ParseIssue + isschk_decval
datematch="false"
@ -381,9 +381,9 @@ def GCDdetails(comseries, resultURL, vari_loop, ComicID, TotalIssues, issvariati
gcdinfo['ComicIssue'] = ParseIssue
#--- let's use pubdate.
#try publication date first
ParseDate = GettheDate(parsed,PrevYRMO)
ParseDate = ParseDate.replace(' ','')
ParseDate = GettheDate(parsed, PrevYRMO)
ParseDate = ParseDate.replace(' ', '')
PrevYRMO = ParseDate
gcdinfo['ComicDate'] = ParseDate
#^^ will retrieve date #
@ -405,7 +405,7 @@ def GCDdetails(comseries, resultURL, vari_loop, ComicID, TotalIssues, issvariati
gcdinfo['gcdchoice'] = gcdchoice
altcount = 0
altcount = 0
n+=1
i+=1
gcdinfo['gcdvariation'] = issvariation
@ -420,10 +420,10 @@ def GCDdetails(comseries, resultURL, vari_loop, ComicID, TotalIssues, issvariati
return gcdinfo
## -- end (GCD) -- ##
def GettheDate(parsed,PrevYRMO):
def GettheDate(parsed, PrevYRMO):
#--- let's use pubdate.
#try publication date first
#logger.fdebug("parsed:" + str(parsed))
#logger.fdebug("parsed:" + str(parsed))
subtxt1 = parsed('td')[1]
ParseDate = subtxt1.findNext(text=True).rstrip()
pformat = 'pub'
@ -432,32 +432,32 @@ def GettheDate(parsed,PrevYRMO):
ParseDate = subtxt1.findNext(text=True)
pformat = 'on-sale'
if len(ParseDate) < 7: ParseDate = '0000-00' #invalid on-sale date format, drop it to 0000-00 to avoid errors
basmonths = {'january':'01','february':'02','march':'03','april':'04','may':'05','june':'06','july':'07','august':'08','september':'09','october':'10','november':'11','december':'12'}
basmonths = {'january': '01', 'february': '02', 'march': '03', 'april': '04', 'may': '05', 'june': '06', 'july': '07', 'august': '08', 'september': '09', 'october': '10', 'november': '11', 'december': '12'}
pdlen = len(ParseDate)
pdfind = ParseDate.find(' ',2)
pdfind = ParseDate.find(' ', 2)
#logger.fdebug("length: " + str(pdlen) + "....first space @ pos " + str(pdfind))
#logger.fdebug("this should be the year: " + str(ParseDate[pdfind+1:pdlen-1]))
if pformat == 'on-sale': pass # date is in correct format...
else:
if ParseDate[pdfind+1:pdlen-1].isdigit():
if ParseDate[pdfind +1:pdlen -1].isdigit():
#assume valid date.
#search for number as text, and change to numeric
for numbs in basmonths:
if numbs in ParseDate.lower():
pconv = basmonths[numbs]
ParseYear = re.sub('/s','',ParseDate[-5:])
ParseYear = re.sub('/s', '', ParseDate[-5:])
ParseDate = str(ParseYear) + "-" + str(pconv)
#logger.fdebug("!success - Publication date: " + str(ParseDate))
break
# some comics are messed with pub.dates and have Spring/Summer/Fall/Winter
else:
baseseasons = {'spring':'03','summer':'06','fall':'09','winter':'12'}
baseseasons = {'spring': '03', 'summer': '06', 'fall': '09', 'winter': '12'}
for seas in baseseasons:
if seas in ParseDate.lower():
sconv = baseseasons[seas]
ParseYear = re.sub('/s','',ParseDate[-5:])
ParseYear = re.sub('/s', '', ParseDate[-5:])
ParseDate = str(ParseYear) + "-" + str(sconv)
break
break
# #try key date
# subtxt1 = parsed('td')[2]
# ParseDate = subtxt1.findNext(text=True)
@ -493,22 +493,22 @@ def GCDAdd(gcdcomicid):
logger.fdebug("looking at gcdid:" + str(gcdid))
input2 = 'http://www.comics.org/series/' + str(gcdid)
logger.fdebug("---url: " + str(input2))
resp = urllib2.urlopen ( input2 )
soup = BeautifulSoup ( resp )
resp = urllib2.urlopen (input2)
soup = BeautifulSoup (resp)
logger.fdebug("SeriesName section...")
parsen = soup.find("span", {"id" : "series_name"})
parsen = soup.find("span", {"id": "series_name"})
#logger.fdebug("series name (UNPARSED): " + str(parsen))
subpar = parsen('a')[0]
resultName = subpar.findNext(text=True)
logger.fdebug("ComicName: " + str(resultName))
#covers-start
logger.fdebug("Covers section...")
coverst = soup.find("div", {"id" : "series_cover"})
coverst = soup.find("div", {"id": "series_cover"})
if coverst < 0:
gcdcover = "None"
logger.fdebug("unable to find any covers - setting to None")
else:
subcoverst = coverst('img',src=True)[0]
subcoverst = coverst('img', src=True)[0]
#logger.fdebug("cover (UNPARSED) : " + str(subcoverst))
gcdcover = subcoverst['src']
logger.fdebug("Cover: " + str(gcdcover))
@ -516,27 +516,27 @@ def GCDAdd(gcdcomicid):
#publisher start
logger.fdebug("Publisher section...")
try:
pubst = soup.find("div", {"class" : "item_data"})
pubst = soup.find("div", {"class": "item_data"})
catchit = pubst('a')[0]
except (IndexError, TypeError):
pubst = soup.findAll("div", {"class" : "left"})[1]
pubst = soup.findAll("div", {"class": "left"})[1]
catchit = pubst.find("a")
publisher = catchit.findNext(text=True)
logger.fdebug("Publisher: " + str(publisher))
#publisher end
parsed = soup.find("div", {"id" : "series_data"})
parsed = soup.find("div", {"id": "series_data"})
#logger.fdebug("series_data: " + str(parsed))
#print ("parse:" + str(parsed))
subtxt3 = parsed.find("dd", {"id" : "publication_dates"})
subtxt3 = parsed.find("dd", {"id": "publication_dates"})
#logger.fdebug("publication_dates: " + str(subtxt3))
pubdate = subtxt3.findNext(text=True).rstrip()
logger.fdebug("pubdate:" + str(pubdate))
subtxt4 = parsed.find("dd", {"id" : "issues_published"})
subtxt4 = parsed.find("dd", {"id": "issues_published"})
noiss = subtxt4.findNext(text=True)
lenwho = len(noiss)
lent = noiss.find(' ',2)
lent = noiss.find(' ', 2)
lenf = noiss.find('(')
stringit = noiss[lenf:lenwho]
stringout = noiss[:lent]
@ -547,12 +547,12 @@ def GCDAdd(gcdcomicid):
serieschoice.append({
"ComicID": gcdid,
"ComicName": resultName,
"ComicYear" : pubdate,
"ComicIssues" : noissues,
"ComicPublisher" : publisher,
"ComicCover" : gcdcover
})
series['serieschoice'] = serieschoice
"ComicYear": pubdate,
"ComicIssues": noissues,
"ComicPublisher": publisher,
"ComicCover": gcdcover
})
series['serieschoice'] = serieschoice
return series
@ -582,14 +582,14 @@ def ComChk(ComicName, ComicYear, ComicPublisher, Total, ComicID):
# take the 1st word ;)
#comicpub = comicpub.split()[0]
# if it's not one of the BIG publisher's it might fail - so let's increase the odds.
pubbiggies = [ 'DC',
pubbiggies = ['DC',
'Marvel',
'Image',
'IDW' ]
'IDW']
uhuh = "no"
for pb in pubbiggies:
if pb in comicpub:
#keep publisher in url if a biggie.
#keep publisher in url if a biggie.
uhuh = "yes"
#print (" publisher match : " + str(comicpub))
conv_pub = comicpub.split()[0]
@ -624,10 +624,10 @@ def ComChk(ComicName, ComicYear, ComicPublisher, Total, ComicID):
if uhuh == "no":
publink = "&pub_name="
input = 'http://www.comics.org/search/advanced/process/?target=series&method=icontains&logic=False&keywords=&order1=series&order2=date&order3=&start_date=' + str(comicyr) + '-01-01&end_date=' + str(NOWyr) + '-12-31' + '&title=&feature=&job_number=&pages=&script=&pencils=&inks=&colors=&letters=&story_editing=&genre=&characters=&synopsis=&reprint_notes=&story_reprinted=None&notes=' + str(publink) + '&pub_notes=&brand=&brand_notes=&indicia_publisher=&is_surrogate=None&ind_pub_notes=&series=' + str(comicnm) + '&series_year_began=&series_notes=&tracking_notes=&issue_count=&is_comics=None&format=&color=&dimensions=&paper_stock=&binding=&publishing_format=&issues=&volume=&issue_title=&variant_name=&issue_date=&indicia_frequency=&price=&issue_pages=&issue_editing=&isbn=&barcode=&issue_notes=&issue_reprinted=None&is_indexed=None'
response = urllib2.urlopen ( input )
soup = BeautifulSoup ( response)
cnt1 = len(soup.findAll("tr", {"class" : "listing_even"}))
cnt2 = len(soup.findAll("tr", {"class" : "listing_odd"}))
response = urllib2.urlopen (input)
soup = BeautifulSoup (response)
cnt1 = len(soup.findAll("tr", {"class": "listing_even"}))
cnt2 = len(soup.findAll("tr", {"class": "listing_odd"}))
cnt = int(cnt1 + cnt2)
# print ("cnt1: " + str(cnt1))
@ -643,13 +643,13 @@ def ComChk(ComicName, ComicYear, ComicPublisher, Total, ComicID):
n_odd = -1
n_even = -1
n = 0
while ( n < cnt ):
while (n < cnt):
if n%2==0:
n_even+=1
resultp = soup.findAll("tr", {"class" : "listing_even"})[n_even]
resultp = soup.findAll("tr", {"class": "listing_even"})[n_even]
else:
n_odd+=1
resultp = soup.findAll("tr", {"class" : "listing_odd"})[n_odd]
resultp = soup.findAll("tr", {"class": "listing_odd"})[n_odd]
rtp = resultp('a')[1]
rtpit = rtp.findNext(text=True)
rtpthis = rtpit.encode('utf-8').strip()
@ -663,19 +663,19 @@ def ComChk(ComicName, ComicYear, ComicPublisher, Total, ComicID):
resultPublisher.append(pubthis)
# print ( "Publisher: " + str(resultPublisher[n]) )
fip = resultp('a',href=True)[1]
fip = resultp('a', href=True)[1]
resultID.append(fip['href'])
# print ( "ID: " + str(resultID[n]) )
subtxt3 = resultp('td')[3]
resultYear.append(subtxt3.findNext(text=True))
resultYear[n] = resultYear[n].replace(' ','')
resultYear[n] = resultYear[n].replace(' ', '')
subtxt4 = resultp('td')[4]
resultIssues.append(helpers.cleanName(subtxt4.findNext(text=True)))
resiss = resultIssues[n].find('issue')
resiss = int(resiss)
resultIssues[n] = resultIssues[n].replace('','')[:resiss]
resultIssues[n] = resultIssues[n].replace(' ','')
resultIssues[n] = resultIssues[n].replace('', '')[:resiss]
resultIssues[n] = resultIssues[n].replace(' ', '')
# print ( "Year: " + str(resultYear[n]) )
# print ( "Issues: " + str(resultIssues[n]) )
# print ("comchkchoice: " + str(comchkchoice))
@ -685,18 +685,18 @@ def ComChk(ComicName, ComicYear, ComicPublisher, Total, ComicID):
"ComicID": str(comicid),
"ComicName": resultName[n],
"GCDID": str(resultID[n]).split('/')[2],
"ComicYear" : str(resultYear[n]),
"ComicPublisher" : resultPublisher[n],
"ComicURL" : "http://www.comics.org" + str(resultID[n]),
"ComicIssues" : str(resultIssues[n])
"ComicYear": str(resultYear[n]),
"ComicPublisher": resultPublisher[n],
"ComicURL": "http://www.comics.org" + str(resultID[n]),
"ComicIssues": str(resultIssues[n])
})
#else:
#print ( str(resultID[n]) + " already in DB...skipping" )
#print ( str(resultID[n]) + " already in DB...skipping" )
n+=1
cr+=1
totalcount= totalcount + cnt
comchoice['comchkchoice'] = comchkchoice
return comchoice, totalcount
return comchoice, totalcount
def decode_html(html_string):
converted = UnicodeDammit(html_string)
@ -721,10 +721,10 @@ def annualCheck(gcomicid, comicid, comicname, comicyear):
comicnm = re.sub(' ', '+', comicnm_1)
input = 'http://www.comics.org/search/advanced/process/?target=series&method=icontains&logic=False&order2=date&order3=&start_date=' + str(comicyear) + '-01-01&end_date=' + str(comicyear) + '-12-31&series=' + str(comicnm) + '&is_indexed=None'
response = urllib2.urlopen ( input )
soup = BeautifulSoup ( response)
cnt1 = len(soup.findAll("tr", {"class" : "listing_even"}))
cnt2 = len(soup.findAll("tr", {"class" : "listing_odd"}))
response = urllib2.urlopen (input)
soup = BeautifulSoup (response)
cnt1 = len(soup.findAll("tr", {"class": "listing_even"}))
cnt2 = len(soup.findAll("tr", {"class": "listing_odd"}))
cnt = int(cnt1 + cnt2)
@ -738,33 +738,33 @@ def annualCheck(gcomicid, comicid, comicname, comicyear):
n_odd = -1
n_even = -1
n = 0
while ( n < cnt ):
while (n < cnt):
if n%2==0:
n_even+=1
resultp = soup.findAll("tr", {"class" : "listing_even"})[n_even]
resultp = soup.findAll("tr", {"class": "listing_even"})[n_even]
else:
n_odd+=1
resultp = soup.findAll("tr", {"class" : "listing_odd"})[n_odd]
resultp = soup.findAll("tr", {"class": "listing_odd"})[n_odd]
rtp = resultp('a')[1]
rtp1 = re.sub('Annual', '', rtp)
resultName.append(helpers.cleanName(rtp1.findNext(text=True)))
print ( "Comic Name: " + str(resultName[n]) )
fip = resultp('a',href=True)[1]
print ("Comic Name: " + str(resultName[n]))
fip = resultp('a', href=True)[1]
resultID.append(fip['href'])
print ( "ID: " + str(resultID[n]) )
print ("ID: " + str(resultID[n]))
subtxt3 = resultp('td')[3]
resultYear.append(subtxt3.findNext(text=True))
resultYear[n] = resultYear[n].replace(' ','')
resultYear[n] = resultYear[n].replace(' ', '')
subtxt4 = resultp('td')[4]
resultIssues.append(helpers.cleanName(subtxt4.findNext(text=True)))
resiss = resultIssues[n].find('issue')
resiss = int(resiss)
resultIssues[n] = resultIssues[n].replace('','')[:resiss]
resultIssues[n] = resultIssues[n].replace(' ','')
print ( "Year: " + str(resultYear[n]) )
print ( "Issues: " + str(resultIssues[n]) )
resultIssues[n] = resultIssues[n].replace('', '')[:resiss]
resultIssues[n] = resultIssues[n].replace(' ', '')
print ("Year: " + str(resultYear[n]))
print ("Issues: " + str(resultIssues[n]))
CleanComicName = re.sub('[\,\.\:\;\'\[\]\(\)\!\@\#\$\%\^\&\*\-\_\+\=\?\/]', '', comicnm)
CleanComicName = re.sub(' ', '', CleanComicName).lower()
@ -775,9 +775,9 @@ def annualCheck(gcomicid, comicid, comicname, comicyear):
if CleanResultName == CleanComicName or CleanResultName[3:] == CleanComicName:
#if resultName[n].lower() == helpers.cleanName(str(ComicName)).lower():
#print ("n:" + str(n) + "...matched by name to Mylar!")
if resultYear[n] == ComicYear or resultYear[n] == str(int(ComicYear)+1):
if resultYear[n] == ComicYear or resultYear[n] == str(int(ComicYear) +1):
print ("n:" + str(n) + "...matched by year to Mylar!")
print ( "Year: " + str(resultYear[n]) )
print ("Year: " + str(resultYear[n]))
TotalIssues = resultIssues[n]
resultURL = str(resultID[n])
rptxt = resultp('td')[6]
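GettheDate() above turns GCD publication strings like 'March 2012' or 'Spring 2012' into 'YYYY-MM' via the basmonths/baseseasons maps. A simplified sketch of that normalization (the year slice here is reduced to the last four characters; the original trims a five-character tail and strips whitespace):

basmonths = {'january': '01', 'february': '02', 'march': '03', 'april': '04',
             'may': '05', 'june': '06', 'july': '07', 'august': '08',
             'september': '09', 'october': '10', 'november': '11', 'december': '12'}
baseseasons = {'spring': '03', 'summer': '06', 'fall': '09', 'winter': '12'}

def normalize_pubdate(parse_date):
    year = parse_date[-4:]                      # 'March 2012' -> '2012'
    for name, month in dict(basmonths, **baseseasons).items():
        if name in parse_date.lower():
            return year + '-' + month
    return '0000-00'                            # same fallback the code uses

# normalize_pubdate('March 2012')  -> '2012-03'
# normalize_pubdate('Winter 2011') -> '2011-12'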
View File
@ -217,7 +217,7 @@ class Readinglist(object):
cmd = shlex.split(cmdstring)
try:
output = subprocess.check_output(cmd)
except subprocess.CalledProcessError,e:
except subprocess.CalledProcessError, e:
logger.info(module + ' The host {0} is not reachable at this time.'.format(cmd[-1]))
return
else:
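The reachability check above shells out through shlex + subprocess and treats a non-zero exit code as "host down". A standalone sketch (the ping command is illustrative, and -c is the unix flag; the real cmdstring is assembled elsewhere in the class):

import shlex
import subprocess

cmdstring = 'ping -c 1 192.168.1.10'   # illustrative command
cmd = shlex.split(cmdstring)
try:
    output = subprocess.check_output(cmd)
except subprocess.CalledProcessError, e:
    print 'The host %s is not reachable at this time.' % cmd[-1]
else:
    print 'Host %s responded.' % cmd[-1]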
View File
@ -29,7 +29,7 @@ def _start_newznab_attr(self, attrsD):
feedparser._FeedParserMixin._start_newznab_attr = _start_newznab_attr
def torrents(pickfeed=None,seriesname=None,issue=None,feedinfo=None):
def torrents(pickfeed=None, seriesname=None, issue=None, feedinfo=None):
if pickfeed is None:
return
@ -69,14 +69,14 @@ def torrents(pickfeed=None,seriesname=None,issue=None,feedinfo=None):
katinfo = {}
while (lp < loopit):
if lp == 0 and loopit == 2:
if lp == 0 and loopit == 2:
pickfeed = '2'
elif lp == 1 and loopit == 2:
pickfeed = '5'
elif lp == 1 and loopit == 2:
pickfeed = '5'
feedtype = None
if pickfeed == "1" and mylar.ENABLE_32P: # 32pages new releases feed.
if pickfeed == "1" and mylar.ENABLE_32P: # 32pages new releases feed.
feed = 'https://32pag.es/feeds.php?feed=torrents_all&user=' + feedinfo['user'] + '&auth=' + feedinfo['auth'] + '&passkey=' + feedinfo['passkey'] + '&authkey=' + feedinfo['authkey']
feedtype = ' from the New Releases RSS Feed for comics'
elif pickfeed == "2" and srchterm is not None: # kat.ph search
@ -85,7 +85,7 @@ def torrents(pickfeed=None,seriesname=None,issue=None,feedinfo=None):
feed = kat_url + "usearch/category%3Acomics%20seeds%3A" + str(mylar.MINSEEDS) + "/?rss=1"
feedtype = ' from the New Releases RSS Feed for comics'
elif pickfeed == "4": #32p search
if any( [mylar.USERNAME_32P is None, mylar.USERNAME_32P == '', mylar.PASSWORD_32P is None, mylar.PASSWORD_32P == ''] ):
if any([mylar.USERNAME_32P is None, mylar.USERNAME_32P == '', mylar.PASSWORD_32P is None, mylar.PASSWORD_32P == '']):
logger.error('[RSS] Warning - you NEED to enter in your 32P Username and Password to use this option.')
lp+=1
continue
@ -97,28 +97,28 @@ def torrents(pickfeed=None,seriesname=None,issue=None,feedinfo=None):
#searchresults = searchit.authenticate()
#logger.info('search results: ' + str(searchresults))
return
elif pickfeed == "5" and srchterm is not None: # kat.ph search (category:other since some 0-day comics initially get thrown there until categorized)
elif pickfeed == "5" and srchterm is not None: # kat.ph search (category:other since some 0-day comics initially get thrown there until categorized)
feed = kat_url + "usearch/" + str(srchterm) + "%20category%3Aother%20seeds%3A1/?rss=1"
elif pickfeed == "6": # kat.ph rss feed (category:other so that we can get them quicker if need-be)
feed = kat_url + "usearch/.cbr%20category%3Aother%20seeds%3A" + str(mylar.MINSEEDS) + "/?rss=1"
feedtype = ' from the New Releases for category Other RSS Feed that contain comics'
elif int(pickfeed) >=7 and feedinfo is not None:
feedtype = ' from the New Releases for category Other RSS Feed that contain comics'
elif int(pickfeed) >= 7 and feedinfo is not None:
#personal 32P notification feeds.
#get the info here
feed = 'https://32pag.es/feeds.php?feed=' + feedinfo['feed'] + '&user=' + feedinfo['user'] + '&auth=' + feedinfo['auth'] + '&passkey=' + feedinfo['passkey'] + '&authkey=' + feedinfo['authkey'] + '&name=' + feedinfo['feedname']
feedtype = ' from your Personal Notification Feed : ' + feedinfo['feedname']
else:
logger.error('invalid pickfeed denoted...')
return
#logger.info('feed URL: ' + str(feed))
feedme = feedparser.parse(feed)
if pickfeed == "3" or pickfeed == "6" or pickfeed == "2" or pickfeed == "5":
picksite = 'KAT'
elif pickfeed == "1" or pickfeed == "4" or int(pickfeed) > 7:
elif pickfeed == "1" or pickfeed == "4" or int(pickfeed) > 7:
picksite = '32P'
i = 0
@ -127,12 +127,12 @@ def torrents(pickfeed=None,seriesname=None,issue=None,feedinfo=None):
if pickfeed == "3" or pickfeed == "6":
tmpsz = feedme.entries[i].enclosures[0]
feeddata.append({
'site': picksite,
'title': feedme.entries[i].title,
'link': tmpsz['url'],
'pubdate': feedme.entries[i].updated,
'size': tmpsz['length']
})
'site': picksite,
'title': feedme.entries[i].title,
'link': tmpsz['url'],
'pubdate': feedme.entries[i].updated,
'size': tmpsz['length']
})
elif pickfeed == "2" or pickfeed == "5":
tmpsz = feedme.entries[i].enclosures[0]
@ -143,21 +143,21 @@ def torrents(pickfeed=None,seriesname=None,issue=None,feedinfo=None):
'pubdate': feedme.entries[i].updated,
'size': tmpsz['length']
})
elif pickfeed == "1" or pickfeed == "4" or int(pickfeed) > 7:
if pickfeed == "1" or int(pickfeed) > 7:
tmpdesc = feedme.entries[i].description
st_pub = feedme.entries[i].title.find('(')
st_end = feedme.entries[i].title.find(')')
pub = feedme.entries[i].title[st_pub+1:st_end] # +1 to not include (
pub = feedme.entries[i].title[st_pub +1:st_end] # +1 to not include (
#logger.fdebug('publisher: ' + re.sub("'",'', pub).strip()) #publisher sometimes is given within quotes for some reason, strip 'em.
vol_find = feedme.entries[i].title.find('vol.')
series = feedme.entries[i].title[st_end+1:vol_find].strip()
series = feedme.entries[i].title[st_end +1:vol_find].strip()
#logger.fdebug('series title: ' + series)
iss_st = feedme.entries[i].title.find(' - ', vol_find)
vol = re.sub('\.', '', feedme.entries[i].title[vol_find:iss_st]).strip()
#logger.fdebug('volume #: ' + str(vol))
issue = feedme.entries[i].title[iss_st+3:].strip()
issue = feedme.entries[i].title[iss_st +3:].strip()
#logger.fdebug('issue # : ' + str(issue))
#break it down to get the Size since it's available on THIS 32P feed only so far.
@ -172,18 +172,18 @@ def torrents(pickfeed=None,seriesname=None,issue=None,feedinfo=None):
if '.' in fdigits:
decfind = fdigits.find('.')
wholenum = fdigits[:decfind]
decnum = fdigits[decfind+1:]
decnum = fdigits[decfind +1:]
else:
wholenum = fdigits
decnum = 0
if 'MB' in tmpsize:
wholebytes = int(wholenum) * 1048576
wholedecimal = ( int(decnum) * 1048576 ) / 100
wholedecimal = (int(decnum) * 1048576) / 100
justdigits = wholebytes + wholedecimal
else:
#it's 'GB' then
wholebytes = ( int(wholenum) * 1024 ) * 1048576
wholedecimal = ( ( int(decnum) * 1024 ) * 1048576 ) / 100
wholebytes = (int(wholenum) * 1024) * 1048576
wholedecimal = ((int(decnum) * 1024) * 1048576) / 100
justdigits = wholebytes + wholedecimal
#this is not currently working for 32p
#Get the # of seeders.
@ -201,12 +201,13 @@ def torrents(pickfeed=None,seriesname=None,issue=None,feedinfo=None):
if int(mylar.MINSEEDS) >= int(seeddigits):
link = feedme.entries[i].link
linkst = link.find('&id')
linken = link.find('&', linkst+1)
if linken == -1: linken = len(link)
newlink = re.sub('&id=','', link[linkst:linken]).strip()
linken = link.find('&', linkst +1)
if linken == -1:
linken = len(link)
newlink = re.sub('&id=', '', link[linkst:linken]).strip()
feeddata.append({
'site': picksite,
'title': series.lstrip() + ' ' + vol + ' #' + issue,
'title': series.lstrip() + ' ' + vol + ' #' + issue,
'volume': vol, # not stored by mylar yet.
'issue': issue, # not stored by mylar yet.
'link': newlink, #just the id for the torrent
@ -214,7 +215,7 @@ def torrents(pickfeed=None,seriesname=None,issue=None,feedinfo=None):
'size': justdigits
})
i+=1
i += 1
if feedtype is None:
logger.info('[' + picksite + '] there were ' + str(i) + ' results..')
@ -222,11 +223,10 @@ def torrents(pickfeed=None,seriesname=None,issue=None,feedinfo=None):
logger.info('[' + picksite + '] there were ' + str(i) + ' results' + feedtype)
totalcount += i
lp +=1
lp += 1
if not seriesname:
rssdbupdate(feeddata,totalcount,'torrent')
rssdbupdate(feeddata, totalcount, 'torrent')
else:
katinfo['entries'] = torthekat
return katinfo
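The torrent-size handling in torrents() above splits a '12.34'-style size at the decimal point and keeps everything in integer math: MB scaled by 1048576, GB by 1024 times that, and the assumed two decimal digits divided back out by 100. The same arithmetic as a helper (the name is illustrative; under Python 3 the final division would need // to stay integral):

def size_to_bytes(fdigits, unit):
    if '.' in fdigits:
        wholenum, decnum = fdigits.split('.', 1)
    else:
        wholenum, decnum = fdigits, '0'
    scale = 1048576 if unit == 'MB' else 1024 * 1048576   # MiB vs GiB
    # two decimal digits assumed, hence dividing the fraction by 100
    return int(wholenum) * scale + (int(decnum) * scale) / 100

# size_to_bytes('12.34', 'MB') -> 12939427 (12.34 MiB, truncated to whole bytes)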
@ -298,7 +298,7 @@ def nzbs(provider=None, forcerss=False):
else:
# dognzb, nzb.su, newznab
link = entry.link
#Remove the API keys from the url to allow for possible api key changes
if site == 'dognzb':
link = re.sub(mylar.DOGNZB_APIKEY, '', link).strip()
@ -325,7 +325,7 @@ def nzbs(provider=None, forcerss=False):
rssdbupdate(feeddata, i, 'usenet')
return
def rssdbupdate(feeddata,i,type):
def rssdbupdate(feeddata, i, type):
rsschktime = 15
myDB = db.DBConnection()
@ -340,7 +340,7 @@ def rssdbupdate(feeddata,i,type):
newlink = dataval['link']
else:
#store the hash/id from KAT
newlink = os.path.basename(re.sub('.torrent','', dataval['link'][:dataval['link'].find('?title')]))
newlink = os.path.basename(re.sub('.torrent', '', dataval['link'][:dataval['link'].find('?title')]))
newVal = {"Link": newlink,
"Pubdate": dataval['pubdate'],
@ -356,12 +356,13 @@ def rssdbupdate(feeddata,i,type):
"Size": dataval['Size']}
ctrlVal = {"Title": dataval['Title']}
myDB.upsert("rssdb", newVal,ctrlVal)
myDB.upsert("rssdb", newVal, ctrlVal)
logger.fdebug('Completed adding new data to RSS DB. Next add in ' + str(mylar.RSS_CHECKINTERVAL) + ' minutes')
return
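rssdbupdate() above stores only the KAT hash/id rather than the full link: it truncates the URL at '?title', drops the '.torrent' suffix, and keeps the basename. The same steps in isolation (the URL is illustrative; note the unescaped '.' in the pattern actually means "any character followed by 'torrent'"):

import os
import re

link = 'https://kat.example/abc123def.torrent?title=Some+Comic'   # illustrative
newlink = os.path.basename(re.sub('.torrent', '', link[:link.find('?title')]))
# -> 'abc123def'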
def torrentdbsearch(seriesname,issue,comicid=None,nzbprov=None):
def torrentdbsearch(seriesname, issue, comicid=None, nzbprov=None):
myDB = db.DBConnection()
seriesname_alt = None
if comicid is None or comicid == 'None':
@ -376,19 +377,18 @@ def torrentdbsearch(seriesname,issue,comicid=None,nzbprov=None):
seriesname = snm['ComicName']
seriesname_alt = snm['AlternateSearch']
#remove 'and' and 'the':
tsearch_rem1 = re.sub("\\band\\b", "%", seriesname.lower())
tsearch_rem2 = re.sub("\\bthe\\b", "%", tsearch_rem1.lower())
tsearch_rem2 = re.sub("\\bthe\\b", "%", tsearch_rem1.lower())
tsearch_removed = re.sub('\s+', ' ', tsearch_rem2)
tsearch_seriesname = re.sub('[\'\!\@\#\$\%\:\-\;\/\\=\?\&\.\s]', '%',tsearch_removed)
tsearch_seriesname = re.sub('[\'\!\@\#\$\%\:\-\;\/\\=\?\&\.\s]', '%', tsearch_removed)
if mylar.PREFERRED_QUALITY == 0:
tsearch = tsearch_seriesname + "%"
elif mylar.PREFERRED_QUALITY == 1:
tsearch = tsearch_seriesname + "%cbr%"
elif mylar.PREFERRED_QUALITY == 2:
tsearch = tsearch_seriesname + "%cbz%"
else:
else:
tsearch = tsearch_seriesname + "%"
logger.fdebug('tsearch : ' + tsearch)
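
The sanitising above turns a watchlist title into a SQL LIKE pattern, with 'and'/'the' and all punctuation collapsing to '%' wildcards. A condensed sketch (the raw-string character class is a tidied equivalent of the escaped one above):

import re

def to_like_pattern(seriesname):
    # drop 'and'/'the' as whole words, collapse runs of whitespace,
    # then map punctuation and spaces to LIKE wildcards
    t = re.sub(r"\band\b", "%", seriesname.lower())
    t = re.sub(r"\bthe\b", "%", t)
    t = re.sub(r'\s+', ' ', t)
    return re.sub(r"['!@#$%:\-;/\\=?&.\s]", '%', t)

print(to_like_pattern('The Walking Dead'))   # %%walking%dead
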
@ -412,7 +412,7 @@ def torrentdbsearch(seriesname,issue,comicid=None,nzbprov=None):
AS_Alternate = seriesname_alt
AS_Alt.append(seriesname_alt)
for calt in chkthealt:
AS_Alter = re.sub('##','',calt)
AS_Alter = re.sub('##', '', calt)
u_altsearchcomic = AS_Alter.encode('ascii', 'ignore').strip()
AS_Altrem = re.sub("\\band\\b", "", u_altsearchcomic.lower())
AS_Altrem = re.sub("\\bthe\\b", "", AS_Altrem.lower())
@ -420,7 +420,7 @@ def torrentdbsearch(seriesname,issue,comicid=None,nzbprov=None):
AS_Alternate = re.sub('[\_\#\,\/\:\;\.\-\!\$\%\+\'\&\?\@\s]', '%', AS_Altrem)
AS_Altrem_mod = re.sub('[\&]', ' ', AS_Altrem)
AS_formatrem_seriesname = re.sub('[\'\!\@\#\$\%\:\;\/\\=\?\.]', '',AS_Altrem_mod)
AS_formatrem_seriesname = re.sub('[\'\!\@\#\$\%\:\;\/\\=\?\.]', '', AS_Altrem_mod)
AS_formatrem_seriesname = re.sub('\s+', ' ', AS_formatrem_seriesname)
if AS_formatrem_seriesname[:1] == ' ': AS_formatrem_seriesname = AS_formatrem_seriesname[1:]
AS_Alt.append(AS_formatrem_seriesname)
@ -479,13 +479,13 @@ def torrentdbsearch(seriesname,issue,comicid=None,nzbprov=None):
titletemp = re.sub('cbz', '', titletemp)
titletemp = re.sub('none', '', titletemp)
if i == 0:
if i == 0:
rebuiltline = titletemp
else:
rebuiltline = rebuiltline + ' (' + titletemp + ')'
i+=1
if ext_check == False:
if ext_check == False:
continue
logger.fdebug('rebuiltline is :' + rebuiltline)
@ -499,14 +499,14 @@ def torrentdbsearch(seriesname,issue,comicid=None,nzbprov=None):
seriesname_mod = re.sub('[\&]', ' ', seriesname_mod)
foundname_mod = re.sub('[\&]', ' ', foundname_mod)
formatrem_seriesname = re.sub('[\'\!\@\#\$\%\:\;\=\?\.]', '',seriesname_mod)
formatrem_seriesname = re.sub('[\-]', ' ',formatrem_seriesname)
formatrem_seriesname = re.sub('[\'\!\@\#\$\%\:\;\=\?\.]', '', seriesname_mod)
formatrem_seriesname = re.sub('[\-]', ' ', formatrem_seriesname)
formatrem_seriesname = re.sub('[\/]', ' ', formatrem_seriesname) #not necessary since seriesname in a torrent file won't have /
formatrem_seriesname = re.sub('\s+', ' ', formatrem_seriesname)
if formatrem_seriesname[:1] == ' ': formatrem_seriesname = formatrem_seriesname[1:]
formatrem_torsplit = re.sub('[\'\!\@\#\$\%\:\;\\=\?\.]', '',foundname_mod)
formatrem_torsplit = re.sub('[\-]', ' ',formatrem_torsplit) #we replace the - with space so we'll get hits if differnces
formatrem_torsplit = re.sub('[\'\!\@\#\$\%\:\;\\=\?\.]', '', foundname_mod)
formatrem_torsplit = re.sub('[\-]', ' ', formatrem_torsplit) #we replace the - with space so we'll get hits if differences
formatrem_torsplit = re.sub('[\/]', ' ', formatrem_torsplit) #not necessary since if has a /, should be removed in above line
formatrem_torsplit = re.sub('\s+', ' ', formatrem_torsplit)
logger.fdebug(str(len(formatrem_torsplit)) + ' - formatrem_torsplit : ' + formatrem_torsplit.lower())
@ -532,7 +532,7 @@ def torrentdbsearch(seriesname,issue,comicid=None,nzbprov=None):
ctitle = tor['Title'].find('cbz')
if ctitle == 0:
ctitle = tor['Title'].find('none')
if ctitle == 0:
if ctitle == 0:
logger.fdebug('cannot determine title properly - ignoring for now.')
continue
cttitle = tor['Title'][:ctitle]
@ -541,7 +541,7 @@ def torrentdbsearch(seriesname,issue,comicid=None,nzbprov=None):
st_pub = rebuiltline.find('(')
if st_pub < 2 and st_pub != -1:
st_end = rebuiltline.find(')')
rebuiltline = rebuiltline[st_end+1:]
rebuiltline = rebuiltline[st_end +1:]
tortheinfo.append({
'title': rebuiltline, #cttitle,
@ -555,7 +555,7 @@ def torrentdbsearch(seriesname,issue,comicid=None,nzbprov=None):
return torinfo
def nzbdbsearch(seriesname,issue,comicid=None,nzbprov=None,searchYear=None,ComicVersion=None):
def nzbdbsearch(seriesname, issue, comicid=None, nzbprov=None, searchYear=None, ComicVersion=None):
myDB = db.DBConnection()
seriesname_alt = None
if comicid is None or comicid == 'None':
@ -569,12 +569,12 @@ def nzbdbsearch(seriesname,issue,comicid=None,nzbprov=None,searchYear=None,Comic
seriesname = snm['ComicName']
seriesname_alt = snm['AlternateSearch']
nsearch_seriesname = re.sub('[\'\!\@\#\$\%\:\;\/\\=\?\.\-\s]', '%',seriesname)
formatrem_seriesname = re.sub('[\'\!\@\#\$\%\:\;\/\\=\?\.]', '',seriesname)
nsearch_seriesname = re.sub('[\'\!\@\#\$\%\:\;\/\\=\?\.\-\s]', '%', seriesname)
formatrem_seriesname = re.sub('[\'\!\@\#\$\%\:\;\/\\=\?\.]', '', seriesname)
nsearch = '%' + nsearch_seriesname + "%"
nresults = myDB.select("SELECT * FROM rssdb WHERE Title like ? AND Site=?", [nsearch,nzbprov])
nresults = myDB.select("SELECT * FROM rssdb WHERE Title like ? AND Site=?", [nsearch, nzbprov])
if nresults is None:
logger.fdebug('nzb search returned no results for ' + seriesname)
if seriesname_alt is None:
@ -585,9 +585,9 @@ def nzbdbsearch(seriesname,issue,comicid=None,nzbprov=None,searchYear=None,Comic
if chkthealt == 0:
AS_Alternate = AlternateSearch
for calt in chkthealt:
AS_Alternate = re.sub('##','',calt)
AS_Alternate = re.sub('##', '', calt)
AS_Alternate = '%' + AS_Alternate + "%"
nresults += myDB.select("SELECT * FROM rssdb WHERE Title like ? AND Site=?", [AS_Alternate,nzbprov])
nresults += myDB.select("SELECT * FROM rssdb WHERE Title like ? AND Site=?", [AS_Alternate, nzbprov])
if nresults is None:
logger.fdebug('nzb alternate name search returned no results.')
return "no results"
@ -610,7 +610,7 @@ def nzbdbsearch(seriesname,issue,comicid=None,nzbprov=None,searchYear=None,Comic
filetype = None
if mylar.PREFERRED_QUALITY == 1: filetype = 'cbr'
elif mylar.PREFERRED_QUALITY == 2: filetype = 'cbz'
for results in nresults:
title = results['Title']
#logger.fdebug("titlesplit: " + str(title.split("\"")))
@ -670,7 +670,7 @@ def nzbdbsearch(seriesname,issue,comicid=None,nzbprov=None,searchYear=None,Comic
nzbinfo['entries'] = nzbtheinfo
return nzbinfo
def torsend2client(seriesname, issue, seriesyear, linkit, site):
logger.info('matched on ' + seriesname)
filename = helpers.filesafe(seriesname)
@ -709,7 +709,7 @@ def torsend2client(seriesname, issue, seriesyear, linkit, site):
logger.fdebug('[32P-AUTHENTICATION] 32P (Legacy) Authentication already done. Attempting to use existing keys.')
mylar.AUTHKEY_32P = mylar.KEYS_32P['authkey']
else:
if any( [mylar.USERNAME_32P is None, mylar.USERNAME_32P == '', mylar.PASSWORD_32P is None, mylar.PASSWORD_32P == ''] ):
if any([mylar.USERNAME_32P is None, mylar.USERNAME_32P == '', mylar.PASSWORD_32P is None, mylar.PASSWORD_32P == '']):
logger.error('[RSS] Unable to sign-on to 32P to validate settings and initiate download sequence. Please enter/check your username password in the configuration.')
return "fail"
elif mylar.PASSKEY_32P is None or mylar.AUTHKEY_32P is None or mylar.KEYS_32P is None:
@ -727,7 +727,7 @@ def torsend2client(seriesname, issue, seriesyear, linkit, site):
'authkey': mylar.AUTHKEY_32P,
'id': linkit}
headers = None #{'Accept-encoding': 'gzip',
headers = None #{'Accept-encoding': 'gzip',
# 'User-Agent': str(mylar.USER_AGENT)}
else:
@ -739,7 +739,7 @@ def torsend2client(seriesname, issue, seriesyear, linkit, site):
logger.fdebug('KAT Referer set to :' + kat_referrer)
headers = {'Accept-encoding': 'gzip',
headers = {'Accept-encoding': 'gzip',
'Referer': kat_referrer}
url = helpers.torrent_create('KAT', linkit)
@ -752,7 +752,7 @@ def torsend2client(seriesname, issue, seriesyear, linkit, site):
#disable SSL warnings - too many 'warning' messages about invalid certificates
try:
from lib.requests.packages.urllib3 import disable_warnings
disable_warnings()
disable_warnings()
except ImportError:
#this is probably not necessary and redundant, but leaving in for the time being.
from requests.packages.urllib3.exceptions import InsecureRequestWarning
@ -802,7 +802,7 @@ def torsend2client(seriesname, issue, seriesyear, linkit, site):
return "pass"
elif mylar.TORRENT_SEEDBOX:
tssh = ftpsshup.putfile(filepath,filename)
tssh = ftpsshup.putfile(filepath, filename)
return tssh

View File

@ -64,13 +64,13 @@ class tehMain():
logger.info('[RSS] Initiating Torrent RSS Feed Check on 32P.')
if mylar.MODE_32P == 0:
logger.fdebug('[RSS] 32P mode set to Legacy mode. Monitoring New Releases feed only.')
if any( [mylar.PASSKEY_32P is None, mylar.PASSKEY_32P == '', mylar.RSSFEED_32P is None, mylar.RSSFEED_32P == ''] ):
if any([mylar.PASSKEY_32P is None, mylar.PASSKEY_32P == '', mylar.RSSFEED_32P is None, mylar.RSSFEED_32P == '']):
logger.error('[RSS] Unable to validate information from provided RSS Feed. Verify that the feed provided is a current one.')
else:
rsscheck.torrents(pickfeed='1', feedinfo=mylar.KEYS_32P)
else:
logger.fdebug('[RSS] 32P mode set to Auth mode. Monitoring all personal notification feeds & New Releases feed')
if any( [mylar.USERNAME_32P is None, mylar.USERNAME_32P == '', mylar.PASSWORD_32P is None] ):
if any([mylar.USERNAME_32P is None, mylar.USERNAME_32P == '', mylar.PASSWORD_32P is None]):
logger.error('[RSS] Unable to sign-on to 32P to validate settings. Please enter/check your username password in the configuration.')
else:
if mylar.KEYS_32P is None:

View File

@ -29,30 +29,30 @@ def sabnzbd():
r = requests.get(sabline + 'config/general/')
soup = BeautifulSoup(r.content)
#lenlinks = len(cntlinks)
cnt1 = len(soup.findAll("div", {"class" : "field-pair alt"}))
cnt2 = len(soup.findAll("div", {"class" : "field-pair"}))
cnt1 = len(soup.findAll("div", {"class": "field-pair alt"}))
cnt2 = len(soup.findAll("div", {"class": "field-pair"}))
cnt = int(cnt1 + cnt2)
n = 0
n_even = -1
n_odd = -1
while ( n < cnt ):
while (n < cnt):
if n%2==0:
n_even+=1
resultp = soup.findAll("div", {"class" : "field-pair"})[n_even]
resultp = soup.findAll("div", {"class": "field-pair"})[n_even]
else:
n_odd+=1
resultp = soup.findAll("div", {"class" : "field-pair alt"})[n_odd]
resultp = soup.findAll("div", {"class": "field-pair alt"})[n_odd]
if resultp.find("label", {"for" : "nzbkey"}):
if resultp.find("label", {"for": "nzbkey"}):
#logger.fdebug resultp
try:
result = resultp.find("input", {"type" : "text"})
result = resultp.find("input", {"type": "text"})
except:
continue
if result['id'] == "nzbkey":
nzbkey = result['value']
nzbkey = result['value']
logger.fdebug('found SABnzbd NZBKey: ' + str(nzbkey))
return nzbkey
n+=1
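
The even/odd walk above exists because the bundled BeautifulSoup 3 matches class attributes exactly, so the 'field-pair' and 'field-pair alt' row stripes must be interleaved by hand. A sketch of the same scrape under bs4, where multi-valued class matching lets one query cover both (HTML sample invented):

from bs4 import BeautifulSoup   # bs4; the code above uses the bundled BeautifulSoup 3

html = '''
<div class="field-pair"><label for="nzbkey"></label><input type="text" id="nzbkey" value="SECRET"/></div>
<div class="field-pair alt"><label for="other"></label><input type="text" id="other" value="x"/></div>
'''
soup = BeautifulSoup(html, 'html.parser')
# bs4 treats class as multi-valued, so 'field-pair' matches 'field-pair alt' too
for resultp in soup.find_all('div', {'class': 'field-pair'}):
    if resultp.find('label', {'for': 'nzbkey'}):
        result = resultp.find('input', {'type': 'text'})
        if result['id'] == 'nzbkey':
            print(result['value'])   # SECRET
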

View File

@ -43,7 +43,7 @@ class Scheduler:
self.delay = delay
self.initThread()
self.abort = False
def initThread(self):
@ -77,7 +77,7 @@ class Scheduler:
self.action.run()
except Exception, e:
logger.fdebug("Exception generated in thread " + self.threadName + ": %s" % e )
logger.fdebug("Exception generated in thread " + self.threadName + ": %s" % e)
logger.fdebug(repr(traceback.format_exc()))
if self.abort:

View File

@ -69,7 +69,7 @@ def search_init(ComicName, IssueNumber, ComicYear, SeriesYear, Publisher, IssueD
#one-off the download.
logger.fdebug('One-Off Search parameters:')
logger.fdebug("ComicName: " + ComicName)
logger.fdebug("Issue: " + str(IssueNumber))
logger.fdebug("Issue: " + str(IssueNumber))
logger.fdebug("Year: " + str(ComicYear))
logger.fdebug("IssueDate:" + str(IssueDate))
if SARC:
@ -82,7 +82,7 @@ def search_init(ComicName, IssueNumber, ComicYear, SeriesYear, Publisher, IssueD
logger.fdebug("Checking for torrent enabled.")
if mylar.ENABLE_TORRENT_SEARCH: #and mylar.ENABLE_TORRENTS:
if mylar.ENABLE_32P:
torprovider.append('32p')
torprovider.append('32p')
torp+=1
#print torprovider[0]
if mylar.ENABLE_KAT:
@ -98,7 +98,7 @@ def search_init(ComicName, IssueNumber, ComicYear, SeriesYear, Publisher, IssueD
if mylar.DOGNZB == 1:
nzbprovider.append('dognzb')
nzbp+=1
# --------
# --------
# Xperimental
if mylar.EXPERIMENTAL == 1:
nzbprovider.append('experimental')
@ -112,7 +112,7 @@ def search_init(ComicName, IssueNumber, ComicYear, SeriesYear, Publisher, IssueD
#if len(mylar.EXTRA_NEWZNABS > 0):
for newznab_host in mylar.EXTRA_NEWZNABS:
if newznab_host[4] == '1' or newznab_host[4] == 1:
newznab_hosts.append(newznab_host)
newznab_hosts.append(newznab_host)
#if newznab_host[0] == newznab_host[1]:
# nzbprovider.append('newznab')
#else:
@ -131,7 +131,7 @@ def search_init(ComicName, IssueNumber, ComicYear, SeriesYear, Publisher, IssueD
logger.fdebug("there are : " + str(providercount) + " nzb providers you have selected.")
logger.fdebug("Usenet Retention : " + str(mylar.USENET_RETENTION) + " days")
#nzbpr = providercount - 1
#if nzbpr < 0:
#if nzbpr < 0:
# nzbpr == 0
findit = 'no'
@ -143,7 +143,7 @@ def search_init(ComicName, IssueNumber, ComicYear, SeriesYear, Publisher, IssueD
nzbprov = None
return findit, nzbprov
prov_order,newznab_info = provider_sequence(nzbprovider,torprovider,newznab_hosts)
prov_order, newznab_info = provider_sequence(nzbprovider, torprovider, newznab_hosts)
# end provider order sequencing
logger.info('search provider order is ' + str(prov_order))
@ -169,7 +169,7 @@ def search_init(ComicName, IssueNumber, ComicYear, SeriesYear, Publisher, IssueD
searchcnt = 2 #set the searchcnt to 2 (api)
i = 2 #start the counter at api, so it will exit without running RSS
while ( i <= searchcnt ):
while (i <= searchcnt):
#searchmodes:
# rss - will run through the built-cached db of entries
# api - will run through the providers via api (or non-api in the case of Experimental)
@ -179,7 +179,7 @@ def search_init(ComicName, IssueNumber, ComicYear, SeriesYear, Publisher, IssueD
if i == 1: searchmode = 'rss' #order of ops - this will be used first.
elif i == 2: searchmode = 'api'
if findit == 'yes':
if findit == 'yes':
logger.fdebug('Found result on first run, exiting search module now.')
break
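
The searchmode loop above makes at most two passes, the cached RSS db first and then the provider APIs, bailing as soon as a pass succeeds. A skeleton of the control flow (names are illustrative):

def run_search(find_one, api_only=False):
    # api_only mirrors the branch above that starts the counter at 2
    i = 2 if api_only else 1
    findit = 'no'
    while i <= 2:
        searchmode = 'rss' if i == 1 else 'api'
        if findit == 'yes':
            break                    # found on the first pass, stop early
        findit = find_one(searchmode)
        i += 1
    return findit

print(run_search(lambda mode: 'yes' if mode == 'rss' else 'no'))   # yes
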
@ -188,7 +188,7 @@ def search_init(ComicName, IssueNumber, ComicYear, SeriesYear, Publisher, IssueD
#torprtmp = 0 # torprtmp = torpr
prov_count = 0
while (prov_count <= len(prov_order)-1):
while (prov_count <= len(prov_order) -1):
#while (torprtmp <= torpr): #(torprtmp >=0 ):
newznab_host = None
if prov_order[prov_count] == '32p':
@ -219,7 +219,7 @@ def search_init(ComicName, IssueNumber, ComicYear, SeriesYear, Publisher, IssueD
AS_Alternate = AlternateSearch
loopit = len(chkthealt)
for calt in chkthealt:
AS_Alternate = re.sub('##','',calt)
AS_Alternate = re.sub('##', '', calt)
logger.info(u"Alternate Search pattern detected...re-adjusting to : " + str(AS_Alternate) + " " + str(ComicYear))
findit = NZB_SEARCH(AS_Alternate, IssueNumber, ComicYear, SeriesYear, Publisher, IssueDate, StoreDate, searchprov, prov_count, IssDateFix, IssueID, UseFuzzy, newznab_host, ComicVersion=ComicVersion, SARC=SARC, IssueArcID=IssueArcID, RSS="yes", ComicID=ComicID, issuetitle=issuetitle, unaltered_ComicName=AS_Alternate)
if findit == 'yes':
@ -238,7 +238,7 @@ def search_init(ComicName, IssueNumber, ComicYear, SeriesYear, Publisher, IssueD
AS_Alternate = AlternateSearch
loopit = len(chkthealt)
for calt in chkthealt:
AS_Alternate = re.sub('##','',calt)
AS_Alternate = re.sub('##', '', calt)
logger.info(u"Alternate Search pattern detected...re-adjusting to : " + str(AS_Alternate) + " " + str(ComicYear))
findit = NZB_SEARCH(AS_Alternate, IssueNumber, ComicYear, SeriesYear, Publisher, IssueDate, StoreDate, searchprov, prov_count, IssDateFix, IssueID, UseFuzzy, newznab_host, ComicVersion=ComicVersion, SARC=SARC, IssueArcID=IssueArcID, ComicID=ComicID, issuetitle=issuetitle, unaltered_ComicName=unaltered_ComicName)
if findit == 'yes':
@ -251,13 +251,13 @@ def search_init(ComicName, IssueNumber, ComicYear, SeriesYear, Publisher, IssueD
prov_count+=1
#torprtmp+=1 #torprtmp-=1
if findit == 'yes':
if findit == 'yes':
#check for snatched_havetotal being enabled here and adjust counts now.
#IssueID being the catch/check for one-offs as they won't exist on the watchlist and error out otherwise.
if mylar.SNATCHED_HAVETOTAL and IssueID is not None:
logger.fdebug('Adding this to the HAVE total for the series.')
helpers.incr_snatched(ComicID)
return findit, searchprov
helpers.incr_snatched(ComicID)
return findit, searchprov
else:
if manualsearch is None:
logger.info('Finished searching via :' + str(searchmode) + '. Issue not found - status kept as Wanted.')
@ -268,7 +268,7 @@ def search_init(ComicName, IssueNumber, ComicYear, SeriesYear, Publisher, IssueD
return findit, 'None'
def NZB_SEARCH(ComicName, IssueNumber, ComicYear, SeriesYear, Publisher, IssueDate, StoreDate, nzbprov, prov_count, IssDateFix, IssueID, UseFuzzy, newznab_host=None, ComicVersion=None, SARC=None, IssueArcID=None, RSS=None, ComicID=None, issuetitle=None, unaltered_ComicName=None):
if nzbprov == 'nzb.su':
apikey = mylar.NZBSU_APIKEY
elif nzbprov == 'dognzb':
@ -282,7 +282,7 @@ def NZB_SEARCH(ComicName, IssueNumber, ComicYear, SeriesYear, Publisher, IssueDa
apikey = newznab_host[2].rstrip()
if '#' in newznab_host[3].rstrip():
catstart = newznab_host[3].find('#')
category_newznab = newznab_host[3][catstart+1:]
category_newznab = newznab_host[3][catstart +1:]
logger.fdebug('non-default Newznab category set to :' + str(category_newznab))
else:
category_newznab = '7030'
@ -308,8 +308,8 @@ def NZB_SEARCH(ComicName, IssueNumber, ComicYear, SeriesYear, Publisher, IssueDa
if mylar.PREFERRED_QUALITY == 0: filetype = ""
elif mylar.PREFERRED_QUALITY == 1: filetype = ".cbr"
elif mylar.PREFERRED_QUALITY == 2: filetype = ".cbz"
#UseFuzzy == 0: Normal
#UseFuzzy == 0: Normal
#UseFuzzy == 1: Remove Year
#UseFuzzy == 2: Fuzzy Year
# figure out what was missed via rss feeds and do a manual search via api
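
On the UseFuzzy modes noted above, a condensed sketch of how the three settings play out against a result's year (the +/- 1 window mirrors the ComUp/ComDwn check further down; the original does a substring rather than an exact comparison):

def year_matches(result_year, comic_year, use_fuzzy):
    # UseFuzzy 0/None: exact year; 1: ignore the year entirely;
    # 2: accept comic_year plus or minus one
    if use_fuzzy == '1':
        return True
    if use_fuzzy == '2':
        return str(result_year) in (str(int(comic_year) - 1),
                                    str(comic_year),
                                    str(int(comic_year) + 1))
    return str(result_year) == str(comic_year)

print(year_matches('2014', '2013', '2'))   # True
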
@ -346,7 +346,7 @@ def NZB_SEARCH(ComicName, IssueNumber, ComicYear, SeriesYear, Publisher, IssueDa
intIss = helpers.issuedigits(IssueNumber)
iss = IssueNumber
if u'\xbd' in IssueNumber:
findcomiciss = '0.5'
findcomiciss = '0.5'
elif u'\xbc' in IssueNumber:
findcomiciss = '0.25'
elif u'\xbe' in IssueNumber:
@ -380,7 +380,7 @@ def NZB_SEARCH(ComicName, IssueNumber, ComicYear, SeriesYear, Publisher, IssueDa
break
elif '.' in findcomiciss[i]:
c_number = findcomiciss[:i].rstrip()
c_num_a4 = findcomiciss[i+1:].rstrip()
c_num_a4 = findcomiciss[i +1:].rstrip()
#if decimal separates numeric from alpha (ie - 7.INH)
#don't give calpha a value or else will separate with a space further down
#assign it to dsp_c_alpha so that it can be displayed for debugging.
@ -392,9 +392,9 @@ def NZB_SEARCH(ComicName, IssueNumber, ComicYear, SeriesYear, Publisher, IssueDa
i+=1
logger.fdebug("calpha/cnumber: " + str(dsp_c_alpha) + " / " + str(c_number))
if c_number is None:
if c_number is None:
c_number = findcomiciss # if it's None, means no special alphas or decimals
if len(c_number) == 1:
cmloopit = 3
elif len(c_number) == 2:
@ -418,9 +418,9 @@ def NZB_SEARCH(ComicName, IssueNumber, ComicYear, SeriesYear, Publisher, IssueDa
# if issue is '011' instead of '11' in nzb search results, will not have same
# results. '011' will return different than '11', as will '009' and '09'.
while (findloop < findcount ):
while (findloop < findcount):
comsrc = comsearch
while (cmloopit >= 1 ):
while (cmloopit >= 1):
#if issue_except is None: issue_exc = ''
#else: issue_exc = issue_except
if done is True and seperatealpha == "no":
@ -430,7 +430,7 @@ def NZB_SEARCH(ComicName, IssueNumber, ComicYear, SeriesYear, Publisher, IssueDa
# here we account for issue pattern variations
if seperatealpha == "yes":
isssearch = str(c_number) + "%20" + str(c_alpha)
if cmloopit == 3:
comsearch = comsrc + "%2000" + str(isssearch) #+ "%20" + str(filetype)
issdig = '00'
@ -451,7 +451,7 @@ def NZB_SEARCH(ComicName, IssueNumber, ComicYear, SeriesYear, Publisher, IssueDa
if nzbprov == '32P' or nzbprov == 'KAT':
cmname = re.sub("%20", " ", str(comsrc))
logger.fdebug("Sending request to [" + str(nzbprov) + "] RSS for " + str(findcomic) + " : " + str(mod_isssearch))
bb = rsscheck.torrentdbsearch(findcomic,mod_isssearch,ComicID,nzbprov)
bb = rsscheck.torrentdbsearch(findcomic, mod_isssearch, ComicID, nzbprov)
rss = "yes"
#if bb is not None: logger.fdebug("bb results: " + str(bb))
else:
@ -460,12 +460,12 @@ def NZB_SEARCH(ComicName, IssueNumber, ComicYear, SeriesYear, Publisher, IssueDa
if nzbprov == 'newznab':
nzbprov_fix = name_newznab
else: nzbprov_fix = nzbprov
bb = rsscheck.nzbdbsearch(findcomic,mod_isssearch,ComicID,nzbprov_fix,ComicYear,ComicVersion)
bb = rsscheck.nzbdbsearch(findcomic, mod_isssearch, ComicID, nzbprov_fix, ComicYear, ComicVersion)
rss = "yes"
#if bb is not None: logger.fdebug("bb results: " + str(bb))
#this is the API calls
else:
#32P is redudant now since only RSS works
#32P is redundant now since only RSS works
# - just getting it ready for when it's not redundant :)
if nzbprov == '':
bb = "no results"
@ -474,7 +474,7 @@ def NZB_SEARCH(ComicName, IssueNumber, ComicYear, SeriesYear, Publisher, IssueDa
elif nzbprov == 'KAT':
cmname = re.sub("%20", " ", str(comsrc))
logger.fdebug("Sending request to [KAT] for " + str(cmname) + " : " + str(mod_isssearch))
bb = rsscheck.torrents(pickfeed='KAT',seriesname=cmname,issue=mod_isssearch)#cmname,issue=mod_isssearch)
bb = rsscheck.torrents(pickfeed='KAT', seriesname=cmname, issue=mod_isssearch)#cmname,issue=mod_isssearch)
rss = "no"
#if bb is not None: logger.fdebug("results: " + str(bb))
elif nzbprov != 'experimental':
@ -484,14 +484,14 @@ def NZB_SEARCH(ComicName, IssueNumber, ComicYear, SeriesYear, Publisher, IssueDa
findurl = "https://api.nzb.su/api?t=search&q=" + str(comsearch) + "&o=xml&cat=7030"
elif nzbprov == 'newznab':
#let's make sure the host has a '/' at the end, if not add it.
if host_newznab[len(host_newznab)-1:len(host_newznab)] != '/':
if host_newznab[len(host_newznab) -1:len(host_newznab)] != '/':
host_newznab_fix = str(host_newznab) + "/"
else: host_newznab_fix = host_newznab
findurl = str(host_newznab_fix) + "api?t=search&q=" + str(comsearch) + "&o=xml&cat=" + str(category_newznab)
if nzbprov != 'nzbx':
# helper function to replace apikey here so we avoid logging it ;)
findurl = findurl + "&apikey=" + str(apikey)
logsearch = helpers.apiremove(str(findurl),'nzb')
logsearch = helpers.apiremove(str(findurl), 'nzb')
logger.fdebug("search-url: " + str(logsearch))
### IF USENET_RETENTION is set, honour it
@ -564,7 +564,7 @@ def NZB_SEARCH(ComicName, IssueNumber, ComicYear, SeriesYear, Publisher, IssueDa
done = False
foundc = "no"
log2file = ""
if bb == "no results":
if bb == "no results":
pass
foundc = "no"
else:
@ -588,7 +588,7 @@ def NZB_SEARCH(ComicName, IssueNumber, ComicYear, SeriesYear, Publisher, IssueDa
else:
tmpsz = entry.enclosures[0]
comsize_b = tmpsz['length']
#file restriction limitation here
#only works with KAT (done here) & 32P (done in rsscheck) & Experimental (has it embedded in search and rss checks)
if nzbprov == 'KAT':
@ -604,7 +604,7 @@ def NZB_SEARCH(ComicName, IssueNumber, ComicYear, SeriesYear, Publisher, IssueDa
else:
logger.fdebug('Quality restriction enforced [ .cbz only ]. Rejecting this result.')
continue
if comsize_b is None:
logger.fdebug('Size of file cannot be retrieved. Ignoring size-comparison and continuing.')
#comsize_b = 0
@ -645,7 +645,7 @@ def NZB_SEARCH(ComicName, IssueNumber, ComicYear, SeriesYear, Publisher, IssueDa
logger.fdebug('Year has been fuzzied for this series, ignoring store date comparison entirely.')
else:
#use store date instead of publication date for comparisons since publication date is usually +2 months
#use store date instead of publication date for comparisons since publication date is usually +2 months
if StoreDate is None or StoreDate == '0000-00-00':
if IssueDate is None or IssueDate == '0000-00-00':
logger.fdebug('Invalid store date & issue date detected - you probably should refresh the series or wait for CV to correct the data')
@ -658,9 +658,9 @@ def NZB_SEARCH(ComicName, IssueNumber, ComicYear, SeriesYear, Publisher, IssueDa
dateconv = email.utils.parsedate_tz(pubdate)
# convert it to a numeric time, then subtract the timezone difference (+/- GMT)
if dateconv[-1] is not None:
postdate_int = time.mktime(dateconv[:len(dateconv)-1]) - dateconv[-1]
postdate_int = time.mktime(dateconv[:len(dateconv) -1]) - dateconv[-1]
else:
postdate_int = time.mktime(dateconv[:len(dateconv)-1])
postdate_int = time.mktime(dateconv[:len(dateconv) -1])
#convert it to a Thu, 06 Feb 2014 00:00:00 format
issue_convert = datetime.datetime.strptime(stdate.rstrip(), '%Y-%m-%d')
# to get past different locale's os-dependent dates, let's convert it to a generic datetime format
@ -674,13 +674,13 @@ def NZB_SEARCH(ComicName, IssueNumber, ComicYear, SeriesYear, Publisher, IssueDa
econv = email.utils.parsedate_tz(issconv)
#convert it to a numeric and drop the GMT/Timezone
try:
issuedate_int = time.mktime(econv[:len(econv)-1])
issuedate_int = time.mktime(econv[:len(econv) -1])
except OverflowError:
logger.fdebug('Unable to convert timestamp to integer format. Forcing things through.')
isyear = econv[1]
epochyr = '1970'
if int(isyear) <= int(epochyr):
tm = datetime.datetime(1970,1,1)
tm = datetime.datetime(1970, 1, 1)
issuedate_int = int(time.mktime(tm.timetuple()))
else:
continue
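
The pubdate-versus-store-date comparison above reduces to: parse the RFC 2822 pubdate, subtract the timezone offset when one is present, and compare epochs. A sketch (email.utils.mktime_tz is the stdlib shortcut for roughly the same computation, though the original builds it by hand on the local-time-based time.mktime):

import datetime
import email.utils
import time

def to_epoch(rfc2822_date):
    # parsedate_tz returns a 10-tuple whose last slot is the GMT offset
    dateconv = email.utils.parsedate_tz(rfc2822_date)
    if dateconv[-1] is not None:
        return time.mktime(dateconv[:-1]) - dateconv[-1]
    return time.mktime(dateconv[:-1])

postdate_int = to_epoch('Thu, 06 Feb 2014 12:00:00 +0000')
issue_convert = datetime.datetime.strptime('2014-02-01', '%Y-%m-%d')
issuedate_int = time.mktime(issue_convert.timetuple())
print(postdate_int >= issuedate_int)   # True: posted on/after the store date
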
@ -719,7 +719,7 @@ def NZB_SEARCH(ComicName, IssueNumber, ComicYear, SeriesYear, Publisher, IssueDa
# this is new - if title contains a '&' in the title it will assume the filename has ended at that point
# which causes false positives (ie. wolverine & the x-men becomes the x-men, which matches on x-men.
# 'the' is removed for comparisons later on
if '&' in cleantitle: cleantitle = re.sub('[\&]','and', cleantitle)
if '&' in cleantitle: cleantitle = re.sub('[\&]', 'and', cleantitle)
nzbname = cleantitle
@ -735,7 +735,7 @@ def NZB_SEARCH(ComicName, IssueNumber, ComicYear, SeriesYear, Publisher, IssueDa
vers4vol = "no"
versionfound = "no"
if any( ['cover only' in cleantitle.lower(), 'variant' in cleantitle.lower()] ):
if any(['cover only' in cleantitle.lower(), 'variant' in cleantitle.lower()]):
logger.fdebug("Ignoring title as Cover/Variant Only detected.")
cleantitle = "abcdefghijk 0 (1901).cbz"
continue
@ -786,8 +786,8 @@ def NZB_SEARCH(ComicName, IssueNumber, ComicYear, SeriesYear, Publisher, IssueDa
logger.fdebug("false version detection..ignoring.")
if len(re.findall('[^()]+', cleantitle)) == 1 or 'cover only' in cleantitle.lower():
if len(re.findall('[^()]+', cleantitle)) == 1 or 'cover only' in cleantitle.lower():
#some sites don't have (2013) or whatever..just v2 / v2013. Let's adjust:
#this handles when there is NO YEAR present in the title, otherwise versioning is way below.
if vers4year == "no" and vers4vol == "no":
@ -819,7 +819,7 @@ def NZB_SEARCH(ComicName, IssueNumber, ComicYear, SeriesYear, Publisher, IssueDa
ripperlist=['digital-',
'empire',
'dcp']
#this takes care of the brackets :)
#this takes care of the brackets :)
m = re.findall('[^()]+', cleantitle)
lenm = len(m)
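
The bracket tokenising above is worth seeing on a concrete (invented) title: re.findall('[^()]+', ...) yields the series text first, then the bracketed fragments, with bare ' ' entries in between, which is why the loop below special-cases m[cnt] == ' ':

import re

cleantitle = 'Invincible 050 (2008) (digital) (Minutemen-DCP)'
m = re.findall('[^()]+', cleantitle)
print(m)   # ['Invincible 050 ', '2008', ' ', 'digital', ' ', 'Minutemen-DCP']
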
@ -831,9 +831,9 @@ def NZB_SEARCH(ComicName, IssueNumber, ComicYear, SeriesYear, Publisher, IssueDa
while (cnt < lenm):
#print 'm[cnt]: ' + str(m[cnt])
if m[cnt] is None: break
if m[cnt] == ' ':
if m[cnt] == ' ':
pass
else:
else:
logger.fdebug(str(cnt) + ". Bracket Word: " + str(m[cnt]))
if cnt == 0:
comic_andiss = m[cnt]
@ -850,7 +850,7 @@ def NZB_SEARCH(ComicName, IssueNumber, ComicYear, SeriesYear, Publisher, IssueDa
logger.fdebug("Series version detected as V1 (only series in existance with that title). Bypassing Year/Volume check")
yearmatch = "true"
elif UseFuzzy == "0" or UseFuzzy == "2" or UseFuzzy is None or IssDateFix != "no":
if m[cnt][:-2] == '19' or m[cnt][:-2] == '20':
if m[cnt][:-2] == '19' or m[cnt][:-2] == '20':
logger.fdebug('year detected: ' + str(m[cnt]))
result_comyear = m[cnt]
logger.fdebug('year looking for: ' + str(comyear))
@ -865,8 +865,8 @@ def NZB_SEARCH(ComicName, IssueNumber, ComicYear, SeriesYear, Publisher, IssueDa
ComUp = int(ComicYear) + 1
ComDwn = int(ComicYear) - 1
if str(ComUp) in result_comyear or str(ComDwn) in result_comyear:
logger.fdebug("Fuzzy Logic'd the Year and got a match with a year of " + str(result_comyear))
yearmatch = "true"
logger.fdebug("Fuzzy Logic'd the Year and got a match with a year of " + str(result_comyear))
yearmatch = "true"
else:
logger.fdebug(str(comyear) + "Fuzzy logic'd the Year and year still didn't match.")
#let's do this here and save a few extra loops ;)
@ -894,7 +894,7 @@ def NZB_SEARCH(ComicName, IssueNumber, ComicYear, SeriesYear, Publisher, IssueDa
#have this to distinguish different titles), let's remove it entirely.
logger.fdebug('Publisher detected within title : ' + str(m[cnt]))
logger.fdebug('cnt is : ' + str(cnt) + ' --- Publisher is: ' + Publisher)
pub_removed = m[cnt]
pub_removed = m[cnt]
#-strip publisher if exists here-
logger.fdebug('removing publisher from title')
cleantitle_pubremoved = re.sub(pub_removed, '', cleantitle)
@ -908,13 +908,13 @@ def NZB_SEARCH(ComicName, IssueNumber, ComicYear, SeriesYear, Publisher, IssueDa
cnt = 0
yearmatch = "false"
continue
if 'digital' in m[cnt] and len(m[cnt]) == 7:
if 'digital' in m[cnt] and len(m[cnt]) == 7:
logger.fdebug("digital edition detected")
pass
if ' of ' in m[cnt]:
logger.fdebug("mini-series detected : " + str(m[cnt]))
result_of = m[cnt]
if 'cover' in m[cnt]:
if 'cover' in m[cnt]:
logger.fdebug("covers detected: " + str(m[cnt]))
result_comcovers = m[cnt]
for ripper in ripperlist:
@ -923,10 +923,10 @@ def NZB_SEARCH(ComicName, IssueNumber, ComicYear, SeriesYear, Publisher, IssueDa
result_comscanner = m[cnt]
cnt+=1
if yearmatch == "false": continue
splitit = []
splitit = []
watchcomic_split = []
logger.fdebug("original nzb comic and issue: " + str(comic_andiss))
logger.fdebug("original nzb comic and issue: " + str(comic_andiss))
#scan the returned name to see if it contains a '-', which typically denotes the start of an issuetitle
#if the series doesn't have a '-' within it.
hyphensplit = None
@ -949,7 +949,7 @@ def NZB_SEARCH(ComicName, IssueNumber, ComicYear, SeriesYear, Publisher, IssueDa
logger.fdebug('First word of issue stored as : ' + str(issue_firstword))
except:
if m.start() + 2 > len(comic_andiss.strip()):
issue_firstword = None
issue_firstword = None
else:
logger.fdebug('Unable to parse title due to no space between hyphen. Ignoring this result.')
hyphenfail = True
@ -961,7 +961,7 @@ def NZB_SEARCH(ComicName, IssueNumber, ComicYear, SeriesYear, Publisher, IssueDa
#changed this from '' to ' '
comic_iss_b4 = re.sub('[\-\:\,\?\!]', ' ', comic_andiss)
comic_iss_b4 = re.sub('\'', '', comic_iss_b4)
comic_iss = comic_iss_b4.replace('.',' ')
comic_iss = comic_iss_b4.replace('.', ' ')
#if issue_except: comic_iss = re.sub(issue_except.lower(), '', comic_iss)
logger.fdebug("adjusted nzb comic and issue: " + comic_iss)
@ -969,16 +969,16 @@ def NZB_SEARCH(ComicName, IssueNumber, ComicYear, SeriesYear, Publisher, IssueDa
#something happened to dognzb searches or results...added a '.' in place of spaces
#screwed up most search results with dognzb. Let's try to adjust.
#watchcomic_split = findcomic[findloop].split(None)
if splitit[(len(splitit)-1)].isdigit():
if splitit[(len(splitit) -1)].isdigit():
#compares - if the last digit and second last digit are #'s separated by spaces assume decimal
comic_iss = splitit[(len(splitit)-1)]
comic_iss = splitit[(len(splitit) -1)]
splitst = len(splitit) - 1
if splitit[(len(splitit)-2)].isdigit():
if splitit[(len(splitit) -2)].isdigit():
# for series that have a digit at the end, it screws up the logistics.
i = 1
chg_comic = splitit[0]
while (i < (len(splitit)-1)):
while (i < (len(splitit) -1)):
chg_comic = chg_comic + " " + splitit[i]
i+=1
logger.fdebug("chg_comic:" + str(chg_comic))
@ -993,13 +993,13 @@ def NZB_SEARCH(ComicName, IssueNumber, ComicYear, SeriesYear, Publisher, IssueDa
if chg_comic.upper() in findcomic_chksplit.upper():
logger.fdebug("series contains numerics...adjusting..")
else:
changeup = "." + splitit[(len(splitit)-1)]
changeup = "." + splitit[(len(splitit) -1)]
logger.fdebug("changeup to decimal: " + str(changeup))
comic_iss = splitit[(len(splitit)-2)] + "." + comic_iss
comic_iss = splitit[(len(splitit) -2)] + "." + comic_iss
splitst = len(splitit) - 2
else:
#if the issue is alphanumeric (ie. 15AU, 12A) it'll error.
tmpiss = splitit[(len(splitit)-1)]
tmpiss = splitit[(len(splitit) -1)]
i = 0
alphas = None
a_issno = None
@ -1007,14 +1007,14 @@ def NZB_SEARCH(ComicName, IssueNumber, ComicYear, SeriesYear, Publisher, IssueDa
if tmpiss[i].isalpha():
#take first occurance of alpha in string and carry it through
alphas = tmpiss[i:].rstrip()
a_issno = tmpiss[:i+1].rstrip()
a_issno = tmpiss[:i +1].rstrip()
break
i+=1
logger.fdebug("alphas: " + str(alphas))
logger.fdebug("a_issno: " + str(a_issno))
if alphas is None:
# if the nzb name doesn't follow the series-issue-year format even closely..ignore nzb
logger.fdebug("invalid naming format of nzb detected - cannot properly determine issue")
logger.fdebug("invalid naming format of nzb detected - cannot properly determine issue")
continue
else:
if a_issno == '' and alphas is not None:
@ -1022,13 +1022,13 @@ def NZB_SEARCH(ComicName, IssueNumber, ComicYear, SeriesYear, Publisher, IssueDa
#print 'splitit: ' + splitit[(len(splitit)-2)]
#print 'splitit: ' + splitit[(len(splitit)-1)]
#if there' a space between the issue & alpha, join them.
findstart = thisentry.find(splitit[(len(splitit)-1)])
findstart = thisentry.find(splitit[(len(splitit) -1)])
#print 'thisentry : ' + thisentry
#print 'decimal location : ' + str(findstart)
if thisentry[findstart-1] == '.':
comic_iss = splitit[(len(splitit)-2)] + '.' + splitit[(len(splitit)-1)]
if thisentry[findstart -1] == '.':
comic_iss = splitit[(len(splitit) -2)] + '.' + splitit[(len(splitit) -1)]
else:
comic_iss = splitit[(len(splitit)-2)] + splitit[(len(splitit)-1)]
comic_iss = splitit[(len(splitit) -2)] + splitit[(len(splitit) -1)]
logger.fdebug('comic_iss is : ' + str(comic_iss))
splitst = len(splitit) - 2
else:
@ -1042,11 +1042,11 @@ def NZB_SEARCH(ComicName, IssueNumber, ComicYear, SeriesYear, Publisher, IssueDa
# make sure that things like - in watchcomic are accounted for when comparing to nzb.
findcomic = re.sub('[\/]', ' ', findcomic)
watchcomic_split = helpers.cleanName(str(findcomic))
if '&' in watchcomic_split: watchcomic_split = re.sub('[/&]','and', watchcomic_split)
if '&' in watchcomic_split: watchcomic_split = re.sub('[/&]', 'and', watchcomic_split)
watchcomic_nonsplit = re.sub('[\-\:\,\.\?]', ' ', watchcomic_split)
watchcomic_nonsplit = re.sub('\'', '', watchcomic_nonsplit)
watchcomic_split = watchcomic_nonsplit.split(None)
logger.fdebug(str(splitit) + " nzb series word count: " + str(splitst))
logger.fdebug(str(watchcomic_split) + " watchlist word count: " + str(len(watchcomic_split)))
#account for possible version inclusion here and annual inclusions.
@ -1075,7 +1075,7 @@ def NZB_SEARCH(ComicName, IssueNumber, ComicYear, SeriesYear, Publisher, IssueDa
logger.fdebug('volume detected - stripping and re-analzying for volume label.')
if '.' in tmpsplit:
tmpsplit = re.sub('.', '', tmpsplit).strip()
tmpsplit = re.sub('vol','', tmpsplit.lower()).strip()
tmpsplit = re.sub('vol', '', tmpsplit.lower()).strip()
#if vol label set as 'Vol 4' it will obliterate the Vol, but pass over the '4' - set
#volfound to True so that it can loop back around.
if not tmpsplit.isdigit():
@ -1133,7 +1133,7 @@ def NZB_SEARCH(ComicName, IssueNumber, ComicYear, SeriesYear, Publisher, IssueDa
#need to convert dates to just be yyyy-mm-dd and do comparison, time operator in the below calc as well which probably throws off some accuracy.
if postdate_int >= issuedate_int and nzbprov == '32P':
logger.fdebug('32P torrent discovery. Store date (' + str(stdate) + ') is before posting date (' + str(pubdate) + '), forcing volume label to be the same as series label (0-Day Enforcement): v' + str(F_ComicVersion) + ' --> v' + str(S_ComicVersion))
F_ComicVersion = D_ComicVersion
F_ComicVersion = D_ComicVersion
else:
F_ComicVersion = '1'
@ -1157,13 +1157,13 @@ def NZB_SEARCH(ComicName, IssueNumber, ComicYear, SeriesYear, Publisher, IssueDa
logger.fdebug("Versions wrong. Ignoring possible match.")
scount = 0
cvers = "false"
if cvers == "true":
#since we matched on versions, let's remove it entirely to improve matching.
logger.fdebug('Removing versioning from nzb filename to improve matching algorithims.')
cissb4vers = re.sub(tstsplit, "", comic_iss_b4).strip()
logger.fdebug('New b4split : ' + str(cissb4vers))
splitit = cissb4vers.split(None)
splitit = cissb4vers.split(None)
splitst -=1
break
@ -1239,7 +1239,7 @@ def NZB_SEARCH(ComicName, IssueNumber, ComicYear, SeriesYear, Publisher, IssueDa
logger.fdebug("search-length: " + str(splitst))
logger.fdebug("Watchlist-length: " + str(len(watchcomic_split)))
if cvers == "true": splitst = splitst + 1
while ( n <= (splitst)-1 ):
while (n <= (splitst) -1):
logger.fdebug("splitit: " + str(splitit[n]))
logger.fdebug("scount : " + str(scount))
if n < (splitst) and n < len(watchcomic_split):
@ -1277,7 +1277,7 @@ def NZB_SEARCH(ComicName, IssueNumber, ComicYear, SeriesYear, Publisher, IssueDa
totalcnt = int(splitst)
logger.fdebug("splitit-len:" + str(totalcnt))
try:
spercent = (wordcnt/totalcnt) * 100
spercent = (wordcnt /totalcnt) * 100
except ZeroDivisionError:
spercent = 0
logger.fdebug("Match to series : " + str(spercent) + " %.")
@ -1299,7 +1299,7 @@ def NZB_SEARCH(ComicName, IssueNumber, ComicYear, SeriesYear, Publisher, IssueDa
logger.fdebug("issue we found for is : " + str(comic_iss))
comintIss = helpers.issuedigits(comic_iss)
logger.fdebug("integer value of issue we have found : " + str(comintIss))
#issue comparison now as well
if int(intIss) == int(comintIss):
@ -1339,23 +1339,23 @@ def NZB_SEARCH(ComicName, IssueNumber, ComicYear, SeriesYear, Publisher, IssueDa
log2file = log2file + "issues don't match.." + "\n"
foundc = "no"
if done == True:
cmloopit == 1 #let's make sure it STOPS searching after a sucessful match.
cmloopit == 1 #let's make sure it STOPS searching after a successful match.
break
cmloopit-=1
if cmloopit < 1 and c_alpha is not None and seperatealpha == "no" and foundc == "no":
logger.info("Alphanumerics detected within IssueNumber. Seperating from Issue # and re-trying.")
cmloopit = origcmloopit
cmloopit = origcmloopit
seperatealpha = "yes"
findloop+=1
if foundc == "yes":
foundcomic.append("yes")
if alt_nzbname is None or alt_nzbname == '':
logger.fdebug("Found matching comic...preparing to send to Updater with IssueID: " + str(IssueID) + " and nzbname: " + str(nzbname))
if '[RSS]' in tmpprov : tmpprov = re.sub('\[RSS\]','', tmpprov).strip()
if '[RSS]' in tmpprov: tmpprov = re.sub('\[RSS\]', '', tmpprov).strip()
updater.nzblog(IssueID, nzbname, ComicName, SARC=SARC, IssueArcID=IssueArcID, id=nzbid, prov=tmpprov)
else:
logger.fdebug("Found matching comic...preparing to send to Updater with IssueID: " + str(IssueID) + " and nzbname: " + str(nzbname) + '[' + alt_nzbname + ']')
if '[RSS]' in tmpprov : tmpprov = re.sub('\[RSS\]','', tmpprov).strip()
if '[RSS]' in tmpprov: tmpprov = re.sub('\[RSS\]', '', tmpprov).strip()
updater.nzblog(IssueID, nzbname, ComicName, SARC=SARC, IssueArcID=IssueArcID, id=nzbid, prov=tmpprov, alt_nzbname=alt_nzbname)
# #send out the notifications for the snatch.
notify_snatch(nzbname, sent_to, helpers.filesafe(modcomicname), comyear, IssueNumber, nzbprov)
@ -1437,18 +1437,18 @@ def searchforissue(issueid=None, new=False, rsscheck=None):
StoreDate = result['StoreDate']
UseFuzzy = comic['UseFuzzy']
ComicVersion = comic['ComicVersion']
if result['IssueDate'] == None:
if result['IssueDate'] == None:
ComicYear = comic['ComicYear']
else:
else:
ComicYear = str(result['IssueDate'])[:4]
mode = result['mode']
if (mylar.NZBSU or mylar.DOGNZB or mylar.EXPERIMENTAL or mylar.NEWZNAB or mylar.ENABLE_KAT or mylar.ENABLE_32P) and (mylar.USE_SABNZBD or mylar.USE_NZBGET or mylar.ENABLE_TORRENTS or mylar.USE_BLACKHOLE):
foundNZB, prov = search_init(comic['ComicName'], result['Issue_Number'], str(ComicYear), comic['ComicYear'], Publisher, IssueDate, StoreDate, result['IssueID'], AlternateSearch, UseFuzzy, ComicVersion, SARC=None, IssueArcID=None, mode=mode, rsscheck=rsscheck, ComicID=result['ComicID'], filesafe=comic['ComicName_Filesafe'])
if foundNZB == "yes":
if foundNZB == "yes":
#print ("found!")
updater.foundsearch(result['ComicID'], result['IssueID'], mode=mode, provider=prov)
else:
pass
pass
#print ("not found!")
if rsscheck:
@ -1487,7 +1487,7 @@ def searchforissue(issueid=None, new=False, rsscheck=None):
logger.fdebug("I found " + comic['ComicName'] + ' #:' + str(result['Issue_Number']))
updater.foundsearch(ComicID=result['ComicID'], IssueID=result['IssueID'], mode=mode, provider=prov)
else:
pass
pass
#print ("not found!")
return
@ -1543,7 +1543,7 @@ def provider_sequence(nzbprovider, torprovider, newznab_hosts):
# this is for nzb providers
for np in nzbprovider:
logger.fdebug('checking against nzb provider: ' + str(np))
if all( [ 'newznab' in np, pr_order[1].lower() in np.lower() ] ):
if all(['newznab' in np, pr_order[1].lower() in np.lower()]):
logger.fdebug('newznab match against: ' + str(np))
for newznab_host in newznab_hosts:
#logger.fdebug('comparing ' + str(pr_order[1]).lower() + ' against: ' + str(newznab_host[0]).lower())
@ -1573,7 +1573,7 @@ def provider_sequence(nzbprovider, torprovider, newznab_hosts):
logger.fdebug('provider order sequence is now to start with ' + pr_order[1] + ' at spot #' + str(pr_order[0]))
return prov_order,newznab_info
return prov_order, newznab_info
def nzbname_create(provider, title=None, info=None):
#the nzbname here is used when post-processing
@ -1605,7 +1605,7 @@ def nzbname_create(provider, title=None, info=None):
elif provider == '32P' or provider == 'KAT':
#filesafe the name cause people are idiots when they post sometimes.
nzbname = re.sub('\s{2,}',' ', helpers.filesafe(title)).strip()
nzbname = re.sub('\s{2,}', ' ', helpers.filesafe(title)).strip()
#let's change all space to decimals for simplicity
nzbname = re.sub(" ", ".", nzbname)
#gotta replace & or escape it
@ -1625,7 +1625,7 @@ def nzbname_create(provider, title=None, info=None):
nzbname = re.sub('.cbr', '', nzbname).strip()
nzbname = re.sub('.cbz', '', nzbname).strip()
nzbname = re.sub('[\.\_]', ' ', nzbname).strip()
nzbname = re.sub('\s+',' ', nzbname) #make sure we remove the extra spaces.
nzbname = re.sub('\s+', ' ', nzbname) #make sure we remove the extra spaces.
logger.fdebug('[SEARCHER] nzbname (\s): ' + nzbname)
nzbname = re.sub(' ', '.', nzbname)
logger.fdebug('[SEARCHER] end nzbname: ' + nzbname)
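
The nzbname cleanup above, condensed: strip the extension, flatten dots and underscores, squeeze whitespace, then re-join on dots so the name survives the downloader round-trip. The $-anchored extension patterns are my tightening; the original's unescaped '.cbr' would also match mid-name.

import re

def normalize_nzbname(title):
    nzbname = re.sub(r'\s{2,}', ' ', title).strip()
    nzbname = re.sub(r'\.cbr$|\.cbz$', '', nzbname).strip()
    nzbname = re.sub(r'[\.\_]', ' ', nzbname).strip()
    nzbname = re.sub(r'\s+', ' ', nzbname)
    return re.sub(' ', '.', nzbname)

print(normalize_nzbname('Saga 025 (2014) (digital).cbz'))   # Saga.025.(2014).(digital)
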
@ -1683,9 +1683,9 @@ def searcher(nzbprov, nzbname, comicinfo, link, IssueID, ComicID, tmpprov, direc
if nzbprov == 'newznab':
name_newznab = newznab[0].rstrip()
host_newznab = newznab[1].rstrip()
if host_newznab[len(host_newznab)-1:len(host_newznab)] != '/':
if host_newznab[len(host_newznab) -1:len(host_newznab)] != '/':
host_newznab_fix = str(host_newznab) + "/"
else:
else:
host_newznab_fix = host_newznab
apikey = newznab[2].rstrip()
@ -1726,7 +1726,7 @@ def searcher(nzbprov, nzbname, comicinfo, link, IssueID, ComicID, tmpprov, direc
disable_warnings()
except:
logger.warn('Unable to disable https warnings. Expect some spam if using https nzb providers.')
try:
r = requests.get(down_url, params=payload, verify=verify, headers=headers)
@ -1772,13 +1772,13 @@ def searcher(nzbprov, nzbname, comicinfo, link, IssueID, ComicID, tmpprov, direc
logger.fdebug('[FILENAME] filename (remove chars): ' + filen)
filen = re.sub('.cbr', '', filen).strip()
filen = re.sub('.cbz', '', filen).strip()
filen = re.sub('\s+',' ', filen) #make sure we remove the extra spaces.
filen = re.sub('\s+', ' ', filen) #make sure we remove the extra spaces.
logger.fdebug('[FILENAME] nzbname (\s): ' + filen)
filen = re.sub(' ', '.', filen)
logger.fdebug('[FILENAME] end nzbname: ' + filen)
if re.sub('.nzb','', filen.lower()).strip() != re.sub('.nzb','', nzbname.lower()).strip():
alt_nzbname = re.sub('.nzb','', filen).strip()
if re.sub('.nzb', '', filen.lower()).strip() != re.sub('.nzb', '', nzbname.lower()).strip():
alt_nzbname = re.sub('.nzb', '', filen).strip()
alt_nzbname = re.sub('[\s+]', ' ', alt_nzbname)
alt_nzbname = re.sub('[\s\_]', '.', alt_nzbname)
logger.info('filen: ' + alt_nzbname + ' -- nzbname: ' + nzbname + ' are not identical. Storing extra value as : ' + alt_nzbname)
@ -1834,7 +1834,7 @@ def searcher(nzbprov, nzbname, comicinfo, link, IssueID, ComicID, tmpprov, direc
pass
logger.fdebug('issues match!')
logger.info(u"Found " + ComicName + " (" + str(comyear) + ") issue: " + IssueNumber + " using " + str(tmpprov) )
logger.info(u"Found " + ComicName + " (" + str(comyear) + ") issue: " + IssueNumber + " using " + str(tmpprov))
logger.fdebug("link given by: " + str(nzbprov))
@ -1856,7 +1856,7 @@ def searcher(nzbprov, nzbname, comicinfo, link, IssueID, ComicID, tmpprov, direc
shutil.move(nzbpath, os.path.join(mylar.BLACKHOLE_DIR, nzbname))
except (OSError, IOError):
logger.warn('Failed to move nzb into blackhole directory - check blackhole directory and/or permissions.')
return "blackhole-fail"
return "blackhole-fail"
logger.fdebug("filename saved to your blackhole as : " + nzbname)
logger.info(u"Successfully sent .nzb to your Blackhole directory : " + os.path.join(mylar.BLACKHOLE_DIR, nzbname))
@ -1909,7 +1909,7 @@ def searcher(nzbprov, nzbname, comicinfo, link, IssueID, ComicID, tmpprov, direc
in_file.close()
from base64 import standard_b64encode
nzbcontent64 = standard_b64encode(nzbcontent)
tmpapi = str(tmpapi) + str(mylar.NZBGET_USERNAME) + ":" + str(mylar.NZBGET_PASSWORD)
tmpapi = str(tmpapi) + "@" + str(nzbget_host) + ":" + str(mylar.NZBGET_PORT) + "/xmlrpc"
server = ServerProxy(tmpapi)
@ -1927,7 +1927,7 @@ def searcher(nzbprov, nzbname, comicinfo, link, IssueID, ComicID, tmpprov, direc
# changed to just work with direct links now...
tmpapi = mylar.SAB_HOST + "/api?apikey=" + mylar.SAB_APIKEY
logger.fdebug("send-to-SAB host &api initiation string : " + str(helpers.apiremove(tmpapi,'&')))
logger.fdebug("send-to-SAB host &api initiation string : " + str(helpers.apiremove(tmpapi, '&')))
SABtype = "&mode=addlocalfile&name="
tmpapi = tmpapi + SABtype
@ -1935,24 +1935,24 @@ def searcher(nzbprov, nzbname, comicinfo, link, IssueID, ComicID, tmpprov, direc
logger.fdebug("...selecting API type: " + str(tmpapi))
tmpapi = tmpapi + urllib.quote_plus(nzbpath)
logger.fdebug("...attaching nzb provider link: " + str(helpers.apiremove(tmpapi,'$')))
logger.fdebug("...attaching nzb provider link: " + str(helpers.apiremove(tmpapi, '$')))
# determine SAB priority
if mylar.SAB_PRIORITY:
tmpapi = tmpapi + "&priority=" + sabpriority
logger.fdebug("...setting priority: " + str(helpers.apiremove(tmpapi,'&')))
logger.fdebug("...setting priority: " + str(helpers.apiremove(tmpapi, '&')))
# if category is blank, let's adjust
if mylar.SAB_CATEGORY:
tmpapi = tmpapi + "&cat=" + mylar.SAB_CATEGORY
logger.fdebug("...attaching category: " + str(helpers.apiremove(tmpapi,'&')))
logger.fdebug("...attaching category: " + str(helpers.apiremove(tmpapi, '&')))
if mylar.POST_PROCESSING: #or mylar.RENAME_FILES:
if mylar.POST_PROCESSING_SCRIPT:
#this is relative to the SABnzbd script directory (ie. no path)
tmpapi = tmpapi + "&script=" + mylar.POST_PROCESSING_SCRIPT
else:
tmpapi = tmpapi + "&script=ComicRN.py"
logger.fdebug("...attaching rename script: " + str(helpers.apiremove(tmpapi,'&')))
logger.fdebug("...attaching rename script: " + str(helpers.apiremove(tmpapi, '&')))
#final build of send-to-SAB
logger.fdebug("Completed send-to-SAB link: " + str(helpers.apiremove(tmpapi,'&')))
logger.fdebug("Completed send-to-SAB link: " + str(helpers.apiremove(tmpapi, '&')))
logger.fdebug('sab-to-send:' + str(tmpapi))
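
The send-to-SAB string assembled above uses SABnzbd's addlocalfile mode, so SAB reads the .nzb from a path on its own disk. A condensed sketch of the URL assembly (Python 2's urllib.quote_plus, as in the module above; the parameter values are illustrative):

import urllib   # py3 equivalent: urllib.parse.quote_plus

def build_sab_url(host, apikey, nzbpath, priority=None, category=None):
    tmpapi = host + '/api?apikey=' + apikey + '&mode=addlocalfile&name='
    tmpapi += urllib.quote_plus(nzbpath)          # the .nzb path on SAB's own disk
    if priority is not None:
        tmpapi += '&priority=' + str(priority)
    if category:
        tmpapi += '&cat=' + category
    return tmpapi + '&script=ComicRN.py'

print(build_sab_url('http://localhost:8080', 'KEY', '/tmp/My Comic 001.nzb', priority=1, category='comics'))
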
@ -1979,7 +1979,7 @@ def searcher(nzbprov, nzbname, comicinfo, link, IssueID, ComicID, tmpprov, direc
sent_to = "SABnzbd+"
logger.info(u"Successfully sent nzb file to SABnzbd")
#nzbid, nzbname, sent_to
nzbname = re.sub('.nzb', '', nzbname).strip()
@ -1996,11 +1996,11 @@ def searcher(nzbprov, nzbname, comicinfo, link, IssueID, ComicID, tmpprov, direc
#update the db on the snatch.
if alt_nzbname is None or alt_nzbname == '':
logger.fdebug("Found matching comic...preparing to send to Updater with IssueID: " + str(IssueID) + " and nzbname: " + str(nzbname))
if '[RSS]' in tmpprov : tmpprov = re.sub('\[RSS\]','', tmpprov).strip()
if '[RSS]' in tmpprov: tmpprov = re.sub('\[RSS\]', '', tmpprov).strip()
updater.nzblog(IssueID, nzbname, ComicName, SARC=None, IssueArcID=None, id=nzbid, prov=tmpprov)
else:
logger.fdebug("Found matching comic...preparing to send to Updater with IssueID: " + str(IssueID) + " and nzbname: " + str(nzbname) + ' [' + alt_nzbname + ']')
if '[RSS]' in tmpprov : tmpprov = re.sub('\[RSS\]','', tmpprov).strip()
if '[RSS]' in tmpprov: tmpprov = re.sub('\[RSS\]', '', tmpprov).strip()
updater.nzblog(IssueID, nzbname, ComicName, SARC=None, IssueArcID=None, id=nzbid, prov=tmpprov, alt_nzbname=alt_nzbname)
#send out notifications for on snatch after the updater incase notification fails (it would bugger up the updater/pp scripts)
notify_snatch(nzbname, sent_to, helpers.filesafe(modcomicname), comyear, IssueNumber, nzbprov)
@ -2013,24 +2013,24 @@ def notify_snatch(nzbname, sent_to, modcomicname, comyear, IssueNumber, nzbprov)
if mylar.PROWL_ENABLED and mylar.PROWL_ONSNATCH:
logger.info(u"Sending Prowl notification")
prowl = notifiers.PROWL()
prowl.notify(nzbname,"Download started using " + sent_to)
prowl.notify(nzbname, "Download started using " + sent_to)
if mylar.NMA_ENABLED and mylar.NMA_ONSNATCH:
logger.info(u"Sending NMA notification")
nma = notifiers.NMA()
nma.notify(snline=snline,snatched_nzb=nzbname,sent_to=sent_to,prov=nzbprov)
nma.notify(snline=snline, snatched_nzb=nzbname, sent_to=sent_to, prov=nzbprov)
if mylar.PUSHOVER_ENABLED and mylar.PUSHOVER_ONSNATCH:
logger.info(u"Sending Pushover notification")
thisline = 'Mylar has snatched: ' + nzbname + ' from ' + nzbprov + ' and has sent it to ' + sent_to
pushover = notifiers.PUSHOVER()
pushover.notify(thisline,snline)
pushover.notify(thisline, snline)
if mylar.BOXCAR_ENABLED and mylar.BOXCAR_ONSNATCH:
logger.info(u"Sending Boxcar notification")
boxcar = notifiers.BOXCAR()
boxcar.notify(snatched_nzb=nzbname,sent_to=sent_to,snline=snline)
boxcar.notify(snatched_nzb=nzbname, sent_to=sent_to, snline=snline)
if mylar.PUSHBULLET_ENABLED and mylar.PUSHBULLET_ONSNATCH:
logger.info(u"Sending Pushbullet notification")
pushbullet = notifiers.PUSHBULLET()
pushbullet.notify(snline=snline,snatched=nzbname,sent_to=sent_to,prov=nzbprov,method='POST')
pushbullet.notify(snline=snline, snatched=nzbname, sent_to=sent_to, prov=nzbprov, method='POST')
return
@ -2084,11 +2084,11 @@ def IssueTitleCheck(issuetitle, watchcomic_split, splitit, splitst, issue_firstw
logger.fdebug('possible decimal - referencing position from original title.')
chkme = orignzb.find(decit[0])
chkend = orignzb.find(decit[1], chkme + len(decit[0]))
chkspot = orignzb[chkme:chkend+1]
chkspot = orignzb[chkme:chkend +1]
print chkme, chkend
print chkspot
# we add +1 to decit totals in order to account for the '.' that's missing and we assume is there.
if len(chkspot) == ( len(decit[0]) + len(decit[1]) + 1 ):
if len(chkspot) == (len(decit[0]) + len(decit[1]) + 1):
logger.fdebug('lengths match for possible decimal issue.')
if '.' in chkspot:
logger.fdebug('decimal located within : ' + str(chkspot))
@ -2154,7 +2154,7 @@ def IssueTitleCheck(issuetitle, watchcomic_split, splitit, splitst, issue_firstw
logger.fdebug('isstitle_match count : ' + str(isstitle_match))
if isstitle_match > 0:
iss_calc = ( ( isstitle_match + misword ) / watch_split_count ) * 100
iss_calc = ((isstitle_match + misword) / watch_split_count) * 100
logger.fdebug('iss_calc: ' + str(iss_calc) + ' % with ' + str(misword) + ' unaccounted for words')
else:
iss_calc = 0
@ -2182,7 +2182,7 @@ def generate_id(nzbprov, link):
path_parts = url_parts[2].rpartition('/')
nzbtempid = path_parts[0].rpartition('/')
nzblen = len(nzbtempid)
nzbid = nzbtempid[nzblen-1]
nzbid = nzbtempid[nzblen -1]
elif nzbprov == '32P':
#32P just has the torrent id stored.
nzbid = link
@ -2200,7 +2200,7 @@ def generate_id(nzbprov, link):
elif nzbprov == 'dognzb':
url_parts = urlparse.urlparse(link)
path_parts = url_parts[2].rpartition('/')
nzbid = path_parts[0].rsplit('/',1)[1]
nzbid = path_parts[0].rsplit('/', 1)[1]
elif nzbprov == 'newznab':
#if in format of http://newznab/getnzb/<id>.nzb&i=1&r=apikey
tmpid = urlparse.urlparse(link)[4] #param 4 is the query string from the url.
@ -2209,7 +2209,7 @@ def generate_id(nzbprov, link):
else:
# for the geek in all of us...
st = tmpid.find('&id')
end = tmpid.find('&',st+1)
nzbid = re.sub('&id=','', tmpid[st:end]).strip()
end = tmpid.find('&', st +1)
nzbid = re.sub('&id=', '', tmpid[st:end]).strip()
return nzbid
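
generate_id's newznab branch above pulls the id out of the query string. A sketch (the end == -1 guard is my addition; the hunk assumes another parameter follows '&id='):

import re
import urlparse   # Python 2; py3: from urllib import parse as urlparse

def newznab_nzbid(link):
    query = urlparse.urlparse(link)[4]     # element 4 is the query string
    st = query.find('&id')
    end = query.find('&', st + 1)
    if end == -1:                          # id is the last parameter (my guard)
        end = len(query)
    return re.sub('&id=', '', query[st:end]).strip()

print(newznab_nzbid('http://indexer.invalid/api?t=get&id=abc123&apikey=x'))   # abc123
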

View File

@ -29,7 +29,7 @@ def solicit(month, year):
mnloop = 0
upcoming = []
publishers = {'DC Comics':'DC Comics', 'DC\'s': 'DC Comics', 'Marvel':'Marvel Comics', 'Image':'Image Comics', 'IDW':'IDW Publishing', 'Dark Horse':'Dark Horse'}
publishers = {'DC Comics': 'DC Comics', 'DC\'s': 'DC Comics', 'Marvel': 'Marvel Comics', 'Image': 'Image Comics', 'IDW': 'IDW Publishing', 'Dark Horse': 'Dark Horse'}
# -- this is no longer needed (testing)
@ -47,7 +47,7 @@ def solicit(month, year):
#using the solicits+datestring leaves out some entries occasionally
#should use http://www.comicbookresources.com/tag/solicitations
#then just use the logic below but instead of datestring, find the month term and
#then just use the logic below but instead of datestring, find the month term and
#go ahead up to +5 months.
if month > 0:
@ -82,7 +82,7 @@ def solicit(month, year):
#logger.info('datestring:' + datestring)
#logger.info('checking:' + pagelinks)
pageresponse = urllib2.urlopen ( pagelinks )
pageresponse = urllib2.urlopen (pagelinks)
soup = BeautifulSoup (pageresponse)
cntlinks = soup.findAll('h3')
lenlinks = len(cntlinks)
@ -103,7 +103,7 @@ def solicit(month, year):
headName = headt.findNext(text=True)
#print ('headName: ' + headName)
if 'Image' in headName: print 'IMAGE FOUND'
if not all( ['Marvel' in headName, 'DC' in headName, 'Image' in headName] ) and ('Solicitations' in headName or 'Solicits' in headName):
if not all(['Marvel' in headName, 'DC' in headName, 'Image' in headName]) and ('Solicitations' in headName or 'Solicits' in headName):
# test for month here (int(month) + 5)
if not any(d.get('month', None) == str(headName).lower() for d in monthlist):
for mt in monthlist:
@ -126,29 +126,29 @@ def solicit(month, year):
#publish.append( headName[:pubstart].strip() )
abc = headt.findAll('a', href=True)[0]
ID_som = abc['href'] #first instance will have the right link...
resultURL.append( ID_som )
resultURL.append(ID_som)
#print '(' + str(cnt) + ') [ ' + publish[cnt] + '] Link URL: ' + resultURL[cnt]
cnt+=1
else:
logger.info('incorrect month - not using.')
x+=1
if cnt == 0:
return #break # no results means, end it
loopthis = (cnt-1)
#this loops through each 'found' solicit page
loopthis = (cnt -1)
#this loops through each 'found' solicit page
#shipdate = str(month_string) + '-' + str(year) - not needed.
while ( loopthis >= 0 ):
while (loopthis >= 0):
#print 'loopthis is : ' + str(loopthis)
#print 'resultURL is : ' + str(resultURL[loopthis])
shipdate = str(resultmonth[loopthis]) + '-' + str(resultyear[loopthis])
upcoming += populate(resultURL[loopthis], publish[loopthis], shipdate)
loopthis -=1
logger.info( str(len(upcoming)) + ' upcoming issues discovered.' )
logger.info(str(len(upcoming)) + ' upcoming issues discovered.')
newfl = mylar.CACHE_DIR + "/future-releases.txt"
newtxtfile = open(newfl, 'wb')
@ -165,7 +165,7 @@ def solicit(month, year):
newtxtfile.close()
logger.fdebug( 'attempting to populate future upcoming...' )
logger.fdebug('attempting to populate future upcoming...')
mylardb = os.path.join(mylar.DATA_DIR, "mylar.db")
@ -173,7 +173,7 @@ def solicit(month, year):
cursor = connection.cursor()
# we should extract the issues that are being watched, but no data is available yet ('Watch For' status)
# once we get the data, store it, wipe the existing table, retrieve the new data, populate the data into
# once we get the data, store it, wipe the existing table, retrieve the new data, populate the data into
# the table, recheck the series against the current watchlist and then restore the Watch For data.
@ -204,11 +204,11 @@ def solicit(month, year):
mylar.weeklypull.pullitcheck(futurepull="yes")
#.end
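
The solicit() walk above covers the requested month plus the next five, rolling the year past December. A condensed sketch of just the month arithmetic (the function name is mine; the real loop also collects the per-month solicitation links):

def solicit_months(month, year, span=5):
    out = []
    for offset in range(span + 1):
        m = month + offset
        y = year + (m - 1) // 12           # roll the year when we pass December
        m = (m - 1) % 12 + 1
        out.append((m, y))
    return out

print(solicit_months(11, 2015))   # [(11, 2015), (12, 2015), (1, 2016), (2, 2016), (3, 2016), (4, 2016)]
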
def populate(link,publisher,shipdate):
def populate(link, publisher, shipdate):
#this is the secondary url call to populate
input = 'http://www.comicbookresources.com/' + link
#print 'checking ' + str(input)
response = urllib2.urlopen ( input )
response = urllib2.urlopen (input)
soup = BeautifulSoup (response)
abc = soup.findAll('p')
lenabc = len(abc)
@ -222,7 +222,7 @@ def populate(link,publisher,shipdate):
prev_chk = False
while (i < lenabc):
titlet = abc[i] #iterate through the p pulling out only results.
titlet = abc[i] #iterate through the p pulling out only results.
titlet_next = titlet.findNext(text=True)
#print ("titlet: " + str(titlet))
if "/prev_img.php?pid" in str(titlet) and titlet_next is None:
@ -247,7 +247,7 @@ def populate(link,publisher,shipdate):
if prev_chk == True:
tempName = titlet.findNext(text=True)
if not any( [' TPB' in tempName, 'HC' in tempName, 'GN-TPB' in tempName, 'for $1' in tempName.lower(), 'subscription variant' in tempName.lower(), 'poster' in tempName.lower() ] ):
if not any([' TPB' in tempName, 'HC' in tempName, 'GN-TPB' in tempName, 'for $1' in tempName.lower(), 'subscription variant' in tempName.lower(), 'poster' in tempName.lower()]):
if '#' in tempName[:50]:
#tempName = tempName.replace(u'.',u"'")
tempName = tempName.encode('ascii', 'replace') #.decode('utf-8')
@ -255,8 +255,8 @@ def populate(link,publisher,shipdate):
tempName = tempName.replace('???', ' ')
stissue = tempName.find('#')
endissue = tempName.find(' ', stissue)
if tempName[stissue+1] == ' ': #if issue has space between # and number, adjust.
endissue = tempName.find(' ', stissue+2)
if tempName[stissue +1] == ' ': #if issue has space between # and number, adjust.
endissue = tempName.find(' ', stissue +2)
if endissue == -1: endissue = len(tempName)
issue = tempName[stissue:endissue].lstrip(' ')
if ':'in issue: issue = re.sub(':', '', issue).rstrip()
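The '#'-slicing above is easier to follow in isolation; a minimal sketch of the same logic against a hypothetical title string:

    # pull '#<number>' out of a solicitation line, tolerating '# 27' spacing
    def extract_issue(tempName):
        stissue = tempName.find('#')
        endissue = tempName.find(' ', stissue)
        if tempName[stissue + 1] == ' ':            # '# 27' -> look past the gap
            endissue = tempName.find(' ', stissue + 2)
        if endissue == -1:                          # issue number ends the string
            endissue = len(tempName)
        return tempName[stissue:endissue].lstrip(' ')
    # extract_issue('SAGA #27 (MR)') -> '#27'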
@ -269,15 +269,15 @@ def populate(link,publisher,shipdate):
#print ('multiple issues detected. Splitting.')
ststart = issue.find('-')
issue1 = issue[:ststart]
issue2 = '#' + str(issue[ststart+1:])
issue2 = '#' + str(issue[ststart +1:])
if '&' in exinfo:
#print ('multiple issues detected. Splitting.')
ststart = exinfo.find('&')
issue1 = issue # this detects fine
issue2 = '#' + str(exinfo[ststart+1:])
issue2 = '#' + str(exinfo[ststart +1:])
if '& ' in issue2: issue2 = re.sub("&\\b", "", issue2)
exinfo = exinfo.replace(exinfo[ststart+1:len(issue2)], '').strip()
exinfo = exinfo.replace(exinfo[ststart +1:len(issue2)], '').strip()
if exinfo == '&': exinfo = 'N/A'
comic = tempName[:stissue].strip()
@ -289,11 +289,11 @@ def populate(link,publisher,shipdate):
issuedate = shipdate
if 'on sale' in str(titlet).lower():
onsale_start = str(titlet).lower().find('on sale') + 8
onsale_end = str(titlet).lower().find('<br>',onsale_start)
onsale_end = str(titlet).lower().find('<br>', onsale_start)
thedate = str(titlet)[onsale_start:onsale_end]
m = None
basemonths = {'january':'1','jan':'1','february':'2','feb':'2','march':'3','mar':'3','april':'4','apr':'4','may':'5','june':'6','july':'7','august':'8','aug':'8','september':'9','sept':'9','october':'10','oct':'10','november':'11','nov':'11','december':'12','dec':'12'}
basemonths = {'january': '1', 'jan': '1', 'february': '2', 'feb': '2', 'march': '3', 'mar': '3', 'april': '4', 'apr': '4', 'may': '5', 'june': '6', 'july': '7', 'august': '8', 'aug': '8', 'september': '9', 'sept': '9', 'october': '10', 'oct': '10', 'november': '11', 'nov': '11', 'december': '12', 'dec': '12'}
for month in basemonths:
if month in thedate.lower():
m = basemonths[month]
@ -302,7 +302,7 @@ def populate(link,publisher,shipdate):
if m is not None:
theday = len(month) + 1 # account for space between month & day
thedaystart = thedate[theday:(theday+2)].strip() # day numeric won't exceed 2
thedaystart = thedate[theday:(theday +2)].strip() # day numeric won't exceed 2
if len(str(thedaystart)) == 1:
thedaystart = '0' + str(thedaystart)
if len(str(m)) == 1:
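A minimal sketch of the 'on sale' normalization this block performs, assuming thedate came out as e.g. 'March 4' and the year is taken from the shipdate (month abbreviations omitted for brevity):

    basemonths = {'january': '1', 'february': '2', 'march': '3', 'april': '4',
                  'may': '5', 'june': '6', 'july': '7', 'august': '8',
                  'september': '9', 'october': '10', 'november': '11',
                  'december': '12'}

    def onsale_to_iso(thedate, year):
        for name, m in basemonths.items():
            if thedate.lower().startswith(name):
                day = thedate[len(name) + 1:len(name) + 3].strip()  # 1-2 digit day
                return '%s-%s-%s' % (year, m.zfill(2), day.zfill(2))
        return None
    # onsale_to_iso('March 4', '2015') -> '2015-03-04'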
@ -312,13 +312,13 @@ def populate(link,publisher,shipdate):
logger.info('[' + comic + '] On sale :' + str(thedate))
exinfo += ' [' + str(thedate) + ']'
issuedate = thedate
if issue1:
upcome.append({
'Shipdate': issuedate,
'Publisher': publisher.upper(),
'Issue': re.sub('#', '',issue1).lstrip(),
'Issue': re.sub('#', '', issue1).lstrip(),
'Comic': comic.upper(),
'Extra': exinfo.upper()
})
@ -336,7 +336,7 @@ def populate(link,publisher,shipdate):
#print ('Comic: ' + comic)
#print('issue#: ' + re.sub('#', '', issue2))
#print ('extra info: ' + exinfo)
else:
else:
upcome.append({
'Shipdate': issuedate,
'Publisher': publisher.upper(),


@ -18,7 +18,7 @@ import datetime
from xml.dom.minidom import parseString
import urllib2
import shlex
import re
import re
import os
import itertools
@ -41,9 +41,9 @@ def dbUpdate(ComicIDList=None, calledfrom=None):
recentstatus = 'Unknown'
elif comlist['ForceContinuing'] == 1:
recentstatus = 'Continuing'
elif 'present' in comlist['ComicPublished'].lower() or ( helpers.today()[:4] in comlist['LatestDate']):
elif 'present' in comlist['ComicPublished'].lower() or (helpers.today()[:4] in comlist['LatestDate']):
latestdate = comlist['LatestDate']
c_date = datetime.date(int(latestdate[:4]),int(latestdate[5:7]),1)
c_date = datetime.date(int(latestdate[:4]), int(latestdate[5:7]), 1)
n_date = datetime.date.today()
recentchk = (n_date - c_date).days
if comlist['NewPublish']:
@ -69,7 +69,7 @@ def dbUpdate(ComicIDList=None, calledfrom=None):
if calledfrom is None:
logger.info('Starting update for %i active comics' % len(comiclist))
cnt = 1
for comic in comiclist:
@ -101,7 +101,7 @@ def dbUpdate(ComicIDList=None, calledfrom=None):
if CV_EXcomicid['variloop'] == '99':
mismatch = "yes"
if ComicID[:1] == "G": mylar.importer.GCDimport(ComicID)
else: importer.addComictoDB(ComicID,mismatch)
else: importer.addComictoDB(ComicID, mismatch)
else:
if mylar.CV_ONETIMER == 1:
logger.fdebug("CV_OneTimer option enabled...")
@ -151,7 +151,7 @@ def dbUpdate(ComicIDList=None, calledfrom=None):
logger.fdebug("Refreshing the series and pulling in new data using only CV.")
if whack == False:
mylar.importer.addComictoDB(ComicID,mismatch,calledfrom='dbupdate',annload=annload)
mylar.importer.addComictoDB(ComicID, mismatch, calledfrom='dbupdate', annload=annload)
#reload the annuals here.
issues_new = myDB.select('SELECT * FROM issues WHERE ComicID=?', [ComicID])
@ -198,7 +198,7 @@ def dbUpdate(ComicIDList=None, calledfrom=None):
newVAL = {"Status": issue['Status']}
if newVAL['Status'] == None:
datechk = re.sub('-','', newissue['ReleaseDate']).strip() # converts date to 20140718 format
datechk = re.sub('-', '', newissue['ReleaseDate']).strip() # converts date to 20140718 format
if mylar.AUTOWANT_ALL:
newVAL = {"Status": "Wanted"}
elif int(datechk) >= int(nowtime) and mylar.AUTOWANT_UPCOMING:
@ -261,12 +261,12 @@ def dbUpdate(ComicIDList=None, calledfrom=None):
logger.info('I have added ' + str(len(newiss)) + ' new issues for this series that were not present before.')
forceRescan(ComicID)
else:
mylar.importer.addComictoDB(ComicID,mismatch,annload=annload)
mylar.importer.addComictoDB(ComicID, mismatch, annload=annload)
else:
mylar.importer.addComictoDB(ComicID,mismatch)
mylar.importer.addComictoDB(ComicID, mismatch)
cnt +=1
time.sleep(5) #pause for 5 secs so we don't hammer CV and get a 500 error
@ -286,7 +286,7 @@ def upcoming_update(ComicID, ComicName, IssueNumber, IssueDate, forcecheck=None,
# here we add to upcoming table...
myDB = db.DBConnection()
dspComicName = ComicName #to make sure that the word 'annual' will be displayed on screen
if 'annual' in ComicName.lower():
if 'annual' in ComicName.lower():
adjComicName = re.sub("\\bannual\\b", "", ComicName.lower()) # for use with comparisons.
logger.fdebug('annual detected - adjusting name to : ' + adjComicName)
else:
@ -316,7 +316,7 @@ def upcoming_update(ComicID, ComicName, IssueNumber, IssueDate, forcecheck=None,
n_date = datetime.datetime.now()
absdiff = abs(n_date - c_obj_date)
hours = (absdiff.days * 24 * 60 * 60 + absdiff.seconds) / 3600.0
# no need to hammer the refresh
# no need to hammer the refresh
# let's check it every 5 hours (or more)
#pullupd = "yes"
if 'annual' in ComicName.lower():
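The throttle above boils down to an elapsed-hours check; a minimal sketch (helper name illustrative):

    import datetime

    def hours_since(last_update):
        # timedelta carries days and seconds separately; fold both into hours
        absdiff = abs(datetime.datetime.now() - last_update)
        return (absdiff.days * 24 * 60 * 60 + absdiff.seconds) / 3600.0
    # refresh only when hours_since(...) > 5 or forcecheck == 'yes', so a
    # busy pull-list does not trigger a metadata refresh on every pass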
@ -336,7 +336,7 @@ def upcoming_update(ComicID, ComicName, IssueNumber, IssueDate, forcecheck=None,
og_status = None
logger.fdebug(adjComicName + ' Issue: ' + str(IssueNumber) + ' not present in listings to mark for download...updating comic and adding to Upcoming Wanted Releases.')
# we need to either decrease the total issue count, OR indicate that an issue is upcoming.
upco_results = myDB.select("SELECT COUNT(*) FROM UPCOMING WHERE ComicID=?",[ComicID])
upco_results = myDB.select("SELECT COUNT(*) FROM UPCOMING WHERE ComicID=?", [ComicID])
upco_iss = upco_results[0][0]
#logger.info("upco_iss: " + str(upco_iss))
if int(upco_iss) > 0:
@ -353,7 +353,7 @@ def upcoming_update(ComicID, ComicName, IssueNumber, IssueDate, forcecheck=None,
if hours > 5 or forcecheck == 'yes':
pullupd = "yes"
logger.fdebug('Now Refreshing comic ' + ComicName + ' to make sure it is up-to-date')
if ComicID[:1] == "G": mylar.importer.GCDimport(ComicID,pullupd)
if ComicID[:1] == "G": mylar.importer.GCDimport(ComicID, pullupd)
else: mylar.importer.updateissuedata(ComicID, ComicName, calledfrom='weeklycheck')#mylar.importer.addComictoDB(ComicID,mismatch,pullupd)
else:
logger.fdebug('It has not been longer than 5 hours since we last did this...we will wait so we do not hammer things.')
@ -376,7 +376,7 @@ def upcoming_update(ComicID, ComicName, IssueNumber, IssueDate, forcecheck=None,
#check for 'out-of-whack' series here.
whackness = dbUpdate([ComicID], calledfrom='weekly')
if whackness == True:
if any( [issuechk['Status'] == 'Downloaded', issuechk['Status'] == 'Archived', issuechk['Status'] == 'Snatched'] ):
if any([issuechk['Status'] == 'Downloaded', issuechk['Status'] == 'Archived', issuechk['Status'] == 'Snatched']):
logger.fdebug('Forcibly maintaining status of : ' + og_status + ' for #' + issuechk['Issue_Number'] + ' to ensure integrity.')
logger.fdebug('Comic series has an incorrect total count. Forcibly refreshing series to ensure data is current.')
dbUpdate([ComicID])
@ -394,32 +394,32 @@ def upcoming_update(ComicID, ComicName, IssueNumber, IssueDate, forcecheck=None,
control = {"IssueID": issuechk['IssueID']}
newValue['IssueID'] = issuechk['IssueID']
if og_status == "Snatched":
values = { "Status": "Snatched"}
values = {"Status": "Snatched"}
newValue['Status'] = "Snatched"
elif og_status == "Downloaded":
values = { "Status": "Downloaded"}
values = {"Status": "Downloaded"}
newValue['Status'] = "Downloaded"
#if the status is Downloaded and it's on the pullist - let's mark it so everyone can bask in the glory
elif og_status == "Wanted":
values = { "Status": "Wanted"}
newValue['Status'] = "Wanted"
values = {"Status": "Wanted"}
newValue['Status'] = "Wanted"
elif og_status == "Archived":
values = { "Status": "Archived"}
values = {"Status": "Archived"}
newValue['Status'] = "Archived"
elif og_status == 'Failed':
if mylar.FAILED_DOWNLOAD_HANDLING:
if mylar.FAILED_AUTO:
values = { "Status": "Wanted" }
values = {"Status": "Wanted"}
newValue['Status'] = "Wanted"
else:
values = { "Status": "Failed" }
values = {"Status": "Failed"}
newValue['Status'] = "Failed"
else:
values = { "Status": "Skipped" }
values = {"Status": "Skipped"}
newValue['Status'] = "Skipped"
else:
values = { "Status": "Skipped"}
values = {"Status": "Skipped"}
newValue['Status'] = "Skipped"
#was in wrong place :(
else:
@ -431,7 +431,7 @@ def upcoming_update(ComicID, ComicName, IssueNumber, IssueDate, forcecheck=None,
if og_status is None:
newValue['Status'] = "Wanted"
logger.fdebug('...Changing Status to Wanted and throwing it in the Upcoming section since it is not published yet.')
#this works for issues existing in DB...
#this works for issues existing in DB...
elif og_status == "Skipped":
newValue['Status'] = "Wanted"
values = {"Status": "Wanted"}
@ -472,9 +472,9 @@ def upcoming_update(ComicID, ComicName, IssueNumber, IssueDate, forcecheck=None,
else:
myDB.upsert("issues", values, control)
if any( [og_status == 'Downloaded', og_status == 'Archived', og_status == 'Snatched', og_status == 'Wanted', newValue['Status'] == 'Wanted'] ):
if any([og_status == 'Downloaded', og_status == 'Archived', og_status == 'Snatched', og_status == 'Wanted', newValue['Status'] == 'Wanted']):
logger.fdebug('updating Pull-list to reflect status change: ' + og_status + '[' + newValue['Status'] + ']')
if og_status != 'Skipped':
if og_status != 'Skipped':
downstats = {"Status": og_status,
"ComicID": issuechk['ComicID'],
"IssueID": issuechk['IssueID']}
@ -485,7 +485,7 @@ def upcoming_update(ComicID, ComicName, IssueNumber, IssueDate, forcecheck=None,
return downstats
def weekly_update(ComicName,IssueNumber,CStatus,CID,futurepull=None,altissuenumber=None):
def weekly_update(ComicName, IssueNumber, CStatus, CID, futurepull=None, altissuenumber=None):
if futurepull:
logger.fdebug('future_update of table : ' + str(ComicName) + ' #:' + str(IssueNumber) + ' to a status of ' + str(CStatus))
else:
@ -499,11 +499,11 @@ def weekly_update(ComicName,IssueNumber,CStatus,CID,futurepull=None,altissuenumb
# added CStatus to update status flags on Pullist screen
myDB = db.DBConnection()
if futurepull is None:
issuecheck = myDB.selectone("SELECT * FROM weekly WHERE COMIC=? AND ISSUE=?", [ComicName,IssueNumber]).fetchone()
issuecheck = myDB.selectone("SELECT * FROM weekly WHERE COMIC=? AND ISSUE=?", [ComicName, IssueNumber]).fetchone()
else:
issuecheck = myDB.selectone("SELECT * FROM future WHERE COMIC=? AND ISSUE=?", [ComicName,IssueNumber]).fetchone()
issuecheck = myDB.selectone("SELECT * FROM future WHERE COMIC=? AND ISSUE=?", [ComicName, IssueNumber]).fetchone()
if issuecheck is not None:
controlValue = { "COMIC": str(ComicName),
controlValue = {"COMIC": str(ComicName),
"ISSUE": str(IssueNumber)}
try:
@ -547,10 +547,10 @@ def no_searchresults(ComicID):
# when there's a mismatch between CV & GCD - let's change the status to
# something other than 'Loaded'
myDB = db.DBConnection()
controlValue = { "ComicID": ComicID}
controlValue = {"ComicID": ComicID}
newValue = {"Status": "Error",
"LatestDate": "Error",
"LatestIssue": "Error"}
"LatestIssue": "Error"}
myDB.upsert("comics", newValue, controlValue)
def nzblog(IssueID, NZBName, ComicName, SARC=None, IssueArcID=None, id=None, prov=None, alt_nzbname=None):
@ -568,9 +568,9 @@ def nzblog(IssueID, NZBName, ComicName, SARC=None, IssueArcID=None, id=None, pro
logger.fdebug("Story Arc (SARC) detected as: " + str(SARC))
if mylar.HIGHCOUNT == 0:
IssueID = '900000'
else:
else:
IssueID = int(mylar.HIGHCOUNT) + 1
controlValue = {"IssueID": IssueID,
"Provider": prov}
@ -591,15 +591,15 @@ def nzblog(IssueID, NZBName, ComicName, SARC=None, IssueArcID=None, id=None, pro
if chkd['AltNZBName'] is None or chkd['AltNZBName'] == '':
#we need to wipe the entry so we can re-update with the alt-nzbname if required
myDB.action('DELETE FROM nzblog WHERE IssueID=? and Provider=?', [IssueID, prov])
logger.fdebug('Deleted stale entry from nzblog for IssueID: ' + str(IssueID) + ' [' + prov + ']')
logger.fdebug('Deleted stale entry from nzblog for IssueID: ' + str(IssueID) + ' [' + prov + ']')
myDB.upsert("nzblog", newValue, controlValue)
def foundsearch(ComicID, IssueID, mode=None, down=None, provider=None, SARC=None, IssueArcID=None, module=None):
# When doing a Force Search (Wanted tab), the resulting search calls this to update.
# this is all redundant code that forceRescan already does.
# should be redone at some point so that instead of rescanning entire
# should be redone at some point so that instead of rescanning entire
# series directory, it just scans for the issue it just downloaded and
# changes the status to Snatched accordingly. It should not increment the have count
# at this stage as it's not downloaded - just the .nzb has been snatched and sent to SAB.
@ -685,7 +685,7 @@ def foundsearch(ComicID, IssueID, mode=None, down=None, provider=None, SARC=None
#this will update the weeklypull list immediately after snatching to reflect the new status.
#-is ugly, should be linked directly to other table (IssueID should be populated in weekly pull at this point hopefully).
chkit = myDB.selectone("SELECT * FROM weekly WHERE ComicID=? AND IssueID=?",[ComicID, IssueID]).fetchone()
chkit = myDB.selectone("SELECT * FROM weekly WHERE ComicID=? AND IssueID=?", [ComicID, IssueID]).fetchone()
if chkit is not None:
@ -739,10 +739,10 @@ def foundsearch(ComicID, IssueID, mode=None, down=None, provider=None, SARC=None
myDB.upsert("issues", newValue, controlValue)
#this will update the weeklypull list immediately after post-processing to reflect the new status.
chkit = myDB.selectone("SELECT * FROM weekly WHERE ComicID=? AND IssueID=? AND Status='Snatched'",[ComicID, IssueID]).fetchone()
chkit = myDB.selectone("SELECT * FROM weekly WHERE ComicID=? AND IssueID=? AND Status='Snatched'", [ComicID, IssueID]).fetchone()
if chkit is not None:
ctlVal = {"ComicID": ComicID,
"IssueID": IssueID}
newVal = {"Status": "Downloaded"}
@ -751,14 +751,14 @@ def foundsearch(ComicID, IssueID, mode=None, down=None, provider=None, SARC=None
logger.info(module + ' Updating Status (' + downstatus + ') now complete for ' + ComicName + ' issue: ' + IssueNum)
return
def forceRescan(ComicID,archive=None,module=None):
def forceRescan(ComicID, archive=None, module=None):
if module is None:
module = ''
module += '[FILE-RESCAN]'
myDB = db.DBConnection()
# file check to see if issue exists
rescan = myDB.selectone('SELECT * FROM comics WHERE ComicID=?', [ComicID]).fetchone()
if rescan['AlternateSearch'] is not None:
if rescan['AlternateSearch'] is not None:
altnames = rescan['AlternateSearch'] + '##'
else:
altnames = ''
@ -771,7 +771,7 @@ def forceRescan(ComicID,archive=None,module=None):
if ascan['ReleaseComicName'] not in altnames:
altnames += ascan['ReleaseComicName'] + '!!' + ascan['ReleaseComicID'] + '##'
altnames = altnames[:-2]
logger.info(module + ' Now checking files for ' + rescan['ComicName'] + ' (' + str(rescan['ComicYear']) + ') in ' + rescan['ComicLocation'] )
logger.info(module + ' Now checking files for ' + rescan['ComicName'] + ' (' + str(rescan['ComicYear']) + ') in ' + rescan['ComicLocation'])
fca = []
if archive is None:
tmpval = filechecker.listFiles(dir=rescan['ComicLocation'], watchcomic=rescan['ComicName'], Publisher=rescan['ComicPublisher'], AlternateSearch=altnames)
@ -783,7 +783,7 @@ def forceRescan(ComicID,archive=None,module=None):
logger.fdebug(module + 'dir: ' + rescan['ComicLocation'])
logger.fdebug(module + 'os.path.basename: ' + os.path.basename(rescan['ComicLocation']))
pathdir = os.path.join(mylar.MULTIPLE_DEST_DIRS, os.path.basename(rescan['ComicLocation']))
logger.info(module + ' Now checking files for ' + rescan['ComicName'] + ' (' + str(rescan['ComicYear']) + ') in :' + pathdir )
logger.info(module + ' Now checking files for ' + rescan['ComicName'] + ' (' + str(rescan['ComicYear']) + ') in :' + pathdir)
tmpv = filechecker.listFiles(dir=pathdir, watchcomic=rescan['ComicName'], Publisher=rescan['ComicPublisher'], AlternateSearch=altnames)
logger.fdebug(module + 'tmpv filecount: ' + str(tmpv['comiccount']))
comiccnt += int(tmpv['comiccount'])
@ -829,7 +829,7 @@ def forceRescan(ComicID,archive=None,module=None):
reissues = myDB.select('SELECT * FROM issues WHERE ComicID=?', [ComicID])
multiple_check = myDB.select('SELECT * FROM issues WHERE ComicID=? GROUP BY Int_IssueNumber HAVING (COUNT(Int_IssueNumber) > 1)', [ComicID])
if len(multiple_check) == 0:
if len(multiple_check) == 0:
logger.fdebug('No issues with identical issue numbering were detected for this series')
mc_issuenumber = None
else:
@ -853,7 +853,7 @@ def forceRescan(ComicID,archive=None,module=None):
issID_to_write = []
ANNComicID = None
while (fn < fccnt):
while (fn < fccnt):
haveissue = "no"
issuedupe = "no"
annualdupe = "no"
@ -862,7 +862,7 @@ def forceRescan(ComicID,archive=None,module=None):
except IndexError:
logger.fdebug(module + ' Unable to properly retrieve a file listing for the given series.')
logger.fdebug(module + ' Probably because the filenames being scanned are not in a parseable format')
if fn == 0:
if fn == 0:
return
else:
break
@ -872,7 +872,7 @@ def forceRescan(ComicID,archive=None,module=None):
logger.fdebug(module + ' temploc: ' + str(temploc))
if 'annual' not in temploc.lower():
#remove the extension here
extensions = ('.cbr','.cbz','.cb7')
extensions = ('.cbr', '.cbz', '.cb7')
if temploc.lower().endswith(extensions):
logger.fdebug(module + ' Removed extension for issue: ' + str(temploc))
temploc = temploc[:-4]
@ -892,7 +892,7 @@ def forceRescan(ComicID,archive=None,module=None):
issname = reiss['IssueName']
fnd_iss_except = 'None'
fcdigit = helpers.issuedigits(temploc)
if int(fcdigit) == int_iss:
@ -917,7 +917,7 @@ def forceRescan(ComicID,archive=None,module=None):
logger.fdebug(module + ' miISSUEYEAR: ' + str(mi['IssueYear']) + ' -- issyear : ' + str(issyear))
if any(mi['IssueID'] == d['issueid'] for d in issuedupechk):
logger.fdebug(module + ' IssueID already within dupe. Checking next if available.')
multiplechk = True
multiplechk = True
break
if (mi['IssueYear'] in tmpfc['ComicFilename']) and (issyear == mi['IssueYear']):
logger.fdebug(module + ' Matched to year within filename : ' + str(issyear))
@ -989,15 +989,15 @@ def forceRescan(ComicID,archive=None,module=None):
if issuedupe == "no":
if foundchk == False:
if foundchk == False:
logger.fdebug(module + ' Matched...issue: ' + rescan['ComicName'] + '#' + reiss['Issue_Number'] + ' --- ' + str(int_iss))
havefiles+=1
haveissue = "yes"
isslocation = str(tmpfc['ComicFilename'])
issSize = str(tmpfc['ComicSize'])
logger.fdebug(module + ' .......filename: ' + str(isslocation))
logger.fdebug(module + ' .......filesize: ' + str(tmpfc['ComicSize']))
# to avoid duplicate issues which screws up the count...let's store the filename issues then
logger.fdebug(module + ' .......filesize: ' + str(tmpfc['ComicSize']))
# to avoid duplicate issues which screws up the count...let's store the filename issues then
# compare earlier...
issuedupechk.append({'fcdigit': fcdigit,
'filename': tmpfc['ComicFilename'],
@ -1035,7 +1035,7 @@ def forceRescan(ComicID,archive=None,module=None):
logger.fdebug(module + ' int_iss:' + str(int_iss))
issyear = reann['IssueDate'][:4]
old_status = reann['Status']
old_status = reann['Status']
fcdigit = helpers.issuedigits(re.sub('annual', '', temploc.lower()).strip())
logger.fdebug(module + ' fcdigit:' + str(fcdigit))
@ -1100,7 +1100,7 @@ def forceRescan(ComicID,archive=None,module=None):
#keep tmpfc['ComicFilename']
logger.fdebug('[DUPECHECK-CBZ PRIORITY] [#' + reann['Issue_Number'] + '] Retaining newly scanned in filename : ' + tmpfc['ComicFilename'])
removedupe = True
if mylar.DUPECONSTRAINT == 'filesize':
if tmpfc['ComicSize'] <= di['filesize']:
logger.fdebug('[DUPECHECK-FILESIZE PRIORITY] [#' + reann['Issue_Number'] + '] Retaining currently scanned in filename : ' + di['filename'])
@ -1188,7 +1188,7 @@ def forceRescan(ComicID,archive=None,module=None):
#if Archived, increase the 'Have' count.
#if archive:
# issStatus = "Archived"
if haveissue == "yes":
issStatus = "Downloaded"
newValueDict = {"Location": isslocation,
@ -1197,7 +1197,7 @@ def forceRescan(ComicID,archive=None,module=None):
}
issID_to_ignore.append(str(iss_id))
if ANNComicID:
# if 'annual' in temploc.lower():
#issID_to_write.append({"tableName": "annuals",
@ -1221,10 +1221,10 @@ def forceRescan(ComicID,archive=None,module=None):
#here we need to change the status of the ones we DIDN'T FIND above since the loop only hits on FOUND issues.
update_iss = []
tmpsql = "SELECT * FROM issues WHERE ComicID=? AND IssueID not in ({seq})".format(seq=','.join(['?']*(len(issID_to_ignore)-1)))
tmpsql = "SELECT * FROM issues WHERE ComicID=? AND IssueID not in ({seq})".format(seq=','.join(['?'] *(len(issID_to_ignore) -1)))
chkthis = myDB.select(tmpsql, issID_to_ignore)
# chkthis = None
if chkthis is None:
if chkthis is None:
pass
else:
for chk in chkthis:
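The {seq} expansion above builds one '?' per bound value; since the first value fills ComicID=?, only len(issID_to_ignore) - 1 markers go inside the IN (). A minimal standalone sqlite3 sketch (the id values are hypothetical):

    import sqlite3

    ids = ['2127', '415756', '415757']   # ComicID first, then the IssueIDs to skip
    seq = ','.join(['?'] * (len(ids) - 1))
    sql = "SELECT * FROM issues WHERE ComicID=? AND IssueID NOT IN (%s)" % seq

    conn = sqlite3.connect(':memory:')
    conn.execute("CREATE TABLE issues (ComicID TEXT, IssueID TEXT)")
    rows = conn.execute(sql, ids).fetchall()   # one parameter per '?', no string splicing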
@ -1252,7 +1252,7 @@ def forceRescan(ComicID,archive=None,module=None):
update_iss.append({"IssueID": chk['IssueID'],
"Status": issStatus})
if len(update_iss) > 0:
i = 0
#do it like this to avoid DB locks...
@ -1308,7 +1308,7 @@ def forceRescan(ComicID,archive=None,module=None):
archivedissues = 0 #set this to 0 so it tallies correctly.
for down in downissues:
#print "downlocation:" + str(down['Location'])
#remove special characters from
#remove special characters from
#temploc = rescan['ComicLocation'].replace('_', ' ')
#temploc = re.sub('[\#\'\/\.]', '', temploc)
#print ("comiclocation: " + str(rescan['ComicLocation']))
@ -1334,14 +1334,14 @@ def forceRescan(ComicID,archive=None,module=None):
controlValue = {"IssueID": down['IssueID']}
newValue = {"Status": "Archived"}
myDB.upsert("issues", newValue, controlValue)
archivedissues+=1
archivedissues+=1
totalarc = arcfiles + archivedissues
havefiles = havefiles + archivedissues #arcfiles already tallied in havefiles in above segment
logger.fdebug(module + ' arcfiles : ' + str(arcfiles))
logger.fdebug(module + ' havefiles: ' + str(havefiles))
logger.fdebug(module + ' I have changed the status of ' + str(archivedissues) + ' issues to a status of Archived, as I now cannot locate them in the series directory.')
#combined total for display purposes only.
#combined total for display purposes only.
combined_total = iscnt + anncnt #(rescan['Total'] + anncnt)
#let's update the total count of comics that was found.


@ -26,20 +26,20 @@ branch = "development"
def runGit(args):
if mylar.GIT_PATH:
git_locations = ['"'+mylar.GIT_PATH+'"']
git_locations = ['"' +mylar.GIT_PATH +'"']
else:
git_locations = ['git']
if platform.system().lower() == 'darwin':
git_locations.append('/usr/local/git/bin/git')
output = err = None
for cur_git in git_locations:
cmd = cur_git+' '+args
cmd = cur_git +' ' +args
try:
logger.debug('Trying to execute: "' + cmd + '" with shell in ' + mylar.PROG_DIR)
p = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.STDOUT, shell=True, cwd=mylar.PROG_DIR)
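runGit() is essentially a first-binary-that-answers loop; a minimal sketch of that pattern (function name and fallback handling illustrative):

    import subprocess

    def run_git(args, locations=('git', '/usr/local/git/bin/git')):
        for cur_git in locations:
            try:
                p = subprocess.Popen(cur_git + ' ' + args, shell=True,
                                     stdout=subprocess.PIPE,
                                     stderr=subprocess.STDOUT)
                output = p.communicate()[0]
            except OSError:
                continue                  # candidate binary missing; try the next
            if output and 'not found' not in output:
                return output.strip()
        return None
    # run_git('rev-parse HEAD') -> current commit hash, or None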
@ -48,7 +48,7 @@ def runGit(args):
except OSError:
logger.debug('Command ' + cmd + ' didn\'t work, couldn\'t find git')
continue
if 'not found' in output or "not recognized as an internal or external command" in output:
logger.debug('Unable to find git with command ' + cmd)
output = None
@ -57,27 +57,27 @@ def runGit(args):
output = None
elif output:
break
return (output, err)
def getVersion():
if version.MYLAR_VERSION.startswith('win32build'):
mylar.INSTALL_TYPE = 'win'
# Don't have a way to update exe yet, but don't want to set VERSION to None
return 'Windows Install'
elif os.path.isdir(os.path.join(mylar.PROG_DIR, '.git')):
mylar.INSTALL_TYPE = 'git'
output, err = runGit('rev-parse HEAD')
if not output:
logger.error('Couldn\'t find latest installed version.')
return None
#branch_history, err = runGit("log --oneline --pretty=format:'%h - %ar - %s' -n 5")
#bh = []
#print ("branch_history: " + branch_history)
@ -85,31 +85,31 @@ def getVersion():
#print ("bh1: " + bh[0])
cur_commit_hash = output.strip()
if not re.match('^[a-z0-9]+$', cur_commit_hash):
logger.error('Output does not look like a hash, not using it')
return None
return cur_commit_hash
else:
mylar.INSTALL_TYPE = 'source'
version_file = os.path.join(mylar.PROG_DIR, 'version.txt')
if not os.path.isfile(version_file):
return None
fp = open(version_file, 'r')
current_version = fp.read().strip(' \n\r')
fp.close()
if current_version:
return current_version
else:
return None
def checkGithub():
# Get the latest commit available from github
@ -123,12 +123,12 @@ def checkGithub():
logger.warn('Could not get the latest commit from github')
mylar.COMMITS_BEHIND = 0
return mylar.CURRENT_VERSION
# See how many commits behind we are
# See how many commits behind we are
if mylar.CURRENT_VERSION:
logger.info('Comparing currently installed version with latest github version')
url = 'https://api.github.com/repos/%s/mylar/compare/%s...%s' % (user, mylar.CURRENT_VERSION, mylar.LATEST_VERSION)
try:
result = urllib2.urlopen(url).read()
git = simplejson.JSONDecoder().decode(result)
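A minimal sketch of the compare call, assuming the payload's total_commits field is what feeds COMMITS_BEHIND (stdlib json stands in for simplejson):

    import json
    import urllib2

    def commits_behind(user, installed, latest):
        url = 'https://api.github.com/repos/%s/mylar/compare/%s...%s' % (
            user, installed, latest)
        try:
            payload = json.loads(urllib2.urlopen(url).read())
        except (IOError, ValueError):
            return 0               # mirrors the fallback above: assume current
        return payload.get('total_commits', 0)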
@ -137,99 +137,99 @@ def checkGithub():
logger.warn('Could not get commits behind from github')
mylar.COMMITS_BEHIND = 0
return mylar.CURRENT_VERSION
if mylar.COMMITS_BEHIND >= 1:
logger.info('New version is available. You are %s commits behind' % mylar.COMMITS_BEHIND)
elif mylar.COMMITS_BEHIND == 0:
logger.info('Mylar is up to date')
elif mylar.COMMITS_BEHIND == -1:
logger.info('You are running an unknown version of Mylar. Run the updater to identify your version')
else:
logger.info('You are running an unknown version of Mylar. Run the updater to identify your version')
return mylar.LATEST_VERSION
def update():
if mylar.INSTALL_TYPE == 'win':
logger.info('Windows .exe updating not supported yet.')
pass
elif mylar.INSTALL_TYPE == 'git':
output, err = runGit('pull origin ' + version.MYLAR_VERSION)
if not output:
logger.error('Couldn\'t download latest version')
for line in output.split('\n'):
if 'Already up-to-date.' in line:
logger.info('No update available, not updating')
logger.info('Output: ' + str(output))
elif line.endswith('Aborting.'):
logger.error('Unable to update from git: '+line)
logger.error('Unable to update from git: ' +line)
logger.info('Output: ' + str(output))
else:
tar_download_url = 'https://github.com/%s/mylar/tarball/%s' % (user, branch)
update_dir = os.path.join(mylar.PROG_DIR, 'update')
version_path = os.path.join(mylar.PROG_DIR, 'version.txt')
try:
logger.info('Downloading update from: '+tar_download_url)
logger.info('Downloading update from: ' +tar_download_url)
data = urllib2.urlopen(tar_download_url)
except (IOError, urllib2.URLError):
logger.error("Unable to retrieve new version from "+tar_download_url+", can't update")
logger.error("Unable to retrieve new version from " +tar_download_url +", can't update")
return
#try sanitizing the name here...
#try sanitizing the name here...
download_name = data.geturl().split('/')[-1].split('?')[0]
tar_download_path = os.path.join(mylar.PROG_DIR, download_name)
# Save tar to disk
f = open(tar_download_path, 'wb')
f.write(data.read())
f.close()
# Extract the tar to update folder
logger.info('Extracting file ' + tar_download_path)
tar = tarfile.open(tar_download_path)
tar.extractall(update_dir)
tar.close()
# Delete the tar.gz
logger.info('Deleting file ' + tar_download_path)
os.remove(tar_download_path)
# Find update dir name
update_dir_contents = [x for x in os.listdir(update_dir) if os.path.isdir(os.path.join(update_dir, x))]
if len(update_dir_contents) != 1:
logger.error(u"Invalid update data, update failed: "+str(update_dir_contents))
logger.error(u"Invalid update data, update failed: " +str(update_dir_contents))
return
content_dir = os.path.join(update_dir, update_dir_contents[0])
# walk temp folder and move files to main folder
for dirname, dirnames, filenames in os.walk(content_dir):
dirname = dirname[len(content_dir)+1:]
dirname = dirname[len(content_dir) +1:]
for curfile in filenames:
old_path = os.path.join(content_dir, dirname, curfile)
new_path = os.path.join(mylar.PROG_DIR, dirname, curfile)
if os.path.isfile(new_path):
os.remove(new_path)
os.renames(old_path, new_path)
# Update version.txt
try:
ver_file = open(version_path, 'w')
ver_file.write(mylar.LATEST_VERSION)
ver_file.close()
except IOError, e:
logger.error(u"Unable to write current version to version.txt, update not complete: "+ex(e))
logger.error(u"Unable to write current version to version.txt, update not complete: " +ex(e))
return
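The move step above re-roots each extracted file onto PROG_DIR; a minimal sketch of that overlay (function name illustrative):

    import os

    def overlay(content_dir, prog_dir):
        for dirname, dirnames, filenames in os.walk(content_dir):
            rel = dirname[len(content_dir) + 1:]    # path relative to the tarball root
            for curfile in filenames:
                old_path = os.path.join(content_dir, rel, curfile)
                new_path = os.path.join(prog_dir, rel, curfile)
                if os.path.isfile(new_path):
                    os.remove(new_path)             # clear the target; rename fails on Windows otherwise
                os.renames(old_path, new_path)      # also creates any missing directories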

File diff suppressed because it is too large


@ -81,27 +81,27 @@ def initialize(options):
'/': {
'tools.staticdir.root': os.path.join(mylar.PROG_DIR, 'data')
},
'/interfaces':{
'/interfaces': {
'tools.staticdir.on': True,
'tools.staticdir.dir': "interfaces"
},
'/images':{
'/images': {
'tools.staticdir.on': True,
'tools.staticdir.dir': "images"
},
'/css':{
'/css': {
'tools.staticdir.on': True,
'tools.staticdir.dir': "css"
},
'/js':{
'/js': {
'tools.staticdir.on': True,
'tools.staticdir.dir': "js"
},
'/favicon.ico':{
'/favicon.ico': {
'tools.staticfile.on': True,
'tools.staticfile.filename': os.path.join(os.path.abspath(os.curdir), 'images' + os.sep + 'favicon.ico')
},
'/cache':{
'/cache': {
'tools.staticdir.on': True,
'tools.staticdir.dir': mylar.CACHE_DIR,
'tools.auth_basic.on': False
@ -113,7 +113,7 @@ def initialize(options):
'tools.auth_basic.on': True,
'tools.auth_basic.realm': 'Mylar',
'tools.auth_basic.checkpassword': cherrypy.lib.auth_basic.checkpassword_dict(
{options['http_username']:options['http_password']})
{options['http_username']: options['http_password']})
})
conf['/api'] = {'tools.auth_basic.on': False}
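A minimal standalone sketch of the auth wiring above, using CherryPy's dict-based validator (the credentials are hypothetical):

    import cherrypy
    from cherrypy.lib import auth_basic

    conf = {'/': {
        'tools.auth_basic.on': True,
        'tools.auth_basic.realm': 'Mylar',
        'tools.auth_basic.checkpassword':
            auth_basic.checkpassword_dict({'admin': 'secret'}),
    }, '/api': {
        'tools.auth_basic.on': False,    # the API path stays unauthenticated, as above
    }}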


@ -16,19 +16,19 @@
from __future__ import print_function
import sys
import fileinput
import csv
import getopt
import sqlite3
import urllib
import os
import time
import sys
import fileinput
import csv
import getopt
import sqlite3
import urllib
import os
import time
import re
import datetime
import shutil
import mylar
import mylar
from mylar import db, updater, helpers, logger, newpull, importer, mb
def pullit(forcecheck=None):
@ -42,9 +42,9 @@ def pullit(forcecheck=None):
pulldate = '00000000'
else:
pulldate = pull_date['SHIPDATE']
except (sqlite3.OperationalError, TypeError),msg:
except (sqlite3.OperationalError, TypeError), msg:
logger.info(u"Error Retrieving weekly pull list - attempting to adjust")
myDB.action("DROP TABLE weekly")
myDB.action("DROP TABLE weekly")
myDB.action("CREATE TABLE IF NOT EXISTS weekly (SHIPDATE text, PUBLISHER text, ISSUE text, COMIC VARCHAR(150), EXTRA text, STATUS text, ComicID text, IssueID text)")
pulldate = '00000000'
logger.fdebug(u"Table re-created, trying to populate")
@ -56,7 +56,7 @@ def pullit(forcecheck=None):
PULLURL = 'http://www.previewsworld.com/shipping/newreleases.txt'
#Prepare the Substitute name switch for pulllist to comic vine conversion
substitutes = os.path.join(mylar.DATA_DIR,"substitutes.csv")
substitutes = os.path.join(mylar.DATA_DIR, "substitutes.csv")
if not os.path.exists(substitutes):
logger.debug('no substitutes.csv file located - not performing substitutions on weekly pull list')
substitute_check = False
@ -69,8 +69,8 @@ def pullit(forcecheck=None):
with open(substitutes) as f:
reader = csv.reader(f, delimiter='|')
for row in reader:
if not row[0].startswith('#'):
logger.fdebug("Substitutes file read : "+str(row))
if not row[0].startswith('#'):
logger.fdebug("Substitutes file read : " +str(row))
shortrep.append(row[0])
longrep.append(row[1])
f.close()
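substitutes.csv is a pipe-delimited short-form/long-form map; a minimal sketch of the format and the read loop (the sample row is hypothetical):

    # substitutes.csv -- '#' lines are comments:
    #   AMAZING SPIDER-MAN|THE AMAZING SPIDER-MAN
    import csv

    shortrep, longrep = [], []
    with open('substitutes.csv') as f:
        for row in csv.reader(f, delimiter='|'):
            if row and not row[0].startswith('#'):
                shortrep.append(row[0])      # short form seen on the pull-list
                longrep.append(row[1])       # full title used for matching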
@ -121,7 +121,7 @@ def pullit(forcecheck=None):
#denotes issues containing special characters that would normally fail the numeric-only issue check.
#add freely, just lowercase and exclude decimals (they get stripped during comparisons)
specialissues = {'au','ai','inh','now'}
specialissues = {'au', 'ai', 'inh', 'now'}
pub = "COMICS"
prevcomic = ""
@ -159,7 +159,7 @@ def pullit(forcecheck=None):
if i.startswith('Shipping') or i.startswith('New Releases') or i.startswith('Upcoming Releases'):
shipdatechk = i.split()
if i.startswith('Shipping'):
shipdate = shipdatechk[1]
shipdate = shipdatechk[1]
elif i.startswith('New Releases'):
shipdate = shipdatechk[3]
elif i.startswith('Upcoming Releases'):
@ -181,7 +181,7 @@ def pullit(forcecheck=None):
return
else:
logger.info(u"Preparing to update to the new listing.")
break
break
else:
mylar.PULLNEW = 'yes'
for yesyes in checkit:
@ -202,18 +202,18 @@ def pullit(forcecheck=None):
break
else:
#logger.info('chkchk not in i - i.findcomics: ' + str(i.find("COMICS")) + ' length: ' + str(len(i.strip())))
if all( [i.find("COMICS") < 1, len(i.strip()) == 6 ] ) or ("GRAPHIC NOVELS" in i):
if all([i.find("COMICS") < 1, len(i.strip()) == 6]) or ("GRAPHIC NOVELS" in i):
# if i.find("COMICS") < 1 and (len(i.strip()) == 6 or "& GRAPHIC NOVELS" in i):
pub = "COMICS"
#logger.info("i.find comics & len =6 : " + pub)
break
break
elif i.find("COMICS") > 12:
#logger.info("comics word found in comic title")
flagged = "yes"
flagged = "yes"
break
else:
#logger.info('yesyes not found: ' + yesyes + ' i.findcomics: ' + str(i.find("COMICS")) + ' length: ' + str(len(i.strip())))
if all( [i.find("COMICS") < 1, len(i.strip()) == 6 ] ) or ("GRAPHIC NOVELS" in i):
if all([i.find("COMICS") < 1, len(i.strip()) == 6]) or ("GRAPHIC NOVELS" in i):
#logger.info("format string not comics & i.find < 1: " + pub)
pub = "COMICS"
break
@ -221,7 +221,7 @@ def pullit(forcecheck=None):
pub = format(str(yesyes))
#logger.info("format string not comics & i.find > 1: " + pub)
break
if flagged == "no":
if flagged == "no":
break
else:
dupefound = "no"
@ -240,9 +240,9 @@ def pullit(forcecheck=None):
#this is to ensure we don't get any comps added by removing them entirely (ie. #1-4, etc)
x = None
try:
x = float( re.sub('#','', issname[n].strip()) )
x = float(re.sub('#', '', issname[n].strip()))
except ValueError, e:
if any(d in re.sub(r'[^a-zA-Z0-9]','',issname[n]).strip() for d in specialissues):
if any(d in re.sub(r'[^a-zA-Z0-9]', '', issname[n]).strip() for d in specialissues):
issue = issname[n]
else:
logger.fdebug('Comp issue set detected as : ' + str(issname[n]) + '. Ignoring.')
@ -250,7 +250,7 @@ def pullit(forcecheck=None):
else:
issue = issname[n]
if 'ongoing' not in issname[n-1].lower() and '(vu)' not in issname[n-1].lower():
if 'ongoing' not in issname[n -1].lower() and '(vu)' not in issname[n -1].lower():
#print ("issue found : " + issname[n])
comicend = n - 1
else:
@ -264,7 +264,7 @@ def pullit(forcecheck=None):
while (n < comicend + 1):
comicnm = comicnm + " " + issname[n]
n+=1
comicnm = re.sub('1 FOR \$1','', comicnm).strip()
comicnm = re.sub('1 FOR \$1', '', comicnm).strip()
#logger.info("Comicname: " + str(comicnm) )
#get remainder
try:
@ -294,8 +294,8 @@ def pullit(forcecheck=None):
# if '.' in issue:
# issue_decimal = re.compile(r'[^\d.]+')
# issue = issue_decimal.sub('', str(issue))
# else: issue = re.sub('#','', issue)
issue = re.sub('#','', issue)
# else: issue = re.sub('#','', issue)
issue = re.sub('#', '', issue)
#issue = re.sub("\D", "", str(issue))
#store the previous comic/issue for comparison to filter out duplicate issues/alt covers
#print ("Previous Comic & Issue: " + str(prevcomic) + "--" + str(previssue))
@ -311,7 +311,7 @@ def pullit(forcecheck=None):
while (n < issnamec):
#find the type of non-issue (TP,HC,GN,SC,OS,PI etc)
for cm in cmty:
if "ONE" in issue and "SHOT" in issname[n+1]: issue = "OS"
if "ONE" in issue and "SHOT" in issname[n +1]: issue = "OS"
if cm == (issname[n]):
if issname[n] == 'PI':
issue = 'NA'
@ -364,11 +364,11 @@ def pullit(forcecheck=None):
#-- remove html tags when alt_pull is enabled
if mylar.ALT_PULL:
if '&amp;' in comicnm:
comicnm = re.sub('&amp;','&',comicnm).strip()
comicnm = re.sub('&amp;', '&', comicnm).strip()
if '&amp;' in pub:
pub = re.sub('&amp;','&',pub).strip()
pub = re.sub('&amp;', '&', pub).strip()
if '&amp;' in comicrm:
comicrm = re.sub('&amp;','&',comicrm).strip()
comicrm = re.sub('&amp;', '&', comicrm).strip()
#--start duplicate comic / issue chk
# the pull-list sometimes uses short forms of a series' title, which causes problems
@ -377,11 +377,11 @@ def pullit(forcecheck=None):
if substitute_check == True:
#Step through the list - storing an index
for repindex,repcheck in enumerate(shortrep):
for repindex, repcheck in enumerate(shortrep):
if len(comicnm) >= len(repcheck):
#if the leftmost chars match the short text then replace them with the long text
if comicnm[:len(repcheck)]==repcheck:
logger.fdebug("Switch worked on "+comicnm + " replacing " + str(repcheck) + " with " + str(longrep[repindex]))
logger.fdebug("Switch worked on " +comicnm + " replacing " + str(repcheck) + " with " + str(longrep[repindex]))
comicnm = re.sub(repcheck, longrep[repindex], comicnm)
for excl in excludes:
@ -425,11 +425,11 @@ def pullit(forcecheck=None):
logger.debug("Row: %s" % row)
controlValueDict = {'COMIC': row[3],
'ISSUE': row[2],
'EXTRA': row[4] }
'EXTRA': row[4]}
newValueDict = {'SHIPDATE': row[0],
'PUBLISHER': row[1],
'STATUS': row[5],
'COMICID': None }
'COMICID': None}
myDB.upsert("weekly", newValueDict, controlValueDict)
#cursor.execute("INSERT INTO weekly VALUES (?,?,?,?,?,?,null);", row)
except Exception, e:
@ -442,8 +442,8 @@ def pullit(forcecheck=None):
logger.info(u"Weekly Pull List successfully loaded.")
#let's delete the files
pullpath = str(mylar.CACHE_DIR) + "/"
os.remove( str(pullpath) + "Clean-newreleases.txt" )
os.remove( str(pullpath) + "newreleases.txt" )
os.remove(str(pullpath) + "Clean-newreleases.txt")
os.remove(str(pullpath) + "newreleases.txt")
pullitcheck(forcecheck=forcecheck)
def pullitcheck(comic1off_name=None, comic1off_id=None, forcecheck=None, futurepull=None, issue=None):
@ -490,7 +490,7 @@ def pullitcheck(comic1off_name=None, comic1off_id=None, forcecheck=None, futurep
unlines.append(comic1off_name.strip())
comicid.append(comic1off_id)
latestissue.append(issue)
w = 1
w = 1
else:
#let's read in the comic.watchlist from the db here
#cur.execute("SELECT ComicID, ComicName_Filesafe, ComicYear, ComicPublisher, ComicPublished, LatestDate, ForceContinuing, AlternateSearch, LatestIssue from comics WHERE Status = 'Active'")
@ -528,7 +528,7 @@ def pullitcheck(comic1off_name=None, comic1off_id=None, forcecheck=None, futurep
latest_day = '01'
else:
latest_day = latestdate[8:]
c_date = datetime.date(int(latestdate[:4]),int(latestdate[5:7]),int(latest_day))
c_date = datetime.date(int(latestdate[:4]), int(latestdate[5:7]), int(latest_day))
n_date = datetime.date.today()
logger.fdebug("c_date : " + str(c_date) + " ... n_date : " + str(n_date))
recentchk = (n_date - c_date).days
@ -555,7 +555,7 @@ def pullitcheck(comic1off_name=None, comic1off_id=None, forcecheck=None, futurep
if Altload == 'no results':
pass
else:
wc = 0
wc = 0
alt_cid = Altload['ComicID']
n = 0
iscnt = Altload['Count']
@ -570,17 +570,17 @@ def pullitcheck(comic1off_name=None, comic1off_id=None, forcecheck=None, futurep
comicid.append(alt_cid)
pubdate.append(week['ComicPublished'])
latestissue.append(week['LatestIssue'])
lines.append(a_list[w+wc].strip())
unlines.append(a_list[w+wc].strip())
lines.append(a_list[w +wc].strip())
unlines.append(a_list[w +wc].strip())
logger.fdebug('loading in Alternate name for ' + str(cleanedname))
n+=1
wc+=1
w+=wc
else:
logger.fdebug("Determined to not be a Continuing series at this time.")
cnt = int(w-1)
cntback = int(w-1)
logger.fdebug("Determined to not be a Continuing series at this time.")
cnt = int(w -1)
cntback = int(w -1)
kp = []
ki = []
kc = []
@ -653,7 +653,7 @@ def pullitcheck(comic1off_name=None, comic1off_id=None, forcecheck=None, futurep
#logger.fdebug("modwatchcomic:" + modwatchcomic)
#annuals!
if 'ANNUAL' in comicnm.upper():
if 'ANNUAL' in comicnm.upper():
modcomicnm = re.sub("\\bANNUAL\\b", "", modcomicnm.upper())
watchcomic = re.sub(r'\s', '', watchcomic)
@ -693,7 +693,7 @@ def pullitcheck(comic1off_name=None, comic1off_id=None, forcecheck=None, futurep
#if the store-date is <= weeklypull-list date then break.
### week['ISSUE'] #issue # from pullist
### week['SHIPDATE'] #weeklypull-list date
### comicid[cnt] #comicid of matched series
### comicid[cnt] #comicid of matched series
## if it's a futurepull, the dates get mixed up when two titles exist of the same name
## ie. Wolverine-2011 & Wolverine-2014
@ -706,8 +706,8 @@ def pullitcheck(comic1off_name=None, comic1off_id=None, forcecheck=None, futurep
if 'ANNUAL' in comicnm.upper():
chktype = 'annual'
else:
chktype = 'series'
chktype = 'series'
datevalues = loaditup(watchcomic, comicid[cnt], week['ISSUE'], chktype)
date_downloaded = None
@ -731,7 +731,7 @@ def pullitcheck(comic1off_name=None, comic1off_id=None, forcecheck=None, futurep
if validcheck == False:
if date_downloaded is None:
break
if chktype == 'series':
if chktype == 'series':
latest_int = helpers.issuedigits(latestiss)
weekiss_int = helpers.issuedigits(week['ISSUE'])
logger.fdebug('comparing ' + str(latest_int) + ' to ' + str(weekiss_int))
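helpers.issuedigits() is not shown in this diff; the comparison only needs issue numbers normalized to sortable integers. A rough sketch of that idea (the scale factor is assumed, not Mylar's actual implementation):

    def issuedigits_sketch(issue):
        try:
            return int(float(issue) * 1000)   # '12' -> 12000, '12.1' -> 12100
        except ValueError:
            return None                       # lettered issues need a lookup table
    # weekly issue newer than the stored latest? then the series needs a refresh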
@ -796,7 +796,7 @@ def pullitcheck(comic1off_name=None, comic1off_id=None, forcecheck=None, futurep
cstatus = statusupdate['Status']
cstatusid = {"ComicID": statusupdate['ComicID'],
"IssueID": statusupdate['IssueID']}
else:
cstatus = None
cstatusid = None
@ -805,9 +805,9 @@ def pullitcheck(comic1off_name=None, comic1off_id=None, forcecheck=None, futurep
cstatus = None
#set the variable fp to denote updating the futurepull list ONLY
if futurepull is None:
if futurepull is None:
fp = None
else:
else:
cstatusid = ComicID
fp = "yes"
@ -833,7 +833,7 @@ def check(fname, txt):
with open(fname) as dataf:
return any(txt in line for line in dataf)
except:
return None
return None
def loaditup(comicname, comicid, issue, chktype):
myDB = db.DBConnection()
@ -851,7 +851,7 @@ def loaditup(comicname, comicid, issue, chktype):
logger.fdebug('No results matched for Issue number - either this is a NEW issue with no data yet, or something is wrong')
return 'no results'
dataissue = []
dataissue = []
releasedate = issueload['ReleaseDate']
storedate = issueload['IssueDate']
status = issueload['Status']
@ -860,7 +860,7 @@ def loaditup(comicname, comicid, issue, chktype):
logger.fdebug('Store date of 0000-00-00 returned for ' + str(typedisplay) + ' # ' + str(issue) + '. Refreshing series to see if valid date present')
mismatch = 'no'
#issuerecheck = mylar.importer.addComictoDB(comicid,mismatch,calledfrom='weekly',issuechk=issue_number,issuetype=chktype)
issuerecheck = mylar.importer.updateissuedata(comicid,comicname,calledfrom='weekly',issuechk=issue_number,issuetype=chktype)
issuerecheck = mylar.importer.updateissuedata(comicid, comicname, calledfrom='weekly', issuechk=issue_number, issuetype=chktype)
if issuerecheck is not None:
for il in issuerecheck:
#this is only one record..
@ -887,7 +887,7 @@ def loaditup(comicname, comicid, issue, chktype):
return dataissue
def checkthis(datecheck,datestatus,usedate):
def checkthis(datecheck, datestatus, usedate):
logger.fdebug('Now checking date comparison using an issue store date of ' + str(datecheck))
logger.fdebug('Using a compare date (usedate) of ' + str(usedate))
@ -921,14 +921,14 @@ def weekly_singlecopy(comicid, issuenum, file, path, module=None, issueid=None):
logger.fdebug(module + ' Weekly pull list detected as : ' + str(pulldate))
except (sqlite3.OperationalError, TypeError),msg:
except (sqlite3.OperationalError, TypeError), msg:
logger.info(module + ' Error determining current weekly pull-list date - you should probably refresh the pull-list manually.')
return
if issueid is None:
chkit = myDB.selectone('SELECT * FROM weekly WHERE ComicID=? AND ISSUE=?',[comicid, issuenum]).fetchone()
chkit = myDB.selectone('SELECT * FROM weekly WHERE ComicID=? AND ISSUE=?', [comicid, issuenum]).fetchone()
else:
chkit = myDB.selectone('SELECT * FROM weekly WHERE ComicID=? AND IssueID=?',[comicid, issueid]).fetchone()
chkit = myDB.selectone('SELECT * FROM weekly WHERE ComicID=? AND IssueID=?', [comicid, issueid]).fetchone()
if chkit is None:
logger.fdebug(module + ' ' + file + ' is not on the weekly pull-list or it is a one-off download that is not supported as of yet.')
@ -956,7 +956,7 @@ def weekly_singlecopy(comicid, issuenum, file, path, module=None, issueid=None):
logger.error(module + ' Could not copy ' + str(srcfile) + ' to ' + str(desfile))
return
logger.info(module + ' Successfully copied to ' + desfile.encode('utf-8').strip() )
logger.info(module + ' Successfully copied to ' + desfile.encode('utf-8').strip())
if mylar.SEND2READ:
send2read(comicid, issueid, issuenum)
@ -966,8 +966,8 @@ def send2read(comicid, issueid, issuenum):
if mylar.SEND2READ:
logger.info(module + " Send to Reading List enabled for new pulls. Adding to your readlist in the status of 'Added'")
if issueid is None:
chkthis = myDB.selectone('SELECT * FROM issues WHERE ComicID=? AND Int_IssueNumber=?',[comicid, helpers.issuedigits(issuenum)]).fetchone()
annchk = myDB.selectone('SELECT * FROM annuals WHERE ComicID=? AND Int_IssueNumber=?',[comicid, helpers.issuedigits(issuenum)]).fetchone()
chkthis = myDB.selectone('SELECT * FROM issues WHERE ComicID=? AND Int_IssueNumber=?', [comicid, helpers.issuedigits(issuenum)]).fetchone()
annchk = myDB.selectone('SELECT * FROM annuals WHERE ComicID=? AND Int_IssueNumber=?', [comicid, helpers.issuedigits(issuenum)]).fetchone()
if chkthis is None and annchk is None:
logger.warn(module + ' Unable to locate issue within your series watchlist.')
return
@ -983,7 +983,7 @@ def send2read(comicid, issueid, issuenum):
anncomp = annchk['ReleaseDate'][:4]
logger.info(module + ' Comparing :' + str(pullcomp) + ' to issdate: ' + str(isscomp) + ' to annyear: ' + str(anncomp))
if int(pullcomp) == int(isscomp) and int(pullcomp) != int(anncomp):
issueid = chkthis['IssueID']
issueid = chkthis['IssueID']
elif int(pullcomp) == int(anncomp) and int(pullcomp) != int(isscomp):
issueid = annchk['IssueID']
else:
@ -991,7 +991,7 @@ def send2read(comicid, issueid, issuenum):
issueid = annchk['IssueID']
else:
logger.info(module + ' Unsure as to the exact issue this is. Not adding to the Reading list at this time.')
return
return
read = mylar.readinglist.Readinglist(IssueID=issueid)
read.addtoreadlist()
return
@ -1045,8 +1045,8 @@ def future_check():
logger.info('More than one result returned - this may have to be a manual add')
matches = []
for sr in searchresults:
tmpsername = re.sub('[\'\*\^\%\$\#\@\!\-\/\,\.\:\(\)]','', ser['ComicName']).strip()
tmpsrname = re.sub('[\'\*\^\%\$\#\@\!\-\/\,\.\:\(\)]','', sr['name']).strip()
tmpsername = re.sub('[\'\*\^\%\$\#\@\!\-\/\,\.\:\(\)]', '', ser['ComicName']).strip()
tmpsrname = re.sub('[\'\*\^\%\$\#\@\!\-\/\,\.\:\(\)]', '', sr['name']).strip()
if tmpsername.lower() == tmpsrname.lower() and len(tmpsername) <= len(tmpsrname):
logger.info('name & lengths matched : ' + sr['name'])
if str(sr['comicyear']) == str(theissdate):