Whitespace cleanup

Adrian Moisey 2015-05-22 10:32:51 +02:00 committed by evilhero
parent d21cc0d80e
commit 9a4a6b4bc5
37 changed files with 1769 additions and 1771 deletions
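The diffs below apply PEP 8 spacing consistently across the codebase: spaces around assignments and augmented assignments, a space after commas and after the colon in dict literals, and no stray space before a trailing colon. A representative before/after pair (illustrative, composited from the hunks below rather than taken from any single file):

# before
mylar.DAEMON=True
self.valreturn.append({"self.log" : self.log,
                       "mode" : 'stop'})
i+=1

# after
mylar.DAEMON = True
self.valreturn.append({"self.log": self.log,
                       "mode": 'stop'})
i += 1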

View File

@@ -32,9 +32,11 @@ try:
except ImportError:
import lib.argparse as argparse
def handler_sigterm(signum, frame):
mylar.SIGNAL = 'shutdown'
def main():
# Fixed paths to mylar
@@ -92,10 +94,10 @@ def main():
if sys.platform == 'win32':
print "Daemonize not supported under Windows, starting normally"
else:
mylar.DAEMON=True
mylar.VERBOSE=0
mylar.DAEMON = True
mylar.VERBOSE = 0
if args.pidfile :
if args.pidfile:
mylar.PIDFILE = str(args.pidfile)
# If the pidfile already exists, mylar may still be running, so exit
@@ -112,7 +114,6 @@ def main():
else:
logger.warn("Not running in daemon mode. PID file creation disabled.")
if args.datadir:
mylar.DATA_DIR = args.datadir
else:
@@ -183,7 +184,7 @@ def main():
if exception.errno != errno.EXIST:
raise
i+=1
i += 1
mylar.CFG = ConfigObj(mylar.CONFIG_FILE, encoding='utf-8')

View File

@@ -105,8 +105,8 @@ class FailedProcessor(object):
if nzbiss is None:
logger.error(module + ' Unable to locate downloaded file to rename. PostProcessing aborted.')
self._log('Unable to locate downloaded file to rename. PostProcessing aborted.')
self.valreturn.append({"self.log" : self.log,
"mode" : 'stop'})
self.valreturn.append({"self.log": self.log,
"mode": 'stop'})
return self.queue.put(self.valreturn)
else:
@@ -124,8 +124,8 @@ class FailedProcessor(object):
nzbiss = myDB.selectone("SELECT * from nzblog WHERE IssueID=?", [issueid]).fetchone()
if nzbiss is None:
logger.info(module + ' Cannot locate corresponding record in download history. This will be implemented soon.')
self.valreturn.append({"self.log" : self.log,
"mode" : 'stop'})
self.valreturn.append({"self.log": self.log,
"mode": 'stop'})
return self.queue.put(self.valreturn)
nzbname = nzbiss['NZBName']
@@ -165,8 +165,8 @@ class FailedProcessor(object):
else:
logger.info('Failed download handling for story-arcs and one-off\'s are not supported yet. Be patient!')
self._log(' Unable to locate downloaded file to rename. PostProcessing aborted.')
self.valreturn.append({"self.log" : self.log,
"mode" : 'stop'})
self.valreturn.append({"self.log": self.log,
"mode": 'stop'})
return self.queue.put(self.valreturn)
@@ -208,8 +208,8 @@ class FailedProcessor(object):
else:
logger.info(module + ' Stopping search here as automatic handling of failed downloads is not enabled *hint*')
self._log('Stopping search here as automatic handling of failed downloads is not enabled *hint*')
self.valreturn.append({"self.log" : self.log,
"mode" : 'stop'})
self.valreturn.append({"self.log": self.log,
"mode": 'stop'})
return self.queue.put(self.valreturn)

View File

@@ -104,7 +104,7 @@ class PostProcessor(object):
first_line = f.readline()
if mylar.PRE_SCRIPTS.endswith('.sh'):
shell_cmd = re.sub('#!','', first_line).strip()
shell_cmd = re.sub('#!', '', first_line).strip()
if shell_cmd == '' or shell_cmd is None:
shell_cmd = '/bin/bash'
else:
@@ -119,8 +119,8 @@ class PostProcessor(object):
self._log("cmd to be executed: " + str(script_cmd))
# use subprocess to run the command and capture output
logger.fdebug(u"Executing command "+str(script_cmd))
logger.fdebug(u"Absolute path to script: "+script_cmd[0])
logger.fdebug(u"Executing command " +str(script_cmd))
logger.fdebug(u"Absolute path to script: " +script_cmd[0])
try:
p = subprocess.Popen(script_cmd, stdout=subprocess.PIPE, stderr=subprocess.STDOUT, cwd=mylar.PROG_DIR)
out, err = p.communicate() #@UnusedVariable
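The two hunks above cover the pre-script runner: it reads the script's first line, strips the '#!' to recover the interpreter, falls back to /bin/bash for a .sh script with no usable shebang, then executes through subprocess. A condensed sketch of that flow (script_path is a hypothetical stand-in for the configured script):

import re, subprocess

with open(script_path) as f:                      # script_path: hypothetical
    shell_cmd = re.sub('#!', '', f.readline()).strip()
if not shell_cmd:                                 # empty shebang -> default shell
    shell_cmd = '/bin/bash'
p = subprocess.Popen([shell_cmd, script_path], stdout=subprocess.PIPE,
                     stderr=subprocess.STDOUT)
out, err = p.communicate()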
@@ -145,7 +145,7 @@ class PostProcessor(object):
first_line = f.readline()
if mylar.EXTRA_SCRIPTS.endswith('.sh'):
shell_cmd = re.sub('#!','', first_line)
shell_cmd = re.sub('#!', '', first_line)
if shell_cmd == '' or shell_cmd is None:
shell_cmd = '/bin/bash'
else:
@@ -160,8 +160,8 @@ class PostProcessor(object):
self._log("cmd to be executed: " + str(script_cmd))
# use subprocess to run the command and capture output
logger.fdebug(u"Executing command "+str(script_cmd))
logger.fdebug(u"Absolute path to script: "+script_cmd[0])
logger.fdebug(u"Executing command " +str(script_cmd))
logger.fdebug(u"Absolute path to script: " +script_cmd[0])
try:
p = subprocess.Popen(script_cmd, stdout=subprocess.PIPE, stderr=subprocess.STDOUT, cwd=mylar.PROG_DIR)
out, err = p.communicate() #@UnusedVariable
@@ -261,7 +261,7 @@ class PostProcessor(object):
"ComicPublisher": wv_comicpublisher,
"AlternateSearch": wv_alternatesearch,
"ComicID": wv_comicid,
"WatchValues" : {"SeriesYear": wv_seriesyear,
"WatchValues": {"SeriesYear": wv_seriesyear,
"LatestDate": latestdate,
"ComicVersion": wv_comicversion,
"Publisher": wv_publisher,
@@ -272,7 +272,7 @@ class PostProcessor(object):
ccnt=0
nm=0
for cs in watchvals:
watchmatch = filechecker.listFiles(self.nzb_folder,cs['ComicName'],cs['ComicPublisher'],cs['AlternateSearch'], manual=cs['WatchValues'])
watchmatch = filechecker.listFiles(self.nzb_folder, cs['ComicName'], cs['ComicPublisher'], cs['AlternateSearch'], manual=cs['WatchValues'])
if watchmatch['comiccount'] == 0: # is None:
nm+=1
continue
@@ -283,7 +283,7 @@ class PostProcessor(object):
while (fn < fccnt):
try:
tmpfc = watchmatch['comiclist'][fn]
except IndexError,KeyError:
except IndexError, KeyError:
break
temploc= tmpfc['JusttheDigits'].replace('_', ' ')
temploc = re.sub('[\#\']', '', temploc)
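One note beyond the spacing fix in this hunk: in Python 2, "except IndexError, KeyError:" does not catch both exceptions; it catches only IndexError and binds the instance to the name KeyError. Catching both requires a tuple:

try:
    tmpfc = watchmatch['comiclist'][fn]
except (IndexError, KeyError):   # the comma form would shadow KeyError instead
    break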
@@ -297,10 +297,10 @@ class PostProcessor(object):
logger.fdebug(module + ' Annual detected.')
fcdigit = helpers.issuedigits(re.sub('annual', '', str(temploc.lower())).strip())
annchk = "yes"
issuechk = myDB.selectone("SELECT * from annuals WHERE ComicID=? AND Int_IssueNumber=?", [cs['ComicID'],fcdigit]).fetchone()
issuechk = myDB.selectone("SELECT * from annuals WHERE ComicID=? AND Int_IssueNumber=?", [cs['ComicID'], fcdigit]).fetchone()
else:
fcdigit = helpers.issuedigits(temploc)
issuechk = myDB.selectone("SELECT * from issues WHERE ComicID=? AND Int_IssueNumber=?", [cs['ComicID'],fcdigit]).fetchone()
issuechk = myDB.selectone("SELECT * from issues WHERE ComicID=? AND Int_IssueNumber=?", [cs['ComicID'], fcdigit]).fetchone()
if issuechk is None:
logger.fdebug(module + ' No corresponding issue # found for ' + str(cs['ComicID']))
@@ -382,7 +382,7 @@ class PostProcessor(object):
nzbname = re.sub('.cbr', '', nzbname).strip()
nzbname = re.sub('.cbz', '', nzbname).strip()
nzbname = re.sub('[\.\_]', ' ', nzbname).strip()
nzbname = re.sub('\s+',' ', nzbname) #make sure we remove the extra spaces.
nzbname = re.sub('\s+', ' ', nzbname) #make sure we remove the extra spaces.
logger.fdebug('[NZBNAME] nzbname (remove extensions, double spaces, convert underscores to spaces): ' + nzbname)
nzbname = re.sub('\s', '.', nzbname)
@@ -391,7 +391,7 @@ class PostProcessor(object):
# nzbname=self.nzb_name
self._log("nzbname: " + str(nzbname))
nzbiss = myDB.selectone("SELECT * from nzblog WHERE nzbname=? or altnzbname=?", [nzbname,nzbname]).fetchone()
nzbiss = myDB.selectone("SELECT * from nzblog WHERE nzbname=? or altnzbname=?", [nzbname, nzbname]).fetchone()
if nzbiss is None:
self._log("Failure - could not initially locate nzbfile in my database to rename.")
@@ -400,12 +400,12 @@ class PostProcessor(object):
nzbname = re.sub('[\(\)]', '', str(nzbname))
self._log("trying again with this nzbname: " + str(nzbname))
logger.fdebug(module + ' Trying to locate nzbfile again with nzbname of : ' + str(nzbname))
nzbiss = myDB.selectone("SELECT * from nzblog WHERE nzbname=? or altnzbname=?", [nzbname,nzbname]).fetchone()
nzbiss = myDB.selectone("SELECT * from nzblog WHERE nzbname=? or altnzbname=?", [nzbname, nzbname]).fetchone()
if nzbiss is None:
logger.error(module + ' Unable to locate downloaded file to rename. PostProcessing aborted.')
self._log('Unable to locate downloaded file to rename. PostProcessing aborted.')
self.valreturn.append({"self.log" : self.log,
"mode" : 'stop'})
self.valreturn.append({"self.log": self.log,
"mode": 'stop'})
return self.queue.put(self.valreturn)
else:
self._log("I corrected and found the nzb as : " + str(nzbname))
@@ -490,7 +490,7 @@ class PostProcessor(object):
issuearcid = re.sub('S', '', issueid)
logger.fdebug(module + ' issuearcid:' + str(issuearcid))
arcdata = myDB.selectone("SELECT * FROM readinglist WHERE IssueArcID=?",[issuearcid]).fetchone()
arcdata = myDB.selectone("SELECT * FROM readinglist WHERE IssueArcID=?", [issuearcid]).fetchone()
issueid = arcdata['IssueID']
#tag the meta.
@@ -582,8 +582,8 @@ class PostProcessor(object):
logger.info(module + ' Post-Processing completed for: [' + sarc + '] ' + grab_dst)
self._log(u"Post Processing SUCCESSFUL! ")
self.valreturn.append({"self.log" : self.log,
"mode" : 'stop'})
self.valreturn.append({"self.log": self.log,
"mode": 'stop'})
return self.queue.put(self.valreturn)
@@ -612,7 +612,7 @@ class PostProcessor(object):
dupthis = helpers.duplicate_filecheck(ml['ComicLocation'], ComicID=comicid, IssueID=issueid)
if dupthis == "write":
stat = ' [' + str(i) + '/' + str(len(manual_list)) + ']'
self.Process_next(comicid,issueid,issuenumOG,ml,stat)
self.Process_next(comicid, issueid, issuenumOG, ml, stat)
dupthis = None
logger.info(module + ' Manual post-processing completed for ' + str(i) + ' issues.')
return
@@ -622,17 +622,17 @@ class PostProcessor(object):
#the self.nzb_folder should contain only the existing filename
dupthis = helpers.duplicate_filecheck(self.nzb_folder, ComicID=comicid, IssueID=issueid)
if dupthis == "write":
return self.Process_next(comicid,issueid,issuenumOG)
return self.Process_next(comicid, issueid, issuenumOG)
else:
self.valreturn.append({"self.log" : self.log,
"mode" : 'stop',
"issueid" : issueid,
"comicid" : comicid})
self.valreturn.append({"self.log": self.log,
"mode": 'stop',
"issueid": issueid,
"comicid": comicid})
return self.queue.put(self.valreturn)
def Process_next(self,comicid,issueid,issuenumOG,ml=None,stat=None):
def Process_next(self, comicid, issueid, issuenumOG, ml=None, stat=None):
if stat is None: stat = ' [1/1]'
module = self.module
annchk = "no"
@@ -640,9 +640,9 @@ class PostProcessor(object):
snatchedtorrent = False
myDB = db.DBConnection()
comicnzb = myDB.selectone("SELECT * from comics WHERE comicid=?", [comicid]).fetchone()
issuenzb = myDB.selectone("SELECT * from issues WHERE issueid=? AND comicid=? AND ComicName NOT NULL", [issueid,comicid]).fetchone()
issuenzb = myDB.selectone("SELECT * from issues WHERE issueid=? AND comicid=? AND ComicName NOT NULL", [issueid, comicid]).fetchone()
if ml is not None and mylar.SNATCHEDTORRENT_NOTIFY:
snatchnzb = myDB.selectone("SELECT * from snatched WHERE IssueID=? AND ComicID=? AND (provider=? OR provider=?) AND Status='Snatched'", [issueid,comicid,'KAT','32P']).fetchone()
snatchnzb = myDB.selectone("SELECT * from snatched WHERE IssueID=? AND ComicID=? AND (provider=? OR provider=?) AND Status='Snatched'", [issueid, comicid, 'KAT', '32P']).fetchone()
if snatchnzb is None:
logger.fdebug(module + ' Was not downloaded with Mylar and the usage of torrents. Disabling torrent manual post-processing completion notification.')
else:
@@ -650,7 +650,7 @@ class PostProcessor(object):
snatchedtorrent = True
if issuenzb is None:
issuenzb = myDB.selectone("SELECT * from annuals WHERE issueid=? and comicid=?", [issueid,comicid]).fetchone()
issuenzb = myDB.selectone("SELECT * from annuals WHERE issueid=? and comicid=?", [issueid, comicid]).fetchone()
annchk = "yes"
if annchk == "no":
logger.info(module + stat + ' Starting Post-Processing for ' + issuenzb['ComicName'] + ' issue: ' + issuenzb['Issue_Number'])
@@ -690,7 +690,7 @@ class PostProcessor(object):
if '.' in issuenum:
iss_find = issuenum.find('.')
iss_b4dec = issuenum[:iss_find]
iss_decval = issuenum[iss_find+1:]
iss_decval = issuenum[iss_find +1:]
if iss_decval.endswith('.'): iss_decval = iss_decval[:-1]
if int(iss_decval) == 0:
iss = iss_b4dec
@@ -778,7 +778,7 @@ class PostProcessor(object):
issueyear = issuenzb['IssueDate'][:4]
self._log("Issue Year: " + str(issueyear))
logger.fdebug(module + ' Issue Year : ' + str(issueyear))
month = issuenzb['IssueDate'][5:7].replace('-','').strip()
month = issuenzb['IssueDate'][5:7].replace('-', '').strip()
month_name = helpers.fullmonth(month)
# comicnzb= myDB.action("SELECT * from comics WHERE comicid=?", [comicid]).fetchone()
publisher = comicnzb['ComicPublisher']
@@ -806,7 +806,7 @@ class PostProcessor(object):
comversion = 'None'
#if comversion is None, remove it so it doesn't populate with 'None'
if comversion == 'None':
chunk_f_f = re.sub('\$VolumeN','',mylar.FILE_FORMAT)
chunk_f_f = re.sub('\$VolumeN', '', mylar.FILE_FORMAT)
chunk_f = re.compile(r'\s+')
chunk_file_format = chunk_f.sub(' ', chunk_f_f)
self._log("No version # found for series - tag will not be available for renaming.")
@@ -816,7 +816,7 @@ class PostProcessor(object):
chunk_file_format = mylar.FILE_FORMAT
if annchk == "no":
chunk_f_f = re.sub('\$Annual','',chunk_file_format)
chunk_f_f = re.sub('\$Annual', '', chunk_file_format)
chunk_f = re.compile(r'\s+')
chunk_file_format = chunk_f.sub(' ', chunk_f_f)
logger.fdebug(module + ' Not an annual - removing from filename parameters')
@@ -893,7 +893,7 @@ class PostProcessor(object):
'publisher': publisher
})
seriesmetadata['seriesmeta'] = seriesmeta
self._run_pre_scripts(nzbn, nzbf, seriesmetadata )
self._run_pre_scripts(nzbn, nzbf, seriesmetadata)
#rename file and move to new path
#nfilename = series + " " + issueno + " (" + seriesyear + ")"
@@ -945,8 +945,8 @@ class PostProcessor(object):
if ofilename is None:
logger.error(module + ' Aborting PostProcessing - the filename does not exist in the location given. Make sure that ' + str(self.nzb_folder) + ' exists and is the correct location.')
self.valreturn.append({"self.log" : self.log,
"mode" : 'stop'})
self.valreturn.append({"self.log": self.log,
"mode": 'stop'})
return self.queue.put(self.valreturn)
self._log("Original Filename: " + ofilename)
self._log("Original Extension: " + ext)
@@ -994,8 +994,8 @@ class PostProcessor(object):
logger.fdebug(module + ' nfilename:' + nfilename + ext)
if mylar.RENAME_FILES:
if str(ofilename) != str(nfilename + ext):
logger.fdebug(module + ' Renaming ' + os.path.join(odir, ofilename) + ' ..to.. ' + os.path.join(odir,nfilename + ext))
os.rename(os.path.join(odir, ofilename), os.path.join(odir,nfilename + ext))
logger.fdebug(module + ' Renaming ' + os.path.join(odir, ofilename) + ' ..to.. ' + os.path.join(odir, nfilename + ext))
os.rename(os.path.join(odir, ofilename), os.path.join(odir, nfilename + ext))
else:
logger.fdebug(module + ' Filename is identical as original, not renaming.')
@@ -1008,8 +1008,8 @@ class PostProcessor(object):
self._log("Post-Processing ABORTED.")
logger.warn(module + ' Failed to move directory : ' + src + ' to ' + dst + ' - check directory and manually re-run')
logger.warn(module + ' Post-Processing ABORTED')
self.valreturn.append({"self.log" : self.log,
"mode" : 'stop'})
self.valreturn.append({"self.log": self.log,
"mode": 'stop'})
return self.queue.put(self.valreturn)
#tidyup old path
@@ -1020,8 +1020,8 @@ class PostProcessor(object):
self._log("Post-Processing ABORTED.")
logger.warn(module + ' Failed to remove temporary directory : ' + self.nzb_folder)
logger.warn(module + ' Post-Processing ABORTED')
self.valreturn.append({"self.log" : self.log,
"mode" : 'stop'})
self.valreturn.append({"self.log": self.log,
"mode": 'stop'})
return self.queue.put(self.valreturn)
self._log("Removed temporary directory : " + str(self.nzb_folder))
logger.fdebug(module + ' Removed temporary directory : ' + self.nzb_folder)
@@ -1032,8 +1032,8 @@ class PostProcessor(object):
src = os.path.join(odir, ofilename)
if mylar.RENAME_FILES:
if str(ofilename) != str(nfilename + ext):
logger.fdebug(module + ' Renaming ' + os.path.join(odir, str(ofilename)) + ' ..to.. ' + os.path.join(odir, self.nzb_folder,str(nfilename + ext)))
os.rename(os.path.join(odir, str(ofilename)), os.path.join(odir ,str(nfilename + ext)))
logger.fdebug(module + ' Renaming ' + os.path.join(odir, str(ofilename)) + ' ..to.. ' + os.path.join(odir, self.nzb_folder, str(nfilename + ext)))
os.rename(os.path.join(odir, str(ofilename)), os.path.join(odir, str(nfilename + ext)))
src = os.path.join(odir, str(nfilename + ext))
else:
logger.fdebug(module + ' Filename is identical as original, not renaming.')
@@ -1046,8 +1046,8 @@ class PostProcessor(object):
logger.fdebug(module + ' Failed to move directory - check directories and manually re-run.')
logger.fdebug(module + ' Post-Processing ABORTED.')
self.valreturn.append({"self.log" : self.log,
"mode" : 'stop'})
self.valreturn.append({"self.log": self.log,
"mode": 'stop'})
return self.queue.put(self.valreturn)
logger.fdebug(module + ' Successfully moved to : ' + dst)
@@ -1088,11 +1088,11 @@ class PostProcessor(object):
dispiss = issuenumOG
#force rescan of files
updater.forceRescan(comicid,module=module)
updater.forceRescan(comicid, module=module)
if mylar.WEEKFOLDER:
#if enabled, will *copy* the post-processed file to the weeklypull list folder for the given week.
weeklypull.weekly_singlecopy(comicid,issuenum,str(nfilename+ext),dst,module=module,issueid=issueid)
weeklypull.weekly_singlecopy(comicid, issuenum, str(nfilename +ext), dst, module=module, issueid=issueid)
# retrieve/create the corresponding comic objects
if mylar.ENABLE_EXTRA_SCRIPTS:
@@ -1113,7 +1113,7 @@ class PostProcessor(object):
'publisher': publisher
})
seriesmetadata['seriesmeta'] = seriesmeta
self._run_extra_scripts(nzbn, self.nzb_folder, filen, folderp, seriesmetadata )
self._run_extra_scripts(nzbn, self.nzb_folder, filen, folderp, seriesmetadata)
if ml is not None:
#we only need to return self.log if it's a manual run and it's not a snatched torrent
@@ -1122,12 +1122,12 @@ class PostProcessor(object):
pass
else:
#manual run + not snatched torrent (or normal manual-run)
logger.info(module + ' Post-Processing completed for: ' + series + ' ' + dispiss )
logger.info(module + ' Post-Processing completed for: ' + series + ' ' + dispiss)
self._log(u"Post Processing SUCCESSFUL! ")
self.valreturn.append({"self.log" : self.log,
"mode" : 'stop',
"issueid" : issueid,
"comicid" : comicid})
self.valreturn.append({"self.log": self.log,
"mode": 'stop',
"issueid": issueid,
"comicid": comicid})
return self.queue.put(self.valreturn)
@@ -1144,7 +1144,7 @@ class PostProcessor(object):
if mylar.PROWL_ENABLED:
pushmessage = prline
prowl = notifiers.PROWL()
prowl.notify(pushmessage,"Download and Postprocessing completed", module=module)
prowl.notify(pushmessage, "Download and Postprocessing completed", module=module)
if mylar.NMA_ENABLED:
nma = notifiers.NMA()
@@ -1162,13 +1162,13 @@ class PostProcessor(object):
pushbullet = notifiers.PUSHBULLET()
pushbullet.notify(prline=prline, prline2=prline2, module=module)
logger.info(module + ' Post-Processing completed for: ' + series + ' ' + dispiss )
logger.info(module + ' Post-Processing completed for: ' + series + ' ' + dispiss)
self._log(u"Post Processing SUCCESSFUL! ")
self.valreturn.append({"self.log" : self.log,
"mode" : 'stop',
"issueid" : issueid,
"comicid" : comicid})
self.valreturn.append({"self.log": self.log,
"mode": 'stop',
"issueid": issueid,
"comicid": comicid})
return self.queue.put(self.valreturn)

View File

@@ -808,7 +808,7 @@ def initialize():
#print('creating provider sequence order now...')
TMPPR_NUM = 0
PROV_ORDER = []
while TMPPR_NUM < PR_NUM :
while TMPPR_NUM < PR_NUM:
PROV_ORDER.append({"order_seq": TMPPR_NUM,
"provider": str(PR[TMPPR_NUM])})
TMPPR_NUM +=1
@@ -831,7 +831,7 @@ def initialize():
TMPPR_NUM = 0
while (TMPPR_NUM < PR_NUM):
#print 'checking entry #' + str(TMPPR_NUM) + ': ' + str(PR[TMPPR_NUM])
if not any(d.get("provider",None) == str(PR[TMPPR_NUM]) for d in PROV_ORDER):
if not any(d.get("provider", None) == str(PR[TMPPR_NUM]) for d in PROV_ORDER):
new_order_seqnum = len(PROV_ORDER)
#print 'new provider should be : ' + str(new_order_seqnum) + ' -- ' + str(PR[TMPPR_NUM])
PROV_ORDER.append({"order_seq": str(new_order_seqnum),
@@ -849,7 +849,7 @@ def initialize():
flatt_providers = []
for pro in PROV_ORDER:
try:
provider_seq = re.sub('cbt','32p', pro['provider'])
provider_seq = re.sub('cbt', '32p', pro['provider'])
flatt_providers.extend([pro['order_seq'], provider_seq])
except TypeError:
#if the value is None (no Name specified for Newznab entry), break out now
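For concreteness, the flattening loop above turns the ordered provider dicts into an alternating sequence of positions and names, renaming the legacy 'cbt' provider to '32p' along the way. With hypothetical input:

PROV_ORDER = [{'order_seq': 0, 'provider': 'cbt'},
              {'order_seq': 1, 'provider': 'dognzb'}]
# the loop yields:
flatt_providers = [0, '32p', 1, 'dognzb']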
@@ -861,8 +861,8 @@ def initialize():
# update folder formats in the config & bump up config version
if CONFIG_VERSION == '0':
from mylar.helpers import replace_all
file_values = { 'issue': 'Issue', 'title': 'Title', 'series' : 'Series', 'year' : 'Year' }
folder_values = { 'series' : 'Series', 'publisher':'Publisher', 'year' : 'Year', 'first' : 'First', 'lowerfirst' : 'first' }
file_values = {'issue': 'Issue', 'title': 'Title', 'series': 'Series', 'year': 'Year'}
folder_values = {'series': 'Series', 'publisher': 'Publisher', 'year': 'Year', 'first': 'First', 'lowerfirst': 'first'}
FILE_FORMAT = replace_all(FILE_FORMAT, file_values)
FOLDER_FORMAT = replace_all(FOLDER_FORMAT, folder_values)
@@ -872,7 +872,7 @@ def initialize():
from mylar.helpers import replace_all
file_values = { 'Issue': '$Issue',
file_values = {'Issue': '$Issue',
'Title': '$Title',
'Series': '$Series',
'Year': '$Year',
@@ -880,7 +880,7 @@ def initialize():
'series': '$series',
'year': '$year'
}
folder_values = { 'Series': '$Series',
folder_values = {'Series': '$Series',
'Publisher': '$Publisher',
'Year': '$Year',
'First': '$First',
@@ -965,7 +965,7 @@ def initialize():
else:
vers = 'D'
USER_AGENT = 'Mylar/'+str(hash)+'('+vers+') +http://www.github.com/evilhero/mylar/'
USER_AGENT = 'Mylar/' +str(hash) +'(' +vers +') +http://www.github.com/evilhero/mylar/'
# Check for new versions
if CHECK_GITHUB_ON_STARTUP:
@@ -1878,12 +1878,12 @@ def csv_load():
if not os.path.exists(EXCEPTIONS_FILE):
try:
csvfile = open(str(EXCEPTIONS_FILE), "rb")
except (OSError,IOError):
except (OSError, IOError):
if i == 1:
logger.info('No Custom Exceptions found - Using base exceptions only. Creating blank custom_exceptions for your personal use.')
try:
shutil.copy(os.path.join(DATA_DIR,"custom_exceptions_sample.csv"), EXCEPTIONS_FILE)
except (OSError,IOError):
shutil.copy(os.path.join(DATA_DIR, "custom_exceptions_sample.csv"), EXCEPTIONS_FILE)
except (OSError, IOError):
logger.error('Cannot create custom_exceptions.csv in ' + str(DATA_DIR) + '. Make sure _sample.csv is present and/or check permissions.')
return
else:

View File

@@ -15,6 +15,7 @@
from mylar import db
def getCachedArt(albumid):
from mylar import cache

View File

@@ -46,8 +46,7 @@ class Api(object):
self.data = None
self.callback = None
def checkParams(self,*args,**kwargs):
def checkParams(self, *args, **kwargs):
if not mylar.API_ENABLED:
self.data = 'API not enabled'

View File

@@ -5,6 +5,7 @@ from bs4 import BeautifulSoup
import mylar
from mylar import logger
class info32p(object):
def __init__(self, reauthenticate=False, searchterm=None):
@@ -50,8 +51,8 @@ class info32p(object):
logger.info('[32P] Successfully authenticated. Initiating search for : ' + self.searchterm)
return self.search32p(s)
soup = BeautifulSoup(r.content)
all_script = soup.find_all("script", {"src":False})
all_script2 = soup.find_all("link", {"rel":"alternate"})
all_script = soup.find_all("script", {"src": False})
all_script2 = soup.find_all("link", {"rel": "alternate"})
for ind_s in all_script:
all_value = str(ind_s)
@@ -62,13 +63,13 @@ class info32p(object):
if al == 'authkey':
auth_found = True
elif auth_found == True and al != '=':
authkey = re.sub('["/;]','', al).strip()
authkey = re.sub('["/;]', '', al).strip()
auth_found = False
logger.fdebug(self.module + ' Authkey found: ' + str(authkey))
if al == 'userid':
user_found = True
elif user_found == True and al != '=':
userid = re.sub('["/;]','', al).strip()
userid = re.sub('["/;]', '', al).strip()
user_found = False
logger.fdebug(self.module + ' Userid found: ' + str(userid))
@@ -79,23 +80,23 @@ class info32p(object):
alurl = al['href']
if 'auth=' in alurl and 'torrents_notify' in alurl and not authfound:
f1 = alurl.find('auth=')
f2 = alurl.find('&',f1+1)
auth = alurl[f1+5:f2]
f2 = alurl.find('&', f1 + 1)
auth = alurl[f1 +5:f2]
logger.fdebug(self.module + ' Auth:' + str(auth))
authfound = True
p1 = alurl.find('passkey=')
p2 = alurl.find('&',p1+1)
passkey = alurl[p1+8:p2]
p2 = alurl.find('&', p1 + 1)
passkey = alurl[p1 +8:p2]
logger.fdebug(self.module + ' Passkey:' + str(passkey))
if self.reauthenticate: break
if 'torrents_notify' in alurl and ('torrents_notify_' + str(passkey)) not in alurl:
notifyname_st = alurl.find('name=')
notifyname_en = alurl.find('&',notifyname_st+1)
notifyname_en = alurl.find('&', notifyname_st +1)
if notifyname_en == -1: notifyname_en = len(alurl)
notifyname = alurl[notifyname_st+5:notifyname_en]
notifyname = alurl[notifyname_st +5:notifyname_en]
notifynumber_st = alurl.find('torrents_notify_')
notifynumber_en = alurl.find('_', notifynumber_st+17)
notifynumber_en = alurl.find('_', notifynumber_st +17)
notifynumber = alurl[notifynumber_st:notifynumber_en]
logger.fdebug(self.module + ' [NOTIFICATION: ' + str(notifyname) + '] Notification ID: ' + str(notifynumber))
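The offset arithmetic above extracts auth, passkey, and notification names from the link URLs by hand. An equivalent sketch using the Python 2 standard library (alurl as in the loop above; assumes the values live in the query string as the find() offsets imply):

from urlparse import urlparse, parse_qs

qs = parse_qs(urlparse(alurl).query)
auth = qs.get('auth', [''])[0]
passkey = qs.get('passkey', [''])[0]
notifyname = qs.get('name', [''])[0]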

View File

@@ -21,6 +21,7 @@ import lib.simplejson as simplejson
import mylar
from mylar import db, helpers, logger
class Cache(object):
"""
This class deals with getting, storing and serving up artwork (album
@@ -81,7 +82,7 @@ class Cache(object):
def _get_age(self, date):
# There's probably a better way to do this
split_date = date.split('-')
days_old = int(split_date[0])*365 + int(split_date[1])*30 + int(split_date[2])
days_old = int(split_date[0]) *365 + int(split_date[1]) *30 + int(split_date[2])
return days_old
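The in-line comment invites a better approach: the expression approximates a day count from a 'YYYY-MM-DD' string so that two dates can be compared. A datetime-based version (a sketch; assumes callers only ever compare two such values against each other):

from datetime import date

def _get_age_days(datestr):
    y, m, d = (int(p) for p in datestr.split('-'))
    return (date(y, m, d) - date(1, 1, 1)).days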
@@ -155,11 +156,10 @@ class Cache(object):
self.artwork_url = image_url
def getArtwork(ComicID=None, imageURL=None):
c = Cache()
artwork_path = c.get_artwork_from_cache(ComicID,imageURL)
artwork_path = c.get_artwork_from_cache(ComicID, imageURL)
logger.info('artwork path at : ' + str(artwork_path))
if not artwork_path:
return None
@@ -169,4 +169,3 @@ def getArtwork(ComicID=None, imageURL=None):
else:
artwork_file = os.path.basename(artwork_path)
return "cache/artwork/" + artwork_file

View File

@@ -18,7 +18,8 @@ import mylar
from mylar import logger
from mylar.helpers import cvapi_check
def run (dirName, nzbName=None, issueid=None, comversion=None, manual=None, filename=None, module=None):
def run(dirName, nzbName=None, issueid=None, comversion=None, manual=None, filename=None, module=None):
if module is None:
module = ''
module += '[META-TAGGER]'
@@ -28,7 +29,6 @@ def run (dirName, nzbName=None, issueid=None, comversion=None, manual=None, file
## Set the directory in which comictagger and other external commands are located - IMPORTANT - ##
# ( User may have to modify, depending on their setup, but these are some guesses for now )
if platform.system() == "Windows":
#if it's a source install.
sys_type = 'windows'
@@ -57,7 +57,6 @@ def run (dirName, nzbName=None, issueid=None, comversion=None, manual=None, file
logger.fdebug(module + ' UNRAR path set to : ' + unrar_cmd)
elif platform.system() == "Darwin":
#Mac OS X
sys_type = 'mac'
@@ -100,7 +99,7 @@ def run (dirName, nzbName=None, issueid=None, comversion=None, manual=None, file
file_conversion = True
file_extension_fixing = True
if not os.path.exists( unrar_cmd ):
if not os.path.exists(unrar_cmd):
logger.fdebug(module + ' WARNING: cannot find the unrar command.')
logger.fdebug(module + ' File conversion and extension fixing not available')
logger.fdebug(module + ' You probably need to edit this script, or install the missing tool, or both!')
@@ -108,13 +107,12 @@ def run (dirName, nzbName=None, issueid=None, comversion=None, manual=None, file
#file_conversion = False
#file_extension_fixing = False
## Sets up other directories ##
scriptname = os.path.basename( sys.argv[0] )
downloadpath = os.path.abspath( dirName )
sabnzbdscriptpath = os.path.dirname( sys.argv[0] )
scriptname = os.path.basename(sys.argv[0])
downloadpath = os.path.abspath(dirName)
sabnzbdscriptpath = os.path.dirname(sys.argv[0])
if manual is None:
comicpath = os.path.join( downloadpath , "temp" )
comicpath = os.path.join(downloadpath, "temp")
else:
chkpath, chkfile = os.path.split(filename)
logger.fdebug(module + ' chkpath: ' + chkpath)
@@ -123,8 +121,8 @@ def run (dirName, nzbName=None, issueid=None, comversion=None, manual=None, file
if os.path.isdir(chkpath) and chkpath != downloadpath:
logger.fdebug(module + ' Changing ' + downloadpath + ' location to ' + chkpath + ' as it is a directory.')
downloadpath = chkpath
comicpath = os.path.join( downloadpath, issueid )
unrar_folder = os.path.join( comicpath , "unrard" )
comicpath = os.path.join(downloadpath, issueid)
unrar_folder = os.path.join(comicpath, "unrard")
logger.fdebug(module + ' Paths / Locations:')
logger.fdebug(module + ' scriptname : ' + scriptname)
@@ -134,8 +132,8 @@ def run (dirName, nzbName=None, issueid=None, comversion=None, manual=None, file
logger.fdebug(module + ' unrar_folder : ' + unrar_folder)
logger.fdebug(module + ' Running the ComicTagger Add-on for Mylar')
if os.path.exists( comicpath ):
shutil.rmtree( comicpath )
if os.path.exists(comicpath):
shutil.rmtree(comicpath)
logger.fdebug(module + ' Attempting to create directory @: ' + str(comicpath))
try:
@@ -147,8 +145,8 @@ def run (dirName, nzbName=None, issueid=None, comversion=None, manual=None, file
logger.fdebug(module + ' Filename is : ' + str(filename))
if filename is None:
filename_list = glob.glob( os.path.join( downloadpath, "*.cbz" ) )
filename_list.extend( glob.glob( os.path.join( downloadpath, "*.cbr" ) ) )
filename_list = glob.glob(os.path.join(downloadpath, "*.cbz"))
filename_list.extend(glob.glob(os.path.join(downloadpath, "*.cbr")))
fcount = 1
for f in filename_list:
if fcount > 1:
@@ -158,16 +156,16 @@ def run (dirName, nzbName=None, issueid=None, comversion=None, manual=None, file
logger.fdebug(module + ' .cbz file detected. Excluding from temporary directory move at this time.')
comicpath = downloadpath
else:
shutil.move( f, comicpath )
filename = f #just the filename itself
fcount+=1
shutil.move(f, comicpath)
filename = f # just the filename itself
fcount += 1
else:
# if the filename is identical to the parent folder, the entire subfolder gets copied since it's the first match, instead of just the file
#if os.path.isfile(filename):
#if the filename doesn't exist - force the path assuming it's the 'download path'
filename = os.path.join(downloadpath, filename)
logger.fdebug(module + ' The path where the file is that I was provided is probably wrong - modifying it to : ' + filename)
shutil.move( filename, os.path.join(comicpath, os.path.split(filename)[1]) )
shutil.move(filename, os.path.join(comicpath, os.path.split(filename)[1]))
logger.fdebug(module + ' moving : ' + filename + ' to ' + os.path.join(comicpath, os.path.split(filename)[1]))
try:
@@ -176,33 +174,33 @@ def run (dirName, nzbName=None, issueid=None, comversion=None, manual=None, file
logger.warn('Unable to detect filename within directory - I am aborting the tagging. You best check things out.')
return "fail"
#print comicpath
#print os.path.join( comicpath, filename )
#print os.path.join(comicpath, filename)
if filename.endswith('.cbr'):
f = os.path.join( comicpath, filename )
if zipfile.is_zipfile( f ):
f = os.path.join(comicpath, filename)
if zipfile.is_zipfile(f):
logger.fdebug(module + ' zipfile detected')
base = os.path.splitext( f )[0]
shutil.move( f, base + ".cbz" )
logger.fdebug(module + ' {0}: renaming {1} to be a cbz'.format( scriptname, os.path.basename( f ) ))
base = os.path.splitext(f)[0]
shutil.move(f, base + ".cbz")
logger.fdebug(module + ' {0}: renaming {1} to be a cbz'.format(scriptname, os.path.basename(f)))
filename = base + '.cbz'
if file_extension_fixing:
if filename.endswith('.cbz'):
logger.info(module + ' Filename detected as a .cbz file.')
f = os.path.join( comicpath, filename )
f = os.path.join(comicpath, filename)
logger.fdebug(module + ' filename : ' + f)
if os.path.isfile( f ):
if os.path.isfile(f):
try:
rar_test_cmd_output = "is not RAR archive" #default, in case of error
rar_test_cmd_output = subprocess.check_output( [ unrar_cmd, "t", f ] )
rar_test_cmd_output = "is not RAR archive" # default, in case of error
rar_test_cmd_output = subprocess.check_output([unrar_cmd, "t", f])
except:
logger.fdebug(module + ' This is a zipfile. Unable to test rar.')
if not "is not RAR archive" in rar_test_cmd_output:
base = os.path.splitext( f )[0]
shutil.move( f, base + ".cbr" )
logger.fdebug(module + ' {0}: renaming {1} to be a cbr'.format( scriptname, os.path.basename( f ) ))
base = os.path.splitext(f)[0]
shutil.move(f, base + ".cbr")
logger.fdebug(module + ' {0}: renaming {1} to be a cbr'.format(scriptname, os.path.basename(f)))
else:
try:
with open(f): pass
@@ -210,7 +208,6 @@ def run (dirName, nzbName=None, issueid=None, comversion=None, manual=None, file
logger.warn(module + ' No zip file present')
return "fail"
#if the temp directory is the LAST directory in the path, it's part of the CT logic path above
#and can be removed to allow a copy back to the original path to work.
if 'temp' in os.path.basename(os.path.normpath(comicpath)):
@@ -220,7 +217,7 @@ def run (dirName, nzbName=None, issueid=None, comversion=None, manual=None, file
base = os.path.join(re.sub(issueid, '', comicpath), filename) #extension is already .cbz
logger.fdebug(module + ' Base set to : ' + base)
logger.fdebug(module + ' Moving : ' + f + ' - to - ' + base)
shutil.move( f, base)
shutil.move(f, base)
try:
with open(base):
logger.fdebug(module + ' Verified file exists in location: ' + base)
@@ -231,7 +228,7 @@ def run (dirName, nzbName=None, issueid=None, comversion=None, manual=None, file
if removetemp == True:
if comicpath != downloadpath:
shutil.rmtree( comicpath )
shutil.rmtree(comicpath)
logger.fdebug(module + ' Successfully removed temporary directory: ' + comicpath)
else:
logger.fdebug(module + ' Unable to remove temporary directory since it is identical to the download location : ' + comicpath)
@@ -241,28 +238,28 @@ def run (dirName, nzbName=None, issueid=None, comversion=None, manual=None, file
# Now rename all CBR files to RAR
if filename.endswith('.cbr'):
#logger.fdebug('renaming .cbr to .rar')
f = os.path.join( comicpath, filename)
base = os.path.splitext( f )[0]
f = os.path.join(comicpath, filename)
base = os.path.splitext(f)[0]
baserar = base + ".rar"
shutil.move( f, baserar )
shutil.move(f, baserar)
## Changes any cbr files to cbz files for insertion of metadata ##
if file_conversion:
f = os.path.join( comicpath, filename )
logger.fdebug(module + ' {0}: converting {1} to be zip format'.format( scriptname, os.path.basename( f ) ))
basename = os.path.splitext( f )[0]
f = os.path.join(comicpath, filename)
logger.fdebug(module + ' {0}: converting {1} to be zip format'.format(scriptname, os.path.basename(f)))
basename = os.path.splitext(f)[0]
zipname = basename + ".cbz"
# Move into the folder where we will be unrar-ing things
os.makedirs( unrar_folder )
os.chdir( unrar_folder )
os.makedirs(unrar_folder)
os.chdir(unrar_folder)
# Extract and zip up
logger.fdebug(module + ' {0}: Comicpath is ' + baserar) #os.path.join(comicpath,basename))
logger.fdebug(module + ' {0}: Unrar is ' + unrar_folder )
logger.fdebug(module + ' {0}: Comicpath is ' + baserar) # os.path.join(comicpath,basename))
logger.fdebug(module + ' {0}: Unrar is ' + unrar_folder)
try:
#subprocess.Popen( [ unrar_cmd, "x", os.path.join(comicpath,basename) ] ).communicate()
output = subprocess.check_output( [ unrar_cmd, 'x', baserar ] )
output = subprocess.check_output([unrar_cmd, 'x', baserar])
except CalledProcessError as e:
if e.returncode == 3:
logger.warn(module + ' [Unrar Error 3] - Broken Archive.')
@@ -271,23 +268,23 @@ def run (dirName, nzbName=None, issueid=None, comversion=None, manual=None, file
logger.warn(module + ' Marking this as an incomplete download.')
return "unrar error"
shutil.make_archive( basename, "zip", unrar_folder )
shutil.make_archive(basename, "zip", unrar_folder)
# get out of unrar folder and clean up
os.chdir( comicpath )
shutil.rmtree( unrar_folder )
os.chdir(comicpath)
shutil.rmtree(unrar_folder)
## Changes zip to cbz
f = os.path.join( comicpath, os.path.splitext(filename)[0] + ".zip" )
f = os.path.join(comicpath, os.path.splitext(filename)[0] + ".zip")
#print "zipfile" + f
try:
with open(f): pass
except:
logger.warn(module + ' No zip file present:' + f)
return "fail"
base = os.path.splitext( f )[0]
shutil.move( f, base + ".cbz" )
base = os.path.splitext(f)[0]
shutil.move(f, base + ".cbz")
nfilename = base + ".cbz"
#else:
# logger.fdebug(module + ' Filename:' + filename)
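Condensed, the conversion path in the hunks above renames the .cbr to .rar, extracts it into a scratch folder, re-packs the pages as a zip, and renames the zip to .cbz (a cbz is just a zip archive). A sketch using the names from the surrounding code:

f = os.path.join(comicpath, filename)            # the incoming .cbr
base = os.path.splitext(f)[0]
shutil.move(f, base + '.rar')
os.makedirs(unrar_folder)
os.chdir(unrar_folder)
subprocess.check_output([unrar_cmd, 'x', base + '.rar'])
shutil.make_archive(base, 'zip', unrar_folder)   # writes base + '.zip'
os.chdir(comicpath)
shutil.rmtree(unrar_folder)
shutil.move(base + '.zip', base + '.cbz')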
@@ -313,7 +310,7 @@ def run (dirName, nzbName=None, issueid=None, comversion=None, manual=None, file
logger.fdebug(module + ' Converted directory: ' + str(file_dir))
logger.fdebug(module + ' Converted filename: ' + str(file_n))
logger.fdebug(module + ' Destination path: ' + os.path.join(file_dir,file_n)) #dirName,file_n))
logger.fdebug(module + ' Destination path: ' + os.path.join(file_dir, file_n)) #dirName,file_n))
logger.fdebug(module + ' dirName: ' + dirName)
logger.fdebug(module + ' absDirName: ' + os.path.abspath(dirName))
@@ -322,25 +319,26 @@ def run (dirName, nzbName=None, issueid=None, comversion=None, manual=None, file
comversion = '1'
comversion = re.sub('[^0-9]', '', comversion).strip()
cvers = 'volume=' + str(comversion)
tagoptions = [ "-s", "--verbose", "-m", cvers ]
tagoptions = ["-s", "--verbose", "-m", cvers]
## check comictagger version - less than 1.15.beta - take your chances.
if sys_type == 'windows':
ctversion = subprocess.check_output( [ comictagger_cmd, "--version" ] )
ctversion = subprocess.check_output([comictagger_cmd, "--version"])
else:
ctversion = subprocess.check_output( [ sys.executable, comictagger_cmd, "--version" ] )
ctversion = subprocess.check_output([sys.executable, comictagger_cmd, "--version"])
ctend = ctversion.find(':')
ctcheck = re.sub("[^0-9]", "", ctversion[:ctend])
ctcheck = re.sub('\.', '', ctcheck).strip()
if int(ctcheck) >= int('1115'): #(v1.1.15)
if int(ctcheck) >= int('1115'): # (v1.1.15)
if mylar.COMICVINE_API == mylar.DEFAULT_CVAPI:
logger.fdebug(module + ' ' + ctversion[:ctend] + ' being used - no personal ComicVine API Key supplied. Take your chances.')
use_cvapi = "False"
else:
logger.fdebug(module + ' ' + ctversion[:ctend] + ' being used - using personal ComicVine API key supplied via mylar.')
use_cvapi = "True"
tagoptions.extend( [ "--cv-api-key", mylar.COMICVINE_API ] )
tagoptions.extend(["--cv-api-key", mylar.COMICVINE_API])
else:
logger.fdebug(module + ' ' + ctversion[:ctend] + ' being used - personal ComicVine API key not supported in this version. Good luck.')
use_cvapi = "False"
@@ -367,25 +365,24 @@ def run (dirName, nzbName=None, issueid=None, comversion=None, manual=None, file
logger.fdebug(module + ' Will modify existing tag blocks even if it exists.')
else:
logger.fdebug(module + ' Will NOT modify existing tag blocks even if they exist already.')
tagoptions.extend( [ "--nooverwrite" ] )
tagoptions.extend(["--nooverwrite"])
if issueid is None:
tagoptions.extend( [ "-f", "-o" ] )
tagoptions.extend(["-f", "-o"])
else:
tagoptions.extend( [ "-o", "--id", issueid ] )
tagoptions.extend(["-o", "--id", issueid])
original_tagoptions = tagoptions
og_tagtype = None
while ( i <= tagcnt ):
while (i <= tagcnt):
if i == 1:
tagtype = 'cr' # CR meta-tagging cycle.
tagdisp = 'ComicRack tagging'
elif i == 2:
tagtype = 'cbl' #Cbl meta-tagging cycle
tagtype = 'cbl' # Cbl meta-tagging cycle
tagdisp = 'Comicbooklover tagging'
f_tagoptions = original_tagoptions
if og_tagtype is not None:
@@ -393,7 +390,7 @@ def run (dirName, nzbName=None, issueid=None, comversion=None, manual=None, file
if item == og_tagtype:
f_tagoptions[index] = tagtype
else:
f_tagoptions.extend( [ "--type", tagtype, nfilename ] )
f_tagoptions.extend(["--type", tagtype, nfilename])
og_tagtype = tagtype
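Assembled from the options built up in the hunks above, a typical invocation on a non-Windows install with a personal ComicVine key, --nooverwrite enabled, and a known issueid ends up roughly as follows (a sketch; cvers value hypothetical):

script_cmd = [sys.executable, comictagger_cmd,
              '-s', '--verbose', '-m', 'volume=1',
              '--cv-api-key', mylar.COMICVINE_API,
              '--nooverwrite',
              '-o', '--id', issueid,
              '--type', 'cr', nfilename]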
@@ -411,19 +408,18 @@ def run (dirName, nzbName=None, issueid=None, comversion=None, manual=None, file
script_cmd = shlex.split(currentScriptName, posix=False) + f_tagoptions
# use subprocess to run the command and capture output
logger.fdebug(module + ' Executing command: '+str(script_cmd))
logger.fdebug(module + ' Absolute path to script: '+script_cmd[0])
logger.fdebug(module + ' Executing command: ' +str(script_cmd))
logger.fdebug(module + ' Absolute path to script: ' +script_cmd[0])
try:
p = subprocess.Popen(script_cmd)
out, err = p.communicate() #@UnusedVariable
logger.fdebug(module + '[COMIC-TAGGER] : '+str(out))
out, err = p.communicate() # @UnusedVariable
logger.fdebug(module + '[COMIC-TAGGER] : ' +str(out))
logger.info(module + '[COMIC-TAGGER] Successfully wrote ' + tagdisp)
except OSError, e:
logger.warn(module + '[COMIC-TAGGER] Unable to run comictagger with the options provided: ' + str(script_cmd))
#increment CV API counter.
mylar.CVAPI_COUNT +=1
mylar.CVAPI_COUNT += 1
## Tag each CBZ, and move it back to original directory ##
@@ -444,33 +440,32 @@ def run (dirName, nzbName=None, issueid=None, comversion=None, manual=None, file
# mylar.CVAPI_COUNT +=1
i+=1
if os.path.exists(os.path.join(os.path.abspath(file_dir),file_n)): #(os.path.abspath(dirName),file_n)):
logger.fdebug(module + ' Unable to move from temporary directory - file already exists in destination: ' + os.path.join(os.path.abspath(file_dir),file_n))
if os.path.exists(os.path.join(os.path.abspath(file_dir), file_n)): # (os.path.abspath(dirName),file_n)):
logger.fdebug(module + ' Unable to move from temporary directory - file already exists in destination: ' + os.path.join(os.path.abspath(file_dir), file_n))
else:
try:
shutil.move( os.path.join(comicpath, nfilename), os.path.join(os.path.abspath(file_dir),file_n)) #os.path.abspath(dirName),file_n))
shutil.move(os.path.join(comicpath, nfilename), os.path.join(os.path.abspath(file_dir), file_n)) #os.path.abspath(dirName),file_n))
#shutil.move( nfilename, os.path.join(os.path.abspath(dirName),file_n))
logger.fdebug(module + ' Sucessfully moved file from temporary path.')
except:
logger.error(module + ' Unable to move file from temporary path [' + os.path.join(comicpath, nfilename) + ']. Deletion of temporary path halted.')
logger.error(module + ' attempt to move: ' + os.path.join(comicpath, nfilename) + ' to ' + os.path.join(os.path.abspath(file_dir), file_n))
return os.path.join(os.path.abspath(file_dir), file_n) #os.path.join(comicpath, nfilename)
return os.path.join(os.path.abspath(file_dir), file_n) # os.path.join(comicpath, nfilename)
i = 0
os.chdir( mylar.PROG_DIR )
os.chdir(mylar.PROG_DIR)
while i < 10:
try:
logger.fdebug(module + ' Attempting to remove: ' + comicpath)
shutil.rmtree( comicpath )
shutil.rmtree(comicpath)
except:
time.sleep(.1)
else:
return os.path.join(os.path.abspath(file_dir), file_n) #dirName), file_n)
i+=1
return os.path.join(os.path.abspath(file_dir), file_n) # dirName), file_n)
i += 1
logger.fdebug(module + ' Failed to remove temporary path : ' + str(comicpath))
return os.path.join(os.path.abspath(file_dir),file_n) #dirName),file_n)
return os.path.join(os.path.abspath(file_dir), file_n) # dirName),file_n)

View File

@@ -10,17 +10,18 @@ from decimal import Decimal
from HTMLParser import HTMLParseError
from time import strptime
def cbdb(comicnm, ComicYear):
#comicnm = 'Animal Man'
#print ( "comicname: " + str(comicnm) )
#print ( "comicyear: " + str(comicyr) )
comicnm = re.sub(' ', '+', comicnm)
input = "http://mobile.comicbookdb.com/search.php?form_search=" + str(comicnm) + "&form_searchtype=Title&x=0&y=0"
response = urllib2.urlopen ( input )
soup = BeautifulSoup ( response)
response = urllib2.urlopen(input)
soup = BeautifulSoup(response)
abc = soup.findAll('a', href=True)
lenabc = len(abc)
i=0
i = 0
resultName = []
resultID = []
resultYear = []
@@ -29,7 +30,7 @@ def cbdb(comicnm, ComicYear):
matched = "no"
while (i < lenabc):
titlet = abc[i] #iterate through the href's, pulling out only results.
titlet = abc[i] # iterate through the href's, pulling out only results.
print ("titlet: " + str(titlet))
if "title.php" in str(titlet):
print ("found title")
@@ -38,10 +39,10 @@ def cbdb(comicnm, ComicYear):
resultName = tempName[:tempName.find("(")]
print ("ComicName: " + resultName)
resultYear = tempName[tempName.find("(")+1:tempName.find(")")]
resultYear = tempName[tempName.find("(") +1:tempName.find(")")]
if resultYear.isdigit(): pass
else:
i+=1
i += 1
continue
print "ComicYear: " + resultYear
@@ -50,11 +51,10 @@ def cbdb(comicnm, ComicYear):
print "CBDB URL: " + resultURL
IDst = ID_som.find('?ID=')
resultID = ID_som[(IDst+4):]
resultID = ID_som[(IDst +4):]
print "CBDB ID: " + resultID
print ("resultname: " + resultName)
CleanComicName = re.sub('[\,\.\:\;\'\[\]\(\)\!\@\#\$\%\^\&\*\-\_\+\=\?\/]', '', comicnm)
CleanComicName = re.sub(' ', '', CleanComicName).lower()
@@ -67,7 +67,7 @@ def cbdb(comicnm, ComicYear):
print ("i:" + str(i) + "...matched by name to Mylar!")
print ("ComicYear: " + str(ComicYear) + ".. to ResultYear: " + str(resultYear))
if resultYear.isdigit():
if int(resultYear) == int(ComicYear) or int(resultYear) == int(ComicYear)+1:
if int(resultYear) == int(ComicYear) or int(resultYear) == int(ComicYear) +1:
resultID = str(resultID)
print ("Matchumundo!")
matched = "yes"
@@ -75,7 +75,7 @@ def cbdb(comicnm, ComicYear):
continue
if matched == "yes":
break
i+=1
i += 1
return IssueDetails(resultID)
@@ -92,7 +92,7 @@ def IssueDetails(cbdb_id):
total = len(resultp) # -- number of tables
#get details here
startit = resultp[0].find("table", {"width" : "884" })
startit = resultp[0].find("table", {"width": "884"})
i = 0
pubchk = 0
@@ -111,7 +111,7 @@ def IssueDetails(cbdb_id):
noi = boop[i].nextSibling
print ("number of issues: " + noi)
i+=1
i += 1
if i > len(boop): break
@@ -124,16 +124,16 @@ def IssueDetails(cbdb_id):
ti = 1 # start at one as 0 is the ENTIRE soup structure
while (ti < total):
#print result
if resultp[ti].find("a", {"class" : "page_link" }):
if resultp[ti].find("a", {"class": "page_link"}):
#print "matcheroso"
tableno = resultp[ti].findAll('tr') #7th table, all the tr's
tableno = resultp[ti].findAll('tr') # 7th table, all the tr's
#print ti, total
break
ti+=1
ti += 1
noresults = len(tableno)
#print ("tableno: " + str(tableno))
print ("there are " + str(noresults) + " issues total (cover variations, et all).")
i=1 # start at 1 so we don't grab the table headers ;)
i = 1 # start at 1 so we don't grab the table headers ;)
issue = []
storyarc = []
pubdate = []
@@ -143,24 +143,24 @@ def IssueDetails(cbdb_id):
while (i < noresults):
resultit = tableno[i] # 7th table, 1st set of tr (which indicates an issue).
print ("resultit: " + str(resultit))
issuet = resultit.find("a", {"class" : "page_link" }) # gets the issue # portion
issuet = resultit.find("a", {"class": "page_link"}) # gets the issue # portion
try:
issue = issuet.findNext(text=True)
except:
print ("blank space - skipping")
i+=1
i += 1
continue
if 'annual' not in issue.lower():
i+=1
i += 1
continue
lent = resultit('a',href=True) #gathers all the a href's within this particular tr
lent = resultit('a', href=True) #gathers all the a href's within this particular tr
#print ("lent: " + str(lent))
lengtht = len(lent) #returns the # of ahref's within this particular tr
lengtht = len(lent) # returns the # of ahref's within this particular tr
#print ("lengtht: " + str(lengtht))
#since we don't know which one contains the story arc, we need to iterate through to find it
#we need to know story arc, because the following td is the Publication Date
n=0
n = 0
issuetitle = 'None'
while (n < lengtht):
storyt = lent[n] #
@@ -173,21 +173,21 @@ def IssueDetails(cbdb_id):
storyarc = storyt.findNext(text=True)
#print ("Story Arc: " + str(storyarc))
break
n+=1
n += 1
pubd = resultit('td') # find all the <td>'s within this tr
publen = len(pubd) # find the # of <td>'s
pubs = pubd[publen-1] #take the last <td> which will always contain the publication date
pdaters = pubs.findNext(text=True) #get the actual date :)
basmonths = {'january':'01','february':'02','march':'03','april':'04','may':'05','june':'06','july':'07','august':'09','september':'10','october':'11','december':'12','annual':''}
pubs = pubd[publen -1] # take the last <td> which will always contain the publication date
pdaters = pubs.findNext(text=True) # get the actual date :)
basmonths = {'january': '01', 'february': '02', 'march': '03', 'april': '04', 'may': '05', 'june': '06', 'july': '07', 'august': '09', 'september': '10', 'october': '11', 'december': '12', 'annual': ''}
for numbs in basmonths:
if numbs in pdaters.lower():
pconv = basmonths[numbs]
ParseYear = re.sub('/s','',pdaters[-5:])
ParseYear = re.sub('/s', '', pdaters[-5:])
if basmonths[numbs] == '':
pubdate = str(ParseYear)
else:
pubdate= str(ParseYear) + "-" + str(pconv)
#logger.fdebug("!success - Publication date: " + str(ParseDate))
pubdate = str(ParseYear) + "-" + str(pconv)
# logger.fdebug("!success - Publication date: " + str(ParseDate))
#pubdate = re.sub("[^0-9]", "", pdaters)
issuetmp = re.sub("[^0-9]", '', issue)
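Separate from the spacing change, the basmonths table above carries pre-existing mis-numbered entries: august maps to '09', september to '10', october to '11', and november is missing altogether. A corrected mapping would be:

basmonths = {'january': '01', 'february': '02', 'march': '03', 'april': '04',
             'may': '05', 'june': '06', 'july': '07', 'august': '08',
             'september': '09', 'october': '10', 'november': '11',
             'december': '12', 'annual': ''}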
@@ -200,9 +200,9 @@ def IssueDetails(cbdb_id):
'AnnualDate': pubdate.strip(),
'AnnualYear': ParseYear.strip()
})
gcount+=1
gcount += 1
print("annualslist appended...")
i+=1
i += 1
annuals['annualslist'] = annualslist

View File

@@ -27,6 +27,7 @@ from mylar.helpers import cvapi_check
from bs4 import BeautifulSoup as Soup
import httplib
def patch_http_response_read(func):
def inner(*args):
try:
@@ -41,7 +42,8 @@ if platform.python_version() == '2.7.6':
httplib.HTTPConnection._http_vsn = 10
httplib.HTTPConnection._http_vsn_str = 'HTTP/1.0'
def pulldetails(comicid,type,issueid=None,offset=1,arclist=None,comicidlist=None):
def pulldetails(comicid, type, issueid=None, offset=1, arclist=None, comicidlist=None):
#import easy to use xml parser called minidom:
from xml.dom.minidom import parseString
@@ -53,7 +55,7 @@ def pulldetails(comicid,type,issueid=None,offset=1,arclist=None,comicidlist=None
if type == 'comic':
if not comicid.startswith('4050-'): comicid = '4050-' + comicid
PULLURL= mylar.CVURL + 'volume/' + str(comicid) + '/?api_key=' + str(comicapi) + '&format=xml&field_list=name,count_of_issues,issues,start_year,site_detail_url,image,publisher,description,first_issue,deck,aliases'
PULLURL = mylar.CVURL + 'volume/' + str(comicid) + '/?api_key=' + str(comicapi) + '&format=xml&field_list=name,count_of_issues,issues,start_year,site_detail_url,image,publisher,description,first_issue,deck,aliases'
elif type == 'issue':
if mylar.CV_ONLY:
cv_type = 'issues'
@@ -80,7 +82,7 @@ def pulldetails(comicid,type,issueid=None,offset=1,arclist=None,comicidlist=None
#download the file:
file = urllib2.urlopen(PULLURL)
#increment CV API counter.
mylar.CVAPI_COUNT +=1
mylar.CVAPI_COUNT += 1
#convert to string:
data = file.read()
#close file because we dont need it anymore:
@@ -91,7 +93,7 @@ def pulldetails(comicid,type,issueid=None,offset=1,arclist=None,comicidlist=None
return dom
def getComic(comicid,type,issueid=None,arc=None,arcid=None,arclist=None,comicidlist=None):
def getComic(comicid, type, issueid=None, arc=None, arcid=None, arclist=None, comicidlist=None):
if type == 'issue':
offset = 1
issue = {}
@@ -107,7 +109,7 @@ def getComic(comicid,type,issueid=None,arc=None,arcid=None,arclist=None,comicidl
else:
id = comicid
islist = None
searched = pulldetails(id,'issue',None,0,islist)
searched = pulldetails(id, 'issue', None, 0, islist)
if searched is None: return False
totalResults = searched.getElementsByTagName('number_of_total_results')[0].firstChild.wholeText
logger.fdebug("there are " + str(totalResults) + " search results...")
@@ -119,8 +121,8 @@ def getComic(comicid,type,issueid=None,arc=None,arcid=None,arclist=None,comicidl
if countResults > 0:
#new api - have to change to page # instead of offset count
offsetcount = countResults
searched = pulldetails(id,'issue',None,offsetcount,islist)
issuechoice,tmpdate = GetIssuesInfo(id,searched,arcid)
searched = pulldetails(id, 'issue', None, offsetcount, islist)
issuechoice, tmpdate = GetIssuesInfo(id, searched, arcid)
if tmpdate < firstdate:
firstdate = tmpdate
ndic = ndic + issuechoice
@@ -133,22 +135,22 @@ def getComic(comicid,type,issueid=None,arc=None,arcid=None,arclist=None,comicidl
return issue
elif type == 'comic':
dom = pulldetails(comicid,'comic',None,1)
return GetComicInfo(comicid,dom)
dom = pulldetails(comicid, 'comic', None, 1)
return GetComicInfo(comicid, dom)
elif type == 'firstissue':
dom = pulldetails(comicid,'firstissue',issueid,1)
return GetFirstIssue(issueid,dom)
dom = pulldetails(comicid, 'firstissue', issueid, 1)
return GetFirstIssue(issueid, dom)
elif type == 'storyarc':
dom = pulldetails(arc,'storyarc',None,1)
return GetComicInfo(issueid,dom)
dom = pulldetails(arc, 'storyarc', None, 1)
return GetComicInfo(issueid, dom)
elif type == 'comicyears':
#used by the story arc searcher when adding a given arc to poll each ComicID in order to populate the Series Year.
#this grabs each issue based on issueid, and then subsets the comicid for each to be used later.
#set the offset to 0, since we're doing a filter.
dom = pulldetails(arcid,'comicyears',offset=0,comicidlist=comicidlist)
dom = pulldetails(arcid, 'comicyears', offset=0, comicidlist=comicidlist)
return GetSeriesYears(dom)
def GetComicInfo(comicid,dom,safechk=None):
def GetComicInfo(comicid, dom, safechk=None):
if safechk is None:
#safetycheck when checking comicvine. If it times out, increment the chk on retry attempts up until 5 tries then abort.
safechk = 1
@@ -182,9 +184,9 @@ def GetComicInfo(comicid,dom,safechk=None):
# where [0] denotes the number of the name field(s)
# where nodeName denotes the parentNode : ComicName = results, publisher = publisher, issues = issue
try:
names = len( dom.getElementsByTagName('name') )
names = len(dom.getElementsByTagName('name'))
n = 0
while ( n < names ):
while (n < names):
if dom.getElementsByTagName('name')[n].parentNode.nodeName == 'results':
try:
comic['ComicName'] = dom.getElementsByTagName('name')[n].firstChild.wholeText
@@ -199,7 +201,7 @@ def GetComicInfo(comicid,dom,safechk=None):
except:
comic['ComicPublisher'] = "Unknown"
n+=1
n += 1
except:
logger.warn('Something went wrong retrieving from ComicVine. Ensure your API is up-to-date and that comicvine is accessible')
return
@@ -269,12 +271,12 @@ def GetComicInfo(comicid,dom,safechk=None):
#increased to 10 to allow for text numbering (+5 max)
#sometimes it's volume 5 and ocassionally it's fifth volume.
if i == 0:
vfind = comicDes[v_find:v_find+15] #if it's volume 5 format
basenums = {'zero':'0','one':'1','two':'2','three':'3','four':'4','five':'5','six':'6','seven':'7','eight':'8','nine':'9','ten':'10','i':'1','ii':'2','iii':'3','iv':'4','v':'5'}
vfind = comicDes[v_find:v_find +15] #if it's volume 5 format
basenums = {'zero': '0', 'one': '1', 'two': '2', 'three': '3', 'four': '4', 'five': '5', 'six': '6', 'seven': '7', 'eight': '8', 'nine': '9', 'ten': '10', 'i': '1', 'ii': '2', 'iii': '3', 'iv': '4', 'v': '5'}
logger.fdebug('volume X format - ' + str(i) + ': ' + vfind)
else:
vfind = comicDes[:v_find] # if it's fifth volume format
basenums = {'zero':'0','first':'1','second':'2','third':'3','fourth':'4','fifth':'5','sixth':'6','seventh':'7','eighth':'8','nineth':'9','tenth':'10','i':'1','ii':'2','iii':'3','iv':'4','v':'5'}
basenums = {'zero': '0', 'first': '1', 'second': '2', 'third': '3', 'fourth': '4', 'fifth': '5', 'sixth': '6', 'seventh': '7', 'eighth': '8', 'nineth': '9', 'tenth': '10', 'i': '1', 'ii': '2', 'iii': '3', 'iv': '4', 'v': '5'}
logger.fdebug('X volume format - ' + str(i) + ': ' + vfind)
volconv = ''
for nums in basenums:
@@ -288,10 +290,10 @@ def GetComicInfo(comicid,dom,safechk=None):
if i == 0:
volthis = vfind.lower().find('volume')
volthis = volthis + 6 # add on the actual word to the position so that we can grab the subsequent digit
vfind = vfind[volthis:volthis+4] #grab the next 4 characters ;)
vfind = vfind[volthis:volthis + 4] # grab the next 4 characters ;)
elif i == 1:
volthis = vfind.lower().find('volume')
vfind = vfind[volthis-4:volthis] #grab the next 4 characters ;)
vfind = vfind[volthis - 4:volthis] # grab the next 4 characters ;)
if '(' in vfind:
#bracket detected in versioning'
@@ -303,13 +305,13 @@ def GetComicInfo(comicid,dom,safechk=None):
comic['ComicVersion'] = ledigit
logger.fdebug("Volume information found! Adding to series record : volume " + comic['ComicVersion'])
break
i+=1
i += 1
else:
i+=1
i += 1
if comic['ComicVersion'] == 'noversion':
logger.fdebug('comic[ComicVersion]:' + str(comic['ComicVersion']))
desdeck -=1
desdeck -= 1
else:
break
@@ -338,7 +340,7 @@ def GetComicInfo(comicid,dom,safechk=None):
# comic['comicchoice'] = comicchoice
return comic
def GetIssuesInfo(comicid,dom,arcid=None):
def GetIssuesInfo(comicid, dom, arcid=None):
subtracks = dom.getElementsByTagName('issue')
if not mylar.CV_ONLY:
cntiss = dom.getElementsByTagName('count_of_issues')[0].firstChild.wholeText
@@ -349,7 +351,7 @@ def GetIssuesInfo(comicid,dom,arcid=None):
logger.fdebug("CV's count is wrong, I counted different...going with my count for physicals" + str(len(subtracks)))
cntiss = len(subtracks) # assume count of issues is wrong, go with ACTUAL physical api count
cntiss = int(cntiss)
n = cntiss-1
n = cntiss -1
else:
n = int(len(subtracks))
tempissue = {}
@ -372,7 +374,7 @@ def GetIssuesInfo(comicid,dom,arcid=None):
})
else:
try:
totnames = len( subtrack.getElementsByTagName('name') )
totnames = len(subtrack.getElementsByTagName('name'))
tot = 0
while (tot < totnames):
if subtrack.getElementsByTagName('name')[tot].parentNode.nodeName == 'volume':
@ -382,19 +384,19 @@ def GetIssuesInfo(comicid,dom,arcid=None):
tempissue['Issue_Name'] = subtrack.getElementsByTagName('name')[tot].firstChild.wholeText
except:
tempissue['Issue_Name'] = None
tot+=1
tot += 1
except:
tempissue['ComicName'] = 'None'
try:
totids = len( subtrack.getElementsByTagName('id') )
totids = len(subtrack.getElementsByTagName('id'))
idt = 0
while (idt < totids):
if subtrack.getElementsByTagName('id')[idt].parentNode.nodeName == 'volume':
tempissue['Comic_ID'] = subtrack.getElementsByTagName('id')[idt].firstChild.wholeText
elif subtrack.getElementsByTagName('id')[idt].parentNode.nodeName == 'issue':
tempissue['Issue_ID'] = subtrack.getElementsByTagName('id')[idt].firstChild.wholeText
idt+=1
idt += 1
except:
tempissue['Issue_Name'] = 'None'
@ -435,12 +437,12 @@ def GetIssuesInfo(comicid,dom,arcid=None):
if tempissue['CoverDate'] < firstdate and tempissue['CoverDate'] != '0000-00-00':
firstdate = tempissue['CoverDate']
n-=1
n -= 1
#issue['firstdate'] = firstdate
return issuech, firstdate
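GetIssuesInfo disambiguates the repeated <name>/<id> tags in ComicVine's XML by checking each node's parentNode ('volume' vs 'issue'), as the hunks above show. The same minidom pattern in isolation, with a made-up XML snippet standing in for an API response:

# parentNode-based disambiguation of repeated tags, as in GetIssuesInfo.
from xml.dom.minidom import parseString

xml = """<issue>
  <id>101</id><name>The Big One</name>
  <volume><id>4050</id><name>Example Comic</name></volume>
</issue>"""

dom = parseString(xml)
issue = {}
for node in dom.getElementsByTagName('id'):
    if node.parentNode.nodeName == 'volume':
        issue['Comic_ID'] = node.firstChild.wholeText
    elif node.parentNode.nodeName == 'issue':
        issue['Issue_ID'] = node.firstChild.wholeText
print(issue)  # e.g. {'Issue_ID': '101', 'Comic_ID': '4050'}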
def GetFirstIssue(issueid,dom):
def GetFirstIssue(issueid, dom):
#if the Series Year doesn't exist, get the first issue and take the date from that
try:
first_year = dom.getElementsByTagName('cover_date')[0].firstChild.wholeText
@ -462,7 +464,7 @@ def GetSeriesYears(dom):
serieslist = []
for dm in series:
try:
totids = len( dm.getElementsByTagName('id') )
totids = len(dm.getElementsByTagName('id'))
idc = 0
while (idc < totids):
if dm.getElementsByTagName('id')[idc].parentNode.nodeName == 'volume':
@ -475,7 +477,7 @@ def GetSeriesYears(dom):
tempseries['Series'] = 'None'
tempseries['Publisher'] = 'None'
try:
totnames = len( dm.getElementsByTagName('name') )
totnames = len(dm.getElementsByTagName('name'))
namesc = 0
while (namesc < totnames):
if dm.getElementsByTagName('name')[namesc].parentNode.nodeName == 'volume':
@ -492,7 +494,6 @@ def GetSeriesYears(dom):
logger.warn('There was a problem retrieving the start year for a particular series within the story arc.')
tempseries['SeriesYear'] = '0000'
serieslist.append({"ComicID": tempseries['ComicID'],
"ComicName": tempseries['Series'],
"SeriesYear": tempseries['SeriesYear'],
@ -500,6 +501,7 @@ def GetSeriesYears(dom):
return serieslist
def drophtml(html):
from bs4 import BeautifulSoup
soup = BeautifulSoup(html)

View File

@ -163,14 +163,14 @@ class DBConnection:
changesBefore = self.connection.total_changes
genParams = lambda myDict : [x + " = ?" for x in myDict.keys()]
genParams = lambda myDict: [x + " = ?" for x in myDict.keys()]
query = "UPDATE " + tableName + " SET " + ", ".join(genParams(valueDict)) + " WHERE " + " AND ".join(genParams(keyDict))
self.action(query, valueDict.values() + keyDict.values())
if self.connection.total_changes == changesBefore:
query = "INSERT INTO "+tableName+" (" + ", ".join(valueDict.keys() + keyDict.keys()) + ")" + \
query = "INSERT INTO " +tableName +" (" + ", ".join(valueDict.keys() + keyDict.keys()) + ")" + \
" VALUES (" + ", ".join(["?"] * len(valueDict.keys() + keyDict.keys())) + ")"
self.action(query, valueDict.values() + keyDict.values())
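The upsert above leans on sqlite's connection.total_changes: try the UPDATE first, and only INSERT when the change count did not move. A minimal sketch of that pattern (table and column names invented):

# UPDATE-then-INSERT upsert keyed on total_changes, as in DBConnection.upsert.
import sqlite3

conn = sqlite3.connect(':memory:')
conn.execute('CREATE TABLE comics (ComicID TEXT PRIMARY KEY, Status TEXT)')

def upsert(table, value_dict, key_dict):
    gen_params = lambda d: [k + ' = ?' for k in d]
    changes_before = conn.total_changes
    conn.execute('UPDATE %s SET %s WHERE %s'
                 % (table, ', '.join(gen_params(value_dict)),
                    ' AND '.join(gen_params(key_dict))),
                 list(value_dict.values()) + list(key_dict.values()))
    if conn.total_changes == changes_before:  # nothing updated -> insert
        cols = list(value_dict) + list(key_dict)
        conn.execute('INSERT INTO %s (%s) VALUES (%s)'
                     % (table, ', '.join(cols), ', '.join('?' * len(cols))),
                     list(value_dict.values()) + list(key_dict.values()))

upsert('comics', {'Status': 'Wanted'}, {'ComicID': '4050'})
upsert('comics', {'Status': 'Downloaded'}, {'ComicID': '4050'})
print(conn.execute('SELECT * FROM comics').fetchall())  # [('4050', 'Downloaded')]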

View File

@ -31,7 +31,7 @@ def file2comicmatch(watchmatch):
#print ("match: " + str(watchmatch))
pass
def listFiles(dir,watchcomic,Publisher,AlternateSearch=None,manual=None,sarc=None):
def listFiles(dir, watchcomic, Publisher, AlternateSearch=None, manual=None, sarc=None):
# use AlternateSearch to check for filenames that follow that naming pattern
# ie. Star Trek TNG Doctor Who Assimilation won't get hits as the
@ -86,7 +86,7 @@ def listFiles(dir,watchcomic,Publisher,AlternateSearch=None,manual=None,sarc=Non
basedir = fname['directory']
#if it's a subdir, strip out the main dir and retain the remainder for the filechecker to find it.
#start at position 1 so the initial slash is removed since it's a sub, and os.path.join will choke.
moddir = basedir.replace(dir,'')[1:].rstrip()
moddir = basedir.replace(dir, '')[1:].rstrip()
item = fname['filename']
@ -122,7 +122,7 @@ def listFiles(dir,watchcomic,Publisher,AlternateSearch=None,manual=None,sarc=Non
logger.fdebug('[SARC] subname: ' + subname)
removest = subname.find('-') # the - gets removed above so we test for the first blank space...
logger.fdebug('[SARC] Checking filename for Reading Order sequence - removest: ' + str(removest))
logger.fdebug('removestdig: ' + subname[:removest-1])
logger.fdebug('removestdig: ' + subname[:removest -1])
if subname[:removest].isdigit() and removest == 3:
subname = subname[4:]
logger.fdebug('[SARC] Removed Reading Order sequence from subname. Now set to : ' + subname)
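For story-arc files the branch above strips a leading three-digit reading-order prefix before matching. A tiny illustration, with an invented filename:

# Reading-order prefix removal, as in the [SARC] branch above (name invented).
subname = '012-Example Comic 003 (2013)'
removest = subname.find('-')
if subname[:removest].isdigit() and removest == 3:
    subname = subname[4:]
print(subname)  # Example Comic 003 (2013)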
@ -147,9 +147,9 @@ def listFiles(dir,watchcomic,Publisher,AlternateSearch=None,manual=None,sarc=Non
vers4vol = volrem
break
elif subit.lower()[:3] == 'vol':
tsubit = re.sub('vol','', subit.lower())
tsubit = re.sub('vol', '', subit.lower())
try:
if any( [ tsubit.isdigit(), len(tsubit) > 5 ] ):
if any([tsubit.isdigit(), len(tsubit) > 5]):
#if in format vol.2013 etc
#because the '.' in Vol. gets removed, let's loop thru again after the Vol hit to remove it entirely
logger.fdebug('[FILECHECKER] volume indicator detected as version #:' + str(subit))
@ -197,7 +197,7 @@ def listFiles(dir,watchcomic,Publisher,AlternateSearch=None,manual=None,sarc=Non
bracket_length_st = watchcomic.find('(')
bracket_length_en = watchcomic.find(')', bracket_length_st)
bracket_length = bracket_length_en - bracket_length_st
bracket_word = watchcomic[bracket_length_st:bracket_length_en+1]
bracket_word = watchcomic[bracket_length_st:bracket_length_en +1]
logger.fdebug('[FILECHECKER] bracketinseries: ' + str(bracket_word))
logger.fdebug('[FILECHECKER] numberinseries: ' + str(numberinseries))
@ -217,7 +217,7 @@ def listFiles(dir,watchcomic,Publisher,AlternateSearch=None,manual=None,sarc=Non
#logger.fdebug('[FILECHECKER] subnm_mod_en: ' + str(subname[bracket_length_en:]))
#logger.fdebug('[FILECHECKER] modified subname is now : ' + str(subnm_mod))
if bracket_word in subname:
nobrackets_word = re.sub('[\(\)]','', bracket_word).strip()
nobrackets_word = re.sub('[\(\)]', '', bracket_word).strip()
subname = re.sub(nobrackets_word, '', subname).strip()
subnm = re.findall('[^()]+', subname)
@ -232,7 +232,7 @@ def listFiles(dir,watchcomic,Publisher,AlternateSearch=None,manual=None,sarc=Non
subthis = re.sub('.cbr', '', subname)
subthis = re.sub('.cbz', '', subthis)
subthis = re.sub('[\:\;\!\'\/\?\+\=\_\%\.\-]', '', subthis)
subthis = re.sub('\s+',' ', subthis)
subthis = re.sub('\s+', ' ', subthis)
logger.fdebug('[FILECHECKER] sub-cleaned: ' + subthis)
#we need to make sure the file is part of the correct series or else will match falsely
if watchname.lower() not in subthis.lower():
@ -245,7 +245,7 @@ def listFiles(dir,watchcomic,Publisher,AlternateSearch=None,manual=None,sarc=Non
if subthis.startswith('('):
# if it startswith a bracket, then it's probably a year - let's check.
for i in subthis.split():
tmpi = re.sub('[\(\)]','',i).strip()
tmpi = re.sub('[\(\)]', '', i).strip()
if tmpi.isdigit():
if (tmpi.startswith('19') or tmpi.startswith('20')) and len(tmpi) == 4:
logger.fdebug('[FILECHECKER] year detected: ' + str(tmpi))
@ -305,7 +305,7 @@ def listFiles(dir,watchcomic,Publisher,AlternateSearch=None,manual=None,sarc=Non
watchname = re.sub('[\:\;\!\'\/\?\+\=\_\%\-]', '', watchcomic) #remove spec chars for watchcomic match.
subthis = re.sub('[\:\;\!\'\/\?\+\=\_\%\-]', '', subthis)
logger.fdebug('[FILECHECKER] watch-cleaned: ' + watchname)
subthis = re.sub('\s+',' ', subthis)
subthis = re.sub('\s+', ' ', subthis)
logger.fdebug('[FILECHECKER] sub-cleaned: ' + subthis)
#we need to make sure the file is part of the correct series or else will match falsely
if watchname.lower() not in subthis.lower():
@ -317,7 +317,7 @@ def listFiles(dir,watchcomic,Publisher,AlternateSearch=None,manual=None,sarc=Non
if subthis.startswith('('):
# if it startswith a bracket, then it's probably a year and the format is incorrect to continue - let's check.
for i in subthis.split():
tmpi = re.sub('[\(\)]','',i).strip()
tmpi = re.sub('[\(\)]', '', i).strip()
if tmpi.isdigit():
if (tmpi.startswith('19') or tmpi.startswith('20')) and len(tmpi) == 4:
logger.fdebug('[FILECHECKER] Year detected: ' + str(tmpi))
@ -420,16 +420,16 @@ def listFiles(dir,watchcomic,Publisher,AlternateSearch=None,manual=None,sarc=Non
for nono in not_these:
if nono in subname:
subcnt = subname.count(nono)
charpos = indices(subname,nono) # will return a list of char positions in subname
charpos = indices(subname, nono) # will return a list of char positions in subname
logger.fdebug("[" + str(nono) + "] charpos: " + str(charpos))
if nono == '-':
i=0
while (i < len(charpos)):
for i,j in enumerate(charpos):
if j+2 > len(subname):
sublimit = subname[j+1:]
for i, j in enumerate(charpos):
if j +2 > len(subname):
sublimit = subname[j +1:]
else:
sublimit = subname[j+1:j+2]
sublimit = subname[j +1:j +2]
if sublimit.isdigit():
logger.fdebug('[FILECHECKER] possible negative issue detected.')
nonocount = nonocount + subcnt - 1
@ -437,9 +437,9 @@ def listFiles(dir,watchcomic,Publisher,AlternateSearch=None,manual=None,sarc=Non
elif '-' in watchcomic and j < lenwatch:
lenwatch -=1
logger.fdebug('[FILECHECKER] - appears in series title.')
logger.fdebug('[FILECHECKER] up to - :' + subname[:j+1].replace('-', ' '))
logger.fdebug('[FILECHECKER] after - :' + subname[j+1:])
subname = subname[:j+1].replace('-', '') + subname[j+1:]
logger.fdebug('[FILECHECKER] up to - :' + subname[:j +1].replace('-', ' '))
logger.fdebug('[FILECHECKER] after - :' + subname[j +1:])
subname = subname[:j +1].replace('-', '') + subname[j +1:]
logger.fdebug('[FILECHECKER] new subname is : ' + subname)
should_restart = True
leavehyphen = True
@ -455,16 +455,16 @@ def listFiles(dir,watchcomic,Publisher,AlternateSearch=None,manual=None,sarc=Non
fndit = 0
dcspace = 0
while (x < len(charpos)):
for x,j in enumerate(charpos):
for x, j in enumerate(charpos):
fndit = j
logger.fdebug('fndit: ' + str(fndit))
logger.fdebug('isdigit1: ' + subname[fndit-1:fndit])
logger.fdebug('isdigit2: ' + subname[fndit+1:fndit+2])
if subname[fndit-1:fndit].isdigit() and subname[fndit+1:fndit+2].isdigit():
logger.fdebug('isdigit1: ' + subname[fndit -1:fndit])
logger.fdebug('isdigit2: ' + subname[fndit +1:fndit +2])
if subname[fndit -1:fndit].isdigit() and subname[fndit +1:fndit +2].isdigit():
logger.fdebug('[FILECHECKER] decimal issue detected.')
dcspace+=1
else:
subname = subname[:fndit] + ' ' + subname[fndit+1:]
subname = subname[:fndit] + ' ' + subname[fndit +1:]
nonocount+=1
x+=1
nonocount += (subcnt + dcspace)
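These hunks reindent the '-' and '.' scanners, which walk every occurrence of a character (the position list comes from the indices() helper further down) and decide whether it is punctuation to strip or part of a decimal or negative issue number. A compact sketch of the decimal check, with an invented filename:

# Decimal-issue detection around '.' occurrences, mirroring the scanner above.
subname = 'Example Comic 3.1 (2014)'  # invented filename
charpos = [i for i, c in enumerate(subname) if c == '.']  # what indices() returns
for j in charpos:
    if subname[j - 1:j].isdigit() and subname[j + 1:j + 2].isdigit():
        print('decimal issue detected at position %d' % j)  # keep the dot
    else:
        subname = subname[:j] + ' ' + subname[j + 1:]  # blank out a stray dot
print(subname)  # Example Comic 3.1 (2014)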
@ -480,15 +480,15 @@ def listFiles(dir,watchcomic,Publisher,AlternateSearch=None,manual=None,sarc=Non
blspc = 0
if nono == '#':
fndit = subname.find(nono)
if subname[fndit+1].isdigit():
subname = re.sub('#','',subname)
if subname[fndit +1].isdigit():
subname = re.sub('#', '', subname)
continue
while x < subcnt:
fndit = subname.find(nono, fndit)
#print ("space before check: " + str(subname[fndit-1:fndit]))
#print ("space after check: " + str(subname[fndit+1:fndit+2]))
if subname[fndit-1:fndit] == ' ' and subname[fndit+1:fndit+2] == ' ':
if subname[fndit -1:fndit] == ' ' and subname[fndit +1:fndit +2] == ' ':
logger.fdebug('[FILECHECKER] blankspace detected before and after ' + str(nono))
blspc+=1
x+=1
@ -533,7 +533,7 @@ def listFiles(dir,watchcomic,Publisher,AlternateSearch=None,manual=None,sarc=Non
AS_Alternate = AlternateSearch
for calt in chkthealt:
AS_tupled = False
AS_Alternate = re.sub('##','',calt)
AS_Alternate = re.sub('##', '', calt)
if '!!' in AS_Alternate:
# if it's !! present, it's the comicid associated with the series as an added annual.
# extract the !!, store it and then remove it so things will continue.
@ -542,7 +542,7 @@ def listFiles(dir,watchcomic,Publisher,AlternateSearch=None,manual=None,sarc=Non
as_end = AS_Alternate.find('##', as_start)
if as_end == -1: as_end = len(AS_Alternate)
logger.fdebug('as_start: ' + str(as_end) + ' --- ' + str(AS_Alternate[as_start:as_end]))
AS_ComicID = AS_Alternate[as_start+2:as_end]
AS_ComicID = AS_Alternate[as_start +2:as_end]
logger.fdebug('[FILECHECKER] Extracted comicid for given annual : ' + str(AS_ComicID))
AS_Alternate = re.sub('!!' + str(AS_ComicID), '', AS_Alternate)
AS_tupled = True
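Alternate search names are stored as a '##'-delimited string, and an annual's ComicID may be tucked in behind '!!'; the hunk above slices those markers out. A simplified standalone sketch, with an invented entry:

# Parsing '##'-separated alternates with optional '!!ComicID' tags (sample invented).
alternate_search = 'Example Comic Annual!!4050-12345##Example Comic Special'
for calt in alternate_search.split('##'):
    as_alternate = calt
    as_comicid = None
    if '!!' in as_alternate:
        as_start = as_alternate.find('!!')
        as_comicid = as_alternate[as_start + 2:]
        as_alternate = as_alternate[:as_start]
    print('%s -> %s' % (as_alternate, as_comicid))
# Example Comic Annual -> 4050-12345
# Example Comic Special -> None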
@ -630,30 +630,30 @@ def listFiles(dir,watchcomic,Publisher,AlternateSearch=None,manual=None,sarc=Non
findtitlepos = subname.find('-')
if charpos != 0:
logger.fdebug('[FILECHECKER] detected ' + str(len(charpos)) + ' special characters')
for i,j in enumerate(charpos):
for i, j in enumerate(charpos):
logger.fdebug('i,j:' + str(i) + ',' + str(j))
logger.fdebug(str(len(subname)) + ' - subname: ' + subname)
logger.fdebug("digitchk: " + subname[j-1:])
logger.fdebug("digitchk: " + subname[j -1:])
if j >= len(subname):
logger.fdebug('[FILECHECKER] ' + str(j) + ' is >= ' + str(len(subname)) + ' .End reached. ignoring remainder.')
break
elif subname[j:] == '-':
try:
if j <= len(subname) and subname[j+1].isdigit():
if j <= len(subname) and subname[j +1].isdigit():
logger.fdebug('[FILECHECKER] negative issue detected.')
#detneg = "yes"
except IndexError:
logger.fdebug('[FILECHECKER] There was a problem parsing the information from this filename: ' + comicpath)
elif j > findtitlepos:
if subname[j:] == '#':
if subname[j+1].isdigit():
if subname[j +1].isdigit():
logger.fdebug('[FILECHECKER] # detected denoting issue#, ignoring.')
else:
nonocount-=1
elif ('-' in watchcomic or '.' in watchcomic) and j < len(watchcomic):
logger.fdebug('[FILECHECKER] - appears in series title, ignoring.')
else:
digitchk = re.sub('#','', subname[j-1:]).strip()
digitchk = re.sub('#', '', subname[j -1:]).strip()
logger.fdebug('[FILECHECKER] special character appears outside of title - ignoring @ position: ' + str(charpos[i]))
nonocount-=1
@ -683,7 +683,7 @@ def listFiles(dir,watchcomic,Publisher,AlternateSearch=None,manual=None,sarc=Non
logger.fdebug('[FILECHECKER] after title removed from FILENAME [' + str(item[jtd_len:]) + ']')
logger.fdebug('[FILECHECKER] creating just the digits using SUBNAME, pruning first [' + str(jtd_len) + '] chars from [' + subname + ']')
justthedigits_1 = re.sub('#','', subname[jtd_len:]).strip()
justthedigits_1 = re.sub('#', '', subname[jtd_len:]).strip()
if enable_annual:
logger.fdebug('enable annual is on')
@ -710,7 +710,7 @@ def listFiles(dir,watchcomic,Publisher,AlternateSearch=None,manual=None,sarc=Non
logger.fdebug('st:' + str(st))
st_d = digitchk[:st]
logger.fdebug('st_d:' + str(st_d))
st_e = digitchk[st+1:]
st_e = digitchk[st +1:]
logger.fdebug('st_e:' + str(st_e))
#x = int(float(st_d))
#logger.fdebug('x:' + str(x))
@ -727,7 +727,7 @@ def listFiles(dir,watchcomic,Publisher,AlternateSearch=None,manual=None,sarc=Non
else:
if len(justthedigits_1) >= len(digitchk) and len(digitchk) > 3:
logger.fdebug('[FILECHECKER] Removing issue title.')
justthedigits_1 = re.sub(digitchk,'', justthedigits_1).strip()
justthedigits_1 = re.sub(digitchk, '', justthedigits_1).strip()
logger.fdebug('[FILECHECKER] After issue title removed [' + justthedigits_1 + ']')
titlechk = True
hyphensplit = digitchk
@ -747,7 +747,7 @@ def listFiles(dir,watchcomic,Publisher,AlternateSearch=None,manual=None,sarc=Non
logger.fdebug('[FILECHECKER] digitchk len : ' + str(len(digitchk)))
if len(justthedigits_1) >= len(digitchk) and len(digitchk) > 3:
logger.fdebug('[FILECHECKER] Removing issue title.')
justthedigits_1 = re.sub(digitchk,'', justthedigits_1).strip()
justthedigits_1 = re.sub(digitchk, '', justthedigits_1).strip()
logger.fdebug('[FILECHECKER] After issue title removed [' + justthedigits_1 + ']')
titlechk = True
hyphensplit = digitchk
@ -825,7 +825,7 @@ def listFiles(dir,watchcomic,Publisher,AlternateSearch=None,manual=None,sarc=Non
# justthedigits = justthedigits.split(' ', 1)[0]
#if the issue has an alphanumeric (issue_exceptions, join it and push it through)
logger.fdebug('[FILECHECKER] JUSTTHEDIGITS [' + justthedigits + ']' )
logger.fdebug('[FILECHECKER] JUSTTHEDIGITS [' + justthedigits + ']')
if digitsvalid == "true":
pass
else:
@ -835,7 +835,7 @@ def listFiles(dir,watchcomic,Publisher,AlternateSearch=None,manual=None,sarc=Non
if '.' in justthedigits:
tmpdec = justthedigits.find('.')
b4dec = justthedigits[:tmpdec]
a4dec = justthedigits[tmpdec+1:]
a4dec = justthedigits[tmpdec +1:]
if a4dec.isdigit() and b4dec.isdigit():
logger.fdebug('[FILECHECKER] DECIMAL ISSUE DETECTED')
digitsvalid = "true"
@ -1009,7 +1009,7 @@ def listFiles(dir,watchcomic,Publisher,AlternateSearch=None,manual=None,sarc=Non
#if the issue title was present and it contained a numeric, it will pull that as the issue incorrectly
if isstitle_chk == True:
justthedigits = possibleissue_num
subname = re.sub(' '.join(vals[0]['isstitle_removal']),'',subname).strip()
subname = re.sub(' '.join(vals[0]['isstitle_removal']), '', subname).strip()
else:
logger.fdebug('No issue title.')
@ -1033,7 +1033,7 @@ def listFiles(dir,watchcomic,Publisher,AlternateSearch=None,manual=None,sarc=Non
else:
sub_removed = subname.replace('_', ' ')
logger.fdebug('[FILECHECKER] sub_removed: ' + sub_removed)
split_sub = sub_removed.rsplit(' ',1)[0].split(' ') #removes last word (assuming it's the issue#)
split_sub = sub_removed.rsplit(' ', 1)[0].split(' ') #removes last word (assuming it's the issue#)
split_mod = modwatchcomic.replace('_', ' ').split() #batman
i = 0
newc = ''
@ -1045,12 +1045,12 @@ def listFiles(dir,watchcomic,Publisher,AlternateSearch=None,manual=None,sarc=Non
logger.fdebug('[FILECHECKER] split_sub: ' + str(split_sub))
logger.fdebug('[FILECHECKER] split_mod: ' + str(split_mod))
x = len(split_sub)-1
x = len(split_sub) -1
scnt = 0
if x > len(split_mod)-1:
if x > len(split_mod) -1:
logger.fdebug('[FILECHECKER] number of words do not match...aborting.')
else:
while ( x > -1 ):
while (x > -1):
logger.fdebug(str(split_sub[x]) + ' comparing to ' + str(split_mod[x]))
if str(split_sub[x]).lower() == str(split_mod[x]).lower():
scnt+=1
@ -1062,7 +1062,7 @@ def listFiles(dir,watchcomic,Publisher,AlternateSearch=None,manual=None,sarc=Non
totalcnt = int(len(split_mod))
logger.fdebug('[FILECHECKER] split_mod length:' + str(totalcnt))
try:
spercent = (wordcnt/totalcnt) * 100
spercent = (wordcnt /totalcnt) * 100
except ZeroDivisionError:
spercent = 0
logger.fdebug('[FILECHECKER] we got ' + str(spercent) + ' percent.')
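One thing the cleanup does not change: on Python 2, wordcnt / totalcnt is integer division, so spercent collapses to 0 or 100 unless an operand is a float (the havetotals() hunk later multiplies by 100.0 first for exactly that reason). A one-liner showing the difference:

# Python 2 integer-division pitfall in the percent calculation above.
wordcnt, totalcnt = 3, 4
print((wordcnt / totalcnt) * 100)    # 0 on Python 2, 75.0 on Python 3
print((wordcnt * 100.0) / totalcnt)  # 75.0 on both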
@ -1129,7 +1129,7 @@ def validateAndCreateDirectory(dir, create=False, module=None):
try:
permission = int(mylar.CHMOD_DIR, 8)
os.umask(0) # this is probably redudant, but it doesn't hurt to clear the umask here.
os.makedirs(dir.rstrip(), permission )
os.makedirs(dir.rstrip(), permission)
except OSError:
raise SystemExit(module + ' Could not create directory: ' + dir + '. Exiting....')
return True
@ -1140,7 +1140,7 @@ def validateAndCreateDirectory(dir, create=False, module=None):
def indices(string, char):
return [ i for i,c in enumerate(string) if c == char ]
return [i for i, c in enumerate(string) if c == char]
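indices(), reformatted above, returns every position of a character in a string; the '-'/'.' scanners iterate over that list. For example:

def indices(string, char):
    return [i for i, c in enumerate(string) if c == char]

print(indices('Example - Comic - 001', '-'))  # [8, 16]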
def traverse_directories(dir):
filelist = []
@ -1172,4 +1172,4 @@ def crc(filename):
#return "%X"%(prev & 0xFFFFFFFF)
#speed in lieu of memory (file into memory entirely)
return "%X" % (zlib.crc32(open(filename,"rb").read()) & 0xFFFFFFFF)
return "%X" % (zlib.crc32(open(filename, "rb").read()) & 0xFFFFFFFF)

View File

@ -27,9 +27,9 @@ def Startit(searchName, searchIssue, searchYear, ComicVersion, IssDateFix):
encodeSearch = urllib.quote_plus(searchName)
splitSearch = encodeSearch.split(" ")
joinSearch = "+".join(splitSearch)+"+"+searchIssue
searchIsOne = "0"+searchIssue
searchIsTwo = "00"+searchIssue
joinSearch = "+".join(splitSearch) +"+" +searchIssue
searchIsOne = "0" +searchIssue
searchIsTwo = "00" +searchIssue
if mylar.PREFERRED_QUALITY == 1: joinSearch = joinSearch + " .cbr"
elif mylar.PREFERRED_QUALITY == 2: joinSearch = joinSearch + " .cbz"
@ -82,7 +82,7 @@ def Startit(searchName, searchIssue, searchYear, ComicVersion, IssDateFix):
"link": urlParse["href"],
"length": urlParse["length"],
"pubdate": feed.entries[countUp].updated})
countUp=countUp+1
countUp = countUp + 1
logger.fdebug('keypair: ' + str(keyPair))
@ -94,10 +94,10 @@ def Startit(searchName, searchIssue, searchYear, ComicVersion, IssDateFix):
regExOne = "(%s\\s*(0)?(0)?%s\\s*\\(.*?\\)\\s*\\(%s\\))" %(regexName, searchIssue, searchYear)
#Sometimes comics aren't actually published the same year comicVine says - trying to adjust for these cases
regExTwo = "(%s\\s*(0)?(0)?%s\\s*\\(%s\\))" %(regexName, searchIssue, int(searchYear)+1)
regExThree = "(%s\\s*(0)?(0)?%s\\s*\\(%s\\))" %(regexName, searchIssue, int(searchYear)-1)
regExFour = "(%s\\s*(0)?(0)?%s\\s*\\(.*?\\)\\s*\\(%s\\))" %(regexName, searchIssue, int(searchYear)+1)
regExFive = "(%s\\s*(0)?(0)?%s\\s*\\(.*?\\)\\s*\\(%s\\))" %(regexName, searchIssue, int(searchYear)-1)
regExTwo = "(%s\\s*(0)?(0)?%s\\s*\\(%s\\))" %(regexName, searchIssue, int(searchYear) +1)
regExThree = "(%s\\s*(0)?(0)?%s\\s*\\(%s\\))" %(regexName, searchIssue, int(searchYear) -1)
regExFour = "(%s\\s*(0)?(0)?%s\\s*\\(.*?\\)\\s*\\(%s\\))" %(regexName, searchIssue, int(searchYear) +1)
regExFive = "(%s\\s*(0)?(0)?%s\\s*\\(.*?\\)\\s*\\(%s\\))" %(regexName, searchIssue, int(searchYear) -1)
regexList=[regEx, regExOne, regExTwo, regExThree, regExFour, regExFive]
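The hunk above builds zero-padded issue variants and a set of regexes that tolerate a publication year one off from ComicVine's. A sketch of the same construction, with invented search terms:

# Padded issue numbers and +/-1-year regex variants, as in Startit() above.
import re

search_name, search_issue, search_year = 'Example Comic', '7', '2014'
padded = ['0' + search_issue, '00' + search_issue]  # searchIsOne / searchIsTwo
regex_name = search_name.replace(' ', '\\s*')
regexes = ['(%s\\s*(0)?(0)?%s\\s*\\(%s\\))' % (regex_name, search_issue, year)
           for year in (search_year, int(search_year) + 1, int(search_year) - 1)]
title = 'Example Comic 007 (2015)'
print(any(re.search(rx, title) for rx in regexes))  # True: 2015 tolerated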

View File

@ -6,7 +6,7 @@ import time
import mylar
from mylar import logger
def putfile(localpath,file): #localpath=full path to .torrent (including filename), file=filename of torrent
def putfile(localpath, file): #localpath=full path to .torrent (including filename), file=filename of torrent
try:
import paramiko
@ -80,7 +80,7 @@ def sendfiles(filelist):
fhost = mylar.TAB_HOST.find(':')
host = mylar.TAB_HOST[:fhost]
port = int(mylar.TAB_HOST[fhost+1:])
port = int(mylar.TAB_HOST[fhost +1:])
logger.fdebug('Destination: ' + host)
logger.fdebug('Using SSH port : ' + str(port))
@ -104,7 +104,7 @@ def sendfiles(filelist):
def sendtohome(sftp, remotepath, filelist, transport):
fhost = mylar.TAB_HOST.find(':')
host = mylar.TAB_HOST[:fhost]
port = int(mylar.TAB_HOST[fhost+1:])
port = int(mylar.TAB_HOST[fhost +1:])
successlist = []
filestotal = len(filelist)
@ -126,13 +126,13 @@ def sendtohome(sftp, remotepath, filelist, transport):
filename = tempfile.replace('\0ff1a', '-')
#now we encode the structure to ascii so we can write directories/filenames without error.
filename = tempfile.encode('ascii','ignore')
filename = tempfile.encode('ascii', 'ignore')
remdir = remotepath
localsend = files['filepath']
logger.info('Sending : ' + localsend)
remotesend = os.path.join(remdir,filename)
remotesend = os.path.join(remdir, filename)
logger.info('To : ' + remotesend)
try:
@ -152,7 +152,7 @@ def sendtohome(sftp, remotepath, filelist, transport):
sftp.put(localsend, remotesend)#, callback=printTotals)
sendcheck = True
except Exception, e:
logger.info('Attempt #' + str(count) + ': ERROR Sending issue to seedbox *** Caught exception: %s: %s' % (e.__class__,e))
logger.info('Attempt #' + str(count) + ': ERROR Sending issue to seedbox *** Caught exception: %s: %s' % (e.__class__, e))
logger.info('Forcibly closing connection and attempting to reconnect')
sftp.close()
transport.close()
@ -181,7 +181,7 @@ def sendtohome(sftp, remotepath, filelist, transport):
sftp.put(localsend, remotesend)
sendcheck = True
except Exception, e:
logger.info('Attempt #' + str(count) + ': ERROR Sending issue to seedbox *** Caught exception: %s: %s' % (e.__class__,e))
logger.info('Attempt #' + str(count) + ': ERROR Sending issue to seedbox *** Caught exception: %s: %s' % (e.__class__, e))
logger.info('Forcibly closing connection and attempting to reconnect')
sftp.close()
transport.close()

View File

@ -24,7 +24,7 @@ import mylar
def multikeysort(items, columns):
comparers = [ ((itemgetter(col[1:].strip()), -1) if col.startswith('-') else (itemgetter(col.strip()), 1)) for col in columns]
comparers = [((itemgetter(col[1:].strip()), -1) if col.startswith('-') else (itemgetter(col.strip()), 1)) for col in columns]
def comparer(left, right):
for fn, mult in comparers:
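multikeysort orders dictionaries on several columns, with a '-' prefix flipping direction via the -1 multiplier in the comparers list above. The same result can be had with two stable sorted() passes; a sketch of an equivalent call:

# Equivalent of multikeysort(rows, ['-ComicYear', 'ComicName']) via stable sorts.
from operator import itemgetter

rows = [{'ComicName': 'B', 'ComicYear': '2012'},
        {'ComicName': 'A', 'ComicYear': '2014'},
        {'ComicName': 'C', 'ComicYear': '2014'}]
ordered = sorted(rows, key=itemgetter('ComicName'))          # secondary key first
ordered = sorted(ordered, key=itemgetter('ComicYear'), reverse=True)
print([r['ComicName'] for r in ordered])  # ['A', 'C', 'B']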
@ -53,31 +53,31 @@ def latinToAscii(unicrap):
"""
From couch potato
"""
xlate = {0xc0:'A', 0xc1:'A', 0xc2:'A', 0xc3:'A', 0xc4:'A', 0xc5:'A',
0xc6:'Ae', 0xc7:'C',
0xc8:'E', 0xc9:'E', 0xca:'E', 0xcb:'E', 0x86:'e',
0xcc:'I', 0xcd:'I', 0xce:'I', 0xcf:'I',
0xd0:'Th', 0xd1:'N',
0xd2:'O', 0xd3:'O', 0xd4:'O', 0xd5:'O', 0xd6:'O', 0xd8:'O',
0xd9:'U', 0xda:'U', 0xdb:'U', 0xdc:'U',
0xdd:'Y', 0xde:'th', 0xdf:'ss',
0xe0:'a', 0xe1:'a', 0xe2:'a', 0xe3:'a', 0xe4:'a', 0xe5:'a',
0xe6:'ae', 0xe7:'c',
0xe8:'e', 0xe9:'e', 0xea:'e', 0xeb:'e', 0x0259:'e',
0xec:'i', 0xed:'i', 0xee:'i', 0xef:'i',
0xf0:'th', 0xf1:'n',
0xf2:'o', 0xf3:'o', 0xf4:'o', 0xf5:'o', 0xf6:'o', 0xf8:'o',
0xf9:'u', 0xfa:'u', 0xfb:'u', 0xfc:'u',
0xfd:'y', 0xfe:'th', 0xff:'y',
0xa1:'!', 0xa2:'{cent}', 0xa3:'{pound}', 0xa4:'{currency}',
0xa5:'{yen}', 0xa6:'|', 0xa7:'{section}', 0xa8:'{umlaut}',
0xa9:'{C}', 0xaa:'{^a}', 0xab:'<<', 0xac:'{not}',
0xad:'-', 0xae:'{R}', 0xaf:'_', 0xb0:'{degrees}',
0xb1:'{+/-}', 0xb2:'{^2}', 0xb3:'{^3}', 0xb4:"'",
0xb5:'{micro}', 0xb6:'{paragraph}', 0xb7:'*', 0xb8:'{cedilla}',
0xb9:'{^1}', 0xba:'{^o}', 0xbb:'>>',
0xbc:'{1/4}', 0xbd:'{1/2}', 0xbe:'{3/4}', 0xbf:'?',
0xd7:'*', 0xf7:'/'
xlate = {0xc0: 'A', 0xc1: 'A', 0xc2: 'A', 0xc3: 'A', 0xc4: 'A', 0xc5: 'A',
0xc6: 'Ae', 0xc7: 'C',
0xc8: 'E', 0xc9: 'E', 0xca: 'E', 0xcb: 'E', 0x86: 'e',
0xcc: 'I', 0xcd: 'I', 0xce: 'I', 0xcf: 'I',
0xd0: 'Th', 0xd1: 'N',
0xd2: 'O', 0xd3: 'O', 0xd4: 'O', 0xd5: 'O', 0xd6: 'O', 0xd8: 'O',
0xd9: 'U', 0xda: 'U', 0xdb: 'U', 0xdc: 'U',
0xdd: 'Y', 0xde: 'th', 0xdf: 'ss',
0xe0: 'a', 0xe1: 'a', 0xe2: 'a', 0xe3: 'a', 0xe4: 'a', 0xe5: 'a',
0xe6: 'ae', 0xe7: 'c',
0xe8: 'e', 0xe9: 'e', 0xea: 'e', 0xeb: 'e', 0x0259: 'e',
0xec: 'i', 0xed: 'i', 0xee: 'i', 0xef: 'i',
0xf0: 'th', 0xf1: 'n',
0xf2: 'o', 0xf3: 'o', 0xf4: 'o', 0xf5: 'o', 0xf6: 'o', 0xf8: 'o',
0xf9: 'u', 0xfa: 'u', 0xfb: 'u', 0xfc: 'u',
0xfd: 'y', 0xfe: 'th', 0xff: 'y',
0xa1: '!', 0xa2: '{cent}', 0xa3: '{pound}', 0xa4: '{currency}',
0xa5: '{yen}', 0xa6: '|', 0xa7: '{section}', 0xa8: '{umlaut}',
0xa9: '{C}', 0xaa: '{^a}', 0xab: '<<', 0xac: '{not}',
0xad: '-', 0xae: '{R}', 0xaf: '_', 0xb0: '{degrees}',
0xb1: '{+/-}', 0xb2: '{^2}', 0xb3: '{^3}', 0xb4: "'",
0xb5: '{micro}', 0xb6: '{paragraph}', 0xb7: '*', 0xb8: '{cedilla}',
0xb9: '{^1}', 0xba: '{^o}', 0xbb: '>>',
0xbc: '{1/4}', 0xbd: '{1/2}', 0xbe: '{3/4}', 0xbf: '?',
0xd7: '*', 0xf7: '/'
}
r = ''
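latinToAscii maps Latin-1 code points onto plain-ASCII stand-ins; the loop below the table (not shown in the hunk) presumably walks the string through xlate. A trimmed sketch of that walk, with a small subset of the table:

# Trimmed version of the xlate walk; the table here is a small subset.
xlate = {0xc9: 'E', 0xe9: 'e', 0xbd: '{1/2}', 0xd7: '*'}

def latin_to_ascii(unicrap):
    r = ''
    for ch in unicrap:
        if ord(ch) in xlate:
            r += xlate[ord(ch)]
        elif ord(ch) < 0x80:
            r += ch
    return r

print(latin_to_ascii(u'\xc9t\xe9 \xbd'))  # Ete {1/2}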
@ -92,7 +92,7 @@ def latinToAscii(unicrap):
def convert_milliseconds(ms):
seconds = ms/1000
seconds = ms /1000
gmtime = time.gmtime(seconds)
if seconds > 3600:
minutes = time.strftime("%H:%M:%S", gmtime)
@ -122,7 +122,7 @@ def now():
def bytes_to_mb(bytes):
mb = int(bytes)/1048576
mb = int(bytes) /1048576
size = '%.1f MB' % mb
return size
@ -136,7 +136,7 @@ def human_size(size_bytes):
# because I really hate unnecessary plurals
return "1 byte"
suffixes_table = [('bytes',0),('KB',0),('MB',1),('GB',2),('TB',2), ('PB',2)]
suffixes_table = [('bytes', 0), ('KB', 0), ('MB', 1), ('GB', 2), ('TB', 2), ('PB', 2)]
num = float(0 if size_bytes is None else size_bytes)
for suffix, precision in suffixes_table:
@ -163,9 +163,9 @@ def human2bytes(s):
num = s[:-1]
assert num.isdigit() and letter in symbols
num = float(num)
prefix = {symbols[0]:1}
prefix = {symbols[0]: 1}
for i, s in enumerate(symbols[1:]):
prefix[s] = 1 << (i+1)*10
prefix[s] = 1 << (i +1) *10
return int(num * prefix[letter])
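human2bytes builds its multiplier table by left-shifting 10 bits per symbol, so 'K' is 2**10, 'M' is 2**20, and so on. Worked through for the reformatted line above (the symbols tuple itself is not in the hunk, so it is assumed here):

# The 1 << (i + 1) * 10 table, with an assumed symbols tuple.
symbols = ('B', 'K', 'M', 'G', 'T', 'P')
prefix = {symbols[0]: 1}
for i, s in enumerate(symbols[1:]):
    prefix[s] = 1 << (i + 1) * 10
print(prefix['K'], prefix['M'])       # 1024 1048576
print(int(float('2')) * prefix['K'])  # human2bytes('2K') -> 2048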
def replace_all(text, dic):
@ -225,7 +225,7 @@ def decimal_issue(iss):
deciss = int(iss) * 1000
else:
iss_b4dec = iss[:iss_find]
iss_decval = iss[iss_find+1:]
iss_decval = iss[iss_find +1:]
if int(iss_decval) == 0:
iss = iss_b4dec
issdec = int(iss_decval)
@ -286,10 +286,10 @@ def rename_param(comicid, comicname, issue, ofilename, comicyear=None, issueid=N
#use issueid to get publisher, series, year, issue number
logger.fdebug('issueid is now : ' + str(issueid))
issuenzb = myDB.selectone("SELECT * from issues WHERE ComicID=? AND IssueID=?", [comicid,issueid]).fetchone()
issuenzb = myDB.selectone("SELECT * from issues WHERE ComicID=? AND IssueID=?", [comicid, issueid]).fetchone()
if issuenzb is None:
logger.fdebug('not an issue, checking against annuals')
issuenzb = myDB.selectone("SELECT * from annuals WHERE ComicID=? AND IssueID=?", [comicid,issueid]).fetchone()
issuenzb = myDB.selectone("SELECT * from annuals WHERE ComicID=? AND IssueID=?", [comicid, issueid]).fetchone()
if issuenzb is None:
logger.fdebug('Unable to rename - cannot locate issue id within db')
return
@ -308,7 +308,7 @@ def rename_param(comicid, comicname, issue, ofilename, comicyear=None, issueid=N
'C',
'X',
'O']
valid_spaces = ('.','-')
valid_spaces = ('.', '-')
for issexcept in issue_exceptions:
if issexcept.lower() in issuenum.lower():
logger.fdebug('ALPHANUMERIC EXCEPTION : [' + issexcept + ']')
@ -344,7 +344,7 @@ def rename_param(comicid, comicname, issue, ofilename, comicyear=None, issueid=N
if '.' in issuenum:
iss_find = issuenum.find('.')
iss_b4dec = issuenum[:iss_find]
iss_decval = issuenum[iss_find+1:]
iss_decval = issuenum[iss_find +1:]
if iss_decval.endswith('.'):
iss_decval = iss_decval[:-1]
if int(iss_decval) == 0:
@ -444,7 +444,7 @@ def rename_param(comicid, comicname, issue, ofilename, comicyear=None, issueid=N
logger.fdebug('Pretty Comic Issue is : ' + str(prettycomiss))
issueyear = issuenzb['IssueDate'][:4]
month = issuenzb['IssueDate'][5:7].replace('-','').strip()
month = issuenzb['IssueDate'][5:7].replace('-', '').strip()
month_name = fullmonth(month)
logger.fdebug('Issue Year : ' + str(issueyear))
comicnzb= myDB.selectone("SELECT * from comics WHERE comicid=?", [comicid]).fetchone()
@ -466,7 +466,7 @@ def rename_param(comicid, comicname, issue, ofilename, comicyear=None, issueid=N
comversion = 'None'
#if comversion is None, remove it so it doesn't populate with 'None'
if comversion == 'None':
chunk_f_f = re.sub('\$VolumeN','',mylar.FILE_FORMAT)
chunk_f_f = re.sub('\$VolumeN', '', mylar.FILE_FORMAT)
chunk_f = re.compile(r'\s+')
chunk_file_format = chunk_f.sub(' ', chunk_f_f)
logger.fdebug('No version # found for series, removing from filename')
@ -475,7 +475,7 @@ def rename_param(comicid, comicname, issue, ofilename, comicyear=None, issueid=N
chunk_file_format = mylar.FILE_FORMAT
if annualize is None:
chunk_f_f = re.sub('\$Annual','',chunk_file_format)
chunk_f_f = re.sub('\$Annual', '', chunk_file_format)
chunk_f = re.compile(r'\s+')
chunk_file_format = chunk_f.sub(' ', chunk_f_f)
logger.fdebug('not an annual - removing from filename paramaters')
@ -492,7 +492,7 @@ def rename_param(comicid, comicname, issue, ofilename, comicyear=None, issueid=N
logger.fdebug('[' + series + '][ANNUALS-ON][ANNUAL IN SERIES][NOT $ANNUAL] prettycomiss: ' + str(prettycomiss))
else:
#because it exists within title, strip it then use formatting tag for placement of wording.
chunk_f_f = re.sub('\$Annual','',chunk_file_format)
chunk_f_f = re.sub('\$Annual', '', chunk_file_format)
chunk_f = re.compile(r'\s+')
chunk_file_format = chunk_f.sub(' ', chunk_f_f)
logger.fdebug('[' + series + '][ANNUALS-ON][ANNUAL IN SERIES][$ANNUAL] prettycomiss: ' + str(prettycomiss))
@ -516,7 +516,7 @@ def rename_param(comicid, comicname, issue, ofilename, comicyear=None, issueid=N
logger.fdebug('[' + series + '][ANNUALS-OFF][ANNUAL IN SERIES][NOT $ANNUAL] prettycomiss: ' + str(prettycomiss))
else:
#because it exists within title, strip it then use formatting tag for placement of wording.
chunk_f_f = re.sub('\$Annual','',chunk_file_format)
chunk_f_f = re.sub('\$Annual', '', chunk_file_format)
chunk_f = re.compile(r'\s+')
chunk_file_format = chunk_f.sub(' ', chunk_f_f)
logger.fdebug('[' + series + '][ANNUALS-OFF][ANNUAL IN SERIES][$ANNUAL] prettycomiss: ' + str(prettycomiss))
@ -533,15 +533,15 @@ def rename_param(comicid, comicname, issue, ofilename, comicyear=None, issueid=N
logger.fdebug('Annual detected within series title of ' + series + '. Not auto-correcting issue #')
seriesfilename = seriesfilename.encode('ascii', 'ignore').strip()
filebad = [':',',','/','?','!','\''] #in u_comicname or '/' in u_comicname or ',' in u_comicname or '?' in u_comicname:
filebad = [':', ',', '/', '?', '!', '\''] #in u_comicname or '/' in u_comicname or ',' in u_comicname or '?' in u_comicname:
for dbd in filebad:
if dbd in seriesfilename:
if dbd == '/': repthechar = '-'
else: repthechar = ''
seriesfilename = seriesfilename.replace(dbd,repthechar)
seriesfilename = seriesfilename.replace(dbd, repthechar)
logger.fdebug('Altering series name due to filenaming restrictions: ' + seriesfilename)
publisher = re.sub('!','', publisher)
publisher = re.sub('!', '', publisher)
file_values = {'$Series': seriesfilename,
'$Issue': prettycomiss,
@ -585,10 +585,10 @@ def rename_param(comicid, comicname, issue, ofilename, comicyear=None, issueid=N
logger.fdebug('Source: ' + str(ofilename))
logger.fdebug('Destination: ' + str(dst))
rename_this = { "destination_dir" : dst,
"nfilename" : nfilename,
"issueid" : issueid,
"comicid" : comicid }
rename_this = {"destination_dir": dst,
"nfilename": nfilename,
"issueid": issueid,
"comicid": comicid}
return rename_this
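rename_param feeds the file_values dict above into the user's FILE_FORMAT string; replace_all (whose definition appears earlier in this file) presumably just loops str.replace over the dict. A sketch with invented values:

# Applying the $-token dict to a file format string (values invented).
def replace_all(text, dic):
    for token, value in dic.items():
        text = text.replace(token, value)
    return text

file_values = {'$Series': 'Example Comic', '$Issue': '003', '$Year': '2014'}
print(replace_all('$Series $Issue ($Year)', file_values))  # Example Comic 003 (2014)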
@ -608,7 +608,7 @@ def apiremove(apistring, type):
#need to remove the urlencoded-portions as well in future
return apiremoved
def ComicSort(comicorder=None,sequence=None,imported=None):
def ComicSort(comicorder=None, sequence=None, imported=None):
if sequence:
# if it's on startup, load the sql into a tuple for use to avoid record-locking
i = 0
@ -642,15 +642,15 @@ def ComicSort(comicorder=None,sequence=None,imported=None):
i+=1
if sequence == 'startup':
if i == 0:
comicorder['SortOrder'] = ({'ComicID':'99999','ComicOrder':1})
comicorder['SortOrder'] = ({'ComicID': '99999', 'ComicOrder': 1})
comicorder['LastOrderNo'] = 1
comicorder['LastOrderID'] = 99999
else:
comicorder['SortOrder'] = comicorderlist
comicorder['LastOrderNo'] = i-1
comicorder['LastOrderID'] = comicorder['SortOrder'][i-1]['ComicID']
comicorder['LastOrderNo'] = i -1
comicorder['LastOrderID'] = comicorder['SortOrder'][i -1]['ComicID']
if i < 0: i == 0
logger.info('Sucessfully ordered ' + str(i-1) + ' series in your watchlist.')
logger.info('Sucessfully ordered ' + str(i -1) + ' series in your watchlist.')
return comicorder
elif sequence == 'update':
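A pre-existing quirk the cleanup leaves alone: the context line "if i < 0: i == 0" above compares rather than assigns, so it is a no-op. An assignment guard was presumably intended:

# Presumably the intended guard (assignment, not comparison).
i = -1
if i < 0:
    i = 0
print(i)  # 0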
mylar.COMICSORT['SortOrder'] = comicorderlist
@ -658,7 +658,7 @@ def ComicSort(comicorder=None,sequence=None,imported=None):
if i == 0:
placemnt = 1
else:
placemnt = int(i-1)
placemnt = int(i -1)
mylar.COMICSORT['LastOrderNo'] = placemnt
mylar.COMICSORT['LastOrderID'] = mylar.COMICSORT['SortOrder'][placemnt]['ComicID']
return
@ -682,7 +682,7 @@ def ComicSort(comicorder=None,sequence=None,imported=None):
def fullmonth(monthno):
#simple numerical to worded month conversion....
basmonths = {'1':'January','2':'February','3':'March','4':'April','5':'May','6':'June','7':'July','8':'August','9':'September','10':'October','11':'November','12':'December'}
basmonths = {'1': 'January', '2': 'February', '3': 'March', '4': 'April', '5': 'May', '6': 'June', '7': 'July', '8': 'August', '9': 'September', '10': 'October', '11': 'November', '12': 'December'}
monthconv = None
@ -715,14 +715,14 @@ def updateComicLocation():
# let's remove the non-standard characters here that will break filenaming / searching.
comicname_folder = filesafe(u_comicnm)
publisher = re.sub('!','',dl['ComicPublisher']) # thanks Boom!
publisher = re.sub('!', '', dl['ComicPublisher']) # thanks Boom!
year = dl['ComicYear']
comversion = dl['ComicVersion']
if comversion is None:
comversion = 'None'
#if comversion is None, remove it so it doesn't populate with 'None'
if comversion == 'None':
chunk_f_f = re.sub('\$VolumeN','',mylar.FOLDER_FORMAT)
chunk_f_f = re.sub('\$VolumeN', '', mylar.FOLDER_FORMAT)
chunk_f = re.compile(r'\s+')
folderformat = chunk_f.sub(' ', chunk_f_f)
else:
@ -750,7 +750,7 @@ def updateComicLocation():
if mylar.REPLACE_SPACES:
#mylar.REPLACE_CHAR ...determines what to replace spaces with underscore or dot
first = first.replace(' ', mylar.REPLACE_CHAR)
comlocation = os.path.join(mylar.NEWCOM_DIR,first).strip()
comlocation = os.path.join(mylar.NEWCOM_DIR, first).strip()
else:
#DESTINATION_DIR = /mnt/mediavg/Comics
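When a series has no version number, updateComicLocation drops the $VolumeN token from the folder format and collapses the leftover whitespace, as above. In isolation (format string invented):

# Dropping $VolumeN from a folder format when there is no version (format invented).
import re

folder_format = '$Publisher/$Series $VolumeN ($Year)'
chunk_f_f = re.sub(r'\$VolumeN', '', folder_format)
folderformat = re.compile(r'\s+').sub(' ', chunk_f_f)
print(folderformat)  # $Publisher/$Series ($Year)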
@ -818,7 +818,7 @@ def issuedigits(issnum):
return 9999999999
if issnum.isdigit():
int_issnum = int( issnum ) * 1000
int_issnum = int(issnum) * 1000
else:
#count = 0
#for char in issnum:
@ -869,13 +869,13 @@ def issuedigits(issnum):
int_issnum = 9999999999 * 1000 # set 9999999999 for integer value of issue
elif '.' in issnum or ',' in issnum:
#logger.fdebug('decimal detected.')
if ',' in issnum: issnum = re.sub(',','.', issnum)
if ',' in issnum: issnum = re.sub(',', '.', issnum)
issst = str(issnum).find('.')
if issst == 0:
issb4dec = 0
else:
issb4dec = str(issnum)[:issst]
decis = str(issnum)[issst+1:]
decis = str(issnum)[issst +1:]
if len(decis) == 1:
decisval = int(decis) * 10
issaftdec = str(decisval)
@ -899,7 +899,7 @@ def issuedigits(issnum):
#validity check
if x < 0:
#logger.info("I've encountered a negative issue #: " + str(issnum) + ". Trying to accomodate.")
int_issnum = (int(x)*1000) - 1
int_issnum = (int(x) *1000) - 1
else: raise ValueError
except ValueError, e:
#this will account for any alpha in a issue#, so long as it doesn't have decimals.
@ -911,9 +911,9 @@ def issuedigits(issnum):
if issnum[x].isalpha():
#take first occurance of alpha in string and carry it through
tstord = issnum[x:].rstrip()
tstord = re.sub('[\-\,\.\+]','', tstord).rstrip()
tstord = re.sub('[\-\,\.\+]', '', tstord).rstrip()
issno = issnum[:x].rstrip()
issno = re.sub('[\-\,\.\+]','', issno).rstrip()
issno = re.sub('[\-\,\.\+]', '', issno).rstrip()
try:
isschk = float(issno)
except ValueError, e:
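issuedigits packs issue numbers into sortable integers by multiplying by 1000, leaving room for the decimal and alpha-suffix handling in this hunk. A simplified sketch of the decimal branch; the final combination of whole and decimal parts is an assumption, since only the tenths scaling is visible above:

# The x1000 integer packing used by issuedigits, decimal branch only.
def pack(issnum):
    issnum = issnum.replace(',', '.')
    if issnum.isdigit():
        return int(issnum) * 1000        # whole numbers (visible in the hunk)
    issst = issnum.find('.')
    issb4dec = issnum[:issst] or '0'
    decis = issnum[issst + 1:]
    if len(decis) == 1:
        decis = str(int(decis) * 10)     # '.1' -> '10' (visible in the hunk)
    return int(issb4dec) * 1000 + int(decis) * 10  # assumed final combination

print(pack('3'), pack('3.1'))  # 3000 3100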
@ -1043,9 +1043,9 @@ def latestdate_fix():
#logger.info('dash found at position ' + str(finddash))
if finddash != 4: #format of mm-yyyy
lat_month = latestdate[:finddash]
lat_year = latestdate[finddash+1:]
lat_year = latestdate[finddash +1:]
else: #format of yyyy-mm
lat_month = latestdate[finddash+1:]
lat_month = latestdate[finddash +1:]
lat_year = latestdate[:finddash]
latestdate = (lat_year) + '-' + str(lat_month) + '-01'
@ -1097,7 +1097,7 @@ def LoadAlternateSearchNames(seriesname_alt, comicid):
AS_Alternate = seriesname_alt
AS_Alt.append(seriesname_alt)
for calt in chkthealt:
AS_Alter = re.sub('##','',calt)
AS_Alter = re.sub('##', '', calt)
u_altsearchcomic = AS_Alter.encode('ascii', 'ignore').strip()
AS_formatrem_seriesname = re.sub('\s+', ' ', u_altsearchcomic)
if AS_formatrem_seriesname[:1] == ' ': AS_formatrem_seriesname = AS_formatrem_seriesname[1:]
@ -1164,7 +1164,7 @@ def havetotals(refreshit=None):
return False # if it's 5/5 or 4/5, send back to updater and restore previous status'
try:
percent = (haveissues*100.0)/totalissues
percent = (haveissues *100.0) /totalissues
if percent > 100:
percent = 101
except (ZeroDivisionError, TypeError):
@ -1178,9 +1178,9 @@ def havetotals(refreshit=None):
recentstatus = 'Unknown'
elif comic['ForceContinuing'] == 1:
recentstatus = 'Continuing'
elif 'present' in comic['ComicPublished'].lower() or ( today()[:4] in comic['LatestDate']):
elif 'present' in comic['ComicPublished'].lower() or (today()[:4] in comic['LatestDate']):
latestdate = comic['LatestDate']
c_date = datetime.date(int(latestdate[:4]),int(latestdate[5:7]),1)
c_date = datetime.date(int(latestdate[:4]), int(latestdate[5:7]), 1)
n_date = datetime.date.today()
recentchk = (n_date - c_date).days
if comic['NewPublish']:
@ -1201,7 +1201,7 @@ def havetotals(refreshit=None):
"ComicImage": comic['ComicImage'],
"LatestIssue": comic['LatestIssue'],
"LatestDate": comic['LatestDate'],
"ComicPublished": re.sub('(N)','',comic['ComicPublished']).strip(),
"ComicPublished": re.sub('(N)', '', comic['ComicPublished']).strip(),
"Status": comic['Status'],
"recentstatus": recentstatus,
"percent": percent,
@ -1217,19 +1217,19 @@ def cvapi_check(web=None):
# logger.fdebug('[ComicVine API] ComicVine API Check Running...')
if mylar.CVAPI_TIME is None or mylar.CVAPI_TIME == '':
c_date = now()
c_obj_date = datetime.datetime.strptime(c_date,"%Y-%m-%d %H:%M:%S")
c_obj_date = datetime.datetime.strptime(c_date, "%Y-%m-%d %H:%M:%S")
mylar.CVAPI_TIME = c_obj_date
else:
if isinstance(mylar.CVAPI_TIME, unicode):
c_obj_date = datetime.datetime.strptime(mylar.CVAPI_TIME,"%Y-%m-%d %H:%M:%S")
c_obj_date = datetime.datetime.strptime(mylar.CVAPI_TIME, "%Y-%m-%d %H:%M:%S")
else:
c_obj_date = mylar.CVAPI_TIME
#if web is None: logger.fdebug('[ComicVine API] API Start Monitoring Time (~15mins): ' + str(mylar.CVAPI_TIME))
now_date = now()
n_date = datetime.datetime.strptime(now_date,"%Y-%m-%d %H:%M:%S")
n_date = datetime.datetime.strptime(now_date, "%Y-%m-%d %H:%M:%S")
#if web is None: logger.fdebug('[ComicVine API] Time now: ' + str(n_date))
absdiff = abs(n_date - c_obj_date)
mins = round(((absdiff.days * 24 * 60 * 60 + absdiff.seconds) / 60.0),2)
mins = round(((absdiff.days * 24 * 60 * 60 + absdiff.seconds) / 60.0), 2)
if mins < 15:
#if web is None: logger.info('[ComicVine API] Comicvine API count now at : ' + str(mylar.CVAPI_COUNT) + ' / ' + str(mylar.CVAPI_MAX) + ' in ' + str(mins) + ' minutes.')
if mylar.CVAPI_COUNT > mylar.CVAPI_MAX:
@ -1238,7 +1238,7 @@ def cvapi_check(web=None):
elif mins > 15:
mylar.CVAPI_COUNT = 0
c_date = now()
mylar.CVAPI_TIME = datetime.datetime.strptime(c_date,"%Y-%m-%d %H:%M:%S")
mylar.CVAPI_TIME = datetime.datetime.strptime(c_date, "%Y-%m-%d %H:%M:%S")
#if web is None: logger.info('[ComicVine API] 15 minute API interval resetting [' + str(mylar.CVAPI_TIME) + ']. Resetting API count to : ' + str(mylar.CVAPI_COUNT))
if web is None:
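cvapi_check keeps a rolling 15-minute window: it timestamps the first call, counts requests, and resets the counter once the window lapses. The same bookkeeping in miniature (the limit here is invented):

# Rolling 15-minute API window, as in cvapi_check (limit invented).
import datetime

CVAPI_TIME, CVAPI_COUNT, CVAPI_MAX = None, 0, 200

def api_check():
    global CVAPI_TIME, CVAPI_COUNT
    now = datetime.datetime.now()
    if CVAPI_TIME is None:
        CVAPI_TIME = now
    mins = (now - CVAPI_TIME).total_seconds() / 60.0
    if mins > 15:                      # window lapsed: reset
        CVAPI_TIME, CVAPI_COUNT = now, 0
    CVAPI_COUNT += 1
    return CVAPI_COUNT <= CVAPI_MAX    # False once throttled

print(api_check())  # True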
@ -1252,7 +1252,7 @@ def filesafe(comic):
u_comic = unicodedata.normalize('NFKD', comic).encode('ASCII', 'ignore').strip()
comicname_filesafe = re.sub('[\:\'\,\?\!\\\]', '', u_comic)
comicname_filesafe = re.sub('[\/]','-', comicname_filesafe)
comicname_filesafe = re.sub('[\/]', '-', comicname_filesafe)
return comicname_filesafe
@ -1266,7 +1266,7 @@ def IssueDetails(filelocation, IssueID=None):
if filelocation.endswith('.cbz'):
logger.fdebug('CBZ file detected. Checking for .xml within file')
shutil.copy( filelocation, dstlocation )
shutil.copy(filelocation, dstlocation)
else:
logger.fdebug('filename is not a cbz : ' + filelocation)
return
@ -1287,16 +1287,16 @@ def IssueDetails(filelocation, IssueID=None):
#looks for the first page and assumes it's the cover. (Alternate covers handled later on)
elif '000.jpg' in infile or '000.png' in infile or '00.jpg' in infile or '00.png' in infile:
logger.fdebug('Extracting primary image ' + infile + ' as coverfile for display.')
local_file = open(os.path.join(mylar.CACHE_DIR,'temp.jpg'), "wb")
local_file = open(os.path.join(mylar.CACHE_DIR, 'temp.jpg'), "wb")
local_file.write(inzipfile.read(infile))
local_file.close
cover = "found"
elif any( [ '00a' in infile, '00b' in infile, '00c' in infile, '00d' in infile, '00e' in infile ]):
elif any(['00a' in infile, '00b' in infile, '00c' in infile, '00d' in infile, '00e' in infile]):
logger.fdebug('Found Alternate cover - ' + infile + ' . Extracting.')
altlist = ('00a', '00b', '00c', '00d', '00e')
for alt in altlist:
if alt in infile:
local_file = open(os.path.join(mylar.CACHE_DIR,'temp.jpg'), "wb")
local_file = open(os.path.join(mylar.CACHE_DIR, 'temp.jpg'), "wb")
local_file.write(inzipfile.read(infile))
local_file.close
cover = "found"
@ -1304,12 +1304,12 @@ def IssueDetails(filelocation, IssueID=None):
elif ('001.jpg' in infile or '001.png' in infile) and cover == "notfound":
logger.fdebug('Extracting primary image ' + infile + ' as coverfile for display.')
local_file = open(os.path.join(mylar.CACHE_DIR,'temp.jpg'), "wb")
local_file = open(os.path.join(mylar.CACHE_DIR, 'temp.jpg'), "wb")
local_file.write(inzipfile.read(infile))
local_file.close
cover = "found"
ComicImage = os.path.join('cache', 'temp.jpg?'+str(modtime))
ComicImage = os.path.join('cache', 'temp.jpg?' +str(modtime))
IssueImage = replacetheslash(ComicImage)
@ -1319,7 +1319,7 @@ def IssueDetails(filelocation, IssueID=None):
unzip_cmd = "/usr/bin/unzip"
try:
#unzip -z will extract the zip comment field.
data = subprocess.check_output( [ unzip_cmd, '-z', dstlocation ] )
data = subprocess.check_output([unzip_cmd, '-z', dstlocation])
# return data is encoded in bytes, not unicode. Need to figure out how to run check_output returning utf-8
issuetag = 'comment'
except CalledProcessError as e:
@ -1558,7 +1558,7 @@ def incr_snatched(ComicID):
import db, logger
myDB = db.DBConnection()
incr_count = myDB.selectone("SELECT Have FROM Comics WHERE ComicID=?", [ComicID]).fetchone()
logger.fdebug('Incrementing HAVE count total to : ' + str( incr_count['Have'] + 1 ))
logger.fdebug('Incrementing HAVE count total to : ' + str(incr_count['Have'] + 1))
newCtrl = {"ComicID": ComicID}
newVal = {"Have": incr_count['Have'] + 1}
myDB.upsert("comics", newVal, newCtrl)
@ -1597,7 +1597,7 @@ def duplicate_filecheck(filename, ComicID=None, IssueID=None, StoryArcID=None):
logger.info('[DUPECHECK] Series has invalid issue totals [' + str(havechk['Have']) + '/' + str(havechk['Total']) + '] Attempting to Refresh & continue post-processing this issue.')
cid.append(ComicID)
logger.fdebug('[DUPECHECK] ComicID: ' + str(ComicID))
mylar.updater.dbUpdate(ComicIDList=cid,calledfrom='dupechk')
mylar.updater.dbUpdate(ComicIDList=cid, calledfrom='dupechk')
return duplicate_filecheck(filename, ComicID, IssueID, StoryArcID)
else:
rtnval = "dupe"
@ -1705,25 +1705,25 @@ def parse_32pfeed(rssfeedline):
KEYS_32P = {}
if mylar.ENABLE_32P and len(rssfeedline) > 1:
userid_st = rssfeedline.find('&user')
userid_en = rssfeedline.find('&',userid_st+1)
userid_en = rssfeedline.find('&', userid_st +1)
if userid_en == -1:
USERID_32P = rssfeedline[userid_st+6:]
USERID_32P = rssfeedline[userid_st +6:]
else:
USERID_32P = rssfeedline[userid_st+6:userid_en]
USERID_32P = rssfeedline[userid_st +6:userid_en]
auth_st = rssfeedline.find('&auth')
auth_en = rssfeedline.find('&',auth_st+1)
auth_en = rssfeedline.find('&', auth_st +1)
if auth_en == -1:
AUTH_32P = rssfeedline[auth_st+6:]
AUTH_32P = rssfeedline[auth_st +6:]
else:
AUTH_32P = rssfeedline[auth_st+6:auth_en]
AUTH_32P = rssfeedline[auth_st +6:auth_en]
authkey_st = rssfeedline.find('&authkey')
authkey_en = rssfeedline.find('&',authkey_st+1)
authkey_en = rssfeedline.find('&', authkey_st +1)
if authkey_en == -1:
AUTHKEY_32P = rssfeedline[authkey_st+9:]
AUTHKEY_32P = rssfeedline[authkey_st +9:]
else:
AUTHKEY_32P = rssfeedline[authkey_st+9:authkey_en]
AUTHKEY_32P = rssfeedline[authkey_st +9:authkey_en]
KEYS_32P = {"user": USERID_32P,
"auth": AUTH_32P,

View File

@ -47,7 +47,7 @@ def is_exists(comicid):
return False
def addComictoDB(comicid,mismatch=None,pullupd=None,imported=None,ogcname=None,calledfrom=None,annload=None,chkwant=None,issuechk=None,issuetype=None,latestissueinfo=None):
def addComictoDB(comicid, mismatch=None, pullupd=None, imported=None, ogcname=None, calledfrom=None, annload=None, chkwant=None, issuechk=None, issuetype=None, latestissueinfo=None):
# Putting this here to get around the circular import. Will try to use this to update images at later date.
# from mylar import cache
@ -92,7 +92,7 @@ def addComictoDB(comicid,mismatch=None,pullupd=None,imported=None,ogcname=None,c
helpers.ComicSort(comicorder=mylar.COMICSORT, imported=comicid)
# we need to lookup the info for the requested ComicID in full now
comic = cv.getComic(comicid,'comic')
comic = cv.getComic(comicid, 'comic')
logger.fdebug(comic)
if not comic:
@ -124,7 +124,7 @@ def addComictoDB(comicid,mismatch=None,pullupd=None,imported=None,ogcname=None,c
if gcdinfo == "No Match":
updater.no_searchresults(comicid)
nomatch = "true"
logger.info('There was an error when trying to add ' + comic['ComicName'] + ' (' + comic['ComicYear'] + ')' )
logger.info('There was an error when trying to add ' + comic['ComicName'] + ' (' + comic['ComicYear'] + ')')
return nomatch
else:
mismatch_com = "yes"
@ -150,18 +150,18 @@ def addComictoDB(comicid,mismatch=None,pullupd=None,imported=None,ogcname=None,c
if mylar.CV_ONLY:
#we'll defer this until later when we grab all the issues and then figure it out
logger.info('Uh-oh. I cannot find a Series Year for this series. I am going to try analyzing deeper.')
SeriesYear = cv.getComic(comicid,'firstissue',comic['FirstIssueID'])
SeriesYear = cv.getComic(comicid, 'firstissue', comic['FirstIssueID'])
if SeriesYear == '0000':
logger.info('Ok - I could not find a Series Year at all. Loading in the issue data now and will figure out the Series Year.')
CV_NoYearGiven = "yes"
issued = cv.getComic(comicid,'issue')
issued = cv.getComic(comicid, 'issue')
SeriesYear = issued['firstdate'][:4]
else:
SeriesYear = gcdinfo['SeriesYear']
else:
SeriesYear = comic['ComicYear']
logger.info('Sucessfully retrieved details for ' + comic['ComicName'] )
logger.info('Sucessfully retrieved details for ' + comic['ComicName'])
#since the weekly issue check could return either annuals or issues, let's initialize it here so it carries through properly.
weeklyissue_check = []
@ -237,7 +237,7 @@ def addComictoDB(comicid,mismatch=None,pullupd=None,imported=None,ogcname=None,c
logger.fdebug('[IMPORTER-ANNUAL] - ' + str(issueid) + ' already exists & was refreshed.')
num_res+=1 # need to manually increment since not a for-next loop
continue
issued = cv.getComic(issueid,'issue')
issued = cv.getComic(issueid, 'issue')
if len(issued) is None or len(issued) == 0:
logger.fdebug('[IMPORTER-ANNUAL] - Could not find any annual information...')
pass
@ -267,7 +267,7 @@ def addComictoDB(comicid,mismatch=None,pullupd=None,imported=None,ogcname=None,c
"IssueName": issname,
"ComicID": comicid,
"ComicName": comic['ComicName'],
"ReleaseComicID": re.sub('4050-','',firstval['Comic_ID']).strip(),
"ReleaseComicID": re.sub('4050-', '', firstval['Comic_ID']).strip(),
"ReleaseComicName": sr['name'],
"Status": "Skipped"}
myDB.upsert("annuals", newVals, newCtrl)
@ -319,7 +319,7 @@ def addComictoDB(comicid,mismatch=None,pullupd=None,imported=None,ogcname=None,c
if comlocation is None:
comicdir = comicname_filesafe
series = comicdir
publisher = re.sub('!','',comic['ComicPublisher']) # thanks Boom!
publisher = re.sub('!', '', comic['ComicPublisher']) # thanks Boom!
publisher = helpers.filesafe(publisher)
year = SeriesYear
comversion = comic['ComicVersion']
@ -327,7 +327,7 @@ def addComictoDB(comicid,mismatch=None,pullupd=None,imported=None,ogcname=None,c
comversion = 'None'
#if comversion is None, remove it so it doesn't populate with 'None'
if comversion == 'None':
chunk_f_f = re.sub('\$VolumeN','',mylar.FILE_FORMAT)
chunk_f_f = re.sub('\$VolumeN', '', mylar.FILE_FORMAT)
chunk_f = re.compile(r'\s+')
mylar.FILE_FORMAT = chunk_f.sub(' ', chunk_f_f)
@ -386,7 +386,7 @@ def addComictoDB(comicid,mismatch=None,pullupd=None,imported=None,ogcname=None,c
comicIssues = str(int(comic['ComicIssues']) + 1)
#let's download the image...
if os.path.exists(mylar.CACHE_DIR):pass
if os.path.exists(mylar.CACHE_DIR): pass
else:
#let's make the dir.
try:
@ -404,7 +404,7 @@ def addComictoDB(comicid,mismatch=None,pullupd=None,imported=None,ogcname=None,c
#urllib.urlretrieve(str(thisci), str(coverfile))
try:
cimage = re.sub('[\+]','%20', comic['ComicImage'])
cimage = re.sub('[\+]', '%20', comic['ComicImage'])
request = urllib2.Request(cimage)#, headers={'Content-Type': 'application/x-www-form-urlencoded'})
#request.add_header('User-Agent', str(mylar.USER_AGENT))
@ -426,7 +426,7 @@ def addComictoDB(comicid,mismatch=None,pullupd=None,imported=None,ogcname=None,c
logger.warn('[%s] Error fetching data using : %s' % (e, comic['ComicImage']))
logger.info('Attempting to use alternate image size to get cover.')
try:
cimage = re.sub('[\+]','%20', comic['ComicImageALT'])
cimage = re.sub('[\+]', '%20', comic['ComicImageALT'])
request = urllib2.Request(cimage)
response = urllib2.urlopen(request)
com_image = response.read()
@ -437,7 +437,7 @@ def addComictoDB(comicid,mismatch=None,pullupd=None,imported=None,ogcname=None,c
except Exception, e:
logger.warn('[%s] Error fetching data using : %s' % (e, comic['ComicImageALT']))
PRComicImage = os.path.join('cache',str(comicid) + ".jpg")
PRComicImage = os.path.join('cache', str(comicid) + ".jpg")
ComicImage = helpers.replacetheslash(PRComicImage)
#this is for Firefox when outside the LAN...it works, but I don't know how to implement it
@ -447,8 +447,8 @@ def addComictoDB(comicid,mismatch=None,pullupd=None,imported=None,ogcname=None,c
#if the comic cover local is checked, save a cover.jpg to the series folder.
if mylar.COMIC_COVER_LOCAL:
try:
comiclocal = os.path.join(comlocation,'cover.jpg')
shutil.copy(coverfile,comiclocal)
comiclocal = os.path.join(comlocation, 'cover.jpg')
shutil.copy(coverfile, comiclocal)
except IOError as e:
logger.error('Unable to save cover (' + str(coverfile) + ') into series directory (' + str(comiclocal) + ') at this time.')
@ -496,11 +496,11 @@ def addComictoDB(comicid,mismatch=None,pullupd=None,imported=None,ogcname=None,c
if CV_NoYearGiven == 'no':
#if set to 'no' then we haven't pulled down the issues, otherwise we did it already
issued = cv.getComic(comicid,'issue')
issued = cv.getComic(comicid, 'issue')
if issued is None:
logger.warn('Unable to retrieve data from ComicVine. Get your own API key already!')
return
logger.info('Sucessfully retrieved issue details for ' + comic['ComicName'] )
logger.info('Sucessfully retrieved issue details for ' + comic['ComicName'])
#move to own function so can call independently to only refresh issue data
#issued is from cv.getComic, comic['ComicName'] & comicid would both be already known to do independent call.
@ -510,8 +510,8 @@ def addComictoDB(comicid,mismatch=None,pullupd=None,imported=None,ogcname=None,c
return
if mylar.CVINFO or (mylar.CV_ONLY and mylar.CVINFO):
if not os.path.exists(os.path.join(comlocation,"cvinfo")) or mylar.CV_ONETIMER:
with open(os.path.join(comlocation,"cvinfo"),"w") as text_file:
if not os.path.exists(os.path.join(comlocation, "cvinfo")) or mylar.CV_ONETIMER:
with open(os.path.join(comlocation, "cvinfo"), "w") as text_file:
text_file.write(str(comic['ComicURL']))
logger.info('Updating complete for: ' + comic['ComicName'])
@ -542,16 +542,16 @@ def addComictoDB(comicid,mismatch=None,pullupd=None,imported=None,ogcname=None,c
else:
if mylar.IMP_MOVE:
logger.info('Mass import - Move files')
moveit.movefiles(comicid,comlocation,ogcname)
moveit.movefiles(comicid, comlocation, ogcname)
else:
logger.info('Mass import - Moving not Enabled. Setting Archived Status for import.')
moveit.archivefiles(comicid,ogcname)
moveit.archivefiles(comicid, ogcname)
#check for existing files...
statbefore = myDB.selectone("SELECT * FROM issues WHERE ComicID=? AND Issue_Number=?", [comicid,str(latestiss)]).fetchone()
statbefore = myDB.selectone("SELECT * FROM issues WHERE ComicID=? AND Issue_Number=?", [comicid, str(latestiss)]).fetchone()
logger.fdebug('issue: ' + str(latestiss) + ' status before chk :' + str(statbefore['Status']))
updater.forceRescan(comicid)
statafter = myDB.selectone("SELECT * FROM issues WHERE ComicID=? AND Issue_Number=?", [comicid,str(latestiss)]).fetchone()
statafter = myDB.selectone("SELECT * FROM issues WHERE ComicID=? AND Issue_Number=?", [comicid, str(latestiss)]).fetchone()
logger.fdebug('issue: ' + str(latestiss) + ' status after chk :' + str(statafter['Status']))
logger.fdebug('pullupd: ' + str(pullupd))
@ -562,7 +562,7 @@ def addComictoDB(comicid,mismatch=None,pullupd=None,imported=None,ogcname=None,c
# do this for only Present comics....
if mylar.AUTOWANT_UPCOMING and lastpubdate == 'Present' and series_status == 'Active': #and 'Present' in gcdinfo['resultPublished']:
logger.fdebug('latestissue: #' + str(latestiss))
chkstats = myDB.selectone("SELECT * FROM issues WHERE ComicID=? AND Issue_Number=?", [comicid,str(latestiss)]).fetchone()
chkstats = myDB.selectone("SELECT * FROM issues WHERE ComicID=? AND Issue_Number=?", [comicid, str(latestiss)]).fetchone()
if chkstats is None:
if mylar.ANNUALS_ON:
chkstats = myDB.selectone("SELECT * FROM annuals WHERE ComicID=? AND Issue_Number=?", [comicid, latestiss]).fetchone()
@ -575,7 +575,7 @@ def addComictoDB(comicid,mismatch=None,pullupd=None,imported=None,ogcname=None,c
cn_pull = comicname_filesafe
else:
cn_pull = comic['ComicName']
updater.newpullcheck(ComicName=cn_pull,ComicID=comicid,issue=latestiss)
updater.newpullcheck(ComicName=cn_pull, ComicID=comicid, issue=latestiss)
#here we grab issues that have been marked as wanted above...
results = []
@ -647,7 +647,7 @@ def addComictoDB(comicid,mismatch=None,pullupd=None,imported=None,ogcname=None,c
logger.info('Sucessfully added ' + comic['ComicName'] + ' (' + str(SeriesYear) + ') by directly using the ComicVine ID')
return
def GCDimport(gcomicid, pullupd=None,imported=None,ogcname=None):
def GCDimport(gcomicid, pullupd=None, imported=None, ogcname=None):
# this is for importing via GCD only and not using CV.
# used when volume spanning is discovered for a Comic (and can't be added using CV).
# Issue Counts are wrong (and can't be added).
@ -707,11 +707,11 @@ def GCDimport(gcomicid, pullupd=None,imported=None,ogcname=None):
resultURL = "/series/" + str(comicid) + "/"
gcdinfo=parseit.GCDdetails(comseries=None, resultURL=resultURL, vari_loop=0, ComicID=gcdcomicid, TotalIssues=ComicIssues, issvariation=None, resultPublished=None)
if gcdinfo == "No Match":
logger.warn("No matching result found for " + ComicName + " (" + ComicYear + ")" )
logger.warn("No matching result found for " + ComicName + " (" + ComicYear + ")")
updater.no_searchresults(gcomicid)
nomatch = "true"
return nomatch
logger.info(u"Sucessfully retrieved details for " + ComicName )
logger.info(u"Sucessfully retrieved details for " + ComicName)
# print ("Series Published" + parseit.resultPublished)
#--End
@ -726,11 +726,11 @@ def GCDimport(gcomicid, pullupd=None,imported=None,ogcname=None):
if ':' in u_comicname or '/' in u_comicname or ',' in u_comicname:
comicdir = u_comicname
if ':' in comicdir:
comicdir = comicdir.replace(':','')
comicdir = comicdir.replace(':', '')
if '/' in comicdir:
comicdir = comicdir.replace('/','-')
comicdir = comicdir.replace('/', '-')
if ',' in comicdir:
comicdir = comicdir.replace(',','')
comicdir = comicdir.replace(',', '')
else: comicdir = u_comicname
series = comicdir
@ -774,7 +774,7 @@ def GCDimport(gcomicid, pullupd=None,imported=None,ogcname=None):
comicIssues = gcdinfo['totalissues']
#let's download the image...
if os.path.exists(mylar.CACHE_DIR):pass
if os.path.exists(mylar.CACHE_DIR): pass
else:
#let's make the dir.
try:
@ -790,7 +790,7 @@ def GCDimport(gcomicid, pullupd=None,imported=None,ogcname=None):
urllib.urlretrieve(str(ComicImage), str(coverfile))
try:
with open(str(coverfile)) as f:
ComicImage = os.path.join('cache',str(gcomicid) + ".jpg")
ComicImage = os.path.join('cache', str(gcomicid) + ".jpg")
#this is for Firefox when outside the LAN...it works, but I don't know how to implement it
#without breaking the normal flow for inside the LAN (above)
@ -800,7 +800,7 @@ def GCDimport(gcomicid, pullupd=None,imported=None,ogcname=None):
#if the comic cover local is checked, save a cover.jpg to the series folder.
if mylar.COMIC_COVER_LOCAL:
comiclocal = os.path.join(comlocation + "/cover.jpg")
shutil.copy(ComicImage,comiclocal)
shutil.copy(ComicImage, comiclocal)
except IOError as e:
logger.error(u"Unable to save cover locally at this time.")
@ -832,7 +832,7 @@ def GCDimport(gcomicid, pullupd=None,imported=None,ogcname=None):
if pullupd is None:
helpers.ComicSort(sequence='update')
logger.info(u"Sucessfully retrieved issue details for " + ComicName )
logger.info(u"Sucessfully retrieved issue details for " + ComicName)
n = 0
iscnt = int(comicIssues)
issnum = []
@ -856,7 +856,7 @@ def GCDimport(gcomicid, pullupd=None,imported=None,ogcname=None):
if gcdinfo['gcdvariation'] == 'gcd':
#print ("gcd-variation accounted for.")
issdate = '0000-00-00'
int_issnum = int ( issis / 1000 )
int_issnum = int (issis / 1000)
break
if 'nn' in str(gcdval['GCDIssue']):
#no number detected - GN, TP or the like
@ -867,7 +867,7 @@ def GCDimport(gcomicid, pullupd=None,imported=None,ogcname=None):
issst = str(gcdval['GCDIssue']).find('.')
issb4dec = str(gcdval['GCDIssue'])[:issst]
#if the length of decimal is only 1 digit, assume it's a tenth
decis = str(gcdval['GCDIssue'])[issst+1:]
decis = str(gcdval['GCDIssue'])[issst +1:]
if len(decis) == 1:
decisval = int(decis) * 10
issaftdec = str(decisval)
@ -881,7 +881,7 @@ def GCDimport(gcomicid, pullupd=None,imported=None,ogcname=None):
gcdis = int(str(gcdval['GCDIssue'])) * 1000
gcd_issue = str(gcdval['GCDIssue'])
#get the latest issue / date using the date.
int_issnum = int( gcdis / 1000 )
int_issnum = int(gcdis / 1000)
issdate = str(gcdval['GCDDate'])
issid = "G" + str(gcdval['IssueID'])
if gcdval['GCDDate'] > latestdate:
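Several hunks in this commit touch Mylar's integer issue-number scheme, so a short gloss may help reviewers: issue numbers are stored as the printed number scaled by 1000, with decimal digits folded into the sub-1000 remainder (a lone decimal digit is first scaled by 10, per the decisval branch above) so that #5.1 sorts between #5 and #6. A minimal sketch of the convention; the name and the final combine step are assumptions, not Mylar's actual helper:

    def int_issue(issue_str):
        # encode a printed issue number as a sortable integer
        issue_str = issue_str.strip()
        if '.' not in issue_str:
            return int(issue_str) * 1000
        whole, dec = issue_str.split('.', 1)
        if len(dec) == 1:
            dec = int(dec) * 10    # a tenth, as in the decisval branch
        else:
            dec = int(dec)
        return int(whole) * 1000 + dec   # combine step assumed

    # int_issue('12')  -> 12000
    # int_issue('5.1') -> 5010  (after 5000, before 6000)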
@ -946,7 +946,7 @@ def GCDimport(gcomicid, pullupd=None,imported=None,ogcname=None):
if mylar.CVINFO:
if not os.path.exists(comlocation + "/cvinfo"):
with open(comlocation + "/cvinfo","w") as text_file:
with open(comlocation + "/cvinfo", "w") as text_file:
text_file.write("http://www.comicvine.com/volume/49-" + str(comicid))
logger.info(u"Updating complete for: " + ComicName)
@ -957,10 +957,10 @@ def GCDimport(gcomicid, pullupd=None,imported=None,ogcname=None):
else:
if mylar.IMP_MOVE:
logger.info("Mass import - Move files")
moveit.movefiles(gcomicid,comlocation,ogcname)
moveit.movefiles(gcomicid, comlocation, ogcname)
else:
logger.info("Mass import - Moving not Enabled. Setting Archived Status for import.")
moveit.archivefiles(gcomicid,ogcname)
moveit.archivefiles(gcomicid, ogcname)
#check for existing files...
updater.forceRescan(gcomicid)
@ -989,7 +989,7 @@ def GCDimport(gcomicid, pullupd=None,imported=None,ogcname=None):
logger.info(u"Finished grabbing what I could.")
def issue_collection(issuedata,nostatus):
def issue_collection(issuedata, nostatus):
myDB = db.DBConnection()
nowdate = datetime.datetime.now()
@ -1024,7 +1024,7 @@ def issue_collection(issuedata,nostatus):
# Only change the status & add DateAdded if the issue is already in the database
if iss_exists is None:
newValueDict['DateAdded'] = helpers.today()
datechk = re.sub('-','', issue['ReleaseDate']).strip() # converts date to 20140718 format
datechk = re.sub('-', '', issue['ReleaseDate']).strip() # converts date to 20140718 format
#logger.fdebug('issue #' + str(issue['Issue_Number']) + 'does not exist in db.')
if mylar.AUTOWANT_ALL:
newValueDict['Status'] = "Wanted"
@ -1066,7 +1066,7 @@ def manualAnnual(manual_comicid, comicname, comicyear, comicid):
n = 0
noissues = sr['ComicIssues']
logger.fdebug('there are ' + str(noissues) + ' annuals within this series.')
issued = cv.getComic(re.sub('4050-','',manual_comicid).strip(),'issue')
issued = cv.getComic(re.sub('4050-', '', manual_comicid).strip(), 'issue')
while (n < int(noissues)):
try:
firstval = issued['issuechoice'][n]
@ -1095,9 +1095,9 @@ def manualAnnual(manual_comicid, comicname, comicyear, comicid):
"ReleaseDate": stdate,
"IssueName": issname,
"ComicID": comicid, #this is the series ID
"ReleaseComicID": re.sub('4050-','',manual_comicid).strip(), #this is the series ID for the annual(s)
"ReleaseComicID": re.sub('4050-', '', manual_comicid).strip(), #this is the series ID for the annual(s)
"ComicName": comicname, #series ComicName
"ReleaseComicName" :sr['ComicName'], #series ComicName for the manual_comicid
"ReleaseComicName": sr['ComicName'], #series ComicName for the manual_comicid
"Status": "Skipped"}
#need to add in the values for the new series to be added.
#"M_ComicName": sr['ComicName'],
@ -1121,7 +1121,7 @@ def updateissuedata(comicid, comicname=None, issued=None, comicIssues=None, call
#to facilitate independent calls to updateissuedata ONLY, account for data not available and get it.
#chkType comes from the weeklypulllist - either 'annual' or not to distinguish annuals vs. issues
if comicIssues is None:
comic = cv.getComic(comicid,'comic')
comic = cv.getComic(comicid, 'comic')
if comic is None:
logger.warn('Error retrieving from ComicVine - either the site is down or you are not using your own CV API key')
return
@ -1132,7 +1132,7 @@ def updateissuedata(comicid, comicname=None, issued=None, comicIssues=None, call
if comicname is None:
comicname = comic['ComicName']
if issued is None:
issued = cv.getComic(comicid,'issue')
issued = cv.getComic(comicid, 'issue')
if issued is None:
logger.warn('Error retrieving from ComicVine - either the site is down or you are not using your own CV API key')
return
@ -1177,7 +1177,7 @@ def updateissuedata(comicid, comicname=None, issued=None, comicIssues=None, call
issdate = str(firstval['Issue_Date'])
storedate = str(firstval['Store_Date'])
if issnum.isdigit():
int_issnum = int( issnum ) * 1000
int_issnum = int(issnum) * 1000
else:
if 'a.i.' in issnum.lower() or 'ai' in issnum.lower():
issnum = re.sub('\.', '', issnum)
@ -1199,7 +1199,7 @@ def updateissuedata(comicid, comicname=None, issued=None, comicIssues=None, call
#issnum = utf-8 will encode the infinity symbol without any help
int_issnum = 9999999999 * 1000 # set 9999999999 for integer value of issue
elif '.' in issnum or ',' in issnum:
if ',' in issnum: issnum = re.sub(',','.', issnum)
if ',' in issnum: issnum = re.sub(',', '.', issnum)
issst = str(issnum).find('.')
#logger.fdebug("issst:" + str(issst))
if issst == 0:
@ -1208,7 +1208,7 @@ def updateissuedata(comicid, comicname=None, issued=None, comicIssues=None, call
issb4dec = str(issnum)[:issst]
#logger.fdebug("issb4dec:" + str(issb4dec))
#if the length of decimal is only 1 digit, assume it's a tenth
decis = str(issnum)[issst+1:]
decis = str(issnum)[issst +1:]
#logger.fdebug("decis:" + str(decis))
if len(decis) == 1:
decisval = int(decis) * 10
@ -1237,7 +1237,7 @@ def updateissuedata(comicid, comicname=None, issued=None, comicIssues=None, call
if x < 0:
logger.info('I have encountered a negative issue #: ' + str(issnum) + '. Trying to accommodate.')
logger.fdebug('value of x is : ' + str(x))
int_issnum = (int(x)*1000) - 1
int_issnum = (int(x) *1000) - 1
else: raise ValueError
except ValueError, e:
x = 0
@ -1248,9 +1248,9 @@ def updateissuedata(comicid, comicname=None, issued=None, comicIssues=None, call
if issnum[x].isalpha():
#take first occurrence of alpha in string and carry it through
tstord = issnum[x:].rstrip()
tstord = re.sub('[\-\,\.\+]','', tstord).rstrip()
tstord = re.sub('[\-\,\.\+]', '', tstord).rstrip()
issno = issnum[:x].rstrip()
issno = re.sub('[\-\,\.\+]','', issno).rstrip()
issno = re.sub('[\-\,\.\+]', '', issno).rstrip()
try:
isschk = float(issno)
except ValueError, e:
@ -1325,10 +1325,10 @@ def updateissuedata(comicid, comicname=None, issued=None, comicIssues=None, call
else:
if len(issuedata) >= 1 and not calledfrom == 'dbupdate':
logger.fdebug('initiating issue updating - info & status')
issue_collection(issuedata,nostatus='False')
issue_collection(issuedata, nostatus='False')
else:
logger.fdebug('initiating issue updating - just the info')
issue_collection(issuedata,nostatus='True')
issue_collection(issuedata, nostatus='True')
styear = str(SeriesYear)
@ -1337,7 +1337,7 @@ def updateissuedata(comicid, comicname=None, issued=None, comicIssues=None, call
else:
stmonth = helpers.fullmonth(firstdate[5:7])
ltyear = re.sub('/s','', latestdate[:4])
ltyear = re.sub('/s', '', latestdate[:4])
if latestdate[5:7] == '00':
ltmonth = "?"
else:
@ -1346,12 +1346,12 @@ def updateissuedata(comicid, comicname=None, issued=None, comicIssues=None, call
#try to determine if it's an 'actively' published comic from above dates
#threshold is if it's within a month (<55 days) let's assume it's recent.
try:
c_date = datetime.date(int(latestdate[:4]),int(latestdate[5:7]),1)
c_date = datetime.date(int(latestdate[:4]), int(latestdate[5:7]), 1)
except:
logger.error('Cannot determine Latest Date for given series. This is most likely due to an issue having a date of : 0000-00-00')
latestdate = str(SeriesYear) + '-01-01'
logger.error('Setting Latest Date to be ' + str(latestdate) + '. You should inform CV that the issue data is stale.')
c_date = datetime.date(int(latestdate[:4]),int(latestdate[5:7]),1)
c_date = datetime.date(int(latestdate[:4]), int(latestdate[5:7]), 1)
n_date = datetime.date.today()
recentchk = (n_date - c_date).days
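The 'actively published' test above boils down to simple date arithmetic: build a date from the latest issue's year and month (day pinned to 1), diff it against today, and treat anything inside the ~55-day threshold mentioned in the comment as recent. A condensed sketch:

    import datetime

    def is_recent(latestdate, threshold_days=55):
        # latestdate is 'YYYY-MM-DD'; the day is pinned to 1, matching
        # the c_date construction in the hunk above
        c_date = datetime.date(int(latestdate[:4]), int(latestdate[5:7]), 1)
        return (datetime.date.today() - c_date).days < threshold_days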
@ -1470,7 +1470,7 @@ def annual_check(ComicName, SeriesYear, comicid, issuetype, issuechk, weeklyissu
logger.fdebug('[IMPORTER-ANNUAL] - ' + str(issueid) + ' already exists & was refreshed.')
num_res+=1 # need to manually increment since not a for-next loop
continue
issued = cv.getComic(issueid,'issue')
issued = cv.getComic(issueid, 'issue')
if issued is None or len(issued) == 0:
logger.fdebug('[IMPORTER-ANNUAL] - Could not find any annual information...')
pass
@ -1500,12 +1500,12 @@ def annual_check(ComicName, SeriesYear, comicid, issuetype, issuechk, weeklyissu
"IssueName": issname,
"ComicID": comicid,
"ComicName": ComicName,
"ReleaseComicID": re.sub('4050-','',firstval['Comic_ID']).strip(),
"ReleaseComicID": re.sub('4050-', '', firstval['Comic_ID']).strip(),
"ReleaseComicName": sr['name']}
iss_exists = myDB.selectone('SELECT * from annuals WHERE IssueID=?', [issid]).fetchone()
if iss_exists is None:
datechk = re.sub('-','', issdate).strip() # converts date to 20140718 format
datechk = re.sub('-', '', issdate).strip() # converts date to 20140718 format
if mylar.AUTOWANT_ALL:
newVals['Status'] = "Wanted"
elif int(datechk) >= int(nowtime) and mylar.AUTOWANT_UPCOMING:
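The Wanted-vs-Skipped decision for new annual rows hinges on comparing dates as plain integers: stripping the dashes from an ISO date yields a number like 20140718 whose ordering matches chronological order. A minimal illustration (nowtime is assumed to be today's date in the same format, as the surrounding code suggests):

    import re
    import datetime

    def is_upcoming(issdate):
        # '2014-07-18' -> 20140718; integer order == chronological order
        datechk = int(re.sub('-', '', issdate).strip())
        nowtime = int(datetime.date.today().strftime('%Y%m%d'))
        return datechk >= nowtime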

View File

@ -48,8 +48,8 @@ def libraryScan(dir=None, append=False, ComicID=None, ComicName=None, cron=None)
comic_list = []
comiccnt = 0
extensions = ('cbr','cbz')
for r,d,f in os.walk(dir):
extensions = ('cbr', 'cbz')
for r, d, f in os.walk(dir):
#for directory in d[:]:
# if directory.startswith("."):
# d.remove(directory)
@ -66,10 +66,10 @@ def libraryScan(dir=None, append=False, ComicID=None, ComicName=None, cron=None)
unicode_comic_path = comicpath.decode(mylar.SYS_ENCODING, 'replace')
comiccnt+=1
comic_dict = { 'ComicFilename': comic,
comic_dict = {'ComicFilename': comic,
'ComicLocation': comicpath,
'ComicSize': comicsize,
'Unicode_ComicLocation': unicode_comic_path }
'Unicode_ComicLocation': unicode_comic_path}
comic_list.append(comic_dict)
logger.info("I've found a total of " + str(comiccnt) + " comics....analyzing now")
@ -132,7 +132,7 @@ def libraryScan(dir=None, append=False, ComicID=None, ComicName=None, cron=None)
watchfound = 0
datelist = ['jan','feb','mar','apr','may','jun','jul','aug','sep','oct','nov','dec']
datelist = ['jan', 'feb', 'mar', 'apr', 'may', 'jun', 'jul', 'aug', 'sep', 'oct', 'nov', 'dec']
# datemonth = {'one':1,'two':2,'three':3,'four':4,'five':5,'six':6,'seven':7,'eight':8,'nine':9,'ten':10,'eleven':$
# #search for number as text, and change to numeric
# for numbs in basnumbs:
@ -258,7 +258,7 @@ def libraryScan(dir=None, append=False, ComicID=None, ComicName=None, cron=None)
#make sure it's a number on either side of decimal and assume decimal issue.
decst = cs[i].find('.')
dec_st = cs[i][:decst]
dec_en = cs[i][decst+1:]
dec_en = cs[i][decst +1:]
logger.fdebug("st: " + str(dec_st))
logger.fdebug("en: " + str(dec_en))
if dec_st.isdigit() and dec_en.isdigit():
@ -294,7 +294,7 @@ def libraryScan(dir=None, append=False, ComicID=None, ComicName=None, cron=None)
while (findcn < len(cnsplit)):
cname = cname + cs[findcn] + " "
findcn+=1
cname = cname[:len(cname)-1] # drop the end space...
cname = cname[:len(cname) -1] # drop the end space...
print ("assuming name is : " + cname)
com_NAME = cname
print ("com_NAME : " + com_NAME)
@ -323,7 +323,7 @@ def libraryScan(dir=None, append=False, ComicID=None, ComicName=None, cron=None)
logger.fdebug('charcount is : ' + str(charcount))
if charcount > 0:
logger.fdebug('entering loop')
for i,m in enumerate(re.finditer('\#', d_filename)):
for i, m in enumerate(re.finditer('\#', d_filename)):
if m.end() <= displength:
logger.fdebug(comfilename[m.start():m.end()])
# find occurrence in c_filename, then replace into d_filename so special characters are brought across
@ -341,12 +341,12 @@ def libraryScan(dir=None, append=False, ComicID=None, ComicName=None, cron=None)
#changed this from '' to ' '
comic_iss_b4 = re.sub('[\-\:\,]', ' ', comic_andiss)
comic_iss = comic_iss_b4.replace('.',' ')
comic_iss = comic_iss_b4.replace('.', ' ')
comic_iss = re.sub('[\s+]', ' ', comic_iss).strip()
logger.fdebug("adjusted comic and issue: " + str(comic_iss))
#remove 'the' from here for proper comparisons.
if ' the ' in comic_iss.lower():
comic_iss = re.sub('\\bthe\\b','', comic_iss).strip()
comic_iss = re.sub('\\bthe\\b', '', comic_iss).strip()
splitit = comic_iss.split(None)
logger.fdebug("adjusting from: " + str(comic_iss_b4) + " to: " + str(comic_iss))
#here we cycle through the Watchlist looking for a match.
@ -377,7 +377,7 @@ def libraryScan(dir=None, append=False, ComicID=None, ComicName=None, cron=None)
scount = 0
logger.fdebug("search-length: " + str(splitst))
logger.fdebug("Watchlist-length: " + str(len(watchcomic_split)))
while ( n <= (splitst)-1 ):
while (n <= (splitst) -1):
logger.fdebug("splitit: " + str(splitit[n]))
if n < (splitst) and n < len(watchcomic_split):
logger.fdebug(str(n) + " Comparing: " + str(watchcomic_split[n]) + " .to. " + str(splitit[n]))
@ -410,7 +410,7 @@ def libraryScan(dir=None, append=False, ComicID=None, ComicName=None, cron=None)
logger.fdebug("scount:" + str(wordcnt))
totalcnt = int(splitst)
logger.fdebug("splitit-len:" + str(totalcnt))
spercent = (wordcnt/totalcnt) * 100
spercent = (wordcnt /totalcnt) * 100
logger.fdebug("we got " + str(spercent) + " percent.")
if int(spercent) >= 80:
logger.fdebug("it's a go captain... - we matched " + str(spercent) + "%!")
@ -451,14 +451,14 @@ def libraryScan(dir=None, append=False, ComicID=None, ComicName=None, cron=None)
impid = dispname + '-' + str(result_comyear) + '-' + str(comiss) #com_NAME + "-" + str(result_comyear) + "-" + str(comiss)
logger.fdebug("impid: " + str(impid))
import_by_comicids.append({
"impid" : impid,
"watchmatch" : watchmatch,
"displayname" : dispname,
"comicname" : dispname, #com_NAME,
"comicyear" : result_comyear,
"volume" : vol_label,
"comfilename" : comfilename,
"comlocation" : comlocation.decode(mylar.SYS_ENCODING)
"impid": impid,
"watchmatch": watchmatch,
"displayname": dispname,
"comicname": dispname, #com_NAME,
"comicyear": result_comyear,
"volume": vol_label,
"comfilename": comfilename,
"comlocation": comlocation.decode(mylar.SYS_ENCODING)
})
logger.fdebug('import_by_ids: ' + str(import_by_comicids))
@ -494,12 +494,12 @@ def libraryScan(dir=None, append=False, ComicID=None, ComicName=None, cron=None)
renameit = helpers.rename_param(watch_comicid, watch_comicname, watch_comicyear, watch_comiciss)
nfilename = renameit['nfilename']
dst_path = os.path.join(watch_comlocation,nfilename)
dst_path = os.path.join(watch_comlocation, nfilename)
if str(watch_comicid) not in comicids:
comicids.append(watch_comicid)
else:
print("Renaming files not enabled, keeping original filename(s)")
dst_path = os.path.join(watch_comlocation,orig_filename)
dst_path = os.path.join(watch_comlocation, orig_filename)
#os.rename(os.path.join(self.nzb_folder, str(ofilename)), os.path.join(self.nzb_folder,str(nfilename + ext)))
#src = os.path.join(, str(nfilename + ext))
@ -523,7 +523,7 @@ def libraryScan(dir=None, append=False, ComicID=None, ComicName=None, cron=None)
else:
print("...Existing status: " + str(issuechk['Status']))
control = {"IssueID": issuechk['IssueID']}
values = { "Status": "Archived"}
values = {"Status": "Archived"}
print ("...changing status of " + str(issuechk['Issue_Number']) + " to Archived ")
myDB.upsert("issues", values, control)
if str(watch_comicid) not in comicids:
@ -533,7 +533,7 @@ def libraryScan(dir=None, append=False, ComicID=None, ComicName=None, cron=None)
else:
c_upd = len(comicids)
c = 0
while (c < c_upd ):
while (c < c_upd):
print ("Rescanning.. " + str(c))
updater.forceRescan(c)
if not len(import_by_comicids):
@ -548,7 +548,7 @@ def scanLibrary(scan=None, queue=None):
valreturn = []
if scan:
try:
soma,noids = libraryScan()
soma, noids = libraryScan()
except Exception, e:
logger.error('Unable to complete the scan: %s' % e)
return
@ -593,8 +593,8 @@ def scanLibrary(scan=None, queue=None):
# unzip -z filename.cbz will show the comment field of the zip which contains the metadata.
#self.importResults()
valreturn.append({"somevalue" : 'self.ie',
"result" : 'success'})
valreturn.append({"somevalue": 'self.ie',
"result": 'success'})
return queue.put(valreturn)
#raise cherrypy.HTTPRedirect("importResults")

View File

@ -43,7 +43,7 @@ if platform.python_version() == '2.7.6':
httplib.HTTPConnection._http_vsn = 10
httplib.HTTPConnection._http_vsn_str = 'HTTP/1.0'
def pullsearch(comicapi,comicquery,offset,explicit,type):
def pullsearch(comicapi, comicquery, offset, explicit, type):
u_comicquery = urllib.quote(comicquery.encode('utf-8').strip())
u_comicquery = u_comicquery.replace(" ", "%20")
@ -87,7 +87,7 @@ def findComic(name, mode, issue, limityear=None, explicit=None, type=None):
chars = set('!?*')
if any((c in chars) for c in name):
name = '"'+name+'"'
name = '"' +name +'"'
#print ("limityear: " + str(limityear))
if limityear is None: limityear = 'None'
@ -123,7 +123,7 @@ def findComic(name, mode, issue, limityear=None, explicit=None, type=None):
type = 'volume'
#let's find out how many results we get from the query...
searched = pullsearch(comicapi,comicquery,0,explicit,type)
searched = pullsearch(comicapi, comicquery, 0, explicit, type)
if searched is None: return False
totalResults = searched.getElementsByTagName('number_of_total_results')[0].firstChild.wholeText
logger.fdebug("there are " + str(totalResults) + " search results...")
@ -136,12 +136,12 @@ def findComic(name, mode, issue, limityear=None, explicit=None, type=None):
#2012/22/02 - CV API flipped back to offset usage instead of page
if explicit == 'all' or explicit == 'loose':
#all / loose uses page for offset
offsetcount = (countResults/100) + 1
offsetcount = (countResults /100) + 1
else:
#explicit uses offset
offsetcount = countResults
searched = pullsearch(comicapi,comicquery,offsetcount,explicit,type)
searched = pullsearch(comicapi, comicquery, offsetcount, explicit, type)
comicResults = searched.getElementsByTagName(type) #('volume')
body = ''
n = 0
@ -155,11 +155,11 @@ def findComic(name, mode, issue, limityear=None, explicit=None, type=None):
#call cv.py here to find out issue count in story arc
try:
logger.fdebug('story_arc ascension')
names = len( result.getElementsByTagName('name') )
names = len(result.getElementsByTagName('name'))
n = 0
logger.fdebug('length: ' + str(names))
xmlpub = None #set this incase the publisher field isn't populated in the xml
while ( n < names ):
while (n < names):
logger.fdebug(result.getElementsByTagName('name')[n].parentNode.nodeName)
if result.getElementsByTagName('name')[n].parentNode.nodeName == 'story_arc':
logger.fdebug('yes')
@ -180,11 +180,11 @@ def findComic(name, mode, issue, limityear=None, explicit=None, type=None):
logger.warn('error retrieving story arc search results.')
return
siteurl = len( result.getElementsByTagName('site_detail_url') )
siteurl = len(result.getElementsByTagName('site_detail_url'))
s = 0
logger.fdebug('length: ' + str(names))
xmlurl = None
while ( s < siteurl ):
while (s < siteurl):
logger.fdebug(result.getElementsByTagName('site_detail_url')[s].parentNode.nodeName)
if result.getElementsByTagName('site_detail_url')[s].parentNode.nodeName == 'story_arc':
try:
@ -216,15 +216,15 @@ def findComic(name, mode, issue, limityear=None, explicit=None, type=None):
try:
logger.fdebug('story_arc ascension')
issuecount = len( arcdom.getElementsByTagName('issue') )
issuecount = len(arcdom.getElementsByTagName('issue'))
issuedom = arcdom.getElementsByTagName('issue')
isc = 0
arclist = ''
for isd in issuedom:
zeline = isd.getElementsByTagName('id')
isdlen = len( zeline )
isdlen = len(zeline)
isb = 0
while ( isb < isdlen):
while (isb < isdlen):
if isc == 0:
arclist = str(zeline[isb].firstChild.wholeText).strip()
else:
@ -240,7 +240,7 @@ def findComic(name, mode, issue, limityear=None, explicit=None, type=None):
try:
firstid = None
arcyear = None
fid = len ( arcdom.getElementsByTagName('id') )
fid = len (arcdom.getElementsByTagName('id'))
fi = 0
while (fi < fid):
if arcdom.getElementsByTagName('id')[fi].parentNode.nodeName == 'first_appeared_in_issue':
@ -253,7 +253,7 @@ def findComic(name, mode, issue, limityear=None, explicit=None, type=None):
if firstid is not None:
firstdom = cv.pulldetails(comicid=None, type='firstissue', issueid=firstid)
logger.fdebug('success')
arcyear = cv.GetFirstIssue(firstid,firstdom)
arcyear = cv.GetFirstIssue(firstid, firstdom)
except:
logger.fdebug('Unable to retrieve first issue details. Not calculating at this time.')
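The repeated parentNode.nodeName checks in this hunk are how the code tells a <name> belonging to the story arc itself apart from <name> elements nested inside child records, since getElementsByTagName searches the whole subtree. A minimal minidom illustration of the idea:

    from xml.dom import minidom

    xml = ('<story_arc><name>Civil War</name>'
           '<publisher><name>Marvel</name></publisher></story_arc>')
    dom = minidom.parseString(xml)
    for node in dom.getElementsByTagName('name'):
        # keep only the <name> whose direct parent is the arc element
        if node.parentNode.nodeName == 'story_arc':
            print(node.firstChild.wholeText)   # -> Civil War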
@ -316,7 +316,7 @@ def findComic(name, mode, issue, limityear=None, explicit=None, type=None):
#logger.info('The maximum issue number should be roughly # ' + str(cnt_numerical))
#logger.info('The limiter (issue max that we know of) is # ' + str(limiter))
if cnt_numerical >= limiter:
cnl = len ( result.getElementsByTagName('name') )
cnl = len (result.getElementsByTagName('name'))
cl = 0
xmlTag = 'None'
xmlimage = "cache/blankcover.jpg"
@ -336,7 +336,7 @@ def findComic(name, mode, issue, limityear=None, explicit=None, type=None):
#logger.info('name:' + str(xmlTag) + ' -- ' + str(xmlYr))
if xmlYr in limityear or limityear == 'None':
xmlurl = result.getElementsByTagName('site_detail_url')[0].firstChild.wholeText
idl = len ( result.getElementsByTagName('id') )
idl = len (result.getElementsByTagName('id'))
idt = 0
xmlid = None
while (idt < idl):

View File

@ -4,7 +4,7 @@ import os
import shutil
def movefiles(comicid,comlocation,ogcname,imported=None):
def movefiles(comicid, comlocation, ogcname, imported=None):
myDB = db.DBConnection()
logger.fdebug('comlocation is : ' + str(comlocation))
logger.fdebug('original comicname is : ' + str(ogcname))
@ -16,17 +16,17 @@ def movefiles(comicid,comlocation,ogcname,imported=None):
srcimp = impr['ComicLocation']
orig_filename = impr['ComicFilename']
orig_iss = impr['impID'].rfind('-')
orig_iss = impr['impID'][orig_iss+1:]
orig_iss = impr['impID'][orig_iss +1:]
logger.fdebug("Issue :" + str(orig_iss))
#before moving check to see if Rename to Mylar structure is enabled.
if mylar.IMP_RENAME and mylar.FILE_FORMAT != '':
logger.fdebug("Renaming files according to configuration details : " + str(mylar.FILE_FORMAT))
renameit = helpers.rename_param(comicid, impr['ComicName'], orig_iss, orig_filename)
nfilename = renameit['nfilename']
dstimp = os.path.join(comlocation,nfilename)
dstimp = os.path.join(comlocation, nfilename)
else:
logger.fdebug("Renaming files not enabled, keeping original filename(s)")
dstimp = os.path.join(comlocation,orig_filename)
dstimp = os.path.join(comlocation, orig_filename)
logger.info("moving " + str(srcimp) + " ... to " + str(dstimp))
try:
@ -40,11 +40,11 @@ def movefiles(comicid,comlocation,ogcname,imported=None):
if results is not None:
for result in results:
controlValue = {"impID": result['impid']}
newValue = {"Status": "Imported" }
newValue = {"Status": "Imported"}
myDB.upsert("importresults", newValue, controlValue)
return
def archivefiles(comicid,ogcname):
def archivefiles(comicid, ogcname):
myDB = db.DBConnection()
# if move files isn't enabled, let's set all found comics to Archive status :)
result = myDB.select("SELECT * FROM importresults WHERE ComicName=?", [ogcname])
@ -53,5 +53,5 @@ def archivefiles(comicid,ogcname):
ogdir = result['Location']
origdir = os.path.join(os.path.dirname(ogdir))
updater.forceRescan(comicid,archive=origdir) #send to rescanner with archive mode turned on
updater.forceRescan(comicid, archive=origdir) #send to rescanner with archive mode turned on

View File

@ -19,7 +19,7 @@ from mylar import logger
def newpull():
pagelinks = "http://www.previewsworld.com/Home/1/1/71/952"
pageresponse = urllib2.urlopen ( pagelinks )
pageresponse = urllib2.urlopen (pagelinks)
soup = BeautifulSoup (pageresponse)
getthedate = soup.findAll("div", {"class": "Headline"})[0]
#the date will be in the FIRST ahref
@ -42,7 +42,7 @@ def newpull():
endthis = False
pull_list = []
publishers = {'914':'DARK HORSE COMICS', '915':'DC COMICS', '916':'IDW PUBLISHING', '917':'IMAGE COMICS', '918':'MARVEL COMICS', '952':'COMICS & GRAPHIC NOVELS'}
publishers = {'914': 'DARK HORSE COMICS', '915': 'DC COMICS', '916': 'IDW PUBLISHING', '917': 'IMAGE COMICS', '918': 'MARVEL COMICS', '952': 'COMICS & GRAPHIC NOVELS'}
while (x < lenlinks):
headt = cntlinks[x] #iterate through the hrefs pulling out only results.
@ -64,7 +64,7 @@ def newpull():
if issue_lk == -1:
continue
#headName = headt.findNext(text=True)
publisher_id = issue_link[issue_lk-3:issue_lk]
publisher_id = issue_link[issue_lk -3:issue_lk]
for pub in publishers:
if pub == publisher_id:
isspublisher = publishers[pub]
@ -85,7 +85,7 @@ def newpull():
"name": found_iss[1].findNext(text=True),
"price": found_iss[2],
"publisher": isspublisher,
"ID" : urlID})
"ID": urlID})
if endthis == True: break
x+=1
@ -95,7 +95,7 @@ def newpull():
try:
csvfile = open(str(except_file), 'rb')
csvfile.close()
except (OSError,IOError):
except (OSError, IOError):
logger.fdebug('file does not exist - continuing.')
else:
logger.fdebug('file exists - removing.')
@ -112,10 +112,10 @@ def newpull():
exceptln = pl['publisher'] + "\n" + str(pl['ID']) + "\t" + str(pl['name']) + "\t" + str(pl['price'])
for lb in breakhtml:
exceptln = re.sub(lb,'', exceptln).strip()
exceptln = re.sub(lb, '', exceptln).strip()
exceptline = exceptln.decode('utf-8','ignore')
f.write('%s\n' % (exceptline.encode('ascii','replace').strip()))
exceptline = exceptln.decode('utf-8', 'ignore')
f.write('%s\n' % (exceptline.encode('ascii', 'replace').strip()))
oldpub = pl['publisher']

View File

@ -57,7 +57,7 @@ class PROWL:
'application': 'Mylar',
'event': event,
'description': message.encode("utf-8"),
'priority': mylar.PROWL_PRIORITY }
'priority': mylar.PROWL_PRIORITY}
http_handler.request("POST",
"/publicapi/add",
@ -129,10 +129,10 @@ class NMA:
event = prline
description = prline2
data = { 'apikey': apikey, 'application':'Mylar', 'event': event, 'description': description, 'priority': priority}
data = {'apikey': apikey, 'application': 'Mylar', 'event': event, 'description': description, 'priority': priority}
logger.info(module + ' Sending notification request to NotifyMyAndroid')
request = self._send(data,module)
request = self._send(data, module)
if not request:
logger.warn(module + ' Error sending notification request to NotifyMyAndroid')
@ -173,7 +173,7 @@ class PUSHOVER:
'user': mylar.PUSHOVER_USERKEY,
'message': message.encode("utf-8"),
'title': event,
'priority': mylar.PUSHOVER_PRIORITY }
'priority': mylar.PUSHOVER_PRIORITY}
http_handler.request("POST",
"/1/messages.json",
@ -323,7 +323,7 @@ class PUSHBULLET:
data = {'type': "note", #'device_iden': self.deviceid,
'title': event.encode('utf-8'), #"mylar",
'body': message.encode('utf-8') }
'body': message.encode('utf-8')}
http_handler.request("POST",
"/v2/pushes",

View File

@ -42,10 +42,10 @@ def GCDScraper(ComicName, ComicYear, Total, ComicID, quickmatch=None):
comicnm_1 = re.sub('\+', '%2B', comicnm)
comicnm = re.sub(' ', '+', comicnm_1)
input = 'http://www.comics.org/search/advanced/process/?target=series&method=icontains&logic=False&order2=date&order3=&start_date=' + str(comicyr) + '-01-01&end_date=' + str(NOWyr) + '-12-31&series=' + str(comicnm) + '&is_indexed=None'
response = urllib2.urlopen ( input )
soup = BeautifulSoup ( response)
cnt1 = len(soup.findAll("tr", {"class" : "listing_even"}))
cnt2 = len(soup.findAll("tr", {"class" : "listing_odd"}))
response = urllib2.urlopen (input)
soup = BeautifulSoup (response)
cnt1 = len(soup.findAll("tr", {"class": "listing_even"}))
cnt2 = len(soup.findAll("tr", {"class": "listing_odd"}))
cnt = int(cnt1 + cnt2)
@ -59,29 +59,29 @@ def GCDScraper(ComicName, ComicYear, Total, ComicID, quickmatch=None):
n_odd = -1
n_even = -1
n = 0
while ( n < cnt ):
while (n < cnt):
if n%2==0:
n_even+=1
resultp = soup.findAll("tr", {"class" : "listing_even"})[n_even]
resultp = soup.findAll("tr", {"class": "listing_even"})[n_even]
else:
n_odd+=1
resultp = soup.findAll("tr", {"class" : "listing_odd"})[n_odd]
resultp = soup.findAll("tr", {"class": "listing_odd"})[n_odd]
rtp = resultp('a')[1]
resultName.append(helpers.cleanName(rtp.findNext(text=True)))
#print ( "Comic Name: " + str(resultName[n]) )
fip = resultp('a',href=True)[1]
fip = resultp('a', href=True)[1]
resultID.append(fip['href'])
#print ( "ID: " + str(resultID[n]) )
subtxt3 = resultp('td')[3]
resultYear.append(subtxt3.findNext(text=True))
resultYear[n] = resultYear[n].replace(' ','')
resultYear[n] = resultYear[n].replace(' ', '')
subtxt4 = resultp('td')[4]
resultIssues.append(helpers.cleanName(subtxt4.findNext(text=True)))
resiss = resultIssues[n].find('issue')
resiss = int(resiss)
resultIssues[n] = resultIssues[n].replace('','')[:resiss]
resultIssues[n] = resultIssues[n].replace(' ','')
resultIssues[n] = resultIssues[n].replace('', '')[:resiss]
resultIssues[n] = resultIssues[n].replace(' ', '')
#print ( "Year: " + str(resultYear[n]) )
#print ( "Issues: " + str(resultIssues[n]) )
CleanComicName = re.sub('[\,\.\:\;\'\[\]\(\)\!\@\#\$\%\^\&\*\-\_\+\=\?\/]', '', comicnm)
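A pattern that recurs throughout this file: comics.org renders result tables with alternating listing_even / listing_odd row classes, so the scraper walks an index n and picks from one list or the other by parity. An equivalent formulation that stitches the rows back into page order up front (assuming, as the n % 2 == 0 branch does, that an even row comes first):

    from BeautifulSoup import BeautifulSoup   # BS3, as used elsewhere in Mylar

    def interleaved_rows(soup):
        evens = soup.findAll("tr", {"class": "listing_even"})
        odds = soup.findAll("tr", {"class": "listing_odd"})
        rows = []
        for i in range(len(evens) + len(odds)):
            rows.append(evens[i // 2] if i % 2 == 0 else odds[i // 2])
        return rows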
@ -98,7 +98,7 @@ def GCDScraper(ComicName, ComicYear, Total, ComicID, quickmatch=None):
#dates overlapping between Dec/11 and Jan/12. Let's accept a match with a
#1 year grace space, and then pull in the first issue to see the actual pub
# date and if coincides with the other date..match it.
if resultYear[n] == ComicYear or resultYear[n] == str(int(ComicYear)+1):
if resultYear[n] == ComicYear or resultYear[n] == str(int(ComicYear) +1):
#print ("n:" + str(n) + "...matched by year to Mylar!")
#print ( "Year: " + str(resultYear[n]) )
#Occasionally there are discrepancies in comic count between
@ -106,11 +106,11 @@ def GCDScraper(ComicName, ComicYear, Total, ComicID, quickmatch=None):
#as GCD does. Therefore, let's increase the CV count by 1 to get it
#to match, any more variation could cause incorrect matching.
#ie. witchblade on GCD says 159 issues, CV states 161.
if int(resultIssues[n]) == int(Total) or int(resultIssues[n]) == int(Total)+1 or (int(resultIssues[n])+1) == int(Total):
if int(resultIssues[n]) == int(Total) or int(resultIssues[n]) == int(Total) +1 or (int(resultIssues[n]) +1) == int(Total):
#print ("initial issue match..continuing.")
if int(resultIssues[n]) == int(Total)+1:
if int(resultIssues[n]) == int(Total) +1:
issvariation = "cv"
elif int(resultIssues[n])+1 == int(Total):
elif int(resultIssues[n]) +1 == int(Total):
issvariation = "gcd"
else:
issvariation = "no"
@ -128,7 +128,7 @@ def GCDScraper(ComicName, ComicYear, Total, ComicID, quickmatch=None):
# has the wrong title and won't match 100%...
# (ie. The Flash-2011 on comicvine is Flash-2011 on gcd)
# this section is to account for variations in spelling, punctuation, etc/
basnumbs = {'one':1,'two':2,'three':3,'four':4,'five':5,'six':6,'seven':7,'eight':8,'nine':9,'ten':10,'eleven':11,'twelve':12}
basnumbs = {'one': 1, 'two': 2, 'three': 3, 'four': 4, 'five': 5, 'six': 6, 'seven': 7, 'eight': 8, 'nine': 9, 'ten': 10, 'eleven': 11, 'twelve': 12}
if resultURL is None:
#search for number as text, and change to numeric
for numbs in basnumbs:
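basnumbs drives a simple normalization pass: spelled-out numbers in a series title are swapped for digits before retrying the GCD match, so 'Fantastic Four' and 'Fantastic 4' can meet in the middle. A rough sketch of the substitution the truncated loop above presumably performs:

    basnumbs = {'one': 1, 'two': 2, 'three': 3, 'four': 4, 'five': 5,
                'six': 6, 'seven': 7, 'eight': 8, 'nine': 9, 'ten': 10,
                'eleven': 11, 'twelve': 12}

    def numberize(title):
        # naive substring replace, matching the original loop's behavior
        lowered = title.lower()
        for word, digit in basnumbs.items():
            if word in lowered:
                lowered = lowered.replace(word, str(digit))
        return lowered

    # numberize('Fantastic Four') -> 'fantastic 4'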
@ -194,7 +194,7 @@ def GCDdetails(comseries, resultURL, vari_loop, ComicID, TotalIssues, issvariati
# if we're here - it means it's a mismatched name.
# let's pull down the publication date as it'll be blank otherwise
inputMIS = 'http://www.comics.org' + str(resultURL)
resp = urllib2.urlopen ( inputMIS )
resp = urllib2.urlopen (inputMIS)
# soup = BeautifulSoup ( resp )
try:
soup = BeautifulSoup(urllib2.urlopen(inputMIS))
@ -207,22 +207,22 @@ def GCDdetails(comseries, resultURL, vari_loop, ComicID, TotalIssues, issvariati
logger.info("not working...aborting. Tell Evilhero.")
return
#If CV doesn't have the Series Year (Stupid)...Let's store the Comics.org stated year just in case.
pyearit = soup.find("div", {"class" : "item_data"})
pyearit = soup.find("div", {"class": "item_data"})
pyeartxt = pyearit.find(text=re.compile(r"Series"))
pyearst = pyeartxt.index('Series')
ParseYear = pyeartxt[int(pyearst)-5:int(pyearst)]
ParseYear = pyeartxt[int(pyearst) -5:int(pyearst)]
parsed = soup.find("div", {"id" : "series_data"})
parsed = soup.find("div", {"id": "series_data"})
#recent structure changes - need to adjust now
subtxt3 = parsed.find("dd", {"id" : "publication_dates"})
subtxt3 = parsed.find("dd", {"id": "publication_dates"})
resultPublished = subtxt3.findNext(text=True).rstrip()
#print ("pubdate:" + str(resultPublished))
parsfind = parsed.findAll("dt", {"class" : "long"})
parsfind = parsed.findAll("dt", {"class": "long"})
seriesloop = len(parsfind)
resultFormat = ''
for pf in parsfind:
if 'Publishing Format:' in pf.findNext(text=True):
subtxt9 = pf.find("dd", {"id" : "series_format"})
subtxt9 = pf.find("dd", {"id": "series_format"})
resultFormat = subtxt9.findNext(text=True).rstrip()
continue
# the caveat - if a series is ongoing but only has 1 issue published at a particular point in time,
@ -233,11 +233,11 @@ def GCDdetails(comseries, resultURL, vari_loop, ComicID, TotalIssues, issvariati
resultPublished = resultPublished + " - Present"
if 'limited series' in resultFormat.lower() and '?' in resultPublished:
resultPublished = resultPublished + " (Limited Series)"
coverst = soup.find("div", {"id" : "series_cover"})
coverst = soup.find("div", {"id": "series_cover"})
if coverst < 0:
gcdcover = "None"
else:
subcoverst = coverst('img',src=True)[0]
subcoverst = coverst('img', src=True)[0]
gcdcover = subcoverst['src']
#print ("resultURL:" + str(resultURL))
@ -258,8 +258,8 @@ def GCDdetails(comseries, resultURL, vari_loop, ComicID, TotalIssues, issvariati
#print ("pub date defaulting")
datetype = "on-sale"
cnt1 = len(soup.findAll("tr", {"class" : "row_even_False"}))
cnt2 = len(soup.findAll("tr", {"class" : "row_even_True"}))
cnt1 = len(soup.findAll("tr", {"class": "row_even_False"}))
cnt2 = len(soup.findAll("tr", {"class": "row_even_True"}))
cnt = int(cnt1 + cnt2)
@ -271,19 +271,19 @@ def GCDdetails(comseries, resultURL, vari_loop, ComicID, TotalIssues, issvariati
PI = "1.00"
altcount = 0
PrevYRMO = "0000-00"
while ( n < cnt ):
while (n < cnt):
if n%2==0:
n_odd+=1
parsed = soup.findAll("tr", {"class" : "row_even_False"})[n_odd]
parsed = soup.findAll("tr", {"class": "row_even_False"})[n_odd]
ntype = "odd"
else:
n_even+=1
ntype = "even"
parsed = soup.findAll("tr", {"class" : "row_even_True"})[n_even]
parsed = soup.findAll("tr", {"class": "row_even_True"})[n_even]
subtxt3 = parsed.find("a")
ParseIssue = subtxt3.findNext(text=True)
fid = parsed('a',href=True)[0]
fid = parsed('a', href=True)[0]
resultGID = fid['href']
resultID = resultGID[7:-1]
@ -298,7 +298,7 @@ def GCDdetails(comseries, resultURL, vari_loop, ComicID, TotalIssues, issvariati
#if it's a digit before and after decimal, assume decimal issue
dec_st = dec_chk.find('.')
dec_b4 = dec_chk[:dec_st]
dec_ad = dec_chk[dec_st+1:]
dec_ad = dec_chk[dec_st +1:]
dec_ad = re.sub("\s", "", dec_ad)
if dec_b4.isdigit() and dec_ad.isdigit():
#logger.fdebug("Alternate decimal issue...*Whew* glad I caught that")
@ -334,7 +334,7 @@ def GCDdetails(comseries, resultURL, vari_loop, ComicID, TotalIssues, issvariati
if '.' in isschk:
isschk_find = isschk.find('.')
isschk_b4dec = isschk[:isschk_find]
isschk_decval = isschk[isschk_find+1:]
isschk_decval = isschk[isschk_find +1:]
#logger.fdebug("decimal detected for " + str(isschk))
#logger.fdebug("isschk_decval is " + str(isschk_decval))
if len(isschk_decval) == 1:
@ -381,9 +381,9 @@ def GCDdetails(comseries, resultURL, vari_loop, ComicID, TotalIssues, issvariati
gcdinfo['ComicIssue'] = ParseIssue
#--- let's use pubdate.
#try publication date first
ParseDate = GettheDate(parsed,PrevYRMO)
ParseDate = GettheDate(parsed, PrevYRMO)
ParseDate = ParseDate.replace(' ','')
ParseDate = ParseDate.replace(' ', '')
PrevYRMO = ParseDate
gcdinfo['ComicDate'] = ParseDate
#^^ will retrieve date #
@ -420,7 +420,7 @@ def GCDdetails(comseries, resultURL, vari_loop, ComicID, TotalIssues, issvariati
return gcdinfo
## -- end (GCD) -- ##
def GettheDate(parsed,PrevYRMO):
def GettheDate(parsed, PrevYRMO):
#--- let's use pubdate.
#try publication date first
#logger.fdebug("parsed:" + str(parsed))
@ -432,30 +432,30 @@ def GettheDate(parsed,PrevYRMO):
ParseDate = subtxt1.findNext(text=True)
pformat = 'on-sale'
if len(ParseDate) < 7: ParseDate = '0000-00' #invalid on-sale date format , drop it 0000-00 to avoid errors
basmonths = {'january':'01','february':'02','march':'03','april':'04','may':'05','june':'06','july':'07','august':'08','september':'09','october':'10','november':'11','december':'12'}
basmonths = {'january': '01', 'february': '02', 'march': '03', 'april': '04', 'may': '05', 'june': '06', 'july': '07', 'august': '08', 'september': '09', 'october': '10', 'november': '11', 'december': '12'}
pdlen = len(ParseDate)
pdfind = ParseDate.find(' ',2)
pdfind = ParseDate.find(' ', 2)
#logger.fdebug("length: " + str(pdlen) + "....first space @ pos " + str(pdfind))
#logger.fdebug("this should be the year: " + str(ParseDate[pdfind+1:pdlen-1]))
if pformat == 'on-sale': pass # date is in correct format...
else:
if ParseDate[pdfind+1:pdlen-1].isdigit():
if ParseDate[pdfind +1:pdlen -1].isdigit():
#assume valid date.
#search for number as text, and change to numeric
for numbs in basmonths:
if numbs in ParseDate.lower():
pconv = basmonths[numbs]
ParseYear = re.sub('/s','',ParseDate[-5:])
ParseYear = re.sub('/s', '', ParseDate[-5:])
ParseDate = str(ParseYear) + "-" + str(pconv)
#logger.fdebug("!success - Publication date: " + str(ParseDate))
break
# some comics are messed with pub.dates and have Spring/Summer/Fall/Winter
else:
baseseasons = {'spring':'03','summer':'06','fall':'09','winter':'12'}
baseseasons = {'spring': '03', 'summer': '06', 'fall': '09', 'winter': '12'}
for seas in baseseasons:
if seas in ParseDate.lower():
sconv = baseseasons[seas]
ParseYear = re.sub('/s','',ParseDate[-5:])
ParseYear = re.sub('/s', '', ParseDate[-5:])
ParseDate = str(ParseYear) + "-" + str(sconv)
break
# #try key date
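GettheDate normalizes GCD's free-form publication dates to 'YYYY-MM': month names map through basmonths, and season-only dates fall back to a representative month via baseseasons. (Incidentally, the re.sub('/s', ...) calls above look like they meant the whitespace class '\s'; the sketch below sidesteps that by slicing a stripped string.) A condensed, hypothetical equivalent:

    basmonths = {'january': '01', 'february': '02', 'march': '03',
                 'april': '04', 'may': '05', 'june': '06', 'july': '07',
                 'august': '08', 'september': '09', 'october': '10',
                 'november': '11', 'december': '12'}
    baseseasons = {'spring': '03', 'summer': '06', 'fall': '09',
                   'winter': '12'}

    def normalize_pubdate(parsedate):
        # 'August 2013' -> '2013-08'; 'Winter 1994' -> '1994-12'
        year = parsedate.strip()[-4:]
        lowered = parsedate.lower()
        for table in (basmonths, baseseasons):   # months take priority
            for name, month in table.items():
                if name in lowered:
                    return year + '-' + month
        return '0000-00'   # mirrors the invalid-date fallback above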
@ -493,22 +493,22 @@ def GCDAdd(gcdcomicid):
logger.fdebug("looking at gcdid:" + str(gcdid))
input2 = 'http://www.comics.org/series/' + str(gcdid)
logger.fdebug("---url: " + str(input2))
resp = urllib2.urlopen ( input2 )
soup = BeautifulSoup ( resp )
resp = urllib2.urlopen (input2)
soup = BeautifulSoup (resp)
logger.fdebug("SeriesName section...")
parsen = soup.find("span", {"id" : "series_name"})
parsen = soup.find("span", {"id": "series_name"})
#logger.fdebug("series name (UNPARSED): " + str(parsen))
subpar = parsen('a')[0]
resultName = subpar.findNext(text=True)
logger.fdebug("ComicName: " + str(resultName))
#covers-start
logger.fdebug("Covers section...")
coverst = soup.find("div", {"id" : "series_cover"})
coverst = soup.find("div", {"id": "series_cover"})
if coverst < 0:
gcdcover = "None"
logger.fdebug("unable to find any covers - setting to None")
else:
subcoverst = coverst('img',src=True)[0]
subcoverst = coverst('img', src=True)[0]
#logger.fdebug("cover (UNPARSED) : " + str(subcoverst))
gcdcover = subcoverst['src']
logger.fdebug("Cover: " + str(gcdcover))
@ -516,27 +516,27 @@ def GCDAdd(gcdcomicid):
#publisher start
logger.fdebug("Publisher section...")
try:
pubst = soup.find("div", {"class" : "item_data"})
pubst = soup.find("div", {"class": "item_data"})
catchit = pubst('a')[0]
except (IndexError, TypeError):
pubst = soup.findAll("div", {"class" : "left"})[1]
pubst = soup.findAll("div", {"class": "left"})[1]
catchit = pubst.find("a")
publisher = catchit.findNext(text=True)
logger.fdebug("Publisher: " + str(publisher))
#publisher end
parsed = soup.find("div", {"id" : "series_data"})
parsed = soup.find("div", {"id": "series_data"})
#logger.fdebug("series_data: " + str(parsed))
#print ("parse:" + str(parsed))
subtxt3 = parsed.find("dd", {"id" : "publication_dates"})
subtxt3 = parsed.find("dd", {"id": "publication_dates"})
#logger.fdebug("publication_dates: " + str(subtxt3))
pubdate = subtxt3.findNext(text=True).rstrip()
logger.fdebug("pubdate:" + str(pubdate))
subtxt4 = parsed.find("dd", {"id" : "issues_published"})
subtxt4 = parsed.find("dd", {"id": "issues_published"})
noiss = subtxt4.findNext(text=True)
lenwho = len(noiss)
lent = noiss.find(' ',2)
lent = noiss.find(' ', 2)
lenf = noiss.find('(')
stringit = noiss[lenf:lenwho]
stringout = noiss[:lent]
@ -547,10 +547,10 @@ def GCDAdd(gcdcomicid):
serieschoice.append({
"ComicID": gcdid,
"ComicName": resultName,
"ComicYear" : pubdate,
"ComicIssues" : noissues,
"ComicPublisher" : publisher,
"ComicCover" : gcdcover
"ComicYear": pubdate,
"ComicIssues": noissues,
"ComicPublisher": publisher,
"ComicCover": gcdcover
})
series['serieschoice'] = serieschoice
return series
@ -582,10 +582,10 @@ def ComChk(ComicName, ComicYear, ComicPublisher, Total, ComicID):
# take the 1st word ;)
#comicpub = comicpub.split()[0]
# if it's not one of the BIG publisher's it might fail - so let's increase the odds.
pubbiggies = [ 'DC',
pubbiggies = ['DC',
'Marvel',
'Image',
'IDW' ]
'IDW']
uhuh = "no"
for pb in pubbiggies:
if pb in comicpub:
@ -624,10 +624,10 @@ def ComChk(ComicName, ComicYear, ComicPublisher, Total, ComicID):
if uhuh == "no":
publink = "&pub_name="
input = 'http://www.comics.org/search/advanced/process/?target=series&method=icontains&logic=False&keywords=&order1=series&order2=date&order3=&start_date=' + str(comicyr) + '-01-01&end_date=' + str(NOWyr) + '-12-31' + '&title=&feature=&job_number=&pages=&script=&pencils=&inks=&colors=&letters=&story_editing=&genre=&characters=&synopsis=&reprint_notes=&story_reprinted=None&notes=' + str(publink) + '&pub_notes=&brand=&brand_notes=&indicia_publisher=&is_surrogate=None&ind_pub_notes=&series=' + str(comicnm) + '&series_year_began=&series_notes=&tracking_notes=&issue_count=&is_comics=None&format=&color=&dimensions=&paper_stock=&binding=&publishing_format=&issues=&volume=&issue_title=&variant_name=&issue_date=&indicia_frequency=&price=&issue_pages=&issue_editing=&isbn=&barcode=&issue_notes=&issue_reprinted=None&is_indexed=None'
response = urllib2.urlopen ( input )
soup = BeautifulSoup ( response)
cnt1 = len(soup.findAll("tr", {"class" : "listing_even"}))
cnt2 = len(soup.findAll("tr", {"class" : "listing_odd"}))
response = urllib2.urlopen (input)
soup = BeautifulSoup (response)
cnt1 = len(soup.findAll("tr", {"class": "listing_even"}))
cnt2 = len(soup.findAll("tr", {"class": "listing_odd"}))
cnt = int(cnt1 + cnt2)
# print ("cnt1: " + str(cnt1))
@ -643,13 +643,13 @@ def ComChk(ComicName, ComicYear, ComicPublisher, Total, ComicID):
n_odd = -1
n_even = -1
n = 0
while ( n < cnt ):
while (n < cnt):
if n%2==0:
n_even+=1
resultp = soup.findAll("tr", {"class" : "listing_even"})[n_even]
resultp = soup.findAll("tr", {"class": "listing_even"})[n_even]
else:
n_odd+=1
resultp = soup.findAll("tr", {"class" : "listing_odd"})[n_odd]
resultp = soup.findAll("tr", {"class": "listing_odd"})[n_odd]
rtp = resultp('a')[1]
rtpit = rtp.findNext(text=True)
rtpthis = rtpit.encode('utf-8').strip()
@ -663,19 +663,19 @@ def ComChk(ComicName, ComicYear, ComicPublisher, Total, ComicID):
resultPublisher.append(pubthis)
# print ( "Publisher: " + str(resultPublisher[n]) )
fip = resultp('a',href=True)[1]
fip = resultp('a', href=True)[1]
resultID.append(fip['href'])
# print ( "ID: " + str(resultID[n]) )
subtxt3 = resultp('td')[3]
resultYear.append(subtxt3.findNext(text=True))
resultYear[n] = resultYear[n].replace(' ','')
resultYear[n] = resultYear[n].replace(' ', '')
subtxt4 = resultp('td')[4]
resultIssues.append(helpers.cleanName(subtxt4.findNext(text=True)))
resiss = resultIssues[n].find('issue')
resiss = int(resiss)
resultIssues[n] = resultIssues[n].replace('','')[:resiss]
resultIssues[n] = resultIssues[n].replace(' ','')
resultIssues[n] = resultIssues[n].replace('', '')[:resiss]
resultIssues[n] = resultIssues[n].replace(' ', '')
# print ( "Year: " + str(resultYear[n]) )
# print ( "Issues: " + str(resultIssues[n]) )
# print ("comchkchoice: " + str(comchkchoice))
@ -685,10 +685,10 @@ def ComChk(ComicName, ComicYear, ComicPublisher, Total, ComicID):
"ComicID": str(comicid),
"ComicName": resultName[n],
"GCDID": str(resultID[n]).split('/')[2],
"ComicYear" : str(resultYear[n]),
"ComicPublisher" : resultPublisher[n],
"ComicURL" : "http://www.comics.org" + str(resultID[n]),
"ComicIssues" : str(resultIssues[n])
"ComicYear": str(resultYear[n]),
"ComicPublisher": resultPublisher[n],
"ComicURL": "http://www.comics.org" + str(resultID[n]),
"ComicIssues": str(resultIssues[n])
})
#else:
#print ( str(resultID[n]) + " already in DB...skipping" )
@ -721,10 +721,10 @@ def annualCheck(gcomicid, comicid, comicname, comicyear):
comicnm = re.sub(' ', '+', comicnm_1)
input = 'http://www.comics.org/search/advanced/process/?target=series&method=icontains&logic=False&order2=date&order3=&start_date=' + str(comicyear) + '-01-01&end_date=' + str(comicyear) + '-12-31&series=' + str(comicnm) + '&is_indexed=None'
response = urllib2.urlopen ( input )
soup = BeautifulSoup ( response)
cnt1 = len(soup.findAll("tr", {"class" : "listing_even"}))
cnt2 = len(soup.findAll("tr", {"class" : "listing_odd"}))
response = urllib2.urlopen (input)
soup = BeautifulSoup (response)
cnt1 = len(soup.findAll("tr", {"class": "listing_even"}))
cnt2 = len(soup.findAll("tr", {"class": "listing_odd"}))
cnt = int(cnt1 + cnt2)
@ -738,33 +738,33 @@ def annualCheck(gcomicid, comicid, comicname, comicyear):
n_odd = -1
n_even = -1
n = 0
while ( n < cnt ):
while (n < cnt):
if n%2==0:
n_even+=1
resultp = soup.findAll("tr", {"class" : "listing_even"})[n_even]
resultp = soup.findAll("tr", {"class": "listing_even"})[n_even]
else:
n_odd+=1
resultp = soup.findAll("tr", {"class" : "listing_odd"})[n_odd]
resultp = soup.findAll("tr", {"class": "listing_odd"})[n_odd]
rtp = resultp('a')[1]
rtp1 = re.sub('Annual', '', rtp)
resultName.append(helpers.cleanName(rtp1.findNext(text=True)))
print ( "Comic Name: " + str(resultName[n]) )
fip = resultp('a',href=True)[1]
print ("Comic Name: " + str(resultName[n]))
fip = resultp('a', href=True)[1]
resultID.append(fip['href'])
print ( "ID: " + str(resultID[n]) )
print ("ID: " + str(resultID[n]))
subtxt3 = resultp('td')[3]
resultYear.append(subtxt3.findNext(text=True))
resultYear[n] = resultYear[n].replace(' ','')
resultYear[n] = resultYear[n].replace(' ', '')
subtxt4 = resultp('td')[4]
resultIssues.append(helpers.cleanName(subtxt4.findNext(text=True)))
resiss = resultIssues[n].find('issue')
resiss = int(resiss)
resultIssues[n] = resultIssues[n].replace('','')[:resiss]
resultIssues[n] = resultIssues[n].replace(' ','')
print ( "Year: " + str(resultYear[n]) )
print ( "Issues: " + str(resultIssues[n]) )
resultIssues[n] = resultIssues[n].replace('', '')[:resiss]
resultIssues[n] = resultIssues[n].replace(' ', '')
print ("Year: " + str(resultYear[n]))
print ("Issues: " + str(resultIssues[n]))
CleanComicName = re.sub('[\,\.\:\;\'\[\]\(\)\!\@\#\$\%\^\&\*\-\_\+\=\?\/]', '', comicnm)
CleanComicName = re.sub(' ', '', CleanComicName).lower()
@ -775,9 +775,9 @@ def annualCheck(gcomicid, comicid, comicname, comicyear):
if CleanResultName == CleanComicName or CleanResultName[3:] == CleanComicName:
#if resultName[n].lower() == helpers.cleanName(str(ComicName)).lower():
#print ("n:" + str(n) + "...matched by name to Mylar!")
if resultYear[n] == ComicYear or resultYear[n] == str(int(ComicYear)+1):
if resultYear[n] == ComicYear or resultYear[n] == str(int(ComicYear) +1):
print ("n:" + str(n) + "...matched by year to Mylar!")
print ( "Year: " + str(resultYear[n]) )
print ("Year: " + str(resultYear[n]))
TotalIssues = resultIssues[n]
resultURL = str(resultID[n])
rptxt = resultp('td')[6]

View File

@ -217,7 +217,7 @@ class Readinglist(object):
cmd = shlex.split(cmdstring)
try:
output = subprocess.check_output(cmd)
except subprocess.CalledProcessError,e:
except subprocess.CalledProcessError, e:
logger.info(module + ' The host {0} is not Reachable at this time.'.format(cmd[-1]))
return
else:

View File

@ -29,7 +29,7 @@ def _start_newznab_attr(self, attrsD):
feedparser._FeedParserMixin._start_newznab_attr = _start_newznab_attr
def torrents(pickfeed=None,seriesname=None,issue=None,feedinfo=None):
def torrents(pickfeed=None, seriesname=None, issue=None, feedinfo=None):
if pickfeed is None:
return
@ -85,7 +85,7 @@ def torrents(pickfeed=None,seriesname=None,issue=None,feedinfo=None):
feed = kat_url + "usearch/category%3Acomics%20seeds%3A" + str(mylar.MINSEEDS) + "/?rss=1"
feedtype = ' from the New Releases RSS Feed for comics'
elif pickfeed == "4": #32p search
if any( [mylar.USERNAME_32P is None, mylar.USERNAME_32P == '', mylar.PASSWORD_32P is None, mylar.PASSWORD_32P == ''] ):
if any([mylar.USERNAME_32P is None, mylar.USERNAME_32P == '', mylar.PASSWORD_32P is None, mylar.PASSWORD_32P == '']):
logger.error('[RSS] Warning - you NEED to enter in your 32P Username and Password to use this option.')
lp += 1
continue
@ -102,7 +102,7 @@ def torrents(pickfeed=None,seriesname=None,issue=None,feedinfo=None):
elif pickfeed == "6": # kat.ph rss feed (category:other so that we can get them quicker if need-be)
feed = kat_url + "usearch/.cbr%20category%3Aother%20seeds%3A" + str(mylar.MINSEEDS) + "/?rss=1"
feedtype = ' from the New Releases for category Other RSS Feed that contain comics'
elif int(pickfeed) >=7 and feedinfo is not None:
elif int(pickfeed) >= 7 and feedinfo is not None:
#personal 32P notification feeds.
#get the info here
feed = 'https://32pag.es/feeds.php?feed=' + feedinfo['feed'] + '&user=' + feedinfo['user'] + '&auth=' + feedinfo['auth'] + '&passkey=' + feedinfo['passkey'] + '&authkey=' + feedinfo['authkey'] + '&name=' + feedinfo['feedname']
@ -149,15 +149,15 @@ def torrents(pickfeed=None,seriesname=None,issue=None,feedinfo=None):
tmpdesc = feedme.entries[i].description
st_pub = feedme.entries[i].title.find('(')
st_end = feedme.entries[i].title.find(')')
pub = feedme.entries[i].title[st_pub+1:st_end] # +1 to not include (
pub = feedme.entries[i].title[st_pub +1:st_end] # +1 to not include (
#logger.fdebug('publisher: ' + re.sub("'",'', pub).strip()) #publisher sometimes is given within quotes for some reason, strip 'em.
vol_find = feedme.entries[i].title.find('vol.')
series = feedme.entries[i].title[st_end+1:vol_find].strip()
series = feedme.entries[i].title[st_end +1:vol_find].strip()
#logger.fdebug('series title: ' + series)
iss_st = feedme.entries[i].title.find(' - ', vol_find)
vol = re.sub('\.', '', feedme.entries[i].title[vol_find:iss_st]).strip()
#logger.fdebug('volume #: ' + str(vol))
issue = feedme.entries[i].title[iss_st+3:].strip()
issue = feedme.entries[i].title[iss_st +3:].strip()
#logger.fdebug('issue # : ' + str(issue))
#break it down to get the Size since it's available on THIS 32P feed only so far.
@ -172,18 +172,18 @@ def torrents(pickfeed=None,seriesname=None,issue=None,feedinfo=None):
if '.' in fdigits:
decfind = fdigits.find('.')
wholenum = fdigits[:decfind]
decnum = fdigits[decfind+1:]
decnum = fdigits[decfind +1:]
else:
wholenum = fdigits
decnum = 0
if 'MB' in tmpsize:
wholebytes = int(wholenum) * 1048576
wholedecimal = ( int(decnum) * 1048576 ) / 100
wholedecimal = (int(decnum) * 1048576) / 100
justdigits = wholebytes + wholedecimal
else:
#it's 'GB' then
wholebytes = ( int(wholenum) * 1024 ) * 1048576
wholedecimal = ( ( int(decnum) * 1024 ) * 1048576 ) / 100
wholebytes = (int(wholenum) * 1024) * 1048576
wholedecimal = ((int(decnum) * 1024) * 1048576) / 100
justdigits = wholebytes + wholedecimal
#this is not currently working for 32p
#Get the # of seeders.
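The size parsing above converts a '12.5 MB'-style figure to bytes by scaling the whole and fractional parts separately; note the / 100 divisor appears to assume a two-digit fraction, so a one-digit fraction like '12.5' would be scaled as if it were '12.05'. A float-based equivalent avoids that edge (hypothetical helper, not Mylar code):

    def size_to_bytes(size_str):
        # size_to_bytes('12.5 MB') -> 13107200
        digits, unit = size_str.split()
        multiplier = 1048576 if unit.upper() == 'MB' else 1073741824  # GB
        return int(float(digits) * multiplier)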
@ -201,9 +201,10 @@ def torrents(pickfeed=None,seriesname=None,issue=None,feedinfo=None):
if int(mylar.MINSEEDS) >= int(seeddigits):
link = feedme.entries[i].link
linkst = link.find('&id')
linken = link.find('&', linkst+1)
if linken == -1: linken = len(link)
newlink = re.sub('&id=','', link[linkst:linken]).strip()
linken = link.find('&', linkst +1)
if linken == -1:
linken = len(link)
newlink = re.sub('&id=', '', link[linkst:linken]).strip()
feeddata.append({
'site': picksite,
'title': series.lstrip() + ' ' + vol + ' #' + issue,
@ -214,7 +215,7 @@ def torrents(pickfeed=None,seriesname=None,issue=None,feedinfo=None):
'size': justdigits
})
i+=1
i += 1
if feedtype is None:
logger.info('[' + picksite + '] there were ' + str(i) + ' results..')
@ -222,11 +223,10 @@ def torrents(pickfeed=None,seriesname=None,issue=None,feedinfo=None):
logger.info('[' + picksite + '] there were ' + str(i) + ' results' + feedtype)
totalcount += i
lp +=1
lp += 1
if not seriesname:
rssdbupdate(feeddata,totalcount,'torrent')
rssdbupdate(feeddata, totalcount, 'torrent')
else:
katinfo['entries'] = torthekat
return katinfo
@ -325,7 +325,7 @@ def nzbs(provider=None, forcerss=False):
rssdbupdate(feeddata, i, 'usenet')
return
def rssdbupdate(feeddata,i,type):
def rssdbupdate(feeddata, i, type):
rsschktime = 15
myDB = db.DBConnection()
@ -340,7 +340,7 @@ def rssdbupdate(feeddata,i,type):
newlink = dataval['link']
else:
#store the hash/id from KAT
newlink = os.path.basename(re.sub('.torrent','', dataval['link'][:dataval['link'].find('?title')]))
newlink = os.path.basename(re.sub('.torrent', '', dataval['link'][:dataval['link'].find('?title')]))
newVal = {"Link": newlink,
"Pubdate": dataval['pubdate'],
@ -356,12 +356,13 @@ def rssdbupdate(feeddata,i,type):
"Size": dataval['Size']}
ctrlVal = {"Title": dataval['Title']}
myDB.upsert("rssdb", newVal,ctrlVal)
myDB.upsert("rssdb", newVal, ctrlVal)
logger.fdebug('Completed adding new data to RSS DB. Next add in ' + str(mylar.RSS_CHECKINTERVAL) + ' minutes')
return
def torrentdbsearch(seriesname,issue,comicid=None,nzbprov=None):
def torrentdbsearch(seriesname, issue, comicid=None, nzbprov=None):
myDB = db.DBConnection()
seriesname_alt = None
if comicid is None or comicid == 'None':
@ -376,12 +377,11 @@ def torrentdbsearch(seriesname,issue,comicid=None,nzbprov=None):
seriesname = snm['ComicName']
seriesname_alt = snm['AlternateSearch']
#remove 'and' and 'the':
tsearch_rem1 = re.sub("\\band\\b", "%", seriesname.lower())
tsearch_rem2 = re.sub("\\bthe\\b", "%", tsearch_rem1.lower())
tsearch_removed = re.sub('\s+', ' ', tsearch_rem2)
tsearch_seriesname = re.sub('[\'\!\@\#\$\%\:\-\;\/\\=\?\&\.\s]', '%',tsearch_removed)
tsearch_seriesname = re.sub('[\'\!\@\#\$\%\:\-\;\/\\=\?\&\.\s]', '%', tsearch_removed)
if mylar.PREFERRED_QUALITY == 0:
tsearch = tsearch_seriesname + "%"
elif mylar.PREFERRED_QUALITY == 1:
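
For context, the substitutions above turn a watched series title into a SQL LIKE pattern: the stop-words 'and'/'the', punctuation and spaces all become '%' wildcards so stored titles with different separators still match. A short trace of the same steps:

import re

seriesname = 'Wolverine and the X-Men'
tsearch_rem1 = re.sub(r'\band\b', '%', seriesname.lower())
tsearch_rem2 = re.sub(r'\bthe\b', '%', tsearch_rem1)
tsearch_removed = re.sub(r'\s+', ' ', tsearch_rem2)
tsearch_seriesname = re.sub(r'[\'\!\@\#\$\%\:\-\;\/\\=\?\&\.\s]', '%', tsearch_removed)
print(tsearch_seriesname + '%')  #every stop-word, space and '-' is now a '%' wildcard
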
@ -412,7 +412,7 @@ def torrentdbsearch(seriesname,issue,comicid=None,nzbprov=None):
AS_Alternate = seriesname_alt
AS_Alt.append(seriesname_alt)
for calt in chkthealt:
AS_Alter = re.sub('##','',calt)
AS_Alter = re.sub('##', '', calt)
u_altsearchcomic = AS_Alter.encode('ascii', 'ignore').strip()
AS_Altrem = re.sub("\\band\\b", "", u_altsearchcomic.lower())
AS_Altrem = re.sub("\\bthe\\b", "", AS_Altrem.lower())
@ -420,7 +420,7 @@ def torrentdbsearch(seriesname,issue,comicid=None,nzbprov=None):
AS_Alternate = re.sub('[\_\#\,\/\:\;\.\-\!\$\%\+\'\&\?\@\s]', '%', AS_Altrem)
AS_Altrem_mod = re.sub('[\&]', ' ', AS_Altrem)
AS_formatrem_seriesname = re.sub('[\'\!\@\#\$\%\:\;\/\\=\?\.]', '',AS_Altrem_mod)
AS_formatrem_seriesname = re.sub('[\'\!\@\#\$\%\:\;\/\\=\?\.]', '', AS_Altrem_mod)
AS_formatrem_seriesname = re.sub('\s+', ' ', AS_formatrem_seriesname)
if AS_formatrem_seriesname[:1] == ' ': AS_formatrem_seriesname = AS_formatrem_seriesname[1:]
AS_Alt.append(AS_formatrem_seriesname)
@ -499,14 +499,14 @@ def torrentdbsearch(seriesname,issue,comicid=None,nzbprov=None):
seriesname_mod = re.sub('[\&]', ' ', seriesname_mod)
foundname_mod = re.sub('[\&]', ' ', foundname_mod)
formatrem_seriesname = re.sub('[\'\!\@\#\$\%\:\;\=\?\.]', '',seriesname_mod)
formatrem_seriesname = re.sub('[\-]', ' ',formatrem_seriesname)
formatrem_seriesname = re.sub('[\'\!\@\#\$\%\:\;\=\?\.]', '', seriesname_mod)
formatrem_seriesname = re.sub('[\-]', ' ', formatrem_seriesname)
formatrem_seriesname = re.sub('[\/]', ' ', formatrem_seriesname) #not necessary since seriesname in a torrent file won't have /
formatrem_seriesname = re.sub('\s+', ' ', formatrem_seriesname)
if formatrem_seriesname[:1] == ' ': formatrem_seriesname = formatrem_seriesname[1:]
formatrem_torsplit = re.sub('[\'\!\@\#\$\%\:\;\\=\?\.]', '',foundname_mod)
formatrem_torsplit = re.sub('[\-]', ' ',formatrem_torsplit) #we replace the - with a space so we'll still get hits when there are differences
formatrem_torsplit = re.sub('[\'\!\@\#\$\%\:\;\\=\?\.]', '', foundname_mod)
formatrem_torsplit = re.sub('[\-]', ' ', formatrem_torsplit) #we replace the - with a space so we'll still get hits when there are differences
formatrem_torsplit = re.sub('[\/]', ' ', formatrem_torsplit) #not necessary since if has a /, should be removed in above line
formatrem_torsplit = re.sub('\s+', ' ', formatrem_torsplit)
logger.fdebug(str(len(formatrem_torsplit)) + ' - formatrem_torsplit : ' + formatrem_torsplit.lower())
@ -541,7 +541,7 @@ def torrentdbsearch(seriesname,issue,comicid=None,nzbprov=None):
st_pub = rebuiltline.find('(')
if st_pub < 2 and st_pub != -1:
st_end = rebuiltline.find(')')
rebuiltline = rebuiltline[st_end+1:]
rebuiltline = rebuiltline[st_end +1:]
tortheinfo.append({
'title': rebuiltline, #cttitle,
@ -555,7 +555,7 @@ def torrentdbsearch(seriesname,issue,comicid=None,nzbprov=None):
return torinfo
def nzbdbsearch(seriesname,issue,comicid=None,nzbprov=None,searchYear=None,ComicVersion=None):
def nzbdbsearch(seriesname, issue, comicid=None, nzbprov=None, searchYear=None, ComicVersion=None):
myDB = db.DBConnection()
seriesname_alt = None
if comicid is None or comicid == 'None':
@ -569,12 +569,12 @@ def nzbdbsearch(seriesname,issue,comicid=None,nzbprov=None,searchYear=None,Comic
seriesname = snm['ComicName']
seriesname_alt = snm['AlternateSearch']
nsearch_seriesname = re.sub('[\'\!\@\#\$\%\:\;\/\\=\?\.\-\s]', '%',seriesname)
formatrem_seriesname = re.sub('[\'\!\@\#\$\%\:\;\/\\=\?\.]', '',seriesname)
nsearch_seriesname = re.sub('[\'\!\@\#\$\%\:\;\/\\=\?\.\-\s]', '%', seriesname)
formatrem_seriesname = re.sub('[\'\!\@\#\$\%\:\;\/\\=\?\.]', '', seriesname)
nsearch = '%' + nsearch_seriesname + "%"
nresults = myDB.select("SELECT * FROM rssdb WHERE Title like ? AND Site=?", [nsearch,nzbprov])
nresults = myDB.select("SELECT * FROM rssdb WHERE Title like ? AND Site=?", [nsearch, nzbprov])
if nresults is None:
logger.fdebug('nzb search returned no results for ' + seriesname)
if seriesname_alt is None:
@ -585,9 +585,9 @@ def nzbdbsearch(seriesname,issue,comicid=None,nzbprov=None,searchYear=None,Comic
if chkthealt == 0:
AS_Alternate = AlternateSearch
for calt in chkthealt:
AS_Alternate = re.sub('##','',calt)
AS_Alternate = re.sub('##', '', calt)
AS_Alternate = '%' + AS_Alternate + "%"
nresults += myDB.select("SELECT * FROM rssdb WHERE Title like ? AND Site=?", [AS_Alternate,nzbprov])
nresults += myDB.select("SELECT * FROM rssdb WHERE Title like ? AND Site=?", [AS_Alternate, nzbprov])
if nresults is None:
logger.fdebug('nzb alternate name search returned no results.')
return "no results"
@ -709,7 +709,7 @@ def torsend2client(seriesname, issue, seriesyear, linkit, site):
logger.fdebug('[32P-AUTHENTICATION] 32P (Legacy) Authentication already done. Attempting to use existing keys.')
mylar.AUTHKEY_32P = mylar.KEYS_32P['authkey']
else:
if any( [mylar.USERNAME_32P is None, mylar.USERNAME_32P == '', mylar.PASSWORD_32P is None, mylar.PASSWORD_32P == ''] ):
if any([mylar.USERNAME_32P is None, mylar.USERNAME_32P == '', mylar.PASSWORD_32P is None, mylar.PASSWORD_32P == '']):
logger.error('[RSS] Unable to sign-on to 32P to validate settings and initiate download sequence. Please enter/check your username password in the configuration.')
return "fail"
elif mylar.PASSKEY_32P is None or mylar.AUTHKEY_32P is None or mylar.KEYS_32P is None:
@ -802,7 +802,7 @@ def torsend2client(seriesname, issue, seriesyear, linkit, site):
return "pass"
elif mylar.TORRENT_SEEDBOX:
tssh = ftpsshup.putfile(filepath,filename)
tssh = ftpsshup.putfile(filepath, filename)
return tssh

View File

@ -64,13 +64,13 @@ class tehMain():
logger.info('[RSS] Initiating Torrent RSS Feed Check on 32P.')
if mylar.MODE_32P == 0:
logger.fdebug('[RSS] 32P mode set to Legacy mode. Monitoring New Releases feed only.')
if any( [mylar.PASSKEY_32P is None, mylar.PASSKEY_32P == '', mylar.RSSFEED_32P is None, mylar.RSSFEED_32P == ''] ):
if any([mylar.PASSKEY_32P is None, mylar.PASSKEY_32P == '', mylar.RSSFEED_32P is None, mylar.RSSFEED_32P == '']):
logger.error('[RSS] Unable to validate information from provided RSS Feed. Verify that the feed provided is a current one.')
else:
rsscheck.torrents(pickfeed='1', feedinfo=mylar.KEYS_32P)
else:
logger.fdebug('[RSS] 32P mode set to Auth mode. Monitoring all personal notification feeds & New Releases feed')
if any( [mylar.USERNAME_32P is None, mylar.USERNAME_32P == '', mylar.PASSWORD_32P is None] ):
if any([mylar.USERNAME_32P is None, mylar.USERNAME_32P == '', mylar.PASSWORD_32P is None]):
logger.error('[RSS] Unable to sign-on to 32P to validate settings. Please enter/check your username password in the configuration.')
else:
if mylar.KEYS_32P is None:

View File

@ -29,25 +29,25 @@ def sabnzbd():
r = requests.get(sabline + 'config/general/')
soup = BeautifulSoup(r.content)
#lenlinks = len(cntlinks)
cnt1 = len(soup.findAll("div", {"class" : "field-pair alt"}))
cnt2 = len(soup.findAll("div", {"class" : "field-pair"}))
cnt1 = len(soup.findAll("div", {"class": "field-pair alt"}))
cnt2 = len(soup.findAll("div", {"class": "field-pair"}))
cnt = int(cnt1 + cnt2)
n = 0
n_even = -1
n_odd = -1
while ( n < cnt ):
while (n < cnt):
if n%2==0:
n_even+=1
resultp = soup.findAll("div", {"class" : "field-pair"})[n_even]
resultp = soup.findAll("div", {"class": "field-pair"})[n_even]
else:
n_odd+=1
resultp = soup.findAll("div", {"class" : "field-pair alt"})[n_odd]
resultp = soup.findAll("div", {"class": "field-pair alt"})[n_odd]
if resultp.find("label", {"for" : "nzbkey"}):
if resultp.find("label", {"for": "nzbkey"}):
#logger.fdebug resultp
try:
result = resultp.find("input", {"type" : "text"})
result = resultp.find("input", {"type": "text"})
except:
continue
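
The loop above walks two parallel result sets, pulling even iterations from the 'field-pair' divs and odd ones from 'field-pair alt'. The traversal in isolation, with made-up list contents standing in for the BeautifulSoup results:

pairs = ['p0', 'p1', 'p2']  #soup.findAll("div", {"class": "field-pair"})
alts = ['a0', 'a1']         #soup.findAll("div", {"class": "field-pair alt"})
n, n_even, n_odd = 0, -1, -1
while n < len(pairs) + len(alts):
    if n % 2 == 0:
        n_even += 1
        resultp = pairs[n_even]
    else:
        n_odd += 1
        resultp = alts[n_odd]
    print(resultp)  #p0, a0, p1, a1, p2
    n += 1
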

View File

@ -77,7 +77,7 @@ class Scheduler:
self.action.run()
except Exception, e:
logger.fdebug("Exception generated in thread " + self.threadName + ": %s" % e )
logger.fdebug("Exception generated in thread " + self.threadName + ": %s" % e)
logger.fdebug(repr(traceback.format_exc()))
if self.abort:

View File

@ -143,7 +143,7 @@ def search_init(ComicName, IssueNumber, ComicYear, SeriesYear, Publisher, IssueD
nzbprov = None
return findit, nzbprov
prov_order,newznab_info = provider_sequence(nzbprovider,torprovider,newznab_hosts)
prov_order, newznab_info = provider_sequence(nzbprovider, torprovider, newznab_hosts)
# end provider order sequencing
logger.info('search provider order is ' + str(prov_order))
@ -169,7 +169,7 @@ def search_init(ComicName, IssueNumber, ComicYear, SeriesYear, Publisher, IssueD
searchcnt = 2 #set the searchcnt to 2 (api)
i = 2 #start the counter at api, so it will exit without running RSS
while ( i <= searchcnt ):
while (i <= searchcnt):
#searchmodes:
# rss - will run through the built-cached db of entries
# api - will run through the providers via api (or non-api in the case of Experimental)
@ -188,7 +188,7 @@ def search_init(ComicName, IssueNumber, ComicYear, SeriesYear, Publisher, IssueD
#torprtmp = 0 # torprtmp = torpr
prov_count = 0
while (prov_count <= len(prov_order)-1):
while (prov_count <= len(prov_order) -1):
#while (torprtmp <= torpr): #(torprtmp >=0 ):
newznab_host = None
if prov_order[prov_count] == '32p':
@ -219,7 +219,7 @@ def search_init(ComicName, IssueNumber, ComicYear, SeriesYear, Publisher, IssueD
AS_Alternate = AlternateSearch
loopit = len(chkthealt)
for calt in chkthealt:
AS_Alternate = re.sub('##','',calt)
AS_Alternate = re.sub('##', '', calt)
logger.info(u"Alternate Search pattern detected...re-adjusting to : " + str(AS_Alternate) + " " + str(ComicYear))
findit = NZB_SEARCH(AS_Alternate, IssueNumber, ComicYear, SeriesYear, Publisher, IssueDate, StoreDate, searchprov, prov_count, IssDateFix, IssueID, UseFuzzy, newznab_host, ComicVersion=ComicVersion, SARC=SARC, IssueArcID=IssueArcID, RSS="yes", ComicID=ComicID, issuetitle=issuetitle, unaltered_ComicName=AS_Alternate)
if findit == 'yes':
@ -238,7 +238,7 @@ def search_init(ComicName, IssueNumber, ComicYear, SeriesYear, Publisher, IssueD
AS_Alternate = AlternateSearch
loopit = len(chkthealt)
for calt in chkthealt:
AS_Alternate = re.sub('##','',calt)
AS_Alternate = re.sub('##', '', calt)
logger.info(u"Alternate Search pattern detected...re-adjusting to : " + str(AS_Alternate) + " " + str(ComicYear))
findit = NZB_SEARCH(AS_Alternate, IssueNumber, ComicYear, SeriesYear, Publisher, IssueDate, StoreDate, searchprov, prov_count, IssDateFix, IssueID, UseFuzzy, newznab_host, ComicVersion=ComicVersion, SARC=SARC, IssueArcID=IssueArcID, ComicID=ComicID, issuetitle=issuetitle, unaltered_ComicName=unaltered_ComicName)
if findit == 'yes':
@ -282,7 +282,7 @@ def NZB_SEARCH(ComicName, IssueNumber, ComicYear, SeriesYear, Publisher, IssueDa
apikey = newznab_host[2].rstrip()
if '#' in newznab_host[3].rstrip():
catstart = newznab_host[3].find('#')
category_newznab = newznab_host[3][catstart+1:]
category_newznab = newznab_host[3][catstart +1:]
logger.fdebug('non-default Newznab category set to :' + str(category_newznab))
else:
category_newznab = '7030'
@ -380,7 +380,7 @@ def NZB_SEARCH(ComicName, IssueNumber, ComicYear, SeriesYear, Publisher, IssueDa
break
elif '.' in findcomiciss[i]:
c_number = findcomiciss[:i].rstrip()
c_num_a4 = findcomiciss[i+1:].rstrip()
c_num_a4 = findcomiciss[i +1:].rstrip()
#if decimal separates numeric from alpha (ie - 7.INH)
#don't give calpha a value or else it will separate with a space further down
#assign it to dsp_c_alpha so that it can be displayed for debugging.
@ -418,9 +418,9 @@ def NZB_SEARCH(ComicName, IssueNumber, ComicYear, SeriesYear, Publisher, IssueDa
# if issue is '011' instead of '11' in nzb search results, will not have same
# results. '011' will return different than '11', as will '009' and '09'.
while (findloop < findcount ):
while (findloop < findcount):
comsrc = comsearch
while (cmloopit >= 1 ):
while (cmloopit >= 1):
#if issue_except is None: issue_exc = ''
#else: issue_exc = issue_except
if done is True and seperatealpha == "no":
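
The findloop/cmloopit counters exist because, as the comment above notes, indexers treat '11' and '011' as different strings. A hypothetical sketch (not Mylar's actual helper) of generating the zero-padded variants for one issue number:

def padded_variants(issue):
    #'9' -> ['9', '09', '009']; '11' -> ['11', '011']; '110' -> ['110']
    variants = [issue]
    for width in (2, 3):
        if len(issue) < width:
            variants.append(issue.zfill(width))
    return variants

print(padded_variants('9'))
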
@ -451,7 +451,7 @@ def NZB_SEARCH(ComicName, IssueNumber, ComicYear, SeriesYear, Publisher, IssueDa
if nzbprov == '32P' or nzbprov == 'KAT':
cmname = re.sub("%20", " ", str(comsrc))
logger.fdebug("Sending request to [" + str(nzbprov) + "] RSS for " + str(findcomic) + " : " + str(mod_isssearch))
bb = rsscheck.torrentdbsearch(findcomic,mod_isssearch,ComicID,nzbprov)
bb = rsscheck.torrentdbsearch(findcomic, mod_isssearch, ComicID, nzbprov)
rss = "yes"
#if bb is not None: logger.fdebug("bb results: " + str(bb))
else:
@ -460,7 +460,7 @@ def NZB_SEARCH(ComicName, IssueNumber, ComicYear, SeriesYear, Publisher, IssueDa
if nzbprov == 'newznab':
nzbprov_fix = name_newznab
else: nzbprov_fix = nzbprov
bb = rsscheck.nzbdbsearch(findcomic,mod_isssearch,ComicID,nzbprov_fix,ComicYear,ComicVersion)
bb = rsscheck.nzbdbsearch(findcomic, mod_isssearch, ComicID, nzbprov_fix, ComicYear, ComicVersion)
rss = "yes"
#if bb is not None: logger.fdebug("bb results: " + str(bb))
#this is the API calls
@ -474,7 +474,7 @@ def NZB_SEARCH(ComicName, IssueNumber, ComicYear, SeriesYear, Publisher, IssueDa
elif nzbprov == 'KAT':
cmname = re.sub("%20", " ", str(comsrc))
logger.fdebug("Sending request to [KAT] for " + str(cmname) + " : " + str(mod_isssearch))
bb = rsscheck.torrents(pickfeed='KAT',seriesname=cmname,issue=mod_isssearch)#cmname,issue=mod_isssearch)
bb = rsscheck.torrents(pickfeed='KAT', seriesname=cmname, issue=mod_isssearch)#cmname,issue=mod_isssearch)
rss = "no"
#if bb is not None: logger.fdebug("results: " + str(bb))
elif nzbprov != 'experimental':
@ -484,14 +484,14 @@ def NZB_SEARCH(ComicName, IssueNumber, ComicYear, SeriesYear, Publisher, IssueDa
findurl = "https://api.nzb.su/api?t=search&q=" + str(comsearch) + "&o=xml&cat=7030"
elif nzbprov == 'newznab':
#let's make sure the host has a '/' at the end, if not add it.
if host_newznab[len(host_newznab)-1:len(host_newznab)] != '/':
if host_newznab[len(host_newznab) -1:len(host_newznab)] != '/':
host_newznab_fix = str(host_newznab) + "/"
else: host_newznab_fix = host_newznab
findurl = str(host_newznab_fix) + "api?t=search&q=" + str(comsearch) + "&o=xml&cat=" + str(category_newznab)
if nzbprov != 'nzbx':
# helper function to replace apikey here so we avoid logging it ;)
findurl = findurl + "&apikey=" + str(apikey)
logsearch = helpers.apiremove(str(findurl),'nzb')
logsearch = helpers.apiremove(str(findurl), 'nzb')
logger.fdebug("search-url: " + str(logsearch))
### IF USENET_RETENTION is set, honour it
@ -658,9 +658,9 @@ def NZB_SEARCH(ComicName, IssueNumber, ComicYear, SeriesYear, Publisher, IssueDa
dateconv = email.utils.parsedate_tz(pubdate)
# convert it to a numeric time, then subtract the timezone difference (+/- GMT)
if dateconv[-1] is not None:
postdate_int = time.mktime(dateconv[:len(dateconv)-1]) - dateconv[-1]
postdate_int = time.mktime(dateconv[:len(dateconv) -1]) - dateconv[-1]
else:
postdate_int = time.mktime(dateconv[:len(dateconv)-1])
postdate_int = time.mktime(dateconv[:len(dateconv) -1])
#convert it to a Thu, 06 Feb 2014 00:00:00 format
issue_convert = datetime.datetime.strptime(stdate.rstrip(), '%Y-%m-%d')
# to get past different locale's os-dependent dates, let's convert it to a generic datetime format
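
email.utils.parsedate_tz returns a 10-tuple whose last element is the UTC offset in seconds (or None); slicing it off leaves the 9-tuple time.mktime expects, and subtracting the offset normalises the post date to GMT. The same conversion in isolation, with an invented pubdate:

import email.utils
import time

pubdate = 'Thu, 06 Feb 2014 12:00:00 -0500'
dateconv = email.utils.parsedate_tz(pubdate)
if dateconv[-1] is not None:
    postdate_int = time.mktime(dateconv[:len(dateconv) - 1]) - dateconv[-1]
else:
    postdate_int = time.mktime(dateconv[:len(dateconv) - 1])
print(postdate_int)
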
@ -674,13 +674,13 @@ def NZB_SEARCH(ComicName, IssueNumber, ComicYear, SeriesYear, Publisher, IssueDa
econv = email.utils.parsedate_tz(issconv)
#convert it to a numeric and drop the GMT/Timezone
try:
issuedate_int = time.mktime(econv[:len(econv)-1])
issuedate_int = time.mktime(econv[:len(econv) -1])
except OverflowError:
logger.fdebug('Unable to convert timestamp to integer format. Forcing things through.')
isyear = econv[1]
epochyr = '1970'
if int(isyear) <= int(epochyr):
tm = datetime.datetime(1970,1,1)
tm = datetime.datetime(1970, 1, 1)
issuedate_int = int(time.mktime(tm.timetuple()))
else:
continue
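
The OverflowError fallback above clamps pre-1970 issue dates to the epoch because time.mktime cannot represent them on many platforms. A standalone sketch of that guard (also catching ValueError, which some platforms raise instead):

import datetime
import time

def safe_epoch(timetuple9, isyear):
    try:
        return time.mktime(timetuple9)
    except (OverflowError, ValueError):
        #pre-epoch dates can't be represented; clamp to 1970-01-01 as above
        if int(isyear) <= 1970:
            tm = datetime.datetime(1970, 1, 1)
            return int(time.mktime(tm.timetuple()))
        raise

print(safe_epoch(datetime.datetime(2014, 2, 6).timetuple(), 2014))
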
@ -719,7 +719,7 @@ def NZB_SEARCH(ComicName, IssueNumber, ComicYear, SeriesYear, Publisher, IssueDa
# this is new - if title contains a '&' in the title it will assume the filename has ended at that point
# which causes false positives (ie. wolverine & the x-men becomes the x-men, which matches on x-men).
# 'the' is removed for comparisons later on
if '&' in cleantitle: cleantitle = re.sub('[\&]','and', cleantitle)
if '&' in cleantitle: cleantitle = re.sub('[\&]', 'and', cleantitle)
nzbname = cleantitle
@ -735,7 +735,7 @@ def NZB_SEARCH(ComicName, IssueNumber, ComicYear, SeriesYear, Publisher, IssueDa
vers4vol = "no"
versionfound = "no"
if any( ['cover only' in cleantitle.lower(), 'variant' in cleantitle.lower()] ):
if any(['cover only' in cleantitle.lower(), 'variant' in cleantitle.lower()]):
logger.fdebug("Ignoring title as Cover/Variant Only detected.")
cleantitle = "abcdefghijk 0 (1901).cbz"
continue
@ -961,7 +961,7 @@ def NZB_SEARCH(ComicName, IssueNumber, ComicYear, SeriesYear, Publisher, IssueDa
#changed this from '' to ' '
comic_iss_b4 = re.sub('[\-\:\,\?\!]', ' ', comic_andiss)
comic_iss_b4 = re.sub('\'', '', comic_iss_b4)
comic_iss = comic_iss_b4.replace('.',' ')
comic_iss = comic_iss_b4.replace('.', ' ')
#if issue_except: comic_iss = re.sub(issue_except.lower(), '', comic_iss)
logger.fdebug("adjusted nzb comic and issue: " + comic_iss)
@ -970,15 +970,15 @@ def NZB_SEARCH(ComicName, IssueNumber, ComicYear, SeriesYear, Publisher, IssueDa
#screwed up most search results with dognzb. Let's try to adjust.
#watchcomic_split = findcomic[findloop].split(None)
if splitit[(len(splitit)-1)].isdigit():
if splitit[(len(splitit) -1)].isdigit():
#compares - if the last digit and second last digit are #'s separated by spaces, assume a decimal
comic_iss = splitit[(len(splitit)-1)]
comic_iss = splitit[(len(splitit) -1)]
splitst = len(splitit) - 1
if splitit[(len(splitit)-2)].isdigit():
if splitit[(len(splitit) -2)].isdigit():
# for series that have a digit at the end, it screws up the logistics.
i = 1
chg_comic = splitit[0]
while (i < (len(splitit)-1)):
while (i < (len(splitit) -1)):
chg_comic = chg_comic + " " + splitit[i]
i+=1
logger.fdebug("chg_comic:" + str(chg_comic))
@ -993,13 +993,13 @@ def NZB_SEARCH(ComicName, IssueNumber, ComicYear, SeriesYear, Publisher, IssueDa
if chg_comic.upper() in findcomic_chksplit.upper():
logger.fdebug("series contains numerics...adjusting..")
else:
changeup = "." + splitit[(len(splitit)-1)]
changeup = "." + splitit[(len(splitit) -1)]
logger.fdebug("changeup to decimal: " + str(changeup))
comic_iss = splitit[(len(splitit)-2)] + "." + comic_iss
comic_iss = splitit[(len(splitit) -2)] + "." + comic_iss
splitst = len(splitit) - 2
else:
#if the issue is alphanumeric (ie. 15AU, 12A) it'll error.
tmpiss = splitit[(len(splitit)-1)]
tmpiss = splitit[(len(splitit) -1)]
i = 0
alphas = None
a_issno = None
@ -1007,7 +1007,7 @@ def NZB_SEARCH(ComicName, IssueNumber, ComicYear, SeriesYear, Publisher, IssueDa
if tmpiss[i].isalpha():
#take the first occurrence of an alpha in the string and carry it through
alphas = tmpiss[i:].rstrip()
a_issno = tmpiss[:i+1].rstrip()
a_issno = tmpiss[:i +1].rstrip()
break
i+=1
logger.fdebug("alphas: " + str(alphas))
@ -1022,13 +1022,13 @@ def NZB_SEARCH(ComicName, IssueNumber, ComicYear, SeriesYear, Publisher, IssueDa
#print 'splitit: ' + splitit[(len(splitit)-2)]
#print 'splitit: ' + splitit[(len(splitit)-1)]
#if there' a space between the issue & alpha, join them.
findstart = thisentry.find(splitit[(len(splitit)-1)])
findstart = thisentry.find(splitit[(len(splitit) -1)])
#print 'thisentry : ' + thisentry
#print 'decimal location : ' + str(findstart)
if thisentry[findstart-1] == '.':
comic_iss = splitit[(len(splitit)-2)] + '.' + splitit[(len(splitit)-1)]
if thisentry[findstart -1] == '.':
comic_iss = splitit[(len(splitit) -2)] + '.' + splitit[(len(splitit) -1)]
else:
comic_iss = splitit[(len(splitit)-2)] + splitit[(len(splitit)-1)]
comic_iss = splitit[(len(splitit) -2)] + splitit[(len(splitit) -1)]
logger.fdebug('comic_iss is : ' + str(comic_iss))
splitst = len(splitit) - 2
else:
@ -1042,7 +1042,7 @@ def NZB_SEARCH(ComicName, IssueNumber, ComicYear, SeriesYear, Publisher, IssueDa
# make sure that things like - in watchcomic are accounted for when comparing to nzb.
findcomic = re.sub('[\/]', ' ', findcomic)
watchcomic_split = helpers.cleanName(str(findcomic))
if '&' in watchcomic_split: watchcomic_split = re.sub('[/&]','and', watchcomic_split)
if '&' in watchcomic_split: watchcomic_split = re.sub('[/&]', 'and', watchcomic_split)
watchcomic_nonsplit = re.sub('[\-\:\,\.\?]', ' ', watchcomic_split)
watchcomic_nonsplit = re.sub('\'', '', watchcomic_nonsplit)
watchcomic_split = watchcomic_nonsplit.split(None)
@ -1075,7 +1075,7 @@ def NZB_SEARCH(ComicName, IssueNumber, ComicYear, SeriesYear, Publisher, IssueDa
logger.fdebug('volume detected - stripping and re-analyzing for volume label.')
if '.' in tmpsplit:
tmpsplit = re.sub('.', '', tmpsplit).strip()
tmpsplit = re.sub('vol','', tmpsplit.lower()).strip()
tmpsplit = re.sub('vol', '', tmpsplit.lower()).strip()
#if vol label set as 'Vol 4' it will obliterate the Vol, but pass over the '4' - set
#volfound to True so that it can loop back around.
if not tmpsplit.isdigit():
@ -1239,7 +1239,7 @@ def NZB_SEARCH(ComicName, IssueNumber, ComicYear, SeriesYear, Publisher, IssueDa
logger.fdebug("search-length: " + str(splitst))
logger.fdebug("Watchlist-length: " + str(len(watchcomic_split)))
if cvers == "true": splitst = splitst + 1
while ( n <= (splitst)-1 ):
while (n <= (splitst) -1):
logger.fdebug("splitit: " + str(splitit[n]))
logger.fdebug("scount : " + str(scount))
if n < (splitst) and n < len(watchcomic_split):
@ -1277,7 +1277,7 @@ def NZB_SEARCH(ComicName, IssueNumber, ComicYear, SeriesYear, Publisher, IssueDa
totalcnt = int(splitst)
logger.fdebug("splitit-len:" + str(totalcnt))
try:
spercent = (wordcnt/totalcnt) * 100
spercent = (wordcnt /totalcnt) * 100
except ZeroDivisionError:
spercent = 0
logger.fdebug("Match to series : " + str(spercent) + " %.")
@ -1351,11 +1351,11 @@ def NZB_SEARCH(ComicName, IssueNumber, ComicYear, SeriesYear, Publisher, IssueDa
foundcomic.append("yes")
if alt_nzbname is None or alt_nzbname == '':
logger.fdebug("Found matching comic...preparing to send to Updater with IssueID: " + str(IssueID) + " and nzbname: " + str(nzbname))
if '[RSS]' in tmpprov : tmpprov = re.sub('\[RSS\]','', tmpprov).strip()
if '[RSS]' in tmpprov: tmpprov = re.sub('\[RSS\]', '', tmpprov).strip()
updater.nzblog(IssueID, nzbname, ComicName, SARC=SARC, IssueArcID=IssueArcID, id=nzbid, prov=tmpprov)
else:
logger.fdebug("Found matching comic...preparing to send to Updater with IssueID: " + str(IssueID) + " and nzbname: " + str(nzbname) + '[' + alt_nzbname + ']')
if '[RSS]' in tmpprov : tmpprov = re.sub('\[RSS\]','', tmpprov).strip()
if '[RSS]' in tmpprov: tmpprov = re.sub('\[RSS\]', '', tmpprov).strip()
updater.nzblog(IssueID, nzbname, ComicName, SARC=SARC, IssueArcID=IssueArcID, id=nzbid, prov=tmpprov, alt_nzbname=alt_nzbname)
# #send out the notifications for the snatch.
notify_snatch(nzbname, sent_to, helpers.filesafe(modcomicname), comyear, IssueNumber, nzbprov)
@ -1543,7 +1543,7 @@ def provider_sequence(nzbprovider, torprovider, newznab_hosts):
# this is for nzb providers
for np in nzbprovider:
logger.fdebug('checking against nzb provider: ' + str(np))
if all( [ 'newznab' in np, pr_order[1].lower() in np.lower() ] ):
if all(['newznab' in np, pr_order[1].lower() in np.lower()]):
logger.fdebug('newznab match against: ' + str(np))
for newznab_host in newznab_hosts:
#logger.fdebug('comparing ' + str(pr_order[1]).lower() + ' against: ' + str(newznab_host[0]).lower())
@ -1573,7 +1573,7 @@ def provider_sequence(nzbprovider, torprovider, newznab_hosts):
logger.fdebug('provider order sequence is now to start with ' + pr_order[1] + ' at spot #' + str(pr_order[0]))
return prov_order,newznab_info
return prov_order, newznab_info
def nzbname_create(provider, title=None, info=None):
#the nzbname here is used when post-processing
@ -1605,7 +1605,7 @@ def nzbname_create(provider, title=None, info=None):
elif provider == '32P' or provider == 'KAT':
#filesafe the name cause people are idiots when they post sometimes.
nzbname = re.sub('\s{2,}',' ', helpers.filesafe(title)).strip()
nzbname = re.sub('\s{2,}', ' ', helpers.filesafe(title)).strip()
#let's change all space to decimals for simplicity
nzbname = re.sub(" ", ".", nzbname)
#gotta replace & or escape it
@ -1625,7 +1625,7 @@ def nzbname_create(provider, title=None, info=None):
nzbname = re.sub('.cbr', '', nzbname).strip()
nzbname = re.sub('.cbz', '', nzbname).strip()
nzbname = re.sub('[\.\_]', ' ', nzbname).strip()
nzbname = re.sub('\s+',' ', nzbname) #make sure we remove the extra spaces.
nzbname = re.sub('\s+', ' ', nzbname) #make sure we remove the extra spaces.
logger.fdebug('[SEARCHER] nzbname (\s): ' + nzbname)
nzbname = re.sub(' ', '.', nzbname)
logger.fdebug('[SEARCHER] end nzbname: ' + nzbname)
@ -1683,7 +1683,7 @@ def searcher(nzbprov, nzbname, comicinfo, link, IssueID, ComicID, tmpprov, direc
if nzbprov == 'newznab':
name_newznab = newznab[0].rstrip()
host_newznab = newznab[1].rstrip()
if host_newznab[len(host_newznab)-1:len(host_newznab)] != '/':
if host_newznab[len(host_newznab) -1:len(host_newznab)] != '/':
host_newznab_fix = str(host_newznab) + "/"
else:
host_newznab_fix = host_newznab
@ -1772,13 +1772,13 @@ def searcher(nzbprov, nzbname, comicinfo, link, IssueID, ComicID, tmpprov, direc
logger.fdebug('[FILENAME] filename (remove chars): ' + filen)
filen = re.sub('.cbr', '', filen).strip()
filen = re.sub('.cbz', '', filen).strip()
filen = re.sub('\s+',' ', filen) #make sure we remove the extra spaces.
filen = re.sub('\s+', ' ', filen) #make sure we remove the extra spaces.
logger.fdebug('[FILENAME] nzbname (\s): ' + filen)
filen = re.sub(' ', '.', filen)
logger.fdebug('[FILENAME] end nzbname: ' + filen)
if re.sub('.nzb','', filen.lower()).strip() != re.sub('.nzb','', nzbname.lower()).strip():
alt_nzbname = re.sub('.nzb','', filen).strip()
if re.sub('.nzb', '', filen.lower()).strip() != re.sub('.nzb', '', nzbname.lower()).strip():
alt_nzbname = re.sub('.nzb', '', filen).strip()
alt_nzbname = re.sub('[\s+]', ' ', alt_nzbname)
alt_nzbname = re.sub('[\s\_]', '.', alt_nzbname)
logger.info('filen: ' + alt_nzbname + ' -- nzbname: ' + nzbname + ' are not identical. Storing extra value as : ' + alt_nzbname)
@ -1834,7 +1834,7 @@ def searcher(nzbprov, nzbname, comicinfo, link, IssueID, ComicID, tmpprov, direc
pass
logger.fdebug('issues match!')
logger.info(u"Found " + ComicName + " (" + str(comyear) + ") issue: " + IssueNumber + " using " + str(tmpprov) )
logger.info(u"Found " + ComicName + " (" + str(comyear) + ") issue: " + IssueNumber + " using " + str(tmpprov))
logger.fdebug("link given by: " + str(nzbprov))
@ -1927,7 +1927,7 @@ def searcher(nzbprov, nzbname, comicinfo, link, IssueID, ComicID, tmpprov, direc
# changed to just work with direct links now...
tmpapi = mylar.SAB_HOST + "/api?apikey=" + mylar.SAB_APIKEY
logger.fdebug("send-to-SAB host &api initiation string : " + str(helpers.apiremove(tmpapi,'&')))
logger.fdebug("send-to-SAB host &api initiation string : " + str(helpers.apiremove(tmpapi, '&')))
SABtype = "&mode=addlocalfile&name="
tmpapi = tmpapi + SABtype
@ -1935,24 +1935,24 @@ def searcher(nzbprov, nzbname, comicinfo, link, IssueID, ComicID, tmpprov, direc
logger.fdebug("...selecting API type: " + str(tmpapi))
tmpapi = tmpapi + urllib.quote_plus(nzbpath)
logger.fdebug("...attaching nzb provider link: " + str(helpers.apiremove(tmpapi,'$')))
logger.fdebug("...attaching nzb provider link: " + str(helpers.apiremove(tmpapi, '$')))
# determine SAB priority
if mylar.SAB_PRIORITY:
tmpapi = tmpapi + "&priority=" + sabpriority
logger.fdebug("...setting priority: " + str(helpers.apiremove(tmpapi,'&')))
logger.fdebug("...setting priority: " + str(helpers.apiremove(tmpapi, '&')))
# if category is blank, let's adjust
if mylar.SAB_CATEGORY:
tmpapi = tmpapi + "&cat=" + mylar.SAB_CATEGORY
logger.fdebug("...attaching category: " + str(helpers.apiremove(tmpapi,'&')))
logger.fdebug("...attaching category: " + str(helpers.apiremove(tmpapi, '&')))
if mylar.POST_PROCESSING: #or mylar.RENAME_FILES:
if mylar.POST_PROCESSING_SCRIPT:
#this is relative to the SABnzbd script directory (ie. no path)
tmpapi = tmpapi + "&script=" + mylar.POST_PROCESSING_SCRIPT
else:
tmpapi = tmpapi + "&script=ComicRN.py"
logger.fdebug("...attaching rename script: " + str(helpers.apiremove(tmpapi,'&')))
logger.fdebug("...attaching rename script: " + str(helpers.apiremove(tmpapi, '&')))
#final build of send-to-SAB
logger.fdebug("Completed send-to-SAB link: " + str(helpers.apiremove(tmpapi,'&')))
logger.fdebug("Completed send-to-SAB link: " + str(helpers.apiremove(tmpapi, '&')))
logger.fdebug('sab-to-send:' + str(tmpapi))
@ -1996,11 +1996,11 @@ def searcher(nzbprov, nzbname, comicinfo, link, IssueID, ComicID, tmpprov, direc
#update the db on the snatch.
if alt_nzbname is None or alt_nzbname == '':
logger.fdebug("Found matching comic...preparing to send to Updater with IssueID: " + str(IssueID) + " and nzbname: " + str(nzbname))
if '[RSS]' in tmpprov : tmpprov = re.sub('\[RSS\]','', tmpprov).strip()
if '[RSS]' in tmpprov: tmpprov = re.sub('\[RSS\]', '', tmpprov).strip()
updater.nzblog(IssueID, nzbname, ComicName, SARC=None, IssueArcID=None, id=nzbid, prov=tmpprov)
else:
logger.fdebug("Found matching comic...preparing to send to Updater with IssueID: " + str(IssueID) + " and nzbname: " + str(nzbname) + ' [' + alt_nzbname + ']')
if '[RSS]' in tmpprov : tmpprov = re.sub('\[RSS\]','', tmpprov).strip()
if '[RSS]' in tmpprov: tmpprov = re.sub('\[RSS\]', '', tmpprov).strip()
updater.nzblog(IssueID, nzbname, ComicName, SARC=None, IssueArcID=None, id=nzbid, prov=tmpprov, alt_nzbname=alt_nzbname)
#send out notifications for on snatch after the updater incase notification fails (it would bugger up the updater/pp scripts)
notify_snatch(nzbname, sent_to, helpers.filesafe(modcomicname), comyear, IssueNumber, nzbprov)
@ -2013,24 +2013,24 @@ def notify_snatch(nzbname, sent_to, modcomicname, comyear, IssueNumber, nzbprov)
if mylar.PROWL_ENABLED and mylar.PROWL_ONSNATCH:
logger.info(u"Sending Prowl notification")
prowl = notifiers.PROWL()
prowl.notify(nzbname,"Download started using " + sent_to)
prowl.notify(nzbname, "Download started using " + sent_to)
if mylar.NMA_ENABLED and mylar.NMA_ONSNATCH:
logger.info(u"Sending NMA notification")
nma = notifiers.NMA()
nma.notify(snline=snline,snatched_nzb=nzbname,sent_to=sent_to,prov=nzbprov)
nma.notify(snline=snline, snatched_nzb=nzbname, sent_to=sent_to, prov=nzbprov)
if mylar.PUSHOVER_ENABLED and mylar.PUSHOVER_ONSNATCH:
logger.info(u"Sending Pushover notification")
thisline = 'Mylar has snatched: ' + nzbname + ' from ' + nzbprov + ' and has sent it to ' + sent_to
pushover = notifiers.PUSHOVER()
pushover.notify(thisline,snline)
pushover.notify(thisline, snline)
if mylar.BOXCAR_ENABLED and mylar.BOXCAR_ONSNATCH:
logger.info(u"Sending Boxcar notification")
boxcar = notifiers.BOXCAR()
boxcar.notify(snatched_nzb=nzbname,sent_to=sent_to,snline=snline)
boxcar.notify(snatched_nzb=nzbname, sent_to=sent_to, snline=snline)
if mylar.PUSHBULLET_ENABLED and mylar.PUSHBULLET_ONSNATCH:
logger.info(u"Sending Pushbullet notification")
pushbullet = notifiers.PUSHBULLET()
pushbullet.notify(snline=snline,snatched=nzbname,sent_to=sent_to,prov=nzbprov,method='POST')
pushbullet.notify(snline=snline, snatched=nzbname, sent_to=sent_to, prov=nzbprov, method='POST')
return
@ -2084,11 +2084,11 @@ def IssueTitleCheck(issuetitle, watchcomic_split, splitit, splitst, issue_firstw
logger.fdebug('possible decimal - referencing position from original title.')
chkme = orignzb.find(decit[0])
chkend = orignzb.find(decit[1], chkme + len(decit[0]))
chkspot = orignzb[chkme:chkend+1]
chkspot = orignzb[chkme:chkend +1]
print chkme, chkend
print chkspot
# we add +1 to the decit totals to account for the '.' that we assume sits between them.
if len(chkspot) == ( len(decit[0]) + len(decit[1]) + 1 ):
if len(chkspot) == (len(decit[0]) + len(decit[1]) + 1):
logger.fdebug('lengths match for possible decimal issue.')
if '.' in chkspot:
logger.fdebug('decimal located within : ' + str(chkspot))
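
The length comparison above checks that the two digit groups sit with exactly one character between them, which is then assumed to be the dropped '.'. A worked example on an invented title:

orignzb = 'Mega Man 3.5 (2013)'
decit = ['3', '5']  #candidate whole and decimal parts
chkme = orignzb.find(decit[0])
chkend = orignzb.find(decit[1], chkme + len(decit[0]))
chkspot = orignzb[chkme:chkend + 1]  #'3.5'
#lengths match only when a single character separates the two parts
print(len(chkspot) == len(decit[0]) + len(decit[1]) + 1)  #True
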
@ -2154,7 +2154,7 @@ def IssueTitleCheck(issuetitle, watchcomic_split, splitit, splitst, issue_firstw
logger.fdebug('isstitle_match count : ' + str(isstitle_match))
if isstitle_match > 0:
iss_calc = ( ( isstitle_match + misword ) / watch_split_count ) * 100
iss_calc = ((isstitle_match + misword) / watch_split_count) * 100
logger.fdebug('iss_calc: ' + str(iss_calc) + ' % with ' + str(misword) + ' unaccounted for words')
else:
iss_calc = 0
@ -2182,7 +2182,7 @@ def generate_id(nzbprov, link):
path_parts = url_parts[2].rpartition('/')
nzbtempid = path_parts[0].rpartition('/')
nzblen = len(nzbtempid)
nzbid = nzbtempid[nzblen-1]
nzbid = nzbtempid[nzblen -1]
elif nzbprov == '32P':
#32P just has the torrent id stored.
nzbid = link
@ -2200,7 +2200,7 @@ def generate_id(nzbprov, link):
elif nzbprov == 'dognzb':
url_parts = urlparse.urlparse(link)
path_parts = url_parts[2].rpartition('/')
nzbid = path_parts[0].rsplit('/',1)[1]
nzbid = path_parts[0].rsplit('/', 1)[1]
elif nzbprov == 'newznab':
#if in format of http://newznab/getnzb/<id>.nzb&i=1&r=apikey
tmpid = urlparse.urlparse(link)[4] #param 4 is the query string from the url.
@ -2209,7 +2209,7 @@ def generate_id(nzbprov, link):
else:
# for the geek in all of us...
st = tmpid.find('&id')
end = tmpid.find('&',st+1)
nzbid = re.sub('&id=','', tmpid[st:end]).strip()
end = tmpid.find('&', st +1)
nzbid = re.sub('&id=', '', tmpid[st:end]).strip()
return nzbid
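
generate_id recovers the download id from provider-specific link shapes; for the newznab '&id=' query-string case, the same extraction on a hypothetical link looks like this:

import re

link = 'http://indexer.example/getnzb?t=get&id=123456&apikey=secret'
tmpid = link[link.find('?') + 1:]  #the query string, as urlparse(link)[4] would return
st = tmpid.find('&id')
end = tmpid.find('&', st + 1)
nzbid = re.sub('&id=', '', tmpid[st:end]).strip()
print(nzbid)  #123456
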

View File

@ -29,7 +29,7 @@ def solicit(month, year):
mnloop = 0
upcoming = []
publishers = {'DC Comics':'DC Comics', 'DC\'s': 'DC Comics', 'Marvel':'Marvel Comics', 'Image':'Image Comics', 'IDW':'IDW Publishing', 'Dark Horse':'Dark Horse'}
publishers = {'DC Comics': 'DC Comics', 'DC\'s': 'DC Comics', 'Marvel': 'Marvel Comics', 'Image': 'Image Comics', 'IDW': 'IDW Publishing', 'Dark Horse': 'Dark Horse'}
# -- this is no longer needed (testing)
@ -82,7 +82,7 @@ def solicit(month, year):
#logger.info('datestring:' + datestring)
#logger.info('checking:' + pagelinks)
pageresponse = urllib2.urlopen ( pagelinks )
pageresponse = urllib2.urlopen (pagelinks)
soup = BeautifulSoup (pageresponse)
cntlinks = soup.findAll('h3')
lenlinks = len(cntlinks)
@ -103,7 +103,7 @@ def solicit(month, year):
headName = headt.findNext(text=True)
#print ('headName: ' + headName)
if 'Image' in headName: print 'IMAGE FOUND'
if not all( ['Marvel' in headName, 'DC' in headName, 'Image' in headName] ) and ('Solicitations' in headName or 'Solicits' in headName):
if not all(['Marvel' in headName, 'DC' in headName, 'Image' in headName]) and ('Solicitations' in headName or 'Solicits' in headName):
# test for month here (int(month) + 5)
if not any(d.get('month', None) == str(headName).lower() for d in monthlist):
for mt in monthlist:
@ -126,7 +126,7 @@ def solicit(month, year):
#publish.append( headName[:pubstart].strip() )
abc = headt.findAll('a', href=True)[0]
ID_som = abc['href'] #first instance will have the right link...
resultURL.append( ID_som )
resultURL.append(ID_som)
#print '(' + str(cnt) + ') [ ' + publish[cnt] + '] Link URL: ' + resultURL[cnt]
cnt+=1
@ -138,17 +138,17 @@ def solicit(month, year):
if cnt == 0:
return #break # no results means, end it
loopthis = (cnt-1)
loopthis = (cnt -1)
#this loops through each 'found' solicit page
#shipdate = str(month_string) + '-' + str(year) - not needed.
while ( loopthis >= 0 ):
while (loopthis >= 0):
#print 'loopthis is : ' + str(loopthis)
#print 'resultURL is : ' + str(resultURL[loopthis])
shipdate = str(resultmonth[loopthis]) + '-' + str(resultyear[loopthis])
upcoming += populate(resultURL[loopthis], publish[loopthis], shipdate)
loopthis -=1
logger.info( str(len(upcoming)) + ' upcoming issues discovered.' )
logger.info(str(len(upcoming)) + ' upcoming issues discovered.')
newfl = mylar.CACHE_DIR + "/future-releases.txt"
newtxtfile = open(newfl, 'wb')
@ -165,7 +165,7 @@ def solicit(month, year):
newtxtfile.close()
logger.fdebug( 'attempting to populate future upcoming...' )
logger.fdebug('attempting to populate future upcoming...')
mylardb = os.path.join(mylar.DATA_DIR, "mylar.db")
@ -204,11 +204,11 @@ def solicit(month, year):
mylar.weeklypull.pullitcheck(futurepull="yes")
#.end
def populate(link,publisher,shipdate):
def populate(link, publisher, shipdate):
#this is the secondary url call to populate
input = 'http://www.comicbookresources.com/' + link
#print 'checking ' + str(input)
response = urllib2.urlopen ( input )
response = urllib2.urlopen (input)
soup = BeautifulSoup (response)
abc = soup.findAll('p')
lenabc = len(abc)
@ -247,7 +247,7 @@ def populate(link,publisher,shipdate):
if prev_chk == True:
tempName = titlet.findNext(text=True)
if not any( [' TPB' in tempName, 'HC' in tempName, 'GN-TPB' in tempName, 'for $1' in tempName.lower(), 'subscription variant' in tempName.lower(), 'poster' in tempName.lower() ] ):
if not any([' TPB' in tempName, 'HC' in tempName, 'GN-TPB' in tempName, 'for $1' in tempName.lower(), 'subscription variant' in tempName.lower(), 'poster' in tempName.lower()]):
if '#' in tempName[:50]:
#tempName = tempName.replace(u'.',u"'")
tempName = tempName.encode('ascii', 'replace') #.decode('utf-8')
@ -255,8 +255,8 @@ def populate(link,publisher,shipdate):
tempName = tempName.replace('???', ' ')
stissue = tempName.find('#')
endissue = tempName.find(' ', stissue)
if tempName[stissue+1] == ' ': #if issue has space between # and number, adjust.
endissue = tempName.find(' ', stissue+2)
if tempName[stissue +1] == ' ': #if issue has space between # and number, adjust.
endissue = tempName.find(' ', stissue +2)
if endissue == -1: endissue = len(tempName)
issue = tempName[stissue:endissue].lstrip(' ')
if ':'in issue: issue = re.sub(':', '', issue).rstrip()
@ -269,15 +269,15 @@ def populate(link,publisher,shipdate):
#print ('multiple issues detected. Splitting.')
ststart = issue.find('-')
issue1 = issue[:ststart]
issue2 = '#' + str(issue[ststart+1:])
issue2 = '#' + str(issue[ststart +1:])
if '&' in exinfo:
#print ('multiple issues detected. Splitting.')
ststart = exinfo.find('&')
issue1 = issue # this detects fine
issue2 = '#' + str(exinfo[ststart+1:])
issue2 = '#' + str(exinfo[ststart +1:])
if '& ' in issue2: issue2 = re.sub("&\\b", "", issue2)
exinfo = exinfo.replace(exinfo[ststart+1:len(issue2)], '').strip()
exinfo = exinfo.replace(exinfo[ststart +1:len(issue2)], '').strip()
if exinfo == '&': exinfo = 'N/A'
comic = tempName[:stissue].strip()
@ -289,11 +289,11 @@ def populate(link,publisher,shipdate):
issuedate = shipdate
if 'on sale' in str(titlet).lower():
onsale_start = str(titlet).lower().find('on sale') + 8
onsale_end = str(titlet).lower().find('<br>',onsale_start)
onsale_end = str(titlet).lower().find('<br>', onsale_start)
thedate = str(titlet)[onsale_start:onsale_end]
m = None
basemonths = {'january':'1','jan':'1','february':'2','feb':'2','march':'3','mar':'3','april':'4','apr':'4','may':'5','june':'6','july':'7','august':'8','aug':'8','september':'9','sept':'9','october':'10','oct':'10','november':'11','nov':'11','december':'12','dec':'12'}
basemonths = {'january': '1', 'jan': '1', 'february': '2', 'feb': '2', 'march': '3', 'mar': '3', 'april': '4', 'apr': '4', 'may': '5', 'june': '6', 'july': '7', 'august': '8', 'aug': '8', 'september': '9', 'sept': '9', 'october': '10', 'oct': '10', 'november': '11', 'nov': '11', 'december': '12', 'dec': '12'}
for month in basemonths:
if month in thedate.lower():
m = basemonths[month]
@ -302,7 +302,7 @@ def populate(link,publisher,shipdate):
if m is not None:
theday = len(month) + 1 # account for space between month & day
thedaystart = thedate[theday:(theday+2)].strip() # day numeric won't exceed 2
thedaystart = thedate[theday:(theday +2)].strip() # day numeric won't exceed 2
if len(str(thedaystart)) == 1:
thedaystart = '0' + str(thedaystart)
if len(str(m)) == 1:
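
The on-sale handling above maps a month name to its number, then zero-pads both day and month before building the date string. A trimmed illustration (only one month in the map; the real basemonths table also carries abbreviations like 'feb'):

thedate = 'february 4 2015'  #invented on-sale text
basemonths = {'february': '2'}
m = None
for month in basemonths:
    if month in thedate.lower():
        m = basemonths[month]
        theday = len(month) + 1  #skip the month name plus one space
        thedaystart = thedate[theday:theday + 2].strip()  #day never exceeds 2 digits
        break
if m is not None:
    if len(thedaystart) == 1:
        thedaystart = '0' + thedaystart
    if len(m) == 1:
        m = '0' + m
    print(m + '-' + thedaystart)  #02-04
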
@ -318,7 +318,7 @@ def populate(link,publisher,shipdate):
upcome.append({
'Shipdate': issuedate,
'Publisher': publisher.upper(),
'Issue': re.sub('#', '',issue1).lstrip(),
'Issue': re.sub('#', '', issue1).lstrip(),
'Comic': comic.upper(),
'Extra': exinfo.upper()
})

View File

@ -41,9 +41,9 @@ def dbUpdate(ComicIDList=None, calledfrom=None):
recentstatus = 'Unknown'
elif comlist['ForceContinuing'] == 1:
recentstatus = 'Continuing'
elif 'present' in comlist['ComicPublished'].lower() or ( helpers.today()[:4] in comlist['LatestDate']):
elif 'present' in comlist['ComicPublished'].lower() or (helpers.today()[:4] in comlist['LatestDate']):
latestdate = comlist['LatestDate']
c_date = datetime.date(int(latestdate[:4]),int(latestdate[5:7]),1)
c_date = datetime.date(int(latestdate[:4]), int(latestdate[5:7]), 1)
n_date = datetime.date.today()
recentchk = (n_date - c_date).days
if comlist['NewPublish']:
@ -101,7 +101,7 @@ def dbUpdate(ComicIDList=None, calledfrom=None):
if CV_EXcomicid['variloop'] == '99':
mismatch = "yes"
if ComicID[:1] == "G": mylar.importer.GCDimport(ComicID)
else: importer.addComictoDB(ComicID,mismatch)
else: importer.addComictoDB(ComicID, mismatch)
else:
if mylar.CV_ONETIMER == 1:
logger.fdebug("CV_OneTimer option enabled...")
@ -151,7 +151,7 @@ def dbUpdate(ComicIDList=None, calledfrom=None):
logger.fdebug("Refreshing the series and pulling in new data using only CV.")
if whack == False:
mylar.importer.addComictoDB(ComicID,mismatch,calledfrom='dbupdate',annload=annload)
mylar.importer.addComictoDB(ComicID, mismatch, calledfrom='dbupdate', annload=annload)
#reload the annuals here.
issues_new = myDB.select('SELECT * FROM issues WHERE ComicID=?', [ComicID])
@ -198,7 +198,7 @@ def dbUpdate(ComicIDList=None, calledfrom=None):
newVAL = {"Status": issue['Status']}
if newVAL['Status'] == None:
datechk = re.sub('-','', newissue['ReleaseDate']).strip() # converts date to 20140718 format
datechk = re.sub('-', '', newissue['ReleaseDate']).strip() # converts date to 20140718 format
if mylar.AUTOWANT_ALL:
newVAL = {"Status": "Wanted"}
elif int(datechk) >= int(nowtime) and mylar.AUTOWANT_UPCOMING:
@ -263,10 +263,10 @@ def dbUpdate(ComicIDList=None, calledfrom=None):
forceRescan(ComicID)
else:
mylar.importer.addComictoDB(ComicID,mismatch,annload=annload)
mylar.importer.addComictoDB(ComicID, mismatch, annload=annload)
else:
mylar.importer.addComictoDB(ComicID,mismatch)
mylar.importer.addComictoDB(ComicID, mismatch)
cnt +=1
time.sleep(5) #pause for 5 secs so dont hammer CV and get 500 error
@ -336,7 +336,7 @@ def upcoming_update(ComicID, ComicName, IssueNumber, IssueDate, forcecheck=None,
og_status = None
logger.fdebug(adjComicName + ' Issue: ' + str(IssueNumber) + ' not present in listings to mark for download...updating comic and adding to Upcoming Wanted Releases.')
# we need to either decrease the total issue count, OR indicate that an issue is upcoming.
upco_results = myDB.select("SELECT COUNT(*) FROM UPCOMING WHERE ComicID=?",[ComicID])
upco_results = myDB.select("SELECT COUNT(*) FROM UPCOMING WHERE ComicID=?", [ComicID])
upco_iss = upco_results[0][0]
#logger.info("upco_iss: " + str(upco_iss))
if int(upco_iss) > 0:
@ -353,7 +353,7 @@ def upcoming_update(ComicID, ComicName, IssueNumber, IssueDate, forcecheck=None,
if hours > 5 or forcecheck == 'yes':
pullupd = "yes"
logger.fdebug('Now Refreshing comic ' + ComicName + ' to make sure it is up-to-date')
if ComicID[:1] == "G": mylar.importer.GCDimport(ComicID,pullupd)
if ComicID[:1] == "G": mylar.importer.GCDimport(ComicID, pullupd)
else: mylar.importer.updateissuedata(ComicID, ComicName, calledfrom='weeklycheck')#mylar.importer.addComictoDB(ComicID,mismatch,pullupd)
else:
logger.fdebug('It has not been longer than 5 hours since we last did this...we will wait so we do not hammer things.')
@ -376,7 +376,7 @@ def upcoming_update(ComicID, ComicName, IssueNumber, IssueDate, forcecheck=None,
#check for 'out-of-whack' series here.
whackness = dbUpdate([ComicID], calledfrom='weekly')
if whackness == True:
if any( [issuechk['Status'] == 'Downloaded', issuechk['Status'] == 'Archived', issuechk['Status'] == 'Snatched'] ):
if any([issuechk['Status'] == 'Downloaded', issuechk['Status'] == 'Archived', issuechk['Status'] == 'Snatched']):
logger.fdebug('Forcibly maintaining status of : ' + og_status + ' for #' + issuechk['Issue_Number'] + ' to ensure integrity.')
logger.fdebug('Comic series has an incorrect total count. Forcibly refreshing series to ensure data is current.')
dbUpdate([ComicID])
@ -394,32 +394,32 @@ def upcoming_update(ComicID, ComicName, IssueNumber, IssueDate, forcecheck=None,
control = {"IssueID": issuechk['IssueID']}
newValue['IssueID'] = issuechk['IssueID']
if og_status == "Snatched":
values = { "Status": "Snatched"}
values = {"Status": "Snatched"}
newValue['Status'] = "Snatched"
elif og_status == "Downloaded":
values = { "Status": "Downloaded"}
values = {"Status": "Downloaded"}
newValue['Status'] = "Downloaded"
#if the status is Downloaded and it's on the pullist - let's mark it so everyone can bask in the glory
elif og_status == "Wanted":
values = { "Status": "Wanted"}
values = {"Status": "Wanted"}
newValue['Status'] = "Wanted"
elif og_status == "Archived":
values = { "Status": "Archived"}
values = {"Status": "Archived"}
newValue['Status'] = "Archived"
elif og_status == 'Failed':
if mylar.FAILED_DOWNLOAD_HANDLING:
if mylar.FAILED_AUTO:
values = { "Status": "Wanted" }
values = {"Status": "Wanted"}
newValue['Status'] = "Wanted"
else:
values = { "Status": "Failed" }
values = {"Status": "Failed"}
newValue['Status'] = "Failed"
else:
values = { "Status": "Skipped" }
values = {"Status": "Skipped"}
newValue['Status'] = "Skipped"
else:
values = { "Status": "Skipped"}
values = {"Status": "Skipped"}
newValue['Status'] = "Skipped"
#was in wrong place :(
else:
@ -472,7 +472,7 @@ def upcoming_update(ComicID, ComicName, IssueNumber, IssueDate, forcecheck=None,
else:
myDB.upsert("issues", values, control)
if any( [og_status == 'Downloaded', og_status == 'Archived', og_status == 'Snatched', og_status == 'Wanted', newValue['Status'] == 'Wanted'] ):
if any([og_status == 'Downloaded', og_status == 'Archived', og_status == 'Snatched', og_status == 'Wanted', newValue['Status'] == 'Wanted']):
logger.fdebug('updating Pull-list to reflect status change: ' + og_status + '[' + newValue['Status'] + ']')
if og_status != 'Skipped':
downstats = {"Status": og_status,
@ -485,7 +485,7 @@ def upcoming_update(ComicID, ComicName, IssueNumber, IssueDate, forcecheck=None,
return downstats
def weekly_update(ComicName,IssueNumber,CStatus,CID,futurepull=None,altissuenumber=None):
def weekly_update(ComicName, IssueNumber, CStatus, CID, futurepull=None, altissuenumber=None):
if futurepull:
logger.fdebug('future_update of table : ' + str(ComicName) + ' #:' + str(IssueNumber) + ' to a status of ' + str(CStatus))
else:
@ -499,11 +499,11 @@ def weekly_update(ComicName,IssueNumber,CStatus,CID,futurepull=None,altissuenumb
# added CStatus to update status flags on Pullist screen
myDB = db.DBConnection()
if futurepull is None:
issuecheck = myDB.selectone("SELECT * FROM weekly WHERE COMIC=? AND ISSUE=?", [ComicName,IssueNumber]).fetchone()
issuecheck = myDB.selectone("SELECT * FROM weekly WHERE COMIC=? AND ISSUE=?", [ComicName, IssueNumber]).fetchone()
else:
issuecheck = myDB.selectone("SELECT * FROM future WHERE COMIC=? AND ISSUE=?", [ComicName,IssueNumber]).fetchone()
issuecheck = myDB.selectone("SELECT * FROM future WHERE COMIC=? AND ISSUE=?", [ComicName, IssueNumber]).fetchone()
if issuecheck is not None:
controlValue = { "COMIC": str(ComicName),
controlValue = {"COMIC": str(ComicName),
"ISSUE": str(IssueNumber)}
try:
@ -547,7 +547,7 @@ def no_searchresults(ComicID):
# when there's a mismatch between CV & GCD - let's change the status to
# something other than 'Loaded'
myDB = db.DBConnection()
controlValue = { "ComicID": ComicID}
controlValue = {"ComicID": ComicID}
newValue = {"Status": "Error",
"LatestDate": "Error",
"LatestIssue": "Error"}
@ -685,7 +685,7 @@ def foundsearch(ComicID, IssueID, mode=None, down=None, provider=None, SARC=None
#this will update the weeklypull list immediately after snatching to reflect the new status.
#-is ugly, should be linked directly to other table (IssueID should be populated in weekly pull at this point hopefully).
chkit = myDB.selectone("SELECT * FROM weekly WHERE ComicID=? AND IssueID=?",[ComicID, IssueID]).fetchone()
chkit = myDB.selectone("SELECT * FROM weekly WHERE ComicID=? AND IssueID=?", [ComicID, IssueID]).fetchone()
if chkit is not None:
@ -739,7 +739,7 @@ def foundsearch(ComicID, IssueID, mode=None, down=None, provider=None, SARC=None
myDB.upsert("issues", newValue, controlValue)
#this will update the weeklypull list immediately after post-processing to reflect the new status.
chkit = myDB.selectone("SELECT * FROM weekly WHERE ComicID=? AND IssueID=? AND Status='Snatched'",[ComicID, IssueID]).fetchone()
chkit = myDB.selectone("SELECT * FROM weekly WHERE ComicID=? AND IssueID=? AND Status='Snatched'", [ComicID, IssueID]).fetchone()
if chkit is not None:
@ -751,7 +751,7 @@ def foundsearch(ComicID, IssueID, mode=None, down=None, provider=None, SARC=None
logger.info(module + ' Updating Status (' + downstatus + ') now complete for ' + ComicName + ' issue: ' + IssueNum)
return
def forceRescan(ComicID,archive=None,module=None):
def forceRescan(ComicID, archive=None, module=None):
if module is None:
module = ''
module += '[FILE-RESCAN]'
@ -771,7 +771,7 @@ def forceRescan(ComicID,archive=None,module=None):
if ascan['ReleaseComicName'] not in altnames:
altnames += ascan['ReleaseComicName'] + '!!' + ascan['ReleaseComicID'] + '##'
altnames = altnames[:-2]
logger.info(module + ' Now checking files for ' + rescan['ComicName'] + ' (' + str(rescan['ComicYear']) + ') in ' + rescan['ComicLocation'] )
logger.info(module + ' Now checking files for ' + rescan['ComicName'] + ' (' + str(rescan['ComicYear']) + ') in ' + rescan['ComicLocation'])
fca = []
if archive is None:
tmpval = filechecker.listFiles(dir=rescan['ComicLocation'], watchcomic=rescan['ComicName'], Publisher=rescan['ComicPublisher'], AlternateSearch=altnames)
@ -783,7 +783,7 @@ def forceRescan(ComicID,archive=None,module=None):
logger.fdebug(module + 'dir: ' + rescan['ComicLocation'])
logger.fdebug(module + 'os.path.basename: ' + os.path.basename(rescan['ComicLocation']))
pathdir = os.path.join(mylar.MULTIPLE_DEST_DIRS, os.path.basename(rescan['ComicLocation']))
logger.info(module + ' Now checking files for ' + rescan['ComicName'] + ' (' + str(rescan['ComicYear']) + ') in :' + pathdir )
logger.info(module + ' Now checking files for ' + rescan['ComicName'] + ' (' + str(rescan['ComicYear']) + ') in :' + pathdir)
tmpv = filechecker.listFiles(dir=pathdir, watchcomic=rescan['ComicName'], Publisher=rescan['ComicPublisher'], AlternateSearch=altnames)
logger.fdebug(module + 'tmpv filecount: ' + str(tmpv['comiccount']))
comiccnt += int(tmpv['comiccount'])
@ -872,7 +872,7 @@ def forceRescan(ComicID,archive=None,module=None):
logger.fdebug(module + ' temploc: ' + str(temploc))
if 'annual' not in temploc.lower():
#remove the extension here
extensions = ('.cbr','.cbz','.cb7')
extensions = ('.cbr', '.cbz', '.cb7')
if temploc.lower().endswith(extensions):
logger.fdebug(module + ' Removed extension for issue: ' + str(temploc))
temploc = temploc[:-4]
@ -1221,7 +1221,7 @@ def forceRescan(ComicID,archive=None,module=None):
#here we need to change the status of the ones we DIDN'T FIND above since the loop only hits on FOUND issues.
update_iss = []
tmpsql = "SELECT * FROM issues WHERE ComicID=? AND IssueID not in ({seq})".format(seq=','.join(['?']*(len(issID_to_ignore)-1)))
tmpsql = "SELECT * FROM issues WHERE ComicID=? AND IssueID not in ({seq})".format(seq=','.join(['?'] *(len(issID_to_ignore) -1)))
chkthis = myDB.select(tmpsql, issID_to_ignore)
# chkthis = None
if chkthis is None:
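
The tmpsql line above builds a parameterised IN (...) clause; issID_to_ignore appears to carry the ComicID first and the issue IDs after it, which is why the join needs one '?' per remaining element. For example:

issID_to_ignore = ['1234', '9001', '9002']  #hypothetical ComicID plus two IssueIDs
seq = ','.join(['?'] * (len(issID_to_ignore) - 1))
tmpsql = "SELECT * FROM issues WHERE ComicID=? AND IssueID not in ({seq})".format(seq=seq)
print(tmpsql)  #... ComicID=? AND IssueID not in (?,?)
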

View File

@ -26,7 +26,7 @@ branch = "development"
def runGit(args):
if mylar.GIT_PATH:
git_locations = ['"'+mylar.GIT_PATH+'"']
git_locations = ['"' +mylar.GIT_PATH +'"']
else:
git_locations = ['git']
@ -38,7 +38,7 @@ def runGit(args):
for cur_git in git_locations:
cmd = cur_git+' '+args
cmd = cur_git +' ' +args
try:
logger.debug('Trying to execute: "' + cmd + '" with shell in ' + mylar.PROG_DIR)
@ -172,7 +172,7 @@ def update():
logger.info('No update available, not updating')
logger.info('Output: ' + str(output))
elif line.endswith('Aborting.'):
logger.error('Unable to update from git: '+line)
logger.error('Unable to update from git: ' +line)
logger.info('Output: ' + str(output))
else:
@ -182,10 +182,10 @@ def update():
version_path = os.path.join(mylar.PROG_DIR, 'version.txt')
try:
logger.info('Downloading update from: '+tar_download_url)
logger.info('Downloading update from: ' +tar_download_url)
data = urllib2.urlopen(tar_download_url)
except (IOError, urllib2.URLError):
logger.error("Unable to retrieve new version from "+tar_download_url+", can't update")
logger.error("Unable to retrieve new version from " +tar_download_url +", can't update")
return
#try sanitizing the name here...
@ -210,13 +210,13 @@ def update():
# Find update dir name
update_dir_contents = [x for x in os.listdir(update_dir) if os.path.isdir(os.path.join(update_dir, x))]
if len(update_dir_contents) != 1:
logger.error(u"Invalid update data, update failed: "+str(update_dir_contents))
logger.error(u"Invalid update data, update failed: " +str(update_dir_contents))
return
content_dir = os.path.join(update_dir, update_dir_contents[0])
# walk temp folder and move files to main folder
for dirname, dirnames, filenames in os.walk(content_dir):
dirname = dirname[len(content_dir)+1:]
dirname = dirname[len(content_dir) +1:]
for curfile in filenames:
old_path = os.path.join(content_dir, dirname, curfile)
new_path = os.path.join(mylar.PROG_DIR, dirname, curfile)
@ -231,5 +231,5 @@ def update():
ver_file.write(mylar.LATEST_VERSION)
ver_file.close()
except IOError, e:
logger.error(u"Unable to write current version to version.txt, update not complete: "+ex(e))
logger.error(u"Unable to write current version to version.txt, update not complete: " +ex(e))
return

File diff suppressed because it is too large

View File

@ -81,27 +81,27 @@ def initialize(options):
'/': {
'tools.staticdir.root': os.path.join(mylar.PROG_DIR, 'data')
},
'/interfaces':{
'/interfaces': {
'tools.staticdir.on': True,
'tools.staticdir.dir': "interfaces"
},
'/images':{
'/images': {
'tools.staticdir.on': True,
'tools.staticdir.dir': "images"
},
'/css':{
'/css': {
'tools.staticdir.on': True,
'tools.staticdir.dir': "css"
},
'/js':{
'/js': {
'tools.staticdir.on': True,
'tools.staticdir.dir': "js"
},
'/favicon.ico':{
'/favicon.ico': {
'tools.staticfile.on': True,
'tools.staticfile.filename': os.path.join(os.path.abspath(os.curdir), 'images' + os.sep + 'favicon.ico')
},
'/cache':{
'/cache': {
'tools.staticdir.on': True,
'tools.staticdir.dir': mylar.CACHE_DIR,
'tools.auth_basic.on': False
@ -113,7 +113,7 @@ def initialize(options):
'tools.auth_basic.on': True,
'tools.auth_basic.realm': 'Mylar',
'tools.auth_basic.checkpassword': cherrypy.lib.auth_basic.checkpassword_dict(
{options['http_username']:options['http_password']})
{options['http_username']: options['http_password']})
})
conf['/api'] = {'tools.auth_basic.on': False}

View File

@ -42,7 +42,7 @@ def pullit(forcecheck=None):
pulldate = '00000000'
else:
pulldate = pull_date['SHIPDATE']
except (sqlite3.OperationalError, TypeError),msg:
except (sqlite3.OperationalError, TypeError), msg:
logger.info(u"Error Retrieving weekly pull list - attempting to adjust")
myDB.action("DROP TABLE weekly")
myDB.action("CREATE TABLE IF NOT EXISTS weekly (SHIPDATE text, PUBLISHER text, ISSUE text, COMIC VARCHAR(150), EXTRA text, STATUS text, ComicID text, IssueID text)")
@ -56,7 +56,7 @@ def pullit(forcecheck=None):
PULLURL = 'http://www.previewsworld.com/shipping/newreleases.txt'
#Prepare the Substitute name switch for pulllist to comic vine conversion
substitutes = os.path.join(mylar.DATA_DIR,"substitutes.csv")
substitutes = os.path.join(mylar.DATA_DIR, "substitutes.csv")
if not os.path.exists(substitutes):
logger.debug('no substitutes.csv file located - not performing substitutions on weekly pull list')
substitute_check = False
@ -70,7 +70,7 @@ def pullit(forcecheck=None):
reader = csv.reader(f, delimiter='|')
for row in reader:
if not row[0].startswith('#'):
logger.fdebug("Substitutes file read : "+str(row))
logger.fdebug("Substitutes file read : " +str(row))
shortrep.append(row[0])
longrep.append(row[1])
f.close()
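substitutes.csv is read as a pipe-delimited two-column file (short form | long form) with '#' comment lines skipped. A minimal sketch of that parse, assuming the same file layout:

import csv

def load_substitutes(path):
    shortrep, longrep = [], []
    with open(path) as f:
        reader = csv.reader(f, delimiter='|')
        for row in reader:
            # Lines starting with '#' are comments in the substitutes file.
            if row and not row[0].startswith('#'):
                shortrep.append(row[0])
                longrep.append(row[1])
    return shortrep, longrep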
@ -121,7 +121,7 @@ def pullit(forcecheck=None):
#denotes issues that contain special characters within that would normally fail when checked if issue ONLY contained numerics.
#add freely, just lowercase and exclude decimals (they get stripped during comparisons)
specialissues = {'au','ai','inh','now'}
specialissues = {'au', 'ai', 'inh', 'now'}
pub = "COMICS"
prevcomic = ""
@ -202,7 +202,7 @@ def pullit(forcecheck=None):
break
else:
#logger.info('chkchk not in i - i.findcomics: ' + str(i.find("COMICS")) + ' length: ' + str(len(i.strip())))
if all( [i.find("COMICS") < 1, len(i.strip()) == 6 ] ) or ("GRAPHIC NOVELS" in i):
if all([i.find("COMICS") < 1, len(i.strip()) == 6]) or ("GRAPHIC NOVELS" in i):
# if i.find("COMICS") < 1 and (len(i.strip()) == 6 or "& GRAPHIC NOVELS" in i):
pub = "COMICS"
#logger.info("i.find comics & len =6 : " + pub)
@ -213,7 +213,7 @@ def pullit(forcecheck=None):
break
else:
#logger.info('yesyes not found: ' + yesyes + ' i.findcomics: ' + str(i.find("COMICS")) + ' length: ' + str(len(i.strip())))
if all( [i.find("COMICS") < 1, len(i.strip()) == 6 ] ) or ("GRAPHIC NOVELS" in i):
if all([i.find("COMICS") < 1, len(i.strip()) == 6]) or ("GRAPHIC NOVELS" in i):
#logger.info("format string not comics & i.find < 1: " + pub)
pub = "COMICS"
break
@ -240,9 +240,9 @@ def pullit(forcecheck=None):
#this is to ensure we don't get any comps added by removing them entirely (ie. #1-4, etc)
x = None
try:
x = float( re.sub('#','', issname[n].strip()) )
x = float(re.sub('#', '', issname[n].strip()))
except ValueError, e:
if any(d in re.sub(r'[^a-zA-Z0-9]','',issname[n]).strip() for d in specialissues):
if any(d in re.sub(r'[^a-zA-Z0-9]', '', issname[n]).strip() for d in specialissues):
issue = issname[n]
else:
logger.fdebug('Comp issue set detected as : ' + str(issname[n]) + '. Ignoring.')
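The try/except above is how the parser tells a single issue from a composite like '#1-4': attempt a float conversion, and on ValueError fall back to the whitelist of special suffixes. Roughly, as a sketch:

import re

SPECIAL_ISSUES = {'au', 'ai', 'inh', 'now'}

def is_single_issue(token):
    # '#3.1' converts cleanly; '#1-4' raises ValueError and is rejected
    # unless it carries a known special suffix such as '7AU'.
    try:
        float(re.sub('#', '', token.strip()))
        return True
    except ValueError:
        stripped = re.sub(r'[^a-zA-Z0-9]', '', token).strip().lower()
        return any(d in stripped for d in SPECIAL_ISSUES)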
@ -250,7 +250,7 @@ def pullit(forcecheck=None):
else:
issue = issname[n]
if 'ongoing' not in issname[n-1].lower() and '(vu)' not in issname[n-1].lower():
if 'ongoing' not in issname[n -1].lower() and '(vu)' not in issname[n -1].lower():
#print ("issue found : " + issname[n])
comicend = n - 1
else:
@ -264,7 +264,7 @@ def pullit(forcecheck=None):
while (n < comicend + 1):
comicnm = comicnm + " " + issname[n]
n+=1
comcnm = re.sub('1 FOR \$1','', comicnm).strip()
comcnm = re.sub('1 FOR \$1', '', comicnm).strip()
#logger.info("Comicname: " + str(comicnm) )
#get remainder
try:
@ -295,7 +295,7 @@ def pullit(forcecheck=None):
# issue_decimal = re.compile(r'[^\d.]+')
# issue = issue_decimal.sub('', str(issue))
# else: issue = re.sub('#','', issue)
issue = re.sub('#','', issue)
issue = re.sub('#', '', issue)
#issue = re.sub("\D", "", str(issue))
#store the previous comic/issue for comparison to filter out duplicate issues/alt covers
#print ("Previous Comic & Issue: " + str(prevcomic) + "--" + str(previssue))
@ -311,7 +311,7 @@ def pullit(forcecheck=None):
while (n < issnamec):
#find the type of non-issue (TP,HC,GN,SC,OS,PI etc)
for cm in cmty:
if "ONE" in issue and "SHOT" in issname[n+1]: issue = "OS"
if "ONE" in issue and "SHOT" in issname[n +1]: issue = "OS"
if cm == (issname[n]):
if issname[n] == 'PI':
issue = 'NA'
@ -364,11 +364,11 @@ def pullit(forcecheck=None):
#-- remove html tags when alt_pull is enabled
if mylar.ALT_PULL:
if '&amp;' in comicnm:
comicnm = re.sub('&amp;','&',comicnm).strip()
comicnm = re.sub('&amp;', '&', comicnm).strip()
if '&amp;' in pub:
pub = re.sub('&amp;','&',pub).strip()
pub = re.sub('&amp;', '&', pub).strip()
if '&amp;' in comicrm:
comicrm = re.sub('&amp;','&',comicrm).strip()
comicrm = re.sub('&amp;', '&', comicrm).strip()
#--start duplicate comic / issue chk
# pullist has shortforms of a series' title sometimes and causes problems
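The ALT_PULL branch above unescapes '&amp;' by hand in three places. If other entities ever show up in the scraped feed, Python 2's HTMLParser offers a one-pass alternative (undocumented but long-stable; a suggestion, not what the code currently does):

from HTMLParser import HTMLParser

def unescape(text):
    # Handles &amp;, &lt;, &gt;, &#39; and friends in one pass.
    return HTMLParser().unescape(text)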
@ -377,11 +377,11 @@ def pullit(forcecheck=None):
if substitute_check == True:
#Step through the list - storing an index
for repindex,repcheck in enumerate(shortrep):
for repindex, repcheck in enumerate(shortrep):
if len(comicnm) >= len(repcheck):
#if the leftmost chars match the short text then replace them with the long text
if comicnm[:len(repcheck)]==repcheck:
logger.fdebug("Switch worked on "+comicnm + " replacing " + str(repcheck) + " with " + str(longrep[repindex]))
logger.fdebug("Switch worked on " +comicnm + " replacing " + str(repcheck) + " with " + str(longrep[repindex]))
comicnm = re.sub(repcheck, longrep[repindex], comicnm)
for excl in excludes:
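The substitution pass replaces a short-form series title with its long form only when the short form is a strict prefix of the pull-list name. The diff keeps the regex-based re.sub; a plain string splice, sketched here as an alternative, avoids treating regex metacharacters in repcheck as a pattern:

def apply_substitutes(comicnm, shortrep, longrep):
    for repindex, repcheck in enumerate(shortrep):
        # Only replace when the pull-list title starts with the short form.
        if len(comicnm) >= len(repcheck) and comicnm[:len(repcheck)] == repcheck:
            comicnm = longrep[repindex] + comicnm[len(repcheck):]
    return comicnm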
@ -425,11 +425,11 @@ def pullit(forcecheck=None):
logger.debug("Row: %s" % row)
controlValueDict = {'COMIC': row[3],
'ISSUE': row[2],
'EXTRA': row[4] }
'EXTRA': row[4]}
newValueDict = {'SHIPDATE': row[0],
'PUBLISHER': row[1],
'STATUS': row[5],
'COMICID': None }
'COMICID': None}
myDB.upsert("weekly", newValueDict, controlValueDict)
#cursor.execute("INSERT INTO weekly VALUES (?,?,?,?,?,?,null);", row)
except Exception, e:
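The controlValueDict/newValueDict split above is Mylar's upsert convention: the control dict names the key columns, the new dict carries the payload. A hypothetical equivalent in plain sqlite3, assuming the weekly schema created earlier in this file:

import sqlite3  # conn below is expected to be a sqlite3.Connection

def upsert_weekly(conn, control, new):
    # Try an update keyed on the control columns; insert when nothing matched.
    cur = conn.execute(
        "UPDATE weekly SET SHIPDATE=?, PUBLISHER=?, STATUS=?, ComicID=? "
        "WHERE COMIC=? AND ISSUE=? AND EXTRA=?",
        (new['SHIPDATE'], new['PUBLISHER'], new['STATUS'], new['COMICID'],
         control['COMIC'], control['ISSUE'], control['EXTRA']))
    if cur.rowcount == 0:
        conn.execute(
            "INSERT INTO weekly (SHIPDATE, PUBLISHER, ISSUE, COMIC, EXTRA, STATUS, ComicID) "
            "VALUES (?, ?, ?, ?, ?, ?, ?)",
            (new['SHIPDATE'], new['PUBLISHER'], control['ISSUE'],
             control['COMIC'], control['EXTRA'], new['STATUS'], new['COMICID']))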
@ -442,8 +442,8 @@ def pullit(forcecheck=None):
logger.info(u"Weekly Pull List successfully loaded.")
#let's delete the files
pullpath = str(mylar.CACHE_DIR) + "/"
os.remove( str(pullpath) + "Clean-newreleases.txt" )
os.remove( str(pullpath) + "newreleases.txt" )
os.remove(str(pullpath) + "Clean-newreleases.txt")
os.remove(str(pullpath) + "newreleases.txt")
pullitcheck(forcecheck=forcecheck)
def pullitcheck(comic1off_name=None, comic1off_id=None, forcecheck=None, futurepull=None, issue=None):
@ -528,7 +528,7 @@ def pullitcheck(comic1off_name=None, comic1off_id=None, forcecheck=None, futurep
latest_day = '01'
else:
latest_day = latestdate[8:]
c_date = datetime.date(int(latestdate[:4]),int(latestdate[5:7]),int(latest_day))
c_date = datetime.date(int(latestdate[:4]), int(latestdate[5:7]), int(latest_day))
n_date = datetime.date.today()
logger.fdebug("c_date : " + str(c_date) + " ... n_date : " + str(n_date))
recentchk = (n_date - c_date).days
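recentchk above is just the day delta between the latest store date and today, with the day defaulted to '01' when the stored date is only year-month. As a sketch:

import datetime

def days_since(latestdate):
    # latestdate is 'YYYY-MM-DD' or 'YYYY-MM'; default the day to 01.
    latest_day = latestdate[8:] if len(latestdate) > 7 else '01'
    c_date = datetime.date(int(latestdate[:4]), int(latestdate[5:7]), int(latest_day))
    return (datetime.date.today() - c_date).days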
@ -570,8 +570,8 @@ def pullitcheck(comic1off_name=None, comic1off_id=None, forcecheck=None, futurep
comicid.append(alt_cid)
pubdate.append(week['ComicPublished'])
latestissue.append(week['LatestIssue'])
lines.append(a_list[w+wc].strip())
unlines.append(a_list[w+wc].strip())
lines.append(a_list[w +wc].strip())
unlines.append(a_list[w +wc].strip())
logger.fdebug('loading in Alternate name for ' + str(cleanedname))
n+=1
wc+=1
@ -579,8 +579,8 @@ def pullitcheck(comic1off_name=None, comic1off_id=None, forcecheck=None, futurep
else:
logger.fdebug("Determined to not be a Continuing series at this time.")
cnt = int(w-1)
cntback = int(w-1)
cnt = int(w -1)
cntback = int(w -1)
kp = []
ki = []
kc = []
@ -860,7 +860,7 @@ def loaditup(comicname, comicid, issue, chktype):
logger.fdebug('Store date of 0000-00-00 returned for ' + str(typedisplay) + ' # ' + str(issue) + '. Refreshing series to see if valid date present')
mismatch = 'no'
#issuerecheck = mylar.importer.addComictoDB(comicid,mismatch,calledfrom='weekly',issuechk=issue_number,issuetype=chktype)
issuerecheck = mylar.importer.updateissuedata(comicid,comicname,calledfrom='weekly',issuechk=issue_number,issuetype=chktype)
issuerecheck = mylar.importer.updateissuedata(comicid, comicname, calledfrom='weekly', issuechk=issue_number, issuetype=chktype)
if issuerecheck is not None:
for il in issuerecheck:
#this is only one record..
@ -887,7 +887,7 @@ def loaditup(comicname, comicid, issue, chktype):
return dataissue
def checkthis(datecheck,datestatus,usedate):
def checkthis(datecheck, datestatus, usedate):
logger.fdebug('Now checking date comparison using an issue store date of ' + str(datecheck))
logger.fdebug('Using a compare date (usedate) of ' + str(usedate))
@ -921,14 +921,14 @@ def weekly_singlecopy(comicid, issuenum, file, path, module=None, issueid=None):
logger.fdebug(module + ' Weekly pull list detected as : ' + str(pulldate))
except (sqlite3.OperationalError, TypeError),msg:
except (sqlite3.OperationalError, TypeError), msg:
logger.info(module + ' Error determining current weekly pull-list date - you should refresh the pull-list manually probably.')
return
if issueid is None:
chkit = myDB.selectone('SELECT * FROM weekly WHERE ComicID=? AND ISSUE=?',[comicid, issuenum]).fetchone()
chkit = myDB.selectone('SELECT * FROM weekly WHERE ComicID=? AND ISSUE=?', [comicid, issuenum]).fetchone()
else:
chkit = myDB.selectone('SELECT * FROM weekly WHERE ComicID=? AND IssueID=?',[comicid, issueid]).fetchone()
chkit = myDB.selectone('SELECT * FROM weekly WHERE ComicID=? AND IssueID=?', [comicid, issueid]).fetchone()
if chkit is None:
logger.fdebug(module + ' ' + file + ' is not on the weekly pull-list or it is a one-off download that is not supported as of yet.')
@ -956,7 +956,7 @@ def weekly_singlecopy(comicid, issuenum, file, path, module=None, issueid=None):
logger.error(module + ' Could not copy ' + str(srcfile) + ' to ' + str(desfile))
return
logger.info(module + ' Sucessfully copied to ' + desfile.encode('utf-8').strip() )
logger.info(module + ' Sucessfully copied to ' + desfile.encode('utf-8').strip())
if mylar.SEND2READ:
send2read(comicid, issueid, issuenum)
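weekly_singlecopy ends by copying the post-processed file into the weekly folder and, optionally, queueing it for send2read. The copy step reduces to a guarded shutil call; a sketch with a stand-in logger rather than Mylar's module logger:

import logging
import shutil

log = logging.getLogger(__name__)

def copy_to_weekly(srcfile, desfile):
    try:
        shutil.copy2(srcfile, desfile)
    except (IOError, OSError):
        log.error('Could not copy %s to %s', srcfile, desfile)
        return False
    log.info('Successfully copied to %s', desfile)
    return True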
@ -966,8 +966,8 @@ def send2read(comicid, issueid, issuenum):
if mylar.SEND2READ:
logger.info(module + " Send to Reading List enabled for new pulls. Adding to your readlist in the status of 'Added'")
if issueid is None:
chkthis = myDB.selectone('SELECT * FROM issues WHERE ComicID=? AND Int_IssueNumber=?',[comicid, helpers.issuedigits(issuenum)]).fetchone()
annchk = myDB.selectone('SELECT * FROM annuals WHERE ComicID=? AND Int_IssueNumber=?',[comicid, helpers.issuedigits(issuenum)]).fetchone()
chkthis = myDB.selectone('SELECT * FROM issues WHERE ComicID=? AND Int_IssueNumber=?', [comicid, helpers.issuedigits(issuenum)]).fetchone()
annchk = myDB.selectone('SELECT * FROM annuals WHERE ComicID=? AND Int_IssueNumber=?', [comicid, helpers.issuedigits(issuenum)]).fetchone()
if chkthis is None and annchk is None:
logger.warn(module + ' Unable to locate issue within your series watchlist.')
return
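Both lookups above key on Int_IssueNumber, the integer form that helpers.issuedigits() produces so '1', '#1' and '1.0' all land on the same row. A rough sketch of that normalization (the real helper handles many more suffix cases):

import re

def issuedigits(issnum):
    # Scale by 1000 so decimal issues become integers:
    # '12' -> 12000, '#12.5' -> 12500.
    clean = re.sub(r'[^0-9.]', '', str(issnum))
    try:
        return int(float(clean) * 1000)
    except ValueError:
        return 999999999  # sentinel for unparseable issue numbers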
@ -1045,8 +1045,8 @@ def future_check():
logger.info('More than one result returned - this may have to be a manual add')
matches = []
for sr in searchresults:
tmpsername = re.sub('[\'\*\^\%\$\#\@\!\-\/\,\.\:\(\)]','', ser['ComicName']).strip()
tmpsrname = re.sub('[\'\*\^\%\$\#\@\!\-\/\,\.\:\(\)]','', sr['name']).strip()
tmpsername = re.sub('[\'\*\^\%\$\#\@\!\-\/\,\.\:\(\)]', '', ser['ComicName']).strip()
tmpsrname = re.sub('[\'\*\^\%\$\#\@\!\-\/\,\.\:\(\)]', '', sr['name']).strip()
if tmpsername.lower() == tmpsrname.lower() and len(tmpsername) <= len(tmpsrname):
logger.info('name & lengths matched : ' + sr['name'])
if str(sr['comicyear']) == str(theissdate):
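The matching rule in future_check strips punctuation from both names, compares case-insensitively, and requires the result name to be at least as long as the watched name before the year check. Condensed into a sketch:

import re

PUNCT = r'[\'\*\^\%\$\#\@\!\-\/\,\.\:\(\)]'

def names_match(watch_name, result_name):
    a = re.sub(PUNCT, '', watch_name).strip()
    b = re.sub(PUNCT, '', result_name).strip()
    return a.lower() == b.lower() and len(a) <= len(b)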