FIXED: versioncheck via git, post-processing, adding new series, searching & downloading (possibly)

evilhero 2020-01-08 17:00:43 -05:00
parent 69acf99781
commit 41a6e98fe5
9 changed files with 81 additions and 73 deletions

View File

@@ -438,7 +438,7 @@ class PostProcessor(object):
alt_db = myDB.select("SELECT * FROM Comics WHERE AlternateSearch != 'None'")
if alt_db is not None:
for aldb in alt_db:
as_d = filechecker.FileChecker(AlternateSearch=helpers.conversion(aldb['AlternateSearch']))
as_d = filechecker.FileChecker(AlternateSearch=aldb['AlternateSearch']) #helpers.conversion(aldb['AlternateSearch']))
as_dinfo = as_d.altcheck()
alt_list.append({'AS_Alt': as_dinfo['AS_Alt'],
'AS_Tuple': as_dinfo['AS_Tuple'],
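
A note on the pattern repeated throughout this commit: helpers.conversion() was a Python 2 shim that coerced byte strings to unicode before they were compared or joined into paths. Under Python 3 these database and filename values arrive as str already, so the call sites are dropped (kept as trailing comments for reference). A minimal sketch of what such a Python 2 shim typically looked like — a hypothetical reconstruction, not the exact Mylar helper:

    def conversion(value):
        # Python 2 only: 'str' here means bytes; decode to unicode so later
        # comparisons and os.path.join calls don't mix bytes and text.
        if isinstance(value, str):
            try:
                value = value.decode('utf-8')
            except UnicodeDecodeError:
                value = value.decode('windows-1252')
        return value

On Python 3 that same body would try to decode text (str has no .decode), which is why the commit removes the call sites rather than porting the helper.
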
@@ -450,13 +450,13 @@ class PostProcessor(object):
for fl in filelist['comiclist']:
self.matched = False
as_d = filechecker.FileChecker()
as_dinfo = as_d.dynamic_replace(helpers.conversion(fl['series_name']))
as_dinfo = as_d.dynamic_replace(fl['series_name']) #helpers.conversion(fl['series_name']))
orig_seriesname = as_dinfo['mod_seriesname']
mod_seriesname = as_dinfo['mod_seriesname']
loopchk = []
if fl['alt_series'] is not None:
logger.fdebug('%s Alternate series naming detected: %s' % (module, fl['alt_series']))
as_sinfo = as_d.dynamic_replace(helpers.conversion(fl['alt_series']))
as_sinfo = as_d.dynamic_replace(fl['alt_series']) #helpers.conversion(fl['alt_series']))
mod_altseriesname = as_sinfo['mod_seriesname']
if all([mylar.CONFIG.ANNUALS_ON, 'annual' in mod_altseriesname.lower()]) or all([mylar.CONFIG.ANNUALS_ON, 'special' in mod_altseriesname.lower()]):
mod_altseriesname = re.sub('annual', '', mod_altseriesname, flags=re.I).strip()
@@ -497,10 +497,10 @@ class PostProcessor(object):
annchk = 'no'
if fl['sub']:
logger.fdebug('%s[SUB: %s][CLOCATION: %s]' % (module, fl['sub'], fl['comiclocation']))
clocation = os.path.join(fl['comiclocation'], fl['sub'], helpers.conversion(fl['comicfilename']))
clocation = os.path.join(fl['comiclocation'], fl['sub'], fl['comicfilename']) #helpers.conversion(fl['comicfilename']))
else:
logger.fdebug('%s[CLOCATION] %s' % (module, fl['comiclocation']))
clocation = os.path.join(fl['comiclocation'],helpers.conversion(fl['comicfilename']))
clocation = os.path.join(fl['comiclocation'],fl['comicfilename']) #helpers.conversion(fl['comicfilename']))
annualtype = None
if annchk == 'yes':
if 'Annual' in csi['ReleaseComicName']:
@@ -558,7 +558,7 @@ class PostProcessor(object):
wv_seriesyear = wv['ComicYear']
wv_comicversion = wv['ComicVersion']
wv_publisher = wv['ComicPublisher']
wv_total = wv['Total']
wv_total = int(wv['Total'])
if mylar.CONFIG.FOLDER_SCAN_LOG_VERBOSE:
logger.fdebug('Queuing to Check: %s [%s] -- %s' % (wv['ComicName'], wv['ComicYear'], wv['ComicID']))
@@ -833,7 +833,7 @@ class PostProcessor(object):
if datematch == 'True':
if watchmatch['sub']:
logger.fdebug('%s[SUB: %s][CLOCATION: %s]' % (module, watchmatch['sub'], watchmatch['comiclocation']))
clocation = os.path.join(watchmatch['comiclocation'], watchmatch['sub'], helpers.conversion(watchmatch['comicfilename']))
clocation = os.path.join(watchmatch['comiclocation'], watchmatch['sub'], watchmatch['comicfilename']) #helpers.conversion(watchmatch['comicfilename']))
if not os.path.exists(clocation):
scrubs = re.sub(watchmatch['comiclocation'], '', watchmatch['sub']).strip()
if scrubs[:2] == '//' or scrubs[:2] == '\\':
@@ -846,7 +846,7 @@ class PostProcessor(object):
if self.issueid is not None and os.path.isfile(watchmatch['comiclocation']):
clocation = watchmatch['comiclocation']
else:
clocation = os.path.join(watchmatch['comiclocation'],helpers.conversion(watchmatch['comicfilename']))
clocation = os.path.join(watchmatch['comiclocation'],watchmatch['comicfilename']) #helpers.conversion(watchmatch['comicfilename']))
annualtype = None
if annchk == 'yes':
if 'Annual' in isc['ReleaseComicName']:
@@ -879,9 +879,9 @@ class PostProcessor(object):
if datematch == 'True':
xmld = filechecker.FileChecker()
xmld1 = xmld.dynamic_replace(helpers.conversion(cs['ComicName']))
xmld1 = xmld.dynamic_replace(cs['ComicName']) #helpers.conversion(cs['ComicName']))
xseries = xmld1['mod_seriesname'].lower()
xmld2 = xmld.dynamic_replace(helpers.conversion(watchmatch['series_name']))
xmld2 = xmld.dynamic_replace(watchmatch['series_name']) #helpers.conversion(watchmatch['series_name']))
xfile = xmld2['mod_seriesname'].lower()
if re.sub('\|', '', xseries) == re.sub('\|', '', xfile):
@@ -893,7 +893,7 @@ class PostProcessor(object):
continue #break
if datematch == 'True':
logger.fdebug('%s[SUCCESSFUL MATCH: %s-%s] Match verified for %s' % (module, cs['ComicName'], cs['ComicID'], helpers.conversion(fl['comicfilename'])))
logger.fdebug('%s[SUCCESSFUL MATCH: %s-%s] Match verified for %s' % (module, cs['ComicName'], cs['ComicID'], fl['comicfilename'])) #helpers.conversion(fl['comicfilename'])))
break
elif self.matched is True:
logger.warn('%s[MATCH: %s - %s] We matched by name for this series, but cannot find a corresponding issue number in the series list.' % (module, cs['ComicName'], cs['ComicID']))
@@ -949,7 +949,7 @@ class PostProcessor(object):
"ComicVersion": av['Volume'],
"ComicID": av['ComicID'],
"Publisher": av['IssuePublisher'],
"Total": av['TotalIssues'], # this will return the total issues in the arc (not needed for this)
"Total": int(av['TotalIssues']), # this will return the total issues in the arc (not needed for this)
"Type": av['Type'],
"IsArc": True}
})
@@ -1189,7 +1189,7 @@ class PostProcessor(object):
passit = True
if passit == False:
tmpfilename = helpers.conversion(arcmatch['comicfilename'])
tmpfilename = arcmatch['comicfilename'] #helpers.conversion(arcmatch['comicfilename'])
if arcmatch['sub']:
clocation = os.path.join(arcmatch['comiclocation'], arcmatch['sub'], tmpfilename)
else:
@@ -1242,7 +1242,7 @@ class PostProcessor(object):
"LatestDate": None,
"ComicVersion": None,
"Publisher": ofl['PUBLISHER'],
"Total": None,
"Total": 0,
"Type": ofl['format'],
"ComicID": ofl['ComicID'],
"IsArc": False}})
@@ -1297,7 +1297,7 @@ class PostProcessor(object):
if temploc is not None and fcdigit == helpers.issuedigits(ofv['Issue_Number']) or all([temploc is None, helpers.issuedigits(ofv['Issue_Number']) == '1']):
if watchmatch['sub']:
clocation = os.path.join(watchmatch['comiclocation'], watchmatch['sub'], helpers.conversion(watchmatch['comicfilename']))
clocation = os.path.join(watchmatch['comiclocation'], watchmatch['sub'], watchmatch['comicfilename']) #helpers.conversion(watchmatch['comicfilename']))
if not os.path.exists(clocation):
scrubs = re.sub(watchmatch['comiclocation'], '', watchmatch['sub']).strip()
if scrubs[:2] == '//' or scrubs[:2] == '\\':
@@ -1310,7 +1310,7 @@ class PostProcessor(object):
if self.issueid is not None and os.path.isfile(watchmatch['comiclocation']):
clocation = watchmatch['comiclocation']
else:
clocation = os.path.join(watchmatch['comiclocation'],helpers.conversion(watchmatch['comicfilename']))
clocation = os.path.join(watchmatch['comiclocation'],watchmatch['comicfilename']) #helpers.conversion(watchmatch['comicfilename']))
oneoff_issuelist.append({"ComicLocation": clocation,
"ComicID": ofv['ComicID'],
"IssueID": ofv['IssueID'],
@@ -1322,7 +1322,7 @@ class PostProcessor(object):
logger.fdebug('%s No corresponding issue # in dB found for %s # %s' % (module, ofv['ComicName'], ofv['Issue_Number']))
continue
logger.fdebug('%s[SUCCESSFUL MATCH: %s-%s] Match Verified for %s' % (module, ofv['ComicName'], ofv['ComicID'], helpers.conversion(fl['comicfilename'])))
logger.fdebug('%s[SUCCESSFUL MATCH: %s-%s] Match Verified for %s' % (module, ofv['ComicName'], ofv['ComicID'], fl['comicfilename'])) #helpers.conversion(fl['comicfilename'])))
self.matched = True
break

View File

@@ -337,7 +337,7 @@ class GC(object):
cf_cookievalue, cf_user_agent = s.get_tokens(mainlink, headers=self.headers, timeout=30)
t = s.get(link, verify=True, cookies=cf_cookievalue, headers=self.headers, stream=True, timeout=30)
filename = os.path.basename(urllib.parse.unquote(t.url).decode('utf-8'))
filename = os.path.basename(urllib.parse.unquote(t.url)) #.decode('utf-8'))
if 'GetComics.INFO' in filename:
filename = re.sub('GetComics.INFO', '', filename, re.I).strip()
@@ -348,7 +348,7 @@ class GC(object):
if 'go.php-urls' not in link:
link = re.sub('go.php-url=', 'go.php-urls', link)
t = s.get(link, verify=True, cookies=cf_cookievalue, headers=self.headers, stream=True, timeout=30)
filename = os.path.basename(urllib.parse.unquote(t.url).decode('utf-8'))
filename = os.path.basename(urllib.parse.unquote(t.url)) #.decode('utf-8'))
if 'GetComics.INFO' in filename:
filename = re.sub('GetComics.INFO', '', filename, re.I).strip()
try:
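
Unrelated to this commit but visible in both hunks above: re.sub('GetComics.INFO', '', filename, re.I) passes re.I as the positional count argument of re.sub, not as flags, so the substitution is not actually case-insensitive. flags=re.I — as used for the annual stripping in PostProcessor earlier in this commit — would express the intent.
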
@@ -385,9 +385,9 @@ class GC(object):
path = os.path.join(mylar.CONFIG.DDL_LOCATION, filename)
if t.headers.get('content-encoding') == 'gzip': #.get('Content-Encoding') == 'gzip':
buf = StringIO(t.content)
f = gzip.GzipFile(fileobj=buf)
#if t.headers.get('content-encoding') == 'gzip': #.get('Content-Encoding') == 'gzip':
# buf = StringIO(t.content)
# f = gzip.GzipFile(fileobj=buf)
if resume is not None:
with open(path, 'ab') as f:
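
The manual gzip handling is commented out because requests already inflates a gzip/deflate Content-Encoding before exposing the body: r.content and iter_content() yield decoded bytes, so wrapping them in gzip.GzipFile would double-decompress. A short sketch of the streaming write this code relies on, stripped of the cloudflare cookie handling, where link and path stand in for the values built above:

    import requests

    r = requests.get(link, stream=True, timeout=30)
    # requests has already undone any Content-Encoding: gzip at this point.
    with open(path, 'wb') as f:
        for chunk in r.iter_content(chunk_size=1024):
            if chunk:
                f.write(chunk)
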

View File

@@ -692,7 +692,7 @@ def rename_param(comicid, comicname, issue, ofilename, comicyear=None, issueid=N
return rename_this
def apiremove(apistring, type):
def apiremove(apistring, apitype):
if type == 'nzb':
value_regex = re.compile("(?<=apikey=)(?P<value>.*?)(?=$)")
#match = value_regex.search(apistring)
@@ -700,10 +700,10 @@ def apiremove(apistring, type):
else:
#type = $ to denote end of string
#type = & to denote up until next api variable
value_regex1 = re.compile("(?<=%26i=1%26r=)(?P<value>.*?)(?=" + str(type) +")")
value_regex1 = re.compile("(?<=%26i=1%26r=)(?P<value>.*?)(?=" + str(apitype) +")")
#match = value_regex.search(apistring)
apiremoved1 = value_regex1.sub("xUDONTNEEDTOKNOWTHISx", apistring)
value_regex = re.compile("(?<=apikey=)(?P<value>.*?)(?=" + str(type) +")")
value_regex = re.compile("(?<=apikey=)(?P<value>.*?)(?=" + str(apitype) +")")
apiremoved = value_regex.sub("xUDONTNEEDTOKNOWTHISx", apiremoved1)
#need to remove the urlencoded-portions as well in future
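
The rename from type to apitype stops the parameter from shadowing the builtin type(), but note the first branch above still tests type == 'nzb', which now compares the builtin itself against a string and can never be true. A sketch with the rename carried through, assuming that branch was meant to be renamed as well:

    import re

    def apiremove(apistring, apitype):
        if apitype == 'nzb':
            value_regex = re.compile("(?<=apikey=)(?P<value>.*?)(?=$)")
            return value_regex.sub("xUDONTNEEDTOKNOWTHISx", apistring)
        # apitype is '$' (end of string) or '&' (up to the next api variable)
        value_regex1 = re.compile("(?<=%26i=1%26r=)(?P<value>.*?)(?=" + str(apitype) + ")")
        apiremoved1 = value_regex1.sub("xUDONTNEEDTOKNOWTHISx", apistring)
        value_regex = re.compile("(?<=apikey=)(?P<value>.*?)(?=" + str(apitype) + ")")
        return value_regex.sub("xUDONTNEEDTOKNOWTHISx", apiremoved1)
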
@@ -1474,8 +1474,14 @@ def filesafe(comic):
except TypeError:
u_comic = comic.encode('ASCII', 'ignore').strip()
comicname_filesafe = re.sub('[\:\'\"\,\?\!\\\]', '', u_comic)
comicname_filesafe = re.sub('[\/\*]', '-', comicname_filesafe)
logger.info('comic-type: %s' % type(u_comic))
if type(u_comic) != bytes:
comicname_filesafe = re.sub('[\:\'\"\,\?\!\\\]', '', u_comic)
comicname_filesafe = re.sub('[\/\*]', '-', comicname_filesafe)
else:
comicname_filesafe = re.sub('[\:\'\"\,\?\!\\\]', '', u_comic.decode('utf-8'))
comicname_filesafe = re.sub('[\/\*]', '-', comicname_filesafe)
return comicname_filesafe
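
The new branch handles u_comic still being bytes after the ASCII-ignore fallback. An equivalent, slightly tighter form of the same logic, assuming utf-8 is the right codec for any bytes input here:

    if isinstance(u_comic, bytes):
        u_comic = u_comic.decode('utf-8')
    comicname_filesafe = re.sub(r"[:'\",?!\\]", '', u_comic)
    comicname_filesafe = re.sub(r'[/*]', '-', comicname_filesafe)
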
@@ -2838,7 +2844,7 @@ def torrentinfo(issueid=None, torrent_hash=None, download=False, monitor=False):
else:
shell_cmd = sys.executable
curScriptName = shell_cmd + ' ' + str(mylar.CONFIG.AUTO_SNATCH_SCRIPT).decode("string_escape")
curScriptName = shell_cmd + ' ' + str(mylar.CONFIG.AUTO_SNATCH_SCRIPT) #.decode("string_escape")
if torrent_files > 1:
downlocation = torrent_folder.encode('utf-8')
else:
@@ -3343,7 +3349,7 @@ def script_env(mode, vars):
else:
shell_cmd = sys.executable
curScriptName = shell_cmd + ' ' + runscript.decode("string_escape")
curScriptName = shell_cmd + ' ' + runscript #.decode("string_escape")
logger.fdebug("snatch script detected...enabling: " + str(curScriptName))
script_cmd = shlex.split(curScriptName)
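
Both .decode("string_escape") calls go away because the string_escape codec only ever existed in Python 2. If escape-unquoting were still needed for these script paths (it likely never was), the nearest Python 3 equivalent operates on bytes via unicode_escape — a hedged sketch, not part of the commit:

    import codecs

    # Python 2: runscript.decode("string_escape")
    # Python 3 near-equivalent:
    unescaped = codecs.decode(runscript.encode('utf-8'), 'unicode_escape')
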
@@ -3719,9 +3725,9 @@ def getImage(comicid, url, issueid=None):
logger.warn('Unable to download image from CV URL link: %s [Status Code returned: %s]' % (url, statuscode))
coversize = 0
else:
if r.headers.get('Content-Encoding') == 'gzip':
buf = StringIO(r.content)
f = gzip.GzipFile(fileobj=buf)
#if r.headers.get('Content-Encoding') == 'gzip':
# buf = StringIO(r.content)
# f = gzip.GzipFile(fileobj=buf)
with open(coverfile, 'wb') as f:
for chunk in r.iter_content(chunk_size=1024):
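
Same rationale as the download path in GC above: requests inflates a gzip-encoded body before r.content or iter_content() hands it over, so the commented-out GzipFile wrapper was at best redundant and at worst double-decompressing.
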

View File

@@ -1,4 +1,3 @@
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# This file is part of Mylar.

View File

@@ -458,7 +458,7 @@ def libraryScan(dir=None, append=False, ComicID=None, ComicName=None, cron=None,
logger.fdebug('[' + mod_series + '] Adding to the import-queue!')
isd = filechecker.FileChecker()
is_dyninfo = isd.dynamic_replace(helpers.conversion(mod_series))
is_dyninfo = isd.dynamic_replace(mod_series) #helpers.conversion(mod_series))
logger.fdebug('Dynamic-ComicName: ' + is_dyninfo['mod_seriesname'])
#impid = dispname + '-' + str(result_comyear) + '-' + str(comiss) #com_NAME + "-" + str(result_comyear) + "-" + str(comiss)
@@ -500,7 +500,7 @@ def libraryScan(dir=None, append=False, ComicID=None, ComicName=None, cron=None,
"issuenumber": issuenumber, #issuenumber,
"volume": issuevolume,
"comfilename": comfilename,
"comlocation": helpers.conversion(comlocation)
"comlocation": comlocation #helpers.conversion(comlocation)
})
cnt+=1
#logger.fdebug('import_by_ids: ' + str(import_by_comicids))
@@ -661,16 +661,16 @@ def scanLibrary(scan=None, queue=None):
#these all have related ComicID/IssueID's...just add them as is.
controlValue = {"impID": ghi['impid']}
newValue = {"Status": "Not Imported",
"ComicName": helpers.conversion(i['ComicName']),
"DisplayName": helpers.conversion(i['ComicName']),
"DynamicName": helpers.conversion(nspace_dynamicname),
"ComicName": i['ComicName'], #helpers.conversion(i['ComicName']),
"DisplayName": i['ComicName'], #helpers.conversion(i['ComicName']),
"DynamicName": nspace_dynamicname, #helpers.conversion(nspace_dynamicname),
"ComicID": i['ComicID'],
"IssueID": i['IssueID'],
"IssueNumber": helpers.conversion(i['Issue_Number']),
"IssueNumber": i['Issue_Number'], #helpers.conversion(i['Issue_Number']),
"Volume": ghi['volume'],
"ComicYear": ghi['comicyear'],
"ComicFilename": helpers.conversion(ghi['comfilename']),
"ComicLocation": helpers.conversion(ghi['comlocation']),
"ComicFilename": ghi['comfilename'], #helpers.conversion(ghi['comfilename']),
"ComicLocation": ghi['comlocation'], #helpers.conversion(ghi['comlocation']),
"ImportDate": helpers.today(),
"WatchMatch": None} #i['watchmatch']}
myDB.upsert("importresults", newValue, controlValue)
@@ -683,15 +683,15 @@ def scanLibrary(scan=None, queue=None):
controlValue = {"impID": ss['impid']}
newValue = {"ComicYear": ss['comicyear'],
"Status": "Not Imported",
"ComicName": helpers.conversion(ss['comicname']),
"DisplayName": helpers.conversion(ss['displayname']),
"DynamicName": helpers.conversion(nspace_dynamicname),
"ComicName": ss['comicname'], #helpers.conversion(ss['comicname']),
"DisplayName": ss['displayname'], #helpers.conversion(ss['displayname']),
"DynamicName": nspace_dynamicname, #helpers.conversion(nspace_dynamicname),
"ComicID": ss['comicid'], #if it's been scanned in for cvinfo, this will be the CID - otherwise it's None
"IssueID": None,
"Volume": ss['volume'],
"IssueNumber": helpers.conversion(ss['issuenumber']),
"ComicFilename": helpers.conversion(ss['comfilename']),
"ComicLocation": helpers.conversion(ss['comlocation']),
"IssueNumber": ss['issuenumber'], #helpers.conversion(ss['issuenumber']),
"ComicFilename": ss['comfilename'], #helpers.conversion(ss['comfilename']),
"ComicLocation": ss['comlocation'], #helpers.conversion(ss['comlocation']),
"ImportDate": helpers.today(),
"WatchMatch": ss['watchmatch']}
myDB.upsert("importresults", newValue, controlValue)

View File

@@ -36,18 +36,18 @@ class Process(object):
elif self.failed == '1':
self.failed = True
queue = queue.Queue()
ppqueue = queue.Queue()
retry_outside = False
if self.failed is False:
PostProcess = mylar.PostProcessor.PostProcessor(self.nzb_name, self.nzb_folder, self.issueid, queue=queue, comicid=self.comicid, apicall=self.apicall, ddl=self.ddl)
PostProcess = mylar.PostProcessor.PostProcessor(self.nzb_name, self.nzb_folder, self.issueid, queue=ppqueue, comicid=self.comicid, apicall=self.apicall, ddl=self.ddl)
if any([self.nzb_name == 'Manual Run', self.nzb_name == 'Manual+Run', self.apicall is True, self.issueid is not None]):
threading.Thread(target=PostProcess.Process).start()
else:
thread_ = threading.Thread(target=PostProcess.Process, name="Post-Processing")
thread_.start()
thread_.join()
chk = queue.get()
chk = ppqueue.get()
while True:
if chk[0]['mode'] == 'fail':
logger.info('Initiating Failed Download handling')
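
The queue-to-ppqueue rename matters under Python 3: the stdlib module is now lowercase queue, so a local variable named queue shadows the import and any later queue.Queue() call fails with AttributeError. Minimal illustration of the fixed pattern:

    import queue

    ppqueue = queue.Queue()   # local name no longer shadows the module
    another = queue.Queue()   # still works; 'queue' still refers to the module
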
@@ -70,11 +70,11 @@ class Process(object):
if mylar.CONFIG.FAILED_DOWNLOAD_HANDLING is True:
#drop the if-else continuation so we can drop down to this from the above if statement.
logger.info('Initiating Failed Download handling for this download.')
FailProcess = mylar.Failed.FailedProcessor(nzb_name=self.nzb_name, nzb_folder=self.nzb_folder, queue=queue)
FailProcess = mylar.Failed.FailedProcessor(nzb_name=self.nzb_name, nzb_folder=self.nzb_folder, queue=ppqueue)
thread_ = threading.Thread(target=FailProcess.Process, name="FAILED Post-Processing")
thread_.start()
thread_.join()
failchk = queue.get()
failchk = ppqueue.get()
if failchk[0]['mode'] == 'retry':
logger.info('Attempting to return to search module with ' + str(failchk[0]['issueid']))
if failchk[0]['annchk'] == 'no':
@@ -91,11 +91,11 @@ class Process(object):
logger.warn('Failed Download Handling is not enabled. Leaving Failed Download as-is.')
if retry_outside:
PostProcess = mylar.PostProcessor.PostProcessor('Manual Run', self.nzb_folder, queue=queue)
PostProcess = mylar.PostProcessor.PostProcessor('Manual Run', self.nzb_folder, queue=ppqueue)
thread_ = threading.Thread(target=PostProcess.Process, name="Post-Processing")
thread_.start()
thread_.join()
chk = queue.get()
chk = ppqueue.get()
while True:
if chk[0]['mode'] == 'fail':
logger.info('Initiating Failed Download handling')

View File

@@ -525,7 +525,7 @@ def NZB_SEARCH(ComicName, IssueNumber, ComicYear, SeriesYear, Publisher, IssueDa
#print ("-------SEARCH FOR MISSING------------------")
#ComicName is unicode - let's unicode and ascii it cause we'll be comparing filenames against it.
u_ComicName = ComicName.encode('ascii', 'replace').strip()
u_ComicName = ComicName #.encode('ascii', 'replace').strip()
findcomic = u_ComicName
cm1 = re.sub("[\/\-]", " ", findcomic)
@@ -2524,7 +2524,7 @@ def searcher(nzbprov, nzbname, comicinfo, link, IssueID, ComicID, tmpprov, direc
#generate the api key to download here and then kill it immediately after.
if mylar.DOWNLOAD_APIKEY is None:
import hashlib, random
mylar.DOWNLOAD_APIKEY = hashlib.sha224(str(random.getrandbits(256))).hexdigest()[0:32]
mylar.DOWNLOAD_APIKEY = hashlib.sha224(str(random.getrandbits(256)).encode('utf-8')).hexdigest()[0:32]
#generate the mylar host address if applicable.
if mylar.CONFIG.ENABLE_HTTPS:
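
The .encode('utf-8') is required because Python 3 hashlib digests bytes, not str; passing the bare string raises TypeError ("Unicode-objects must be encoded before hashing"). The fixed line in isolation:

    import hashlib, random

    key = hashlib.sha224(str(random.getrandbits(256)).encode('utf-8')).hexdigest()[0:32]
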

View File

@@ -40,28 +40,27 @@ def runGit(args):
cmd = '%s %s' % (cur_git, args)
try:
#logger.debug('Trying to execute: %s with shell in %s' % (cmd, mylar.PROG_DIR))
p = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.STDOUT, shell=True, cwd=mylar.PROG_DIR)
output, err = p.communicate()
logger.debug('Trying to execute: %s with shell in %s' % (cmd, mylar.PROG_DIR))
output = subprocess.run(cmd, text=True, capture_output=True, shell=True, cwd=mylar.PROG_DIR)
logger.debug('Git output: %s' % output)
except Exception as e:
logger.error('Command %s didn\'t work [%s]' % (cmd, e))
continue
else:
if all([err is not None, err != '']):
logger.error('Encountered error: %s' % err)
if all([output.stderr is not None, output.stderr != '']):
logger.error('Encountered error: %s' % output.stderr)
if "not found" in output or "not recognized as an internal or external command" in output:
logger.error('[%s] Unable to find git with command: %s' % (output, cmd))
if "not found" in output.stdout or "not recognized as an internal or external command" in output.stdout:
logger.error('[%s] Unable to find git with command: %s' % (output.stdout, cmd))
output = None
elif 'fatal:' in output or err:
logger.error('Error: %s' % err)
logger.error('Git returned bad info. Are you sure this is a git installation? [%s]' % output)
elif 'fatal:' in output.stdout or output.stderr:
logger.error('Error: %s' % output.stderr)
logger.error('Git returned bad info. Are you sure this is a git installation? [%s]' % output.stdout)
output = None
elif output:
break
return (output, err)
return (output.stdout, output.stderr)
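
subprocess.run with text=True and capture_output=True returns a CompletedProcess whose stdout and stderr are already str, replacing the old Popen/communicate pair. One caveat in the version above: the branches that set output = None leave the final return (output.stdout, output.stderr) to raise AttributeError. A guarded sketch — names are illustrative, not the committed code:

    import subprocess

    def run_git(cmd, cwd):
        try:
            proc = subprocess.run(cmd, text=True, capture_output=True,
                                  shell=True, cwd=cwd)
        except Exception as e:
            return (None, str(e))
        out, err = proc.stdout or '', proc.stderr or ''
        if 'not found' in out or 'fatal:' in out or err:
            return (None, err or out)
        return (out, err)
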
def getVersion():
@@ -75,7 +74,7 @@ def getVersion():
elif os.path.isdir(os.path.join(mylar.PROG_DIR, '.git')):
mylar.INSTALL_TYPE = 'git'
output, err = runGit('rev-parse HEAD')
output, err = runGit('rev-parse HEAD --abbrev-ref HEAD')
if not output:
logger.error('Couldn\'t find latest installed version.')
@@ -87,13 +86,17 @@ def getVersion():
#bh.append(branch_history.split('\n'))
#print ("bh1: " + bh[0])
cur_commit_hash = str(output).strip()
opp = output.find('\n')
cur_commit_hash = output[:opp]
cur_branch = output[opp:output.find('\n', opp+1)].strip()
logger.info('cur_commit_hash: %s' % cur_commit_hash)
logger.info('cur_branch: %s' % cur_branch)
if not re.match('^[a-z0-9]+$', cur_commit_hash):
logger.error('Output does not look like a hash, not using it')
cur_commit_hash = None
if mylar.CONFIG.GIT_BRANCH:
if mylar.CONFIG.GIT_BRANCH == cur_branch:
branch = mylar.CONFIG.GIT_BRANCH
else:
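
git rev-parse HEAD --abbrev-ref HEAD emits two lines — the full commit hash, then the current branch name — which the find('\n') arithmetic above slices apart. An equivalent parse with splitlines(), under the same assumption about the two-line output:

    lines = output.splitlines()
    cur_commit_hash = lines[0].strip()
    cur_branch = lines[1].strip() if len(lines) > 1 else ''
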

View File

@@ -4317,8 +4317,8 @@ class WebInterface(object):
importResults.exposed = True
def ImportFilelisting(self, comicname, dynamicname, volume):
comicname = urllib.parse.unquote_plus(helpers.conversion(comicname))
dynamicname = helpers.conversion(urllib.parse.unquote_plus(dynamicname)) #urllib.unquote(dynamicname).decode('utf-8')
comicname = urllib.parse.unquote_plus(comicname)
dynamicname = urllib.parse.unquote_plus(dynamicname) #urllib.unquote(dynamicname).decode('utf-8')
myDB = db.DBConnection()
if volume is None or volume == 'None':
results = myDB.select("SELECT * FROM importresults WHERE (WatchMatch is Null OR WatchMatch LIKE 'C%') AND DynamicName=? AND Volume IS NULL",[dynamicname])
@@ -4702,7 +4702,7 @@ class WebInterface(object):
"name": sres['name'],
"deck": sres['deck'],
"url": sres['url'],
"description": helpers.conversion(sres['description']),
"description": sres['description'],
"comicimage": sres['comicimage'],
"issues": sres['issues'],
"ogcname": ogcname,