FIX:(#1796) When performing a mass import, or a large import via selection, each import was run in its own thread, which could result in a db lock/error in some cases when there was a combination of files with and without metadata. FIX: Search results would include some common words when filtering; those words are now stripped out. FIX: The total parse counter during an import would not reset to 0 on subsequent imports, resulting in inflated numbers.

This commit is contained in:
evilhero 2017-12-11 13:49:40 -05:00
parent 549378c4fb
commit 6f133be1ef
4 changed files with 75 additions and 55 deletions

View File

@ -61,7 +61,7 @@
<form action="markImports" method="get" id="markImports"> <form action="markImports" method="get" id="markImports">
<div id="markcomic"> <div id="markcomic">
<select name="action" onChange="doAjaxCall('markImports',$(this),'table',true);" data-error="You didn't select any comics"> <select name="action" onChange="doAjaxCall('markImports',$(this),'table',true);" data-success="Now running background Import" data-error="You didn't select any comics">
<option disabled="disabled" selected="selected">Choose...</option> <option disabled="disabled" selected="selected">Choose...</option>
<option value="importselected">Start Import</option> <option value="importselected">Start Import</option>
<option value="removeimport">Remove</option> <option value="removeimport">Remove</option>

View File

@ -611,6 +611,7 @@ def libraryScan(dir=None, append=False, ComicID=None, ComicName=None, cron=None,
def scanLibrary(scan=None, queue=None): def scanLibrary(scan=None, queue=None):
mylar.IMPORT_FILES = 0 mylar.IMPORT_FILES = 0
mylar.IMPORT_PARSED_COUNT = 0
valreturn = [] valreturn = []
if scan: if scan:
try: try:

View File

@ -85,10 +85,17 @@ def findComic(name, mode, issue, limityear=None, type=None):
comiclist = [] comiclist = []
arcinfolist = [] arcinfolist = []
commons = [' and ', ' the '] commons = ['and', 'the', '&', '-']
for x in commons: for x in commons:
if x in name.lower(): if x in name.lower():
name = re.sub(x, ' ', name.lower()).strip() for m in re.finditer(x, name.lower()):
tehstart = m.start()
tehend = m.end()
if any([x == 'the', x == 'and']):
if not all([tehstart == 0, name[tehend] == ' ']) or not all([tehstart != 0, name[tehstart-1] == ' ', name[tehend] == ' ']):
continue
else:
name = name[tehstart:tehend].replace(x, ' ').strip() + name[tehend+1:]
pattern = re.compile(ur'\w+', re.UNICODE) pattern = re.compile(ur'\w+', re.UNICODE)
name = pattern.findall(name) name = pattern.findall(name)

View File

@ -374,11 +374,14 @@ class WebInterface(object):
raise cherrypy.HTTPRedirect("comicDetails?ComicID=%s" % comicid) raise cherrypy.HTTPRedirect("comicDetails?ComicID=%s" % comicid)
addComic.exposed = True addComic.exposed = True
def addbyid(self, comicid, calledby=None, imported=None, ogcname=None): def addbyid(self, comicid, calledby=None, imported=None, ogcname=None, nothread=False):
mismatch = "no" mismatch = "no"
logger.info('Attempting to add directly by ComicVineID: ' + str(comicid)) logger.info('Attempting to add directly by ComicVineID: ' + str(comicid))
if comicid.startswith('4050-'): comicid = re.sub('4050-', '', comicid) if comicid.startswith('4050-'): comicid = re.sub('4050-', '', comicid)
threading.Thread(target=importer.addComictoDB, args=[comicid, mismatch, None, imported, ogcname]).start() if nothread is False:
threading.Thread(target=importer.addComictoDB, args=[comicid, mismatch, None, imported, ogcname]).start()
else:
return importer.addComictoDB(comicid, mismatch, None, imported, ogcname)
if calledby == True or calledby == 'True': if calledby == True or calledby == 'True':
return return
elif calledby == 'web-import': elif calledby == 'web-import':
@ -3760,7 +3763,7 @@ class WebInterface(object):
'Volume': comicinfo['Volume'], 'Volume': comicinfo['Volume'],
'filelisting': files, 'filelisting': files,
'srid': SRID} 'srid': SRID}
self.addbyid(comicinfo['ComicID'], calledby=True, imported=imported, ogcname=comicinfo['ComicName']) self.addbyid(comicinfo['ComicID'], calledby=True, imported=imported, ogcname=comicinfo['ComicName'], nothread=True)
#status update. #status update.
ctrlVal = {"ComicID": comicinfo['ComicID']} ctrlVal = {"ComicID": comicinfo['ComicID']}
@ -4032,48 +4035,64 @@ class WebInterface(object):
myDB.upsert("importresults", newVal, ctrlVal) myDB.upsert("importresults", newVal, ctrlVal)
if len(search_matches) > 1: if resultset == 0:
# if we matched on more than one series above, just save those results instead of the entire search result set. if len(search_matches) > 1:
for sres in search_matches: # if we matched on more than one series above, just save those results instead of the entire search result set.
cVal = {"SRID": SRID, for sres in search_matches:
"comicid": sres['comicid']} cVal = {"SRID": SRID,
#should store ogcname in here somewhere to account for naming conversions above. "comicid": sres['comicid']}
nVal = {"Series": ComicName, #should store ogcname in here somewhere to account for naming conversions above.
"results": len(search_matches), nVal = {"Series": ComicName,
"publisher": sres['publisher'], "results": len(search_matches),
"haveit": sres['haveit'], "publisher": sres['publisher'],
"name": sres['name'], "haveit": sres['haveit'],
"deck": sres['deck'], "name": sres['name'],
"url": sres['url'], "deck": sres['deck'],
"description": sres['description'], "url": sres['url'],
"comicimage": sres['comicimage'], "description": sres['description'],
"issues": sres['issues'], "comicimage": sres['comicimage'],
"ogcname": ogcname, "issues": sres['issues'],
"comicyear": sres['comicyear']} "ogcname": ogcname,
myDB.upsert("searchresults", nVal, cVal) "comicyear": sres['comicyear']}
myDB.upsert("searchresults", nVal, cVal)
logger.info('[IMPORT] There is more than one result that might be valid - normally this is due to the filename(s) not having enough information for me to use (ie. no volume label/year). Manual intervention is required.')
#force the status here just in case
newVal = {'SRID': SRID,
'Status': 'Manual Intervention'}
myDB.upsert("importresults", newVal, ctrlVal)
elif len(sresults) > 1:
# store the search results for series that returned more than one result for user to select later / when they want.
# should probably assign some random numeric for an id to reference back at some point.
for sres in sresults:
cVal = {"SRID": SRID,
"comicid": sres['comicid']}
#should store ogcname in here somewhere to account for naming conversions above.
nVal = {"Series": ComicName,
"results": len(sresults),
"publisher": sres['publisher'],
"haveit": sres['haveit'],
"name": sres['name'],
"deck": sres['deck'],
"url": sres['url'],
"description": sres['description'],
"comicimage": sres['comicimage'],
"issues": sres['issues'],
"ogcname": ogcname,
"comicyear": sres['comicyear']}
myDB.upsert("searchresults", nVal, cVal)
logger.info('[IMPORT] There is more than one result that might be valid - normally this is due to the filename(s) not having enough information for me to use (ie. no volume label/year). Manual intervention is required.')
#force the status here just in case
newVal = {'SRID': SRID,
'Status': 'Manual Intervention'}
myDB.upsert("importresults", newVal, ctrlVal)
else:
logger.info('[IMPORT] Could not find any matching results against CV. Check the logs and perhaps rename the attempted file(s)')
newVal = {'SRID': SRID,
'Status': 'No Results'}
myDB.upsert("importresults", newVal, ctrlVal)
else: else:
# store the search results for series that returned more than one result for user to select later / when they want.
# should probably assign some random numeric for an id to reference back at some point.
for sres in sresults:
cVal = {"SRID": SRID,
"comicid": sres['comicid']}
#should store ogcname in here somewhere to account for naming conversions above.
nVal = {"Series": ComicName,
"results": len(sresults),
"publisher": sres['publisher'],
"haveit": sres['haveit'],
"name": sres['name'],
"deck": sres['deck'],
"url": sres['url'],
"description": sres['description'],
"comicimage": sres['comicimage'],
"issues": sres['issues'],
"ogcname": ogcname,
"comicyear": sres['comicyear']}
myDB.upsert("searchresults", nVal, cVal)
if resultset == 1:
logger.info('[IMPORT] Now adding %s...' % ComicName) logger.info('[IMPORT] Now adding %s...' % ComicName)
if volume is None or volume == 'None': if volume is None or volume == 'None':
@ -4095,17 +4114,10 @@ class WebInterface(object):
'filelisting': files, 'filelisting': files,
'srid': SRID} 'srid': SRID}
self.addbyid(sr['comicid'], calledby=True, imported=imported, ogcname=ogcname) #imported=yes) self.addbyid(sr['comicid'], calledby=True, imported=imported, ogcname=ogcname, nothread=True)
else:
logger.info('[IMPORT] There is more than one result that might be valid - normally this is due to the filename(s) not having enough information for me to use (ie. no volume label/year). Manual intervention is required.')
#force the status here just in case
newVal = {'SRID': SRID,
'Status': 'Manual Intervention'}
myDB.upsert("importresults", newVal, ctrlVal)
mylar.IMPORTLOCK = False mylar.IMPORTLOCK = False
logger.info('[IMPORT] Initial Import complete (I might still be populating the series data).') logger.info('[IMPORT] Import completed.')
preSearchit.exposed = True preSearchit.exposed = True