
- FIX: (#1239) Error during import when scanned issues didn't need their filenames parsed but contained metadata
- FIX: (#1237) If an issue was '9-5', the series data would fail to update/refresh
- FIX: Failure to use utf-8 encoded filenames when determining file paths in several modules (see the sketch after this list)
- FIX: If a date within a filename was a full date or contained a dash, it would incorrectly be assumed to be part of an issue title
- FIX: If numbers occurred after a dash within a filename, the issue number would be incorrectly adjusted
- IMP: Added another date format (YYYY-mm) to the list that Mylar can recognize within filenames
- FIX: When manually post-processing, files would fail to match watchlist series if the series/filenames contained extra characters (dynamic names would fail)
- FIX: Fixed some incorrect utf-8/str references when post-processing
- FIX: Turned off the alert notice when viewing issue details
- FIX: When adding a series (or refreshing an existing one), the dynamic name of the series was not regenerated, which caused problems when manually post-processing issues for that series
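Several of the utf-8 fixes above come down to the same Python 2 pitfall: mixing byte strings and unicode strings when building file paths. The sketch below is an illustration only (the paths are made up, and this is not Mylar code); it shows why os.path.join() of a unicode directory and a utf-8 byte-string filename fails, and why decoding the filename first, as this commit does in several hunks below, avoids it. The same pitfall is why several str() casts around filenames are dropped further down: str() on a unicode value containing non-ASCII characters raises UnicodeEncodeError.

    # Python 2 illustration only; directory and filename are hypothetical.
    import os

    directory = u'/comics/Asterix'          # unicode, e.g. read from the DB
    filename = 'Ast\xc3\xa9rix 001.cbz'     # utf-8 byte string from the filesystem

    try:
        # Joining unicode + bytes makes Python 2 implicitly decode the bytes
        # as ASCII, which fails on the accented character.
        path = os.path.join(directory, filename)
    except UnicodeDecodeError:
        # Decoding the filename explicitly keeps everything unicode.
        path = os.path.join(directory, filename.decode('utf-8'))

    print repr(path)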

evilhero 2016-04-09 14:36:08 -04:00
parent 19bf048fbb
commit 77500d16b9
7 changed files with 114 additions and 49 deletions

@@ -702,7 +702,7 @@
<script>
function runMetaIssue(filelink, comicname, issue, date, title) {
alert(filelink);
//alert(filelink);
$.ajax({
type: "GET",
url: "IssueInfo",

@@ -260,7 +260,7 @@ class PostProcessor(object):
as_dinfo = as_d.dynamic_replace(fl['series_name'])
mod_seriesname = as_dinfo['mod_seriesname']
logger.fdebug('Dynamic-ComicName: ' + mod_seriesname)
comicseries = myDB.select('SELECT * FROM comics Where DynamicComicName=?', [mod_seriesname])
comicseries = myDB.select('SELECT * FROM comics Where DynamicComicName=? COLLATE NOCASE', [mod_seriesname])
if comicseries is None:
logger.error(module + ' No Series in Watchlist - checking against Story Arcs (just in case). If I do not find anything, maybe you should be running Import?')
break
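The COLLATE NOCASE added to this lookup makes the dynamic-name comparison case-insensitive, so a filename whose dynamic name differs from the stored series only in case can still match the watchlist. A self-contained sqlite3 sketch of the difference (table contents are made up):

    import sqlite3

    conn = sqlite3.connect(':memory:')
    conn.execute('CREATE TABLE comics (DynamicComicName TEXT)')
    conn.execute("INSERT INTO comics VALUES ('Batgirl')")

    # Default (BINARY) collation: a lower-cased dynamic name finds nothing.
    print(conn.execute('SELECT * FROM comics WHERE DynamicComicName=?',
                       ('batgirl',)).fetchall())                  # []

    # COLLATE NOCASE: the comparison ignores ASCII case and the row matches.
    print(conn.execute('SELECT * FROM comics WHERE DynamicComicName=? COLLATE NOCASE',
                       ('batgirl',)).fetchall())                  # one row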
@@ -400,7 +400,7 @@ class PostProcessor(object):
logger.info(module + ' Found matching issue # ' + str(fcdigit) + ' for ComicID: ' + str(cs['ComicID']) + ' / IssueID: ' + str(issuechk['IssueID']))
if datematch == "True":
manual_list.append({"ComicLocation": os.path.join(watchmatch['comiclocation'],watchmatch['comicfilename']),
manual_list.append({"ComicLocation": os.path.join(watchmatch['comiclocation'],watchmatch['comicfilename'].decode('utf-8')),
"ComicID": cs['ComicID'],
"IssueID": issuechk['IssueID'],
"IssueNumber": issuechk['Issue_Number'],
@@ -409,7 +409,7 @@ class PostProcessor(object):
logger.fdebug(module + '[NON-MATCH: ' + cs['ComicName'] + '-' + cs['ComicID'] + '] Incorrect series - not populating..continuing post-processing')
continue
#ccnt+=1
logger.fdebug(module + '[SUCCESSFUL MATCH: ' + cs['ComicName'] + '-' + cs['ComicID'] + '] Match verified for ' + fl['comicfilename'])
logger.fdebug(module + '[SUCCESSFUL MATCH: ' + cs['ComicName'] + '-' + cs['ComicID'] + '] Match verified for ' + fl['comicfilename'].decode('utf-8'))
break
logger.fdebug(module + ' There are ' + str(len(manual_list)) + ' files found that match on your watchlist, ' + str(int(filelist['comiccount'] - len(manual_list))) + ' do not match anything and will be ignored.')
@@ -1505,7 +1505,7 @@ class PostProcessor(object):
nfilename = re.sub('[\,\:\?]', '', nfilename)
nfilename = re.sub('[\/]', '-', nfilename)
self._log("New Filename: " + nfilename)
logger.fdebug(module + ' New Filename: ' + str(nfilename))
logger.fdebug(module + ' New Filename: ' + nfilename)
#src = os.path.join(self.nzb_folder, ofilename)
src = os.path.join(odir, ofilename)
@@ -1535,7 +1535,7 @@ class PostProcessor(object):
logger.fdebug(module + ' ofilename:' + ofilename)
logger.fdebug(module + ' nfilename:' + nfilename + ext)
if mylar.RENAME_FILES:
if str(ofilename) != str(nfilename + ext):
if ofilename != (nfilename + ext):
logger.fdebug(module + ' Renaming ' + os.path.join(odir, ofilename) + ' ..to.. ' + os.path.join(odir, nfilename + ext))
#if mylar.FILE_OPTS == 'move':
# os.rename(os.path.join(odir, ofilename), os.path.join(odir, nfilename + ext))
@@ -1577,8 +1577,8 @@ class PostProcessor(object):
#Manual Run, this is the portion.
src = os.path.join(odir, ofilename)
if mylar.RENAME_FILES:
if str(ofilename) != str(nfilename + ext):
logger.fdebug(module + ' Renaming ' + os.path.join(odir, str(ofilename))) #' ..to.. ' + os.path.join(odir, self.nzb_folder, str(nfilename + ext)))
if ofilename != (nfilename + ext):
logger.fdebug(module + ' Renaming ' + os.path.join(odir, ofilename)) #' ..to.. ' + os.path.join(odir, self.nzb_folder, str(nfilename + ext)))
#os.rename(os.path.join(odir, str(ofilename)), os.path.join(odir, str(nfilename + ext)))
#src = os.path.join(odir, str(nfilename + ext))
else:
@@ -1717,13 +1717,13 @@ class PostProcessor(object):
if mylar.WEEKFOLDER or mylar.SEND2READ:
#mylar.WEEKFOLDER = will *copy* the post-processed file to the weeklypull list folder for the given week.
#mylar.SEND2READ = will add the post-processed file to the readinglits
weeklypull.weekly_check(comicid, issuenum, file=str(nfilename +ext), path=dst, module=module, issueid=issueid)
weeklypull.weekly_check(comicid, issuenum, file=(nfilename +ext), path=dst, module=module, issueid=issueid)
# retrieve/create the corresponding comic objects
if mylar.ENABLE_EXTRA_SCRIPTS:
folderp = str(dst) #folder location after move/rename
folderp = dst #folder location after move/rename
nzbn = self.nzb_name #original nzb name
filen = str(nfilename + ext) #new filename
filen = nfilename + ext #new filename
#name, comicyear, comicid , issueid, issueyear, issue, publisher
#create the dic and send it.
seriesmeta = []

@@ -32,7 +32,7 @@ def run(dirName, nzbName=None, issueid=None, comversion=None, manual=None, filen
# Force mylar to use cmtagger_path = mylar.PROG_DIR to force the use of the included lib.
logger.fdebug(module + ' Filename is : ' + str(filename))
logger.fdebug(module + ' Filename is : ' + filename)
filepath = filename
@@ -183,7 +183,17 @@ def run(dirName, nzbName=None, issueid=None, comversion=None, manual=None, filen
tmpfilename = re.sub('Archive exported successfully to: ', '', out.rstrip())
if mylar.FILE_OPTS == 'move':
tmpfilename = re.sub('\(Original deleted\)', '', tmpfilename).strip()
filepath = os.path.join(comicpath, tmpfilename)
tmpf = tmpfilename.decode('utf-8')
filepath = os.path.join(comicpath, tmpf)
if not os.path.isfile(filepath):
logger.fdebug(module + 'Trying utf-8 conversion.')
tmpf = tmpfilename.encode('utf-8')
filepath = os.path.join(comicpath, tmpf)
if not os.path.isfile(filepath):
logger.fdebug(module + 'Trying latin-1 conversion.')
tmpf = tmpfilename.encode('Latin-1')
filepath = os.path.join(comicpath, tmpf)
logger.fdebug(module + '[COMIC-TAGGER][CBR-TO-CBZ] New filename: ' + filepath)
initial_ctrun = False
elif initial_ctrun and 'Archive is not a RAR' in out:
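The new block above deals with comictagger reporting the exported filename on stdout: the reported name may not match the on-disk name byte-for-byte once non-ASCII characters are involved, so the code now tries the name as-is, then a utf-8 decode, then a latin-1 re-encode before giving up. A rough standalone sketch of that fallback idea; the helper below is hypothetical and Python 2 flavoured, not Mylar's API:

    import os

    def locate_exported_file(comicpath, reported_name):
        """Try a few encodings of a reported filename until one exists on disk."""
        candidates = [reported_name]
        try:
            candidates.append(reported_name.decode('utf-8'))   # Python 2 byte string
        except (AttributeError, UnicodeDecodeError):
            pass
        try:
            candidates.append(reported_name.encode('latin-1'))
        except (AttributeError, UnicodeError):
            pass

        for cand in candidates:
            try:
                filepath = os.path.join(comicpath, cand)
            except UnicodeError:
                continue
            if os.path.isfile(filepath):
                return filepath
        return None                                            # caller logs the failure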

@@ -570,13 +570,14 @@ class FileChecker(object):
if int(ab) > int(sctd):
logger.fdebug('year is in the future, ignoring and assuming part of series title.')
yearposition = None
yearmodposition = None
continue
else:
issue_year = dc['date']
logger.fdebug('year verified as : ' + str(issue_year))
if highest_series_pos > dc['position']: highest_series_pos = dc['position']
yearposition = dc['position']
yearmodposition = dc['mod_position']
if len(ab) == 4:
issue_year = ab
logger.fdebug('year verified as: ' + str(issue_year))
@@ -585,9 +586,11 @@ class FileChecker(object):
logger.fdebug('date verified as: ' + str(issue_year))
if highest_series_pos > dc['position']: highest_series_pos = dc['position']
yearposition = dc['position']
yearmodposition = dc['mod_position']
else:
issue_year = None
yearposition = None
yearmodposition = None
logger.fdebug('No year present within title - ignoring as a variable.')
logger.fdebug('highest_series_position: ' + str(highest_series_pos))
@@ -595,6 +598,7 @@ class FileChecker(object):
issue_number = None
if len(possible_issuenumbers) > 0:
logger.fdebug('possible_issuenumbers: ' + str(possible_issuenumbers))
dash_numbers = []
if len(possible_issuenumbers) > 1:
p = 1
if '-' not in split_file[0]:
@@ -622,8 +626,12 @@
if highest_series_pos > pis['position']: highest_series_pos = pis['position']
break
if pis['mod_position'] > finddash and finddash != -1:
logger.fdebug('issue number is positioned after a dash - probably not an issue number, but part of an issue title')
continue
if finddash < yearposition and finddash > (yearmodposition + len(split_file.index(position))):
logger.fdebug('issue number is positioned after a dash - probably not an issue number, but part of an issue title')
dash_numbers.append({'mod_position': pis['mod_position'],
'number': pis['number'],
'position': pis['position']})
continue
if yearposition == pis['position']:
logger.fdebug('Already validated year, ignoring as possible issue number: ' + str(pis['number']))
continue
@@ -639,8 +647,24 @@
logger.fdebug('issue verified as : ' + issue_number)
if highest_series_pos > possible_issuenumbers[0]['position']: highest_series_pos = possible_issuenumbers[0]['position']
if issue_number:
issue_number = re.sub('#', '', issue_number).strip()
if issue_number:
issue_number = re.sub('#', '', issue_number).strip()
else:
if len(dash_numbers) > 0 and finddash !=-1 :
#there are numbers after a dash, which was incorrectly accounted for.
fin_num_position = finddash
fin_num = None
for dn in dash_numbers:
if dn['mod_position'] > finddash and dn['mod_position'] > fin_num_position:
fin_num_position = dn['mod_position']
fin_num = dn['number']
fin_pos = dn['position']
if fin_num:
print 'Issue number re-corrected to : ' + fin_num
issue_number = fin_num
if highest_series_pos > fin_pos: highest_series_pos = fin_pos
issue_volume = None
if len(volume_found) > 0:
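Previously, any candidate issue number positioned after a dash was discarded as part of an issue title. The hunk above instead remembers those candidates in dash_numbers and, if nothing else validates as the issue number, falls back to the right-most of them. The snippet below is a deliberately simplified, hypothetical rendering of that fallback; the real parser also weighs the year's position and the modified token positions:

    def pick_issue_number(tokens, dash_index):
        """Simplified: prefer a numeric token before the dash; otherwise fall
        back to the right-most numeric token after it (the dash_numbers idea)."""
        before, after = tokens[:dash_index], tokens[dash_index + 1:]
        for tok in reversed(before):
            if tok.isdigit():
                return tok
        dash_numbers = [tok for tok in after if tok.isdigit()]
        return dash_numbers[-1] if dash_numbers else None

    # 'Some Series - 052 (2015)': nothing numeric before the dash, so 052 is
    # recovered instead of being dropped as part of a title.
    print(pick_issue_number(['Some', 'Series', '-', '052', '(2015)'], 2))   # 052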
@@ -974,7 +998,7 @@ class FileChecker(object):
# Jan 1990
# January1990'''
fmts = ('%Y','%b %d, %Y','%b %d, %Y','%B %d, %Y','%B %d %Y','%m/%d/%Y','%m/%d/%y','%b %Y','%B%Y','%b %d,%Y','%m-%Y','%B %Y', '%Y-%m-%d')
fmts = ('%Y','%b %d, %Y','%b %d, %Y','%B %d, %Y','%B %d %Y','%m/%d/%Y','%m/%d/%y','%b %Y','%B%Y','%b %d,%Y','%m-%Y','%B %Y','%Y-%m-%d','%Y-%m')
parsed=[]
for e in txt.splitlines():
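The only change to the fmts tuple is the trailing '%Y-%m' entry, which lets a bare year-month token such as '2016-04' be recognised as a date instead of being treated as part of the series or issue title. A quick check of what the new format accepts (the sample tokens are hypothetical):

    from datetime import datetime

    for token in ('2016-04', '2016-04-09', 'April 2016'):
        for fmt in ('%Y-%m', '%Y-%m-%d', '%B %Y'):
            try:
                # first format that parses wins, mirroring a try-each-format loop
                print('%s -> %s' % (token, datetime.strptime(token, fmt).date()))
                break
            except ValueError:
                continue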

@@ -512,24 +512,30 @@ def addComictoDB(comicid, mismatch=None, pullupd=None, imported=None, ogcname=No
#cdes_removed = Cdesc[:cdes_find]
#logger.fdebug('description: ' + cdes_removed)
controlValueDict = {"ComicID": comicid}
newValueDict = {"ComicName": comic['ComicName'],
"ComicSortName": sortname,
#dynamic-name generation here.
as_d = filechecker.FileChecker(watchcomic=comic['ComicName'])
as_dinfo = as_d.dynamic_replace(comic['ComicName'])
dynamic_seriesname = as_dinfo['mod_seriesname']
controlValueDict = {"ComicID": comicid}
newValueDict = {"ComicName": comic['ComicName'],
"ComicSortName": sortname,
"ComicName_Filesafe": comicname_filesafe,
"ComicYear": SeriesYear,
"ComicImage": ComicImage,
"ComicImageURL": comic.get("ComicImage", ""),
"ComicImageALTURL": comic.get("ComicImageALT", ""),
"Total": comicIssues,
"ComicVersion": comicVol,
"ComicLocation": comlocation,
"ComicPublisher": comic['ComicPublisher'],
# "Description": Cdesc, #.dencode('utf-8', 'replace'),
"DetailURL": comic['ComicURL'],
# "ComicPublished": gcdinfo['resultPublished'],
"ComicPublished": "Unknown",
"DateAdded": helpers.today(),
"Status": "Loading"}
"DynamicComicName": dynamic_seriesname,
"ComicYear": SeriesYear,
"ComicImage": ComicImage,
"ComicImageURL": comic.get("ComicImage", ""),
"ComicImageALTURL": comic.get("ComicImageALT", ""),
"Total": comicIssues,
"ComicVersion": comicVol,
"ComicLocation": comlocation,
"ComicPublisher": comic['ComicPublisher'],
# "Description": Cdesc, #.dencode('utf-8', 'replace'),
"DetailURL": comic['ComicURL'],
# "ComicPublished": gcdinfo['resultPublished'],
"ComicPublished": "Unknown",
"DateAdded": helpers.today(),
"Status": "Loading"}
myDB.upsert("comics", newValueDict, controlValueDict)
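The added lines regenerate the series' DynamicComicName on every add/refresh using the same filechecker.FileChecker(...).dynamic_replace() normalisation the post-processor applies to filenames, and store it in the comics record; without a current value, the COLLATE NOCASE lookup shown earlier has nothing to match during manual post-processing. As a rough idea of what such a normalised name looks like, here is a made-up stand-in (this is not Mylar's actual dynamic_replace):

    import re

    def make_dynamic_name(series_name):
        """Hypothetical normaliser: lower-case, treat '&' and 'and' alike,
        and strip punctuation so small filename variations still compare equal."""
        name = series_name.lower()
        name = re.sub(r'\band\b|&', ' ', name)
        name = re.sub(r'[^a-z0-9]+', ' ', name)
        return ' '.join(name.split())

    print(make_dynamic_name('Batman & Robin!'))      # batman robin
    print(make_dynamic_name('batman and robin'))     # batman robin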
@@ -1335,8 +1341,13 @@ def updateissuedata(comicid, comicname=None, issued=None, comicIssues=None, call
logger.fdebug('this does not have an issue # that I can parse properly.')
return
else:
logger.error(str(issnum) + ' this has an alpha-numeric in the issue # which I cannot account for.')
return
if issnum == '9-5':
issnum = u'9\xbd'
logger.fdebug('issue: 9-5 is an invalid entry. Correcting to : ' + issnum)
int_issnum = (9 * 1000) + (.5 * 1000)
else:
logger.error(issnum + ' this has an alpha-numeric in the issue # which I cannot account for.')
return
#get the latest issue / date using the date.
#logger.fdebug('issue : ' + str(issnum))
#logger.fdebug('latest date: ' + str(latestdate))
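updateissuedata() previously bailed out on '9-5' as an unparseable alpha-numeric issue. The new branch treats it as issue 9½ and computes its sortable integer exactly as shown in the diff, (9 * 1000) + (.5 * 1000) = 9500, i.e. the issue number scaled by 1000. A tiny arithmetic check of that scaling; the helper name is made up:

    def to_int_issnum(issnum):
        """Sketch of the sortable-integer scheme: whole and fractional parts
        are each scaled by 1000 and summed."""
        whole, _, frac = issnum.partition('.')
        return int(whole) * 1000 + (int(float('0.' + frac) * 1000) if frac else 0)

    print(to_int_issnum('9.5'))    # 9500, matching (9 * 1000) + (.5 * 1000)
    print(to_int_issnum('10'))     # 10000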

@@ -232,7 +232,6 @@ def libraryScan(dir=None, append=False, ComicID=None, ComicName=None, cron=None,
with open(os.path.join(os.path.dirname(comlocation), 'cvinfo')) as f:
urllink = f.readline()
print 'urllink: ' + str(urllink)
if urllink:
cid = urllink.split('/')
if '4050-' in cid[-2]:
@@ -556,7 +555,8 @@ def libraryScan(dir=None, append=False, ComicID=None, ComicName=None, cron=None,
updater.forceRescan(c)
if not len(import_by_comicids):
return "Completed"
if len(import_by_comicids) > 0:
if len(import_by_comicids) > 0 or len(vals) > 0:
#import_comicids['comic_info'] = import_by_comicids
#if vals:
# import_comicids['issueid_info'] = vals
@@ -568,14 +568,15 @@ def libraryScan(dir=None, append=False, ComicID=None, ComicName=None, cron=None,
else:
cvimport_comicids = None
import_cv_ids = 0
#logger.fdebug('import comicids: ' + str(import_by_comicids))
return {'import_by_comicids': import_by_comicids,
'import_count': len(import_by_comicids),
'CV_import_comicids': cvimport_comicids,
'import_cv_ids': import_cv_ids,
'issueid_list': issueid_list,
'failure_list': failure_list}
else:
import_cv_ids = 0
return {'import_by_comicids': import_by_comicids,
'import_count': len(import_by_comicids),
'CV_import_comicids': cvimport_comicids,
'import_cv_ids': import_cv_ids,
'issueid_list': issueid_list,
'failure_list': failure_list}
def scanLibrary(scan=None, queue=None):
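Two things change in the tail of libraryScan(): the summary is now built when either import_by_comicids or vals has content, and both branches end by returning the same dict shape (the else branch just resets import_cv_ids to 0 first). A stripped-down sketch of returning a consistent summary from both branches; the field names are taken from the diff, the surrounding logic is simplified:

    def build_import_summary(import_by_comicids, vals, cvimport_comicids,
                             import_cv_ids, issueid_list, failure_list):
        """Both branches return the same dict shape so callers can rely on it."""
        if not (len(import_by_comicids) > 0 or len(vals) > 0):
            import_cv_ids = 0          # nothing queued for the CV import step
        return {'import_by_comicids': import_by_comicids,
                'import_count': len(import_by_comicids),
                'CV_import_comicids': cvimport_comicids,
                'import_cv_ids': import_cv_ids,
                'issueid_list': issueid_list,
                'failure_list': failure_list}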

@@ -2976,7 +2976,7 @@ class WebInterface(object):
confirmResult.exposed = True
def Check_ImportStatus(self):
logger.info('import_status: ' + mylar.IMPORT_STATUS)
#logger.info('import_status: ' + mylar.IMPORT_STATUS)
return mylar.IMPORT_STATUS
Check_ImportStatus.exposed = True
@@ -3390,6 +3390,25 @@ class WebInterface(object):
if resultset == 1:
logger.info('now adding...')
#imported = {'ComicName': ComicName,
# 'DynamicName': DynamicName,
# 'Volume': volume,
# 'impid': impid,
# somehow need to pass in all of the files in this particular group sequence that can be added
# 'comiclocation': comiclocation,
# 'comicfilename': comicfilename,
# 'issuenumber': issuenumber}
#if volume is None or volume == 'None':
# results = myDB.select("SELECT * FROM importresults WHERE (WatchMatch is Null OR WatchMatch LIKE 'C%') AND DynamicName=? AND Volume IS NULL",[dynamicname])
#else:
# if not volume.lower().startswith('v'):
# volume = 'v' + str(volume)
# results = myDB.select("SELECT * FROM importresults WHERE (WatchMatch is Null OR WatchMatch LIKE 'C%') AND DynamicName=? AND Volume=?",[dynamicname,volume])
#files = []
#for result in results:
# files.append({'comicfilename': result['ComicFilename'],
# 'comiclocation': result['ComicLocation']})
self.addbyid(sr['comicid'], calledby=True, imported='yes', ogcname=ogcname)
#implog = implog + "ogcname -- " + str(ogcname) + "\n"
#cresults = self.addComic(comicid=sr['comicid'],comicname=sr['name'],comicyear=sr['comicyear'],comicpublisher=sr['publisher'],comicimage=sr['comicimage'],comicissues=sr['issues'],imported='yes',ogcname=ogcname) #imported=comicstoIMP,ogcname=ogcname)