1
0
Fork 0
mirror of https://github.com/evilhero/mylar synced 2025-03-19 18:15:25 +00:00

FIX: When have total for a series was > 1000, updating status would fail when creating query, FIX: Filechecker/Importer will now account for filenames in the format of 'filename - year - issue', IMP: Added story arc directory location in GUI when viewing story arc details, FIX: Removed inactive options on Story Arc main page

This commit is contained in:
evilhero 2016-10-11 11:08:30 -04:00
parent edd2bfb1a3
commit df0eb49d9e
7 changed files with 100 additions and 97 deletions

View file

@ -96,8 +96,8 @@
<small>Rename files to configuration settings</small>
</div>
<br/>
<input type="button" value="Save Changes and Scan" onclick="addScanAction();doAjaxCall('comicScan',$(this),'tabs',true);return true;" data-success="Import Scan now submitted.">
<input type="button" value="Save Changes without Scanning Library" onclick="doAjaxCall('comicScan',$(this),'tabs',true);return false;" data-success="Changes Saved Successfully">
<input type="button" value="Save Changes and Scan" onclick="addScanAction();doAjaxCall('comicScan',$(this),'tabs',true);return true;" data-success="Import Scan now submitted." data-error="Unable to start the scan. Check the logs.">
<input type="button" value="Save Changes without Scanning Library" onclick="doAjaxCall('comicScan',$(this),'tabs',true);return false;" data-success="Changes Saved Successfully" data-error="Unable to save settings. Check the logs.">
%if mylar.IMPORTBUTTON:
<input type="button" value="Import Results Management" style="float: right;" onclick="location.href='importResults';" />
%endif

View file

@ -49,7 +49,10 @@
<fieldset>
<legend>Options</legend>
<div class="row checkbox left clearfix">
<input type="checkbox" style="vertical-align: middle; margin: 3px; margin-top: -1px;" name="storyarcdir" id="storyarcdir" value="1" ${checked(mylar.STORYARCDIR)} /><label>Arcs in StoryArc Directory (off of ComicLocationRoot)?</label>
<%
storyarcdest = os.path.join(mylar.DESTINATION_DIR, 'StoryArcs')
%>
<input type="checkbox" style="vertical-align: middle; margin: 3px; margin-top: -1px;" name="storyarcdir" id="storyarcdir" value="1" ${checked(mylar.STORYARCDIR)} /><label>Arcs in StoryArc Directory </br><small>(${storyarcdest})</small></label>
<input type="checkbox" style="vertical-align: middle; margin: 3px; margin-top: -1px;" name="read2filename" id="read2filename" value="1" ${checked(mylar.READ2FILENAME)} /><label>Append Reading # to filename</label>
</div>
</fieldset>

View file

@ -146,7 +146,7 @@
<td id="action">
%if item['Status'] is None or item['Status'] == None:
<a href="#" onclick="doAjaxCall('queueit?ComicName=${item['ComicName'] | u}&ComicIssue=${item['IssueNumber']}&ComicYear=${issuedate}&mode=readlist&SARC=${item['StoryArc']}&IssueArcID=${item['IssueArcID']}&SeriesYear=${item['SeriesYear']}',$(this),'table')" data-success="Now searching for ${item['ComicName']} #${item['IssueNumber']}"><span class="ui-icon ui-icon-plus"></span>Grab it</a>
<a title="Remove Issue from Story Arc" onclick="doAjaxCall('removefromreadlist?&IssueArcID=${item['IssueArcID']}',$(this),'table')" data-success='Successfully deleted ${item['IssueArcID']}'><span class="ui-icon ui-icon-minus"></span>Remove it</a>
<a title="Remove Issue from Story Arc" onclick="doAjaxCall('removefromreadlist?IssueArcID=${item['IssueArcID']}',$(this),'table')" data-success='Successfully deleted ${item['IssueArcID']}'><span class="ui-icon ui-icon-minus"></span>Remove it</a>
%elif item['Status'] == 'Snatched':
<a href="#" onclick="doAjaxCall('queueissue?ComicName=${item['ComicName'] | u}&ComicIssue=${item['IssueNumber']}&ComicYear=${issuedate}&mode=readlist&SARC=${item['StoryArc']}&IssueArcID=${item['IssueArcID']}&SeriesYear=${item['SeriesYear']}',$(this),'table')" data-success="Trying to Retry"><span class="ui-icon ui-icon-plus"></span>Retry</a>
%endif

View file

@ -302,13 +302,13 @@ class FileChecker(object):
ret_sf1 = re.sub('\&', 'f11', ret_sf1).strip()
ret_sf1 = re.sub('\'', 'g11', ret_sf1).strip()
#split_file = re.findall('\([\w\s-]+\)|[\w-]+', ret_sf1, re.UNICODE)
split_file = re.findall('\([\w\s-]+\)|[-+]?\d*\.\d+|\d+|[\w-]+|#?\d\.\d+|#(?<![\w\d])XCV(?![\w\d])+|\)', ret_sf1, re.UNICODE)
#split_file = re.findall('\([\w\s-]+\)|[-+]?\d*\.\d+|\d+|[\w-]+|#?\d\.\d+|#(?<![\w\d])XCV(?![\w\d])+|\)', ret_sf1, re.UNICODE)
split_file = re.findall('(?imu)\([\w\s-]+\)|[-+]?\d*\.\d+|\d+|[\w-]+|#?\d\.\d+|#(?<![\w\d])XCV(?![\w\d])+|\)', ret_sf1, re.UNICODE)
if len(split_file) == 1:
logger.fdebug('Improperly formatted filename - there is no seperation using appropriate characters between wording.')
ret_sf1 = re.sub('\-',' ', ret_sf1).strip()
split_file = re.findall('\([\w\s-]+\)|[-+]?\d*\.\d+|\d+|[\w-]+|#?\d\.\d+||#(?<![\w\d])XCV(?![\w\d])+|\)', ret_sf1, re.UNICODE)
split_file = re.findall('(?imu)\([\w\s-]+\)|[-+]?\d*\.\d+|\d+|[\w-]+|#?\d\.\d+|#(?<![\w\d])XCV(?![\w\d])+|\)', ret_sf1, re.UNICODE)
possible_issuenumbers = []
@ -411,7 +411,7 @@ class FileChecker(object):
logger.fdebug('Issue Number SHOULD BE: ' + str(lastissue_label))
validcountchk = True
if lastissue_position == (split_file.index(sf) -1) and lastissue_label is not None and '#' not in sf:
if all([lastissue_position == (split_file.index(sf) -1), lastissue_label is not None and '#' not in sf]):
#find it in the original file to see if there's a decimal between.
findst = lastissue_mod_position+1
if findst > len(modfilename):
@ -444,8 +444,8 @@ class FileChecker(object):
logger.fdebug('[DECiMAL-DETECTION] Issue being stored for validation as : ' + modfilename[findst:cf+len(sf)])
for x in possible_issuenumbers:
possible_issuenumbers = []
logger.fdebug('compare: ' + str(x['position']) + ' .. ' + str(lastissue_position))
logger.fdebug('compare: ' + str(x['position']) + ' .. ' + str(split_file.index(sf, lastissue_position)))
#logger.fdebug('compare: ' + str(x['position']) + ' .. ' + str(lastissue_position))
#logger.fdebug('compare: ' + str(x['position']) + ' .. ' + str(split_file.index(sf, lastissue_position)))
if int(x['position']) != int(lastissue_position) and int(x['position']) != split_file.index(sf, lastissue_position):
possible_issuenumbers.append({'number': x['number'],
'position': x['position'],
@ -488,15 +488,20 @@ class FileChecker(object):
'validcountchk': validcountchk})
#now we try to find the series title &/or volume lablel.
if any( [sf.lower().startswith('v'), sf.lower().startswith('vol'), volumeprior == True, 'volume' in sf.lower(), 'vol' in sf.lower()] ):
if sf[1:].isdigit() or sf[3:].isdigit() or volumeprior == True:
if any( [sf.lower().startswith('v'), sf.lower().startswith('vol'), volumeprior == True, 'volume' in sf.lower(), 'vol' in sf.lower()] ) and sf.lower() not in {'one','two','three','four','five','six'}:
if sf[1:].isdigit() or sf[3:].isdigit():# or volumeprior == True:
volume = re.sub("[^0-9]", "", sf)
volume_found['volume'] = volume
if volumeprior:
volume_found['position'] = split_file.index(volumeprior_label)
try:
volumetmp = split_file.index(volumeprior_label, current_pos -1) #if this passes, then we're ok, otherwise will try exception
volume_found['position'] = split_file.index(sf, current_pos)
except:
sep_volume = False
continue
else:
volume_found['position'] = split_file.index(sf)
#logger.fdebug('volume label detected as : Volume ' + str(volume) + ' @ position: ' + str(split_file.index(sf)))
volume_found['position'] = split_file.index(sf, current_pos)
volume_found['volume'] = volume
volumeprior = False
volumeprior_label = None
elif 'vol' in sf.lower() and len(sf) == 3:
@ -516,6 +521,9 @@ class FileChecker(object):
sep_volume = True
else:
#reset the sep_volume indicator here in case a false Volume detected above
sep_volume = False
#check here for numeric or negative number
if sf.isdigit() and split_file.index(sf, current_pos) == 0:
continue
@ -571,6 +579,7 @@ class FileChecker(object):
highest_series_pos = len(split_file)
issue_year = None
possible_years = []
yearmodposition = None
logger.fdebug('datecheck: ' + str(datecheck))
if len(datecheck) > 0:
for dc in sorted(datecheck, key=operator.itemgetter('position'), reverse=True):
@ -598,6 +607,7 @@ class FileChecker(object):
else:
issue_year = ab
logger.fdebug('date verified as: ' + str(issue_year))
if highest_series_pos > dc['position']: highest_series_pos = dc['position']
yearposition = dc['position']
yearmodposition = dc['mod_position']
@ -611,9 +621,9 @@ class FileChecker(object):
issue_number = None
issue_number_position = 0
dash_numbers = []
if len(possible_issuenumbers) > 0:
logger.fdebug('possible_issuenumbers: ' + str(possible_issuenumbers))
dash_numbers = []
if len(possible_issuenumbers) > 1:
p = 1
if '-' not in split_file[0]:
@ -665,23 +675,23 @@ class FileChecker(object):
issue_number_position = possible_issuenumbers[0]['position']
if highest_series_pos > possible_issuenumbers[0]['position']: highest_series_pos = possible_issuenumbers[0]['position']
if issue_number:
issue_number = re.sub('#', '', issue_number).strip()
else:
if len(dash_numbers) > 0 and finddash !=-1 :
#there are numbers after a dash, which was incorrectly accounted for.
fin_num_position = finddash
fin_num = None
for dn in dash_numbers:
if dn['mod_position'] > finddash and dn['mod_position'] > fin_num_position:
fin_num_position = dn['mod_position']
fin_num = dn['number']
fin_pos = dn['position']
if issue_number:
issue_number = re.sub('#', '', issue_number).strip()
else:
if len(dash_numbers) > 0 and finddash !=-1 :
#there are numbers after a dash, which was incorrectly accounted for.
fin_num_position = finddash
fin_num = None
for dn in dash_numbers:
if dn['mod_position'] > finddash and dn['mod_position'] > fin_num_position:
fin_num_position = dn['mod_position']
fin_num = dn['number']
fin_pos = dn['position']
if fin_num:
logger.fdebug('Issue number re-corrected to : ' + fin_num)
issue_number = fin_num
if highest_series_pos > fin_pos: highest_series_pos = fin_pos
if fin_num:
logger.fdebug('Issue number re-corrected to : ' + fin_num)
issue_number = fin_num
if highest_series_pos > fin_pos: highest_series_pos = fin_pos
#--- this is new - 2016-09-18 /account for unicode in issue number when issue number is not deteted above
logger.fdebug('issue_position: ' + str(issue_number_position))
@ -691,6 +701,7 @@ class FileChecker(object):
issue_number = re.sub('XCV', x, split_file[issue_number_position-1])
highest_series_pos -=1
issue_number_position -=1
if issue_number is None:
logger.fdebug('No issue number present in filename.')
else:
@ -703,6 +714,7 @@ class FileChecker(object):
split_file.insert(int(issue_number_position), split_file.pop(volume_found['position'])) #highest_series_pos-1, split_file.pop(volume_found['position']))
logger.fdebug('new split: ' + str(split_file))
highest_series_pos = volume_found['position'] -1
issue_number_position -=1
else:
if highest_series_pos > volume_found['position']:
if sep_volume:
@ -731,6 +743,13 @@ class FileChecker(object):
logger.fdebug('year ' + str(x['year']) + ' is outside of series title range. Accepting of year.')
issue_year = x['year']
else:
try:
if possible_years[0]['yearposition'] <= highest_series_pos:
highest_series_pos = possible_years[0]['yearposition']
except:
pass
match_type = None #folder/file based on how it was matched.
#logger.fdebug('highest_series_pos is : ' + str(highest_series_pos)
@ -766,18 +785,6 @@ class FileChecker(object):
series_name = re.sub('annual', '', series_name, flags=re.I).strip()
series_name_decoded = re.sub('annual', '', series_name_decoded, flags=re.I).strip()
#if path_list is not None:
# clocation = os.path.join(path, path_list, filename)
#else:
# clocation = self.dir
#if issue_number is None:
# sntmp = series_name.split()
# for sn in sorted(sntmp):
# if sn.isdigit():
# issue_number = sn
# series_name = re.sub(sn, '' , series_name).strip()
# break
if issue_number is None or series_name is None:
logger.fdebug('Cannot parse the filename properly. I\'m going to make note of this filename so that my evil ruler can make it work.')

View file

@ -2103,6 +2103,10 @@ def conversion(value):
value = value.decode('windows-1252')
return value
def chunker(seq, size):
    #Split *seq* (any sliceable sequence: list, tuple, string) into a list of
    #consecutive chunks of at most *size* items; the final chunk holds the
    #remainder (ie. for group in chunker(seq, 3)).
    #Use range instead of the Python-2-only xrange: the comprehension
    #materializes the result immediately either way, so behaviour is identical
    #on Python 2 while remaining valid on Python 3.
    return [seq[pos:pos + size] for pos in range(0, len(seq), size)]
#def file_ops(path,dst):
# # path = source path + filename
# # dst = destination path + filename

View file

@ -1230,63 +1230,51 @@ def forceRescan(ComicID, archive=None, module=None):
}
issID_to_ignore.append(str(iss_id))
if ANNComicID:
# if 'annual' in temploc.lower():
#issID_to_write.append({"tableName": "annuals",
# "newValueDict": newValueDict,
# "controlValueDict": controlValueDict})
myDB.upsert("annuals", newValueDict, controlValueDict)
ANNComicID = None
else:
#issID_to_write.append({"tableName": "issues",
# "valueDict": newValueDict,
# "keyDict": controlValueDict})
myDB.upsert("issues", newValueDict, controlValueDict)
else:
ANNComicID = None
fn+=1
# if len(issID_to_write) > 0:
# for iss in issID_to_write:
# logger.info('writing ' + str(iss))
# writethis = myDB.upsert(iss['tableName'], iss['valueDict'], iss['keyDict'])
#logger.fdebug(module + ' IssueID to ignore: ' + str(issID_to_ignore))
#here we need to change the status of the ones we DIDN'T FIND above since the loop only hits on FOUND issues.
update_iss = []
#break this up in sequnces of 200 so it doesn't break the sql statement.
tmpsql = "SELECT * FROM issues WHERE ComicID=? AND IssueID not in ({seq})".format(seq=','.join(['?'] *(len(issID_to_ignore) -1)))
chkthis = myDB.select(tmpsql, issID_to_ignore)
# chkthis = None
if chkthis is None:
pass
else:
for chk in chkthis:
old_status = chk['Status']
#logger.fdebug('old_status:' + str(old_status))
if old_status == "Skipped":
if mylar.AUTOWANT_ALL:
cnt = 0
for genlist in helpers.chunker(issID_to_ignore, 200):
tmpsql = "SELECT * FROM issues WHERE ComicID=? AND IssueID not in ({seq})".format(seq=','.join(['?'] *(len(genlist) -1)))
chkthis = myDB.select(tmpsql, genlist)
if chkthis is None:
pass
else:
for chk in chkthis:
a = [True for x in update_iss if str(x['IssueID']) == str(chk['IssueID'])]
if a is True:
continue
old_status = chk['Status']
if old_status == "Skipped":
if mylar.AUTOWANT_ALL:
issStatus = "Wanted"
else:
issStatus = "Skipped"
elif old_status == "Archived":
issStatus = "Archived"
elif old_status == "Downloaded":
issStatus = "Archived"
elif old_status == "Wanted":
issStatus = "Wanted"
elif old_status == "Ignored":
issStatus = "Ignored"
elif old_status == "Snatched": #this is needed for torrents, or else it'll keep on queuing..
issStatus = "Snatched"
else:
issStatus = "Skipped"
elif old_status == "Archived":
issStatus = "Archived"
elif old_status == "Downloaded":
issStatus = "Archived"
elif old_status == "Wanted":
issStatus = "Wanted"
elif old_status == "Ignored":
issStatus = "Ignored"
elif old_status == "Snatched": #this is needed for torrents, or else it'll keep on queuing..
issStatus = "Snatched"
else:
issStatus = "Skipped"
#logger.fdebug('[' + chk['IssueID'] + '] new status: ' + str(issStatus))
update_iss.append({"IssueID": chk['IssueID'],
"Status": issStatus})
update_iss.append({"IssueID": chk['IssueID'],
"Status": issStatus})
if len(update_iss) > 0:
i = 0
@ -1304,17 +1292,18 @@ def forceRescan(ComicID, archive=None, module=None):
arcanns = 0
# if filechecker returns 0 files (it doesn't find any), but some issues have a status of 'Archived'
# the loop below won't work...let's adjust :)
arcissues = myDB.select("SELECT count(*) FROM issues WHERE ComicID=? and Status='Archived'", [ComicID])
if int(arcissues[0][0]) > 0:
arcfiles = arcissues[0][0]
arcannuals = myDB.select("SELECT count(*) FROM annuals WHERE ComicID=? and Status='Archived'", [ComicID])
if int(arcannuals[0][0]) > 0:
arcanns = arcannuals[0][0]
if havefiles == 0:
arcissues = myDB.select("SELECT count(*) FROM issues WHERE ComicID=? and Status='Archived'", [ComicID])
if int(arcissues[0][0]) > 0:
arcfiles = arcissues[0][0]
arcannuals = myDB.select("SELECT count(*) FROM annuals WHERE ComicID=? and Status='Archived'", [ComicID])
if int(arcannuals[0][0]) > 0:
arcanns = arcannuals[0][0]
if arcfiles > 0 or arcanns > 0:
arcfiles = arcfiles + arcanns
havefiles = havefiles + arcfiles
logger.fdebug(module + ' Adjusting have total to ' + str(havefiles) + ' because of this many archive files:' + str(arcfiles))
if arcfiles > 0 or arcanns > 0:
arcfiles = arcfiles + arcanns
havefiles = havefiles + arcfiles
logger.fdebug(module + ' Adjusting have total to ' + str(havefiles) + ' because of this many archive files:' + str(arcfiles))
ignorecount = 0
if mylar.IGNORE_HAVETOTAL: # if this is enabled, will increase Have total as if in Archived Status

View file

@ -2359,7 +2359,7 @@ class WebInterface(object):
"percent": percent,
"Have": havearc,
"SpanYears": spanyears,
"Total": totalissues[0][0],
"Total": totalarc,
"CV_ArcID": al['CV_ArcID']})
return serve_template(templatename="storyarc.html", title="Story Arcs", arclist=arclist, delete_type=0)
storyarc_main.exposed = True