Mirror of https://github.com/evilhero/mylar (synced 2025-03-10 05:52:48 +00:00)
FIX:(#1242) Problem with # in filenames when the issue number is a digit or the series title contains a number, FIX:(#1243) Fix for incorrect volume label being removed during search parsing, FIX: Removed excessive logging from experimental search, FIX: Removed the 200 status code being logged when getting results from the provider API, FIX: If an annual existed in the series directory and annuals were enabled, but the annual was not discovered, the statuses of the other issues in the series would fail to update
This commit is contained in:
parent a9518a3ed9
commit a4b2bad38e

4 changed files with 40 additions and 40 deletions
@@ -274,12 +274,12 @@ class FileChecker(object):
 ret_sf1 = re.sub('\'', 'g11', ret_sf1).strip()

 #split_file = re.findall('\([\w\s-]+\)|[\w-]+', ret_sf1, re.UNICODE)
-split_file = re.findall('\([\w\s-]+\)|[-+]?\d*\.\d+|\d+|[\w-]+|#?\d+|\)', ret_sf1, re.UNICODE)
+split_file = re.findall('\([\w\s-]+\)|[-+]?\d*\.\d+|\d+|[\w-]+|#?\d\.\d+|\)', ret_sf1, re.UNICODE)

 if len(split_file) == 1:
 logger.fdebug('Improperly formatted filename - there is no seperation using appropriate characters between wording.')
 ret_sf1 = re.sub('\-',' ', ret_sf1).strip()
-split_file = re.findall('\([\w\s-]+\)|[-+]?\d*\.\d+|\d+|[\w-]+', ret_sf1, re.UNICODE)
+split_file = re.findall('\([\w\s-]+\)|[-+]?\d*\.\d+|\d+|[\w-]+|#?\d\.\d+|\)', ret_sf1, re.UNICODE)


 possible_issuenumbers = []
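For reference, a standalone sketch (not part of the commit) of how the old and new alternations tokenize a made-up filename of the kind #1242 describes; only the #?\d\.\d+ branch differs:

    import re

    old_pattern = r'\([\w\s-]+\)|[-+]?\d*\.\d+|\d+|[\w-]+|#?\d+|\)'
    new_pattern = r'\([\w\s-]+\)|[-+]?\d*\.\d+|\d+|[\w-]+|#?\d\.\d+|\)'

    sample = 'Spider-Gwen #3.1 (2016)'   # hypothetical filename fragment
    print(re.findall(old_pattern, sample, re.UNICODE))
    # ['Spider-Gwen', '#3', '.1', '(2016)'] - the decimal issue number splits apart
    print(re.findall(new_pattern, sample, re.UNICODE))
    # ['Spider-Gwen', '#3.1', '(2016)'] - '#3.1' survives as a single token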
@@ -394,7 +394,7 @@ class FileChecker(object):
 logger.fdebug('Issue Number SHOULD BE: ' + str(lastissue_label))
 validcountchk = True

-if lastissue_position == (split_file.index(sf) -1) and lastissue_label is not None:
+if lastissue_position == (split_file.index(sf) -1) and lastissue_label is not None and '#' not in sf:
 #find it in the original file to see if there's a decimal between.
 #logger.fdebug('lastissue_label: ' + str(lastissue_label))
 #logger.fdebug('current sf: ' + str(sf))
@@ -404,7 +404,7 @@ class FileChecker(object):
 findst = lastissue_mod_position+1
 #findst = modfilename.find(lastissue_label, lastissue_mod_position+1) #lastissue_mod_position) #file_length - len(lastissue_label))
 #logger.fdebug('findst: ' + str(findst))
-if findst != '.': #== -1:
+if findst != '.' and findst != '#':
 if sf.isdigit():
 logger.fdebug('2 seperate numbers detected. Assuming 2nd number is the actual issue')
 possible_issuenumbers.append({'number': sf,
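A hedged illustration (not Mylar's actual parser) of what the two new '#' checks guard against: two adjacent numeric tokens are only merged into one decimal issue number when the later token does not carry its own '#' marker.

    # Simplified stand-in for the added conditions; the token values are made up.
    def merge_as_decimal(prev_token, prev_position, current_token, current_position):
        # Tokens must be adjacent, a previous numeric candidate must exist,
        # and the current token must not be '#'-prefixed.
        return (prev_token is not None
                and prev_position == current_position - 1
                and '#' not in current_token)

    print(merge_as_decimal('3', 1, '1', 2))    # True  - '3' and '1' could be issue 3.1
    print(merge_as_decimal('3', 1, '#1', 2))   # False - '#1' is an issue number on its own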
@@ -21,9 +21,9 @@ def Startit(searchName, searchIssue, searchYear, ComicVersion, IssDateFix):
 searchName = searchName.replace("%20", " ")
 if "," in searchName:
 searchName = searchName.replace(",", "")
-logger.fdebug("name:" + str(searchName))
-logger.fdebug("issue:" + str(searchIssue))
-logger.fdebug("year:" + str(searchYear))
+#logger.fdebug("name:" + str(searchName))
+#logger.fdebug("issue:" + str(searchIssue))
+#logger.fdebug("year:" + str(searchYear))
 encodeSearch = urllib.quote_plus(searchName)
 splitSearch = encodeSearch.split(" ")

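Side note on the unchanged encodeSearch line: this code base is Python 2, where quote_plus lives directly in urllib; the Python 3 sketch below only shows what the call does to a search term.

    from urllib.parse import quote_plus   # Python 3 location of the same helper

    print(quote_plus("100 Bullets"))       # -> '100+Bullets'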
@@ -72,7 +72,7 @@ def Startit(searchName, searchIssue, searchYear, ComicVersion, IssDateFix):
 regList = []
 countUp = 0

-logger.fdebug(str(totNum) + " results")
+#logger.fdebug(str(totNum) + " results")

 while countUp < totNum:
 urlParse = feed.entries[countUp].enclosures[0]
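A rough, self-contained sketch of the keyPair collection the loop above performs, assuming every entry exposes an enclosure (with href and length) and an updated timestamp, as the experimental feed does:

    import feedparser

    def collect_entries(feed_url):
        feed = feedparser.parse(feed_url)
        key_pairs = []
        for entry in feed.entries:
            enclosure = entry.enclosures[0]          # the attached download
            key_pairs.append({'title': entry.title,
                              'link': enclosure['href'],
                              'length': enclosure['length'],
                              'pubdate': entry.updated})
        return key_pairs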
@@ -83,7 +83,7 @@ def Startit(searchName, searchIssue, searchYear, ComicVersion, IssDateFix):
 "length": urlParse["length"],
 "pubdate": feed.entries[countUp].updated})
 countUp=countUp +1
-logger.fdebug('keypair: ' + str(keyPair))
+#logger.fdebug('keypair: ' + str(keyPair))


 # thanks to SpammyHagar for spending the time in compiling these regEx's!
@@ -105,13 +105,13 @@ def Startit(searchName, searchIssue, searchYear, ComicVersion, IssDateFix):

 for entry in keyPair:
 title = entry['title']
-logger.fdebug("titlesplit: " + str(title.split("\"")))
+#logger.fdebug("titlesplit: " + str(title.split("\"")))
 splitTitle = title.split("\"")
 noYear = 'False'
 _digits = re.compile('\d')

 for subs in splitTitle:
-logger.fdebug('sub:' + subs)
+#logger.fdebug('sub:' + subs)
 regExCount = 0
 if len(subs) >= len(cName) and not any(d in subs.lower() for d in except_list) and bool(_digits.search(subs)) is True:
 #Looping through dictionary to run each regEx - length + regex is determined by regexList up top.
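Restating the unchanged filter line in isolation (the except_list values here are placeholders, not Mylar's actual list): a quoted chunk of the result title is only run through the regex list when it is at least as long as the comic name, contains a digit, and hits nothing on the exclusion list.

    import re

    _digits = re.compile(r'\d')
    except_list = ['preview', 'variant']   # placeholder values

    def worth_checking(subs, cName):
        return (len(subs) >= len(cName)
                and not any(d in subs.lower() for d in except_list)
                and bool(_digits.search(subs)))

    print(worth_checking('100 Bullets 055 (2004)', '100 Bullets'))   # True
    print(worth_checking('Preview pages', '100 Bullets'))            # False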
@@ -132,7 +132,7 @@ def Startit(searchName, searchIssue, searchYear, ComicVersion, IssDateFix):
 #this is the crap we ignore. Continue (commented else, as it spams the logs)
 #logger.fdebug('this starts with FOR : ' + str(subs) + '. This is not present in the series - ignoring.')
 continue
-logger.fdebug('match.')
+#logger.fdebug('match.')
 if IssDateFix != "no":
 if IssDateFix == "01" or IssDateFix == "02": ComicYearFix = str(int(searchYear) - 1)
 else: ComicYearFix = str(int(searchYear) + 1)
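The ComicYearFix arithmetic above, pulled out on its own (IssDateFix is assumed here to be a two-digit month string whenever it is not "no"):

    def fixed_year(searchYear, IssDateFix):
        # Values "01" and "02" search against the previous year,
        # anything else against the following year.
        if IssDateFix in ("01", "02"):
            return str(int(searchYear) - 1)
        return str(int(searchYear) + 1)

    print(fixed_year("2016", "01"))   # -> '2015'
    print(fixed_year("2016", "07"))   # -> '2017'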
@@ -600,7 +600,7 @@ def NZB_SEARCH(ComicName, IssueNumber, ComicYear, SeriesYear, Publisher, IssueDa
 break
 data = False

-logger.info('status code: ' + str(r.status_code))
+#logger.fdebug('status code: ' + str(r.status_code))

 if str(r.status_code) != '200':
 logger.warn('Unable to retrieve search results from ' + tmpprov + ' [Status Code returned: ' + str(r.status_code) + ']')
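A tiny generic sketch of the status check above, with a placeholder URL; comparing str(r.status_code) to '200' behaves the same as comparing the integer to 200.

    import requests

    r = requests.get('https://example.com/api', timeout=30)
    if str(r.status_code) != '200':
        print('Unable to retrieve search results [Status Code returned: '
              + str(r.status_code) + ']')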
@@ -914,7 +914,7 @@ def NZB_SEARCH(ComicName, IssueNumber, ComicYear, SeriesYear, Publisher, IssueDa
 continue

 if fndcomicversion:
-cleantitle = re.sub(origvol, '', cleantitle).strip()
+cleantitle = re.sub(fndcomicversion, '', cleantitle).strip()
 logger.fdebug('Newly finished reformed cleantitle (with NO volume label): ' + cleantitle)
 versionfound = "yes"
 break
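Illustration of #1243 with made-up values: the label that was actually matched in the result title (fndcomicversion) is what gets stripped, rather than a previously stored volume string.

    import re

    cleantitle = 'Invincible V2 055 (2008)'   # hypothetical result title
    fndcomicversion = 'V2'                    # hypothetical matched volume label
    cleantitle = re.sub(fndcomicversion, '', cleantitle).strip()
    print(cleantitle)   # the matched 'V2' is removed from the title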
@@ -1195,9 +1195,9 @@ def forceRescan(ComicID, archive=None, module=None):
 logger.warn(module + ' you should either Refresh the series, and/or submit an issue on github in regards to the series and the error.')
 return

-if writeit == True:
-logger.fdebug(module + ' issueID to write to db:' + str(iss_id))
-controlValueDict = {"IssueID": iss_id}
+if writeit == True and haveissue == 'yes':
+#logger.fdebug(module + ' issueID to write to db:' + str(iss_id))
+controlValueDict = {"IssueID": str(iss_id)}

 #if Archived, increase the 'Have' count.
 if archive:
@@ -1205,27 +1205,26 @@ def forceRescan(ComicID, archive=None, module=None):
 else:
 issStatus = "Downloaded"

-if haveissue == "yes":
 newValueDict = {"Location": isslocation,
 "ComicSize": issSize,
 "Status": issStatus
 }

 issID_to_ignore.append(str(iss_id))

 if ANNComicID:
 # if 'annual' in temploc.lower():
 #issID_to_write.append({"tableName": "annuals",
 # "newValueDict": newValueDict,
 # "controlValueDict": controlValueDict})
 myDB.upsert("annuals", newValueDict, controlValueDict)
 ANNComicID = None
 else:
-logger.fdebug(newValueDict)
 #issID_to_write.append({"tableName": "issues",
 # "valueDict": newValueDict,
 # "keyDict": controlValueDict})
 myDB.upsert("issues", newValueDict, controlValueDict)
+else:
+ANNComicID = None
 fn+=1

 # if len(issID_to_write) > 0:
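myDB.upsert above takes a dict of columns to write plus a dict identifying the row; the sqlite3 sketch below is a generic illustration of that pattern, not Mylar's actual implementation.

    import sqlite3

    def upsert(conn, table, value_dict, key_dict):
        # UPDATE the matching row first; INSERT it when nothing matched.
        sets = ', '.join('%s = ?' % col for col in value_dict)
        where = ' AND '.join('%s = ?' % col for col in key_dict)
        params = list(value_dict.values()) + list(key_dict.values())
        cur = conn.execute('UPDATE %s SET %s WHERE %s' % (table, sets, where), params)
        if cur.rowcount == 0:
            cols = list(value_dict) + list(key_dict)
            marks = ', '.join('?' for _ in cols)
            conn.execute('INSERT INTO %s (%s) VALUES (%s)'
                         % (table, ', '.join(cols), marks), params)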
@@ -1235,6 +1234,7 @@ def forceRescan(ComicID, archive=None, module=None):

 #logger.fdebug(module + ' IssueID to ignore: ' + str(issID_to_ignore))

+sys.exit()
 #here we need to change the status of the ones we DIDN'T FIND above since the loop only hits on FOUND issues.
 update_iss = []
 tmpsql = "SELECT * FROM issues WHERE ComicID=? AND IssueID not in ({seq})".format(seq=','.join(['?'] *(len(issID_to_ignore) -1)))
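The {seq} placeholder construction above, run on its own with throwaway IDs, expands to one '?' per excluded IssueID:

    ids_to_skip = ['101', '102', '103']   # hypothetical IssueIDs
    tmpsql = ("SELECT * FROM issues WHERE ComicID=? AND IssueID not in ({seq})"
              .format(seq=','.join(['?'] * len(ids_to_skip))))
    print(tmpsql)
    # SELECT * FROM issues WHERE ComicID=? AND IssueID not in (?,?,?)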
@@ -1264,7 +1264,7 @@ def forceRescan(ComicID, archive=None, module=None):
 else:
 issStatus = "Skipped"

-#logger.fdebug("new status: " + str(issStatus))
+#logger.fdebug('[' + chk['IssueID'] + '] new status: ' + str(issStatus))

 update_iss.append({"IssueID": chk['IssueID'],
 "Status": issStatus})
@@ -1340,7 +1340,7 @@ def forceRescan(ComicID, archive=None, module=None):
 comicpath = os.path.join(rescan['ComicLocation'], down['Location'])
 if os.path.exists(comicpath):
 continue
-#print "Issue exists - no need to change status."
+print "Issue exists - no need to change status."
 else:
 if mylar.MULTIPLE_DEST_DIRS is not None and mylar.MULTIPLE_DEST_DIRS != 'None':
 if os.path.exists(os.path.join(mylar.MULTIPLE_DEST_DIRS, os.path.basename(rescan['ComicLocation']))):
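The MULTIPLE_DEST_DIRS lookup above, shown with made-up paths and assuming the setting holds a single directory: the series folder name (the basename of its primary location) is checked underneath the secondary destination directory.

    import os

    multiple_dest_dirs = '/mnt/comics-overflow'            # hypothetical setting
    comic_location = '/mnt/comics/100 Bullets (1999)'      # hypothetical series path
    candidate = os.path.join(multiple_dest_dirs, os.path.basename(comic_location))
    print(candidate)   # /mnt/comics-overflow/100 Bullets (1999)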