mirror of https://github.com/evilhero/mylar
FIX:(#1242) Problem with # in filenames when the issue number is a digit or the series title contains numerics, FIX:(#1243) Fix for incorrect volume label being removed during search parsing, FIX: Removed excessive logging from experimental search, FIX: Removed 200 status code being returned when getting results from the provider api, FIX: If an annual existed in the series directory and annuals were enabled, but the annual was not discovered, statuses for the other issues in the series would fail to update
parent a9518a3ed9
commit a4b2bad38e
4 changed files with 40 additions and 40 deletions
@@ -184,7 +184,7 @@ class FileChecker(object):
         if len(self.failed_files) > 0:
             logger.info('FAILED FILES: %s', self.failed_files)

         return watchmatch

     def parseit(self, path, filename, subpath=None):
@@ -274,12 +274,12 @@ class FileChecker(object):
         ret_sf1 = re.sub('\'', 'g11', ret_sf1).strip()

         #split_file = re.findall('\([\w\s-]+\)|[\w-]+', ret_sf1, re.UNICODE)
-        split_file = re.findall('\([\w\s-]+\)|[-+]?\d*\.\d+|\d+|[\w-]+|#?\d+|\)', ret_sf1, re.UNICODE)
+        split_file = re.findall('\([\w\s-]+\)|[-+]?\d*\.\d+|\d+|[\w-]+|#?\d\.\d+|\)', ret_sf1, re.UNICODE)

         if len(split_file) == 1:
             logger.fdebug('Improperly formatted filename - there is no seperation using appropriate characters between wording.')
             ret_sf1 = re.sub('\-',' ', ret_sf1).strip()
-            split_file = re.findall('\([\w\s-]+\)|[-+]?\d*\.\d+|\d+|[\w-]+', ret_sf1, re.UNICODE)
+            split_file = re.findall('\([\w\s-]+\)|[-+]?\d*\.\d+|\d+|[\w-]+|#?\d\.\d+|\)', ret_sf1, re.UNICODE)


         possible_issuenumbers = []
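The net effect of the tokenizer change above: the old alternation's `#?\d+` would bite off a `#`-prefixed number before the decimal alternative could see it, splitting a decimal issue number in two. A minimal check of the two patterns on an invented sample filename (Python 2 style, matching the codebase):

    import re

    sample = 'Batman 2016 #15.1 (2017)'  # invented example filename
    old_pattern = r'\([\w\s-]+\)|[-+]?\d*\.\d+|\d+|[\w-]+|#?\d+|\)'
    new_pattern = r'\([\w\s-]+\)|[-+]?\d*\.\d+|\d+|[\w-]+|#?\d\.\d+|\)'

    # old: '#15.1' is split into '#15' and '.1'
    print(re.findall(old_pattern, sample, re.UNICODE))   # ['Batman', '2016', '#15', '.1', '(2017)']
    # new: the '#' is skipped and the decimal stays intact
    print(re.findall(new_pattern, sample, re.UNICODE))   # ['Batman', '2016', '15.1', '(2017)']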
@@ -394,7 +394,7 @@ class FileChecker(object):
                 logger.fdebug('Issue Number SHOULD BE: ' + str(lastissue_label))
                 validcountchk = True

-            if lastissue_position == (split_file.index(sf) -1) and lastissue_label is not None:
+            if lastissue_position == (split_file.index(sf) -1) and lastissue_label is not None and '#' not in sf:
                 #find it in the original file to see if there's a decimal between.
                 #logger.fdebug('lastissue_label: ' + str(lastissue_label))
                 #logger.fdebug('current sf: ' + str(sf))
@@ -404,7 +404,7 @@ class FileChecker(object):
                     findst = lastissue_mod_position+1
                     #findst = modfilename.find(lastissue_label, lastissue_mod_position+1) #lastissue_mod_position) #file_length - len(lastissue_label))
                     #logger.fdebug('findst: ' + str(findst))
-                    if findst != '.': #== -1:
+                    if findst != '.' and findst != '#':
                         if sf.isdigit():
                             logger.fdebug('2 seperate numbers detected. Assuming 2nd number is the actual issue')
                             possible_issuenumbers.append({'number': sf,
@@ -21,9 +21,9 @@ def Startit(searchName, searchIssue, searchYear, ComicVersion, IssDateFix):
     searchName = searchName.replace("%20", " ")
     if "," in searchName:
         searchName = searchName.replace(",", "")
-    logger.fdebug("name:" + str(searchName))
-    logger.fdebug("issue:" + str(searchIssue))
-    logger.fdebug("year:" + str(searchYear))
+    #logger.fdebug("name:" + str(searchName))
+    #logger.fdebug("issue:" + str(searchIssue))
+    #logger.fdebug("year:" + str(searchYear))
     encodeSearch = urllib.quote_plus(searchName)
     splitSearch = encodeSearch.split(" ")

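For context on the lines around the silenced logging: `urllib.quote_plus` (Python 2; `urllib.parse.quote_plus` on Python 3) percent-encodes the query and replaces spaces with `+`, so by the time `splitSearch` is built there are no literal spaces left to split on. A quick illustration with a made-up series name:

    import urllib

    searchName = 'Detective Comics'               # made-up series name
    encodeSearch = urllib.quote_plus(searchName)
    print(encodeSearch)                           # Detective+Comics
    splitSearch = encodeSearch.split(" ")
    print(splitSearch)                            # ['Detective+Comics'] - a single element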
@@ -72,7 +72,7 @@ def Startit(searchName, searchIssue, searchYear, ComicVersion, IssDateFix):
     regList = []
     countUp = 0

-    logger.fdebug(str(totNum) + " results")
+    #logger.fdebug(str(totNum) + " results")

     while countUp < totNum:
         urlParse = feed.entries[countUp].enclosures[0]
@@ -83,7 +83,7 @@ def Startit(searchName, searchIssue, searchYear, ComicVersion, IssDateFix):
                         "length": urlParse["length"],
                         "pubdate": feed.entries[countUp].updated})
         countUp=countUp +1
-    logger.fdebug('keypair: ' + str(keyPair))
+    #logger.fdebug('keypair: ' + str(keyPair))


     # thanks to SpammyHagar for spending the time in compiling these regEx's!
@@ -105,13 +105,13 @@ def Startit(searchName, searchIssue, searchYear, ComicVersion, IssDateFix):

     for entry in keyPair:
         title = entry['title']
-        logger.fdebug("titlesplit: " + str(title.split("\"")))
+        #logger.fdebug("titlesplit: " + str(title.split("\"")))
         splitTitle = title.split("\"")
         noYear = 'False'
         _digits = re.compile('\d')

         for subs in splitTitle:
-            logger.fdebug('sub:' + subs)
+            #logger.fdebug('sub:' + subs)
             regExCount = 0
             if len(subs) >= len(cName) and not any(d in subs.lower() for d in except_list) and bool(_digits.search(subs)) is True:
                 #Looping through dictionary to run each regEx - length + regex is determined by regexList up top.
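The filter in the context above gates each quoted substring on three tests: long enough to contain the series name, free of excluded words, and containing at least one digit (the precompiled `_digits` pattern). A small illustration with invented values for `cName` and `except_list`:

    import re

    _digits = re.compile('\d')
    cName = 'Saga'                          # hypothetical series name
    except_list = ['preview', 'variant']    # hypothetical exclusion words

    for subs in ['Saga 001 (2012)', 'Saga preview 001', 'Saga TPB']:
        if len(subs) >= len(cName) and not any(d in subs.lower() for d in except_list) and bool(_digits.search(subs)) is True:
            print(subs + ' -> run the regex list')
        else:
            print(subs + ' -> ignored')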
@@ -132,7 +132,7 @@ def Startit(searchName, searchIssue, searchYear, ComicVersion, IssDateFix):
                 #this is the crap we ignore. Continue (commented else, as it spams the logs)
                 #logger.fdebug('this starts with FOR : ' + str(subs) + '. This is not present in the series - ignoring.')
                     continue
-                logger.fdebug('match.')
+                #logger.fdebug('match.')
                 if IssDateFix != "no":
                     if IssDateFix == "01" or IssDateFix == "02": ComicYearFix = str(int(searchYear) - 1)
                     else: ComicYearFix = str(int(searchYear) + 1)
@@ -600,7 +600,7 @@ def NZB_SEARCH(ComicName, IssueNumber, ComicYear, SeriesYear, Publisher, IssueDa
                         break
                     data = False

-                logger.info('status code: ' + str(r.status_code))
+                #logger.fdebug('status code: ' + str(r.status_code))

                 if str(r.status_code) != '200':
                     logger.warn('Unable to retrieve search results from ' + tmpprov + ' [Status Code returned: ' + str(r.status_code) + ']')
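The guard that follows the silenced status-code log is what actually enforces the HTTP 200 check: anything else is logged as a failed provider query instead of being treated as results. A stripped-down sketch of the same pattern with the requests library, using a placeholder URL and provider name:

    import requests

    tmpprov = 'dognzb'                      # placeholder provider name
    r = requests.get('https://example.com/api', params={'t': 'search'})

    if str(r.status_code) != '200':
        print('Unable to retrieve search results from ' + tmpprov +
              ' [Status Code returned: ' + str(r.status_code) + ']')
    else:
        data = r.content                    # only parse the feed on a 200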
@@ -914,7 +914,7 @@ def NZB_SEARCH(ComicName, IssueNumber, ComicYear, SeriesYear, Publisher, IssueDa
                             continue

                         if fndcomicversion:
-                            cleantitle = re.sub(origvol, '', cleantitle).strip()
+                            cleantitle = re.sub(fndcomicversion, '', cleantitle).strip()
                             logger.fdebug('Newly finished reformed cleantitle (with NO volume label): ' + cleantitle)
                             versionfound = "yes"
                             break
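The one-line fix above matters because `origvol` and `fndcomicversion` can differ once the title has been reformed; substituting the version string that was actually found keeps `re.sub` from missing the label (FIX #1243). Illustration with invented values:

    import re

    cleantitle = 'Invincible 017 (2005) v2'   # hypothetical cleaned title
    fndcomicversion = 'v2'                    # the volume label detected in the title

    cleantitle = re.sub(fndcomicversion, '', cleantitle).strip()
    print(cleantitle)                         # Invincible 017 (2005)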
@@ -1195,9 +1195,9 @@ def forceRescan(ComicID, archive=None, module=None):
                     logger.warn(module + ' you should either Refresh the series, and/or submit an issue on github in regards to the series and the error.')
                     return

-                if writeit == True:
-                    logger.fdebug(module + ' issueID to write to db:' + str(iss_id))
-                    controlValueDict = {"IssueID": iss_id}
+                if writeit == True and haveissue == 'yes':
+                    #logger.fdebug(module + ' issueID to write to db:' + str(iss_id))
+                    controlValueDict = {"IssueID": str(iss_id)}

                 #if Archived, increase the 'Have' count.
                 if archive:
@@ -1205,27 +1205,26 @@ def forceRescan(ComicID, archive=None, module=None):
                 else:
                     issStatus = "Downloaded"

                 if haveissue == "yes":
-                        newValueDict = {"Location": isslocation,
-                                        "ComicSize": issSize,
-                                        "Status": issStatus
-                                        }
-
-                        issID_to_ignore.append(str(iss_id))
-
-                        if ANNComicID:
-                        # if 'annual' in temploc.lower():
-                            #issID_to_write.append({"tableName": "annuals",
-                            #                       "newValueDict": newValueDict,
-                            #                       "controlValueDict": controlValueDict})
-                            myDB.upsert("annuals", newValueDict, controlValueDict)
-                            ANNComicID = None
-                        else:
-                            logger.fdebug(newValueDict)
-                            #issID_to_write.append({"tableName": "issues",
-                            #                       "valueDict": newValueDict,
-                            #                       "keyDict": controlValueDict})
-                            myDB.upsert("issues", newValueDict, controlValueDict)
+                    newValueDict = {"Location": isslocation,
+                                    "ComicSize": issSize,
+                                    "Status": issStatus
+                                    }
+
+                    issID_to_ignore.append(str(iss_id))
+                    if ANNComicID:
+                    # if 'annual' in temploc.lower():
+                        #issID_to_write.append({"tableName": "annuals",
+                        #                       "newValueDict": newValueDict,
+                        #                       "controlValueDict": controlValueDict})
+                        myDB.upsert("annuals", newValueDict, controlValueDict)
+                        ANNComicID = None
+                    else:
+                        #issID_to_write.append({"tableName": "issues",
+                        #                       "valueDict": newValueDict,
+                        #                       "keyDict": controlValueDict})
+                        myDB.upsert("issues", newValueDict, controlValueDict)
+                else:
+                    ANNComicID = None
                 fn+=1

                 # if len(issID_to_write) > 0:
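The substance of the annuals fix is the new `else` branch: previously, when a file failed to resolve to an issue, a leftover `ANNComicID` from an earlier annual match survived into later iterations and derailed the status updates for the rest of the series. A stripped-down, hypothetical sketch of the corrected control flow, with the database calls stubbed out:

    def write_results(scanned_files):
        # scanned_files: hypothetical list of dicts from the matching pass
        ANNComicID = None
        issID_to_ignore = []
        for f in scanned_files:
            ANNComicID = f.get('ann_comicid') or ANNComicID
            if f.get('haveissue') == 'yes':
                issID_to_ignore.append(str(f['iss_id']))
                if ANNComicID:
                    # myDB.upsert("annuals", newValueDict, controlValueDict)
                    ANNComicID = None
                else:
                    pass  # myDB.upsert("issues", newValueDict, controlValueDict)
            else:
                ANNComicID = None   # the fix: clear stale annual state before the next file
        return issID_to_ignore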
@@ -1235,6 +1234,7 @@ def forceRescan(ComicID, archive=None, module=None):

     #logger.fdebug(module + ' IssueID to ignore: ' + str(issID_to_ignore))

+    sys.exit()
     #here we need to change the status of the ones we DIDN'T FIND above since the loop only hits on FOUND issues.
     update_iss = []
     tmpsql = "SELECT * FROM issues WHERE ComicID=? AND IssueID not in ({seq})".format(seq=','.join(['?'] *(len(issID_to_ignore) -1)))
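The `tmpsql` line in the context above builds its `NOT IN (...)` clause by joining one `?` placeholder per excluded ID, keeping the values parameterized rather than string-formatted into the SQL. A simplified, self-contained sketch with sqlite3 and made-up IDs (one placeholder per ID here, without the `-1` adjustment):

    import sqlite3

    issID_to_ignore = ['100', '101', '102']   # made-up IssueIDs found on disk
    conn = sqlite3.connect(':memory:')
    conn.execute("CREATE TABLE issues (ComicID TEXT, IssueID TEXT)")
    conn.executemany("INSERT INTO issues VALUES (?, ?)",
                     [('1', '100'), ('1', '101'), ('1', '103')])

    tmpsql = "SELECT * FROM issues WHERE ComicID=? AND IssueID not in ({seq})".format(
        seq=','.join(['?'] * len(issID_to_ignore)))
    rows = conn.execute(tmpsql, ['1'] + issID_to_ignore).fetchall()
    print(rows)                               # [('1', '103')] - the issues not found on disk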
@@ -1264,7 +1264,7 @@ def forceRescan(ComicID, archive=None, module=None):
             else:
                 issStatus = "Skipped"

-            #logger.fdebug("new status: " + str(issStatus))
+            #logger.fdebug('[' + chk['IssueID'] + '] new status: ' + str(issStatus))

             update_iss.append({"IssueID": chk['IssueID'],
                                "Status": issStatus})
@@ -1340,7 +1340,7 @@ def forceRescan(ComicID, archive=None, module=None):
             comicpath = os.path.join(rescan['ComicLocation'], down['Location'])
             if os.path.exists(comicpath):
                 continue
-                #print "Issue exists - no need to change status."
+                print "Issue exists - no need to change status."
             else:
                 if mylar.MULTIPLE_DEST_DIRS is not None and mylar.MULTIPLE_DEST_DIRS != 'None':
                     if os.path.exists(os.path.join(mylar.MULTIPLE_DEST_DIRS, os.path.basename(rescan['ComicLocation']))):