diff --git a/data/interfaces/default/base.html b/data/interfaces/default/base.html
index 891d9258..2205f0c3 100755
--- a/data/interfaces/default/base.html
+++ b/data/interfaces/default/base.html
@@ -35,7 +35,7 @@
You're running an unknown version of Mylar. Update or
Close
- % elif mylar.CURRENT_VERSION != mylar.LATEST_VERSION and mylar.INSTALL_TYPE != 'win':
+ % elif mylar.CURRENT_VERSION != mylar.LATEST_VERSION and mylar.INSTALL_TYPE != 'win' and mylar.COMMITS_BEHIND > 0:
diff --git a/data/interfaces/default/readinglist.html b/data/interfaces/default/readinglist.html
index fca6334b..fef93e35 100755
--- a/data/interfaces/default/readinglist.html
+++ b/data/interfaces/default/readinglist.html
@@ -46,7 +46,7 @@
%for issue in issuelist:
- ${issue['ComicName']} (${issue['SeriesYear']}) |
+ ${issue['ComicName']} (${issue['SeriesYear']}) |
${issue['Issue_Number']} |
${issue['IssueDate']} |
${issue['Status']} |
diff --git a/mylar/PostProcessor.py b/mylar/PostProcessor.py
index 80f0f86a..6e2fb1d2 100755
--- a/mylar/PostProcessor.py
+++ b/mylar/PostProcessor.py
@@ -822,8 +822,30 @@ class PostProcessor(object):
updater.forceRescan(comicid)
logger.info(u"Post-Processing completed for: " + series + " issue: " + str(issuenumOG) )
self._log(u"Post Processing SUCCESSFULL! ", logger.DEBUG)
+
+ # retrieve/create the corresponding comic objects
+ if mylar.ENABLE_EXTRA_SCRIPTS:
+ folderp = str(dst) #folder location after move/rename
+ nzbn = self.nzb_name #original nzb name
+ filen = str(nfilename + ext) #new filename
+ #name, comicyear, comicid , issueid, issueyear, issue, publisher
+ #create the dic and send it.
+ seriesmeta = []
+ seriesmetadata = {}
+ seriesmeta.append({
+ 'name': series,
+ 'comicyear': seriesyear,
+ 'comicid': comicid,
+ 'issueid': issueid,
+ 'issueyear': issueyear,
+ 'issue': issuenum,
+ 'publisher': publisher
+ })
+ seriesmetadata['seriesmeta'] = seriesmeta
+ self._run_extra_scripts(nzbn, self.nzb_folder, filen, folderp, seriesmetadata )
+
if ml is not None:
- return
+ return self.log
else:
if mylar.PROWL_ENABLED:
pushmessage = series + '(' + issueyear + ') - issue #' + issuenumOG
@@ -846,27 +868,5 @@ class PostProcessor(object):
boxcar.notify(series, str(issueyear), str(issuenumOG))
- # retrieve/create the corresponding comic objects
-
- if mylar.ENABLE_EXTRA_SCRIPTS:
- folderp = str(dst) #folder location after move/rename
- nzbn = self.nzb_name #original nzb name
- filen = str(nfilename + ext) #new filename
- #name, comicyear, comicid , issueid, issueyear, issue, publisher
- #create the dic and send it.
- seriesmeta = []
- seriesmetadata = {}
- seriesmeta.append({
- 'name': series,
- 'comicyear': seriesyear,
- 'comicid': comicid,
- 'issueid': issueid,
- 'issueyear': issueyear,
- 'issue': issuenum,
- 'publisher': publisher
- })
- seriesmetadata['seriesmeta'] = seriesmeta
- self._run_extra_scripts(nzbname, self.nzb_folder, filen, folderp, seriesmetadata )
-
return self.log
diff --git a/mylar/findcomicfeed.py b/mylar/findcomicfeed.py
index 7fb4a9bc..bbef9c2c 100755
--- a/mylar/findcomicfeed.py
+++ b/mylar/findcomicfeed.py
@@ -14,6 +14,7 @@ def Startit(searchName, searchIssue, searchYear, ComicVersion, IssDateFix):
#searchName = "Uncanny Avengers"
#searchIssue = "01"
#searchYear = "2012"
+ cName = searchName
#clean up searchName due to webparse.
searchName = searchName.replace("%20", " ")
if "," in searchName:
@@ -104,7 +105,7 @@ def Startit(searchName, searchIssue, searchYear, ComicVersion, IssDateFix):
for subs in splitTitle:
#logger.fdebug('sub:' + subs)
regExCount = 0
- if len(subs) > 10 and not any(d in subs.lower() for d in except_list):
+ if len(subs) >= len(cName) and not any(d in subs.lower() for d in except_list):
#Looping through dictionary to run each regEx - length + regex is determined by regexList up top.
# while regExCount < len(regexList):
# regExTest = re.findall(regexList[regExCount], subs, flags=re.IGNORECASE)
@@ -145,8 +146,6 @@ def Startit(searchName, searchIssue, searchYear, ComicVersion, IssDateFix):
if tallycount >= 1:
mres['entries'] = entries
return mres
-# print("Title: "+regList[0])
-# print("Link: "+keyPair[regList[0]])
else:
logger.fdebug("No Results Found")
return "no results"
diff --git a/mylar/mb.py b/mylar/mb.py
index 250f8945..808e377e 100755
--- a/mylar/mb.py
+++ b/mylar/mb.py
@@ -140,7 +140,7 @@ def findComic(name, mode, issue, limityear=None):
'description': xmldesc
})
else:
- print ("year: " + str(xmlYr) + " - contraint not met. Has to be within " + str(limityear))
+ logger.fdebug('year: ' + str(xmlYr) + ' - constraint not met. Has to be within ' + str(limityear))
n+=1
#search results are limited to 100 and by pagination now...let's account for this.
countResults = countResults + 100
diff --git a/mylar/search.py b/mylar/search.py
index b7c992b8..f782f3d0 100755
--- a/mylar/search.py
+++ b/mylar/search.py
@@ -666,7 +666,7 @@ def NZB_SEARCH(ComicName, IssueNumber, ComicYear, SeriesYear, Publisher, IssueDa
#bb = parseit.MysterBinScrape(comsearch[findloop], comyear)
bb = findcomicfeed.Startit(u_ComicName, isssearch, comyear, ComicVersion, IssDateFix)
# since the regexs in findcomicfeed do the 3 loops, lets force the exit after
- cmloopit == 1
+ #cmloopit == 1
done = False
foundc = "no"
@@ -1238,7 +1238,9 @@ def NZB_SEARCH(ComicName, IssueNumber, ComicYear, SeriesYear, Publisher, IssueDa
#following is JUST for nzb.su
if nzbprov == 'nzb.su' or nzbprov == 'newznab':
linkit = os.path.splitext(entry['link'])[1]
- linkit = linkit.replace("&", "%26")
+ if mylar.USE_SABNZBD:
+ linkit = linkit.replace("&", "%26")
+ logger.fdebug('new linkit:' + linkit)
linkapi = str(linkstart) + str(linkit)
else:
# this should work for every other provider