FIX: If no providers are selected, the search is aborted so it doesn't spam, FIX: (#650) Changed nzb.su to use the new API url, FIX: (#649) Filechecker parsing failed on recheck/post-process on Windows machines, FIX: Annuals would not get manually post-processed, FIX: Added missing icon for manually adding a series to another series, IMP: Removed dead provider (nzbx) references

This commit is contained in:
evilhero 2014-03-21 14:48:25 -04:00
parent b7347a80ef
commit af41c43b4c
6 changed files with 86 additions and 181 deletions

Binary file not shown.

After

Width:  |  Height:  |  Size: 43 KiB

View File

@ -150,26 +150,27 @@ class PostProcessor(object):
self.nzb_folder = os.path.join(mylar.SAB_DIRECTORY, self.nzb_name).encode(mylar.SYS_ENCODING)
logger.fdebug('SABnzbd Download folder option enabled. Directory set to : ' + self.nzb_folder)
#lookup nzb_name in nzblog table to get issueid
# -- start. not used.
#query SAB to find out if Replace Spaces enabled / not as well as Replace Decimals
#http://localhost:8080/sabnzbd/api?mode=set_config&section=misc&keyword=dirscan_speed&value=5
querysab = str(mylar.SAB_HOST) + "/api?mode=get_config&section=misc&output=xml&apikey=" + str(mylar.SAB_APIKEY)
#querysab = str(mylar.SAB_HOST) + "/api?mode=get_config&section=misc&output=xml&apikey=" + str(mylar.SAB_APIKEY)
#logger.info("querysab_string:" + str(querysab))
file = urllib2.urlopen(querysab)
data = file.read()
file.close()
dom = parseString(data)
#file = urllib2.urlopen(querysab)
#data = file.read()
#file.close()
#dom = parseString(data)
#try:
# sabreps = dom.getElementsByTagName('replace_spaces')[0].firstChild.wholeText
#except:
# errorm = dom.getElementsByTagName('error')[0].firstChild.wholeText
# logger.error(u"Error detected attempting to retrieve SAB data : " + errorm)
# return
#sabrepd = dom.getElementsByTagName('replace_dots')[0].firstChild.wholeText
#logger.fdebug("SAB Replace Spaces: " + str(sabreps))
#logger.fdebug("SAB Replace Dots: " + str(sabrepd))
# -- end. not used.
try:
sabreps = dom.getElementsByTagName('replace_spaces')[0].firstChild.wholeText
except:
errorm = dom.getElementsByTagName('error')[0].firstChild.wholeText
logger.error(u"Error detected attempting to retrieve SAB data : " + errorm)
return
sabrepd = dom.getElementsByTagName('replace_dots')[0].firstChild.wholeText
logger.fdebug("SAB Replace Spaces: " + str(sabreps))
logger.fdebug("SAB Replace Dots: " + str(sabrepd))
if mylar.USE_NZBGET==1:
logger.fdebug("Using NZBGET")
logger.fdebug("NZB name as passed from NZBGet: " + self.nzb_name)
@ -217,84 +218,47 @@ class PostProcessor(object):
break
temploc= tmpfc['JusttheDigits'].replace('_', ' ')
temploc = re.sub('[\#\']', '', temploc)
#logger.fdebug("temploc: " + str(temploc))
ww = shlex.split(temploc)
lnw = len(ww)
wdc = 0
while (wdc < lnw):
#counts get buggered up when the issue is the last field in the filename - ie. '50.cbr'
if ".cbr" in ww[wdc].lower():
ww[wdc] = ww[wdc].replace(".cbr", "")
elif ".cbz" in ww[wdc].lower():
ww[wdc] = ww[wdc].replace(".cbz", "")
if "(c2c)" in ww[wdc].lower():
ww[wdc] = ww[wdc].replace("(c2c)", " ")
get_issue = shlex.split(str(ww[wdc]))
if ww[wdc] != " ":
ww[wdc] = get_issue[0]
if 'annual' in temploc.lower():
logger.info("annual detected.")
annchk = "yes"
fcdigit = helpers.issuedigits(re.sub('annual', '', str(temploc.lower())).strip())
issuechk = myDB.action("SELECT * from annuals WHERE ComicID=? AND Int_IssueNumber=?", [cs['ComicID'],fcdigit]).fetchone()
else:
fcdigit = helpers.issuedigits(temploc)
issuechk = myDB.action("SELECT * from issues WHERE ComicID=? AND Int_IssueNumber=?", [cs['ComicID'],fcdigit]).fetchone()
if '.' in ww[wdc]:
#logger.fdebug("decimal detected...adjusting.")
try:
i = float(ww[wdc])
except ValueError, TypeError:
#not numeric
#logger.fdebug("NOT NUMERIC - new word: " + str(ww[wdc]))
ww[wdc] = ww[wdc].replace(".", "")
else:
#numeric
pass
if ww[wdc].isdigit():
if int(ww[wdc]) > 0:
if wdc+1 < len(ww) and 'au' in ww[wdc+1].lower():
if len(ww[wdc+1]) == 2:
#if the 'AU' is in 005AU vs 005 AU it will yield different results.
ww[wdc] = ww[wdc] + 'AU'
ww[wdc+1] = '93939999919190933'
logger.info("AU Detected seperate from issue - combining and continuing")
fcdigit = helpers.issuedigits(ww[wdc])
if 'annual' in self.nzb_name.lower():
logger.info("annual detected.")
annchk = "yes"
issuechk = myDB.action("SELECT * from annuals WHERE ComicID=? AND Int_IssueNumber=?", [cs['ComicID'],fcdigit]).fetchone()
else:
issuechk = myDB.action("SELECT * from issues WHERE ComicID=? AND Int_IssueNumber=?", [cs['ComicID'],fcdigit]).fetchone()
if issuechk is None:
logger.info("No corresponding issue # found for " + str(cs['ComicID']))
else:
datematch = "True"
if len(watchmatch) > 1:
#if the # of matches is more than 1, we need to make sure we get the right series
#compare the ReleaseDate for the issue, to the found issue date in the filename.
#if ReleaseDate doesn't exist, use IssueDate
#if no issue date was found, then ignore.
if issuechk['ReleaseDate'] is not None:
if int(issuechk['ReleaseDate'][:4]) < int(tmpfc['ComicYear']):
logger.fdebug(str(issuechk['ReleaseDate']) + ' is before the issue year of ' + str(tmpfc['ComicYear']) + ' that was discovered in the filename')
datematch = "False"
else:
if int(issuechk['IssueDate'][:4]) < int(tmpfc['ComicYear']):
logger.fdebug(str(issuechk['IssueDate']) + ' is before the issue year ' + str(tmpfc['ComicYear']) + ' that was discovered in the filename')
datematch = "False"
if issuechk is None:
logger.info("No corresponding issue # found for " + str(cs['ComicID']))
else:
datematch = "True"
if len(watchmatch) > 1:
#if the # of matches is more than 1, we need to make sure we get the right series
#compare the ReleaseDate for the issue, to the found issue date in the filename.
#if ReleaseDate doesn't exist, use IssueDate
#if no issue date was found, then ignore.
if issuechk['ReleaseDate'] is not None:
if int(issuechk['ReleaseDate'][:4]) < int(tmpfc['ComicYear']):
logger.fdebug(str(issuechk['ReleaseDate']) + ' is before the issue year of ' + str(tmpfc['ComicYear']) + ' that was discovered in the filename')
datematch = "False"
else:
if int(issuechk['IssueDate'][:4]) < int(tmpfc['ComicYear']):
logger.fdebug(str(issuechk['IssueDate']) + ' is before the issue year ' + str(tmpfc['ComicYear']) + ' that was discovered in the filename')
datematch = "False"
else:
logger.info("Found matching issue # " + str(fcdigit) + " for ComicID: " + str(cs['ComicID']) + " / IssueID: " + str(issuechk['IssueID']))
else:
logger.info("Found matching issue # " + str(fcdigit) + " for ComicID: " + str(cs['ComicID']) + " / IssueID: " + str(issuechk['IssueID']))
if datematch == "True":
manual_list.append({"ComicLocation": tmpfc['ComicLocation'],
"ComicID": cs['ComicID'],
"IssueID": issuechk['IssueID'],
"IssueNumber": issuechk['Issue_Number'],
"ComicName": cs['ComicName']})
else:
logger.fdebug('Incorrect series - not populating..continuing post-processing')
ccnt+=1
#print manual_list
wdc+=1
if datematch == "True":
manual_list.append({"ComicLocation": tmpfc['ComicLocation'],
"ComicID": cs['ComicID'],
"IssueID": issuechk['IssueID'],
"IssueNumber": issuechk['Issue_Number'],
"ComicName": cs['ComicName']})
else:
logger.fdebug('Incorrect series - not populating..continuing post-processing')
#ccnt+=1
fn+=1
logger.fdebug("There are " + str(len(manual_list)) + " files found that match on your watchlist, " + str(nm) + " do not match anything and will be ignored.")
@ -816,11 +780,14 @@ class PostProcessor(object):
#update snatched table to change status to Downloaded
if annchk == "no":
updater.foundsearch(comicid, issueid, down='True')
dispiss = 'issue: ' + str(issuenumOG)
else:
updater.foundsearch(comicid, issueid, mode='want_ann', down='True')
dispiss = 'annual issue: ' + str(issuenumOG)
#force rescan of files
updater.forceRescan(comicid)
logger.info(u"Post-Processing completed for: " + series + " issue: " + str(issuenumOG) )
logger.info(u"Post-Processing completed for: " + series + " " + dispiss )
self._log(u"Post Processing SUCCESSFULL! ", logger.DEBUG)
# retrieve/create the corresponding comic objects

View File

@ -22,6 +22,7 @@ import re
import logger
import mylar
import sys
import platform
def file2comicmatch(watchmatch):
#print ("match: " + str(watchmatch))
@ -73,11 +74,14 @@ def listFiles(dir,watchcomic,Publisher,AlternateSearch=None,manual=None,sarc=Non
for fname in dirlist:
# at a later point, we should store the basedir and scan it in for additional info, since some users
# have their structure setup as 'Batman v2 (2011)/Batman #1.cbz' or 'Batman/V2-(2011)/Batman #1.cbz'
#logger.fdebug('fname[directory]:' + fname['directory'])
#logger.fdebug('fname[filename]:' + fname['filename'])
if fname['directory'] == '':
basedir = dir
else:
basedir = dir + fname['directory']
#print 'basedir is now: ' + str(basedir)
basedir = fname['directory']
item = fname['filename']
if item == 'cover.jpg' or item == 'cvinfo': continue
@ -664,6 +668,8 @@ def listFiles(dir,watchcomic,Publisher,AlternateSearch=None,manual=None,sarc=Non
if 'annual' in subname.lower():
subname = re.sub('annual', '', subname.lower())
subname = re.sub('\s+', ' ', subname)
#if the sub has an annual, let's remove it from the modwatch as well
modwatchcomic = re.sub('annual', '', modwatchcomic.lower())
#tmpitem = item[:jtd_len]
# if it's an alphanumeric with a space, rejoin, so we can remove it cleanly just below this.
@ -776,12 +782,15 @@ def traverse_directories(dir):
filelist = []
for (dirname, subs, files) in os.walk(dir):
#print('[' + dirname.replace('/mnt/mediavg/Comics/testing/','') + ']')
direc = dirname.replace(dir,'')
for fname in files:
#print(os.path.join(dirname, fname).replace(dirname + '/',''))
if dirname == dir:
direc = ''
else:
direc = dirname
filelist.append({"directory": direc,
"filename": os.path.join(dirname, fname).replace(dirname + '/','')})
"filename": fname})
logger.fdebug('there are ' + str(len(filelist)) + ' files.')
#logger.fdeubg(filelist)

View File

@ -1,64 +0,0 @@
import lib.simplejson as json
import mylar
from mylar import logger, helpers
import urllib2, datetime
def searchit(cm):
    """Search the nzbx.co index for comic NZBs matching the query string *cm*.

    Queries the nzbx.co JSON search API (category 7030 = comics), filters out
    results older than the configured usenet retention, and returns either a
    dict of the form {'entries': [{'title': ..., 'link': ...}, ...]} or the
    string "no results" when nothing usable was found or the fetch failed.

    NOTE(review): Python 2 code (urllib2, `except Exception, e` syntax);
    relies on the project-level `mylar`, `logger`, and bundled `json` modules.
    """
    entries = []
    mres = {}
    # Only hit the provider when it is enabled in the configuration.
    if mylar.NZBX:
        provider = "nzbx"
        #stringsearch = str(cm) + "%20" + str(issue) + "%20" + str(year)
        # cat=7030 is the newznab-style category for comics.
        searchURL = 'https://nzbx.co/api/search?cat=7030&q=' + str(cm)
        logger.fdebug(u'Parsing results from <a href="%s">nzbx.co</a>' % searchURL)
        request = urllib2.Request(searchURL)
        # Identify ourselves with the application's user agent.
        request.add_header('User-Agent', str(mylar.USER_AGENT))
        opener = urllib2.build_opener()
        try:
            data = opener.open(request).read()
        except Exception, e:
            # Network/HTTP failure: bail out early with the sentinel string.
            # (The `data = False` assignment is dead code — the return follows
            # immediately.)
            logger.warn('Error fetching data from nzbx.co : %s' % str(e))
            data = False
            return "no results"
        if data:
            # The API returns a JSON list of result objects.
            d = json.loads(data)
            if not len(d):
                logger.info(u"No results found from nzbx.co")
                return "no results"
            else:
                for item in d:
                    try:
                        url = item['nzb']
                        title = item['name']
                        size = item['size']
                        # 'postdate' is a unix timestamp; compute the age in
                        # days so it can be compared against retention.
                        nzbdate = datetime.datetime.fromtimestamp(item['postdate'])
                        nzbage = abs(( datetime.datetime.now()-nzbdate).days )
                        # Skip posts that have aged past the configured usenet
                        # retention window.
                        # assumes mylar.USENET_RETENTION is set (int-coercible)
                        # — TODO confirm a default exists when unconfigured.
                        if nzbage <= int(mylar.USENET_RETENTION):
                            entries.append({
                                      'title':   str(title),
                                      'link':    str(url)
                                      })
                            #logger.fdebug('Found %s. Size: %s' % (title, helpers.bytes_to_mb(size)))
                        else:
                            logger.fdebug('%s outside usenet retention: %s days.' % (title, nzbage))
                        #resultlist.append((title, size, url, provider))
                        #logger.fdebug('Found %s. Size: %s' % (title, helpers.bytes_to_mb(size)))
                    except Exception, e:
                        # Malformed result entry — log and keep processing the
                        # remaining items rather than aborting the whole search.
                        logger.error(u"An unknown error occurred trying to parse the feed: %s" % e)
    # Package the collected entries; an empty list degrades to the
    # "no results" sentinel string that callers check for.
    if len(entries) >= 1:
        mres['entries'] = entries
    else:
        mres = "no results"
    return mres

View File

@ -259,7 +259,7 @@ def nzbs(provider=None):
elif nzbprovider[nzbpr] == 'nzb.su':
if mylar.NZBSU_UID is None:
mylar.NZBSU_UID = '1'
feed = 'http://nzb.su/rss?t=7030&dl=1&i=' + mylar.NZBSU_UID + '&r=' + mylar.NZBSU_APIKEY
feed = 'http://api.nzb.su/rss?t=7030&dl=1&i=' + mylar.NZBSU_UID + '&r=' + mylar.NZBSU_APIKEY
feedme = feedparser.parse(feed)
site = nzbprovider[nzbpr]
feedthis.append({"feed": feedme,

View File

@ -17,7 +17,7 @@
from __future__ import division
import mylar
from mylar import logger, db, updater, helpers, parseit, findcomicfeed, prov_nzbx, notifiers, rsscheck
from mylar import logger, db, updater, helpers, parseit, findcomicfeed, notifiers, rsscheck
LOG = mylar.LOG_DIR
@ -40,7 +40,7 @@ def search_init(ComicName, IssueNumber, ComicYear, SeriesYear, Publisher, IssueD
if ComicYear == None: ComicYear = '2014'
else: ComicYear = str(ComicYear)[:4]
if Publisher == 'IDW Publishing': Publisher = 'IDW'
logger.info('Publisher is : ' + str(Publisher))
logger.fdebug('Publisher is : ' + str(Publisher))
if mode == 'want_ann':
logger.info("Annual issue search detected. Appending to issue #")
#anything for mode other than None indicates an annual.
@ -80,9 +80,6 @@ def search_init(ComicName, IssueNumber, ComicYear, SeriesYear, Publisher, IssueD
if mylar.DOGNZB == 1:
nzbprovider.append('dognzb')
nzbp+=1
if mylar.NZBX == 1:
nzbprovider.append('nzbx')
nzbp+=1
# --------
# Xperimental
if mylar.EXPERIMENTAL == 1:
@ -123,6 +120,12 @@ def search_init(ComicName, IssueNumber, ComicYear, SeriesYear, Publisher, IssueD
nzbpr == 0
findit = 'no'
if (providercount + torp) == 0:
logger.ERROR('[WARNING] You have 0 search providers enabled. I need at least ONE provider to work. Aborting search.')
findit = "no"
nzbprov = None
return findit, nzbprov
#provider order sequencing here.
#prov_order = []
@ -370,8 +373,6 @@ def NZB_SEARCH(ComicName, IssueNumber, ComicYear, SeriesYear, Publisher, IssueDa
apikey = mylar.NZBSU_APIKEY
elif nzbprov == 'dognzb':
apikey = mylar.DOGNZB_APIKEY
elif nzbprov == 'nzbx':
apikey = 'none'
elif nzbprov == 'experimental':
apikey = 'none'
elif nzbprov == 'newznab':
@ -590,15 +591,13 @@ def NZB_SEARCH(ComicName, IssueNumber, ComicYear, SeriesYear, Publisher, IssueDa
if nzbprov == 'dognzb':
findurl = "https://dognzb.cr/api?t=search&q=" + str(comsearch) + "&o=xml&cat=7030"
elif nzbprov == 'nzb.su':
findurl = "https://nzb.su/api?t=search&q=" + str(comsearch) + "&o=xml&cat=7030"
findurl = "https://api.nzb.su/api?t=search&q=" + str(comsearch) + "&o=xml&cat=7030"
elif nzbprov == 'newznab':
#let's make sure the host has a '/' at the end, if not add it.
if host_newznab[len(host_newznab)-1:len(host_newznab)] != '/':
host_newznab_fix = str(host_newznab) + "/"
else: host_newznab_fix = host_newznab
findurl = str(host_newznab_fix) + "api?t=search&q=" + str(comsearch) + "&o=xml&cat=" + str(category_newznab)
elif nzbprov == 'nzbx':
bb = prov_nzbx.searchit(comsearch)
if nzbprov != 'nzbx':
# helper function to replace apikey here so we avoid logging it ;)
findurl = findurl + "&apikey=" + str(apikey)
@ -1332,12 +1331,6 @@ def NZB_SEARCH(ComicName, IssueNumber, ComicYear, SeriesYear, Publisher, IssueDa
logger.fdebug("nzbname used for post-processing:" + str(nzbname))
#we need to change the nzbx string now to allow for the nzbname rename.
if nzbprov == 'nzbx':
nzbxlink_st = linkapi.find("*|*")
linkapi = linkapi[:(nzbxlink_st + 3)] + str(nzbname)
logger.fdebug("new linkapi (this should =nzbname) :" + str(linkapi))
# #test nzb.get
if mylar.USE_NZBGET:
from xmlrpclib import ServerProxy
@ -1502,7 +1495,7 @@ def searchforissue(issueid=None, new=False, rsscheck=None):
else:
ComicYear = str(result['IssueDate'])[:4]
mode = result['mode']
if (mylar.NZBSU or mylar.DOGNZB or mylar.EXPERIMENTAL or mylar.NEWZNAB or mylar.NZBX or mylar.ENABLE_KAT or mylar.ENABLE_CBT) and (mylar.USE_SABNZBD or mylar.USE_NZBGET or mylar.ENABLE_TORRENTS or mylar.USE_BLACKHOLE):
if (mylar.NZBSU or mylar.DOGNZB or mylar.EXPERIMENTAL or mylar.NEWZNAB or mylar.ENABLE_KAT or mylar.ENABLE_CBT) and (mylar.USE_SABNZBD or mylar.USE_NZBGET or mylar.ENABLE_TORRENTS or mylar.USE_BLACKHOLE):
foundNZB, prov = search_init(comic['ComicName'], result['Issue_Number'], str(ComicYear), comic['ComicYear'], Publisher, IssueDate, StoreDate, result['IssueID'], AlternateSearch, UseFuzzy, ComicVersion, SARC=None, IssueArcID=None, mode=mode, rsscheck=rsscheck, ComicID=result['ComicID'])
if foundNZB == "yes":
#print ("found!")
@ -1534,7 +1527,7 @@ def searchforissue(issueid=None, new=False, rsscheck=None):
IssueYear = str(result['IssueDate'])[:4]
foundNZB = "none"
if (mylar.NZBSU or mylar.DOGNZB or mylar.EXPERIMENTAL or mylar.NEWZNAB or mylar.NZBX or mylar.ENABLE_KAT or mylar.ENABLE_CBT) and (mylar.USE_SABNZBD or mylar.USE_NZBGET or mylar.ENABLE_TORRENTS or mylar.USE_BLACKHOLE):
if (mylar.NZBSU or mylar.DOGNZB or mylar.EXPERIMENTAL or mylar.NEWZNAB or mylar.ENABLE_KAT or mylar.ENABLE_CBT) and (mylar.USE_SABNZBD or mylar.USE_NZBGET or mylar.ENABLE_TORRENTS or mylar.USE_BLACKHOLE):
foundNZB, prov = search_init(comic['ComicName'], result['Issue_Number'], str(IssueYear), comic['ComicYear'], Publisher, IssueDate, StoreDate, result['IssueID'], AlternateSearch, UseFuzzy, ComicVersion, SARC=None, IssueArcID=None, mode=mode, rsscheck=rsscheck, ComicID=result['ComicID'])
if foundNZB == "yes":
logger.fdebug("I found " + comic['ComicName'] + ' #:' + str(result['Issue_Number']))
@ -1567,7 +1560,7 @@ def searchIssueIDList(issuelist):
IssueYear = comic['ComicYear']
else:
IssueYear = str(issue['IssueDate'])[:4]
if (mylar.NZBSU or mylar.DOGNZB or mylar.EXPERIMENTAL or mylar.NEWZNAB or mylar.NZBX or mylar.ENABLE_CBT or mylar.ENABLE_KAT) and (mylar.USE_SABNZBD or mylar.USE_NZBGET or mylar.ENABLE_TORRENTS or mylar.USE_BLACKHOLE):
if (mylar.NZBSU or mylar.DOGNZB or mylar.EXPERIMENTAL or mylar.NEWZNAB or mylar.ENABLE_CBT or mylar.ENABLE_KAT) and (mylar.USE_SABNZBD or mylar.USE_NZBGET or mylar.ENABLE_TORRENTS or mylar.USE_BLACKHOLE):
foundNZB, prov = search_init(comic['ComicName'], issue['Issue_Number'], str(IssueYear), comic['ComicYear'], Publisher, issue['IssueDate'], issue['ReleaseDate'], issue['IssueID'], AlternateSearch, UseFuzzy, ComicVersion, SARC=None, IssueArcID=None, mode=mode, ComicID=issue['ComicID'])
if foundNZB == "yes":
#print ("found!")