mirror of
https://github.com/evilhero/mylar
synced 2025-03-09 05:13:35 +00:00
FIX:(#944) When using RSS Feeds, and grabbing off of them - would sometimes take an incorrect value for the nzbname which would result in a failed post-processing message (due to the nzbname being generated off of the X-DNZB-ProperName response header instead of the original nzbname)
This commit is contained in:
parent
a27a18619e
commit
dea9ee73a2
4 changed files with 107 additions and 21 deletions
|
@ -343,22 +343,23 @@ class PostProcessor(object):
|
|||
nzbname = re.sub(' ', '.', str(nzbname))
|
||||
nzbname = re.sub('[\,\:\?\']', '', str(nzbname))
|
||||
nzbname = re.sub('[\&]', 'and', str(nzbname))
|
||||
nzbname = re.sub('_', '.', str(nzbname))
|
||||
|
||||
logger.fdebug(module + ' After conversions, nzbname is : ' + str(nzbname))
|
||||
# if mylar.USE_NZBGET==1:
|
||||
# nzbname=self.nzb_name
|
||||
self._log("nzbname: " + str(nzbname))
|
||||
|
||||
nzbiss = myDB.selectone("SELECT * from nzblog WHERE nzbname=?", [nzbname]).fetchone()
|
||||
nzbiss = myDB.selectone("SELECT * from nzblog WHERE nzbname=? or altnzbname=?", [nzbname,nzbname]).fetchone()
|
||||
|
||||
if nzbiss is None:
|
||||
self._log("Failure - could not initially locate nzbfile in my database to rename.")
|
||||
logger.fdebug(module + ' Failure - could not locate nzbfile initially')
|
||||
# if failed on spaces, change it all to decimals and try again.
|
||||
nzbname = re.sub('_', '.', str(nzbname))
|
||||
nzbname = re.sub('[\(\)]', '', str(nzbname))
|
||||
self._log("trying again with this nzbname: " + str(nzbname))
|
||||
logger.fdebug(module + ' Trying to locate nzbfile again with nzbname of : ' + str(nzbname))
|
||||
nzbiss = myDB.selectone("SELECT * from nzblog WHERE nzbname=?", [nzbname]).fetchone()
|
||||
nzbiss = myDB.selectone("SELECT * from nzblog WHERE nzbname=? or altnzbname=?", [nzbname,nzbname]).fetchone()
|
||||
if nzbiss is None:
|
||||
logger.error(module + ' Unable to locate downloaded file to rename. PostProcessing aborted.')
|
||||
self._log('Unable to locate downloaded file to rename. PostProcessing aborted.')
|
||||
|
|
|
@ -1414,7 +1414,7 @@ def dbcheck():
|
|||
c.execute('CREATE TABLE IF NOT EXISTS issues (IssueID TEXT, ComicName TEXT, IssueName TEXT, Issue_Number TEXT, DateAdded TEXT, Status TEXT, Type TEXT, ComicID TEXT, ArtworkURL Text, ReleaseDate TEXT, Location TEXT, IssueDate TEXT, Int_IssueNumber INT, ComicSize TEXT, AltIssueNumber TEXT, IssueDate_Edit TEXT)')
|
||||
c.execute('CREATE TABLE IF NOT EXISTS snatched (IssueID TEXT, ComicName TEXT, Issue_Number TEXT, Size INTEGER, DateAdded TEXT, Status TEXT, FolderName TEXT, ComicID TEXT, Provider TEXT)')
|
||||
c.execute('CREATE TABLE IF NOT EXISTS upcoming (ComicName TEXT, IssueNumber TEXT, ComicID TEXT, IssueID TEXT, IssueDate TEXT, Status TEXT, DisplayComicName TEXT)')
|
||||
c.execute('CREATE TABLE IF NOT EXISTS nzblog (IssueID TEXT, NZBName TEXT, SARC TEXT, PROVIDER TEXT, ID TEXT)')
|
||||
c.execute('CREATE TABLE IF NOT EXISTS nzblog (IssueID TEXT, NZBName TEXT, SARC TEXT, PROVIDER TEXT, ID TEXT, AltNZBName TEXT)')
|
||||
c.execute('CREATE TABLE IF NOT EXISTS weekly (SHIPDATE text, PUBLISHER text, ISSUE text, COMIC VARCHAR(150), EXTRA text, STATUS text)')
|
||||
# c.execute('CREATE TABLE IF NOT EXISTS sablog (nzo_id TEXT, ComicName TEXT, ComicYEAR TEXT, ComicIssue TEXT, name TEXT, nzo_complete TEXT)')
|
||||
c.execute('CREATE TABLE IF NOT EXISTS importresults (impID TEXT, ComicName TEXT, ComicYear TEXT, Status TEXT, ImportDate TEXT, ComicFilename TEXT, ComicLocation TEXT, WatchMatch TEXT, DisplayName TEXT, SRID TEXT, ComicID TEXT, IssueID TEXT)')
|
||||
|
@ -1625,6 +1625,10 @@ def dbcheck():
|
|||
except sqlite3.OperationalError:
|
||||
c.execute('ALTER TABLE nzblog ADD COLUMN ID TEXT')
|
||||
|
||||
try:
|
||||
c.execute('SELECT AltNZBName from nzblog')
|
||||
except sqlite3.OperationalError:
|
||||
c.execute('ALTER TABLE nzblog ADD COLUMN ALTNZBName TEXT')
|
||||
|
||||
## -- Annuals Table --
|
||||
|
||||
|
|
|
@ -533,7 +533,7 @@ def NZB_SEARCH(ComicName, IssueNumber, ComicYear, SeriesYear, Publisher, IssueDa
|
|||
except Exception, e:
|
||||
logger.warn('Error fetching data from %s: %s' % (nzbprov, e))
|
||||
data = False
|
||||
#logger.info('data: ' + data)
|
||||
|
||||
if data:
|
||||
bb = feedparser.parse(data)
|
||||
else:
|
||||
|
@ -1276,7 +1276,7 @@ def NZB_SEARCH(ComicName, IssueNumber, ComicYear, SeriesYear, Publisher, IssueDa
|
|||
nzbid = searchresult[0]['nzbid']
|
||||
nzbname = searchresult[0]['nzbname']
|
||||
sent_to = searchresult[0]['sent_to']
|
||||
|
||||
alt_nzbname = searchresult[0]['alt_nzbname']
|
||||
foundc = "yes"
|
||||
done = True
|
||||
break
|
||||
|
@ -1294,10 +1294,16 @@ def NZB_SEARCH(ComicName, IssueNumber, ComicYear, SeriesYear, Publisher, IssueDa
|
|||
findloop+=1
|
||||
if foundc == "yes":
|
||||
foundcomic.append("yes")
|
||||
logger.fdebug("Found matching comic...preparing to send to Updater with IssueID: " + str(IssueID) + " and nzbname: " + str(nzbname))
|
||||
if '[RSS]' in tmpprov : tmpprov = re.sub('\[RSS\]','', tmpprov).strip()
|
||||
updater.nzblog(IssueID, nzbname, ComicName, SARC, IssueArcID, nzbid, tmpprov)
|
||||
#send out the notifications for the snatch.
|
||||
if alt_nzbname is None or alt_nzbname == '':
|
||||
logger.fdebug("Found matching comic...preparing to send to Updater with IssueID: " + str(IssueID) + " and nzbname: " + str(nzbname))
|
||||
if '[RSS]' in tmpprov : tmpprov = re.sub('\[RSS\]','', tmpprov).strip()
|
||||
updater.nzblog(IssueID, nzbname, ComicName, SARC=SARC, IssueArcID=IssueArcID, id=nzbid, prov=tmpprov)
|
||||
else:
|
||||
logger.fdebug("Found matching comic...preparing to send to Updater with IssueID: " + str(IssueID) + " and nzbname: " + str(nzbname) + '[' + alt_nzbname + ']')
|
||||
if '[RSS]' in tmpprov : tmpprov = re.sub('\[RSS\]','', tmpprov).strip()
|
||||
updater.nzblog(IssueID, nzbname, ComicName, SARC=SARC, IssueArcID=IssueArcID, id=nzbid, prov=tmpprov, alt_nzbname=alt_nzbname)
|
||||
|
||||
# #send out the notifications for the snatch.
|
||||
notify_snatch(nzbname, sent_to, helpers.filesafe(modcomicname), comyear, IssueNumber, nzbprov)
|
||||
prov_count == 0
|
||||
#break
|
||||
|
@ -1560,6 +1566,7 @@ def nzbname_create(provider, title=None, info=None):
|
|||
return nzbname
|
||||
|
||||
def searcher(nzbprov, nzbname, comicinfo, link, IssueID, ComicID, tmpprov, directsend=None):
|
||||
alt_nzbname = None
|
||||
|
||||
#load in the details of the issue from the tuple.
|
||||
ComicName = comicinfo[0]['ComicName']
|
||||
|
@ -1612,6 +1619,61 @@ def searcher(nzbprov, nzbname, comicinfo, link, IssueID, ComicID, tmpprov, direc
|
|||
path_parts = url_parts[2].rpartition('/')
|
||||
nzbid = re.sub('.nzb&','', path_parts[2]).strip()
|
||||
elif nzbprov == 'dognzb':
|
||||
if link:
|
||||
opener = urllib.FancyURLopener({})
|
||||
opener.addheaders = []
|
||||
opener.addheader('User-Agent', str(mylar.USER_AGENT))
|
||||
nzo_info = {}
|
||||
filen = None
|
||||
try:
|
||||
fn, header = opener.retrieve(link)
|
||||
except:
|
||||
fn = None
|
||||
|
||||
for tup in header.items():
|
||||
try:
|
||||
item = tup[0].lower()
|
||||
value = tup[1].strip()
|
||||
except:
|
||||
continue
|
||||
if item in ('category_id', 'x-dnzb-category'):
|
||||
category = value
|
||||
elif item in ('x-dnzb-moreinfo',):
|
||||
nzo_info['more_info'] = value
|
||||
elif item in ('x-dnzb-name',):
|
||||
filen = value
|
||||
if not filen.endswith('.nzb'):
|
||||
filen += '.nzb'
|
||||
nzo_info['filename'] = filen
|
||||
elif item == 'x-dnzb-propername':
|
||||
nzo_info['propername'] = value
|
||||
elif item == 'x-dnzb-episodename':
|
||||
nzo_info['episodename'] = value
|
||||
elif item == 'x-dnzb-year':
|
||||
nzo_info['year'] = value
|
||||
elif item == 'x-dnzb-failure':
|
||||
nzo_info['failure'] = value
|
||||
elif item == 'x-dnzb-details':
|
||||
nzo_info['details'] = value
|
||||
elif item in ('content-length',):
|
||||
try:
|
||||
ivalue = int(value)
|
||||
except:
|
||||
ivalue = 0
|
||||
length = ivalue
|
||||
nzo_info['length'] = length
|
||||
|
||||
if not filen:
|
||||
for item in tup:
|
||||
if "filename=" in item:
|
||||
filen = item[item.index("filename=") + 9:].strip(';').strip('"')
|
||||
|
||||
logger.info('nzo_info:' + str(nzo_info))
|
||||
if re.sub('.nzb','', filen.lower()).strip() != re.sub('.nzb','', nzbname.lower()).strip():
|
||||
alt_nzbname = re.sub('.nzb','', filen).strip()
|
||||
logger.info('filen: ' + filen + ' -- nzbname: ' + nzbname + ' are not identical. Storing extra value as : ' + alt_nzbname)
|
||||
|
||||
|
||||
url_parts = urlparse.urlparse(link)
|
||||
path_parts = url_parts[2].rpartition('/')
|
||||
nzbid = path_parts[0].rsplit('/',1)[1]
|
||||
|
@ -1784,18 +1846,24 @@ def searcher(nzbprov, nzbname, comicinfo, link, IssueID, ComicID, tmpprov, direc
|
|||
|
||||
#nzbid, nzbname, sent_to
|
||||
return_val = []
|
||||
return_val.append({"nzbid": nzbid,
|
||||
"nzbname": nzbname,
|
||||
"sent_to": sent_to})
|
||||
return_val.append({"nzbid": nzbid,
|
||||
"nzbname": nzbname,
|
||||
"sent_to": sent_to,
|
||||
"alt_nzbname": alt_nzbname})
|
||||
|
||||
#if it's a directsend link (ie. via a retry).
|
||||
if directsend is None:
|
||||
return return_val
|
||||
else:
|
||||
#update the db on the snatch.
|
||||
logger.fdebug("Found matching comic...preparing to send to Updater with IssueID: " + str(IssueID) + " and nzbname: " + str(nzbname))
|
||||
if '[RSS]' in tmpprov : tmpprov = re.sub('\[RSS\]','', tmpprov).strip()
|
||||
updater.nzblog(IssueID, nzbname, ComicName, SARC=None, IssueArcID=None, id=nzbid, prov=tmpprov)
|
||||
if alt_nzbname is None or alt_nzbname == '':
|
||||
logger.fdebug("Found matching comic...preparing to send to Updater with IssueID: " + str(IssueID) + " and nzbname: " + str(nzbname))
|
||||
if '[RSS]' in tmpprov : tmpprov = re.sub('\[RSS\]','', tmpprov).strip()
|
||||
updater.nzblog(IssueID, nzbname, ComicName, SARC=None, IssueArcID=None, id=nzbid, prov=tmpprov)
|
||||
else:
|
||||
logger.fdebug("Found matching comic...preparing to send to Updater with IssueID: " + str(IssueID) + " and nzbname: " + str(nzbname) + ' [' + alt_nzbname + ']')
|
||||
if '[RSS]' in tmpprov : tmpprov = re.sub('\[RSS\]','', tmpprov).strip()
|
||||
updater.nzblog(IssueID, nzbname, ComicName, SARC=None, IssueArcID=None, id=nzbid, prov=tmpprov, alt_nzbname=alt_nzbname)
|
||||
#send out notifications for on snatch after the updater incase notification fails (it would bugger up the updater/pp scripts)
|
||||
notify_snatch(nzbname, sent_to, helpers.filesafe(modcomicname), comyear, IssueNumber, nzbprov)
|
||||
return
|
||||
|
|
|
@ -456,15 +456,15 @@ def no_searchresults(ComicID):
|
|||
"LatestIssue": "Error"}
|
||||
myDB.upsert("comics", newValue, controlValue)
|
||||
|
||||
def nzblog(IssueID, NZBName, ComicName, SARC=None, IssueArcID=None, id=None, prov=None):
|
||||
def nzblog(IssueID, NZBName, ComicName, SARC=None, IssueArcID=None, id=None, prov=None, alt_nzbname=None):
|
||||
myDB = db.DBConnection()
|
||||
|
||||
newValue = {"NZBName": NZBName}
|
||||
newValue = {'NZBName': NZBName}
|
||||
|
||||
if IssueID is None or IssueID == 'None':
|
||||
#if IssueID is None, it's a one-off download from the pull-list.
|
||||
#give it a generic ID above the last one so it doesn't throw an error later.
|
||||
print "SARC detected as: " + str(SARC)
|
||||
logger.fdebug("Story Arc (SARC) detected as: " + str(SARC))
|
||||
if mylar.HIGHCOUNT == 0:
|
||||
IssueID = '900000'
|
||||
else:
|
||||
|
@ -480,10 +480,23 @@ def nzblog(IssueID, NZBName, ComicName, SARC=None, IssueArcID=None, id=None, pro
|
|||
|
||||
if id:
|
||||
logger.info('setting the nzbid for this download grabbed by ' + prov + ' in the nzblog to : ' + str(id))
|
||||
newValue["ID"] = id
|
||||
newValue['ID'] = id
|
||||
|
||||
if alt_nzbname:
|
||||
logger.info('setting the alternate nzbname for this download grabbed by ' + prov + ' in the nzblog to : ' + alt_nzbname)
|
||||
newValue['AltNZBName'] = alt_nzbname
|
||||
|
||||
#check if it exists already in the log.
|
||||
chkd = myDB.selectone('SELECT * FROM nzblog WHERE IssueID=? and Provider=?', [IssueID, prov]).fetchone()
|
||||
if chkd is None:
|
||||
pass
|
||||
else:
|
||||
if chkd['AltNZBName'] is None or chkd['AltNZBName'] == '':
|
||||
#we need to wipe the entry so we can re-update with the alt-nzbname if required
|
||||
myDB.action('DELETE FROM nzblog WHERE IssueID=? and Provider=?', [IssueID, prov])
|
||||
logger.fdebug('Deleted stale entry from nzblog for IssueID: ' + str(IssueID) + ' [' + prov + ']')
|
||||
myDB.upsert("nzblog", newValue, controlValue)
|
||||
|
||||
|
||||
|
||||
def foundsearch(ComicID, IssueID, mode=None, down=None, provider=None, SARC=None, IssueArcID=None, module=None):
|
||||
# When doing a Force Search (Wanted tab), the resulting search calls this to update.
|
||||
|
|
Loading…
Add table
Reference in a new issue