
IMP: Dognzb / nzb.su RSS Feeds and API searches should now be working again (no UID needed for dognzb) (thnx lostkt87 for all of the detailed logs and feedback), FIX: Filechecker wouldn't recognize some alpha-numerics in issue numbers

evilhero 2014-02-01 14:50:48 -05:00
parent ce5576dd0d
commit bbf9c0f0e6
3 changed files with 50 additions and 37 deletions


@@ -392,6 +392,9 @@ def listFiles(dir,watchcomic,AlternateSearch=None,manual=None,sarc=None):
             #if the issue has an alphanumeric (issue_exceptions, join it and push it through)
             logger.fdebug('JUSTTHEDIGITS [' + justthedigits + ']' )
+            if digitsvalid == "true":
+                pass
+            else:
                 if justthedigits.isdigit():
                     digitsvalid = "true"
                 else:
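
The added guard matters when justthedigits carries an alpha-numeric suffix such as '12AU': str.isdigit() rejects those, so a verdict set by an earlier issue-exceptions pass has to survive this check instead of being overruled. A minimal standalone sketch of that logic, in the codebase's Python 2 idiom — the helper names, the suffix list, and the flag-as-string convention are illustrative, not Mylar's actual API:

    def exceptions_pass(issue):
        # Stand-in for the issue_exceptions pass: accept a trailing known
        # suffix (this list is hypothetical) on an otherwise numeric issue.
        for exc in ['AU', 'INH', 'NOW']:
            if issue.upper().endswith(exc) and issue[:-len(exc)].isdigit():
                return "true"
        return "false"

    def digits_valid(justthedigits):
        digitsvalid = exceptions_pass(justthedigits)
        # The patched check: keep the earlier verdict rather than letting a
        # plain isdigit() overrule it -- '12AU'.isdigit() is False.
        if digitsvalid == "true":
            pass
        else:
            if justthedigits.isdigit():
                digitsvalid = "true"
        return digitsvalid

    print digits_valid("12")    # true
    print digits_valid("12AU")  # true, via the exceptions pass
    print digits_valid("12x")   # false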


@@ -289,23 +289,36 @@ def nzbs(provider=None):
     for ft in feedthis:
         site = ft['site']
-        #print str(site) + " now being updated..."
+        logger.fdebug(str(site) + " now being updated...")
         for entry in ft['feed'].entries:
             #print "entry: " + str(entry)
             tmpsz = entry.enclosures[0]
-            feeddata.append({
-                'Site': site,
-                'Title': ft['feed'].entries[i].title,
-                'Link': ft['feed'].entries[i].link,
-                'Pubdate': ft['feed'].entries[i].updated,
-                'Size': tmpsz['length']
-            })
+            if site == 'dognzb':
+                #because the rss of dog doesn't carry the enclosure item, we'll use the newznab size value
+                if entry.attrib.get('name') == 'size':
+                    tmpsz = entry.attrib.get('value')
+                feeddata.append({
+                    'Site': site,
+                    'Title': entry.title,
+                    'Link': entry.link,
+                    'Pubdate': entry.updated,
+                    'Size': tmpsz
+                })
+            else:
+                #this should work for all newznabs (nzb.su included)
+                #only difference is the size of the file between this and above (which is probably the same)
+                tmpsz = ft['feed'].entries[i].enclosures[0]
+                feeddata.append({
+                    'Site': site,
+                    'Title': ft['feed'].entries[i].title,
+                    'Link': ft['feed'].entries[i].link,
+                    'Pubdate': ft['feed'].entries[i].updated,
+                    'Size': tmpsz['length']
+                })
-            # print ("Site: " + str(feeddata[i]['Site']))
-            # print ("Title: " + str(feeddata[i]['Title']))
-            # print ("Link: " + str(feeddata[i]['Link']))
-            # print ("pubdate: " + str(feeddata[i]['Pubdate']))
-            # print ("size: " + str(feeddata[i]['Size']))
+            #logger.fdebug("Site: " + str(feeddata[i]['Site']))
+            #logger.fdebug("Title: " + str(feeddata[i]['Title']))
+            #logger.fdebug("Link: " + str(feeddata[i]['Link']))
+            #logger.fdebug("pubdate: " + str(feeddata[i]['Pubdate']))
+            #logger.fdebug("size: " + str(feeddata[i]['Size']))
             i+=1
     logger.info(str(site) + ' : ' + str(i) + ' entries indexed.')
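
The dognzb branch works around a feed quirk: dognzb's RSS items don't carry a usable enclosure length, so the size has to come from the newznab size attribute on the item instead. A sketch of what that attribute looks like in newznab-style XML and how it can be read with the stdlib — the sample item is illustrative, not captured from a live dognzb feed, and Mylar itself parses feeds via lib.feedparser rather than ElementTree:

    import xml.etree.ElementTree as ET

    NEWZNAB_NS = 'http://www.newznab.com/DTD/2010/feeds/attributes/'

    sample_item = """
    <item xmlns:newznab="http://www.newznab.com/DTD/2010/feeds/attributes/">
      <title>Some.Comic.001.2014.cbr</title>
      <link>https://dognzb.cr/fetch/abc123</link>
      <newznab:attr name="size" value="26214400"/>
    </item>
    """

    item = ET.fromstring(sample_item)
    tmpsz = None
    for attr in item.findall('{%s}attr' % NEWZNAB_NS):
        # Same test the patch performs: pick the attr whose name is 'size'.
        if attr.attrib.get('name') == 'size':
            tmpsz = attr.attrib.get('value')
    print tmpsz  # 26214400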


@@ -19,9 +19,6 @@ from __future__ import division
 import mylar
 from mylar import logger, db, updater, helpers, parseit, findcomicfeed, prov_nzbx, notifiers, rsscheck
-nzbsu_APIkey = mylar.NZBSU_APIKEY
-dognzb_APIkey = mylar.DOGNZB_APIKEY
-LOG = mylar.LOG_DIR
 import lib.feedparser as feedparser
@@ -589,9 +586,9 @@ def NZB_SEARCH(ComicName, IssueNumber, ComicYear, SeriesYear, nzbprov, nzbpr, Is
         #if bb is not None: logger.fdebug("results: " + str(bb))
     elif nzbprov != 'experimental':
         if nzbprov == 'dognzb':
-            findurl = "http://dognzb.cr/api?t=search&q=" + str(comsearch) + "&o=xml&cat=7030"
+            findurl = "http://dognzb.cr/api?t=search&q=" + str(comsearch) + "&o=xml&cat=7030&apikey=" + str(mylar.DOGNZB_APIKEY)
         elif nzbprov == 'nzb.su':
-            findurl = "https://nzb.su/api?t=search&q=" + str(comsearch) + "&o=xml&cat=7030"
+            findurl = "https://nzb.su/api?t=search&q=" + str(comsearch) + "&o=xml&cat=7030&apikey=" + str(mylar.NZBSU_APIKEY)
         elif nzbprov == 'newznab':
             #let's make sure the host has a '/' at the end, if not add it.
             if host_newznab[len(host_newznab)-1:len(host_newznab)] != '/':
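
These two hunks pair up: the module-level nzbsu_APIkey / dognzb_APIkey aliases are dropped, and the search URLs now append the key from the live config (mylar.DOGNZB_APIKEY, mylar.NZBSU_APIKEY) on each request. Since comsearch is spliced into the URL raw, a safer variant would route the parameters through urlencode; a sketch in the codebase's Python 2 idiom, with the base URL and key as placeholders:

    import urllib

    def build_search_url(base, comsearch, apikey):
        # Same parameters the patch sends, but escaped by urlencode so spaces
        # and '&' in the search term can't mangle the request.
        params = urllib.urlencode({
            't': 'search',     # newznab API function
            'q': comsearch,    # search term
            'o': 'xml',        # response format
            'cat': 7030,       # newznab category for comics
            'apikey': apikey,  # read from config at call time, as in the patch
        })
        return base + '?' + params

    print build_search_url('https://nzb.su/api', 'Fantastic Four 001', 'YOUR_API_KEY')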