Consider Usenet Retention in searches

All providers should now consider usenet retention. I've tested newznab,
nzbx, and "experimental"; nzb.su and dognzb are newznab-based, so they
should work, but I can't test them.
TheLabRatt 2013-03-01 12:58:01 +00:00
parent 8ce830d703
commit 12b195e2f9
3 changed files with 903 additions and 885 deletions


@@ -29,8 +29,10 @@ def Startit(searchName, searchIssue, searchYear):
     if mylar.USE_MAXSIZE:
         size_constraints = size_constraints + "&maxsize=" + str(mylar.MAXSIZE)
+    if mylar.USENET_RETENTION != None:
+        max_age = "&age=" + str(mylar.USENET_RETENTION)
-    feed = feedparser.parse("http://nzbindex.nl/rss/alt.binaries.comics.dcp/?sort=agedesc&" + str(size_constraints) + "&dq=%s&max=25&more=1" %joinSearch)
+    feed = feedparser.parse("http://nzbindex.nl/rss/alt.binaries.comics.dcp/?sort=agedesc&" + str(size_constraints) + str(max_age) + "&dq=%s&max=25&more=1" %joinSearch)
     totNum = len(feed.entries)
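
The retention check here appends nzbindex's "age" query parameter so the RSS feed itself is pre-filtered. Note that max_age is only assigned when USENET_RETENTION is set, yet the new parse call always references it; unless max_age is initialised earlier in Startit(), that call would raise a NameError when retention is unset. A minimal standalone sketch of the URL construction (build_feed_url is a hypothetical helper, not part of the commit), seeding max_age so the unset case stays safe:

import feedparser  # the same parser Startit() uses

def build_feed_url(join_search, size_constraints="", usenet_retention=None):
    # Hypothetical helper mirroring the Startit() change above.
    # Seeding max_age keeps the URL valid when no retention is configured.
    max_age = ""
    if usenet_retention is not None:
        max_age = "&age=" + str(usenet_retention)
    return ("http://nzbindex.nl/rss/alt.binaries.comics.dcp/?sort=agedesc&"
            + str(size_constraints) + max_age + "&dq=%s&max=25&more=1" % join_search)

feed = feedparser.parse(build_feed_url("batman", "&maxsize=100", 1100))
print(len(feed.entries))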


@@ -1,7 +1,7 @@
 import lib.simplejson as json
 import mylar
 from mylar import logger, helpers
-import urllib2
+import urllib2, datetime

 def searchit(cm):

@@ -39,18 +39,24 @@ def searchit(cm):
             url = item['nzb']
             title = item['name']
             size = item['size']
+            nzbdate = datetime.datetime.fromtimestamp(item['postdate'])
+            nzbage = abs(( datetime.datetime.now()-nzbdate).days )
+            if nzbage <= mylar.USENET_RETENTION:
-            entries.append({
-                'title': str(title),
-                'link': str(url)
-                })
-            #resultlist.append((title, size, url, provider))
-            logger.fdebug('Found %s. Size: %s' % (title, helpers.bytes_to_mb(size)))
+                entries.append({
+                    'title': str(title),
+                    'link': str(url)
+                    })
+                logger.fdebug('Found %s. Size: %s' % (title, helpers.bytes_to_mb(size)))
+            else:
+                logger.fdebug('%s outside usenet retention: %s days.' % (title, nzbage))
+            #resultlist.append((title, size, url, provider))
     except Exception, e:
         logger.error(u"An unknown error occurred trying to parse the feed: %s" % e)
     if len(entries) >= 1:
         mres['entries'] = entries
+    else:
+        mres = "no results"
     return mres
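
The nzbx branch filters client-side instead: each item's postdate (a Unix timestamp, judging by the fromtimestamp() call) is converted to an age in days and compared against the retention window. A self-contained sketch of that check, with the retention value inlined rather than read from mylar's config:

import datetime
import time

RETENTION_DAYS = 1500  # stands in for mylar.USENET_RETENTION

def within_retention(postdate_ts, retention_days=RETENTION_DAYS):
    # Age in days between now and the NZB's posting date; abs() keeps a
    # slightly future-dated post (clock skew) from producing a negative age.
    nzbdate = datetime.datetime.fromtimestamp(postdate_ts)
    nzbage = abs((datetime.datetime.now() - nzbdate).days)
    return nzbage <= retention_days

print(within_retention(time.time() - 100 * 86400))   # True: inside 1500 days
print(within_retention(time.time() - 2000 * 86400))  # False: outside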


@@ -97,6 +97,7 @@ def search_init(ComicName, IssueNumber, ComicYear, SeriesYear, IssueDate, IssueI
     # --------
     providercount = int(nzbp + newznabs)
     logger.fdebug("there are : " + str(providercount) + " search providers you have selected.")
+    logger.fdebug("Usenet Retention : " + str(mylar.USENET_RETENTION) + " days")
     nzbpr = nzbp-1
     findit = 'no'

@@ -343,9 +344,18 @@ def NZB_SEARCH(ComicName, IssueNumber, ComicYear, SeriesYear, nzbprov, nzbpr, Is
             logger.fdebug("search-url: " + str(findurl))
         elif nzbprov == 'nzbx':
             bb = prov_nzbx.searchit(comsearch[findloop])
+            if len(bb) == 0:
+                bb = "no results"
+        # If USENET_RETENTION is set, honour it
+        # For newznab sites, that means appending "&maxage=<whatever>" on the URL
         if nzbprov != 'nzbx':
             # Add a user-agent
             #print ("user-agent:" + str(mylar.USER_AGENT))
+            if mylar.USENET_RETENTION != None:
+                findurl = findurl + "&maxage=" + str(mylar.USENET_RETENTION)
             request = urllib2.Request(findurl)
             request.add_header('User-Agent', str(mylar.USER_AGENT))
             opener = urllib2.build_opener()
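
For the newznab path, retention is enforced server-side: the "maxage" API parameter (a number of days) is appended to the search URL and the indexer drops older results itself, so no local date arithmetic is needed. A sketch of that step against a placeholder indexer URL:

def add_maxage(findurl, usenet_retention=None):
    # Mirrors the NZB_SEARCH() change above: leave the URL untouched when no
    # retention is configured, otherwise bound results to that many days.
    if usenet_retention is not None:
        findurl = findurl + "&maxage=" + str(usenet_retention)
    return findurl

# "indexer.example" is a placeholder, not a real endpoint.
print(add_maxage("https://indexer.example/api?t=search&q=batman", 1100))
# -> https://indexer.example/api?t=search&q=batman&maxage=1100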