FIX: Added additional newsgroup to the Experimental option for some newer results (0-day)

evilhero 2013-09-20 01:17:04 -04:00
parent e86b5ce00f
commit 85db439550
2 changed files with 50 additions and 37 deletions


@@ -196,6 +196,7 @@ RAW_PASSWORD = None
 RAW_GROUPS = None
 EXPERIMENTAL = False
+ALTEXPERIMENTAL = False
 COMIC_LOCATION = None
 QUAL_ALTVERS = None
@@ -313,7 +314,7 @@ def initialize():
         LIBRARYSCAN, LIBRARYSCAN_INTERVAL, DOWNLOAD_SCAN_INTERVAL, USE_SABNZBD, SAB_HOST, SAB_USERNAME, SAB_PASSWORD, SAB_APIKEY, SAB_CATEGORY, SAB_PRIORITY, SAB_DIRECTORY, BLACKHOLE, BLACKHOLE_DIR, ADD_COMICS, COMIC_DIR, IMP_MOVE, IMP_RENAME, IMP_METADATA, \
         USE_NZBGET, NZBGET_HOST, NZBGET_PORT, NZBGET_USERNAME, NZBGET_PASSWORD, NZBGET_CATEGORY, NZBGET_PRIORITY, NZBSU, NZBSU_UID, NZBSU_APIKEY, DOGNZB, DOGNZB_UID, DOGNZB_APIKEY, NZBX,\
         NEWZNAB, NEWZNAB_NAME, NEWZNAB_HOST, NEWZNAB_APIKEY, NEWZNAB_UID, NEWZNAB_ENABLED, EXTRA_NEWZNABS, NEWZNAB_EXTRA, \
-        RAW, RAW_PROVIDER, RAW_USERNAME, RAW_PASSWORD, RAW_GROUPS, EXPERIMENTAL, \
+        RAW, RAW_PROVIDER, RAW_USERNAME, RAW_PASSWORD, RAW_GROUPS, EXPERIMENTAL, ALTEXPERIMENTAL, \
         ENABLE_META, CMTAGGER_PATH, INDIE_PUB, BIGGIE_PUB, IGNORE_HAVETOTAL, \
         ENABLE_TORRENTS, TORRENT_LOCAL, LOCAL_WATCHDIR, TORRENT_SEEDBOX, SEEDBOX_HOST, SEEDBOX_PORT, SEEDBOX_USER, SEEDBOX_PASS, SEEDBOX_WATCHDIR, \
         ENABLE_RSS, RSS_CHECKINTERVAL, RSS_LASTRUN, ENABLE_TORRENT_SEARCH, ENABLE_KAT, ENABLE_CBT, CBT_PASSKEY, \
@@ -525,7 +526,7 @@ def initialize():
     RAW_GROUPS = check_setting_str(CFG, 'Raw', 'raw_groups', '')
     EXPERIMENTAL = bool(check_setting_int(CFG, 'Experimental', 'experimental', 0))
+    ALTEXPERIMENTAL = bool(check_setting_int(CFG, 'Experimental', 'altexperimental', 1))
     NEWZNAB = bool(check_setting_int(CFG, 'Newznab', 'newznab', 0))
     if CONFIG_VERSION:
@@ -920,6 +921,7 @@ def config_write():
     new_config['Experimental'] = {}
     new_config['Experimental']['experimental'] = int(EXPERIMENTAL)
+    new_config['Experimental']['altexperimental'] = int(ALTEXPERIMENTAL)
     new_config['Newznab'] = {}
     new_config['Newznab']['newznab'] = int(NEWZNAB)
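Worth noting from the hunk above: the new option is read with a default of 1, so configs written before this commit (which have no altexperimental key yet) come up with the alternate feed enabled until the user turns it off. A minimal sketch of that default handling, using a simplified stand-in for the check_setting_int helper rather than Mylar's actual implementation:

def check_setting_int(cfg, section, key, default):
    # Simplified stand-in: return the stored value as an int,
    # or fall back to the supplied default when the key is missing or invalid.
    try:
        return int(cfg[section][key])
    except (KeyError, TypeError, ValueError):
        return default

# A config written before this commit has no 'altexperimental' key yet.
cfg = {'Experimental': {'experimental': '0'}}

EXPERIMENTAL = bool(check_setting_int(cfg, 'Experimental', 'experimental', 0))        # False
ALTEXPERIMENTAL = bool(check_setting_int(cfg, 'Experimental', 'altexperimental', 1))  # True: alternate feed on by default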


@@ -31,6 +31,9 @@ def Startit(searchName, searchIssue, searchYear, ComicVersion):
     regexName = searchName.replace(" ", '((\\s)?[-:])?(\\s)?')
+    #logger.fdebug('searchName:' + searchName)
+    #logger.fdebug('regexName:' + regexName)
     if mylar.USE_MINSIZE:
         size_constraints = "minsize=" + str(mylar.MINSIZE)
@@ -43,51 +46,58 @@ def Startit(searchName, searchIssue, searchYear, ComicVersion):
     if mylar.USENET_RETENTION != None:
         max_age = "&age=" + str(mylar.USENET_RETENTION)
-    feed = feedparser.parse("http://nzbindex.nl/rss/alt.binaries.comics.dcp/?sort=agedesc&" + str(size_constraints) + str(max_age) + "&dq=%s&max=50&more=1" %joinSearch)
-    totNum = len(feed.entries)
-    keyPair = {}
-    regList = []
+    feeds = []
+    feeds.append(feedparser.parse("http://nzbindex.nl/rss/alt.binaries.comics.dcp/?sort=agedesc&" + str(size_constraints) + str(max_age) + "&dq=%s&max=50&more=1" %joinSearch))
+    if mylar.ALTEXPERIMENTAL:
+        feeds.append(feedparser.parse("http://nzbindex.nl/rss/?dq=%s&g[]=41&g[]=510&sort=agedesc&hidespam=0&max=&more=1" %joinSearch))
     entries = []
     mres = {}
-    countUp = 0
-    logger.fdebug(str(totNum) + " results")
-    while countUp < totNum:
-        urlParse = feed.entries[countUp].enclosures[0]
-        #keyPair[feed.entries[countUp].title] = feed.entries[countUp].link
-        keyPair[feed.entries[countUp].title] = urlParse["href"]
-        countUp=countUp+1
+    tallycount = 0
+    for feed in feeds:
+        totNum = len(feed.entries)
+        tallycount += len(feed.entries)
+        keyPair = {}
+        regList = []
+        countUp = 0
+        logger.fdebug(str(totNum) + " results")
+        while countUp < totNum:
+            urlParse = feed.entries[countUp].enclosures[0]
+            #keyPair[feed.entries[countUp].title] = feed.entries[countUp].link
+            keyPair[feed.entries[countUp].title] = urlParse["href"]
+            countUp=countUp+1
         # thanks to SpammyHagar for spending the time in compiling these regEx's!
         regExTest=""
         regEx = "(%s\\s*(0)?(0)?%s\\s*\\(%s\\))" %(regexName, searchIssue, searchYear)
         regExOne = "(%s\\s*(0)?(0)?%s\\s*\\(.*?\\)\\s*\\(%s\\))" %(regexName, searchIssue, searchYear)
         #Sometimes comics aren't actually published the same year comicVine says - trying to adjust for these cases
         regExTwo = "(%s\\s*(0)?(0)?%s\\s*\\(%s\\))" %(regexName, searchIssue, int(searchYear)+1)
         regExThree = "(%s\\s*(0)?(0)?%s\\s*\\(%s\\))" %(regexName, searchIssue, int(searchYear)-1)
         regExFour = "(%s\\s*(0)?(0)?%s\\s*\\(.*?\\)\\s*\\(%s\\))" %(regexName, searchIssue, int(searchYear)+1)
         regExFive = "(%s\\s*(0)?(0)?%s\\s*\\(.*?\\)\\s*\\(%s\\))" %(regexName, searchIssue, int(searchYear)-1)
         regexList=[regEx, regExOne, regExTwo, regExThree, regExFour, regExFive]
         except_list=['releases', 'gold line', 'distribution', '0-day', '0 day']
         for title, link in keyPair.items():
             #logger.fdebug("titlesplit: " + str(title.split("\"")))
             splitTitle = title.split("\"")
             for subs in splitTitle:
                 logger.fdebug(subs)
                 regExCount = 0
                 if len(subs) > 10 and not any(d in subs.lower() for d in except_list):
                     #Looping through dictionary to run each regEx - length + regex is determined by regexList up top.
 #                    while regExCount < len(regexList):
 #                        regExTest = re.findall(regexList[regExCount], subs, flags=re.IGNORECASE)
@@ -98,13 +108,14 @@ def Startit(searchName, searchIssue, searchYear, ComicVersion):
 #                            'title': subs,
 #                            'link': str(link)
 #                            })
                     entries.append({
                         'title': subs,
                         'link': str(link)
                         })
-    if len(entries) >= 1:
+#    if len(entries) >= 1:
+    if tallycount >= 1:
         mres['entries'] = entries
         return mres
 #    print("Title: "+regList[0])