IMP: Added new public torrent providers to the TPSE option (TPSE is used for searches; WWT and DEM are polled for RSS feeds)
FIX: Removed KAT as a provider
IMP: The Retry Issue option can now scan through all previous snatches instead of just the most recent one
FIX: Attempted fix for the Wanted tab throwing an error when opened (usually after a new install)
IMP: Mylar now stores the last time the pull-list was retrieved with the alt_pull = 2 method and checks every 2 hours for new issues and/or issues being delayed
FIX: Fixed a notification error when doing one-off downloads from the pull-list
FIX: (#1348) Fixed an issue with escape characters when performing a mass update change to the comic location (loc_move setting)
FIX: (#1350) Fixed an error during post-processing when the filename had no year present and the volume of the series wasn't given

evilhero committed 2016-08-20 17:56:30 -04:00
parent a84069d8d2
commit a850e386d4
10 changed files with 389 additions and 240 deletions


@ -572,7 +572,10 @@
</div>
<div class="config">
<div class="row checkbox left clearfix">
<input id="enable_kat" type="checkbox" name="enable_kat" value=1 ${config['enable_kat']} /><label>Enable KAT</label>
<input id="enable_tpse" title="Will Use TPSE for searches, Demonoid & WWT for RSS feeds" type="checkbox" name="enable_tpse" value=1 ${config['enable_tpse']} /><label>Enable Public Torrent Search</label>
<div align="left">
<small class="heading"><span style="float: left; margin-left: .3em; margin-top: 4px;" class="ui-icon ui-icon-info"></span>Search: TPSE / RSS: DEM & WWT</small>
</div>
</div>
<div class="row checkbox left clearfix">
<input type="checkbox" id="enable_32p" onclick="initConfigCheckbox($(this));" name="enable_32p" value=1 ${config['enable_32p']} /><label>Enable 32P</label>


@ -477,7 +477,7 @@ class PostProcessor(object):
logger.fdebug(module + '[ISSUE-VERIFY][Lone Volume FAILURE] Volume label of ' + str(watch_values['ComicVersion']) + ' indicates that there is more than one volume for this series, but the one on your watchlist has no volume label set.')
datematch = "False"
if datematch == "False" and any([watchmatch['issue_year'] is not None, watchmatch['issue_year'] != 'None']):
if datematch == "False" and any([watchmatch['issue_year'] is not None, watchmatch['issue_year'] != 'None', watch_issueyear is not None]):
#now we see if the issue year matches exactly to what we have within Mylar.
if int(watch_issueyear) == int(watchmatch['issue_year']):
logger.fdebug(module + '[ISSUE-VERIFY][Issue Year MATCH] Issue Year of ' + str(watch_issueyear) + ' is a match to the year found in the filename of : ' + str(watchmatch['issue_year']))


@ -254,6 +254,7 @@ USE_BLACKHOLE = False
BLACKHOLE_DIR = None
PROVIDER_ORDER = None
TMP_PROV = None #to be used to compare provider being used against provider that actually found the result (ie. TPSE != WWT, != DEM)
NZBSU = False
NZBSU_UID = None
@ -371,9 +372,9 @@ SEEDBOX_PASS = None
SEEDBOX_WATCHDIR = None
ENABLE_TORRENT_SEARCH = 0
ENABLE_KAT = 0
KAT_PROXY = None
KAT_VERIFY = True
ENABLE_TPSE = 0
TPSE_PROXY = None
TPSE_VERIFY = True
ENABLE_32P = 0
MODE_32P = None #0 = legacymode, #1 = authmode
@ -466,10 +467,10 @@ def initialize():
ENABLE_TORZNAB, TORZNAB_NAME, TORZNAB_HOST, TORZNAB_APIKEY, TORZNAB_CATEGORY, TORZNAB_VERIFY, \
EXPERIMENTAL, ALTEXPERIMENTAL, USE_RTORRENT, RTORRENT_HOST, RTORRENT_USERNAME, RTORRENT_PASSWORD, RTORRENT_STARTONLOAD, RTORRENT_LABEL, RTORRENT_DIRECTORY, \
USE_UTORRENT, UTORRENT_HOST, UTORRENT_USERNAME, UTORRENT_PASSWORD, UTORRENT_LABEL, USE_TRANSMISSION, TRANSMISSION_HOST, TRANSMISSION_USERNAME, TRANSMISSION_PASSWORD, \
ENABLE_META, CMTAGGER_PATH, CBR2CBZ_ONLY, CT_TAG_CR, CT_TAG_CBL, CT_CBZ_OVERWRITE, UNRAR_CMD, CT_SETTINGSPATH, CMTAG_START_YEAR_AS_VOLUME, UPDATE_ENDED, INDIE_PUB, BIGGIE_PUB, IGNORE_HAVETOTAL, SNATCHED_HAVETOTAL, PROVIDER_ORDER, \
ENABLE_META, CMTAGGER_PATH, CBR2CBZ_ONLY, CT_TAG_CR, CT_TAG_CBL, CT_CBZ_OVERWRITE, UNRAR_CMD, CT_SETTINGSPATH, CMTAG_START_YEAR_AS_VOLUME, UPDATE_ENDED, INDIE_PUB, BIGGIE_PUB, IGNORE_HAVETOTAL, SNATCHED_HAVETOTAL, PROVIDER_ORDER, TMP_PROV, \
dbUpdateScheduler, searchScheduler, RSSScheduler, WeeklyScheduler, VersionScheduler, FolderMonitorScheduler, \
ALLOW_PACKS, ENABLE_TORRENTS, TORRENT_DOWNLOADER, MINSEEDS, USE_WATCHDIR, TORRENT_LOCAL, LOCAL_WATCHDIR, TORRENT_SEEDBOX, SEEDBOX_HOST, SEEDBOX_PORT, SEEDBOX_USER, SEEDBOX_PASS, SEEDBOX_WATCHDIR, \
ENABLE_RSS, RSS_CHECKINTERVAL, RSS_LASTRUN, FAILED_DOWNLOAD_HANDLING, FAILED_AUTO, ENABLE_TORRENT_SEARCH, ENABLE_KAT, KAT_PROXY, KAT_VERIFY, ENABLE_32P, MODE_32P, KEYS_32P, RSSFEED_32P, USERNAME_32P, PASSWORD_32P, AUTHKEY_32P, PASSKEY_32P, FEEDINFO_32P, VERIFY_32P, SNATCHEDTORRENT_NOTIFY, \
ENABLE_RSS, RSS_CHECKINTERVAL, RSS_LASTRUN, FAILED_DOWNLOAD_HANDLING, FAILED_AUTO, ENABLE_TORRENT_SEARCH, ENABLE_TPSE, TPSE_PROXY, TPSE_VERIFY, ENABLE_32P, MODE_32P, KEYS_32P, RSSFEED_32P, USERNAME_32P, PASSWORD_32P, AUTHKEY_32P, PASSKEY_32P, FEEDINFO_32P, VERIFY_32P, SNATCHEDTORRENT_NOTIFY, \
PROWL_ENABLED, PROWL_PRIORITY, PROWL_KEYS, PROWL_ONSNATCH, NMA_ENABLED, NMA_APIKEY, NMA_PRIORITY, NMA_ONSNATCH, PUSHOVER_ENABLED, PUSHOVER_PRIORITY, PUSHOVER_APIKEY, PUSHOVER_USERKEY, PUSHOVER_ONSNATCH, BOXCAR_ENABLED, BOXCAR_ONSNATCH, BOXCAR_TOKEN, \
PUSHBULLET_ENABLED, PUSHBULLET_APIKEY, PUSHBULLET_DEVICEID, PUSHBULLET_ONSNATCH, LOCMOVE, NEWCOM_DIR, FFTONEWCOM_DIR, \
PREFERRED_QUALITY, MOVE_FILES, RENAME_FILES, LOWERCASE_FILENAMES, USE_MINSIZE, MINSIZE, USE_MAXSIZE, MAXSIZE, CORRECT_METADATA, FOLDER_FORMAT, FILE_FORMAT, REPLACE_CHAR, REPLACE_SPACES, ADD_TO_CSV, CVINFO, LOG_LEVEL, POST_PROCESSING, POST_PROCESSING_SCRIPT, FILE_OPTS, SEARCH_DELAY, GRABBAG_DIR, READ2FILENAME, SEND2READ, TAB_ENABLE, TAB_HOST, TAB_USER, TAB_PASS, TAB_DIRECTORY, STORYARCDIR, COPY2ARCDIR, CVURL, CHECK_FOLDER, ENABLE_CHECK_FOLDER, \
@ -569,6 +570,7 @@ def initialize():
DUPECONSTRAINT = check_setting_str(CFG, 'General', 'dupeconstraint', 'filesize')
DDUMP = bool(check_setting_int(CFG, 'General', 'ddump', 0))
DUPLICATE_DUMP = check_setting_str(CFG, 'General', 'duplicate_dump', '')
PULL_REFRESH = check_setting_str(CFG, 'General', 'pull_refresh', '')
AUTOWANT_ALL = bool(check_setting_int(CFG, 'General', 'autowant_all', 0))
AUTOWANT_UPCOMING = bool(check_setting_int(CFG, 'General', 'autowant_upcoming', 1))
COMIC_COVER_LOCAL = bool(check_setting_int(CFG, 'General', 'comic_cover_local', 0))
@ -700,9 +702,9 @@ def initialize():
SEEDBOX_WATCHDIR = check_setting_str(CFG, 'Torrents', 'seedbox_watchdir', '')
ENABLE_TORRENT_SEARCH = bool(check_setting_int(CFG, 'Torrents', 'enable_torrent_search', 0))
ENABLE_KAT = bool(check_setting_int(CFG, 'Torrents', 'enable_kat', 0))
KAT_PROXY = check_setting_str(CFG, 'Torrents', 'kat_proxy', '')
KAT_VERIFY = bool(check_setting_int(CFG, 'Torrents', 'kat_verify', 1))
ENABLE_TPSE = bool(check_setting_int(CFG, 'Torrents', 'enable_tpse', 0))
TPSE_PROXY = check_setting_str(CFG, 'Torrents', 'tpse_proxy', '')
TPSE_VERIFY = bool(check_setting_int(CFG, 'Torrents', 'tpse_verify', 1))
ENABLE_CBT = check_setting_str(CFG, 'Torrents', 'enable_cbt', '-1')
if ENABLE_CBT != '-1':
@ -804,8 +806,8 @@ def initialize():
if ENABLE_32P:
PR.append('32p')
PR_NUM +=1
if ENABLE_KAT:
PR.append('kat')
if ENABLE_TPSE:
PR.append('tpse')
PR_NUM +=1
@ -1362,6 +1364,7 @@ def config_write():
new_config['General']['dupeconstraint'] = DUPECONSTRAINT
new_config['General']['ddump'] = int(DDUMP)
new_config['General']['duplicate_dump'] = DUPLICATE_DUMP
new_config['General']['pull_refresh'] = PULL_REFRESH
new_config['General']['autowant_all'] = int(AUTOWANT_ALL)
new_config['General']['autowant_upcoming'] = int(AUTOWANT_UPCOMING)
new_config['General']['preferred_quality'] = int(PREFERRED_QUALITY)
@ -1458,9 +1461,9 @@ def config_write():
new_config['Torrents']['seedbox_watchdir'] = SEEDBOX_WATCHDIR
new_config['Torrents']['enable_torrent_search'] = int(ENABLE_TORRENT_SEARCH)
new_config['Torrents']['enable_kat'] = int(ENABLE_KAT)
new_config['Torrents']['kat_proxy'] = KAT_PROXY
new_config['Torrents']['kat_verify'] = KAT_VERIFY
new_config['Torrents']['enable_tpse'] = int(ENABLE_TPSE)
new_config['Torrents']['tpse_proxy'] = TPSE_PROXY
new_config['Torrents']['tpse_verify'] = TPSE_VERIFY
new_config['Torrents']['enable_32p'] = int(ENABLE_32P)
new_config['Torrents']['mode_32p'] = int(MODE_32P)
new_config['Torrents']['passkey_32p'] = PASSKEY_32P


@ -161,7 +161,9 @@ def human2bytes(s):
symbols = ('B', 'K', 'M', 'G', 'T', 'P', 'E', 'Z', 'Y')
letter = s[-1:].strip().upper()
num = s[:-1]
assert num.isdigit() and letter in symbols
#assert num.isdigit() and letter in symbols
#use the assert statement below to handle sizes with decimal places
assert float(num) and letter in symbols
num = float(num)
prefix = {symbols[0]: 1}
for i, s in enumerate(symbols[1:]):
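The relaxed assertion matters because the Demonoid and WWT feeds report sizes with decimal places (e.g. "Size: 25.3 MB"), which the old num.isdigit() check rejected. A minimal standalone sketch of the conversion this enables (an illustrative re-implementation, not the module's exact code):

def human2bytes_sketch(s):
    #mirrors helpers.human2bytes after this change: '25.3M' -> 26528972
    symbols = ('B', 'K', 'M', 'G', 'T', 'P', 'E', 'Z', 'Y')
    letter = s[-1:].strip().upper()
    num = float(s[:-1])   #float() accepts '25.3'; the old isdigit() check did not
    prefix = {sym: 1 << (i * 10) for i, sym in enumerate(symbols)}
    return int(num * prefix[letter])

print human2bytes_sketch('25.3M')   #26528972

Note that float('0') is falsy, so a literal zero size would still trip the new assertion; the relaxation only covers the decimal-place case.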
@ -817,7 +819,7 @@ def updateComicLocation():
comlocation = re.sub(ddir, ccdir, dlc).strip()
#regenerate the new path location so that it's os.dependent now.
com_done = re.sub('%&', os.sep, comlocation).strip()
com_done = re.sub('%&', os.sep.encode('unicode-escape'), comlocation).strip()
comloc.append({"comlocation": com_done,
"origlocation": dl['ComicLocation'],
@ -1906,20 +1908,17 @@ def create_https_certificates(ssl_cert, ssl_key):
return True
def torrent_create(site, linkid, alt=None):
if site == '32P' or site == 'TOR':
if any([site == '32P', site == 'TOR']):
pass
elif site == 'KAT':
if 'http' in linkid:
if alt is None:
#if it's being passed here with the http already in, then it's an old rssdb entry and we can take it as is.
url = linkid
else:
url = re.sub('http://torcache.net/','http://torrage.com/', linkid).strip()
elif site == 'TPSE':
if alt is None:
url = 'http://torrentproject.se/torrent/' + str(linkid) + '.torrent'
else:
if alt is None:
url = 'http://torcache.net/torrent/' + str(linkid) + '.torrent'
else:
url = 'http://torrage.com/' + str(linkid) + '.torrent'
url = 'http://torrentproject.se/torrent/' + str(linkid) + '.torrent'
elif site == 'DEM':
url = 'https://www.demonoid.pw/files/download/' + str(linkid) + '/'
elif site == 'WWT':
url = 'https://worldwidetorrents.eu/download.php'
return url
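For reference, the URLs the reworked helper now hands back per site (taken from the branches above; the linkid values below are made-up examples, and for WWT the id travels separately as a request payload rather than in the URL):

#illustrative calls only - the ids are hypothetical
torrent_create('TPSE', 'abc123')    #-> http://torrentproject.se/torrent/abc123.torrent
torrent_create('DEM', '3609999')    #-> https://www.demonoid.pw/files/download/3609999/
torrent_create('WWT', '44212')      #-> https://worldwidetorrents.eu/download.php (id sent as payload by torsend2client)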


@ -4,6 +4,7 @@ import os, sys
import re
import lib.feedparser as feedparser
import lib.requests as requests
import urlparse
import ftpsshup
import datetime
import gzip
@ -42,17 +43,24 @@ def torrents(pickfeed=None, seriesname=None, issue=None, feedinfo=None):
if issue:
srchterm += '%20' + str(issue)
if mylar.KAT_PROXY:
if mylar.KAT_PROXY.endswith('/'):
kat_url = mylar.KAT_PROXY
if mylar.TPSE_PROXY:
if mylar.TPSE_PROXY.endswith('/'):
tpse_url = mylar.TPSE_PROXY
else:
kat_url = mylar.KAT_PROXY + '/'
tpse_url = mylar.TPSE_PROXY + '/'
else:
#switched to https.
kat_url = 'https://kat.cr/'
tpse_url = 'https://torrentproject.se/'
if pickfeed == 'KAT':
#we need to cycle through both categories (comics & other) - so we loop.
#this is for the public trackers included thus far, in order to properly cycle through the correct ones depending on the search request
# TPSE = search only
# DEM = rss feed
# WWT = rss feed
if pickfeed == 'TPSE-SEARCH':
pickfeed = '2'
loopit = 1
elif pickfeed == 'TPSE':
#we need to cycle through both DEM + WWT feeds
loopit = 2
else:
loopit = 1
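#routing summary for the pickfeed branches below (drawn from this diff):
# 'TPSE-SEARCH' -> '2' : TP.SE search RSS (fetched and parsed)
# '3'   : TP.SE new-releases page - placeholder, page-parsing not implemented yet
# '5'   : Demonoid search - placeholder, parsing not implemented yet
# '6'   : Demonoid new-releases RSS
# '999' : WWT new-releases RSS (handed straight to feedparser)
# 'TPSE' loops twice, hitting '6' (DEM) and then '999' (WWT)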
@ -67,29 +75,29 @@ def torrents(pickfeed=None, seriesname=None, issue=None, feedinfo=None):
feeddata = []
myDB = db.DBConnection()
torthekat = []
torthetpse = []
torthe32p = []
torinfo = {}
while (lp < loopit):
if lp == 0 and loopit == 2:
pickfeed = '2'
pickfeed = '6' #DEM RSS
elif lp == 1 and loopit == 2:
pickfeed = '5'
pickfeed = '999' #WWT RSS
feedtype = None
if pickfeed == "1" and mylar.ENABLE_32P: # 32pages new releases feed.
feed = 'https://32pag.es/feeds.php?feed=torrents_all&user=' + feedinfo['user'] + '&auth=' + feedinfo['auth'] + '&passkey=' + feedinfo['passkey'] + '&authkey=' + feedinfo['authkey']
feedtype = ' from the New Releases RSS Feed for comics'
verify = bool(mylar.VERIFY_32P)
elif pickfeed == "2" and srchterm is not None: # kat.ph search
feed = kat_url + "usearch/" + str(srchterm) + "%20category%3Acomics%20seeds%3A" + str(mylar.MINSEEDS) + "/?rss=1"
verify = bool(mylar.KAT_VERIFY)
elif pickfeed == "3": # kat.ph rss feed
feed = kat_url + "usearch/category%3Acomics%20seeds%3A" + str(mylar.MINSEEDS) + "/?rss=1"
feedtype = ' from the New Releases RSS Feed for comics'
verify = bool(mylar.KAT_VERIFY)
elif pickfeed == "2" and srchterm is not None: # TP.SE search / RSS
feed = tpse_url + 'rss/' + str(srchterm) + '/'
verify = bool(mylar.TPSE_VERIFY)
elif pickfeed == "3": # TP.SE rss feed (3101 = comics category) / non-RSS
feed = tpse_url + '?hl=en&safe=off&num=50&start=0&orderby=best&s=&filter=3101'
feedtype = ' from the New Releases RSS Feed for comics from TP.SE'
verify = bool(mylar.TPSE_VERIFY)
elif pickfeed == "4": #32p search
if any([mylar.USERNAME_32P is None, mylar.USERNAME_32P == '', mylar.PASSWORD_32P is None, mylar.PASSWORD_32P == '']):
logger.error('[RSS] Warning - you NEED to enter in your 32P Username and Password to use this option.')
@ -100,13 +108,16 @@ def torrents(pickfeed=None, seriesname=None, issue=None, feedinfo=None):
lp=+1
continue
return
elif pickfeed == "5" and srchterm is not None: # kat.ph search (category:other since some 0-day comics initially get thrown there until categorized)
feed = kat_url + "usearch/" + str(srchterm) + "%20category%3Aother%20seeds%3A1/?rss=1"
verify = bool(mylar.KAT_VERIFY)
elif pickfeed == "6": # kat.ph rss feed (category:other so that we can get them quicker if need-be)
feed = kat_url + "usearch/.cbr%20category%3Aother%20seeds%3A" + str(mylar.MINSEEDS) + "/?rss=1"
feedtype = ' from the New Releases for category Other RSS Feed that contain comics'
verify = bool(mylar.KAT_VERIFY)
elif pickfeed == "5" and srchterm is not None: # demonoid search / non-RSS
feed = 'https://www.demonoid.pw/' + "files/?category=10&subcategory=All&language=0&seeded=2&external=2&query=" + str(srchterm) + "&uid=0&out=rss"
verify = bool(mylar.TPSE_VERIFY)
elif pickfeed == "6": # demonoid rss feed
feed = 'https://www.demonoid.pw/rss/10.xml'
feedtype = ' from the New Releases RSS Feed from Demonoid'
verify = bool(mylar.TPSE_VERIFY)
elif pickfeed == "999": #WWT rss feed
feed = 'https://www.worldwidetorrents.eu/rss.php?cat=50'
feedtype = ' from the New Releases RSS Feed from WorldWideTorrents'
elif int(pickfeed) >= 7 and feedinfo is not None:
#personal 32P notification feeds.
#get the info here
@ -117,12 +128,20 @@ def torrents(pickfeed=None, seriesname=None, issue=None, feedinfo=None):
logger.error('invalid pickfeed denoted...')
return
if pickfeed == "3" or pickfeed == "6" or pickfeed == "2" or pickfeed == "5":
picksite = 'KAT'
elif pickfeed == "1" or pickfeed == "4" or int(pickfeed) > 7:
if pickfeed == '2' or pickfeed == '3':
picksite = 'TPSE'
#if pickfeed == '2':
# feedme = tpse.
elif pickfeed == '5' or pickfeed == '6':
picksite = 'DEM'
#if pickfeed == '5':
# feedme = dem.
elif pickfeed == '999':
picksite = 'WWT'
elif pickfeed == '1' or pickfeed == '4' or int(pickfeed) > 7:
picksite = '32P'
if pickfeed != '4':
if all([pickfeed != '4', pickfeed != '3', pickfeed != '5', pickfeed != '999']):
payload = None
try:
@ -132,7 +151,7 @@ def torrents(pickfeed=None, seriesname=None, issue=None, feedinfo=None):
return
feedme = feedparser.parse(r.content)
#logger.info(feedme) #<-- uncomment this to see what Mylar is retrieving from the feed
i = 0
@ -154,26 +173,70 @@ def torrents(pickfeed=None, seriesname=None, issue=None, feedinfo=None):
'files': entry['num_files']
})
i += 1
elif pickfeed == '3':
#TP.SE RSS FEED (parse)
pass
elif pickfeed == '5':
#DEMONOID SEARCH RESULT (parse)
pass
elif pickfeed == "999":
logger.info('FEED: ' + feed)
try:
feedme = feedparser.parse(feed)
except Exception, e:
logger.warn('Error fetching RSS Feed Data from %s: %s' % (picksite, e))
return
#WWT / FEED
for entry in feedme.entries:
tmpsz = entry.description
tmpsz_st = tmpsz.find('Size:') + 6
if 'GB' in tmpsz[tmpsz_st:]:
szform = 'GB'
sz = 'G'
elif 'MB' in tmpsz[tmpsz_st:]:
szform = 'MB'
sz = 'M'
linkwwt = urlparse.parse_qs(urlparse.urlparse(entry.link).query)['id']
feeddata.append({
'site': picksite,
'title': entry.title,
'link': ''.join(linkwwt),
'pubdate': entry.updated,
'size': helpers.human2bytes(str(tmpsz[tmpsz_st:tmpsz.find(szform, tmpsz_st) -1]) + str(sz)) #-1 strips the space before the MB/GB unit.
})
i+=1
else:
for entry in feedme['entries']:
if any([pickfeed == "3", pickfeed == "6"]):
tmpsz = feedme.entries[i].enclosures[0]
#TP.SE RSS SEARCH RESULT
if pickfeed == "2":
tmpenc = feedme.entries[i].enclosures[0]
torthetpse.append({
'site': picksite,
'title': feedme.entries[i].title,
'link': re.sub('.torrent', '', str(urlparse.urlparse(tmpenc['url'])[2].rpartition('/')[2])).strip(),
'pubdate': feedme.entries[i].updated,
'size': tmpenc['length']
})
#DEMONOID / FEED
elif pickfeed == "6":
tmpsz = feedme.entries[i].description
tmpsz_st = tmpsz.find('Size:') + 6
if 'GB' in tmpsz[tmpsz_st:]:
szform = 'GB'
sz = 'G'
elif 'MB' in tmpsz[tmpsz_st:]:
szform = 'MB'
sz = 'M'
feeddata.append({
'site': picksite,
'title': feedme.entries[i].title,
'link': tmpsz['url'],
'link': str(urlparse.urlparse(feedme.entries[i].link)[2].rpartition('/')[0].rsplit('/',2)[1]),
'pubdate': feedme.entries[i].updated,
'size': tmpsz['length']
})
elif any([pickfeed == "2", pickfeed == "5"]):
tmpsz = feedme.entries[i].enclosures[0]
torthekat.append({
'site': picksite,
'title': feedme.entries[i].title,
'link': tmpsz['url'],
'pubdate': feedme.entries[i].updated,
'size': tmpsz['length']
'size': helpers.human2bytes(str(tmpsz[tmpsz_st:tmpsz.find(szform, tmpsz_st) -1]) + str(sz)),
})
#32p / FEEDS
elif pickfeed == "1" or int(pickfeed) > 7:
tmpdesc = feedme.entries[i].description
st_pub = feedme.entries[i].title.find('(')
@ -262,7 +325,7 @@ def torrents(pickfeed=None, seriesname=None, issue=None, feedinfo=None):
if pickfeed == '4':
torinfo['entries'] = torthe32p
else:
torinfo['entries'] = torthekat
torinfo['entries'] = torthetpse
return torinfo
return
@ -380,13 +443,8 @@ def rssdbupdate(feeddata, i, type):
if type == 'torrent':
#we just store the torrent ID's now.
if dataval['site'] == '32P':
newlink = dataval['link']
else:
#store the hash/id from KAT
newlink = os.path.basename(re.sub('.torrent', '', dataval['link'][:dataval['link'].find('?title')]))
newVal = {"Link": newlink,
newVal = {"Link": dataval['link'],
"Pubdate": dataval['pubdate'],
"Site": dataval['site'],
"Size": dataval['size']}
@ -442,8 +500,8 @@ def torrentdbsearch(seriesname, issue, comicid=None, nzbprov=None):
if mylar.ENABLE_32P:
tresults = myDB.select("SELECT * FROM rssdb WHERE Title like ? AND Site='32P'", [tsearch])
if mylar.ENABLE_KAT:
tresults += myDB.select("SELECT * FROM rssdb WHERE Title like ? AND Site='KAT'", [tsearch])
if mylar.ENABLE_TPSE:
tresults += myDB.select("SELECT * FROM rssdb WHERE Title like ? AND (Site='DEM' OR Site='WWT')", [tsearch])
logger.fdebug('seriesname_alt:' + str(seriesname_alt))
if seriesname_alt is None or seriesname_alt == 'None':
@ -481,8 +539,8 @@ def torrentdbsearch(seriesname, issue, comicid=None, nzbprov=None):
AS_Alternate = '%' + AS_Alternate
if mylar.ENABLE_32P:
tresults += myDB.select("SELECT * FROM rssdb WHERE Title like ? AND Site='32P'", [AS_Alternate])
if mylar.ENABLE_KAT:
tresults += myDB.select("SELECT * FROM rssdb WHERE Title like ? AND Site='KAT'", [AS_Alternate])
if mylar.ENABLE_TPSE:
tresults += myDB.select("SELECT * FROM rssdb WHERE Title like ? AND (Site='DEM' OR Site='WWT')", [AS_Alternate])
if tresults is None:
logger.fdebug('torrent search returned no results for ' + seriesname)
@ -514,7 +572,7 @@ def torrentdbsearch(seriesname, issue, comicid=None, nzbprov=None):
#logger.fdebug('there are ' + str(len(torsplit)) + ' sections in this title')
i=0
if nzbprov is not None:
if nzbprov != tor['Site']:
if nzbprov != tor['Site'] and not any([mylar.ENABLE_TPSE, tor['Site'] != 'WWT', tor['Site'] != 'DEM']):
logger.fdebug('this is a result from ' + str(tor['Site']) + ', not the site I am looking for of ' + str(nzbprov))
continue
#0 holds the title/issue and format-type.
@ -724,7 +782,7 @@ def torsend2client(seriesname, issue, seriesyear, linkit, site):
filename = re.sub(' ', '_', filename)
filename += "_" + str(issue) + "_" + str(seriesyear)
if linkit[-7:] != "torrent": # and site != "KAT":
if linkit[-7:] != "torrent":
filename += ".torrent"
if any([mylar.USE_UTORRENT, mylar.USE_RTORRENT, mylar.USE_TRANSMISSION]):
filepath = os.path.join(mylar.CACHE_DIR, filename)
@ -788,37 +846,62 @@ def torsend2client(seriesname, issue, seriesyear, linkit, site):
headers = None #{'Accept-encoding': 'gzip',
# 'User-Agent': str(mylar.USER_AGENT)}
elif site == 'KAT':
#stfind = linkit.find('?')
#if stfind == -1:
# kat_referrer = helpers.torrent_create('KAT', linkit)
#else:
# kat_referrer = linkit[:stfind]
url = helpers.torrent_create('KAT', linkit)
elif site == 'TPSE':
url = helpers.torrent_create('TPSE', linkit)
if url.startswith('https'):
kat_referrer = 'https://torcache.net/'
tpse_referrer = 'https://torrentproject.se/'
else:
kat_referrer = 'http://torcache.net/'
#logger.fdebug('KAT Referer set to :' + kat_referrer)
tpse_referrer = 'http://torrentproject.se/'
headers = {'Accept-encoding': 'gzip',
'User-Agent': str(mylar.USER_AGENT),
'Referer': kat_referrer}
'Referer': tpse_referrer}
logger.fdebug('Grabbing torrent from url:' + str(url))
payload = None
verify = False
elif site == 'DEM':
url = helpers.torrent_create('DEM', linkit)
if url.startswith('https'):
dem_referrer = 'https://www.demonoid.pw/files/download/'
else:
dem_referrer = 'http://www.demonoid.pw/files/download/'
headers = {'Accept-encoding': 'gzip',
'User-Agent': str(mylar.USER_AGENT),
'Referer': dem_referrer}
logger.fdebug('Grabbing torrent from url:' + str(url))
payload = None
verify = False
elif site == 'WWT':
url = helpers.torrent_create('WWT', linkit)
if url.startswith('https'):
wwt_referrer = 'https://worldwidetorrents.eu'
else:
wwt_referrer = 'http://worldwidetorrents.eu'
headers = {'Accept-encoding': 'gzip',
'User-Agent': str(mylar.USER_AGENT),
'Referer': wwt_referrer}
logger.fdebug('Grabbing torrent [id:' + str(linkit) + '] from url:' + str(url))
payload = {'id': linkit}
verify = False
else:
headers = {'Accept-encoding': 'gzip',
'User-Agent': str(mylar.USER_AGENT)}
#'Referer': kat_referrer}
url = linkit #helpers.torrent_create('TOR', linkit)
url = linkit
payload = None
verify = False
@ -866,10 +949,10 @@ def torsend2client(seriesname, issue, seriesyear, linkit, site):
logger.info('blah: ' + str(r.status_code))
return "fail"
if site == 'KAT' and any([str(r.status_code) == '403', str(r.status_code) == '404']):
logger.warn('Unable to download from KAT [' + str(r.status_code) + ']')
if any([site == 'TPSE', site == 'DEM', site == 'WWT']) and any([str(r.status_code) == '403', str(r.status_code) == '404']):
logger.warn('Unable to download from ' + site + ' [' + str(r.status_code) + ']')
#retry with the alternate torrent link.
url = helpers.torrent_create('KAT', linkit, True)
url = helpers.torrent_create(site, linkit, True)
logger.fdebug('Trying alternate url: ' + str(url))
try:
r = requests.get(url, params=payload, verify=verify, stream=True, headers=headers)
@ -881,7 +964,7 @@ def torsend2client(seriesname, issue, seriesyear, linkit, site):
logger.warn('Unable to download torrent from ' + site + ' [Status Code returned: ' + str(r.status_code) + ']')
return "fail"
if site == 'KAT':
if any([site == 'TPSE', site == 'DEM', site == 'WWT']):
if r.headers.get('Content-Encoding') == 'gzip':
buf = StringIO(r.content)
f = gzip.GzipFile(fileobj=buf)


@ -56,10 +56,10 @@ class tehMain():
#function for looping through nzbs/torrent feeds
if mylar.ENABLE_TORRENT_SEARCH:
logger.info('[RSS] Initiating Torrent RSS Check.')
if mylar.ENABLE_KAT:
logger.info('[RSS] Initiating Torrent RSS Feed Check on KAT.')
rsscheck.torrents(pickfeed='3')
rsscheck.torrents(pickfeed='6')
if mylar.ENABLE_TPSE:
logger.info('[RSS] Initiating Torrent RSS Feed Check on TorrentProject.')
#rsscheck.torrents(pickfeed='3') #TP.SE RSS Check (has to be page-parsed)
rsscheck.torrents(pickfeed='TPSE') #TPSE = DEM RSS Check + WWT RSS Check
if mylar.ENABLE_32P:
logger.info('[RSS] Initiating Torrent RSS Feed Check on 32P.')
if mylar.MODE_32P == 0:


@ -90,8 +90,8 @@ def search_init(ComicName, IssueNumber, ComicYear, SeriesYear, Publisher, IssueD
torprovider.append('32p')
torp+=1
#print torprovider[0]
if mylar.ENABLE_KAT:
torprovider.append('kat')
if mylar.ENABLE_TPSE:
torprovider.append('tpse')
torp+=1
if mylar.ENABLE_TORZNAB:
torprovider.append('torznab')
@ -207,8 +207,8 @@ def search_init(ComicName, IssueNumber, ComicYear, SeriesYear, Publisher, IssueD
newznab_host = None
if prov_order[prov_count] == '32p':
searchprov = '32P'
elif prov_order[prov_count] == 'kat':
searchprov = 'KAT'
elif prov_order[prov_count] == 'tpse':
searchprov = 'TPSE'
elif prov_order[prov_count] == 'torznab':
searchprov = 'Torznab'
elif 'newznab' in prov_order[prov_count]:
@ -278,6 +278,8 @@ def search_init(ComicName, IssueNumber, ComicYear, SeriesYear, Publisher, IssueD
if mylar.SNATCHED_HAVETOTAL and IssueID is not None:
logger.fdebug('Adding this to the HAVE total for the series.')
helpers.incr_snatched(ComicID)
if searchprov == 'TPSE' and mylar.TMP_PROV != searchprov:
searchprov = mylar.TMP_PROV
return findit, searchprov
else:
#if searchprov == '32P':
@ -497,7 +499,7 @@ def NZB_SEARCH(ComicName, IssueNumber, ComicYear, SeriesYear, Publisher, IssueDa
#logger.fdebug('nzbprov: ' + str(nzbprov))
#logger.fdebug('comicid: ' + str(ComicID))
if RSS == "yes":
if nzbprov == '32P' or nzbprov == 'KAT':
if nzbprov == '32P' or nzbprov == 'TPSE':
cmname = re.sub("%20", " ", str(comsrc))
logger.fdebug("Sending request to [" + str(nzbprov) + "] RSS for " + ComicName + " : " + str(mod_isssearch))
bb = rsscheck.torrentdbsearch(ComicName, mod_isssearch, ComicID, nzbprov)
@ -530,10 +532,10 @@ def NZB_SEARCH(ComicName, IssueNumber, ComicYear, SeriesYear, Publisher, IssueDa
else:
bb = "no results"
rss = "no"
elif nzbprov == 'KAT':
elif nzbprov == 'TPSE':
cmname = re.sub("%20", " ", str(comsrc))
logger.fdebug("Sending request to [KAT] for " + str(cmname) + " : " + str(mod_isssearch))
bb = rsscheck.torrents(pickfeed='KAT', seriesname=cmname, issue=mod_isssearch)#cmname,issue=mod_isssearch)
logger.fdebug("Sending request to [TPSE] for " + str(cmname) + " : " + str(mod_isssearch))
bb = rsscheck.torrents(pickfeed='TPSE-SEARCH', seriesname=cmname, issue=mod_isssearch)#cmname,issue=mod_isssearch)
rss = "no"
#if bb is not None: logger.fdebug("results: " + str(bb))
elif nzbprov != 'experimental':
@ -665,7 +667,7 @@ def NZB_SEARCH(ComicName, IssueNumber, ComicYear, SeriesYear, Publisher, IssueDa
foundc = "no"
else:
for entry in bb['entries']:
#logger.info(entry) <--- uncomment this to see what the search result(s) are
#logger.info(entry) #<--- uncomment this to see what the search result(s) are
#brief match here against 32p since it returns the direct issue number
if nzbprov == '32P' and RSS == 'no':
if entry['pack'] == '0':
@ -704,13 +706,15 @@ def NZB_SEARCH(ComicName, IssueNumber, ComicYear, SeriesYear, Publisher, IssueDa
if RSS == "yes":
if nzbprov == '32P':
comsize_b = None #entry['length']
elif nzbprov == 'TPSE':
comsize_b = entry['length']
else:
comsize_b = entry['length']
else:
#Experimental already has size constraints done.
if nzbprov == '32P':
comsize_b = entry['filesize'] #None
elif nzbprov == 'KAT':
elif nzbprov == 'TPSE':
comsize_b = entry['size']
elif nzbprov == 'experimental':
comsize_b = entry['length'] # we only want the size from the rss - the search/api has it already.
@ -719,9 +723,9 @@ def NZB_SEARCH(ComicName, IssueNumber, ComicYear, SeriesYear, Publisher, IssueDa
comsize_b = tmpsz['length']
#file restriction limitation here
#only works with KAT (done here) & 32P (done in rsscheck) & Experimental (has it embedded in search and rss checks)
if nzbprov == 'KAT' or (nzbprov == '32P' and RSS == 'no'):
if nzbprov == 'KAT':
#only works with TPSE (done here) & 32P (done in rsscheck) & Experimental (has it embedded in search and rss checks)
if nzbprov == 'TPSE' or (nzbprov == '32P' and RSS == 'no'):
if nzbprov == 'TPSE':
if 'cbr' in entry['title'].lower():
format_type = 'cbr'
elif 'cbz' in entry['title'].lower():
@ -1174,6 +1178,12 @@ def NZB_SEARCH(ComicName, IssueNumber, ComicYear, SeriesYear, Publisher, IssueDa
downloadit = False
#-------------------------------------fix this!
if nzbprov == 'TPSE' and any([entry['site'] == 'WWT', entry['site'] == 'DEM']):
if entry['site'] == 'WWT':
nzbprov = 'WWT'
else:
nzbprov = 'DEM'
logger.info(nzbprov)
logger.info('rss:' + RSS)
logger.info('allow_packs:' + str(allow_packs))
@ -1486,9 +1496,14 @@ def NZB_SEARCH(ComicName, IssueNumber, ComicYear, SeriesYear, Publisher, IssueDa
comicinfo = []
if IssueID is None:
cyear = ComicYear
else:
cyear = comyear
comicinfo.append({"ComicName": ComicName,
"IssueNumber": IssueNumber,
"comyear": comyear,
"comyear": cyear,
"pack": False,
"pack_numbers": None,
"modcomicname": modcomicname})
@ -1529,6 +1544,8 @@ def NZB_SEARCH(ComicName, IssueNumber, ComicYear, SeriesYear, Publisher, IssueDa
findloop+=1
if foundc == "yes":
if 'TPSE' in tmpprov and any([nzbprov == 'WWT', nzbprov == 'DEM']):
tmpprov = re.sub('TPSE', nzbprov, tmpprov)
foundcomic.append("yes")
if comicinfo[0]['pack']:
issinfo = comicinfo[0]['pack_issuelist']
@ -1549,9 +1566,13 @@ def NZB_SEARCH(ComicName, IssueNumber, ComicYear, SeriesYear, Publisher, IssueDa
if '[RSS]' in tmpprov: tmpprov = re.sub('\[RSS\]', '', tmpprov).strip()
updater.nzblog(IssueID, nzbname, ComicName, SARC=SARC, IssueArcID=IssueArcID, id=nzbid, prov=tmpprov, alt_nzbname=alt_nzbname)
#send out the notifications for the snatch.
notify_snatch(nzbname, sent_to, helpers.filesafe(modcomicname), comyear, IssueNumber, nzbprov)
if IssueID is None:
cyear = ComicYear
else:
cyear = comyear
notify_snatch(nzbname, sent_to, helpers.filesafe(modcomicname), cyear, IssueNumber, nzbprov)
prov_count == 0
#break
mylar.TMP_PROV = nzbprov
return foundc
if foundc == "no":# and prov_count == 0:
@ -1649,7 +1670,7 @@ def searchforissue(issueid=None, new=False, rsscheck=None):
else:
AllowPacks = False
mode = result['mode']
if (mylar.NZBSU or mylar.DOGNZB or mylar.EXPERIMENTAL or mylar.NEWZNAB or mylar.ENABLE_KAT or mylar.ENABLE_32P) and (mylar.USE_SABNZBD or mylar.USE_NZBGET or mylar.ENABLE_TORRENTS or mylar.USE_BLACKHOLE):
if (mylar.NZBSU or mylar.DOGNZB or mylar.EXPERIMENTAL or mylar.NEWZNAB or mylar.ENABLE_TPSE or mylar.ENABLE_32P) and (mylar.USE_SABNZBD or mylar.USE_NZBGET or mylar.ENABLE_TORRENTS or mylar.USE_BLACKHOLE):
foundNZB, prov = search_init(comic['ComicName'], result['Issue_Number'], str(ComicYear), comic['ComicYear'], Publisher, IssueDate, StoreDate, result['IssueID'], AlternateSearch, UseFuzzy, ComicVersion, SARC=None, IssueArcID=None, mode=mode, rsscheck=rsscheck, ComicID=result['ComicID'], filesafe=comic['ComicName_Filesafe'], allow_packs=AllowPacks)
if foundNZB == "yes":
#print ("found!")
@ -1693,7 +1714,7 @@ def searchforissue(issueid=None, new=False, rsscheck=None):
AllowPacks = False
foundNZB = "none"
if (mylar.NZBSU or mylar.DOGNZB or mylar.EXPERIMENTAL or mylar.NEWZNAB or mylar.ENABLE_KAT or mylar.ENABLE_32P) and (mylar.USE_SABNZBD or mylar.USE_NZBGET or mylar.ENABLE_TORRENTS or mylar.USE_BLACKHOLE):
if (mylar.NZBSU or mylar.DOGNZB or mylar.EXPERIMENTAL or mylar.NEWZNAB or mylar.ENABLE_TPSE or mylar.ENABLE_32P) and (mylar.USE_SABNZBD or mylar.USE_NZBGET or mylar.ENABLE_TORRENTS or mylar.USE_BLACKHOLE):
foundNZB, prov = search_init(comic['ComicName'], result['Issue_Number'], str(IssueYear), comic['ComicYear'], Publisher, IssueDate, StoreDate, result['IssueID'], AlternateSearch, UseFuzzy, ComicVersion, SARC=None, IssueArcID=None, mode=mode, rsscheck=rsscheck, ComicID=result['ComicID'], filesafe=comic['ComicName_Filesafe'], allow_packs=AllowPacks)
if foundNZB == "yes":
logger.fdebug("I found " + comic['ComicName'] + ' #:' + str(result['Issue_Number']))
@ -1735,7 +1756,7 @@ def searchIssueIDList(issuelist):
else:
AllowPacks = False
if (mylar.NZBSU or mylar.DOGNZB or mylar.EXPERIMENTAL or mylar.NEWZNAB or mylar.ENABLE_32P or mylar.ENABLE_KAT) and (mylar.USE_SABNZBD or mylar.USE_NZBGET or mylar.ENABLE_TORRENTS or mylar.USE_BLACKHOLE):
if (mylar.NZBSU or mylar.DOGNZB or mylar.EXPERIMENTAL or mylar.NEWZNAB or mylar.ENABLE_32P or mylar.ENABLE_TPSE) and (mylar.USE_SABNZBD or mylar.USE_NZBGET or mylar.ENABLE_TORRENTS or mylar.USE_BLACKHOLE):
foundNZB, prov = search_init(comic['ComicName'], issue['Issue_Number'], str(IssueYear), comic['ComicYear'], Publisher, issue['IssueDate'], issue['ReleaseDate'], issue['IssueID'], AlternateSearch, UseFuzzy, ComicVersion, SARC=None, IssueArcID=None, mode=mode, ComicID=issue['ComicID'], filesafe=comic['ComicName_Filesafe'], allow_packs=AllowPacks)
if foundNZB == "yes":
#print ("found!")
@ -1801,7 +1822,7 @@ def nzbname_create(provider, title=None, info=None):
# it searches nzblog which contains the nzbname to pull out the IssueID and start the post-processing
# it is also used to keep the hashinfo for the nzbname in case it fails downloading, it will get put into the failed db for future exclusions
if mylar.USE_BLACKHOLE and provider != '32P' and provider != 'KAT':
if mylar.USE_BLACKHOLE and provider != '32P' and provider != 'TPSE':
if os.path.exists(mylar.BLACKHOLE_DIR):
#load in the required info to generate the nzb names when required (blackhole only)
ComicName = info[0]['ComicName']
@ -1824,7 +1845,7 @@ def nzbname_create(provider, title=None, info=None):
logger.fdebug("nzb name to be used for post-processing is : " + str(nzbname))
elif provider == '32P' or provider == 'KAT':
elif provider == '32P' or provider == 'TPSE':
#filesafe the name cause people are idiots when they post sometimes.
nzbname = re.sub('\s{2,}', ' ', helpers.filesafe(title)).strip()
#let's change all space to decimals for simplicity
@ -1893,9 +1914,18 @@ def searcher(nzbprov, nzbname, comicinfo, link, IssueID, ComicID, tmpprov, direc
nzbid = generate_id(nzbprov, link)
logger.fdebug('issues match!')
if 'TPSE' in tmpprov and any([nzbprov == 'WWT', nzbprov == 'DEM']):
tmpprov = re.sub('TPSE', nzbprov, tmpprov)
if comicinfo[0]['pack'] == True:
logger.info(u"Found " + ComicName + " (" + str(comyear) + ") issue: " + str(IssueNumber) + " using " + str(tmpprov) + " within a pack containing issues: " + comicinfo[0]['pack_numbers'])
else:
if IssueID is None:
#one-off information
logger.fdebug("ComicName: " + ComicName)
logger.fdebug("Issue: " + str(IssueNumber))
logger.fdebug("Year: " + str(ComicYear))
logger.fdebug("IssueDate:" + str(IssueDate))
logger.info(u"Found " + ComicName + " (" + str(comyear) + ") issue: " + IssueNumber + " using " + str(tmpprov))
logger.fdebug("link given by: " + str(nzbprov))
@ -1919,7 +1949,7 @@ def searcher(nzbprov, nzbname, comicinfo, link, IssueID, ComicID, tmpprov, direc
elif check_the_fail == 'Good':
logger.fdebug('[FAILED_DOWNLOAD_CHECKER] This is not in the failed downloads list. Will continue with the download.')
if link and (nzbprov != 'KAT' and nzbprov != '32P' and nzbprov != 'Torznab'):
if link and all([nzbprov != 'TPSE', nzbprov != 'WWT', nzbprov != 'DEM', nzbprov != '32P', nzbprov != 'Torznab']):
#generate nzbid here.
@ -2069,7 +2099,7 @@ def searcher(nzbprov, nzbname, comicinfo, link, IssueID, ComicID, tmpprov, direc
#blackhole
sent_to = None
if mylar.USE_BLACKHOLE and nzbprov != '32P' and nzbprov != 'KAT' and nzbprov != 'Torznab':
if mylar.USE_BLACKHOLE and all([nzbprov != '32P', nzbprov != 'TPSE', nzbprov != 'WWT', nzbprov != 'DEM', nzbprov != 'Torznab']):
logger.fdebug("using blackhole directory at : " + str(mylar.BLACKHOLE_DIR))
if os.path.exists(mylar.BLACKHOLE_DIR):
#copy the nzb from nzbpath to blackhole dir.
@ -2083,8 +2113,8 @@ def searcher(nzbprov, nzbname, comicinfo, link, IssueID, ComicID, tmpprov, direc
sent_to = "your Blackhole Directory"
#end blackhole
#torrents (32P & KAT)
elif nzbprov == '32P' or nzbprov == 'KAT' or nzbprov == 'Torznab':
#torrents (32P & TPSE & WWT & DEM)
elif any([nzbprov == '32P', nzbprov == 'TPSE', nzbprov == 'WWT', nzbprov == 'DEM', nzbprov == 'Torznab']):
logger.fdebug("ComicName:" + ComicName)
logger.fdebug("link:" + link)
logger.fdebug("Torrent Provider:" + nzbprov)
@ -2301,6 +2331,8 @@ def searcher(nzbprov, nzbname, comicinfo, link, IssueID, ComicID, tmpprov, direc
if directsend is None:
return return_val
else:
if 'TPSE' in tmpprov and any([nzbprov == 'WWT', nzbprov == 'DEM']):
tmpprov = re.sub('TPSE', nzbprov, tmpprov)
#update the db on the snatch.
if alt_nzbname is None or alt_nzbname == '':
logger.fdebug("Found matching comic...preparing to send to Updater with IssueID: " + str(IssueID) + " and nzbname: " + str(nzbname))
@ -2312,6 +2344,7 @@ def searcher(nzbprov, nzbname, comicinfo, link, IssueID, ComicID, tmpprov, direc
updater.nzblog(IssueID, nzbname, ComicName, SARC=None, IssueArcID=None, id=nzbid, prov=tmpprov, alt_nzbname=alt_nzbname)
#send out notifications on snatch after the updater in case notification fails (it would bugger up the updater/pp scripts)
notify_snatch(nzbname, sent_to, helpers.filesafe(modcomicname), comyear, IssueNumber, nzbprov)
mylar.TMP_PROV = nzbprov
return
def notify_snatch(nzbname, sent_to, modcomicname, comyear, IssueNumber, nzbprov):
@ -2350,7 +2383,7 @@ def FailedMark(IssueID, ComicID, id, nzbname, prov, oneoffinfo=None):
FailProcess = Failed.FailedProcessor(issueid=IssueID, comicid=ComicID, id=id, nzb_name=nzbname, prov=prov, oneoffinfo=oneoffinfo)
Markit = FailProcess.markFailed()
if prov == '32P' or prov == 'KAT': return "torrent-fail"
if prov == '32P' or prov == 'TPSE': return "torrent-fail"
else: return "downloadchk-fail"
def IssueTitleCheck(issuetitle, watchcomic_split, splitit, splitst, issue_firstword, hyphensplit, orignzb=None):
@ -2493,8 +2526,8 @@ def generate_id(nzbprov, link):
elif nzbprov == '32P':
#32P just has the torrent id stored.
nzbid = link
elif nzbprov == 'KAT':
if 'http' not in link:
elif any([nzbprov == 'TPSE', nzbprov == 'WWT', nzbprov == 'DEM']):
if 'http' not in link and any([nzbprov == 'WWT', nzbprov == 'DEM']):
nzbid = link
else:
#for users that already have the cache in place.


@ -325,6 +325,8 @@ def upcoming_update(ComicID, ComicName, IssueNumber, IssueDate, forcecheck=None,
#if it's at this point and the refresh is None, odds are very good that it's already up-to-date so let it flow thru
if mylar.PULL_REFRESH is None:
mylar.PULL_REFRESH = datetime.datetime.today()
#update the PULL_REFRESH
mylar.config_write()
logger.fdebug('pull_refresh: ' + str(mylar.PULL_REFRESH))
c_obj_date = mylar.PULL_REFRESH
#logger.fdebug('c_obj_date: ' + str(c_obj_date))
@ -382,6 +384,9 @@ def upcoming_update(ComicID, ComicName, IssueNumber, IssueDate, forcecheck=None,
#logger.fdebug('hours: ' + str(hours) + ' -- forcecheck: ' + str(forcecheck))
if hours > 2 or forcecheck == 'yes':
logger.fdebug('weekinfo:' + str(weekinfo))
mylar.PULL_REFRESH = datetime.datetime.today()
#update the PULL_REFRESH
mylar.config_write()
chkitout = mylar.locg.locg(weeknumber=str(weekinfo['weeknumber']),year=str(weekinfo['year']))
logger.fdebug('linking ComicID to Pull-list to reflect status.')
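A hedged sketch of the refresh gate this hunk implements: PULL_REFRESH round-trips through config.ini as a string, so the elapsed time has to be recovered before comparing against the 2-hour window (the parse format below is an assumption; the names mirror the code above):

import datetime

def pull_is_stale(pull_refresh, forcecheck=None):
    #pull_refresh comes back from the config as text, e.g. '2016-08-20 17:56:30.123456'
    last = datetime.datetime.strptime(str(pull_refresh), '%Y-%m-%d %H:%M:%S.%f')
    hours = (datetime.datetime.today() - last).total_seconds() / 3600
    return hours > 2 or forcecheck == 'yes'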


@ -1084,34 +1084,46 @@ class WebInterface(object):
logger.info('Unable to locate how issue was downloaded (name, provider). Cannot continue.')
return
providers_snatched = []
confirmedsnatch = False
for cs in chk_snatch:
if cs['Provider'] == 'CBT':
logger.info('Invalid provider attached to download (CBT). I cannot find this on 32P, so ignoring this result.')
if cs['Provider'] == 'CBT' or cs['Provider'] == 'KAT':
logger.info('Invalid provider attached to download (' + cs['Provider'] + '). I cannot find this on 32P, so ignoring this result.')
elif cs['Status'] == 'Snatched':
logger.info('Located snatched download:')
logger.info('--Referencing : ' + cs['Provider'] + ' @ ' + str(cs['DateAdded']))
Provider = cs['Provider']
providers_snatched.append({'Provider': cs['Provider'],
'DateAdded': cs['DateAdded']})
confirmedsnatch = True
break
elif (cs['Status'] == 'Post-Processed' or cs['Status'] == 'Downloaded') and confirmedsnatch == True:
logger.info('Issue has already been Snatched, Downloaded & Post-Processed.')
logger.info('You should be using Manual Search or Mark Wanted - not retry the same download.')
return
#return
try:
Provider_sql = '%' + Provider + '%'
chk_log = myDB.selectone('SELECT * FROM nzblog WHERE IssueID=? AND Provider like (?)', [IssueID, Provider_sql]).fetchone()
except:
logger.warn('Unable to locate provider reference for attempted Retry. Will see if I can just get the last attempted download.')
chk_log = myDB.selectone('SELECT * FROM nzblog WHERE IssueID=? and Provider != "CBT"', [IssueID]).fetchone()
if chk_log is None:
logger.info('Unable to locate provider information from nzblog - if you wiped the log, you have to search/download as per normal')
if len(providers_snatched) == 0:
return
nzbname = chk_log['NZBName']
id = chk_log['ID']
fullprov = chk_log['PROVIDER'] #the full newznab name if it exists will appear here as 'sitename (newznab)'
chk_logresults = []
for ps in sorted(providers_snatched, key=itemgetter('DateAdded', 'Provider'), reverse=True):
try:
Provider_sql = '%' + ps['Provider'] + '%'
chk_the_log = myDB.selectone('SELECT * FROM nzblog WHERE IssueID=? AND Provider like (?)', [IssueID, Provider_sql]).fetchone()
except:
logger.warn('Unable to locate provider reference for attempted Retry. Will see if I can just get the last attempted download.')
chk_the_log = myDB.selectone('SELECT * FROM nzblog WHERE IssueID=? and Provider != "CBT" and Provider != "KAT"', [IssueID]).fetchone()
if chk_the_log is None:
if len(providers_snatched) == 1:
logger.info('Unable to locate provider information ' + ps['Provider'] + ' from nzblog - if you wiped the log, you have to search/download as per normal')
return
else:
logger.info('Unable to locate provider information ' + ps['Provider'] + ' from nzblog. Checking additional providers that came back as being used to download this issue')
continue
else:
chk_logresults.append({'NZBName': chk_the_log['NZBName'],
'ID': chk_the_log['ID'],
'PROVIDER': chk_the_log['PROVIDER']})
if all([ComicYear is not None, ComicYear != 'None']) and all([IssueID is not None, IssueID != 'None']):
getYear = myDB.selectone('SELECT IssueDate, ReleaseDate FROM Issues WHERE IssueID=?', [IssueID]).fetchone()
@ -1127,100 +1139,108 @@ class WebInterface(object):
else:
ComicYear = getYear['IssueDate'][:4]
for chk_log in chk_logresults:
nzbname = chk_log['NZBName']
id = chk_log['ID']
fullprov = chk_log['PROVIDER'] #the full newznab name if it exists will appear here as 'sitename (newznab)'
#now we break it down by provider to recreate the link.
#torrents first.
if Provider == '32P' or Provider == 'KAT':
if not mylar.ENABLE_TORRENT_SEARCH:
logger.error('Torrent Providers are not enabled - unable to process retry request until provider is re-enabled.')
return
#now we break it down by provider to recreate the link.
#torrents first.
if any([fullprov == '32P', fullprov == 'TPSE', fullprov == 'WWT', fullprov == 'DEM']):
if not mylar.ENABLE_TORRENT_SEARCH:
logger.error('Torrent Providers are not enabled - unable to process retry request until provider is re-enabled.')
continue
if Provider == '32P':
if not mylar.ENABLE_32P:
logger.error('32P is not enabled - unable to process retry request until provider is re-enabled.')
return
link = str(id)
if fullprov == '32P':
if not mylar.ENABLE_32P:
logger.error('32P is not enabled - unable to process retry request until provider is re-enabled.')
continue
elif Provider == 'KAT':
if not mylar.ENABLE_KAT:
logger.error('KAT is not enabled - unable to process retry request until provider is re-enabled.')
return
link = 'http://torcache.net/torrent/' + str(id) + '.torrent'
elif any([fullprov == 'TPSE', fullprov == 'WWT', fullprov == 'DEM']):
if not mylar.ENABLE_TPSE:
logger.error('TPSE is not enabled - unable to process retry request until provider is re-enabled.')
continue
logger.fdebug("sending .torrent to watchdir.")
logger.fdebug("ComicName:" + ComicName)
logger.fdebug("link:" + str(link))
logger.fdebug("Torrent Provider:" + Provider)
logger.fdebug("sending .torrent to watchdir.")
logger.fdebug("ComicName:" + ComicName)
logger.fdebug("Torrent Provider:" + fullprov)
logger.fdebug("Torrent ID:" + str(id))
rcheck = mylar.rsscheck.torsend2client(ComicName, IssueNumber, ComicYear, link, Provider)
if rcheck == "fail":
logger.error("Unable to send torrent - check logs and settings.")
else:
annualize = myDB.selectone('SELECT * FROM annuals WHERE IssueID=?', [IssueID]).fetchone()
if annualize is None:
modcomicname = ComicName
rcheck = mylar.rsscheck.torsend2client(ComicName, IssueNumber, ComicYear, id, fullprov)
if rcheck == "fail":
logger.error("Unable to send torrent - check logs and settings.")
continue
else:
logger.info('Successfully retried issue.')
break
else:
modcomicname = ComicName + ' Annual'
annualize = myDB.selectone('SELECT * FROM annuals WHERE IssueID=?', [IssueID]).fetchone()
if annualize is None:
modcomicname = ComicName
else:
modcomicname = ComicName + ' Annual'
comicinfo = []
comicinfo.append({"ComicName": ComicName,
"IssueNumber": IssueNumber,
"comyear": ComicYear,
"modcomicname": modcomicname})
comicinfo = []
comicinfo.append({"ComicName": ComicName,
"IssueNumber": IssueNumber,
"comyear": ComicYear,
"modcomicname": modcomicname})
newznabinfo = None
newznabinfo = None
if Provider == 'nzb.su':
if not mylar.NZBSU:
logger.error('nzb.su is not enabled - unable to process retry request until provider is re-enabled.')
return
# http://nzb.su/getnzb/ea1befdeee0affd663735b2b09010140.nzb&i=<uid>&r=<passkey>
link = 'http://nzb.su/getnzb/' + str(id) + '.nzb&i=' + str(mylar.NZBSU_UID) + '&r=' + str(mylar.NZBSU_APIKEY)
logger.info('fetched via nzb.su. Retrying the send : ' + str(link))
elif Provider == 'dognzb':
if not mylar.DOGNZB:
logger.error('Dognzb is not enabled - unable to process retry request until provider is re-enabled.')
return
# https://dognzb.cr/fetch/5931874bf7381b274f647712b796f0ac/<passkey>
link = 'https://dognzb.cr/fetch/' + str(id) + '/' + str(mylar.DOGNZB_APIKEY)
logger.info('fetched via dognzb. Retrying the send : ' + str(link))
elif Provider == 'experimental':
if not mylar.EXPERIMENTAL:
logger.error('Experimental is not enabled - unable to process retry request until provider is re-enabled.')
return
# http://nzbindex.nl/download/110818178
link = 'http://nzbindex.nl/download/' + str(id)
logger.info('fetched via experimental. Retrying the send : ' + str(link))
elif 'newznab' in Provider:
if not mylar.NEWZNAB:
logger.error('Newznabs are not enabled - unable to process retry request until provider is re-enabled.')
return
if Provider == 'nzb.su':
if not mylar.NZBSU:
logger.error('nzb.su is not enabled - unable to process retry request until provider is re-enabled.')
continue
# http://nzb.su/getnzb/ea1befdeee0affd663735b2b09010140.nzb&i=<uid>&r=<passkey>
link = 'http://nzb.su/getnzb/' + str(id) + '.nzb&i=' + str(mylar.NZBSU_UID) + '&r=' + str(mylar.NZBSU_APIKEY)
logger.info('fetched via nzb.su. Retrying the send : ' + str(link))
elif Provider == 'dognzb':
if not mylar.DOGNZB:
logger.error('Dognzb is not enabled - unable to process retry request until provider is re-enabled.')
continue
# https://dognzb.cr/fetch/5931874bf7381b274f647712b796f0ac/<passkey>
link = 'https://dognzb.cr/fetch/' + str(id) + '/' + str(mylar.DOGNZB_APIKEY)
logger.info('fetched via dognzb. Retrying the send : ' + str(link))
elif Provider == 'experimental':
if not mylar.EXPERIMENTAL:
logger.error('Experimental is not enabled - unable to process retry request until provider is re-enabled.')
continue
# http://nzbindex.nl/download/110818178
link = 'http://nzbindex.nl/download/' + str(id)
logger.info('fetched via experimental. Retrying the send : ' + str(link))
elif 'newznab' in Provider:
if not mylar.NEWZNAB:
logger.error('Newznabs are not enabled - unable to process retry request until provider is re-enabled.')
continue
# http://192.168.2.2/getnzb/4323f9c567c260e3d9fc48e09462946c.nzb&i=<uid>&r=<passkey>
# trickier - we have to scroll through all the newznabs until we find a match.
logger.info('fetched via newznab. Retrying the send.')
m = re.findall('[^()]+', fullprov)
tmpprov = m[0].strip()
# http://192.168.2.2/getnzb/4323f9c567c260e3d9fc48e09462946c.nzb&i=<uid>&r=<passkey>
# trickier - we have to scroll through all the newznabs until we find a match.
logger.info('fetched via newznab. Retrying the send.')
m = re.findall('[^()]+', fullprov)
tmpprov = m[0].strip()
for newznab_info in mylar.EXTRA_NEWZNABS:
if tmpprov.lower() in newznab_info[0].lower():
if (newznab_info[5] == '1' or newznab_info[5] == 1):
if newznab_info[1].endswith('/'):
newznab_host = newznab_info[1]
for newznab_info in mylar.EXTRA_NEWZNABS:
if tmpprov.lower() in newznab_info[0].lower():
if (newznab_info[5] == '1' or newznab_info[5] == 1):
if newznab_info[1].endswith('/'):
newznab_host = newznab_info[1]
else:
newznab_host = newznab_info[1] + '/'
newznab_api = newznab_info[3]
newznab_uid = newznab_info[4]
link = str(newznab_host) + 'getnzb/' + str(id) + '.nzb&i=' + str(newznab_uid) + '&r=' + str(newznab_api)
logger.info('newznab detected as : ' + str(newznab_info[0]) + ' @ ' + str(newznab_host))
logger.info('link : ' + str(link))
newznabinfo = (newznab_info[0], newznab_info[1], newznab_info[2], newznab_info[3], newznab_info[4])
break
else:
newznab_host = newznab_info[1] + '/'
newznab_api = newznab_info[3]
newznab_uid = newznab_info[4]
link = str(newznab_host) + 'getnzb/' + str(id) + '.nzb&i=' + str(newznab_uid) + '&r=' + str(newznab_api)
logger.info('newznab detected as : ' + str(newznab_info[0]) + ' @ ' + str(newznab_host))
logger.info('link : ' + str(link))
newznabinfo = (newznab_info[0], newznab_info[1], newznab_info[2], newznab_info[3], newznab_info[4])
break
else:
logger.error(str(newznab_info[0]) + ' is not enabled - unable to process retry request until provider is re-enabled.')
return
logger.error(str(newznab_info[0]) + ' is not enabled - unable to process retry request until provider is re-enabled.')
continue
sendit = search.searcher(Provider, nzbname, comicinfo, link=link, IssueID=IssueID, ComicID=ComicID, tmpprov=fullprov, directsend=True, newznab=newznabinfo)
sendit = search.searcher(Provider, nzbname, comicinfo, link=link, IssueID=IssueID, ComicID=ComicID, tmpprov=fullprov, directsend=True, newznab=newznabinfo)
break
return
retryissue.exposed = True
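The retry loop above walks candidate snatches newest-first; a small illustration of the ordering it relies on (operator.itemgetter over the dicts built from the snatch history - this assumes from operator import itemgetter is in scope in webserve.py):

from operator import itemgetter

providers_snatched = [{'Provider': '32P', 'DateAdded': '2016-08-18'},    #hypothetical rows
                      {'Provider': 'TPSE', 'DateAdded': '2016-08-20'}]
for ps in sorted(providers_snatched, key=itemgetter('DateAdded', 'Provider'), reverse=True):
    print ps['Provider']   #TPSE first (most recent snatch), then 32P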
def queueit(self, **kwargs):
@ -1815,6 +1835,7 @@ class WebInterface(object):
mylar.WANTED_TAB_OFF = False
try:
ab = upc['weeknumber']
bc = upc['year']
except TypeError:
logger.warn('Weekly Pull hasn\'t finished being generated as of yet (or has yet to initialize). Try to wait a few seconds more to accommodate processing.')
mylar.WANTED_TAB_OFF = True
@ -2062,7 +2083,7 @@ class WebInterface(object):
myDB = db.DBConnection()
failedlist = myDB.select('SELECT * from Failed')
for f in failedlist:
if f['Provider'] == 'KAT': #if any([f['Provider'] == 'KAT', f['Provider'] == '32P']):
if f['Provider'] == 'TPSE': #if any([f['Provider'] == 'TPSE', f['Provider'] == '32P']):
link = helpers.torrent_create(f['Provider'], f['ID'])
else:
link = f['ID']
@ -3314,7 +3335,6 @@ class WebInterface(object):
for RID in RemoveIDS:
newlist = [k for k in comiclist if k['ComicID'] != RID]
comiclist = newlist
logger.info('newlist: ' + str(newlist))
for cl in comiclist:
ComicName = cl['ComicName']
@ -3837,7 +3857,7 @@ class WebInterface(object):
"seedbox_user": mylar.SEEDBOX_USER,
"seedbox_pass": mylar.SEEDBOX_PASS,
"enable_torrent_search": helpers.checked(mylar.ENABLE_TORRENT_SEARCH),
"enable_kat": helpers.checked(mylar.ENABLE_KAT),
"enable_tpse": helpers.checked(mylar.ENABLE_TPSE),
"enable_32p": helpers.checked(mylar.ENABLE_32P),
"legacymode_32p": helpers.radio(mylar.MODE_32P, 0),
"authmode_32p": helpers.radio(mylar.MODE_32P, 1),
@ -4126,7 +4146,7 @@ class WebInterface(object):
nzbget_host=None, nzbget_port=None, nzbget_username=None, nzbget_password=None, nzbget_category=None, nzbget_priority=None, nzbget_directory=None,
usenet_retention=None, nzbsu=0, nzbsu_uid=None, nzbsu_apikey=None, nzbsu_verify=0, dognzb=0, dognzb_apikey=None, dognzb_verify=0, newznab=0, newznab_host=None, newznab_name=None, newznab_verify=0, newznab_apikey=None, newznab_uid=None, newznab_enabled=0,
enable_torznab=0, torznab_name=None, torznab_host=None, torznab_apikey=None, torznab_category=None, experimental=0, check_folder=None, enable_check_folder=0,
enable_meta=0, cbr2cbz_only=0, cmtagger_path=None, ct_tag_cr=0, ct_tag_cbl=0, ct_cbz_overwrite=0, unrar_cmd=None, enable_rss=0, rss_checkinterval=None, failed_download_handling=0, failed_auto=0, enable_torrent_search=0, enable_kat=0, enable_32p=0, mode_32p=0, rssfeed_32p=None, passkey_32p=None, username_32p=None, password_32p=None, snatchedtorrent_notify=0,
enable_meta=0, cbr2cbz_only=0, cmtagger_path=None, ct_tag_cr=0, ct_tag_cbl=0, ct_cbz_overwrite=0, unrar_cmd=None, enable_rss=0, rss_checkinterval=None, failed_download_handling=0, failed_auto=0, enable_torrent_search=0, enable_tpse=0, enable_32p=0, mode_32p=0, rssfeed_32p=None, passkey_32p=None, username_32p=None, password_32p=None, snatchedtorrent_notify=0,
enable_torrents=0, minseeds=0, local_watchdir=None, seedbox_watchdir=None, seedbox_user=None, seedbox_pass=None, seedbox_host=None, seedbox_port=None,
prowl_enabled=0, prowl_onsnatch=0, prowl_keys=None, prowl_priority=None, nma_enabled=0, nma_apikey=None, nma_priority=0, nma_onsnatch=0, pushover_enabled=0, pushover_onsnatch=0, pushover_apikey=None, pushover_userkey=None, pushover_priority=None, boxcar_enabled=0, boxcar_onsnatch=0, boxcar_token=None,
pushbullet_enabled=0, pushbullet_apikey=None, pushbullet_deviceid=None, pushbullet_onsnatch=0, torrent_downloader=0, torrent_local=0, torrent_seedbox=0, utorrent_host=None, utorrent_username=None, utorrent_password=None, utorrent_label=None,
@ -4218,7 +4238,7 @@ class WebInterface(object):
mylar.TRANSMISSION_USERNAME = transmission_username
mylar.TRANSMISSION_PASSWORD = transmission_password
mylar.ENABLE_TORRENT_SEARCH = int(enable_torrent_search)
mylar.ENABLE_KAT = int(enable_kat)
mylar.ENABLE_TPSE = int(enable_tpse)
mylar.ENABLE_32P = int(enable_32p)
mylar.MODE_32P = int(mode_32p)
mylar.RSSFEED_32P = rssfeed_32p


@ -426,7 +426,10 @@ def pullit(forcecheck=None):
if pulldate == '00000000' and mylar.ALT_PULL != 2:
pulldate = shipdate
weektmp = datetime.date(*(int(s) for s in pulldate.split('-')))
try:
weektmp = datetime.date(*(int(s) for s in pulldate.split('-')))
except TypeError:
weektmp = datetime.date.today()
weeknumber = weektmp.strftime("%U")
logger.info(u"Populating the NEW Weekly Pull list into Mylar for week " + str(weeknumber))