IMP: Added 0-day pack support for 32P (definitely beta quality), FIX: Reworked the post-process API to allow direct post-processing of issues/packs against either an IssueID or a ComicID as appropriate, FIX: Removed some unnecessary logging, IMP: Added better error handling/logging for 32P authentication (a proper error is now returned instead of a generic message), FIX: auto-snatch environment variables now use temporary values instead of reusing incorrect globals

evilhero 2018-03-22 10:00:34 -04:00
parent 47fb2db6ed
commit b0d2ab5430
12 changed files with 559 additions and 417 deletions
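
As context for the post-process API change above, a minimal sketch of how the reworked entry point can now be driven directly against an IssueID or ComicID (values are illustrative; the signature mirrors the process.Process change in this commit, and apicall=True marks the request as API-originated):

from mylar import process

# illustrative values only - issueid/comicid would come from the API caller;
# apicall=True lets PostProcessor treat this as a direct issue/pack post-process
fp = process.Process('Some.Comic.Pack.2018',                      # nzb_name (hypothetical)
                     '/downloads/complete/Some.Comic.Pack.2018',  # nzb_folder (hypothetical)
                     issueid=None,
                     comicid='12345',
                     apicall=True)
result = fp.post_process()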

View File

@ -15,6 +15,9 @@
-->
<a id="menu_link_scan" class="button">Download</a>
<a href="#" id="menu_link_refresh" onclick="doAjaxCall('pullSearch?week=${weekinfo['weeknumber']}&year=${weekinfo['year']}',$(this),'table')" data-success="Submitted background search request for new pull issues">Manually check for issues</a>
%if all([mylar.CONFIG.ENABLE_TORRENT_SEARCH is True, mylar.CONFIG.ENABLE_32P is True, mylar.CONFIG.MODE_32P is True]):
<a href="#" id="menu_link_refresh" onclick="doAjaxCall('download_0day?week=${weekinfo['midweek']}',$(this),'table')" data-success="Submitted background search request for 0-day pack for this week">Download 0-Day Pack</a>
%endif
</div>
</div>
<a href="home" class="back">&laquo; Back to overview</a>

View File

@ -44,7 +44,7 @@ class PostProcessor(object):
FOLDER_NAME = 2
FILE_NAME = 3
def __init__(self, nzb_name, nzb_folder, issueid=None, module=None, queue=None, comicid=None):
def __init__(self, nzb_name, nzb_folder, issueid=None, module=None, queue=None, comicid=None, apicall=False):
"""
Creates a new post processor with the given file path and optionally an NZB name.
@ -59,9 +59,14 @@ class PostProcessor(object):
else:
self.module = '[POST-PROCESSING]'
if queue:
if queue:
self.queue = queue
if apicall is True:
self.apicall = True
else:
self.apicall = False
if mylar.CONFIG.FILE_OPTS == 'copy':
self.fileop = shutil.copy
else:
@ -357,8 +362,8 @@ class PostProcessor(object):
self.oneoffinlist = False
if any([self.nzb_name == 'Manual Run', self.issueid is not None, self.comicid is not None]):
if all([self.issueid is None, self.comicid is not None]) or self.nzb_name == 'Manual Run':
if any([self.nzb_name == 'Manual Run', self.issueid is not None, self.comicid is not None, self.apicall is True]):
if all([self.issueid is None, self.comicid is not None, self.apicall is True]) or self.nzb_name == 'Manual Run':
if self.comicid is not None:
logger.fdebug('%s Now post-processing pack directly against ComicID: %s' % (module, self.comicid))
else:
@ -669,13 +674,13 @@ class PostProcessor(object):
xmld2 = xmld.dynamic_replace(helpers.conversion(x['Series']))
xfile = xmld2['mod_seriesname'].lower()
if re.sub('\|', '', xseries).strip() == re.sub('\|', '', xfile).strip():
logger.fdebug(module + '[DEFINITIVE-NAME MATCH] Definitive name match exactly to : %s [%s]' % (x['ComicName'], x['ComicID']))
#logger.fdebug(module + '[DEFINITIVE-NAME MATCH] Definitive name match exactly to : %s [%s]' % (x['ComicName'], x['ComicID']))
mlp.append(x)
else:
pass
if len(mlp) == 1:
manual_list = mlp
logger.fdebug(module + '[CONFIRMED-FORCE-OVERRIDE] Over-ride of matching taken due to exact name matching of series')
#logger.fdebug(module + '[CONFIRMED-FORCE-OVERRIDE] Over-ride of matching taken due to exact name matching of series')
#we should setup for manual post-processing of story-arc issues here
#we can also search by ComicID to just grab those particular arcs as an alternative as well (not done)
@ -984,74 +989,77 @@ class PostProcessor(object):
else:
#one-off manual pp'd of torrents
oneofflist = myDB.select("select s.Issue_Number, s.ComicName, s.IssueID, s.ComicID, s.Provider, w.PUBLISHER, w.weeknumber, w.year from snatched as s inner join nzblog as n on s.IssueID = n.IssueID and s.Hash is not NULL inner join weekly as w on s.IssueID = w.IssueID WHERE (s.Provider ='32P' or s.Provider='TPSE' or s.Provider='WWT' or s.Provider='DEM') AND n.OneOff == 1;")
if oneofflist is None:
logger.fdebug(module + ' No one-off\'s have ever been snatched using mylar.')
if all(['0-Day Week' in self.nzb_name, mylar.CONFIG.PACK_0DAY_WATCHLIST_ONLY is True]):
pass
else:
oneoffvals = []
oneoff_issuelist = []
nm = 0
for ofl in oneofflist:
oneoffvals.append({"ComicName": ofl['ComicName'],
"ComicPublisher": ofl['PUBLISHER'],
"Issue_Number": ofl['Issue_Number'],
"AlternateSearch": None,
"ComicID": ofl['ComicID'],
"IssueID": ofl['IssueID'],
"WatchValues": {"SeriesYear": None,
"LatestDate": None,
"ComicVersion": None,
"Publisher": ofl['PUBLISHER'],
"Total": None,
"ComicID": ofl['ComicID'],
"IsArc": False}})
oneofflist = myDB.select("select s.Issue_Number, s.ComicName, s.IssueID, s.ComicID, s.Provider, w.PUBLISHER, w.weeknumber, w.year from snatched as s inner join nzblog as n on s.IssueID = n.IssueID and s.Hash is not NULL inner join weekly as w on s.IssueID = w.IssueID WHERE (s.Provider ='32P' or s.Provider='WWT' or s.Provider='DEM') AND n.OneOff = 1;")
if oneofflist is None:
logger.fdebug(module + ' No one-off\'s have ever been snatched using mylar.')
else:
oneoffvals = []
oneoff_issuelist = []
nm = 0
for ofl in oneofflist:
oneoffvals.append({"ComicName": ofl['ComicName'],
"ComicPublisher": ofl['PUBLISHER'],
"Issue_Number": ofl['Issue_Number'],
"AlternateSearch": None,
"ComicID": ofl['ComicID'],
"IssueID": ofl['IssueID'],
"WatchValues": {"SeriesYear": None,
"LatestDate": None,
"ComicVersion": None,
"Publisher": ofl['PUBLISHER'],
"Total": None,
"ComicID": ofl['ComicID'],
"IsArc": False}})
for fl in filelist['comiclist']:
#logger.info('fl: %s' % fl)
for ofv in oneoffvals:
#logger.info('ofv: %s' % ofv)
wm = filechecker.FileChecker(watchcomic=ofv['ComicName'], Publisher=ofv['ComicPublisher'], AlternateSearch=None, manual=ofv['WatchValues'])
watchmatch = wm.matchIT(fl)
if watchmatch['process_status'] == 'fail':
nm+=1
continue
else:
temploc= watchmatch['justthedigits'].replace('_', ' ')
temploc = re.sub('[\#\']', '', temploc)
logger.info('watchmatch: %s' % watchmatch)
if 'annual' in temploc.lower():
biannchk = re.sub('-', '', temploc.lower()).strip()
if 'biannual' in biannchk:
logger.fdebug(module + ' Bi-Annual detected.')
fcdigit = helpers.issuedigits(re.sub('biannual', '', str(biannchk)).strip())
for fl in filelist['comiclist']:
#logger.info('fl: %s' % fl)
for ofv in oneoffvals:
#logger.info('ofv: %s' % ofv)
wm = filechecker.FileChecker(watchcomic=ofv['ComicName'], Publisher=ofv['ComicPublisher'], AlternateSearch=None, manual=ofv['WatchValues'])
watchmatch = wm.matchIT(fl)
if watchmatch['process_status'] == 'fail':
nm+=1
continue
else:
fcdigit = helpers.issuedigits(re.sub('annual', '', str(temploc.lower())).strip())
logger.fdebug(module + ' Annual detected [' + str(fcdigit) +']. ComicID assigned as ' + str(ofv['ComicID']))
annchk = "yes"
else:
fcdigit = helpers.issuedigits(temploc)
temploc= watchmatch['justthedigits'].replace('_', ' ')
temploc = re.sub('[\#\']', '', temploc)
if fcdigit == helpers.issuedigits(ofv['Issue_Number']):
if watchmatch['sub']:
clocation = os.path.join(watchmatch['comiclocation'], watchmatch['sub'], helpers.conversion(watchmatch['comicfilename']))
logger.info('watchmatch: %s' % watchmatch)
if 'annual' in temploc.lower():
biannchk = re.sub('-', '', temploc.lower()).strip()
if 'biannual' in biannchk:
logger.fdebug(module + ' Bi-Annual detected.')
fcdigit = helpers.issuedigits(re.sub('biannual', '', str(biannchk)).strip())
else:
fcdigit = helpers.issuedigits(re.sub('annual', '', str(temploc.lower())).strip())
logger.fdebug(module + ' Annual detected [' + str(fcdigit) +']. ComicID assigned as ' + str(ofv['ComicID']))
annchk = "yes"
else:
clocation = os.path.join(watchmatch['comiclocation'],helpers.conversion(watchmatch['comicfilename']))
oneoff_issuelist.append({"ComicLocation": clocation,
"ComicID": ofv['ComicID'],
"IssueID": ofv['IssueID'],
"IssueNumber": ofv['Issue_Number'],
"ComicName": ofv['ComicName'],
"One-Off": True})
self.oneoffinlist = True
else:
logger.fdebug(module + ' No corresponding issue # in dB found for %s # %s' % (ofv['ComicName'],ofv['Issue_Number']))
continue
fcdigit = helpers.issuedigits(temploc)
logger.fdebug(module + '[SUCCESSFUL MATCH: ' + ofv['ComicName'] + '-' + ofv['ComicID'] + '] Match verified for ' + helpers.conversion(fl['comicfilename']))
break
if fcdigit == helpers.issuedigits(ofv['Issue_Number']):
if watchmatch['sub']:
clocation = os.path.join(watchmatch['comiclocation'], watchmatch['sub'], helpers.conversion(watchmatch['comicfilename']))
else:
clocation = os.path.join(watchmatch['comiclocation'],helpers.conversion(watchmatch['comicfilename']))
oneoff_issuelist.append({"ComicLocation": clocation,
"ComicID": ofv['ComicID'],
"IssueID": ofv['IssueID'],
"IssueNumber": ofv['Issue_Number'],
"ComicName": ofv['ComicName'],
"One-Off": True})
self.oneoffinlist = True
else:
logger.fdebug(module + ' No corresponding issue # in dB found for %s # %s' % (ofv['ComicName'],ofv['Issue_Number']))
continue
if any([self.nzb_name != 'Manual Run', self.oneoffinlist is True]) and all([self.issueid is None, self.comicid is None]):
logger.fdebug(module + '[SUCCESSFUL MATCH: ' + ofv['ComicName'] + '-' + ofv['ComicID'] + '] Match verified for ' + helpers.conversion(fl['comicfilename']))
break
if any([self.nzb_name != 'Manual Run', self.oneoffinlist is True]) and all([self.issueid is None, self.comicid is None, self.apicall is False]):
ppinfo = []
if self.oneoffinlist is False:
nzbname = self.nzb_name
@ -1195,7 +1203,7 @@ class PostProcessor(object):
logger.info('[PPINFO-POST-PROCESSING-ATTEMPT] %s' % pp)
self.nzb_or_oneoff_pp(tinfo=pp)
if any([self.nzb_name == 'Manual Run', self.issueid is not None, self.comicid is not None]):
if any([self.nzb_name == 'Manual Run', self.issueid is not None, self.comicid is not None, self.apicall is True]):
#loop through the hits here.
if len(manual_list) == 0 and len(manual_arclist) == 0:
logger.info(module + ' No matches for Manual Run ... exiting.')

View File

@ -358,7 +358,7 @@ class Api(object):
comicid = kwargs['comicid']
if 'apc_version' not in kwargs:
fp = process.Process(self.nzb_name, self.nzb_folder, issueid=issueid, failed=failed, comicid=comicid)
fp = process.Process(self.nzb_name, self.nzb_folder, issueid=issueid, failed=failed, comicid=comicid, apicall=True)
self.data = fp.post_process()
else:
logger.info('[API] Api Call from ComicRN detected - initiating script post-processing.')
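
For reference, this path can also be exercised over the HTTP API; a minimal sketch, assuming the command routed to this handler is forceProcess and that an API key is configured (the command name, parameter names, and port are assumptions, not confirmed by this diff):

# hedged sketch - 'forceProcess', the parameter names and the port are assumptions
import requests

params = {'apikey': 'YOUR_MYLAR_API_KEY',                          # hypothetical placeholder
          'cmd': 'forceProcess',                                   # assumed command name for this handler
          'nzb_name': 'Some.Comic.001.2018.cbz',                   # illustrative
          'nzb_folder': '/downloads/complete/Some.Comic.001.2018', # illustrative
          'comicid': '12345'}                                      # post-process directly against a ComicID
r = requests.get('http://localhost:8090/api', params=params)
print(r.text)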

View File

@ -1,4 +1,5 @@
import urllib2
import json
import re
import time
import math
@ -37,7 +38,7 @@ class info32p(object):
if not lses.login():
if not self.test:
logger.error(self.module + ' [LOGIN FAILED] Disabling 32P provider until login error(s) can be fixed in order to avoid temporary bans.')
logger.error('%s [LOGIN FAILED] Disabling 32P provider until login error(s) can be fixed in order to avoid temporary bans.' % self.module)
return "disable"
else:
if self.error:
@ -45,7 +46,7 @@ class info32p(object):
else:
return self.method
else:
logger.fdebug(self.module + '[LOGIN SUCCESS] Now preparing for the use of 32P keyed authentication...')
logger.fdebug('%s [LOGIN SUCCESS] Now preparing for the use of 32P keyed authentication...' % self.module)
self.authkey = lses.authkey
self.passkey = lses.passkey
self.uid = lses.uid
@ -77,7 +78,7 @@ class info32p(object):
else:
verify = False
logger.fdebug('[32P] Verify SSL set to : ' + str(verify))
logger.fdebug('[32P] Verify SSL set to : %s' % verify)
if not verify:
#32P throws back an insecure warning because it can't validate against the CA. The below suppresses the message just for 32P instead of being displa$
@ -95,7 +96,7 @@ class info32p(object):
soup.prettify()
if self.searchterm:
logger.info('[32P] Successfully authenticated. Initiating search for : ' + self.searchterm)
logger.info('[32P] Successfully authenticated. Initiating search for : %s' % self.searchterm)
return self.search32p(s)
logger.info('[32P] Successfully authenticated.')
@ -103,7 +104,7 @@ class info32p(object):
all_script2 = soup.find_all("link", {"rel": "alternate"})
authfound = False
logger.info(self.module + ' Attempting to integrate with all of your 32P Notification feeds.')
logger.info('%s Attempting to integrate with all of your 32P Notification feeds.' % self.module)
#get inkdrop count ...
#user_info = soup.find_all(attrs={"class": "stat"})
@ -132,7 +133,7 @@ class info32p(object):
notifynumber_st = alurl.find('torrents_notify_')
notifynumber_en = alurl.find('_', notifynumber_st +17)
notifynumber = alurl[notifynumber_st:notifynumber_en]
logger.fdebug(self.module + ' [NOTIFICATION: ' + str(notifyname) + '] Notification ID: ' + str(notifynumber))
logger.fdebug('%s [NOTIFICATION: %s] Notification ID: %s' % (self.module, notifyname,notifynumber))
#generate the rss-url here
feedinfo.append({'feed': notifynumber + '_' + str(self.passkey),
@ -167,71 +168,78 @@ class info32p(object):
return feedinfo
def searchit(self):
#self.searchterm is a tuple containing series name, issue number, volume and publisher.
series_search = self.searchterm['series']
comic_id = self.searchterm['id']
annualize = False
if 'annual' in series_search.lower():
series_search = re.sub(' annual', '', series_search.lower()).strip()
annualize = True
issue_search = self.searchterm['issue']
volume_search = self.searchterm['volume']
publisher_search = self.searchterm['publisher']
spl = [x for x in self.publisher_list if x in publisher_search]
for x in spl:
publisher_search = re.sub(x, '', publisher_search).strip()
logger.info('publisher search set to : ' + publisher_search)
chk_id = None
# lookup the ComicID in the 32p sqlite3 table to pull the series_id to use.
if comic_id:
chk_id = helpers.checkthe_id(comic_id)
#logger.info('searchterm: %s' % self.searchterm)
series_search = self.searchterm['series']
#self.searchterm is a tuple containing series name, issue number, volume and publisher.
if series_search.startswith('0-Day Comics Pack'):
torrentid = 22247 #2018
issue_search = self.searchterm['issue'] #'21' #Wed
volume_search = self.searchterm['volume'] #'2' #2nd month
publisher_search = None #'2' #2nd month
comic_id = None
else:
comic_id = self.searchterm['id']
if any([chk_id is None, mylar.CONFIG.DEEP_SEARCH_32P is True]):
#generate the dynamic name of the series here so we can match it up
as_d = filechecker.FileChecker()
as_dinfo = as_d.dynamic_replace(series_search)
mod_series = re.sub('\|','', as_dinfo['mod_seriesname']).strip()
as_puinfo = as_d.dynamic_replace(publisher_search)
pub_series = as_puinfo['mod_seriesname']
annualize = False
if 'annual' in series_search.lower():
series_search = re.sub(' annual', '', series_search.lower()).strip()
annualize = True
issue_search = self.searchterm['issue']
volume_search = self.searchterm['volume']
publisher_search = self.searchterm['publisher']
spl = [x for x in self.publisher_list if x in publisher_search]
for x in spl:
publisher_search = re.sub(x, '', publisher_search).strip()
#logger.info('publisher search set to : %s' % publisher_search)
logger.info('series_search: ' + series_search)
# lookup the ComicID in the 32p sqlite3 table to pull the series_id to use.
if comic_id:
chk_id = helpers.checkthe_id(comic_id)
if '/' in series_search:
series_search = series_search[:series_search.find('/')]
if ':' in series_search:
series_search = series_search[:series_search.find(':')]
if ',' in series_search:
series_search = series_search[:series_search.find(',')]
if any([chk_id is None, mylar.CONFIG.DEEP_SEARCH_32P is True]):
#generate the dynamic name of the series here so we can match it up
as_d = filechecker.FileChecker()
as_dinfo = as_d.dynamic_replace(series_search)
mod_series = re.sub('\|','', as_dinfo['mod_seriesname']).strip()
as_puinfo = as_d.dynamic_replace(publisher_search)
pub_series = as_puinfo['mod_seriesname']
logger.info('search_32p: %s' % mylar.CONFIG.SEARCH_32P)
if mylar.CONFIG.SEARCH_32P is False:
url = 'https://walksoftly.itsaninja.party/serieslist.php'
params = {'series': re.sub('\|','', mod_series.lower()).strip()} #series_search}
logger.info('search query: %s' % re.sub('\|', '', mod_series.lower()).strip())
try:
t = requests.get(url, params=params, verify=True, headers={'USER-AGENT': mylar.USER_AGENT[:mylar.USER_AGENT.find('/')+7] + mylar.USER_AGENT[mylar.USER_AGENT.find('(')+1]})
except requests.exceptions.RequestException as e:
logger.warn(e)
return "no results"
logger.fdebug('series_search: %s' % series_search)
if t.status_code == '619':
logger.warn('[' + str(t.status_code) + '] Unable to retrieve data from site.')
return "no results"
elif t.status_code == '999':
logger.warn('[' + str(t.status_code) + '] No series title was provided to the search query.')
return "no results"
if '/' in series_search:
series_search = series_search[:series_search.find('/')]
if ':' in series_search:
series_search = series_search[:series_search.find(':')]
if ',' in series_search:
series_search = series_search[:series_search.find(',')]
try:
results = t.json()
except:
results = t.text
logger.fdebug('config.search_32p: %s' % mylar.CONFIG.SEARCH_32P)
if mylar.CONFIG.SEARCH_32P is False:
url = 'https://walksoftly.itsaninja.party/serieslist.php'
params = {'series': re.sub('\|','', mod_series.lower()).strip()} #series_search}
logger.fdebug('search query: %s' % re.sub('\|', '', mod_series.lower()).strip())
try:
t = requests.get(url, params=params, verify=True, headers={'USER-AGENT': mylar.USER_AGENT[:mylar.USER_AGENT.find('/')+7] + mylar.USER_AGENT[mylar.USER_AGENT.find('(')+1]})
except requests.exceptions.RequestException as e:
logger.warn(e)
return "no results"
logger.info('results: %s' % results)
if len(results) == 0:
logger.warn('No results found for search on 32P.')
return "no results"
if t.status_code == '619':
logger.warn('[%s] Unable to retrieve data from site.' % t.status_code)
return "no results"
elif t.status_code == '999':
logger.warn('[%s] No series title was provided to the search query.' % t.status_code)
return "no results"
try:
results = t.json()
except:
results = t.text
if len(results) == 0:
logger.warn('No results found for search on 32P.')
return "no results"
with cfscrape.create_scraper() as s:
s.headers = self.headers
@ -242,49 +250,53 @@ class info32p(object):
pdata = []
pubmatch = False
if any([not chk_id, mylar.CONFIG.DEEP_SEARCH_32P is True]):
if mylar.CONFIG.SEARCH_32P is True:
url = 'https://32pag.es/torrents.php' #?action=serieslist&filter=' + series_search #&filter=F
params = {'action': 'serieslist', 'filter': series_search}
time.sleep(1) #just to make sure we don't hammer, 1s pause.
t = s.get(url, params=params, verify=True, allow_redirects=True)
soup = BeautifulSoup(t.content, "html.parser")
results = soup.find_all("a", {"class":"object-qtip"},{"data-type":"torrentgroup"})
for r in results:
if mylar.CONFIG.SEARCH_32P is True:
torrentid = r['data-id']
torrentname = r.findNext(text=True)
torrentname = torrentname.strip()
else:
torrentid = r['id']
torrentname = r['series']
as_d = filechecker.FileChecker()
as_dinfo = as_d.dynamic_replace(torrentname)
seriesresult = re.sub('\|','', as_dinfo['mod_seriesname']).strip()
logger.info('searchresult: ' + seriesresult + ' --- ' + mod_series + '[' + publisher_search + ']')
if seriesresult.lower() == mod_series.lower():
logger.fdebug('[MATCH] ' + torrentname + ' [' + str(torrentid) + ']')
data.append({"id": torrentid,
"series": torrentname})
elif publisher_search.lower() in seriesresult.lower():
logger.fdebug('[MATCH] Publisher match.')
tmp_torrentname = re.sub(publisher_search.lower(), '', seriesresult.lower()).strip()
as_t = filechecker.FileChecker()
as_tinfo = as_t.dynamic_replace(tmp_torrentname)
if re.sub('\|', '', as_tinfo['mod_seriesname']).strip() == mod_series.lower():
logger.fdebug('[MATCH] ' + torrentname + ' [' + str(torrentid) + ']')
pdata.append({"id": torrentid,
"series": torrentname})
pubmatch = True
logger.info(str(len(data)) + ' series listed for searching that match.')
if series_search.startswith('0-Day Comics Pack'):
data.append({"id": torrentid,
"series": series_search})
else:
logger.info('Exact series ID already discovered previously. Setting to :' + chk_id['series'] + '[' + str(chk_id['id']) + ']')
pdata.append({"id": chk_id['id'],
"series": chk_id['series']})
pubmatch = True
if any([not chk_id, mylar.CONFIG.DEEP_SEARCH_32P is True]):
if mylar.CONFIG.SEARCH_32P is True:
url = 'https://32pag.es/torrents.php' #?action=serieslist&filter=' + series_search #&filter=F
params = {'action': 'serieslist', 'filter': series_search}
time.sleep(1) #just to make sure we don't hammer, 1s pause.
t = s.get(url, params=params, verify=True, allow_redirects=True)
soup = BeautifulSoup(t.content, "html.parser")
results = soup.find_all("a", {"class":"object-qtip"},{"data-type":"torrentgroup"})
for r in results:
if mylar.CONFIG.SEARCH_32P is True:
torrentid = r['data-id']
torrentname = r.findNext(text=True)
torrentname = torrentname.strip()
else:
torrentid = r['id']
torrentname = r['series']
as_d = filechecker.FileChecker()
as_dinfo = as_d.dynamic_replace(torrentname)
seriesresult = re.sub('\|','', as_dinfo['mod_seriesname']).strip()
logger.fdebug('searchresult: %s --- %s [%s]' % (seriesresult, mod_series, publisher_search))
if seriesresult.lower() == mod_series.lower():
logger.fdebug('[MATCH] %s [%s]' % (torrentname, torrentid))
data.append({"id": torrentid,
"series": torrentname})
elif publisher_search.lower() in seriesresult.lower():
logger.fdebug('[MATCH] Publisher match.')
tmp_torrentname = re.sub(publisher_search.lower(), '', seriesresult.lower()).strip()
as_t = filechecker.FileChecker()
as_tinfo = as_t.dynamic_replace(tmp_torrentname)
if re.sub('\|', '', as_tinfo['mod_seriesname']).strip() == mod_series.lower():
logger.fdebug('[MATCH] %s [%s]' % (torrentname, torrentid))
pdata.append({"id": torrentid,
"series": torrentname})
pubmatch = True
logger.fdebug('%s series listed for searching that match.' % len(data))
else:
logger.fdebug('Exact series ID already discovered previously. Setting to : %s [%s]' % (chk_id['series'], chk_id['id']))
pdata.append({"id": chk_id['id'],
"series": chk_id['series']})
pubmatch = True
if all([len(data) == 0, len(pdata) == 0]):
return "no results"
@ -294,10 +306,9 @@ class info32p(object):
dataset += data
if len(pdata) > 0:
dataset += pdata
logger.info('dataset: %s' % dataset)
logger.info(str(len(dataset)) + ' series match the title being searched for on 32P...')
logger.fdebug(str(len(dataset)) + ' series match the title being searched for on 32P...')
if chk_id is None and any([len(data) == 1, len(pdata) == 1]):
if all([chk_id is None, not series_search.startswith('0-Day Comics Pack')]) and any([len(data) == 1, len(pdata) == 1]):
#update the 32p_reference so we avoid doing a url lookup next time
helpers.checkthe_id(comic_id, dataset)
else:
@ -307,37 +318,44 @@ class info32p(object):
resultlist = {}
for x in dataset:
#for 0-day packs, issue=week#, volume=month, id=0-day year pack
payload = {'action': 'groupsearch',
'id': x['id'], #searchid,
'issue': issue_search}
#for 0-day packs, issue=week#, volume=month, id=0-day year pack (ie.issue=21&volume=2 for feb.21st)
payload = {"action": "groupsearch",
"id": x['id'], #searchid,
"issue": issue_search}
#in order to match up against 0-day stuff, volume has to be none at this point
#when doing other searches tho, this should be allowed to go through
#if all([volume_search != 'None', volume_search is not None]):
# payload.update({'volume': re.sub('v', '', volume_search).strip()})
if series_search.startswith('0-Day Comics Pack'):
payload.update({"volume": volume_search})
logger.info('payload: ' + str(payload))
payload = json.dumps(payload)
payload = json.loads(payload)
logger.fdebug('payload: %s' % payload)
url = 'https://32pag.es/ajax.php'
time.sleep(1) #just to make sure we don't hammer, 1s pause.
try:
d = s.post(url, params=payload, verify=True, allow_redirects=True)
#logger.debug(self.module + ' Reply from AJAX: \n %s', d.text)
d = s.get(url, params=payload, verify=True, allow_redirects=True)
except Exception as e:
logger.info(self.module + ' Could not POST URL %s', url)
logger.error('%s [%s] Could not POST URL %s' % (self.module, e, url))
try:
searchResults = d.json()
except:
searchResults = d.text
logger.debug(self.module + ' Search Result did not return valid JSON, falling back on text: %s', searchResults)
logger.debug('%s Search Result did not return valid JSON, falling back on text: %s' % (self.module, searchResults))
return False
#logger.debug(self.module + " Search Result: %s", searchResults)
if searchResults['status'] == 'success' and searchResults['count'] > 0:
logger.info('successfully retrieved ' + str(searchResults['count']) + ' search results.')
logger.fdebug('successfully retrieved %s search results' % searchResults['count'])
for a in searchResults['details']:
if series_search.startswith('0-Day Comics Pack'):
title = series_search
else:
title = self.searchterm['series'] + ' v' + a['volume'] + ' #' + a['issues']
results32p.append({'link': a['id'],
'title': self.searchterm['series'] + ' v' + a['volume'] + ' #' + a['issues'],
'title': title,
'filesize': a['size'],
'issues': a['issues'],
'pack': a['pack'],
@ -350,9 +368,12 @@ class info32p(object):
'pubdate': datetime.datetime.fromtimestamp(float(a['upload_time'])).strftime('%a, %d %b %Y %H:%M:%S'),
'int_pubdate': float(a['upload_time'])})
else:
logger.fdebug('32P did not return any valid search results.')
if len(results32p) > 0:
resultlist['entries'] = sorted(results32p, key=itemgetter('pack','title'), reverse=False)
logger.debug('%s Resultslist: %s' % (self.module, resultlist))
else:
resultlist = 'no results'
@ -372,15 +393,15 @@ class info32p(object):
try:
self.ses = cfscrape.create_scraper()
except Exception as e:
logger.error(self.module + " Can't create session with cfscrape")
logger.error('%s Can\'t create session with cfscrape' % self.module)
self.session_path = session_path if session_path is not None else os.path.join(mylar.CONFIG.CACHE_DIR, ".32p_cookies.dat")
self.ses.cookies = LWPCookieJar(self.session_path)
if not os.path.exists(self.session_path):
logger.fdebug(self.module + ' Session cookie does not exist. Signing in and Creating.')
logger.fdebug('%s Session cookie does not exist. Signing in and Creating.' % self.module)
self.ses.cookies.save()
else:
logger.fdebug(self.module + ' Session cookie found. Attempting to load...')
logger.fdebug('%s Session cookie found. Attempting to load...' % self.module)
self.ses.cookies.load(ignore_discard=True)
self.un = un
self.pw = pw
@ -431,27 +452,26 @@ class info32p(object):
try:
r = self.ses.get(u, params=params, timeout=60, allow_redirects=False, cookies=testcookie)
except Exception as e:
logger.error("Got an exception trying to GET from to:" + u)
logger.error('Got an exception [%s] trying to GET to: %s' % (e,u))
self.error = {'status':'error', 'message':'exception trying to retrieve site'}
return False
if r.status_code != 200:
if r.status_code == 302:
newloc = r.headers.get('location', '')
logger.warn("Got redirect from the POST-ajax action=login GET:" + newloc)
logger.warn('Got redirect from the POST-ajax action=login GET: %s' % newloc)
self.error = {'status':'redirect-error', 'message':'got redirect from POST-ajax login action : ' + newloc}
else:
logger.error("Got bad status code in the POST-ajax action=login GET:" + str(r.status_code))
logger.error('Got bad status code in the POST-ajax action=login GET: %s' % r.status_code)
self.error = {'status':'bad status code', 'message':'bad status code received in the POST-ajax login action :' + str(r.status_code)}
return False
try:
j = r.json()
except:
logger.warn("Error - response from session-based skey check was not JSON: %s",r.text)
logger.warn('Error - response from session-based skey check was not JSON: %s' % r.text)
return False
#logger.info(j)
self.uid = j['response']['id']
self.authkey = j['response']['authkey']
self.passkey = pk = j['response']['passkey']
@ -466,7 +486,7 @@ class info32p(object):
except:
logger.error('Inkdrop result did not return valid JSON, unable to verify response')
else:
logger.info('inkdrops: %s' % self.inkdrops)
logger.fdebug('inkdrops: %s' % self.inkdrops)
return True
@ -490,32 +510,32 @@ class info32p(object):
try:
r = self.ses.post(u, data=postdata, timeout=60, allow_redirects=True)
logger.debug(self.module + ' Status Code: ' + str(r.status_code))
logger.debug('%s Status Code: %s' % (self.module, r.status_code))
except Exception as e:
logger.error(self.module + " Got an exception when trying to login to %s POST [%s]", (u, e))
logger.error('%s Got an exception when trying to login: %s' % (self.module, e))
self.error = {'status':'exception', 'message':'Exception when trying to login'}
return False
if r.status_code != 200:
logger.warn(self.module + " Got bad status code from login POST: %d\n%s\n%s", r.status_code, r.text, r.headers)
logger.debug(self.module + " Request URL: %s \n Content: %s \n History: %s", r.url ,r.text, r.history)
logger.warn('%s Got bad status code from login POST: %d\n%s\n%s' % (self.module, r.status_code, r.text, r.headers))
logger.debug('%s Request URL: %s \n Content: %s \n History: %s' % (self.module, r.url ,r.text, r.history))
self.error = {'status':'Bad Status code', 'message':(r.status_code, r.text, r.headers)}
return False
try:
logger.debug(self.module + ' Trying to analyze login JSON reply from 32P: %s', r.text)
logger.debug('%s Trying to analyze login JSON reply from 32P: %s' % (self.module, r.text))
d = r.json()
except:
logger.debug(self.module + " Request URL: %s \n Content: %s \n History: %s", r.url ,r.text, r.history)
logger.error(self.module + " The data returned by the login page was not JSON: %s", r.text)
logger.debug('%s Request URL: %s \n Content: %s \n History: %s' % (self.module, r.url ,r.text, r.history))
logger.error('%s The data returned by the login page was not JSON: %s' % (self.module, r.text))
self.error = {'status':'JSON not returned', 'message':r.text}
return False
if d['status'] == 'success':
return True
logger.error(self.module + " Got unexpected status result: %s", d)
logger.debug(self.module + " Request URL: %s \n Content: %s \n History: %s \n Json: %s", r.url ,r.text, r.history, d)
logger.error('%s Got unexpected status result: %s' % (self.module, d))
logger.debug('%s Request URL: %s \n Content: %s \n History: %s \n Json: %s' % (self.module, r.url ,r.text, r.history, d))
self.error = d
return False
@ -551,7 +571,7 @@ class info32p(object):
Note that this will generate a new session on 32pag.es every time you login successfully!
This is why the "keeplogged" option is only for when you persist cookies to disk.
Note that after a successful login, it will test the session key, which has the side effect of
getting the authkey,passkey & uid
@ -563,12 +583,12 @@ class info32p(object):
if self.cookie_exists('session'):
self.ses.cookies.save(ignore_discard=True)
if (not self.test_skey_valid()):
logger.error("Bad error: The attempt to get your attributes after successful login failed!")
logger.error('Bad error: The attempt to get your attributes after successful login failed!')
self.error = {'status': 'Bad error', 'message': 'Attempt to get attributes after successful login failed.'}
return False
return True
logger.warn("Missing session cookie after successful login: %s", self.ses.cookies)
logger.warn('Missing session cookie after successful login: %s' % self.ses.cookies)
self.ses.cookies.clear()
self.ses.cookies.save()
return False
@ -584,16 +604,16 @@ class info32p(object):
self.authkey, self.passkey, and self.uid
'''
if (self.test_skey_valid()):
logger.fdebug(self.module + ' Session key-based login was good.')
logger.fdebug('%s Session key-based login was good.' % self.module)
self.method = 'Session Cookie retrieved OK.'
return True
if (self.test_login()):
logger.fdebug(self.module + ' Credential-based login was good.')
logger.fdebug('%s Credential-based login was good.' % self.module)
self.method = 'Credential-based login OK.'
return True
logger.warn(self.module + ' Both session key and credential-based logins failed.')
logger.warn('%s Both session key and credential-based logins failed.' % self.module)
self.method = 'Both session key & credential login failed.'
return False

View File

@ -85,6 +85,7 @@ _CONFIG_DEFINITIONS = OrderedDict({
'WEEKFOLDER_FORMAT': (int, 'Weekly', 0),
'INDIE_PUB': (int, 'Weekly', 75),
'BIGGIE_PUB': (int, 'Weekly', 55),
'PACK_0DAY_WATCHLIST_ONLY': (bool, 'Weekly', True),
'HTTP_PORT' : (int, 'Interface', 8090),
'HTTP_HOST' : (str, 'Interface', '0.0.0.0'),

View File

@ -120,6 +120,7 @@ class FileChecker(object):
'comiclocation': runresults['comiclocation'],
'series_name': runresults['series_name'],
'series_name_decoded': runresults['series_name_decoded'],
'dynamic_name': runresults['dynamic_name'],
'series_volume': runresults['series_volume'],
'alt_series': runresults['alt_series'],
'alt_issue': runresults['alt_issue'],
@ -918,6 +919,10 @@ class FileChecker(object):
if issue_number is None or series_name is None:
logger.fdebug('Cannot parse the filename properly. I\'m going to make note of this filename so that my evil ruler can make it work.')
if series_name is not None:
dreplace = self.dynamic_replace(series_name)['mod_seriesname']
else:
dreplace = None
return {'parse_status': 'failure',
'sub': path_list,
'comicfilename': filename,
@ -926,12 +931,14 @@ class FileChecker(object):
'series_name_decoded': series_name_decoded,
'alt_series': alt_series,
'alt_issue': alt_issue,
'dynamic_name': dreplace,
'issue_number': issue_number,
'justthedigits': issue_number, #redundant but it's needed atm
'series_volume': issue_volume,
'issue_year': issue_year,
'annual_comicid': None,
'scangroup': scangroup}
'scangroup': scangroup,
'reading_order': None}
if self.justparse:
return {'parse_status': 'success',
@ -1107,7 +1114,7 @@ class FileChecker(object):
'scangroup': series_info['scangroup']}
else:
logger.info('[NO MATCH] ' + filename + ' [WATCHLIST:' + self.watchcomic + ']')
#logger.fdebug('[NO MATCH] ' + filename + ' [WATCHLIST:' + self.watchcomic + ']')
return {'process_status': 'fail',
'comicfilename': filename,
'sub': series_info['sub'],

View File

@ -3033,9 +3033,9 @@ def script_env(mode, vars):
mylar_env['mylar_torrent_file'] = str(vars['torrentinfo']['filepath'])
else:
try:
mylar_env['mylar_release_files'] = "|".join(vars['torrentinfo']['files'])
mylar_env['mylar_release_files'] = '|'.join(vars['torrentinfo']['files'])
except TypeError:
mylar_env['mylar_release_files'] = "|".join(json.dumps(vars['torrentinfo']['files']))
mylar_env['mylar_release_files'] = '|'.join(json.dumps(vars['torrentinfo']['files']))
elif 'nzbinfo' in vars:
mylar_env['mylar_release_id'] = vars['nzbinfo']['id']
if 'client_id' in vars['nzbinfo']:
@ -3048,11 +3048,17 @@ def script_env(mode, vars):
mylar_env['mylar_release_provider'] = vars['provider']
if 'comicinfo' in vars:
try:
mylar_env['mylar_comicid'] = vars['comicinfo']['comicid'] #comicid/issueid are unknown for one-offs (should be fixable tho)
if vars['comicinfo']['comicid'] is not None:
mylar_env['mylar_comicid'] = vars['comicinfo']['comicid'] #comicid/issueid are unknown for one-offs (should be fixable tho)
else:
mylar_env['mylar_comicid'] = 'None'
except:
pass
try:
mylar_env['mylar_issueid'] = vars['comicinfo']['issueid']
if vars['comicinfo']['issueid'] is not None:
mylar_env['mylar_issueid'] = vars['comicinfo']['issueid']
else:
mylar_env['mylar_issueid'] = 'None'
except:
pass
mylar_env['mylar_comicname'] = vars['comicinfo']['comicname']
@ -3072,8 +3078,10 @@ def script_env(mode, vars):
mylar_env['mylar_release_pack'] = str(vars['pack'])
if vars['pack'] is True:
mylar_env['mylar_release_pack_numbers'] = vars['pack_numbers']
mylar_env['mylar_release_pack_issuelist'] = vars['pack_issuelist']
if vars['pack_numbers'] is not None:
mylar_env['mylar_release_pack_numbers'] = vars['pack_numbers']
if vars['pack_issuelist'] is not None:
mylar_env['mylar_release_pack_issuelist'] = vars['pack_issuelist']
mylar_env['mylar_method'] = vars['method']
mylar_env['mylar_client'] = vars['clientmode']
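
For reference, a minimal sketch of an auto-snatch script consuming the values set above, assuming Mylar exposes the mylar_* keys to the script's environment unchanged (the key names are taken from the assignments in this hunk):

#!/usr/bin/env python
# sketch of an auto-snatch post-processing script; assumes the mylar_* keys set
# above are exported to this script's environment as-is
import os

comicid = os.environ.get('mylar_comicid')                     # 'None' for one-offs per this commit
issueid = os.environ.get('mylar_issueid')                     # 'None' for one-offs per this commit
release_files = os.environ.get('mylar_release_files', '')
pack_numbers = os.environ.get('mylar_release_pack_numbers')   # only present when pack numbers exist
method = os.environ.get('mylar_method')
client = os.environ.get('mylar_client')

files = [f for f in release_files.split('|') if f]
print('Processing %d file(s) for comicid=%s issueid=%s via %s/%s' % (
      len(files), comicid, issueid, client, method))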
@ -3349,9 +3357,10 @@ def stupidchk():
mylar.EN_OOMICS = ens[0][0]
def newznab_test(name, host, ssl, apikey):
params = {'t': 'caps',
from xml.dom.minidom import parseString, Element
params = {'t': 'search',
'apikey': apikey,
'o': json}
'o': 'xml'}
if host[:-1] == '/':
host = host + 'api'
@ -3364,7 +3373,21 @@ def newznab_test(name, host, ssl, apikey):
logger.warn('Unable to connect: %s' % e)
return
else:
logger.info('Connected - Status code returned: %s' % r.status_code)
try:
data = parseString(r.content)
except Exception as e:
logger.warn('error %s' % e)
try:
error_code = data.getElementsByTagName('error')[0].attributes['code'].value
except Exception as e:
logger.info('Connected - Status code returned: %s' % r.status_code)
return True
else:
code = error_code
description = data.getElementsByTagName('error')[0].attributes['description'].value
logger.info('[ERROR:%s] - %s' % (code, description))
return False
def get_free_space(folder):
min_threshold = 100000000 #threshold for minimum amount of freespace available (#100mb)
@ -3634,6 +3657,46 @@ def publisherImages(publisher):
return comicpublisher
def lookupthebitches(filelist, folder, nzbname, nzbid, prov, hash, pulldate):
import db
myDB = db.DBConnection()
watchlist = listLibrary()
matchlist = []
#get the weeknumber/year for the pulldate
dt = datetime.datetime.strptime(pulldate, '%Y-%m-%d')
weeknumber = dt.strftime("%U")
year = dt.strftime("%Y")
for f in filelist:
file = re.sub(folder, '', f).strip()
pp = mylar.filechecker.FileChecker(justparse=True, file=file)
parsedinfo = pp.listFiles()
if parsedinfo['parse_status'] == 'success':
dyncheck = re.sub('[\|\s]', '', parsedinfo['dynamic_name'].lower()).strip()
check = myDB.selectone('SELECT * FROM weekly WHERE DynamicName=? AND weeknumber=? AND year=? AND STATUS<>"Downloaded"', [dyncheck, weeknumber, year]).fetchone()
if check is not None:
logger.fdebug('[%s] found match: %s #%s' % (file, check['COMIC'], check['ISSUE']))
matchlist.append({'comicname': check['COMIC'],
'issue': check['ISSUE'],
'comicid': check['ComicID'],
'issueid': check['IssueID'],
'dynamicname': check['DynamicName']})
else:
logger.fdebug('[%s] unable to match to the pull: %s' % (file, parsedinfo))
if len(matchlist) > 0:
for x in matchlist:
if all([x['comicid'] not in watchlist, mylar.CONFIG.PACK_0DAY_WATCHLIST_ONLY is False]):
oneoff = True
mode = 'pullwant'
elif all([x['comicid'] not in watchlist, mylar.CONFIG.PACK_0DAY_WATCHLIST_ONLY is True]):
continue
else:
oneoff = False
mode = 'want'
mylar.updater.nzblog(x['issueid'], nzbname, x['comicname'], id=nzbid, prov=prov, oneoff=oneoff)
mylar.updater.foundsearch(x['comicid'], x['issueid'], mode=mode, provider=prov, hash=hash)
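
A minimal usage sketch for the new helper (mirroring the call added to search.py later in this commit; all argument values are illustrative):

from mylar import helpers

# illustrative values - in this commit, searcher() passes the torrent client's
# file list/folder (rcheck) plus the snatched entry's store date as the pulldate
helpers.lookupthebitches(filelist=['/downloads/0-day/Some.Comic.001.2018.cbz'],
                         folder='/downloads/0-day',
                         nzbname='0-Day.Comics.Pack.02.21.2018',
                         nzbid='12345',
                         prov='32P',
                         hash='abcdef0123456789abcdef0123456789abcdef01',
                         pulldate='2018-02-21')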
def file_ops(path,dst,arc=False,one_off=False):
# # path = source path + filename
# # dst = destination path + filename

View File

@ -21,12 +21,13 @@ import logger
class Process(object):
def __init__(self, nzb_name, nzb_folder, failed=False, issueid=None, comicid=None):
def __init__(self, nzb_name, nzb_folder, failed=False, issueid=None, comicid=None, apicall=False):
self.nzb_name = nzb_name
self.nzb_folder = nzb_folder
self.failed = failed
self.issueid = issueid
self.comicid = comicid
self.apicall = apicall
def post_process(self):
if self.failed == '0':
@ -38,8 +39,8 @@ class Process(object):
retry_outside = False
if self.failed is False:
PostProcess = mylar.PostProcessor.PostProcessor(self.nzb_name, self.nzb_folder, self.issueid, queue=queue, comicid=self.comicid)
if any([self.nzb_name == 'Manual Run', self.nzb_name == 'Manual+Run', self.issueid is not None]):
PostProcess = mylar.PostProcessor.PostProcessor(self.nzb_name, self.nzb_folder, self.issueid, queue=queue, comicid=self.comicid, apicall=self.apicall)
if any([self.nzb_name == 'Manual Run', self.nzb_name == 'Manual+Run', self.apicall is True, self.issueid is not None]):
threading.Thread(target=PostProcess.Process).start()
else:
thread_ = threading.Thread(target=PostProcess.Process, name="Post-Processing")

View File

@ -569,6 +569,12 @@ def torrentdbsearch(seriesname, issue, comicid=None, nzbprov=None, oneoff=False)
else:
tsearch = tsearch_seriesname + "%"
if seriesname == '0-Day Comics Pack - %s' % (issue[:4]):
#call the helper to get the month
tsearch += 'vol%s' % issue[5:7]
tsearch += '%'
tsearch += '#%s' % issue[8:10]
tsearch += '%'
logger.fdebug('tsearch : ' + tsearch)
AS_Alt = []
tresults = []
@ -581,7 +587,7 @@ def torrentdbsearch(seriesname, issue, comicid=None, nzbprov=None, oneoff=False)
logger.fdebug('seriesname_alt:' + str(seriesname_alt))
if seriesname_alt is None or seriesname_alt == 'None':
if tresults is None:
if not tresults:
logger.fdebug('no Alternate name given. Aborting search.')
return "no results"
else:
@ -618,8 +624,8 @@ def torrentdbsearch(seriesname, issue, comicid=None, nzbprov=None, oneoff=False)
if mylar.CONFIG.ENABLE_PUBLIC and nzbprov == 'Public Torrents':
tresults += myDB.select("SELECT * FROM rssdb WHERE Title like ? AND (Site='DEM' OR Site='WWT')", [AS_Alternate])
if tresults is None:
logger.fdebug('torrent search returned no results for ' + seriesname)
if not tresults:
logger.fdebug('torrent search returned no results for %s' % seriesname)
return "no results"
extensions = ('cbr', 'cbz')

View File

@ -82,10 +82,10 @@ def search_init(ComicName, IssueNumber, ComicYear, SeriesYear, Publisher, IssueD
if mode == 'pullwant' or IssueID is None:
#one-off the download.
logger.fdebug('One-Off Search parameters:')
logger.fdebug("ComicName: " + ComicName)
logger.fdebug("Issue: " + str(IssueNumber))
logger.fdebug("Year: " + str(ComicYear))
logger.fdebug("IssueDate:" + str(IssueDate))
logger.fdebug('ComicName: %s' % ComicName)
logger.fdebug('Issue: %s' % IssueNumber)
logger.fdebug('Year: %s' %ComicYear)
logger.fdebug('IssueDate: %s' % IssueDate)
oneoff = True
if SARC:
logger.fdebug("Story-ARC Search parameters:")
@ -183,52 +183,53 @@ def search_init(ComicName, IssueNumber, ComicYear, SeriesYear, Publisher, IssueD
searchcnt = 2 #set the searchcnt to 2 (api)
srchloop = 2 #start the counter at api, so it will exit without running RSS
intIss = helpers.issuedigits(IssueNumber)
iss = IssueNumber
if u'\xbd' in IssueNumber:
findcomiciss = '0.5'
elif u'\xbc' in IssueNumber:
findcomiciss = '0.25'
elif u'\xbe' in IssueNumber:
findcomiciss = '0.75'
elif u'\u221e' in IssueNumber:
#issnum = utf-8 will encode the infinity symbol without any help
findcomiciss = 'infinity' # set 9999999999 for integer value of issue
else:
findcomiciss = iss
if IssueNumber is not None:
intIss = helpers.issuedigits(IssueNumber)
iss = IssueNumber
if u'\xbd' in IssueNumber:
findcomiciss = '0.5'
elif u'\xbc' in IssueNumber:
findcomiciss = '0.25'
elif u'\xbe' in IssueNumber:
findcomiciss = '0.75'
elif u'\u221e' in IssueNumber:
#issnum = utf-8 will encode the infinity symbol without any help
findcomiciss = 'infinity' # set 9999999999 for integer value of issue
else:
findcomiciss = iss
#determine the amount of loops here
fcs = 0
c_alpha = None
dsp_c_alpha = None
c_number = None
c_num_a4 = None
while fcs < len(findcomiciss):
#take first occurance of alpha in string and carry it through
if findcomiciss[fcs].isalpha():
c_alpha = findcomiciss[fcs:].rstrip()
c_number = findcomiciss[:fcs].rstrip()
break
elif '.' in findcomiciss[fcs]:
c_number = findcomiciss[:fcs].rstrip()
c_num_a4 = findcomiciss[fcs+1:].rstrip()
#if decimal seperates numeric from alpha (ie - 7.INH)
#don't give calpha a value or else will seperate with a space further down
#assign it to dsp_c_alpha so that it can be displayed for debugging.
if not c_num_a4.isdigit():
dsp_c_alpha = c_num_a4
else:
c_number = str(c_number) + '.' + str(c_num_a4)
break
fcs+=1
logger.fdebug("calpha/cnumber: " + str(dsp_c_alpha) + " / " + str(c_number))
#determine the amount of loops here
fcs = 0
c_alpha = None
dsp_c_alpha = None
c_number = None
c_num_a4 = None
while fcs < len(findcomiciss):
#take first occurance of alpha in string and carry it through
if findcomiciss[fcs].isalpha():
c_alpha = findcomiciss[fcs:].rstrip()
c_number = findcomiciss[:fcs].rstrip()
break
elif '.' in findcomiciss[fcs]:
c_number = findcomiciss[:fcs].rstrip()
c_num_a4 = findcomiciss[fcs+1:].rstrip()
#if decimal seperates numeric from alpha (ie - 7.INH)
#don't give calpha a value or else will seperate with a space further down
#assign it to dsp_c_alpha so that it can be displayed for debugging.
if not c_num_a4.isdigit():
dsp_c_alpha = c_num_a4
else:
c_number = str(c_number) + '.' + str(c_num_a4)
break
fcs+=1
logger.fdebug("calpha/cnumber: " + str(dsp_c_alpha) + " / " + str(c_number))
if c_number is None:
c_number = findcomiciss # if it's None, means no special alphas or decimals
if c_number is None:
c_number = findcomiciss # if it's None, means no special alphas or decimals
if '.' in c_number:
decst = c_number.find('.')
c_number = c_number[:decst].rstrip()
if '.' in c_number:
decst = c_number.find('.')
c_number = c_number[:decst].rstrip()
while (srchloop <= searchcnt):
logger.fdebug('srchloop: %s' % srchloop)
@ -241,12 +242,15 @@ def search_init(ComicName, IssueNumber, ComicYear, SeriesYear, Publisher, IssueD
if srchloop == 1: searchmode = 'rss' #order of ops - this will be used first.
elif srchloop == 2: searchmode = 'api'
if len(c_number) == 1:
cmloopit = 3
elif len(c_number) == 2:
cmloopit = 2
else:
if '0-Day' in ComicName:
cmloopit = 1
else:
if len(c_number) == 1:
cmloopit = 3
elif len(c_number) == 2:
cmloopit = 2
else:
cmloopit = 1
if findit['status'] is True:
@ -338,7 +342,11 @@ def search_init(ComicName, IssueNumber, ComicYear, SeriesYear, Publisher, IssueD
if searchprov == 'newznab':
searchprov = newznab_host[0].rstrip()
if manual is not True:
logger.info('Could not find Issue ' + IssueNumber + ' of ' + ComicName + ' (' + str(SeriesYear) + ') using ' + str(searchprov) + ' [' + str(searchmode) + ']')
if IssueNumber is not None:
issuedisplay = IssueNumber
else:
issuedisplay = StoreDate[5:]
logger.info('Could not find Issue %s of %s (%s) using %s [%s]' % (issuedisplay, ComicName, SeriesYear, searchprov, searchmode))
prov_count+=1
if findit['status'] is True:
@ -430,7 +438,15 @@ def NZB_SEARCH(ComicName, IssueNumber, ComicYear, SeriesYear, Publisher, IssueDa
tmpprov = name_newznab + ' (' + nzbprov + ')'
else:
tmpprov = nzbprov
logger.info(u"Shhh be very quiet...I'm looking for " + ComicName + " issue: " + IssueNumber + " (" + str(ComicYear) + ") using " + str(tmpprov))
if IssueNumber is not None:
issuedisplay = IssueNumber
else:
issuedisplay = StoreDate[5:]
if '0-Day Comics Pack' in ComicName:
logger.info('Shhh be very quiet...I\'m looking for %s using %s.' % (ComicName, tmpprov))
else:
logger.info('Shhh be very quiet...I\'m looking for %s issue: %s (%s) using %s.' % (ComicName, issuedisplay, ComicYear, tmpprov))
#this will completely render the api search results empty. Needs to get fixed.
@ -457,21 +473,25 @@ def NZB_SEARCH(ComicName, IssueNumber, ComicYear, SeriesYear, Publisher, IssueDa
cm = re.sub('\s+', ' ', cm)
cm = re.sub(" ", "%20", str(cm))
intIss = helpers.issuedigits(IssueNumber)
iss = IssueNumber
if u'\xbd' in IssueNumber:
findcomiciss = '0.5'
elif u'\xbc' in IssueNumber:
findcomiciss = '0.25'
elif u'\xbe' in IssueNumber:
findcomiciss = '0.75'
elif u'\u221e' in IssueNumber:
#issnum = utf-8 will encode the infinity symbol without any help
findcomiciss = 'infinity' # set 9999999999 for integer value of issue
else:
findcomiciss = iss
if IssueNumber is not None:
intIss = helpers.issuedigits(IssueNumber)
iss = IssueNumber
if u'\xbd' in IssueNumber:
findcomiciss = '0.5'
elif u'\xbc' in IssueNumber:
findcomiciss = '0.25'
elif u'\xbe' in IssueNumber:
findcomiciss = '0.75'
elif u'\u221e' in IssueNumber:
#issnum = utf-8 will encode the infinity symbol without any help
findcomiciss = 'infinity' # set 9999999999 for integer value of issue
else:
findcomiciss = iss
isssearch = str(findcomiciss)
else:
isssearch = None
isssearch = str(findcomiciss)
comsearch = cm
#origcmloopit = cmloopit
findcount = 1 # this could be a loop in the future possibly
@ -503,27 +523,31 @@ def NZB_SEARCH(ComicName, IssueNumber, ComicYear, SeriesYear, Publisher, IssueDa
break
# here we account for issue pattern variations
if seperatealpha == "yes":
isssearch = str(c_number) + "%20" + str(c_alpha)
if cmloopit == 3:
comsearch = comsrc + "%2000" + str(isssearch) #+ "%20" + str(filetype)
issdig = '00'
elif cmloopit == 2:
comsearch = comsrc + "%200" + str(isssearch) #+ "%20" + str(filetype)
issdig = '0'
elif cmloopit == 1:
comsearch = comsrc + "%20" + str(isssearch) #+ "%20" + str(filetype)
issdig = ''
if IssueNumber is not None:
if seperatealpha == "yes":
isssearch = str(c_number) + "%20" + str(c_alpha)
if cmloopit == 3:
comsearch = comsrc + "%2000" + str(isssearch) #+ "%20" + str(filetype)
issdig = '00'
elif cmloopit == 2:
comsearch = comsrc + "%200" + str(isssearch) #+ "%20" + str(filetype)
issdig = '0'
elif cmloopit == 1:
comsearch = comsrc + "%20" + str(isssearch) #+ "%20" + str(filetype)
issdig = ''
else:
foundc['status'] = False
done = True
break
mod_isssearch = str(issdig) + str(isssearch)
else:
foundc['status'] = False
done = True
break
mod_isssearch = str(issdig) + str(isssearch)
comsearch = StoreDate
mod_isssearch = StoreDate
#--- this is basically for RSS Feeds ---
#logger.fdebug('RSS Check: ' + str(RSS))
#logger.fdebug('nzbprov: ' + str(nzbprov))
#logger.fdebug('comicid: ' + str(ComicID))
#logger.fdebug('RSS Check: %s' % RSS)
#logger.fdebug('nzbprov: %s' % nzbprov)
#logger.fdebug('comicid: %s' % ComicID)
if RSS == "yes":
if nzbprov == '32P' or nzbprov == 'Public Torrents':
cmname = re.sub("%20", " ", str(comsrc))
@ -546,7 +570,10 @@ def NZB_SEARCH(ComicName, IssueNumber, ComicYear, SeriesYear, Publisher, IssueDa
bb = "no results"
if nzbprov == '32P':
if all([mylar.CONFIG.MODE_32P == 1,mylar.CONFIG.ENABLE_32P]):
searchterm = {'series': ComicName, 'id': ComicID, 'issue': findcomiciss, 'volume': ComicVersion, 'publisher': Publisher}
if ComicName[:17] == '0-Day Comics Pack':
searchterm = {'series': ComicName, 'issue': StoreDate[8:10], 'volume': StoreDate[5:7]}
else:
searchterm = {'series': ComicName, 'id': ComicID, 'issue': findcomiciss, 'volume': ComicVersion, 'publisher': Publisher}
#first we find the id on the serieslist of 32P
#then we call the ajax against the id and issue# and volume (if exists)
a = auth32p.info32p(searchterm=searchterm)
@ -706,11 +733,23 @@ def NZB_SEARCH(ComicName, IssueNumber, ComicYear, SeriesYear, Publisher, IssueDa
done = False
log2file = ""
pack0day = False
if not bb == "no results":
for entry in bb['entries']:
#logger.fdebug('entry: %s' % entry) #<--- uncomment this to see what the search result(s) are
#brief match here against 32p since it returns the direct issue number
if nzbprov == '32P' and RSS == 'no':
if nzbprov == '32P' and entry['title'][:17] == '0-Day Comics Pack':
logger.info('[32P-0DAY] 0-Day Comics Pack Discovered. Analyzing the pack info...')
if len(bb['entries']) == 1 or pack0day is True:
logger.info('[32P-0DAY] Only one pack for the week available. Selecting this by default.')
else:
logger.info('[32P-0DAY] More than one pack for the week is available...')
logger.info('bb-entries: %s' % bb['entries'])
if bb['entries'][1]['int_pubdate'] >= bb['int_pubdate']:
logger.info('[32P-0DAY] 2nd Pack is newest. Snatching that...')
pack0day = True
continue
elif nzbprov == '32P' and RSS == 'no':
if entry['pack'] == '0':
if helpers.issuedigits(entry['issues']) == intIss:
logger.fdebug('32P direct match to issue # : ' + str(entry['issues']))
@ -778,7 +817,7 @@ def NZB_SEARCH(ComicName, IssueNumber, ComicYear, SeriesYear, Publisher, IssueDa
logger.fdebug('comsize_b: %s' % comsize_b)
#file restriction limitation here
#only works with TPSE (done here) & 32P (done in rsscheck) & Experimental (has it embeded in search and rss checks)
if nzbprov == 'Public Torrents' or (nzbprov == '32P' and RSS == 'no'):
if nzbprov == 'Public Torrents' or (nzbprov == '32P' and RSS == 'no' and entry['title'][:17] != '0-Day Comics Pack'):
if nzbprov == 'Public Torrents':
if 'cbr' in entry['title'].lower():
format_type = 'cbr'
@ -810,22 +849,23 @@ def NZB_SEARCH(ComicName, IssueNumber, ComicYear, SeriesYear, Publisher, IssueDa
logger.fdebug('Size of file cannot be retrieved. Ignoring size-comparison and continuing.')
#comsize_b = 0
else:
comsize_m = helpers.human_size(comsize_b)
logger.fdebug("size given as: " + str(comsize_m))
#----size constraints.
#if it's not within size constaints - dump it now and save some time.
if mylar.CONFIG.USE_MINSIZE:
conv_minsize = helpers.human2bytes(mylar.CONFIG.MINSIZE + "M")
logger.fdebug("comparing Min threshold " + str(conv_minsize) + " .. to .. nzb " + str(comsize_b))
if int(conv_minsize) > int(comsize_b):
logger.fdebug("Failure to meet the Minimum size threshold - skipping")
continue
if mylar.CONFIG.USE_MAXSIZE:
conv_maxsize = helpers.human2bytes(mylar.CONFIG.MAXSIZE + "M")
logger.fdebug("comparing Max threshold " + str(conv_maxsize) + " .. to .. nzb " + str(comsize_b))
if int(comsize_b) > int(conv_maxsize):
logger.fdebug("Failure to meet the Maximium size threshold - skipping")
continue
if entry['title'][:17] != '0-Day Comics Pack':
comsize_m = helpers.human_size(comsize_b)
logger.fdebug("size given as: " + str(comsize_m))
#----size constraints.
#if it's not within size constaints - dump it now and save some time.
if mylar.CONFIG.USE_MINSIZE:
conv_minsize = helpers.human2bytes(mylar.CONFIG.MINSIZE + "M")
logger.fdebug("comparing Min threshold " + str(conv_minsize) + " .. to .. nzb " + str(comsize_b))
if int(conv_minsize) > int(comsize_b):
logger.fdebug("Failure to meet the Minimum size threshold - skipping")
continue
if mylar.CONFIG.USE_MAXSIZE:
conv_maxsize = helpers.human2bytes(mylar.CONFIG.MAXSIZE + "M")
logger.fdebug("comparing Max threshold " + str(conv_maxsize) + " .. to .. nzb " + str(comsize_b))
if int(comsize_b) > int(conv_maxsize):
logger.fdebug("Failure to meet the Maximium size threshold - skipping")
continue
#---- date constaints.
# if the posting date is prior to the publication date, dump it and save the time.
@ -1272,14 +1312,16 @@ def NZB_SEARCH(ComicName, IssueNumber, ComicYear, SeriesYear, Publisher, IssueDa
logger.fdebug('[PACK-QUEUE] Invalid Pack.')
#find the pack range.
pack_issuelist = entry['issues']
issueid_info = helpers.issue_find_ids(ComicName, ComicID, pack_issuelist, IssueNumber)
if issueid_info['valid'] == True:
logger.info('Issue Number ' + IssueNumber + ' exists within pack. Continuing.')
else:
logger.fdebug('Issue Number ' + IssueNumber + ' does NOT exist within this pack. Skipping')
continue
pack_issuelist = None
issueid_info = None
if not entry['title'].startswith('0-Day Comics Pack'):
pack_issuelist = entry['issues']
issueid_info = helpers.issue_find_ids(ComicName, ComicID, pack_issuelist, IssueNumber)
if issueid_info['valid'] == True:
logger.info('Issue Number ' + IssueNumber + ' exists within pack. Continuing.')
else:
logger.fdebug('Issue Number ' + IssueNumber + ' does NOT exist within this pack. Skipping')
continue
#pack support.
nowrite = False
nzbid = generate_id(nzbprov, entry['link'])
@ -1741,13 +1783,17 @@ def NZB_SEARCH(ComicName, IssueNumber, ComicYear, SeriesYear, Publisher, IssueDa
foundcomic.append("yes")
if mylar.COMICINFO[0]['pack']:
issinfo = mylar.COMICINFO[0]['pack_issuelist']
#we need to get EVERY issue ID within the pack and update the log to reflect that they're being downloaded via a pack.
logger.fdebug("Found matching comic within pack...preparing to send to Updater with IssueIDs: " + str(issueid_info) + " and nzbname of " + str(nzbname))
#because packs need to have every issue that's not already Downloaded in a Snatched status, throw it to the updater here as well.
for isid in issinfo['issues']:
updater.nzblog(isid['issueid'], nzbname, ComicName, SARC=SARC, IssueArcID=IssueArcID, id=nzbid, prov=tmpprov, oneoff=oneoff)
updater.foundsearch(ComicID, isid['issueid'], mode='series', provider=tmpprov)
notify_snatch(nzbname, sent_to, mylar.COMICINFO[0]['modcomicname'], mylar.COMICINFO[0]['comyear'], mylar.COMICINFO[0]['pack_numbers'], nzbprov)
if issinfo is not None:
#we need to get EVERY issue ID within the pack and update the log to reflect that they're being downloaded via a pack.
logger.fdebug("Found matching comic within pack...preparing to send to Updater with IssueIDs: " + str(issueid_info) + " and nzbname of " + str(nzbname))
#because packs need to have every issue that's not already Downloaded in a Snatched status, throw it to the updater here as well.
for isid in issinfo['issues']:
updater.nzblog(isid['issueid'], nzbname, ComicName, SARC=SARC, IssueArcID=IssueArcID, id=nzbid, prov=tmpprov, oneoff=oneoff)
updater.foundsearch(ComicID, isid['issueid'], mode='series', provider=tmpprov)
notify_snatch(nzbname, sent_to, mylar.COMICINFO[0]['modcomicname'], mylar.COMICINFO[0]['comyear'], mylar.COMICINFO[0]['pack_numbers'], nzbprov)
else:
notify_snatch(nzbname, sent_to, mylar.COMICINFO[0]['modcomicname'], mylar.COMICINFO[0]['comyear'], None, nzbprov)
else:
if alt_nzbname is None or alt_nzbname == '':
logger.fdebug("Found matching comic...preparing to send to Updater with IssueID: " + str(IssueID) + " and nzbname: " + str(nzbname))
@ -2041,7 +2087,6 @@ def searchIssueIDList(issuelist):
AllowPacks = False
foundNZB, prov = search_init(comic['ComicName'], issue['Issue_Number'], str(IssueYear), comic['ComicYear'], Publisher, issue['IssueDate'], issue['ReleaseDate'], issue['IssueID'], AlternateSearch, UseFuzzy, ComicVersion, SARC=None, IssueArcID=None, mode=mode, ComicID=issue['ComicID'], filesafe=comic['ComicName_Filesafe'], allow_packs=AllowPacks)
logger.info('foundNZB: %s' % foundNZB)
if foundNZB['status'] is True:
updater.foundsearch(ComicID=issue['ComicID'], IssueID=issue['IssueID'], mode=mode, provider=prov, hash=foundNZB['info']['t_hash'])
else:
@ -2207,7 +2252,10 @@ def searcher(nzbprov, nzbname, comicinfo, link, IssueID, ComicID, tmpprov, direc
tmpprov = re.sub('Public Torrents', nzbprov, tmpprov)
if comicinfo[0]['pack'] == True:
logger.info(u"Found " + ComicName + " (" + str(comyear) + ") issue: " + str(IssueNumber) + " using " + str(tmpprov) + " within a pack containing issues: " + comicinfo[0]['pack_numbers'])
if '0-Day Comics Pack' not in comicinfo[0]['ComicName']:
logger.info('Found %s (%s) issue: %s using %s within a pack containing issues %s' % (ComicName, comyear, IssueNumber, tmpprov, comicinfo[0]['pack_numbers']))
else:
logger.info('Found %s using %s for %s' % (ComicName, tmpprov, comicinfo[0]['IssueDate']))
else:
if any([oneoff is True, IssueID is None]):
#one-off information
@ -2493,6 +2541,7 @@ def searcher(nzbprov, nzbname, comicinfo, link, IssueID, ComicID, tmpprov, direc
#torrent_info{'folder','name',['total_filesize','label','hash','files','time_started'}
t_hash = rcheck['hash']
rcheck.update({'torrent_filename': nzbname})
if any([mylar.USE_RTORRENT, mylar.USE_DELUGE]) and mylar.CONFIG.AUTO_SNATCH:
mylar.SNATCHED_QUEUE.put(rcheck['hash'])
elif any([mylar.USE_RTORRENT, mylar.USE_DELUGE]) and mylar.CONFIG.LOCAL_TORRENT_PP:
@ -2504,8 +2553,13 @@ def searcher(nzbprov, nzbname, comicinfo, link, IssueID, ComicID, tmpprov, direc
pnumbers = None
plist = None
else:
pnumbers = '|'.join(comicinfo[0]['pack_numbers'])
plist= '|'.join(comicinfo[0]['pack_issuelist'])
if '0-Day Comics Pack' in ComicName:
helpers.lookupthebitches(rcheck['files'], rcheck['folder'], nzbname, nzbid, nzbprov, t_hash, comicinfo[0]['IssueDate'])
pnumbers = None
plist = None
else:
pnumbers = '|'.join(comicinfo[0]['pack_numbers'])
plist = '|'.join(comicinfo[0]['pack_issuelist'])
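# The pipe-joined pack values above, like the rest of snatch_vars assembled below, are
# assumed to be handed to the auto-snatch script as temporary per-call environment
# values rather than by mutating process-wide globals. A hedged, illustrative sketch
# (variable names are examples only):
import os
import subprocess

def _run_snatch_script_sketch(script, snatch_vars):
    env = os.environ.copy()  # temporary copy - nothing global is overwritten
    env['mylar_comicname'] = snatch_vars['comicinfo']['comicname']
    env['mylar_issuenumber'] = str(snatch_vars['comicinfo']['issuenumber'])
    return subprocess.call([script], env=env)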
snatch_vars = {'comicinfo': {'comicname': ComicName,
'volume': comicinfo[0]['ComicVolume'],
'issuenumber': IssueNumber,
@ -2766,7 +2820,10 @@ def searcher(nzbprov, nzbname, comicinfo, link, IssueID, ComicID, tmpprov, direc
def notify_snatch(nzbname, sent_to, modcomicname, comyear, IssueNumber, nzbprov):
snline = modcomicname + ' (' + comyear + ') - Issue #' + IssueNumber + ' snatched!'
if IssueNumber is not None:
snline = modcomicname + ' (' + comyear + ') - Issue #' + IssueNumber + ' snatched!'
else:
snline = modcomicname + ' (' + comyear + ') snatched!'
if mylar.CONFIG.PROWL_ENABLED and mylar.CONFIG.PROWL_ONSNATCH:
logger.info(u"Sending Prowl notification")
@ -2980,7 +3037,12 @@ def generate_id(nzbprov, link):
nzbid = os.path.splitext(link)[0].rsplit('searchresultid=',1)[1]
elif tmpid == '' or tmpid is None:
nzbid = os.path.splitext(link)[0].rsplit('/', 1)[1]
elif 'apikey' in tmpid:
else:
nzbinfo = urlparse.parse_qs(link)
nzbid = nzbinfo.get('id', None)
if nzbid is not None:
nzbid = ''.join(nzbid)
if nzbid is None:
#if apikey is passed in as a parameter and the id is in the path
findend = tmpid.find('&')
if findend == -1:
@ -2992,16 +3054,6 @@ def generate_id(nzbprov, link):
if '&id' not in tmpid or nzbid == '':
tmpid = urlparse.urlparse(link)[2]
nzbid = tmpid.rsplit('/', 1)[1]
else:
# for the geek in all of us...
st = tmpid.find('&id')
if st == -1:
nzbid = os.path.splitext(link)[0].rsplit('/', 1)[1]
else:
end = tmpid.find('&', st +1)
if end == -1:
end = len(tmpid)
nzbid = re.sub('&id=', '', tmpid[st:end]).strip()
elif nzbprov == 'Torznab':
idtmp = urlparse.urlparse(link)[4]
idpos = idtmp.find('&')
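# A small sketch of the id extraction pattern used above, assuming the id is carried in
# the link's query string (e.g. '...?id=12345&apikey=abc'); the helper name is illustrative
# and urlparse is the Python 2 module already used by this code:
def _id_from_link_sketch(link):
    qs = urlparse.parse_qs(urlparse.urlparse(link).query)
    ids = qs.get('id')
    return ''.join(ids) if ids else None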

View File

@ -790,7 +790,7 @@ def foundsearch(ComicID, IssueID, mode=None, down=None, provider=None, SARC=None
myDB.upsert("snatched", newsnatchValues, snatchedupdate)
elif mode != 'pullwant':
elif mode != 'pullwant':
if modcomicname:
IssueNum = issue['Issue_Number']
else:

View File

@ -2332,9 +2332,14 @@ class WebInterface(object):
for r in results:
rr = dict(r)
snatchit = [x['hash'] for x in chkthis if r['ISSUEID'] == x['IssueID']]
if snatchit:
logger.fdebug('[%s] Discovered previously snatched torrent not downloaded. Marking for manual auto-snatch retrieval: %s' % (r['ComicName'], ''.join(snatchit)))
rr['hash'] = ''.join(snatchit)
try:
if snatchit:
logger.fdebug('[%s] Discovered previously snatched torrent not downloaded. Marking for manual auto-snatch retrieval: %s' % (r['ComicName'], ''.join(snatchit)))
rr['hash'] = ''.join(snatchit)
else:
rr['hash'] = None
except:
rr['hash'] = None
endresults.append(rr)
results = endresults
@ -4640,55 +4645,23 @@ class WebInterface(object):
ffs = alt_search.find('##')
ffs_alt.append(alt_search[:ffs])
ffs_alt_st = str(ffs_alt[0])
logger.fdebug("ffs_alt: " + str(ffs_alt[0]))
ffs_test = alt_search.split('##')
if len(ffs_test) > 0:
logger.fdebug("ffs_test names: " + str(len(ffs_test)))
ffs_count = len(ffs_test)
n=1
while (n < ffs_count):
ffs_alt.append(ffs_test[n])
logger.fdebug("adding : " + str(ffs_test[n]))
#print("ffs_alt : " + str(ffs_alt))
ffs_alt_st = str(ffs_alt_st) + "..." + str(ffs_test[n])
n+=1
asearch = ffs_alt
else:
asearch = alt_search
# ffs_alt = []
# if '+' in alt_search:
#find first +
# ffs = alt_search.find('+')
# ffs_alt.append(alt_search[:ffs])
# ffs_alt_st = str(ffs_alt[0])
# print("ffs_alt: " + str(ffs_alt[0]))
# split the entire string by the delimter +
# ffs_test = alt_search.split('+')
# if len(ffs_test) > 0:
# print("ffs_test names: " + str(len(ffs_test)))
# ffs_count = len(ffs_test)
# n=1
# while (n < ffs_count):
# ffs_alt.append(ffs_test[n])
# print("adding : " + str(ffs_test[n]))
#print("ffs_alt : " + str(ffs_alt))
# ffs_alt_st = str(ffs_alt_st) + "..." + str(ffs_test[n])
# n+=1
# asearch = ffs_alt
# else:
# asearch = alt_search
asearch = str(alt_search)
controlValueDict = {'ComicID': ComicID}
newValues = {"ComicLocation": com_location}
#"QUALalt_vers": qual_altvers,
#"QUALScanner": qual_scanner,
#"QUALtype": qual_type,
#"QUALquality": qual_quality
#}
if asearch is not None:
if re.sub(r'\s', '', asearch) == '':
newValues['AlternateSearch'] = "None"
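# For reference, AlternateSearch accepts several names separated by '##'
# (e.g. 'Witchblade##Witch Blade' - example values only); a minimal split of that
# convention, based on the block removed above:
def _split_alternates_sketch(alt_search):
    return [name.strip() for name in alt_search.split('##') if name.strip()]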
@ -4731,22 +4704,18 @@ class WebInterface(object):
newValues['AlternateFileName'] = str(alt_filename)
#force the check/creation of directory com_location here
if os.path.isdir(str(com_location)):
logger.info(u"Validating Directory (" + str(com_location) + "). Already exists! Continuing...")
else:
logger.fdebug("Updated Directory doesn't exist! - attempting to create now.")
#try:
# os.makedirs(str(com_location))
# logger.info(u"Directory successfully created at: " + str(com_location))
#except OSError:
# logger.error(u"Could not create comicdir : " + str(com_location))
if mylar.CONFIG.CREATE_FOLDERS is True:
if mylar.CONFIG.CREATE_FOLDERS is True:
if os.path.isdir(str(com_location)):
logger.info(u"Validating Directory (" + str(com_location) + "). Already exists! Continuing...")
else:
logger.fdebug("Updated Directory doesn't exist! - attempting to create now.")
checkdirectory = filechecker.validateAndCreateDirectory(com_location, True)
if not checkdirectory:
logger.warn('Error trying to validate/create directory. Aborting this process at this time.')
return
myDB.upsert("comics", newValues, controlValueDict)
logger.fdebug('Updated Series options!')
raise cherrypy.HTTPRedirect("comicDetails?ComicID=%s" % ComicID)
comic_config.exposed = True
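# A rough sketch of what the validate/create step above is assumed to do (the real
# filechecker.validateAndCreateDirectory also handles ownership/permissions and logging):
import os

def _validate_and_create_sketch(path, create=True):
    if os.path.isdir(path):
        return True
    if not create:
        return False
    try:
        os.makedirs(path)
        return True
    except OSError:
        return False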
@ -5321,12 +5290,12 @@ class WebInterface(object):
def testnewznab(self, name, host, ssl, apikey):
result = helpers.newznab_test(name, host, ssl, apikey)
if result == '200':
return "Successfully tested %s - valid api response received" % name
if result is True:
logger.info('Successfully tested %s [%s] - valid api response received' % (name, host))
return 'Successfully tested %s!' % name
else:
logger.warn('Testing failed to %s [HOST:%s][SSL:%s][APIKEY:%s]' % (name, host, ssl))
return "Error testing newznab data"
logger.warn('Testing failed to %s [HOST:%s][SSL:%s]' % (name, host, ssl))
return 'Error - failed running test for %s' % name
testnewznab.exposed = True
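# A hedged sketch of the kind of check newznab_test() is assumed to perform: hit the
# provider's caps endpoint and treat an HTTP 200 as a valid API response (illustrative
# only; the real helpers.newznab_test may build the URL and handle errors differently):
import requests

def _newznab_test_sketch(host, apikey, ssl=True):
    url = '%s://%s/api' % ('https' if ssl else 'http', host.rstrip('/'))
    try:
        r = requests.get(url, params={'t': 'caps', 'apikey': apikey}, timeout=30, verify=bool(ssl))
        return r.status_code == 200
    except requests.exceptions.RequestException:
        return False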
@ -5381,6 +5350,11 @@ class WebInterface(object):
ti += '<tr><td><center>Seedtime: ' + torrent_info['seedtime'] + '</center></td></tr>'
ti += '</table>'
logger.info('torrent_info:%s' % torrent_info)
#commenting out the next 2 lines will return the torrent information to the screen
#fp = mylar.process.Process(torrent_info['filepath'], torrent_info['dst_folder'], issueid=torrent_info['issueid'], failed=failed)
#fp.post_process()
else:
torrent_name = 'Not Found'
ti = 'Torrent not found (' + str(torrent_hash)
@ -5402,6 +5376,13 @@ class WebInterface(object):
get_the_hash.exposed = True
def download_0day(self, week):
logger.info('Now attempting to search for 0-day pack for week: %s' % week)
#week contains weekinfo['midweek'] = YYYY-mm-dd of Wednesday of the given week's pull
foundcom, prov = search.search_init('0-Day Comics Pack - %s.%s' % (week[:4],week[5:]), None, week[:4], None, None, week, week, None, allow_packs=True, oneoff=True)
download_0day.exposed = True
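# Usage sketch: week is taken from weekinfo['midweek'] (the Wednesday of the pull,
# e.g. '2018-03-21' - example date only), so the call above runs a one-off, pack-enabled
# search for a release titled '0-Day Comics Pack - 2018.03-21'.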
def test_32p(self):
import auth32p
tmp = auth32p.info32p(test=True)