FIX: (#1455)(#1452) Added search_32p option to config.ini to indicate where Mylar does the initial check for whether a series exists on 32P before retrieving/comparing the search results (0 = WS, 1 = 32P). The default of 0 fixes the auth-mode issues users were having. FIX: One-offs from the pull-list would not snatch in some cases

evilhero 2016-11-11 11:52:24 -05:00
parent 7c7aef0d38
commit a774a3ba0b
4 changed files with 140 additions and 96 deletions
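
As a rough illustration of what the new setting controls, here is a minimal Python sketch (not the Mylar implementation) of the two lookup paths, with the endpoints and parameters taken from the auth32p.py changes further down; cookie handling, throttling and error checks are left out:

import requests
from bs4 import BeautifulSoup

def series_lookup(series_search, search_32p=0, session=None):
    # search_32p = 0 (default): ask the WS helper which torrent groupings exist on 32P
    # search_32p = 1: query 32P directly (needs an authenticated session with the 32P cookies loaded)
    s = session or requests.Session()
    if not search_32p:
        r = s.get('https://walksoftly.itsaninja.party/serieslist.php',
                  params={'series': series_search}, verify=True)
        return r.json()  # assumed to be a list of {'id': ..., 'series': ...} groupings
    r = s.get('https://32pag.es/torrents.php',
              params={'action': 'serieslist', 'filter': series_search}, verify=True)
    soup = BeautifulSoup(r.content, 'html.parser')
    # the committed code additionally filters the anchors on data-type="torrentgroup"
    return soup.find_all('a', attrs={'class': 'object-qtip'})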

View File

@@ -383,6 +383,7 @@ TPSE_PROXY = None
TPSE_VERIFY = True
ENABLE_32P = 0
SEARCH_32P = 0 #0 = use WS to grab torrent groupings, #1 = use 32P to grab torrent groupings
MODE_32P = None #0 = legacymode, #1 = authmode
KEYS_32P = None
RSSFEED_32P = None
@@ -477,7 +478,7 @@ def initialize():
ENABLE_META, CMTAGGER_PATH, CBR2CBZ_ONLY, CT_TAG_CR, CT_TAG_CBL, CT_CBZ_OVERWRITE, UNRAR_CMD, CT_SETTINGSPATH, CMTAG_START_YEAR_AS_VOLUME, UPDATE_ENDED, INDIE_PUB, BIGGIE_PUB, IGNORE_HAVETOTAL, SNATCHED_HAVETOTAL, PROVIDER_ORDER, TMP_PROV, \
dbUpdateScheduler, searchScheduler, RSSScheduler, WeeklyScheduler, VersionScheduler, FolderMonitorScheduler, \
ALLOW_PACKS, ENABLE_TORRENTS, TORRENT_DOWNLOADER, MINSEEDS, USE_WATCHDIR, TORRENT_LOCAL, LOCAL_WATCHDIR, TORRENT_SEEDBOX, SEEDBOX_HOST, SEEDBOX_PORT, SEEDBOX_USER, SEEDBOX_PASS, SEEDBOX_WATCHDIR, \
ENABLE_RSS, RSS_CHECKINTERVAL, RSS_LASTRUN, FAILED_DOWNLOAD_HANDLING, FAILED_AUTO, ENABLE_TORRENT_SEARCH, ENABLE_TPSE, TPSE_PROXY, TPSE_VERIFY, ENABLE_32P, MODE_32P, KEYS_32P, RSSFEED_32P, USERNAME_32P, PASSWORD_32P, AUTHKEY_32P, PASSKEY_32P, FEEDINFO_32P, VERIFY_32P, SNATCHEDTORRENT_NOTIFY, \
ENABLE_RSS, RSS_CHECKINTERVAL, RSS_LASTRUN, FAILED_DOWNLOAD_HANDLING, FAILED_AUTO, ENABLE_TORRENT_SEARCH, ENABLE_TPSE, TPSE_PROXY, TPSE_VERIFY, ENABLE_32P, SEARCH_32P, MODE_32P, KEYS_32P, RSSFEED_32P, USERNAME_32P, PASSWORD_32P, AUTHKEY_32P, PASSKEY_32P, FEEDINFO_32P, VERIFY_32P, SNATCHEDTORRENT_NOTIFY, \
PROWL_ENABLED, PROWL_PRIORITY, PROWL_KEYS, PROWL_ONSNATCH, NMA_ENABLED, NMA_APIKEY, NMA_PRIORITY, NMA_ONSNATCH, PUSHOVER_ENABLED, PUSHOVER_PRIORITY, PUSHOVER_APIKEY, PUSHOVER_USERKEY, PUSHOVER_ONSNATCH, BOXCAR_ENABLED, BOXCAR_ONSNATCH, BOXCAR_TOKEN, \
PUSHBULLET_ENABLED, PUSHBULLET_APIKEY, PUSHBULLET_DEVICEID, PUSHBULLET_ONSNATCH, LOCMOVE, NEWCOM_DIR, FFTONEWCOM_DIR, \
PREFERRED_QUALITY, MOVE_FILES, RENAME_FILES, LOWERCASE_FILENAMES, USE_MINSIZE, MINSIZE, USE_MAXSIZE, MAXSIZE, CORRECT_METADATA, FOLDER_FORMAT, FILE_FORMAT, REPLACE_CHAR, REPLACE_SPACES, ADD_TO_CSV, CVINFO, LOG_LEVEL, POST_PROCESSING, POST_PROCESSING_SCRIPT, FILE_OPTS, SEARCH_DELAY, GRABBAG_DIR, READ2FILENAME, SEND2READ, TAB_ENABLE, TAB_HOST, TAB_USER, TAB_PASS, TAB_DIRECTORY, STORYARCDIR, COPY2ARCDIR, CVURL, CHECK_FOLDER, ENABLE_CHECK_FOLDER, \
@@ -615,7 +616,7 @@ def initialize():
FFTONEWCOM_DIR = bool(check_setting_int(CFG, 'General', 'fftonewcom_dir', 0))
if FFTONEWCOM_DIR is None:
FFTONEWCOM_DIR = 0
HIGHCOUNT = check_setting_str(CFG, 'General', 'highcount', '')
HIGHCOUNT = check_setting_int(CFG, 'General', 'highcount', 0)
if not HIGHCOUNT: HIGHCOUNT = 0
READ2FILENAME = bool(check_setting_int(CFG, 'General', 'read2filename', 0))
SEND2READ = bool(check_setting_int(CFG, 'General', 'send2read', 0))
@@ -731,6 +732,7 @@ def initialize():
print 'Converting CBT settings to 32P - ENABLE_32P: ' + str(ENABLE_32P)
else:
ENABLE_32P = bool(check_setting_int(CFG, 'Torrents', 'enable_32p', 0))
SEARCH_32P = bool(check_setting_int(CFG, 'Torrents', 'search_32p', 0))
MODE_32P = check_setting_int(CFG, 'Torrents', 'mode_32p', 0)
#legacy support of older config - reload into old values for consistency.
@@ -1493,6 +1495,7 @@ def config_write():
new_config['Torrents']['tpse_proxy'] = TPSE_PROXY
new_config['Torrents']['tpse_verify'] = TPSE_VERIFY
new_config['Torrents']['enable_32p'] = int(ENABLE_32P)
new_config['Torrents']['search_32p'] = int(SEARCH_32P)
new_config['Torrents']['mode_32p'] = int(MODE_32P)
new_config['Torrents']['passkey_32p'] = PASSKEY_32P
new_config['Torrents']['rssfeed_32p'] = RSSFEED_32P
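
For reference, a sketch of what the [Torrents] section of config.ini would hold for these keys after config_write(); the key names come from the lines above, while the values are only an example assuming 32P is enabled in auth mode with the new default kept:

# Illustrative values only; the key names match the config_write() lines above.
torrents_32p_settings = {
    'enable_32p': 1,
    'search_32p': 0,   # 0 = WS serieslist lookup first (new default), 1 = query 32P directly
    'mode_32p': 1,     # 1 = auth mode, 0 = legacy mode
}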

View File

@@ -150,56 +150,87 @@ class info32p(object):
return feedinfo
def searchit(self):
#self.searchterm is a dict containing series name, issue number, volume and publisher.
series_search = self.searchterm['series']
annualize = False
if 'Annual' in series_search:
series_search = re.sub(' Annual', '', series_search).strip()
annualize = True
issue_search = self.searchterm['issue']
volume_search = self.searchterm['volume']
publisher_search = self.searchterm['publisher']
spl = [x for x in self.publisher_list if x in publisher_search]
for x in spl:
publisher_search = re.sub(x, '', publisher_search).strip()
logger.info('publisher search set to : ' + publisher_search)
#generate the dynamic name of the series here so we can match it up
as_d = filechecker.FileChecker()
as_dinfo = as_d.dynamic_replace(series_search)
mod_series = as_dinfo['mod_seriesname']
as_puinfo = as_d.dynamic_replace(publisher_search)
pub_series = as_puinfo['mod_seriesname']
logger.info('series_search: ' + series_search)
if '/' in series_search:
series_search = series_search[:series_search.find('/')]
if ':' in series_search:
series_search = series_search[:series_search.find(':')]
if ',' in series_search:
series_search = series_search[:series_search.find(',')]
if not mylar.SEARCH_32P:
url = 'https://walksoftly.itsaninja.party/serieslist.php'
params = {'series': series_search}
try:
t = requests.get(url, params=params, verify=True)
except requests.exceptions.RequestException as e:
logger.warn(e)
return "no results"
if t.status_code == '619':
logger.warn('[' + str(t.status_code) + '] Unable to retrieve data from site.')
return "no results"
elif t.status_code == '999':
logger.warn('[' + str(t.status_code) + '] No series title was provided to the search query.')
return "no results"
try:
results = t.json()
except:
results = t.text
if len(results) == 0:
logger.warn('No results found for search on 32P.')
return "no results"
with requests.Session() as s:
#self.searchterm is a dict containing series name, issue number, volume and publisher.
series_search = self.searchterm['series']
annualize = False
if 'Annual' in series_search:
series_search = re.sub(' Annual', '', series_search).strip()
annualize = True
issue_search = self.searchterm['issue']
volume_search = self.searchterm['volume']
publisher_search = self.searchterm['publisher']
spl = [x for x in self.publisher_list if x in publisher_search]
for x in spl:
publisher_search = re.sub(x, '', publisher_search).strip()
logger.info('publisher search set to : ' + publisher_search)
#generate the dynamic name of the series here so we can match it up
as_d = filechecker.FileChecker()
as_dinfo = as_d.dynamic_replace(series_search)
mod_series = as_dinfo['mod_seriesname']
as_puinfo = as_d.dynamic_replace(publisher_search)
pub_series = as_puinfo['mod_seriesname']
logger.info('series_search: ' + series_search)
if '/' in series_search:
series_search = series_search[:series_search.find('/')]
if ':' in series_search:
series_search = series_search[:series_search.find(':')]
if ',' in series_search:
series_search = series_search[:series_search.find(',')]
url = 'https://32pag.es/torrents.php' #?action=serieslist&filter=' + series_search #&filter=F
params = {'action': 'serieslist', 'filter': series_search}
s.headers = self.headers
cj = LWPCookieJar(os.path.join(mylar.CACHE_DIR, ".32p_cookies.dat"))
cj.load()
s.cookies = cj
time.sleep(1) #just to make sure we don't hammer, 1s pause.
t = s.get(url, params=params, verify=True)
soup = BeautifulSoup(t.content, "html.parser")
results = soup.find_all("a", {"class":"object-qtip"},{"data-type":"torrentgroup"})
if mylar.SEARCH_32P:
url = 'https://32pag.es/torrents.php' #?action=serieslist&filter=' + series_search #&filter=F
params = {'action': 'serieslist', 'filter': series_search}
time.sleep(1) #just to make sure we don't hammer, 1s pause.
t = s.get(url, params=params, verify=True)
soup = BeautifulSoup(t.content, "html.parser")
results = soup.find_all("a", {"class":"object-qtip"},{"data-type":"torrentgroup"})
data = []
pdata = []
pubmatch = False
for r in results:
torrentid = r['data-id']
torrentname = r.findNext(text=True)
torrentname = torrentname.strip()
if mylar.SEARCH_32P:
torrentid = r['data-id']
torrentname = r.findNext(text=True)
torrentname = torrentname.strip()
else:
torrentid = r['id']
torrentname = r['series']
as_d = filechecker.FileChecker()
as_dinfo = as_d.dynamic_replace(torrentname)
seriesresult = as_dinfo['mod_seriesname']
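
The else branch above reads the WS serieslist response directly, so the expected JSON shape can be inferred from the code; a tiny illustration of that assumed shape:

# Field names inferred from the else branch above (r['id'], r['series']); values are made up.
ws_results = [
    {'id': '12345', 'series': 'Invincible Iron Man'},
    {'id': '12346', 'series': 'Invincible Iron Man Annual'},
]
for r in ws_results:
    torrentid = r['id']        # torrent group id on 32P
    torrentname = r['series']  # series title as listed on 32P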
@@ -215,56 +246,57 @@ class info32p(object):
if as_tinfo['mod_seriesname'] == mod_series:
logger.info('[MATCH] ' + torrentname + ' [' + str(torrentid) + ']')
pdata.append({"id": torrentid,
"series": torrentname})
"series": torrentname})
pubmatch = True
logger.info(str(len(data)) + ' series listed for searching that match.')
if all([len(data) == 0, len(pdata) == 0]):
return "no results"
if len(data) == 1 or len(pdata) == 1:
logger.info(str(len(data)) + ' series match the title being searched for')
if len(pdata) == 1:
dataset = pdata[0]['id']
else:
dataset = data[0]['id']
if len(pdata) == 1:
dataset = pdata[0]['id']
else:
dataset = data[0]['id']
payload = {'action': 'groupsearch',
'id': dataset,
'issue': issue_search}
#in order to match up against 0-day stuff, volume has to be none at this point
#when doing other searches tho, this should be allowed to go through
#if all([volume_search != 'None', volume_search is not None]):
# payload.update({'volume': re.sub('v', '', volume_search).strip()})
payload = {'action': 'groupsearch',
'id': dataset,
'issue': issue_search}
#in order to match up against 0-day stuff, volume has to be none at this point
#when doing other searches tho, this should be allowed to go through
#if all([volume_search != 'None', volume_search is not None]):
# payload.update({'volume': re.sub('v', '', volume_search).strip()})
logger.info('payload: ' + str(payload))
url = 'https://32pag.es/ajax.php'
logger.info('payload: ' + str(payload))
url = 'https://32pag.es/ajax.php'
time.sleep(1) #just to make sure we don't hammer, 1s pause.
d = s.get(url, params=payload, verify=True)
time.sleep(1) #just to make sure we don't hammer, 1s pause.
d = s.get(url, params=payload, verify=True)
results32p = []
resultlist = {}
try:
searchResults = d.json()
except:
searchResults = d.text
if searchResults['status'] == 'success' and searchResults['count'] > 0:
logger.info('successfully retrieved ' + str(searchResults['count']) + ' search results.')
for a in searchResults['details']:
results32p.append({'link': a['id'],
'title': self.searchterm['series'] + ' v' + a['volume'] + ' #' + a['issues'],
'filesize': a['size'],
'issues': a['issues'],
'pack': a['pack'],
'format': a['format'],
'language': a['language'],
'seeders': a['seeders'],
'leechers': a['leechers'],
'scanner': a['scanner'],
'pubdate': datetime.datetime.fromtimestamp(float(a['upload_time'])).strftime('%c')})
results32p = []
resultlist = {}
try:
searchResults = d.json()
except:
searchResults = d.text
logger.info(searchResults)
if searchResults['status'] == 'success' and searchResults['count'] > 0:
logger.info('successfully retrieved ' + str(searchResults['count']) + ' search results.')
for a in searchResults['details']:
results32p.append({'link': a['id'],
'title': self.searchterm['series'] + ' v' + a['volume'] + ' #' + a['issues'],
'filesize': a['size'],
'issues': a['issues'],
'pack': a['pack'],
'format': a['format'],
'language': a['language'],
'seeders': a['seeders'],
'leechers': a['leechers'],
'scanner': a['scanner'],
'pubdate': datetime.datetime.fromtimestamp(float(a['upload_time'])).strftime('%c')})
resultlist['entries'] = sorted(results32p, key=itemgetter('pack','title'), reverse=False)
else:
resultlist = 'no results'
resultlist['entries'] = sorted(results32p, key=itemgetter('pack','title'), reverse=False)
else:
resultlist = 'no results'
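
Each entry appended above carries the fields returned by the 32P groupsearch ajax call; an illustrative example of a single entry as it would sit in resultlist['entries']:

# All values invented; keys match the loop above. Entries are later sorted by ('pack', 'title').
example_entry = {
    'link': '98765',
    'title': 'Invincible Iron Man v2015 #1',
    'filesize': '45 MB',
    'issues': '1',
    'pack': 0,
    'format': 'cbr',
    'language': 'English',
    'seeders': 10,
    'leechers': 0,
    'scanner': 'example-scanner',
    'pubdate': 'Fri Nov 11 11:52:24 2016',
}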

View File

@@ -294,8 +294,11 @@ def search_init(ComicName, IssueNumber, ComicYear, SeriesYear, Publisher, IssueD
logger.info('Finished searching via :' + str(searchmode) + '. Issue not found - status kept as Wanted.')
else:
logger.fdebug('Could not find issue doing a manual search via : ' + str(searchmode))
if searchprov == '32P' and mylar.MODE_32P == 0:
return findit, 'None'
if searchprov == '32P':
if mylar.MODE_32P == 0:
return findit, 'None'
elif mylar.MODE_32P == 1 and searchmode == 'api':
return findit, 'None'
i+=1
return findit, 'None'
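
Condensed, the new branch above gives up on 32P in two cases; a minimal restatement (not the Mylar code itself), with the MODE_32P values as documented in the config defaults earlier in this diff (0 = legacy mode, 1 = auth mode):

def give_up_on_32p(searchprov, mode_32p, searchmode):
    # Mirrors the branch above: stop cycling providers for 32P in legacy mode,
    # or in auth mode once the 'api' searchmode has been tried.
    if searchprov != '32P':
        return False
    return mode_32p == 0 or (mode_32p == 1 and searchmode == 'api')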
@@ -1519,6 +1522,7 @@ def NZB_SEARCH(ComicName, IssueNumber, ComicYear, SeriesYear, Publisher, IssueDa
comicinfo.append({"ComicName": ComicName,
"IssueNumber": IssueNumber,
"IssueDate": IssueDate,
"comyear": cyear,
"pack": False,
"pack_numbers": None,
@@ -1941,22 +1945,23 @@ def searcher(nzbprov, nzbname, comicinfo, link, IssueID, ComicID, tmpprov, direc
logger.fdebug("ComicName: " + ComicName)
logger.fdebug("Issue: " + str(IssueNumber))
logger.fdebug("Year: " + str(comyear))
logger.fdebug("IssueDate:" + str(IssueDate))
logger.fdebug("IssueDate: " + comicinfo[0]['IssueDate'])
logger.info(u"Found " + ComicName + " (" + str(comyear) + ") issue: " + IssueNumber + " using " + str(tmpprov))
logger.fdebug("link given by: " + str(nzbprov))
if mylar.FAILED_DOWNLOAD_HANDLING:
if nzbid is not None:
try:
# only nzb providers will have a filen, try it and pass exception
if IssueID is None:
logger.fdebug('One-off mode was initiated - Failed Download handling for : ' + ComicName + ' #' + str(IssueNumber))
comicinfo = {"ComicName": ComicName,
"IssueNumber": IssueNumber}
return FailedMark(ComicID=ComicID, IssueID=IssueID, id=nzbid, nzbname=nzbname, prov=nzbprov, oneoffinfo=comicinfo)
except:
pass
if all([nzbid is not None, IssueID is not None]):
# --- this causes any possible snatch to get marked as a Failed download when doing a one-off search...
#try:
# # only nzb providers will have a filen, try it and pass exception
# if IssueID is None:
# logger.fdebug('One-off mode was initiated - Failed Download handling for : ' + ComicName + ' #' + str(IssueNumber))
# comicinfo = {"ComicName": ComicName,
# "IssueNumber": IssueNumber}
# return FailedMark(ComicID=ComicID, IssueID=IssueID, id=nzbid, nzbname=nzbname, prov=nzbprov, oneoffinfo=comicinfo)
#except:
# pass
call_the_fail = Failed.FailedProcessor(nzb_name=nzbname, id=nzbid, issueid=IssueID, comicid=ComicID, prov=tmpprov)
check_the_fail = call_the_fail.failed_check()
if check_the_fail == 'Failed':
@@ -1964,7 +1969,9 @@ def searcher(nzbprov, nzbname, comicinfo, link, IssueID, ComicID, tmpprov, direc
return "downloadchk-fail"
elif check_the_fail == 'Good':
logger.fdebug('[FAILED_DOWNLOAD_CHECKER] This is not in the failed downloads list. Will continue with the download.')
else:
logger.fdebug('[FAILED_DOWNLOAD_CHECKER] Failed download checking is not available for one-off downloads atm. Fixed soon!')
if link and all([nzbprov != 'TPSE', nzbprov != 'WWT', nzbprov != 'DEM', nzbprov != '32P', nzbprov != 'Torznab']):
#generate nzbid here.

View File

@@ -604,15 +604,17 @@ def nzblog(IssueID, NZBName, ComicName, SARC=None, IssueArcID=None, id=None, pro
newValue = {'NZBName': NZBName}
if SARC:
logger.fdebug("Story Arc (SARC) detected as: " + str(SARC))
IssueID = 'S' + str(IssueArcID)
newValue['SARC'] = SARC
if IssueID is None or IssueID == 'None':
#if IssueID is None, it's a one-off download from the pull-list.
#give it a generic ID above the last one so it doesn't throw an error later.
logger.fdebug("Story Arc (SARC) detected as: " + str(SARC))
if mylar.HIGHCOUNT == 0:
IssueID = '900000'
mylar.HIGHCOUNT = 900000
IssueID = mylar.HIGHCOUNT
mylar.config_write()
else:
IssueID = int(mylar.HIGHCOUNT) + 1
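
Put simply, a pull-list one-off now receives a synthetic IssueID derived from HIGHCOUNT so nzblog() has a key to store; a small sketch (illustration only) of the behaviour of the branch above:

# First one-off: HIGHCOUNT is 0, so it is seeded to 900000 and used as the IssueID.
# Later one-offs: the IssueID becomes HIGHCOUNT + 1 (as far as this hunk shows).
def oneoff_issueid(highcount):
    if highcount == 0:
        return 900000
    return int(highcount) + 1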