Mirror of https://github.com/evilhero/mylar

Incorporate cfscrape with 32pages auth/torrent download

Authored by Undeadhunter on 2016-12-28 14:08:12 +01:00, committed by evilhero
parent 6101943540
commit 48933364bb
2 changed files with 38 additions and 17 deletions

File 1:

@@ -53,7 +53,7 @@ class info32p(object):
         feedinfo = []
         try:
-            with requests.Session() as s:
+            with cfscrape.create_scraper() as s:
                 s.headers = self.headers
                 cj = LWPCookieJar(os.path.join(mylar.CACHE_DIR, ".32p_cookies.dat"))
                 cj.load()
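
This hunk is the commit's core pattern: cfscrape.create_scraper() returns a CloudflareScraper, a requests.Session subclass that transparently solves Cloudflare's anti-bot JavaScript challenge, so it drops in wherever a plain Session was used. A minimal standalone sketch of the pattern (the cache path and headers are placeholders, not Mylar's values):

    import os

    import cfscrape  # pip install cfscrape; subclasses requests.Session

    try:
        from cookielib import LWPCookieJar       # Python 2, as in Mylar
    except ImportError:
        from http.cookiejar import LWPCookieJar  # Python 3

    CACHE_DIR = '/tmp'  # stand-in for mylar.CACHE_DIR

    # create_scraper() returns a CloudflareScraper, so the context-manager
    # protocol, headers, cookies and HTTP verbs all behave like requests.Session.
    with cfscrape.create_scraper() as s:
        s.headers.update({'User-Agent': 'Mozilla/5.0'})  # placeholder headers
        cj = LWPCookieJar(os.path.join(CACHE_DIR, ".32p_cookies.dat"))
        if os.path.exists(cj.filename):
            cj.load()  # reuse cookies saved by an earlier login
        s.cookies = cj
        # every s.get()/s.post() from here on answers the Cloudflare
        # challenge automatically before handing back the real response.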
@@ -72,9 +72,10 @@ class info32p(object):
                 requests.packages.urllib3.disable_warnings(InsecureRequestWarning)
                 # post to the login form
-                scraper = cfscrape.create_scraper()
-                r = scraper.post(self.url, verify=verify)
+                r = s.post(self.url, verify=verify, allow_redirects=True)
+                #logger.debug(self.module + " Content session reply" + r.text)
                 #need a way to find response code (200=OK), but returns 200 for everything even failed signons (returns a blank page)
                 #logger.info('[32P] response: ' + str(r.content))
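
For debugging what the scraper actually negotiates before a login POST like this one, cfscrape also exposes a module-level get_tokens() helper that runs the challenge once and returns the resulting Cloudflare cookies plus the User-Agent that earned them. A short sketch (hitting the tracker's front page is only illustrative):

    import cfscrape

    # get_tokens() solves the challenge for one URL and returns a dict of
    # the Cloudflare cookies (cf_clearance etc.) and the User-Agent string
    # that must accompany them on later requests.
    tokens, user_agent = cfscrape.get_tokens('https://32pag.es/')
    print(tokens)
    print(user_agent)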
@@ -218,7 +219,7 @@ class info32p(object):
             logger.warn('No results found for search on 32P.')
             return "no results"
-        with requests.Session() as s:
+        with cfscrape.create_scraper() as s:
             s.headers = self.headers
             cj = LWPCookieJar(os.path.join(mylar.CACHE_DIR, ".32p_cookies.dat"))
             cj.load()
@@ -232,8 +233,7 @@ class info32p(object):
             url = 'https://32pag.es/torrents.php' #?action=serieslist&filter=' + series_search #&filter=F
             params = {'action': 'serieslist', 'filter': series_search}
             time.sleep(1)  #just to make sure we don't hammer, 1s pause.
-            scraper = cfscrape.create_scraper()
-            t = scraper.get(url, params=params, verify=True)
+            t = s.get(url, params=params, verify=True, allow_redirects=True)
             soup = BeautifulSoup(t.content, "html.parser")
             results = soup.find_all("a", {"class": "object-qtip"}, {"data-type": "torrentgroup"})
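
One nit on the context here: BeautifulSoup's find_all() takes (name, attrs, recursive, ...), so the second dict in the find_all call above lands in the recursive slot and the data-type filter is silently ignored. A self-contained sketch of the same lookup with both constraints merged into attrs (the session handling is illustrative only):

    import time

    import cfscrape
    from bs4 import BeautifulSoup

    def series_list(series_search):
        url = 'https://32pag.es/torrents.php'
        params = {'action': 'serieslist', 'filter': series_search}
        with cfscrape.create_scraper() as s:
            time.sleep(1)  # 1s pause so the tracker isn't hammered
            t = s.get(url, params=params, verify=True, allow_redirects=True)
            soup = BeautifulSoup(t.content, "html.parser")
            # both filters in attrs, so data-type actually constrains the match
            return soup.find_all("a", attrs={"class": "object-qtip",
                                             "data-type": "torrentgroup"})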
@@ -306,15 +306,24 @@ class info32p(object):
         logger.info('payload: ' + str(payload))
         url = 'https://32pag.es/ajax.php'
-        scraper = cfscrape.create_scraper()
         time.sleep(1)  #just to make sure we don't hammer, 1s pause.
-        d = scraper.get(url, params=payload, verify=True)
+        try:
+            d = s.post(url, params=payload, verify=True, allow_redirects=True)
+            logger.debug(self.module + ' Reply from AJAX: \n %s', d.text)
+        except Exception as e:
+            logger.info(self.module + ' Could not POST URL %s', url)
         try:
             searchResults = d.json()
         except:
             searchResults = d.text
-            logger.info(searchResults)
+            logger.debug(self.module + ' Search Result did not return valid JSON, falling back on text: %s', searchResults.text)
+            return False
+        logger.debug(self.module + " Search Result: %s", searchResults)
         if searchResults['status'] == 'success' and searchResults['count'] > 0:
             logger.info('successfully retrieved ' + str(searchResults['count']) + ' search results.')
             for a in searchResults['details']:
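
Two small hazards in this hunk: if the POST raises, d is unbound when d.json() runs, and once json() has failed, searchResults already holds d.text (a plain string), so searchResults.text would raise AttributeError. A sketch of the same fallback with both closed off (fetch_results and its logging are hypothetical stand-ins for the surrounding method):

    def fetch_results(s, url, payload, module='[32P-SEARCH]'):
        # s is an already-authenticated cfscrape/requests session
        try:
            d = s.post(url, params=payload, verify=True, allow_redirects=True)
        except Exception as e:
            print('%s Could not POST URL %s: %s' % (module, url, e))
            return False  # bail out instead of using an unbound d below
        try:
            return d.json()
        except ValueError:  # body was not JSON (e.g. an HTML error page)
            print('%s Non-JSON reply, first 200 chars: %s' % (module, d.text[:200]))
            return False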
@@ -349,7 +358,11 @@ class info32p(object):
         '''
         self.module = '[32P-AUTHENTICATION]'
-        self.ses = requests.Session()
+        try:
+            self.ses = cfscrape.create_scraper()
+        except Exception as e:
+            logger.error(self.module + " Can't create session with cfscrape")
         self.session_path = session_path if session_path is not None else os.path.join(mylar.CACHE_DIR, ".32p_cookies.dat")
         self.ses.cookies = LWPCookieJar(self.session_path)
         if not os.path.exists(self.session_path):
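
One thing to watch in this hunk: if create_scraper() throws, the except branch only logs, so self.ses stays unset and the very next line (self.ses.cookies = ...) raises anyway. A miniature of the constructor pattern with the cookie-jar persistence spelled out (TrackerAuth is a hypothetical stand-in for info32p):

    import os

    import cfscrape

    try:
        from cookielib import LWPCookieJar       # Python 2
    except ImportError:
        from http.cookiejar import LWPCookieJar  # Python 3

    class TrackerAuth(object):
        def __init__(self, session_path):
            # let a scraper-creation failure propagate; a half-built
            # object with no .ses would only fail later and less clearly
            self.ses = cfscrape.create_scraper()
            self.session_path = session_path
            self.ses.cookies = LWPCookieJar(session_path)
            if os.path.exists(session_path):
                self.ses.cookies.load(ignore_discard=True)

        def save_cookies(self):
            # ignore_discard keeps session-only cookies (the login token
            # is usually one of those) when the jar is written to disk
            self.ses.cookies.save(ignore_discard=True)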
@@ -451,8 +464,7 @@ class info32p(object):
         u = 'https://32pag.es/login.php?ajax=1'
         try:
-            scraper = cfscrape.create_scraper(self.ses)
-            r = scraper.post(u, data=postdata, timeout=60, allow_redirects=True)
+            r = self.ses.post(u, data=postdata, timeout=60, allow_redirects=True)
             logger.debug(self.module + ' Status Code: ' + str(r.status_code))
         except Exception as e:
             logger.error(self.module + " Got an exception when trying to login to %s POST", u)
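
The removed create_scraper(self.ses) form is worth noting: passing sess= copies the existing session's headers, cookies and related settings onto a fresh CloudflareScraper. The commit retires it because self.ses is now a scraper from the start, so one session both solves the challenge and carries the login. A sketch of the retired wrapping pattern (URL and postdata are placeholders):

    import cfscrape
    import requests

    plain = requests.Session()
    plain.headers.update({'User-Agent': 'Mozilla/5.0'})  # placeholder

    # sess= copies headers, cookies and other settings from the existing
    # Session onto the new CloudflareScraper -- the pattern removed above
    scraper = cfscrape.create_scraper(sess=plain)
    r = scraper.post('https://32pag.es/login.php?ajax=1',
                     data={'username': 'u', 'password': 'p'},  # placeholders
                     timeout=60, allow_redirects=True)
    print(r.status_code)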

File 2:

@@ -918,23 +918,32 @@ def torsend2client(seriesname, issue, seriesyear, linkit, site):
         #r = requests.get(url, params=payload, verify=verify, stream=True, headers=headers)
     except Exception, e:
-        logger.warn('Error fetching data from %s: %s' % (site, e))
+        logger.warn('Error fetching data from %s (%s): %s' % (site, url, e))
         if site == '32P':
+            logger.info('[TOR2CLIENT-32P] Retrying with 32P')
             if mylar.MODE_32P == 1:
-                logger.info('Attempting to re-authenticate against 32P and poll new keys as required.')
+                logger.info('[TOR2CLIENT-32P] Attempting to re-authenticate against 32P and poll new keys as required.')
                 feed32p = auth32p.info32p(reauthenticate=True)
                 feedinfo = feed32p.authenticate()
                 if feedinfo == "disable":
                     mylar.ENABLE_32P = 0
                     mylar.config_write()
                     return "fail"
+                logger.debug('[TOR2CLIENT-32P] Creating CF Scraper')
+                scraper = cfscrape.create_scraper()
+                logger.debug('[TOR2CLIENT-32P] payload: %s \n verify %s \n headers %s \n', payload, verify, headers)
                 try:
-                    r = requests.get(url, params=payload, verify=verify, stream=True, headers=headers)
+                    r = scraper.get(url, params=payload, verify=verify, allow_redirects=True)
                 except Exception, e:
-                    logger.warn('Error fetching data from %s: %s' % (site, e))
+                    logger.warn('[TOR2CLIENT-32P] Unable to GET %s (%s): %s' % (site, url, e))
                     return "fail"
             else:
-                logger.warn('[32P] Unable to authenticate using existing RSS Feed given. Make sure that you have provided a CURRENT feed from 32P')
+                logger.warn('[TOR2CLIENT-32P] Unable to authenticate using existing RSS Feed given. Make sure that you have provided a CURRENT feed from 32P')
                 return "fail"
         else:
             logger.info('blah: ' + str(r.status_code))
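
One regression worth flagging in this retry path: the original requests.get passed stream=True and headers=headers, and both are dropped from the scraper.get replacement. A sketch of the retry with those kept, writing the .torrent payload in chunks (retry_fetch and the output path are placeholders):

    import cfscrape

    def retry_fetch(url, payload, headers, verify=True):
        scraper = cfscrape.create_scraper()
        scraper.headers.update(headers)  # keep the custom headers the
                                         # original requests.get sent
        try:
            r = scraper.get(url, params=payload, verify=verify,
                            stream=True, allow_redirects=True)
        except Exception as e:
            print('[TOR2CLIENT-32P] Unable to GET %s: %s' % (url, e))
            return "fail"
        with open('/tmp/retry.torrent', 'wb') as f:  # placeholder path
            for chunk in r.iter_content(chunk_size=1024):
                if chunk:  # skip keep-alive chunks
                    f.write(chunk)
        return "pass"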