Mirror of https://github.com/evilhero/mylar (synced 2025-03-15 08:18:44 +00:00)
IMP: Tiered wanted-list searching - anything older than 2 weeks will only be searched for using RSS and/or manual means (scheduled searches only)
IMP: Tiered option viewable in Upcoming tab
FIX: Fixed API searching against the WWT option (RSS is still broken, however, as that is due to WWT)
IMP: Removed cherrypy logging in some instances
IMP: Updated cfscrape library to most recent
FIX: Fixed problem with similar titles being incorrectly post-processed due to multiple matches during manual post-processing
FIX: Fixed notifications not firing on a successful manual post-process/folder monitor due to some previous changes
FIX: If grabbag_dir was not set, the location would not be set correctly
FIX: (#2051) When using nzbget with nzbToMylar enabled alongside CDH, a warning will be issued within Mylar to indicate as such
This commit is contained in:
parent 9fe60b57af
commit f7c1629679
18 changed files with 372 additions and 132 deletions
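
The headline change is the two-week search tier. The following is a rough, illustrative Python sketch (not Mylar's actual module layout) of the rule this diff introduces in mylar/__init__.py and mylar/search.py: SEARCH_TIER_DATE is set to 14 days before startup, and a Wanted issue whose DateAdded falls on or after that date stays in the first tier (eligible for provider/API searches), while older issues drop to the second tier and are only picked up by RSS or scheduled/manual searches. The helper name search_tier below is hypothetical.

from datetime import datetime, timedelta

# Tier cutoff: 14 days back from today, stored as 'YYYY-MM-DD' so a plain
# string comparison behaves the same way it does in the diff below.
SEARCH_TIER_DATE = (datetime.today() - timedelta(days=14)).strftime('%Y-%m-%d')

def search_tier(date_added):
    # The commit backfills a missing DateAdded with today's date before comparing.
    if date_added is None:
        date_added = datetime.today().strftime('%Y-%m-%d')
    # 1st tier: recent enough to be queued for provider/API searches.
    # 2nd tier: RSS and scheduled/manual searches only.
    return '1st' if date_added >= SEARCH_TIER_DATE else '2nd'

example_old = (datetime.today() - timedelta(days=30)).strftime('%Y-%m-%d')
print(search_tier(example_old))   # '2nd' -- older than the 14-day cutoff
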
|
@ -21,7 +21,7 @@
|
|||
<div id="paddingheader">
|
||||
<h1 class="clearfix"><img src="interfaces/default/images/icon_gear.png" alt="settings"/>Settings</h1>
|
||||
</div>
|
||||
<form action="configUpdate" method="post" class="form" id="configUpdate">
|
||||
<form action="configUpdate" method="post" id="configUpdate">
|
||||
<div id="tabs">
|
||||
<ul>
|
||||
<li><a href="#tabs-1">Information</a></li>
|
||||
|
@ -1464,10 +1464,10 @@
|
|||
<input type="button" value="Save Changes" onclick="doAjaxCall('configUpdate',$(this),'tabs',true);return false;" data-success="Changes saved successfully">
|
||||
<div class="message">
|
||||
<p><span class="ui-icon ui-icon-info" style="float: left; margin-right: .3em;"></span>Web Interface changes require a restart to take effect</p>
|
||||
</div>
|
||||
</div>
|
||||
|
||||
</form>
|
||||
</div>
|
||||
</div>
|
||||
</form>
|
||||
</%def>
|
||||
|
||||
<%def name="javascriptIncludes()">
|
||||
|
|
|
@ -357,6 +357,19 @@ table.display tr.even.gradeF {
|
|||
background-color: #FF5858;
|
||||
}
|
||||
|
||||
table.display tr.odd.gradeT1 {
|
||||
background-color: #FFDDDD;
|
||||
}
|
||||
table.display tr.even.gradeT1 {
|
||||
background-color: #FFDDDD;
|
||||
}
|
||||
|
||||
table.display tr.odd.gradeT2 {
|
||||
background-color: #FFDDAA;
|
||||
}
|
||||
table.display tr.even.gradeT2 {
|
||||
background-color: #FFDDAA;
|
||||
}
|
||||
table.display tr.gradeL #status {
|
||||
background: url("../images/loader_black.gif") no-repeat scroll 15px center transparent;
|
||||
font-size: 11px;
|
||||
|
@ -373,6 +386,8 @@ table.display tr.gradeP td,
|
|||
table.display tr.gradeD td,
|
||||
table.display tr.gradeT td,
|
||||
table.display tr.gradeF td,
|
||||
table.display tr.gradeT1 td,
|
||||
table.display tr.gradeT2 td,
|
||||
table.display tr.gradeZ td {border-bottom: 1px solid #FFF;}
|
||||
table.display tr:last-child td {
|
||||
border-bottom: 1px solid #eee;
|
||||
|
@ -478,6 +493,23 @@ table.display_no_select tr.odd.gradeZ {
|
|||
table.display_no_select tr.even.gradeZ {
|
||||
background-color: white;
|
||||
}
|
||||
|
||||
table.display_no_select tr.odd.gradeT1 {
|
||||
background-color: #FFDDDD;
|
||||
}
|
||||
|
||||
table.display_no_select tr.even.gradeT1 {
|
||||
background-color: white;
|
||||
}
|
||||
|
||||
table.display_no_select tr.odd.gradeT2 {
|
||||
background-color: #FFDDAA;
|
||||
}
|
||||
|
||||
table.display_no_select tr.even.gradeT2 {
|
||||
background-color: white;
|
||||
}
|
||||
|
||||
table.display_no_select tr.gradeL #status {
|
||||
background: url("../images/loader_black.gif") no-repeat scroll 15px center transparent;
|
||||
font-size: 11px;
|
||||
|
@ -494,6 +526,8 @@ table.display_no_select tr.gradeP td,
|
|||
table.display_no_select tr.gradeD td,
|
||||
table.display_no_select tr.gradeT td,
|
||||
table.display_no_select tr.gradeF td,
|
||||
table.display_no_select tr.gradeT1 td,
|
||||
table.display_no_select tr.gradeT2 td,
|
||||
table.display_no_select tr.gradeZ td {border-bottom: 1px solid #FFF;}
|
||||
table.display_no_select tr:last-child td {
|
||||
border-bottom: 1px solid #eee;
|
||||
|
|
|
@ -1960,6 +1960,7 @@ DIV.progress-container > DIV
|
|||
}
|
||||
#upcoming_table th#type,
|
||||
#wanted_table th#type,
|
||||
#wanted_table th#tier,
|
||||
#searchresults_table th#score {
|
||||
min-width: 75px;
|
||||
text-align: center;
|
||||
|
@ -2010,6 +2011,7 @@ DIV.progress-container > DIV
|
|||
}
|
||||
#upcoming_table td#type,
|
||||
#wanted_table td#type,
|
||||
#wanted_table td#tier,
|
||||
#searchresults_table td#score {
|
||||
min-width: 75px;
|
||||
text-align: center;
|
||||
|
|
|
@ -21,6 +21,7 @@
|
|||
<div id="checkboxControls" style="float: right; vertical-align: middle; margin: 5px 3px 3px 3px;">
|
||||
<div style="padding-bottom: 5px;">
|
||||
<label for="Wanted" class="checkbox inline Wanted"><input type="checkbox" id="Wanted" checked="checked" /> Wanted: <b>${isCounts['Wanted']}</b></label>
|
||||
<label for="WantedTier" class="checkbox inline WantedTier">Tiered: <b>${isCounts['WantedTier']}</b></label>
|
||||
%if mylar.CONFIG.UPCOMING_SNATCHED is True:
|
||||
%if int(isCounts['Snatched']) > 0:
|
||||
<label for="Snatched" class="checkbox inline Snatched"><input type="checkbox" id="Snatched" checked="checked" /> Snatched: <b>${isCounts['Snatched']}</b></label>
|
||||
|
@ -46,7 +47,7 @@
|
|||
</select>
|
||||
<input type="hidden" value="Go">
|
||||
</div>
|
||||
|
||||
<small style="float: right; vertical-align: middle;">Date shown in SearchTier is when the issue was added to the Wanted list</small>
|
||||
<table class="display" id="wanted_table">
|
||||
<thead>
|
||||
<tr>
|
||||
|
@ -54,6 +55,7 @@
|
|||
<th id="comicname">Comic</th>
|
||||
<th id="issuenumber">Issue</th>
|
||||
<th id="reldate">Release Date</th>
|
||||
<th id="tier">SearchTier</th>
|
||||
<th id="options">Options</th>
|
||||
</tr>
|
||||
</thead>
|
||||
|
@ -62,12 +64,25 @@
|
|||
<%
|
||||
if issue['Status'] == 'Wanted':
|
||||
grade = 'X'
|
||||
|
||||
try:
|
||||
if issue['DateAdded'] <= mylar.SEARCH_TIER_DATE:
|
||||
tier = "2nd"
|
||||
grade = 'T2'
|
||||
else:
|
||||
tier = "1st [%s]" % issue['DateAdded']
|
||||
grade = 'X'
|
||||
except:
|
||||
tier = "1st [%s]" % issue['DateAdded']
|
||||
grade = 'T2'
|
||||
|
||||
elif issue['Status'] == 'Snatched':
|
||||
grade = 'C'
|
||||
elif issue['Status'] == 'Failed':
|
||||
grade = 'F'
|
||||
else:
|
||||
grade = 'Z'
|
||||
|
||||
%>
|
||||
|
||||
<tr class="${issue['Status']} grade${grade}">
|
||||
|
@ -96,6 +111,15 @@
|
|||
else:
|
||||
adjcomicname = issue['ComicName']
|
||||
endif
|
||||
|
||||
try:
|
||||
if issue['DateAdded'] <= mylar.SEARCH_TIER_DATE:
|
||||
tier = "2nd"
|
||||
else:
|
||||
tier = "1st [%s]" % issue['DateAdded']
|
||||
except:
|
||||
tier = "1st [%s]" % issue['DateAdded']
|
||||
|
||||
%>
|
||||
<td id="select"><input type="checkbox" name="${issueid}" class="checkbox" value="${issueid}"/></td>
|
||||
<td id="comicname">
|
||||
|
@ -107,6 +131,11 @@
|
|||
</td>
|
||||
<td id="issuenumber">${issuenumber}</td>
|
||||
<td id="reldate">${issue['IssueDate']}</td>
|
||||
%if issue['Status'] == 'Wanted':
|
||||
<td id="tier" style="text-align:center;">${tier}</td>
|
||||
%else:
|
||||
<td id="tier"></td>
|
||||
%endif
|
||||
<td id="options">
|
||||
<!--
|
||||
<a class="menu_link_edit" id="choose_specific_download" title="Choose Specific Download" href="javascript:void(0)" onclick="getAvailableDownloads('${issueid}')"><i class="fa fa-search"></i><img src="interfaces/default/images/magnifier.png" height="25" width="25" class="highqual" /></a>
|
||||
|
@ -131,7 +160,6 @@
|
|||
%endfor
|
||||
</tbody>
|
||||
</table>
|
||||
|
||||
</form>
|
||||
</div>
|
||||
|
||||
|
|
lib/cfscrape/LICENSE (new file, 21 lines)
|
@ -0,0 +1,21 @@
|
|||
The MIT License (MIT)
|
||||
|
||||
Copyright (c) 2015 Anorov
|
||||
|
||||
Permission is hereby granted, free of charge, to any person obtaining a copy
|
||||
of this software and associated documentation files (the "Software"), to deal
|
||||
in the Software without restriction, including without limitation the rights
|
||||
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
|
||||
copies of the Software, and to permit persons to whom the Software is
|
||||
furnished to do so, subject to the following conditions:
|
||||
|
||||
The above copyright notice and this permission notice shall be included in
|
||||
all copies or substantial portions of the Software.
|
||||
|
||||
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
||||
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
||||
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
|
||||
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
|
||||
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
|
||||
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
|
||||
THE SOFTWARE.
|
|
@ -1,55 +1,80 @@
|
|||
from time import sleep
|
||||
import logging
|
||||
import random
|
||||
import re
|
||||
from requests.sessions import Session
|
||||
import js2py
|
||||
import subprocess
|
||||
from copy import deepcopy
|
||||
from time import sleep
|
||||
|
||||
from requests.sessions import Session
|
||||
|
||||
try:
|
||||
from urlparse import urlparse
|
||||
except ImportError:
|
||||
from urllib.parse import urlparse
|
||||
|
||||
__version__ = "1.9.5"
|
||||
|
||||
DEFAULT_USER_AGENTS = [
|
||||
"Mozilla/5.0 (Windows NT 6.1) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/41.0.2228.0 Safari/537.36",
|
||||
"Mozilla/5.0 (Macintosh; Intel Mac OS X 10_10_5) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/50.0.2661.102 Safari/537.36",
|
||||
"Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/52.0.2743.116 Safari/537.36",
|
||||
"Mozilla/5.0 (Windows NT 6.1; WOW64; rv:46.0) Gecko/20100101 Firefox/46.0",
|
||||
"Mozilla/5.0 (X11; Ubuntu; Linux x86_64; rv:41.0) Gecko/20100101 Firefox/41.0"
|
||||
"Mozilla/5.0 (Macintosh; Intel Mac OS X 10_13_2) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/65.0.3325.181 Safari/537.36",
|
||||
"Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Ubuntu Chromium/65.0.3325.181 Chrome/65.0.3325.181 Safari/537.36",
|
||||
"Mozilla/5.0 (Linux; Android 7.0; Moto G (5) Build/NPPS25.137-93-8) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/64.0.3282.137 Mobile Safari/537.36",
|
||||
"Mozilla/5.0 (iPhone; CPU iPhone OS 7_0_4 like Mac OS X) AppleWebKit/537.51.1 (KHTML, like Gecko) Version/7.0 Mobile/11B554a Safari/9537.53",
|
||||
"Mozilla/5.0 (Windows NT 6.1; Win64; x64; rv:60.0) Gecko/20100101 Firefox/60.0",
|
||||
"Mozilla/5.0 (Macintosh; Intel Mac OS X 10.13; rv:59.0) Gecko/20100101 Firefox/59.0",
|
||||
"Mozilla/5.0 (Windows NT 6.3; Win64; x64; rv:57.0) Gecko/20100101 Firefox/57.0"
|
||||
]
|
||||
|
||||
DEFAULT_USER_AGENT = random.choice(DEFAULT_USER_AGENTS)
|
||||
|
||||
BUG_REPORT = """\
|
||||
Cloudflare may have changed their technique, or there may be a bug in the script.
|
||||
|
||||
Please read https://github.com/Anorov/cloudflare-scrape#updates, then file a \
|
||||
bug report at https://github.com/Anorov/cloudflare-scrape/issues."\
|
||||
"""
|
||||
|
||||
ANSWER_ACCEPT_ERROR = """\
|
||||
The challenge answer was not properly accepted by Cloudflare. This can occur if \
|
||||
the target website is under heavy load, or if Cloudflare is experiencing issues. You can
|
||||
potentially resolve this by increasing the challenge answer delay (default: 8 seconds). \
|
||||
For example: cfscrape.create_scraper(delay=15)
|
||||
|
||||
If increasing the delay does not help, please open a GitHub issue at \
|
||||
https://github.com/Anorov/cloudflare-scrape/issues\
|
||||
"""
|
||||
|
||||
class CloudflareScraper(Session):
|
||||
def __init__(self, *args, **kwargs):
|
||||
self.delay = kwargs.pop("delay", 8)
|
||||
super(CloudflareScraper, self).__init__(*args, **kwargs)
|
||||
|
||||
if "requests" in self.headers["User-Agent"]:
|
||||
# Spoof Firefox on Linux if no custom User-Agent has been set
|
||||
# Set a random User-Agent if no custom User-Agent has been set
|
||||
self.headers["User-Agent"] = DEFAULT_USER_AGENT
|
||||
|
||||
def is_cloudflare_challenge(self, resp):
|
||||
return (
|
||||
resp.status_code == 503
|
||||
and resp.headers.get("Server", "").startswith("cloudflare")
|
||||
and b"jschl_vc" in resp.content
|
||||
and b"jschl_answer" in resp.content
|
||||
)
|
||||
|
||||
def request(self, method, url, *args, **kwargs):
|
||||
resp = super(CloudflareScraper, self).request(method, url, *args, **kwargs)
|
||||
|
||||
# Check if Cloudflare anti-bot is on
|
||||
if ( resp.status_code == 503
|
||||
and resp.headers.get("Server") == "cloudflare-nginx"
|
||||
and b"jschl_vc" in resp.content
|
||||
and b"jschl_answer" in resp.content
|
||||
):
|
||||
return self.solve_cf_challenge(resp, **kwargs)
|
||||
if self.is_cloudflare_challenge(resp):
|
||||
resp = self.solve_cf_challenge(resp, **kwargs)
|
||||
|
||||
# Otherwise, no Cloudflare anti-bot detected
|
||||
return resp
|
||||
|
||||
def solve_cf_challenge(self, resp, **original_kwargs):
|
||||
sleep(5) # Cloudflare requires a delay before solving the challenge
|
||||
sleep(self.delay) # Cloudflare requires a delay before solving the challenge
|
||||
|
||||
body = resp.text
|
||||
parsed_url = urlparse(resp.url)
|
||||
domain = urlparse(resp.url).netloc
|
||||
domain = parsed_url.netloc
|
||||
submit_url = "%s://%s/cdn-cgi/l/chk_jschl" % (parsed_url.scheme, domain)
|
||||
|
||||
cloudflare_kwargs = deepcopy(original_kwargs)
|
||||
|
@ -61,23 +86,15 @@ class CloudflareScraper(Session):
|
|||
params["jschl_vc"] = re.search(r'name="jschl_vc" value="(\w+)"', body).group(1)
|
||||
params["pass"] = re.search(r'name="pass" value="(.+?)"', body).group(1)
|
||||
|
||||
# Extract the arithmetic operation
|
||||
js = self.extract_js(body)
|
||||
|
||||
except Exception:
|
||||
except Exception as e:
|
||||
# Something is wrong with the page.
|
||||
# This may indicate Cloudflare has changed their anti-bot
|
||||
# technique. If you see this and are running the latest version,
|
||||
# please open a GitHub issue so I can update the code accordingly.
|
||||
logging.error("[!] Unable to parse Cloudflare anti-bots page. "
|
||||
"Try upgrading cloudflare-scrape, or submit a bug report "
|
||||
"if you are running the latest version. Please read "
|
||||
"https://github.com/Anorov/cloudflare-scrape#updates "
|
||||
"before submitting a bug report.")
|
||||
raise
|
||||
raise ValueError("Unable to parse Cloudflare anti-bots page: %s %s" % (e.message, BUG_REPORT))
|
||||
|
||||
# Safely evaluate the Javascript expression
|
||||
params["jschl_answer"] = str(int(js2py.eval_js(js)) + len(domain))
|
||||
# Solve the Javascript challenge
|
||||
params["jschl_answer"] = self.solve_challenge(body, domain)
|
||||
|
||||
# Requests transforms any request into a GET after a redirect,
|
||||
# so the redirect has to be handled manually here to allow for
|
||||
|
@ -85,26 +102,58 @@ class CloudflareScraper(Session):
|
|||
method = resp.request.method
|
||||
cloudflare_kwargs["allow_redirects"] = False
|
||||
redirect = self.request(method, submit_url, **cloudflare_kwargs)
|
||||
|
||||
redirect_location = urlparse(redirect.headers["Location"])
|
||||
if not redirect_location.netloc:
|
||||
redirect_url = "%s://%s%s" % (parsed_url.scheme, domain, redirect_location.path)
|
||||
return self.request(method, redirect_url, **original_kwargs)
|
||||
return self.request(method, redirect.headers["Location"], **original_kwargs)
|
||||
|
||||
def extract_js(self, body):
|
||||
js = re.search(r"setTimeout\(function\(\){\s+(var "
|
||||
def solve_challenge(self, body, domain):
|
||||
try:
|
||||
js = re.search(r"setTimeout\(function\(\){\s+(var "
|
||||
"s,t,o,p,b,r,e,a,k,i,n,g,f.+?\r?\n[\s\S]+?a\.value =.+?)\r?\n", body).group(1)
|
||||
js = re.sub(r"a\.value = (parseInt\(.+?\)).+", r"\1", js)
|
||||
js = re.sub(r"\s{3,}[a-z](?: = |\.).+", "", js)
|
||||
except Exception:
|
||||
raise ValueError("Unable to identify Cloudflare IUAM Javascript on website. %s" % BUG_REPORT)
|
||||
|
||||
js = re.sub(r"a\.value = (.+ \+ t\.length).+", r"\1", js)
|
||||
js = re.sub(r"\s{3,}[a-z](?: = |\.).+", "", js).replace("t.length", str(len(domain)))
|
||||
|
||||
# Strip characters that could be used to exit the string context
|
||||
# These characters are not currently used in Cloudflare's arithmetic snippet
|
||||
js = re.sub(r"[\n\\']", "", js)
|
||||
|
||||
return js
|
||||
if "toFixed" not in js:
|
||||
raise ValueError("Error parsing Cloudflare IUAM Javascript challenge. %s" % BUG_REPORT)
|
||||
|
||||
# Use vm.runInNewContext to safely evaluate code
|
||||
# The sandboxed code cannot use the Node.js standard library
|
||||
js = "console.log(require('vm').runInNewContext('%s', Object.create(null), {timeout: 5000}));" % js
|
||||
|
||||
try:
|
||||
result = subprocess.check_output(["node", "-e", js]).strip()
|
||||
except OSError as e:
|
||||
if e.errno == 2:
|
||||
raise EnvironmentError("Missing Node.js runtime. Node is required and must be in the PATH (check with `node -v`). Your Node binary may be called `nodejs` rather than `node`, in which case you may need to run `apt-get install nodejs-legacy` on some Debian-based systems. (Please read the cfscrape"
|
||||
" README's Dependencies section: https://github.com/Anorov/cloudflare-scrape#dependencies.")
|
||||
raise
|
||||
except Exception:
|
||||
logging.error("Error executing Cloudflare IUAM Javascript. %s" % BUG_REPORT)
|
||||
raise
|
||||
|
||||
try:
|
||||
float(result)
|
||||
except Exception:
|
||||
raise ValueError("Cloudflare IUAM challenge returned unexpected answer. %s" % BUG_REPORT)
|
||||
|
||||
return result
|
||||
|
||||
@classmethod
|
||||
def create_scraper(cls, sess=None, **kwargs):
|
||||
"""
|
||||
Convenience function for creating a ready-to-go requests.Session (subclass) object.
|
||||
Convenience function for creating a ready-to-go CloudflareScraper object.
|
||||
"""
|
||||
scraper = cls()
|
||||
scraper = cls(**kwargs)
|
||||
|
||||
if sess:
|
||||
attrs = ["auth", "cert", "cookies", "headers", "hooks", "params", "proxies", "data"]
|
||||
|
@ -125,7 +174,7 @@ class CloudflareScraper(Session):
|
|||
scraper.headers["User-Agent"] = user_agent
|
||||
|
||||
try:
|
||||
resp = scraper.get(url)
|
||||
resp = scraper.get(url, **kwargs)
|
||||
resp.raise_for_status()
|
||||
except Exception as e:
|
||||
logging.error("'%s' returned an error. Could not collect tokens." % url)
|
||||
|
@ -153,9 +202,9 @@ class CloudflareScraper(Session):
|
|||
"""
|
||||
Convenience function for building a Cookie HTTP header value.
|
||||
"""
|
||||
tokens, user_agent = cls.get_tokens(url, user_agent=user_agent)
|
||||
tokens, user_agent = cls.get_tokens(url, user_agent=user_agent, **kwargs)
|
||||
return "; ".join("=".join(pair) for pair in tokens.items()), user_agent
|
||||
|
||||
create_scraper = CloudflareScraper.create_scraper
|
||||
get_tokens = CloudflareScraper.get_tokens
|
||||
get_cookie_string = CloudflareScraper.get_cookie_string
|
||||
get_cookie_string = CloudflareScraper.get_cookie_string
|
||||
|
|
|
@ -694,28 +694,46 @@ class PostProcessor(object):
|
|||
logger.fdebug(module + '[NON-MATCH: ' + cs['ComicName'] + '-' + cs['ComicID'] + '] Incorrect series - not populating..continuing post-processing')
|
||||
continue
|
||||
|
||||
logger.fdebug(module + '[SUCCESSFUL MATCH: ' + cs['ComicName'] + '-' + cs['ComicID'] + '] Match verified for ' + helpers.conversion(fl['comicfilename']))
|
||||
self.matched = True
|
||||
continue #break
|
||||
|
||||
|
||||
mlp = []
|
||||
|
||||
xmld = filechecker.FileChecker()
|
||||
#mod_seriesname = as_dinfo['mod_seriesname']
|
||||
for x in manual_list:
|
||||
xmld1 = xmld.dynamic_replace(helpers.conversion(x['ComicName']))
|
||||
xmld = filechecker.FileChecker()
|
||||
xmld1 = xmld.dynamic_replace(helpers.conversion(cs['ComicName']))
|
||||
xseries = xmld1['mod_seriesname'].lower()
|
||||
xmld2 = xmld.dynamic_replace(helpers.conversion(x['Series']))
|
||||
xmld2 = xmld.dynamic_replace(helpers.conversion(watchmatch['series_name']))
|
||||
xfile = xmld2['mod_seriesname'].lower()
|
||||
if re.sub('\|', '', xseries).strip() == re.sub('\|', '', xfile).strip():
|
||||
#logger.fdebug(module + '[DEFINITIVE-NAME MATCH] Definitive name match exactly to : %s [%s]' % (x['ComicName'], x['ComicID']))
|
||||
mlp.append(x)
|
||||
|
||||
if re.sub('\|', '', xseries) == re.sub('\|', '', xfile):
|
||||
logger.fdebug('%s[DEFINITIVE-NAME MATCH] Definitive name match exactly to : %s [%s]' % (module, watchmatch['series_name'], cs['ComicID']))
|
||||
self.matched = True
|
||||
else:
|
||||
pass
|
||||
if len(manual_list) == 1 and len(mlp) == 1:
|
||||
manual_list = mlp
|
||||
#logger.fdebug(module + '[CONFIRMED-FORCE-OVERRIDE] Over-ride of matching taken due to exact name matching of series')
|
||||
continue #break
|
||||
|
||||
if datematch == 'True':
|
||||
logger.fdebug(module + '[SUCCESSFUL MATCH: ' + cs['ComicName'] + '-' + cs['ComicID'] + '] Match verified for ' + helpers.conversion(fl['comicfilename']))
|
||||
break
|
||||
#mlp = []
|
||||
|
||||
#xmld = filechecker.FileChecker()
|
||||
#if len(manual_list) > 1:
|
||||
# #in case the manual pp matches on more than one series in the watchlist, drop back down to exact name matching to see if we can narrow
|
||||
# #the matches down further to the point where there's only one exact match. Not being able to match specifically when there is more than
|
||||
# #one item in the manual list that's matched to the same file will result in a dupe_src error and/or mistakingly PP'ing against the
|
||||
# #wrong series.
|
||||
# for x in manual_list:
|
||||
# xmld1 = xmld.dynamic_replace(helpers.conversion(x['ComicName']))
|
||||
# xseries = xmld1['mod_seriesname'].lower()
|
||||
# xmld2 = xmld.dynamic_replace(helpers.conversion(x['Series']))
|
||||
# xfile = xmld2['mod_seriesname'].lower()
|
||||
# #logger.info('[xseries:%s][xfile:%s]' % (xseries,xfile))
|
||||
# if re.sub('\|', '', xseries).strip() == re.sub('\|', '', xfile).strip():
|
||||
# logger.fdebug('%s[DEFINITIVE-NAME MATCH] Definitive name match exactly to : %s [%s]' % (module, x['ComicName'], x['ComicID']))
|
||||
# mlp.append(x)
|
||||
# else:
|
||||
# pass
|
||||
# if len(mlp) == 1:
|
||||
# manual_list = mlp
|
||||
# logger.fdebug('%s[CONFIRMED-FORCE-OVERRIDE] Over-ride of matching taken due to exact name matching of series' % module)
|
||||
# else:
|
||||
# logger.warn('%s[CONFIRMATION-PROBLEM] Unable to determine proper match for series as more than one successful match came up.' % module)
|
||||
|
||||
|
||||
#we should setup for manual post-processing of story-arc issues here
|
||||
#we can also search by ComicID to just grab those particular arcs as an alternative as well (not done)
|
||||
|
@ -918,7 +936,7 @@ class PostProcessor(object):
|
|||
logger.fdebug(module + '[ONEOFF-SELECTION][self.nzb_name: %s]' % self.nzb_name)
|
||||
oneoffvals = []
|
||||
for ofl in oneofflist:
|
||||
logger.info('[ONEOFF-SELECTION] ofl: %s' % ofl)
|
||||
#logger.info('[ONEOFF-SELECTION] ofl: %s' % ofl)
|
||||
oneoffvals.append({"ComicName": ofl['ComicName'],
|
||||
"ComicPublisher": ofl['PUBLISHER'],
|
||||
"Issue_Number": ofl['Issue_Number'],
|
||||
|
@ -936,7 +954,7 @@ class PostProcessor(object):
|
|||
#this seems redundant to scan in all over again...
|
||||
#for fl in filelist['comiclist']:
|
||||
for ofv in oneoffvals:
|
||||
logger.info('[ONEOFF-SELECTION] ofv: %s' % ofv)
|
||||
#logger.info('[ONEOFF-SELECTION] ofv: %s' % ofv)
|
||||
wm = filechecker.FileChecker(watchcomic=ofv['ComicName'], Publisher=ofv['ComicPublisher'], AlternateSearch=None, manual=ofv['WatchValues'])
|
||||
#if fl['sub'] is not None:
|
||||
# pathtofile = os.path.join(fl['comiclocation'], fl['sub'], fl['comicfilename'])
|
||||
|
@ -1388,12 +1406,14 @@ class PostProcessor(object):
|
|||
# this has no issueID, therefore it's a one-off or a manual post-proc.
|
||||
# At this point, let's just drop it into the Comic Location folder and forget about it..
|
||||
if sandwich is not None and 'S' in sandwich:
|
||||
self._log("One-off STORYARC mode enabled for Post-Processing for " + sarc)
|
||||
logger.info(module + ' One-off STORYARC mode enabled for Post-Processing for ' + sarc)
|
||||
self._log("One-off STORYARC mode enabled for Post-Processing for %s" % sarc)
|
||||
logger.info('%s One-off STORYARC mode enabled for Post-Processing for %s' % (module, sarc))
|
||||
else:
|
||||
self._log("One-off mode enabled for Post-Processing. All I'm doing is moving the file untouched into the Grab-bag directory.")
|
||||
logger.info(module + ' One-off mode enabled for Post-Processing. Will move into Grab-bag directory.')
|
||||
self._log("Grab-Bag Directory set to : " + mylar.CONFIG.GRABBAG_DIR)
|
||||
if mylar.CONFIG.GRABBAG_DIR is None:
|
||||
mylar.CONFIG.GRABBAG_DIR = os.path.join(mylar.CONFIG.DESTINATION_DIR, 'Grabbag')
|
||||
logger.info('%s One-off mode enabled for Post-Processing. Will move into Grab-bag directory: %s' % (module, mylar.CONFIG.GRABBAG_DIR))
|
||||
self._log("Grab-Bag Directory set to : %s" % mylar.CONFIG.GRABBAG_DIR)
|
||||
grdst = mylar.CONFIG.GRABBAG_DIR
|
||||
|
||||
odir = location
|
||||
|
@ -2358,22 +2378,22 @@ class PostProcessor(object):
|
|||
seriesmetadata['seriesmeta'] = seriesmeta
|
||||
self._run_extra_scripts(nzbn, self.nzb_folder, filen, folderp, seriesmetadata)
|
||||
|
||||
if ml is not None:
|
||||
#we only need to return self.log if it's a manual run and it's not a snatched torrent
|
||||
#manual run + not snatched torrent (or normal manual-run)
|
||||
logger.info(module + ' Post-Processing completed for: ' + series + ' ' + dispiss)
|
||||
self._log(u"Post Processing SUCCESSFUL! ")
|
||||
self.valreturn.append({"self.log": self.log,
|
||||
"mode": 'stop',
|
||||
"issueid": issueid,
|
||||
"comicid": comicid})
|
||||
if self.apicall is True:
|
||||
self.sendnotify(series, issueyear, dispiss, annchk, module)
|
||||
return self.queue.put(self.valreturn)
|
||||
#if ml is not None:
|
||||
# #we only need to return self.log if it's a manual run and it's not a snatched torrent
|
||||
# #manual run + not snatched torrent (or normal manual-run)
|
||||
# logger.info(module + ' Post-Processing completed for: ' + series + ' ' + dispiss)
|
||||
# self._log(u"Post Processing SUCCESSFUL! ")
|
||||
# self.valreturn.append({"self.log": self.log,
|
||||
# "mode": 'stop',
|
||||
# "issueid": issueid,
|
||||
# "comicid": comicid})
|
||||
# #if self.apicall is True:
|
||||
# self.sendnotify(series, issueyear, dispiss, annchk, module)
|
||||
# return self.queue.put(self.valreturn)
|
||||
|
||||
self.sendnotify(series, issueyear, dispiss, annchk, module)
|
||||
|
||||
logger.info(module + ' Post-Processing completed for: ' + series + ' ' + dispiss)
|
||||
logger.info('%s Post-Processing completed for: %s %s' % (module, series, dispiss))
|
||||
self._log(u"Post Processing SUCCESSFUL! ")
|
||||
|
||||
self.valreturn.append({"self.log": self.log,
|
||||
|
|
|
@ -22,6 +22,7 @@ import os, sys, subprocess
|
|||
|
||||
import threading
|
||||
import datetime
|
||||
from datetime import timedelta
|
||||
import webbrowser
|
||||
import sqlite3
|
||||
import itertools
|
||||
|
@ -104,6 +105,7 @@ CV_HEADERS = None
|
|||
CVURL = None
|
||||
DEMURL = None
|
||||
WWTURL = None
|
||||
WWT_CF_COOKIEVALUE = None
|
||||
KEYS_32P = None
|
||||
AUTHKEY_32P = None
|
||||
FEED_32P = None
|
||||
|
@ -125,6 +127,7 @@ SNATCHED_QUEUE = Queue.Queue()
|
|||
NZB_QUEUE = Queue.Queue()
|
||||
PP_QUEUE = Queue.Queue()
|
||||
SEARCH_QUEUE = Queue.Queue()
|
||||
SEARCH_TIER_DATE = None
|
||||
COMICSORT = None
|
||||
PULLBYFILE = None
|
||||
CFG = None
|
||||
|
@ -160,11 +163,11 @@ def initialize(config_file):
|
|||
|
||||
global CONFIG, _INITIALIZED, QUIET, CONFIG_FILE, OS_DETECT, MAINTENANCE, CURRENT_VERSION, LATEST_VERSION, COMMITS_BEHIND, INSTALL_TYPE, IMPORTLOCK, PULLBYFILE, INKDROPS_32P, \
|
||||
DONATEBUTTON, CURRENT_WEEKNUMBER, CURRENT_YEAR, UMASK, USER_AGENT, SNATCHED_QUEUE, NZB_QUEUE, PP_QUEUE, SEARCH_QUEUE, PULLNEW, COMICSORT, WANTED_TAB_OFF, CV_HEADERS, \
|
||||
IMPORTBUTTON, IMPORT_FILES, IMPORT_TOTALFILES, IMPORT_CID_COUNT, IMPORT_PARSED_COUNT, IMPORT_FAILURE_COUNT, CHECKENABLED, CVURL, DEMURL, WWTURL, \
|
||||
IMPORTBUTTON, IMPORT_FILES, IMPORT_TOTALFILES, IMPORT_CID_COUNT, IMPORT_PARSED_COUNT, IMPORT_FAILURE_COUNT, CHECKENABLED, CVURL, DEMURL, WWTURL, WWT_CF_COOKIEVALUE, \
|
||||
USE_SABNZBD, USE_NZBGET, USE_BLACKHOLE, USE_RTORRENT, USE_UTORRENT, USE_QBITTORRENT, USE_DELUGE, USE_TRANSMISSION, USE_WATCHDIR, SAB_PARAMS, \
|
||||
PROG_DIR, DATA_DIR, CMTAGGER_PATH, DOWNLOAD_APIKEY, LOCAL_IP, STATIC_COMICRN_VERSION, STATIC_APC_VERSION, KEYS_32P, AUTHKEY_32P, FEED_32P, FEEDINFO_32P, \
|
||||
MONITOR_STATUS, SEARCH_STATUS, RSS_STATUS, WEEKLY_STATUS, VERSION_STATUS, UPDATER_STATUS, DBUPDATE_INTERVAL, LOG_LANG, LOG_CHARSET, APILOCK, SEARCHLOCK, LOG_LEVEL, \
|
||||
SCHED_RSS_LAST, SCHED_WEEKLY_LAST, SCHED_MONITOR_LAST, SCHED_SEARCH_LAST, SCHED_VERSION_LAST, SCHED_DBUPDATE_LAST, COMICINFO
|
||||
SCHED_RSS_LAST, SCHED_WEEKLY_LAST, SCHED_MONITOR_LAST, SCHED_SEARCH_LAST, SCHED_VERSION_LAST, SCHED_DBUPDATE_LAST, COMICINFO, SEARCH_TIER_DATE
|
||||
|
||||
cc = mylar.config.Config(config_file)
|
||||
CONFIG = cc.read(startup=True)
|
||||
|
@ -229,6 +232,13 @@ def initialize(config_file):
|
|||
CURRENT_WEEKNUMBER = todaydate.strftime("%U")
|
||||
CURRENT_YEAR = todaydate.strftime("%Y")
|
||||
|
||||
if SEARCH_TIER_DATE is None:
|
||||
#tier the wanted listed so anything older than 14 days won't trigger the API during searches.
|
||||
#utc_date = datetime.datetime.utcnow()
|
||||
STD = todaydate - timedelta(days = 14)
|
||||
SEARCH_TIER_DATE = STD.strftime('%Y-%m-%d')
|
||||
logger.fdebug('SEARCH_TIER_DATE set to : %s' % SEARCH_TIER_DATE)
|
||||
|
||||
#set the default URL for ComicVine API here.
|
||||
CVURL = 'https://comicvine.gamespot.com/api/'
|
||||
|
||||
|
@ -472,7 +482,7 @@ def dbcheck():
|
|||
c.execute('SELECT ReleaseDate from storyarcs')
|
||||
except sqlite3.OperationalError:
|
||||
try:
|
||||
c.execute('CREATE TABLE IF NOT EXISTS storyarcs(StoryArcID TEXT, ComicName TEXT, IssueNumber TEXT, SeriesYear TEXT, IssueYEAR TEXT, StoryArc TEXT, TotalIssues TEXT, Status TEXT, inCacheDir TEXT, Location TEXT, IssueArcID TEXT, ReadingOrder INT, IssueID TEXT, ComicID TEXT, ReleaseDate TEXT, IssueDate TEXT, Publisher TEXT, IssuePublisher TEXT, IssueName TEXT, CV_ArcID TEXT, Int_IssueNumber INT, DynamicComicName TEXT, Volume TEXT, Manual TEXT)')
|
||||
c.execute('CREATE TABLE IF NOT EXISTS storyarcs(StoryArcID TEXT, ComicName TEXT, IssueNumber TEXT, SeriesYear TEXT, IssueYEAR TEXT, StoryArc TEXT, TotalIssues TEXT, Status TEXT, inCacheDir TEXT, Location TEXT, IssueArcID TEXT, ReadingOrder INT, IssueID TEXT, ComicID TEXT, ReleaseDate TEXT, IssueDate TEXT, Publisher TEXT, IssuePublisher TEXT, IssueName TEXT, CV_ArcID TEXT, Int_IssueNumber INT, DynamicComicName TEXT, Volume TEXT, Manual TEXT, DateAdded TEXT)')
|
||||
c.execute('INSERT INTO storyarcs(StoryArcID, ComicName, IssueNumber, SeriesYear, IssueYEAR, StoryArc, TotalIssues, Status, inCacheDir, Location, IssueArcID, ReadingOrder, IssueID, ComicID, ReleaseDate, IssueDate, Publisher, IssuePublisher, IssueName, CV_ArcID, Int_IssueNumber, DynamicComicName, Volume, Manual) SELECT StoryArcID, ComicName, IssueNumber, SeriesYear, IssueYEAR, StoryArc, TotalIssues, Status, inCacheDir, Location, IssueArcID, ReadingOrder, IssueID, ComicID, StoreDate, IssueDate, Publisher, IssuePublisher, IssueName, CV_ArcID, Int_IssueNumber, DynamicComicName, Volume, Manual FROM readinglist')
|
||||
c.execute('DROP TABLE readinglist')
|
||||
except sqlite3.OperationalError:
|
||||
|
@ -486,7 +496,7 @@ def dbcheck():
|
|||
c.execute('CREATE TABLE IF NOT EXISTS weekly (SHIPDATE TEXT, PUBLISHER TEXT, ISSUE TEXT, COMIC VARCHAR(150), EXTRA TEXT, STATUS TEXT, ComicID TEXT, IssueID TEXT, CV_Last_Update TEXT, DynamicName TEXT, weeknumber TEXT, year TEXT, volume TEXT, seriesyear TEXT, annuallink TEXT, rowid INTEGER PRIMARY KEY)')
|
||||
c.execute('CREATE TABLE IF NOT EXISTS importresults (impID TEXT, ComicName TEXT, ComicYear TEXT, Status TEXT, ImportDate TEXT, ComicFilename TEXT, ComicLocation TEXT, WatchMatch TEXT, DisplayName TEXT, SRID TEXT, ComicID TEXT, IssueID TEXT, Volume TEXT, IssueNumber TEXT, DynamicName TEXT)')
|
||||
c.execute('CREATE TABLE IF NOT EXISTS readlist (IssueID TEXT, ComicName TEXT, Issue_Number TEXT, Status TEXT, DateAdded TEXT, Location TEXT, inCacheDir TEXT, SeriesYear TEXT, ComicID TEXT, StatusChange TEXT)')
|
||||
c.execute('CREATE TABLE IF NOT EXISTS annuals (IssueID TEXT, Issue_Number TEXT, IssueName TEXT, IssueDate TEXT, Status TEXT, ComicID TEXT, GCDComicID TEXT, Location TEXT, ComicSize TEXT, Int_IssueNumber INT, ComicName TEXT, ReleaseDate TEXT, ReleaseComicID TEXT, ReleaseComicName TEXT, IssueDate_Edit TEXT)')
|
||||
c.execute('CREATE TABLE IF NOT EXISTS annuals (IssueID TEXT, Issue_Number TEXT, IssueName TEXT, IssueDate TEXT, Status TEXT, ComicID TEXT, GCDComicID TEXT, Location TEXT, ComicSize TEXT, Int_IssueNumber INT, ComicName TEXT, ReleaseDate TEXT, ReleaseComicID TEXT, ReleaseComicName TEXT, IssueDate_Edit TEXT, DateAdded TEXT)')
|
||||
c.execute('CREATE TABLE IF NOT EXISTS rssdb (Title TEXT UNIQUE, Link TEXT, Pubdate TEXT, Site TEXT, Size TEXT)')
|
||||
c.execute('CREATE TABLE IF NOT EXISTS futureupcoming (ComicName TEXT, IssueNumber TEXT, ComicID TEXT, IssueID TEXT, IssueDate TEXT, Publisher TEXT, Status TEXT, DisplayComicName TEXT, weeknumber TEXT, year TEXT)')
|
||||
c.execute('CREATE TABLE IF NOT EXISTS failed (ID TEXT, Status TEXT, ComicID TEXT, IssueID TEXT, Provider TEXT, ComicName TEXT, Issue_Number TEXT, NZBName TEXT, DateFailed TEXT)')
|
||||
|
@ -651,7 +661,6 @@ def dbcheck():
|
|||
except sqlite3.OperationalError:
|
||||
c.execute('ALTER TABLE issues ADD COLUMN ImageURL_ALT TEXT')
|
||||
|
||||
|
||||
## -- ImportResults Table --
|
||||
|
||||
try:
|
||||
|
@ -829,6 +838,7 @@ def dbcheck():
|
|||
c.execute('SELECT OneOff from nzblog')
|
||||
except sqlite3.OperationalError:
|
||||
c.execute('ALTER TABLE nzblog ADD COLUMN OneOff TEXT')
|
||||
|
||||
## -- Annuals Table --
|
||||
|
||||
try:
|
||||
|
@ -877,6 +887,10 @@ def dbcheck():
|
|||
except sqlite3.OperationalError:
|
||||
c.execute('ALTER TABLE annuals ADD COLUMN IssueDate_Edit TEXT')
|
||||
|
||||
try:
|
||||
c.execute('SELECT DateAdded from annuals')
|
||||
except sqlite3.OperationalError:
|
||||
c.execute('ALTER TABLE annuals ADD COLUMN DateAdded TEXT')
|
||||
|
||||
## -- Snatched Table --
|
||||
|
||||
|
@ -965,6 +979,11 @@ def dbcheck():
|
|||
except sqlite3.OperationalError:
|
||||
c.execute('ALTER TABLE storyarcs ADD COLUMN Manual TEXT')
|
||||
|
||||
try:
|
||||
c.execute('SELECT DateAdded from storyarcs')
|
||||
except sqlite3.OperationalError:
|
||||
c.execute('ALTER TABLE storyarcs ADD COLUMN DateAdded TEXT')
|
||||
|
||||
## -- searchresults Table --
|
||||
try:
|
||||
c.execute('SELECT SRID from searchresults')
|
||||
|
|
|
@ -104,7 +104,7 @@ class info32p(object):
|
|||
all_script2 = soup.find_all("link", {"rel": "alternate"})
|
||||
|
||||
authfound = False
|
||||
logger.info('%s Atttempting to integrate with all of your 32P Notification feeds.' % self.module)
|
||||
logger.info('%s Attempting to integrate with all of your 32P Notification feeds.' % self.module)
|
||||
|
||||
#get inkdrop count ...
|
||||
#user_info = soup.find_all(attrs={"class": "stat"})
|
||||
|
|
|
@ -274,6 +274,7 @@ _CONFIG_DEFINITIONS = OrderedDict({
|
|||
'MINSEEDS': (int, 'Torrents', 0),
|
||||
'ALLOW_PACKS': (bool, 'Torrents', False),
|
||||
'ENABLE_PUBLIC': (bool, 'Torrents', False),
|
||||
'PUBLIC_VERIFY': (bool, 'Torrents', True),
|
||||
|
||||
'AUTO_SNATCH': (bool, 'AutoSnatch', False),
|
||||
'AUTO_SNATCH_SCRIPT': (str, 'AutoSnatch', None),
|
||||
|
@ -761,6 +762,10 @@ class Config(object):
|
|||
except OSError:
|
||||
logger.error('[Cache Check] Could not create cache dir. Check permissions of datadir: ' + mylar.DATA_DIR)
|
||||
|
||||
if all([self.GRABBAG_DIR is None, self.DESTINATION_DIR is not None]):
|
||||
self.GRABBAG_DIR = os.path.join(self.DESTINATION_DIR, 'Grabbag')
|
||||
logger.fdebug('[Grabbag Directory] Setting One-Off directory to default location: %s' % self.GRABBAG_DIR)
|
||||
|
||||
## Sanity checking
|
||||
if any([self.COMICVINE_API is None, self.COMICVINE_API == 'None', self.COMICVINE_API == '']):
|
||||
logger.error('No User Comicvine API key specified. I will not work very well due to api limits - http://api.comicvine.com/ and get your own free key.')
|
||||
|
|
|
@ -74,6 +74,7 @@ if not LOG_LANG.startswith('en'):
|
|||
logging.getLogger('apscheduler.threadpool').setLevel(logging.WARN)
|
||||
logging.getLogger('apscheduler.scheduler').propagate = False
|
||||
logging.getLogger('apscheduler.threadpool').propagate = False
|
||||
logging.getLogger('cherrypy').propagate = False
|
||||
lg = logging.getLogger('mylar')
|
||||
lg.setLevel(logging.DEBUG)
|
||||
|
||||
|
@ -238,7 +239,7 @@ else:
|
|||
logging.getLogger('apscheduler.threadpool').setLevel(logging.WARN)
|
||||
logging.getLogger('apscheduler.scheduler').propagate = False
|
||||
logging.getLogger('apscheduler.threadpool').propagate = False
|
||||
|
||||
logging.getLogger('cherrypy').propagate = False
|
||||
|
||||
# Close and remove old handlers. This is required to reinit the loggers
|
||||
# at runtime
|
||||
|
|
|
@ -54,7 +54,7 @@ def pullsearch(comicapi, comicquery, offset, type):
|
|||
filterline+= ',name:%s' % x
|
||||
cnt+=1
|
||||
|
||||
PULLURL = mylar.CVURL + str(type) + 's?api_key=' + str(comicapi) + '&filter=name:' + filterline + '&field_list=id,name,start_year,site_detail_url,count_of_issues,image,publisher,deck,description,first_issue,last_issue&format=xml&offset=' + str(offset) # 2012/22/02 - CVAPI flipped back to offset instead of page
|
||||
PULLURL = mylar.CVURL + str(type) + 's?api_key=' + str(comicapi) + '&filter=name:' + filterline + '&field_list=id,name,start_year,site_detail_url,count_of_issues,image,publisher,deck,description,first_issue,last_issue&format=xml&sort=date_last_updated:desc&offset=' + str(offset) # 2012/22/02 - CVAPI flipped back to offset instead of page
|
||||
|
||||
#all these imports are standard on most modern python implementations
|
||||
#logger.info('MB.PULLURL:' + PULLURL)
|
||||
|
|
|
@ -107,6 +107,7 @@ class NZBGet(object):
|
|||
|
||||
stat = False
|
||||
double_pp = False
|
||||
double_type = None
|
||||
while stat is False:
|
||||
time.sleep(10)
|
||||
queueinfo = self.server.listgroups()
|
||||
|
@ -117,22 +118,37 @@ class NZBGet(object):
|
|||
else:
|
||||
if 'comicrn' in queuedl[0]['PostInfoText'].lower():
|
||||
double_pp = True
|
||||
double_type = 'ComicRN'
|
||||
elif 'nzbtomylar' in queuedl[0]['PostInfoText'].lower():
|
||||
double_pp = True
|
||||
double_type = 'nzbToMylar'
|
||||
|
||||
if all([len(queuedl[0]['ScriptStatuses']) > 0, double_pp is False]):
|
||||
for x in queuedl[0]['ScriptStatuses']:
|
||||
if 'comicrn' in x['Name'].lower():
|
||||
double_pp = True
|
||||
double_type = 'ComicRN'
|
||||
break
|
||||
elif 'nzbtomylar' in x['Name'].lower():
|
||||
double_pp = True
|
||||
double_type = 'nzbToMylar'
|
||||
break
|
||||
|
||||
if all([len(queuedl[0]['Parameters']) > 0, double_pp is False]):
|
||||
for x in queuedl[0]['Parameters']:
|
||||
if all(['comicrn' in x['Name'].lower(), x['Value'] == 'yes']):
|
||||
double_pp = True
|
||||
double_type = 'ComicRN'
|
||||
break
|
||||
elif all(['nzbtomylar' in x['Name'].lower(), x['Value'] == 'yes']):
|
||||
double_pp = True
|
||||
double_type = 'nzbToMylar'
|
||||
break
|
||||
|
||||
|
||||
if double_pp is True:
|
||||
logger.warn('ComicRN has been detected as being active for this category & download. Completed Download Handling will NOT be performed due to this.')
|
||||
logger.warn('Either disable Completed Download Handling for NZBGet within Mylar, or remove ComicRN from your category script in NZBGet.')
|
||||
logger.warn('%s has been detected as being active for this category & download. Completed Download Handling will NOT be performed due to this.' % double_type)
|
||||
logger.warn('Either disable Completed Download Handling for NZBGet within Mylar, or remove %s from your category script in NZBGet.' % double_type)
|
||||
return {'status': 'double-pp', 'failed': False}
|
||||
|
||||
logger.fdebug('status: %s' % queuedl[0]['Status'])
|
||||
|
@ -152,7 +168,7 @@ class NZBGet(object):
|
|||
found = False
|
||||
destdir = None
|
||||
double_pp = False
|
||||
hq = [hs for hs in history if hs['NZBID'] == nzbid and ('SUCCESS' in hs['Status'] or 'COPY' in hs['Status'])]
|
||||
hq = [hs for hs in history if hs['NZBID'] == nzbid and ('SUCCESS' in hs['Status'] or ('COPY' in hs['Status'] and 'DELETED' not in hq[0]['Status']))]
|
||||
if len(hq) > 0:
|
||||
logger.fdebug('found matching completed item in history. Job has a status of %s' % hq[0]['Status'])
|
||||
if len(hq[0]['ScriptStatuses']) > 0:
|
||||
|
|
|
@ -114,6 +114,7 @@ def torrents(pickfeed=None, seriesname=None, issue=None, feedinfo=None):
|
|||
elif pickfeed == "999": #WWT rss feed
|
||||
feed = mylar.WWTURL + 'rss.php?cat=132,50'
|
||||
feedtype = ' from the New Releases RSS Feed from WorldWideTorrents'
|
||||
verify = bool(mylar.CONFIG.PUBLIC_VERIFY)
|
||||
elif int(pickfeed) >= 7 and feedinfo is not None:
|
||||
#personal 32P notification feeds.
|
||||
#get the info here
|
||||
|
@ -135,24 +136,35 @@ def torrents(pickfeed=None, seriesname=None, issue=None, feedinfo=None):
|
|||
elif pickfeed == '1' or pickfeed == '4' or int(pickfeed) > 7:
|
||||
picksite = '32P'
|
||||
|
||||
if all([pickfeed != '4', pickfeed != '3', pickfeed != '5', pickfeed != '999']):
|
||||
if all([pickfeed != '4', pickfeed != '3', pickfeed != '5']):
|
||||
payload = None
|
||||
|
||||
ddos_protection = round(random.uniform(0,15),2)
|
||||
time.sleep(ddos_protection)
|
||||
|
||||
logger.info('Now retrieving feed from %s [%s]' % (picksite,feed))
|
||||
try:
|
||||
headers = {'Accept-encoding': 'gzip',
|
||||
'User-Agent': mylar.CV_HEADERS['User-Agent']}
|
||||
cf_cookievalue = None
|
||||
scraper = cfscrape.create_scraper()
|
||||
if pickfeed == '2':
|
||||
cf_cookievalue, cf_user_agent = scraper.get_tokens(feed)
|
||||
headers = {'Accept-encoding': 'gzip',
|
||||
'User-Agent': cf_user_agent}
|
||||
if pickfeed == '999':
|
||||
if all([pickfeed == '999', mylar.WWT_CF_COOKIEVALUE is None]):
|
||||
try:
|
||||
cf_cookievalue, cf_user_agent = scraper.get_tokens(feed, user_agent=mylar.CV_HEADERS['User-Agent'])
|
||||
except Exception as e:
|
||||
logger.warn('[WWT-RSSFEED] Unable to retrieve RSS properly: %s' % e)
|
||||
lp+=1
|
||||
continue
|
||||
else:
|
||||
mylar.WWT_CF_COOKIEVALUE = cf_cookievalue
|
||||
cookievalue = cf_cookievalue
|
||||
elif pickfeed == '999':
|
||||
cookievalue = mylar.WWT_CF_COOKIEVALUE
|
||||
|
||||
if cf_cookievalue:
|
||||
r = scraper.get(feed, verify=verify, cookies=cf_cookievalue, headers=headers)
|
||||
r = scraper.get(feed, verify=verify, cookies=cookievalue, headers=headers)
|
||||
else:
|
||||
r = scraper.get(feed, verify=verify)
|
||||
r = scraper.get(feed, verify=verify, headers=headers)
|
||||
except Exception, e:
|
||||
logger.warn('Error fetching RSS Feed Data from %s: %s' % (picksite, e))
|
||||
lp+=1
|
||||
|
@ -188,12 +200,12 @@ def torrents(pickfeed=None, seriesname=None, issue=None, feedinfo=None):
|
|||
#DEMONOID SEARCH RESULT (parse)
|
||||
pass
|
||||
elif pickfeed == "999":
|
||||
try:
|
||||
feedme = feedparser.parse(feed)
|
||||
except Exception, e:
|
||||
logger.warn('Error fetching RSS Feed Data from %s: %s' % (picksite, e))
|
||||
lp+=1
|
||||
continue
|
||||
#try:
|
||||
# feedme = feedparser.parse(feed)
|
||||
#except Exception, e:
|
||||
# logger.warn('Error fetching RSS Feed Data from %s: %s' % (picksite, e))
|
||||
# lp+=1
|
||||
# continue
|
||||
|
||||
#WWT / FEED
|
||||
for entry in feedme.entries:
|
||||
|
@ -233,9 +245,11 @@ def torrents(pickfeed=None, seriesname=None, issue=None, feedinfo=None):
|
|||
tmpsz_end = tmp1 + 2
|
||||
tmpsz_st += 7
|
||||
else:
|
||||
tmpsz = tmpsz[:80] #limit it to the first 80 so it doesn't pick up alt covers mistakingly
|
||||
tmpsz_st = tmpsz.rfind('|')
|
||||
if tmpsz_st != -1:
|
||||
tmpsize = tmpsz[tmpsz_st:tmpsz_st+14]
|
||||
tmpsz_end = tmpsz.find('<br />', tmpsz_st)
|
||||
tmpsize = tmpsz[tmpsz_st:tmpsz_end] #st+14]
|
||||
if any(['GB' in tmpsize, 'MB' in tmpsize, 'KB' in tmpsize, 'TB' in tmpsize]):
|
||||
tmp1 = tmpsz.find('MB', tmpsz_st)
|
||||
if tmp1 == -1:
|
||||
|
@ -260,7 +274,6 @@ def torrents(pickfeed=None, seriesname=None, issue=None, feedinfo=None):
|
|||
elif 'TB' in tmpsz[tmpsz_st:tmpsz_end]:
|
||||
szform = 'TB'
|
||||
sz = 'T'
|
||||
|
||||
tsize = helpers.human2bytes(str(tmpsz[tmpsz_st:tmpsz.find(szform, tmpsz_st) -1]) + str(sz))
|
||||
|
||||
#timestamp is in YYYY-MM-DDTHH:MM:SS+TZ :/
|
||||
|
@ -278,9 +291,10 @@ def torrents(pickfeed=None, seriesname=None, issue=None, feedinfo=None):
|
|||
feeddata.append({
|
||||
'site': picksite,
|
||||
'title': feedme.entries[i].title,
|
||||
'link': str(urlparse.urlparse(feedme.entries[i].link)[2].rpartition('/')[0].rsplit('/',2)[2]),
|
||||
'link': str(re.sub('genid=', '', urlparse.urlparse(feedme.entries[i].link)[4]).strip()),
|
||||
#'link': str(urlparse.urlparse(feedme.entries[i].link)[2].rpartition('/')[0].rsplit('/',2)[2]),
|
||||
'pubdate': pdate,
|
||||
'size': tsize,
|
||||
'size': tsize
|
||||
})
|
||||
|
||||
#32p / FEEDS
|
||||
|
@ -942,7 +956,7 @@ def torsend2client(seriesname, issue, seriesyear, linkit, site, pubhash=None):
|
|||
wwt_referrer = 'http' + mylar.WWTURL[5:]
|
||||
|
||||
headers = {'Accept-encoding': 'gzip',
|
||||
'User-Agent': str(mylar.USER_AGENT),
|
||||
'User-Agent': mylar.CV_HEADERS['User-Agent'],
|
||||
'Referer': wwt_referrer}
|
||||
|
||||
logger.fdebug('Grabbing torrent [id:' + str(linkit) + '] from url:' + str(url))
|
||||
|
@ -978,8 +992,11 @@ def torsend2client(seriesname, issue, seriesyear, linkit, site, pubhash=None):
|
|||
return "fail"
|
||||
try:
|
||||
scraper = cfscrape.create_scraper()
|
||||
if cf_cookievalue:
|
||||
r = scraper.get(url, params=payload, cookies=cf_cookievalue, verify=verify, stream=True, headers=headers)
|
||||
if site == 'WWT':
|
||||
if mylar.WWT_CF_COOKIEVALUE is None:
|
||||
cf_cookievalue, cf_user_agent = s.get_tokens(newurl, user_agent=mylar.CV_HEADERS['User-Agent'])
|
||||
mylar.WWT_CF_COOKIEVALUE = cf_cookievalue
|
||||
r = scraper.get(url, params=payload, cookies=mylar.WWT_CF_COOKIEVALUE, verify=verify, stream=True, headers=headers)
|
||||
else:
|
||||
r = scraper.get(url, params=payload, verify=verify, stream=True, headers=headers)
|
||||
#r = requests.get(url, params=payload, verify=verify, stream=True, headers=headers)
|
||||
|
|
|
@ -1916,7 +1916,8 @@ def searchforissue(issueid=None, new=False, rsscheck=None, manual=False):
|
|||
'SARC': None,
|
||||
'StoryArcID': None,
|
||||
'IssueArcID': None,
|
||||
'mode': 'want'
|
||||
'mode': 'want',
|
||||
'DateAdded': iss['DateAdded']
|
||||
})
|
||||
elif stloop == 2:
|
||||
if mylar.CONFIG.SEARCH_STORYARCS is True or rsscheck:
|
||||
|
@ -1934,7 +1935,8 @@ def searchforissue(issueid=None, new=False, rsscheck=None, manual=False):
|
|||
'SARC': iss['StoryArc'],
|
||||
'StoryArcID': iss['StoryArcID'],
|
||||
'IssueArcID': iss['IssueArcID'],
|
||||
'mode': 'story_arc'
|
||||
'mode': 'story_arc',
|
||||
'DateAdded': iss['DateAdded']
|
||||
})
|
||||
cnt+=1
|
||||
logger.info('Storyarcs to be searched for : %s' % cnt)
|
||||
|
@ -1952,7 +1954,8 @@ def searchforissue(issueid=None, new=False, rsscheck=None, manual=False):
|
|||
'SARC': None,
|
||||
'StoryArcID': None,
|
||||
'IssueArcID': None,
|
||||
'mode': 'want_ann'
|
||||
'mode': 'want_ann',
|
||||
'DateAdded': iss['DateAdded']
|
||||
})
|
||||
stloop-=1
|
||||
|
||||
|
@ -2020,7 +2023,22 @@ def searchforissue(issueid=None, new=False, rsscheck=None, manual=False):
|
|||
else:
|
||||
ComicYear = str(result['IssueDate'])[:4]
|
||||
|
||||
if rsscheck is None:
|
||||
if result['DateAdded'] is None:
|
||||
DA = datetime.datetime.today()
|
||||
DateAdded = DA.strftime('%Y-%m-%d')
|
||||
if result['mode'] == 'want':
|
||||
table = 'issues'
|
||||
elif result['mode'] == 'want_ann':
|
||||
table = 'annuals'
|
||||
elif result['mode'] == 'story_arc':
|
||||
table = 'storyarcs'
|
||||
logger.fdebug('%s #%s did not have a DateAdded recorded, setting it : %s' % (comic['ComicName'], result['Issue_Number'], DateAdded))
|
||||
myDB.upsert(table, {'DateAdded': DateAdded}, {'IssueID': result['IssueID']})
|
||||
|
||||
else:
|
||||
DateAdded = result['DateAdded']
|
||||
|
||||
if rsscheck is None and DateAdded >= mylar.SEARCH_TIER_DATE:
|
||||
logger.info('adding: ComicID:%s IssueiD: %s' % (result['ComicID'], result['IssueID']))
|
||||
mylar.SEARCH_QUEUE.put({'comicname': comic['ComicName'], 'seriesyear': SeriesYear, 'issuenumber': result['Issue_Number'], 'issueid': result['IssueID'], 'comicid': result['ComicID']})
|
||||
continue
|
||||
|
|
|
@ -2042,6 +2042,7 @@ class WebInterface(object):
|
|||
isCounts[1] = 0 #1 wanted
|
||||
isCounts[2] = 0 #2 snatched
|
||||
isCounts[3] = 0 #3 failed
|
||||
isCounts[4] = 0 #3 wantedTier
|
||||
|
||||
ann_list = []
|
||||
|
||||
|
@ -2060,7 +2061,8 @@ class WebInterface(object):
|
|||
ann_list += annuals_list
|
||||
issues += annuals_list
|
||||
|
||||
issues_tmp = sorted(issues, key=itemgetter('ReleaseDate'), reverse=True)
|
||||
issues_tmp1 = sorted(issues, key=itemgetter('DateAdded'), reverse=True)
|
||||
issues_tmp = sorted(issues_tmp1, key=itemgetter('ReleaseDate'), reverse=True)
|
||||
issues = sorted(issues_tmp, key=itemgetter('Status'), reverse=True)
|
||||
|
||||
for curResult in issues:
|
||||
|
@ -2070,17 +2072,21 @@ class WebInterface(object):
|
|||
continue
|
||||
else:
|
||||
if seas in curResult['Status'].lower():
|
||||
sconv = baseissues[seas]
|
||||
isCounts[sconv]+=1
|
||||
if all([curResult['DateAdded'] <= mylar.SEARCH_TIER_DATE, curResult['Status'] == 'Wanted']):
|
||||
isCounts[4]+=1
|
||||
else:
|
||||
sconv = baseissues[seas]
|
||||
isCounts[sconv]+=1
|
||||
continue
|
||||
|
||||
isCounts = {"Wanted": str(isCounts[1]),
|
||||
"Snatched": str(isCounts[2]),
|
||||
"Failed": str(isCounts[3]),
|
||||
"StoryArcs": str(len(arcs))}
|
||||
"StoryArcs": str(len(arcs)),
|
||||
"WantedTier": str(isCounts[4])}
|
||||
|
||||
iss_cnt = int(isCounts['Wanted'])
|
||||
wantedcount = iss_cnt # + ann_cnt
|
||||
wantedcount = iss_cnt + int(isCounts['WantedTier']) # + ann_cnt
|
||||
|
||||
#let's straightload the series that have no issue data associated as of yet (ie. new series) from the futurepulllist
|
||||
future_nodata_upcoming = myDB.select("SELECT * FROM futureupcoming WHERE IssueNumber='1' OR IssueNumber='0'")
|
||||
|
|
|
@ -962,10 +962,10 @@ def new_pullcheck(weeknumber, pullyear, comic1off_name=None, comic1off_id=None,
|
|||
annualidmatch = [x for x in weeklylist if week['annuallink'] is not None and (int(x['ComicID']) == int(week['annuallink']))]
|
||||
#The above will auto-match against ComicID if it's populated on the pullsite, otherwise do name-matching.
|
||||
namematch = [ab for ab in weeklylist if ab['DynamicName'] == week['dynamicname']]
|
||||
logger.fdebug('rowid: ' + str(week['rowid']))
|
||||
logger.fdebug('idmatch: ' + str(idmatch))
|
||||
logger.fdebug('annualidmatch: ' + str(annualidmatch))
|
||||
logger.fdebug('namematch: ' + str(namematch))
|
||||
#logger.fdebug('rowid: ' + str(week['rowid']))
|
||||
#logger.fdebug('idmatch: ' + str(idmatch))
|
||||
#logger.fdebug('annualidmatch: ' + str(annualidmatch))
|
||||
#logger.fdebug('namematch: ' + str(namematch))
|
||||
if any([idmatch,namematch,annualidmatch]):
|
||||
if idmatch and not annualidmatch:
|
||||
comicname = idmatch[0]['ComicName'].strip()
|
||||
|
|
mylar/wwt.py (10 changed lines)
|
@ -22,7 +22,7 @@ import time
|
|||
import sys
|
||||
import datetime
|
||||
from datetime import timedelta
|
||||
|
||||
import lib.cfscrape as cfscrape
|
||||
|
||||
import mylar
|
||||
from mylar import logger, helpers
|
||||
|
@ -43,9 +43,13 @@ class wwt(object):
|
|||
'incldead': 0,
|
||||
'lang': 0}
|
||||
|
||||
with requests.Session() as s:
|
||||
with cfscrape.create_scraper() as s:
|
||||
newurl = self.url + 'torrents-search.php'
|
||||
r = s.get(newurl, params=params, verify=True)
|
||||
if mylar.WWT_CF_COOKIEVALUE is None:
|
||||
cf_cookievalue, cf_user_agent = s.get_tokens(newurl, user_agent=mylar.CV_HEADERS['User-Agent'])
|
||||
mylar.WWT_CF_COOKIEVALUE = cf_cookievalue
|
||||
|
||||
r = s.get(newurl, params=params, verify=True, cookies=mylar.WWT_CF_COOKIEVALUE, headers=mylar.CV_HEADERS)
|
||||
|
||||
if not r.status_code == 200:
|
||||
return
|
||||
|
|