FIX: (#1466) When post-processing, if the filename did not have an issue year or volume, an error would occur
IMP: (#1418) Separate copy/move/hardlink/symlink option for story arcs
IMP: (#1417) Added story arc folder format to story arc options - can now specify the folder format used for story arc naming
IMP: (#1374) Added rtorrent_authentication to config.ini allowing the user to specify digest/basic authentication, as well as the RPC url (rtorrent_rpc_url) and SSL (rtorrent_ssl). rtorrent_host no longer requires a protocol in the field
IMP: Instead of hitting the intermediary/32P site to get the torrent group, will now retain the specific id for the torrent group to speed up searches
FIX: (#1472) Fixed some problems with series that contain special characters being searched against 32P and returning no results
FIX: Added some specific exception catches when attempting to retrieve search results for nzb providers
FIX: Fixed publisher retention problem for story arcs (note: story arcs will have to be refreshed)
FIX: Fixed a problem with the pull-list matching up to the correct series when the issue was an annual for a given series with the alt_pull 2 method (would create an invalid link on the Wanted tab)
IMP: (#1470) Added config.ini option to specify folder formatting for weekly folder numbering, 0=YYYY-WN, 1=YYYY-MM-DD
IMP: (#1471) Added a quick check to ensure that newznab host entries do not contain leading/trailing spaces, stripping them accordingly
FIX: (#1476) Annuals now have the option to be added to the reading list via the series detail page
IMP: Fixed password fields to allow for special characters within passwords
IMP: Weekly pull will now correctly show the status for previous/future weeks for watchlisted series
IMP: Download option on the pull-list will work for previous weeks
FIX: Fixed some issues with one-off and story arc post-processing using different file operations
FIX: Fixed error when performing a 'Force Update Active Comics' from the Manage tab (or the scheduled updating of all active comics)
IMP: Story arc issues that are not on the watchlist will now retain a value for volume to help when searching and file-checking/post-processing
FIX: (#1473) Changed pull-list dates on the pull-list page to unicode to allow for non-English presentation

evilhero 2016-12-02 12:45:49 -05:00
parent 8ad389fd8f
commit 021459c280
30 changed files with 1355 additions and 495 deletions

@ -626,6 +626,8 @@
<a href="#" title="Archive" onclick="doAjaxCall('archiveissue?IssueID=${annual['IssueID']}',$(this),'table')"><img src="interfaces/default/images/archive_icon.png" height="25" width="25" title="Mark issue as Archived" class="highqual" /></a>
<a href="#" title="Add to Reading List"><img src="interfaces/default/images/glasses-icon.png" height="25" width="25" class="highqual" /></a>
-->
<a href="#" title="Add to Reading List" onclick="doAjaxCall('addtoreadlist?IssueID=${annual['IssueID']}',$(this),'table')" data-success="${aninfo['annualComicName']} #${annual['Issue_Number']} added to Reading List"><img src="interfaces/default/images/glasses-icon.png" height="25" width="25" class="highqual" /></a>
<a href="#" onclick="doAjaxCall('retryit?ComicName=${annual['ComicName'] |u}&ComicID=${annual['ComicID']}&IssueID=${annual['IssueID']}&IssueNumber=${annual['Issue_Number']}&ComicYear=${annual['IssueDate']}&ReleaseComicID=${annual['ReleaseComicID']}', $(this),'table')" data-success="Retrying the same version of '${annual['ComicName']}' '${annual['Issue_Number']}'" title="Retry the same download again"><img src="interfaces/default/images/retry_icon.png" height="25" width="25" class="highqual" /></a>
<a href="#" title="Mark annual as Skipped" onclick="doAjaxCall('unqueueissue?IssueID=${annual['IssueID']}&ComicID=${annual['ComicID']}',$(this),'table')" data-success="'${annual['Issue_Number']}' has been marked as skipped"><img src="interfaces/default/images/skipped_icon.png" height="25" width="25" class="highqual" /></a>

@ -150,7 +150,7 @@
<div class="row">
<label>HTTP Password</label>
<input type="password" name="http_password" value="${config['http_pass']}" size="30">
<input type="password" name="http_password" value="${config['http_pass']| h}" size="30">
</div>
<div class="row checkbox">
<input type="checkbox" name="launch_browser" value="1" ${config['launch_browser']} /> <label>Launch Browser on Startup</label>
@ -283,7 +283,7 @@
</div>
<div class="row">
<label>SABnzbd Password:</label>
<input type="password" name="sab_password" value="${config['sab_pass']}" size="20">
<input type="password" name="sab_password" value="${config['sab_pass']| h}" size="20">
</div>
<div Class="row">
<div class="populatesab">
@ -349,7 +349,7 @@
</div>
<div class="row">
<label>NZBGet Password:</label>
<input type="password" name="nzbget_password" value="${config['nzbget_pass']}" size="20">
<input type="password" name="nzbget_password" value="${config['nzbget_pass']| h}" size="20">
</div>
<div class="row">
<label>NZBGet Download Directory</label>
@ -438,7 +438,7 @@
</div>
<div class="row">
<label>Seedbox Password</label>
<input type="password" name="seedbox_pass" value="${config['seedbox_pass']}" size="30">
<input type="password" name="seedbox_pass" value="${config['seedbox_pass']| h}" size="30">
</div>
<div class="row">
<label>Watch Directory</label>
@ -457,7 +457,7 @@
</div>
<div class="row">
<label>uTorrent Password</label>
<input type="password" name="utorrent_password" value="${config['utorrent_password']}" size="30">
<input type="password" name="utorrent_password" value="${config['utorrent_password']| h}" size="30">
</div>
<div class="row">
<label>uTorrent Label</label>
@ -466,11 +466,37 @@
</div>
</fieldset>
<fieldset id="rtorrent_options">
<small class="heading"><span style="float: left; margin-right: .3em; margin-top: 4px;" class="ui-icon ui-icon-info"></span>Will *ONLY* work if rtorrent has RPC Support</small>
<div class="row">
<label>rTorrent Host:Port/RPC</label>
<label>rTorrent Host:Port (optional)</label>
<input type="text" name="rtorrent_host" value="${config['rtorrent_host']}" size="30">
<small>ie. https://my.rtorrent:80/myuser/RPC1</small>
<small>ie. my.rtorrent:80, 192.168.1.1, scgi://localhost:5000</small>
</div>
<div class="row checkbox left clearfix">
<input id="rtorrent_ssl" type="checkbox" onclick="initConfigCheckbox($this));" name="rtorrent_ssl" value="1" ${config['rtorrent_ssl']} /><label>SSL</label>
</div>
<div class="config">
<div class="row checkbox left clearfix">
<input id="rtorrent_verify" type="checkbox" name="rtorrent_verify" value="1" ${config['rtorrent_verify']} /><label>Verify SSL</label>
</div>
</div>
<div class="row">
<label>rTorrent Authentication</label>
<select name="rtorrent_authenticaiton">
%for x in ['basic', 'digest']:
<%
if config['rtorrent_authentication'] == x:
outputselect = 'selected'
else:
outputselect = ''
%>
<option value=${x} ${outputselect}>${x}</option>
%endfor
</select>
</div>
<div class="row">
<label>rTorrent RPC</label>
<input type="text" name="rtorrent_rpc_url" value="${config['rtorrent_rpc_url']}" size="30">
<small>ie. httprpc plugin = rutorrent/plugins/httprpc/action.php<br>rpc plugin = user/RPC2</small>
</div>
<div class="row">
<label>rTorrent Username</label>
@ -478,10 +504,10 @@
</div>
<div class="row">
<label>rTorrent Password</label>
<input type="password" name="rtorrent_password" value="${config['rtorrent_password']}" size="30">
<input type="password" name="rtorrent_password" value="${config['rtorrent_password']| h}" size="30">
</div>
<div class="row">
<label>Watch Directory</label>
<label>rTorrent Directory</label>
<input type="text" name="rtorrent_directory" value="${config['rtorrent_directory']}" size="30"><br/>
<small>Folder path where torrent download will be assigned</small>
</div>
@ -506,7 +532,7 @@
</div>
<div class="row">
<label>Transmission Password</label>
<input type="password" name="transmission_password" value="${config['transmission_password']}" size="30">
<input type="password" name="transmission_password" value="${config['transmission_password']| h}" size="30">
</div>
<div class="row">
<label>Transmission Directory</label>
@ -624,7 +650,7 @@
</div>
<div class="row">
<label>&nbspPassword</label>
<input type="password" name="password_32p" value="${config['password_32p']}" size="36">
<input type="password" name="password_32p" value="${config['password_32p']| h}" size="36">
<small>( monitor the NEW releases feed & your personal notifications )</small>
</div>
<div align="center" class="row">
@ -1674,6 +1700,7 @@
initConfigCheckbox("#enforce_perms");
initConfigCheckbox("#enable_api");
initConfigCheckbox("#sab_to_mylar");
initConfigCheckbox("#rtorrent_ssl");
initConfigCheckbox("#usenewznab");
initConfigCheckbox("#enable_torznab");
initConfigCheckbox("#usenzbsu");

@ -39,7 +39,7 @@
<form action="importReadlist" method="get">
<fieldset>
<input type="text" value="" runat="server" placeholder="Enter full path to .cbl file to import" onfocus="if
(this.value==this.defaultValue) this.value='';" name="filename" size="40" />
(this.value==this.defaultValue) this.value='';" name="filename" size="45" />
<input type="submit" value="Import">
</fieldset>
</form>
@ -48,12 +48,43 @@
<form action="arcOptions" id="chkoptions" method="GET">
<fieldset>
<legend>Options</legend>
<div class="row checkbox left clearfix">
<div class="row checkbox left clearfix">
<%
storyarcdest = os.path.join(mylar.DESTINATION_DIR, 'StoryArcs')
%>
<input type="checkbox" style="vertical-align: middle; margin: 3px; margin-top: -1px;" name="storyarcdir" id="storyarcdir" value="1" ${checked(mylar.STORYARCDIR)} /><label>Arcs in StoryArc Directory </br><small>(${storyarcdest})</small></label>
<input type="checkbox" style="vertical-align: middle; margin: 3px; margin-top: -1px;" name="read2filename" id="read2filename" value="1" ${checked(mylar.READ2FILENAME)} /><label>Append Reading # to filename</label>
<input type="checkbox" style="vertical-align: middle; margin: 3px; margin-top: -1px;" name="storyarcdir" id="storyarcdir" value="1" ${checked(mylar.STORYARCDIR)} /><label>Arcs in StoryArc Directory </br><small>(${storyarcdest})</small></label>
</div>
<div id="arc_options">
<div class="row">
<label>Arc Folder Format</label>
<input type="text" title="$publisher, $spanyears, $arc" name="arc_folderformat" value="${mylar.ARC_FOLDERFORMAT}" size="25">
</div>
</div>
<input type="checkbox" style="vertical-align: middle; margin: 3px; margin-top: -1px;" name="read2filename" id="read2filename" value="1" ${checked(mylar.READ2FILENAME)} /><label>Append Reading # to filename</label></br>
<%
if mylar.STORYARCDIR:
carcdir = 'StoryArc'
else:
carcdir = 'GrabBag'
%>
<input type="checkbox" style="vertical-align: middle; margin: 3px; margin-top: -1px;" name="copy2arcdir" id="copy2arcdir" value="1" ${checked(mylar.COPY2ARCDIR)} />
<select name="arc_fileops" id="arc_fileops">
<%
if 'windows' in mylar.OS_DETECT.lower():
optionlist = ['move', 'copy']
else:
optionlist = ['move', 'copy', 'hardlink', 'softlink']
%>
%for x in optionlist:
<%
if mylar.ARC_FILEOPS == x:
outputselect = 'selected'
else:
outputselect = ''
%>
<option value=${x} ${outputselect}>${x}</option>
%endfor
</select><label> watchlisted issues to ${carcdir} Directory</label>
</div>
</fieldset>
<div>
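The $publisher, $spanyears and $arc tokens are filled in when the arc folder is built (the post-processing hunks below call helpers.arcformat for this). A minimal sketch of the substitution, assuming the helper does simple token replacement; the real implementation is not part of this diff:

import os
import mylar
from mylar import helpers

def arcformat_sketch(arc, spanyears, publisher):
    # hypothetical illustration: '$arc ($spanyears)' with arc='Civil War'
    # and spanyears='2006-2007' yields 'Civil War (2006-2007)'
    folder = mylar.ARC_FOLDERFORMAT
    for token, value in (('$publisher', publisher), ('$spanyears', spanyears), ('$arc', arc)):
        folder = folder.replace(token, value or '')
    return os.path.join(mylar.DESTINATION_DIR, 'StoryArcs', helpers.filesafe(folder))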
@ -132,6 +163,24 @@
<script>
function initThisPage() {
if ($("#storyarcdir").is(":checked"))
{
$("#arc_options").show();
}
else
{
$("#arc_options").hide();
}
$("#storyarcdir").click(function(){
if ($("#storyarcdir").is(":checked"))
{
$("#arc_options").slideDown();
}
else
{
$("#arc_options").slideUp();
}
});
$(function() {
$( "#tabs" ).tabs();
});

@ -8,7 +8,7 @@
<%def name="headerIncludes()">
<div id="subhead_container">
<div id="subhead_menu">
<a id="menu_link_refresh" href="manualpull">Refresh Pull-list</a>
<a href="#" id="menu_link_refresh" onclick="doAjaxCall('pullist?week=${weekinfo['weeknumber']}&year=${weekinfo['year']}',$(this),'table')" data-success="Refresh submitted.">Refresh Pull-list</a>
<a id="menu_link_retry" href="pullrecreate">Recreate Pull-list</a>
<a id="menu_link_scan" class="button">Download</a>
</div>

@ -22,13 +22,11 @@ import os.path
import time
import xmlrpclib
from rtorrent.common import find_torrent, \
is_valid_port, convert_version_tuple_to_str
from rtorrent.connection import Connection
from rtorrent.common import find_torrent, join_uri, \
update_uri, is_valid_port, convert_version_tuple_to_str
from rtorrent.lib.torrentparser import TorrentParser
from rtorrent.lib.xmlrpc.http import HTTPServerProxy
from rtorrent.lib.xmlrpc.scgi import SCGIServerProxy
from rtorrent.rpc import Method
from rtorrent.lib.xmlrpc.basic_auth import BasicAuthTransport
from rtorrent.torrent import Torrent
from rtorrent.group import Group
import rtorrent.rpc # @UnresolvedImport
@ -38,96 +36,28 @@ __author__ = "Chris Lucas"
__contact__ = "chris@chrisjlucas.com"
__license__ = "MIT"
MIN_RTORRENT_VERSION = (0, 8, 1)
MIN_RTORRENT_VERSION_STR = convert_version_tuple_to_str(MIN_RTORRENT_VERSION)
class RTorrent:
""" Create a new rTorrent connection """
rpc_prefix = None
def __init__(self, uri, username=None, password=None,
verify=False, sp=None, sp_kwargs=None):
self.uri = uri # : From X{__init__(self, url)}
self.username = username
self.password = password
self.schema = urllib.splittype(uri)[0]
if sp:
self.sp = sp
elif self.schema in ['http', 'https']:
self.sp = HTTPServerProxy
elif self.schema == 'scgi':
self.sp = SCGIServerProxy
else:
raise NotImplementedError()
self.sp_kwargs = sp_kwargs or {}
def __init__(self, uri, auth=None, verify_server=False, verify_ssl=True, sp=None, sp_kwargs=None):
self.connection = Connection(uri, auth, verify_ssl, sp, sp_kwargs)
self.torrents = [] # : List of L{Torrent} instances
self._rpc_methods = [] # : List of rTorrent RPC methods
self._torrent_cache = []
self._client_version_tuple = ()
if verify is True:
self._verify_conn()
self._torrent_cache = []
# Verify connection is valid
if verify_server is True:
self.connection.verify()
@property
def client(self):
return self.connection.client
def _get_conn(self):
"""Get ServerProxy instance"""
if self.username is not None and self.password is not None:
if self.schema == 'scgi':
raise NotImplementedError()
return self.sp(
self.uri,
transport=BasicAuthTransport(self.username, self.password),
**self.sp_kwargs
)
return self.sp(self.uri, **self.sp_kwargs)
def _verify_conn(self):
# check for rpc methods that should be available
assert "system.client_version" in self._get_rpc_methods(), "Required RPC method not available."
assert "system.library_version" in self._get_rpc_methods(), "Required RPC method not available."
# minimum rTorrent version check
assert self._meets_version_requirement() is True,\
"Error: Minimum rTorrent version required is {0}".format(
MIN_RTORRENT_VERSION_STR)
def _meets_version_requirement(self):
return self._get_client_version_tuple() >= MIN_RTORRENT_VERSION
def _get_client_version_tuple(self):
conn = self._get_conn()
if not self._client_version_tuple:
if not hasattr(self, "client_version"):
setattr(self, "client_version",
conn.system.client_version())
rtver = getattr(self, "client_version")
self._client_version_tuple = tuple([int(i) for i in
rtver.split(".")])
return self._client_version_tuple
def _update_rpc_methods(self):
self._rpc_methods = self._get_conn().system.listMethods()
return self._rpc_methods
def _get_rpc_methods(self):
""" Get list of raw RPC commands
@return: raw RPC commands
@rtype: list
"""
return(self._rpc_methods or self._update_rpc_methods())
return self.client
def get_torrents(self, view="main"):
"""Get list of all torrents in specified view
@ -199,7 +129,7 @@ class RTorrent:
return(func_name)
def load_torrent(self, torrent, start=False, verbose=False, verify_load=True):
def load_torrent(self, torrent, start=False, verbose=False, verify_load=True, verify_retries=3):
"""
Loads torrent into rTorrent (with various enhancements)
@ -244,9 +174,8 @@ class RTorrent:
getattr(p, func_name)(torrent)
if verify_load:
MAX_RETRIES = 3
i = 0
while i < MAX_RETRIES:
while i < verify_retries:
self.get_torrents()
if info_hash in [t.info_hash for t in self.torrents]:
break
@ -319,7 +248,7 @@ class RTorrent:
assert view is not None, "view parameter required on non-persistent groups"
p.group.insert('', name, view)
self._update_rpc_methods()
self.connection._update_rpc_methods()
def get_group(self, name):
assert name is not None, "group name required"
@ -402,8 +331,8 @@ def _build_class_methods(class_obj):
def __compare_rpc_methods(rt_new, rt_old):
from pprint import pprint
rt_new_methods = set(rt_new._get_rpc_methods())
rt_old_methods = set(rt_old._get_rpc_methods())
rt_new_methods = set(rt_new.connection._get_rpc_methods())
rt_old_methods = set(rt_old.connection._get_rpc_methods())
print("New Methods:")
pprint(rt_new_methods - rt_old_methods)
print("Methods not in new rTorrent:")
@ -418,7 +347,7 @@ def __check_supported_methods(rt):
rtorrent.torrent.methods +
rtorrent.tracker.methods +
rtorrent.peer.methods])
all_methods = set(rt._get_rpc_methods())
all_methods = set(rt.connection._get_rpc_methods())
print("Methods NOT in supported methods")
pprint(all_methods - supported_methods)
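With the connection handling extracted into Connection, constructing a client now looks like this (a usage sketch against the new signature; host and credentials are placeholders):

from rtorrent import RTorrent

# auth is a (method, username, password) tuple; verify_server checks the
# RPC method list and the minimum rTorrent version up front
rt = RTorrent('https://my.rtorrent/user/RPC2',
              auth=('digest', 'myuser', 'mypass'),
              verify_server=True)
for torrent in rt.get_torrents():
    print torrent.info_hash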

@ -17,7 +17,8 @@
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
import urlparse
import os
from rtorrent.compat import is_py3
@ -84,3 +85,67 @@ def safe_repr(fmt, *args, **kwargs):
return out.encode("utf-8")
else:
return fmt.format(*args, **kwargs)
def split_path(path):
fragments = path.split('/')
if len(fragments) == 1:
return fragments
if not fragments[-1]:
return fragments[:-1]
return fragments
def join_path(base, path):
# Return if we have a new absolute path
if os.path.isabs(path):
return path
# non-absolute base encountered
if base and not os.path.isabs(base):
raise NotImplementedError()
return '/'.join(split_path(base) + split_path(path))
def join_uri(base, uri, construct=True):
p_uri = urlparse.urlparse(uri)
# Return if there is nothing to join
if not p_uri.path:
return base
scheme, netloc, path, params, query, fragment = urlparse.urlparse(base)
# Switch to 'uri' parts
_, _, _, params, query, fragment = p_uri
path = join_path(path, p_uri.path)
result = urlparse.ParseResult(scheme, netloc, path, params, query, fragment)
if not construct:
return result
# Construct from parts
return urlparse.urlunparse(result)
def update_uri(uri, construct=True, **kwargs):
if isinstance(uri, urlparse.ParseResult):
uri = dict(uri._asdict())
if type(uri) is not dict:
raise ValueError("Unknown URI type")
uri.update(kwargs)
result = urlparse.ParseResult(**uri)
if not construct:
return result
return urlparse.urlunparse(result)
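A quick illustration of the two new helpers (host and paths are placeholders):

from rtorrent.common import join_uri, update_uri

# join_uri grafts a relative path onto a base URI:
# prints 'https://my.rtorrent:80/plugins/httprpc/action.php'
print join_uri('https://my.rtorrent:80', 'plugins/httprpc/action.php')

# update_uri swaps individual components of a parsed URI:
# prints 'http://my.rtorrent:80/RPC2'
result = join_uri('https://my.rtorrent:80', 'RPC2', construct=False)
print update_uri(result, scheme='http')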

lib/rtorrent/connection.py (new file, 165 lines)

@ -0,0 +1,165 @@
import logging
import urllib
from rtorrent.common import convert_version_tuple_to_str, join_uri, update_uri
from rtorrent.lib.xmlrpc.clients.http import HTTPServerProxy
from rtorrent.lib.xmlrpc.clients.scgi import SCGIServerProxy
from rtorrent.lib.xmlrpc.transports.basic_auth import BasicAuthTransport
try:
from requests.packages.urllib3 import disable_warnings
disable_warnings()
except ImportError:
print 'Unable to disable warnings for non-https authentication.'
# Try import requests transport (optional)
try:
from rtorrent.lib.xmlrpc.transports.requests_ import RequestsTransport
except ImportError:
RequestsTransport = None
MIN_RTORRENT_VERSION = (0, 8, 1)
MIN_RTORRENT_VERSION_STR = convert_version_tuple_to_str(MIN_RTORRENT_VERSION)
log = logging.getLogger(__name__)
class Connection(object):
def __init__(self, uri, auth=None, verify_ssl=True, sp=None, sp_kwargs=None):
self.auth = auth
self.verify_ssl = verify_ssl
# Transform + Parse URI
self.uri = self._transform_uri(uri)
self.scheme = urllib.splittype(self.uri)[0]
# Construct RPC Client
self.sp = self._get_sp(self.scheme, sp)
self.sp_kwargs = sp_kwargs or {}
self._client = None
self._client_version_tuple = ()
self._rpc_methods = []
@property
def client(self):
if self._client is None:
# Construct new client
self._client = self.connect()
# Return client
return self._client
def connect(self):
log.debug('Connecting to server: %r', self.uri)
if self.auth:
# Construct server proxy with authentication transport
return self.sp(self.uri, transport=self._construct_transport(), **self.sp_kwargs)
# Construct plain server proxy
return self.sp(self.uri, **self.sp_kwargs)
def test(self):
try:
self.verify()
except:
return False
return True
def verify(self):
# check for rpc methods that should be available
assert "system.client_version" in self._get_rpc_methods(), "Required RPC method not available."
assert "system.library_version" in self._get_rpc_methods(), "Required RPC method not available."
# minimum rTorrent version check
assert self._meets_version_requirement() is True,\
"Error: Minimum rTorrent version required is {0}".format(MIN_RTORRENT_VERSION_STR)
#
# Private methods
#
def _construct_transport(self):
# Ensure "auth" parameter is valid
if type(self.auth) is not tuple or len(self.auth) != 3:
raise ValueError('Invalid "auth" parameter format')
# Construct transport with authentication details
method, username, password = self.auth
secure = self.scheme == 'https'
log.debug('Constructing transport for scheme: %r, authentication method: %r', self.scheme, method)
# Use requests transport (if available)
if RequestsTransport and method in ['basic', 'digest']:
return RequestsTransport(
secure, self.auth,
verify_ssl=self.verify_ssl
)
# Use basic authentication transport
if method == 'basic':
return BasicAuthTransport(secure, username, password)
# Unsupported authentication method
if method == 'digest':
raise Exception('Digest authentication requires the "requests" library')
raise NotImplementedError('Unknown authentication method: %r' % method)
def _get_client_version_tuple(self):
if not self._client_version_tuple:
if not hasattr(self, "client_version"):
setattr(self, "client_version", self.client.system.client_version())
rtver = getattr(self, "client_version")
self._client_version_tuple = tuple([int(i) for i in rtver.split(".")])
return self._client_version_tuple
def _get_rpc_methods(self):
""" Get list of raw RPC commands
@return: raw RPC commands
@rtype: list
"""
return(self._rpc_methods or self._update_rpc_methods())
@staticmethod
def _get_sp(scheme, sp):
if sp:
return sp
if scheme in ['http', 'https']:
return HTTPServerProxy
if scheme == 'scgi':
return SCGIServerProxy
raise NotImplementedError()
def _meets_version_requirement(self):
return self._get_client_version_tuple() >= MIN_RTORRENT_VERSION
@staticmethod
def _transform_uri(uri):
scheme = urllib.splittype(uri)[0]
if scheme == 'httprpc' or scheme.startswith('httprpc+'):
# Try find HTTPRPC transport (token after '+' in 'httprpc+https'), otherwise assume HTTP
transport = scheme[scheme.index('+') + 1:] if '+' in scheme else 'http'
# Transform URI with new path and scheme
uri = join_uri(uri, 'plugins/httprpc/action.php', construct=False)
return update_uri(uri, scheme=transport)
return uri
def _update_rpc_methods(self):
self._rpc_methods = self.client.system.listMethods()
return self._rpc_methods
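The httprpc scheme shorthand is expanded automatically by _transform_uri; a usage sketch (host and credentials are placeholders):

from rtorrent.connection import Connection

# 'httprpc+https://my.rtorrent' is rewritten to
# 'https://my.rtorrent/plugins/httprpc/action.php'
conn = Connection('httprpc+https://my.rtorrent',
                  auth=('digest', 'myuser', 'mypass'),
                  verify_ssl=False)
print conn.test()  # True if the server answers and meets the version check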

@ -0,0 +1,152 @@
#!/usr/bin/python
# rtorrent_xmlrpc
# (c) 2011 Roger Que <alerante@bellsouth.net>
#
# Modified portions:
# (c) 2013 Dean Gardiner <gardiner91@gmail.com>
#
# Python module for interacting with rtorrent's XML-RPC interface
# directly over SCGI, instead of through an HTTP server intermediary.
# Inspired by Glenn Washburn's xmlrpc2scgi.py [1], but subclasses the
# built-in xmlrpclib classes so that it is compatible with features
# such as MultiCall objects.
#
# [1] <http://libtorrent.rakshasa.no/wiki/UtilsXmlrpc2scgi>
#
# Usage: server = SCGIServerProxy('scgi://localhost:7000/')
# server = SCGIServerProxy('scgi:///path/to/scgi.sock')
# print server.system.listMethods()
# mc = xmlrpclib.MultiCall(server)
# mc.get_up_rate()
# mc.get_down_rate()
# print mc()
#
#
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
#
# In addition, as a special exception, the copyright holders give
# permission to link the code of portions of this program with the
# OpenSSL library under certain conditions as described in each
# individual source file, and distribute linked combinations
# including the two.
#
# You must obey the GNU General Public License in all respects for
# all of the code used other than OpenSSL. If you modify file(s)
# with this exception, you may extend this exception to your version
# of the file(s), but you are not obligated to do so. If you do not
# wish to do so, delete this exception statement from your version.
# If you delete this exception statement from all source files in the
# program, then also delete it here.
#
#
#
# Portions based on Python's xmlrpclib:
#
# Copyright (c) 1999-2002 by Secret Labs AB
# Copyright (c) 1999-2002 by Fredrik Lundh
#
# By obtaining, using, and/or copying this software and/or its
# associated documentation, you agree that you have read, understood,
# and will comply with the following terms and conditions:
#
# Permission to use, copy, modify, and distribute this software and
# its associated documentation for any purpose and without fee is
# hereby granted, provided that the above copyright notice appears in
# all copies, and that both that copyright notice and this permission
# notice appear in supporting documentation, and that the name of
# Secret Labs AB or the author not be used in advertising or publicity
# pertaining to distribution of the software without specific, written
# prior permission.
#
# SECRET LABS AB AND THE AUTHOR DISCLAIMS ALL WARRANTIES WITH REGARD
# TO THIS SOFTWARE, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANT-
# ABILITY AND FITNESS. IN NO EVENT SHALL SECRET LABS AB OR THE AUTHOR
# BE LIABLE FOR ANY SPECIAL, INDIRECT OR CONSEQUENTIAL DAMAGES OR ANY
# DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS,
# WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS
# ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE
# OF THIS SOFTWARE.
import urllib
import xmlrpclib
from rtorrent.lib.xmlrpc.transports.scgi import SCGITransport
class SCGIServerProxy(xmlrpclib.ServerProxy):
def __init__(self, uri, transport=None, encoding=None, verbose=False,
allow_none=False, use_datetime=False):
type, uri = urllib.splittype(uri)
if type not in ('scgi',):
raise IOError('unsupported XML-RPC protocol')
self.__host, self.__handler = urllib.splithost(uri)
if not self.__handler:
self.__handler = '/'
if transport is None:
transport = SCGITransport(use_datetime=use_datetime)
self.__transport = transport
self.__encoding = encoding
self.__verbose = verbose
self.__allow_none = allow_none
def __close(self):
self.__transport.close()
def __request(self, methodname, params):
# call a method on the remote server
request = xmlrpclib.dumps(params, methodname, encoding=self.__encoding,
allow_none=self.__allow_none)
response = self.__transport.request(
self.__host,
self.__handler,
request,
verbose=self.__verbose
)
if len(response) == 1:
response = response[0]
return response
def __repr__(self):
return (
"<SCGIServerProxy for %s%s>" %
(self.__host, self.__handler)
)
__str__ = __repr__
def __getattr__(self, name):
# magic method dispatcher
return xmlrpclib._Method(self.__request, name)
# note: to call a remote object with an non-standard name, use
# result getattr(server, "strange-python-name")(args)
def __call__(self, attr):
"""A workaround to get special attributes on the ServerProxy
without interfering with the magic __getattr__
"""
if attr == "close":
return self.__close
elif attr == "transport":
return self.__transport
raise AttributeError("Attribute %r not found" % (attr,))

@ -20,24 +20,46 @@
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
from base64 import encodestring
import string
from base64 import b64encode
import httplib
import xmlrpclib
class BasicAuthTransport(xmlrpclib.Transport):
def __init__(self, username=None, password=None):
def __init__(self, secure=False, username=None, password=None):
xmlrpclib.Transport.__init__(self)
self.secure = secure
self.username = username
self.password = password
def send_auth(self, h):
if self.username is not None and self.password is not None:
h.putheader('AUTHORIZATION', "Basic %s" % string.replace(
encodestring("%s:%s" % (self.username, self.password)),
"\012", ""
))
if not self.username or not self.password:
return
auth = b64encode("%s:%s" % (self.username, self.password))
h.putheader('Authorization', "Basic %s" % auth)
def make_connection(self, host):
if self._connection and host == self._connection[0]:
return self._connection[1]
chost, self._extra_headers, x509 = self.get_host_info(host)
if self.secure:
try:
self._connection = host, httplib.HTTPSConnection(chost, None, **(x509 or {}))
except AttributeError:
raise NotImplementedError(
"your version of httplib doesn't support HTTPS"
)
else:
self._connection = host, httplib.HTTPConnection(chost)
return self._connection[1]
def single_request(self, host, handler, request_body, verbose=0):
# issue XML-RPC request
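A minimal sketch of using the reworked transport directly, assuming the transports.basic_auth module path shown in connection.py above (endpoint and credentials are placeholders):

import xmlrpclib
from rtorrent.lib.xmlrpc.transports.basic_auth import BasicAuthTransport

transport = BasicAuthTransport(secure=True, username='myuser', password='mypass')
server = xmlrpclib.ServerProxy('https://my.rtorrent/user/RPC2', transport=transport)
print server.system.client_version()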

@ -0,0 +1,91 @@
import requests as requests
import requests.auth as requestsauth
import xmlrpclib
class RequestsTransport(xmlrpclib.Transport):
def __init__(self, secure, auth=None, proxies=None, verify_ssl=True):
xmlrpclib.Transport.__init__(self)
self.secure = secure
# Construct session
self.session = requests.Session()
self.session.auth = self.parse_auth(auth)
self.session.proxies = proxies or {}
self.session.verify = False #verify_ssl
@property
def scheme(self):
if self.secure:
return 'https'
return 'http'
def build_url(self, host, handler):
return '%s://%s' % (self.scheme, host + handler)
def request(self, host, handler, request_body, verbose=0):
# Retry request once if cached connection has gone cold
for i in (0, 1):
try:
return self.single_request(host, handler, request_body, verbose)
except requests.ConnectionError:
if i:
raise
except requests.Timeout:
if i:
raise
def single_request(self, host, handler, request_body, verbose=0):
url = self.build_url(host, handler)
# Send request
response = self.session.post(
url,
data=request_body,
headers={
'Content-Type': 'text/xml'
},
stream=True
)
if response.status_code == 200:
return self.parse_response(response)
# Invalid response returned
raise xmlrpclib.ProtocolError(
host + handler,
response.status_code, response.reason,
response.headers
)
def parse_auth(self, auth):
# Parse "auth" parameter
if type(auth) is not tuple or len(auth) != 3:
return None
method, username, password = auth
# Basic Authentication
if method == 'basic':
print 'basic authentication method being used.'
return requestsauth.HTTPBasicAuth(username, password)
# Digest Authentication
if method == 'digest':
print 'digest authentication method being used.'
return requestsauth.HTTPDigestAuth(username, password)
raise NotImplementedError('Unsupported authentication method: %r' % method)
def parse_response(self, response):
p, u = self.getparser()
# Write chunks to parser
for chunk in response.iter_content(1024):
p.feed(chunk)
# Close parser
p.close()
# Close unmarshaller
return u.close()
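A hypothetical pairing of the requests-based transport with the HTTP client (endpoint and credentials are placeholders; digest authentication requires this transport):

from rtorrent.lib.xmlrpc.clients.http import HTTPServerProxy
from rtorrent.lib.xmlrpc.transports.requests_ import RequestsTransport

transport = RequestsTransport(True, ('digest', 'myuser', 'mypass'), verify_ssl=False)
server = HTTPServerProxy('https://my.rtorrent/rutorrent/plugins/httprpc/action.php',
                         transport=transport)
print server.system.listMethods()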

@ -28,12 +28,12 @@
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
@ -81,12 +81,13 @@
# ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE
# OF THIS SOFTWARE.
import errno
import httplib
import re
import socket
import urllib
import xmlrpclib
import errno
class SCGITransport(xmlrpclib.Transport):
@ -152,68 +153,3 @@ class SCGITransport(xmlrpclib.Transport):
p.close()
return u.close()
class SCGIServerProxy(xmlrpclib.ServerProxy):
def __init__(self, uri, transport=None, encoding=None, verbose=False,
allow_none=False, use_datetime=False):
type, uri = urllib.splittype(uri)
if type not in ('scgi'):
raise IOError('unsupported XML-RPC protocol')
self.__host, self.__handler = urllib.splithost(uri)
if not self.__handler:
self.__handler = '/'
if transport is None:
transport = SCGITransport(use_datetime=use_datetime)
self.__transport = transport
self.__encoding = encoding
self.__verbose = verbose
self.__allow_none = allow_none
def __close(self):
self.__transport.close()
def __request(self, methodname, params):
# call a method on the remote server
request = xmlrpclib.dumps(params, methodname, encoding=self.__encoding,
allow_none=self.__allow_none)
response = self.__transport.request(
self.__host,
self.__handler,
request,
verbose=self.__verbose
)
if len(response) == 1:
response = response[0]
return response
def __repr__(self):
return (
"<SCGIServerProxy for %s%s>" %
(self.__host, self.__handler)
)
__str__ = __repr__
def __getattr__(self, name):
# magic method dispatcher
return xmlrpclib._Method(self.__request, name)
# note: to call a remote object with an non-standard name, use
# result getattr(server, "strange-python-name")(args)
def __call__(self, attr):
"""A workaround to get special attributes on the ServerProxy
without interfering with the magic __getattr__
"""
if attr == "close":
return self.__close
elif attr == "transport":
return self.__transport
raise AttributeError("Attribute %r not found" % (attr,))

@ -45,7 +45,7 @@ def get_varname(rpc_call):
def _handle_unavailable_rpc_method(method, rt_obj):
msg = "Method isn't available."
if rt_obj._get_client_version_tuple() < method.min_version:
if rt_obj.connection._get_client_version_tuple() < method.min_version:
msg = "This method is only available in " \
"RTorrent version v{0} or later".format(
convert_version_tuple_to_str(method.min_version))
@ -108,8 +108,8 @@ class Method:
return(False)
def is_available(self, rt_obj):
if rt_obj._get_client_version_tuple() < self.min_version or \
self.rpc_call not in rt_obj._get_rpc_methods():
if rt_obj.connection._get_client_version_tuple() < self.min_version or \
self.rpc_call not in rt_obj.connection._get_rpc_methods():
return(False)
else:
return(True)

@ -353,7 +353,7 @@ class PostProcessor(object):
#check for Paused status /
#check for Ended status and 100% completion of issues.
if wv['Status'] == 'Paused' or (wv['Have'] == wv['Total'] and not any(['Present' in wv['ComicPublished'], helpers.now()[:4] in wv['ComicPublished']])):
logger.warn(wv['ComicName'] + ' is either Paused or in an Ended status with 100% completion.')
logger.warn(wv['ComicName'] + ' [' + wv['ComicYear'] + '] is either Paused or in an Ended status with 100% completion. Ignoring for match.')
continue
wv_comicname = wv['ComicName']
wv_comicpublisher = wv['ComicPublisher']
@ -365,7 +365,7 @@ class PostProcessor(object):
wv_publisher = wv['ComicPublisher']
wv_total = wv['Total']
if mylar.FOLDER_SCAN_LOG_VERBOSE:
logger.fdebug('Checking ' + wv['ComicName'] + ' [' + str(wv['ComicYear']) + '] -- ' + str(wv['ComicID']))
logger.fdebug('Queuing to Check: ' + wv['ComicName'] + ' [' + str(wv['ComicYear']) + '] -- ' + str(wv['ComicID']))
#force it to use the Publication Date of the latest issue instead of the Latest Date (which could be anything)
latestdate = myDB.select('SELECT IssueDate from issues WHERE ComicID=? order by ReleaseDate DESC', [wv['ComicID']])
@ -528,7 +528,7 @@ class PostProcessor(object):
logger.fdebug(module + '[ISSUE-VERIFY][Lone Volume FAILURE] Volume label of ' + str(watch_values['ComicVersion']) + ' indicates that there is more than one volume for this series, but the one on your watchlist has no volume label set.')
datematch = "False"
if datematch == "False" and any([watchmatch['issue_year'] is not None, watchmatch['issue_year'] != 'None', watch_issueyear is not None]):
if datematch == "False" and all([watchmatch['issue_year'] is not None, watchmatch['issue_year'] != 'None', watch_issueyear is not None]):
#now we see if the issue year matches exactly to what we have within Mylar.
if int(watch_issueyear) == int(watchmatch['issue_year']):
logger.fdebug(module + '[ISSUE-VERIFY][Issue Year MATCH] Issue Year of ' + str(watch_issueyear) + ' is a match to the year found in the filename of : ' + str(watchmatch['issue_year']))
@ -599,6 +599,7 @@ class PostProcessor(object):
"ComicName": av['ComicName'],
"DynamicComicName": av['DynamicComicName'],
"ComicPublisher": av['IssuePublisher'],
"Publisher": av['Publisher'],
"IssueID": av['IssueID'],
"IssueNumber": av['IssueNumber'],
"IssueYear": av['IssueYear'], #for some reason this is empty
@ -722,12 +723,19 @@ class PostProcessor(object):
else:
clocation = os.path.join(arcmatch['comiclocation'], helpers.conversion(arcmatch['comicfilename']))
logger.info('[' + k + ' #' + issuechk['IssueNumber'] + '] MATCH: ' + clocation + ' / ' + str(issuechk['IssueID']) + ' / ' + str(v[i]['ArcValues']['IssueID']))
if v[i]['ArcValues']['Publisher'] is None:
arcpublisher = v[i]['ArcValues']['ComicPublisher']
else:
arcpublisher = v[i]['ArcValues']['Publisher']
manual_arclist.append({"ComicLocation": clocation,
"ComicID": v[i]['WatchValues']['ComicID'],
"IssueID": v[i]['ArcValues']['IssueID'],
"IssueNumber": v[i]['ArcValues']['IssueNumber'],
"StoryArc": v[i]['ArcValues']['StoryArc'],
"StoryArcID": v[i]['ArcValues']['StoryArcID'],
"IssueArcID": v[i]['ArcValues']['IssueArcID'],
"Publisher": arcpublisher,
"ReadingOrder": v[i]['ArcValues']['ReadingOrder'],
"ComicName": k})
logger.fdebug(module + '[SUCCESSFUL MATCH: ' + k + '-' + v[i]['WatchValues']['ComicID'] + '] Match verified for ' + arcmatch['comicfilename'])
@ -746,18 +754,8 @@ class PostProcessor(object):
issueid = ml['IssueID']
ofilename = ml['ComicLocation']
logger.info('[STORY-ARC POST-PROCESSING] Enabled for ' + ml['StoryArc'])
arcdir = helpers.filesafe(ml['StoryArc'])
if mylar.REPLACE_SPACES:
arcdir = arcdir.replace(' ', mylar.REPLACE_CHAR)
if mylar.STORYARCDIR:
storyarcd = os.path.join(mylar.DESTINATION_DIR, "StoryArcs", arcdir)
logger.fdebug(module + ' Story Arc Directory set to : ' + storyarcd)
grdst = storyarcd
else:
logger.fdebug(module + ' Story Arc Directory not configured. Using grabbag directory : ' + mylar.GRABBAG_DIR)
storyarcd = mylar.GRABBAG_DIR
grdst = storyarcd
grdst = helpers.arcformat(ml['StoryArc'], helpers.spantheyears(ml['StoryArcID']), ml['Publisher'])
#tag the meta.
metaresponse = None
@ -826,13 +824,15 @@ class PostProcessor(object):
grab_src = os.path.join(src_location, ofilename)
logger.fdebug(module + ' Source Path : ' + grab_src)
logger.info(module + '[' + mylar.FILE_OPTS + '] ' + str(ofilename) + ' into directory : ' + str(grab_dst))
logger.info(module + '[ONE-OFF MODE][' + mylar.ARC_FILEOPS.upper() + '] ' + str(ofilename) + ' into directory : ' + str(grab_dst))
#this is also for issues that are part of a story arc, and don't belong to a watchlist series (ie. one-off's)
try:
fileoperation = helpers.file_ops(grab_src, grab_dst)
fileoperation = helpers.file_ops(grab_src, grab_dst, one_off=True)
if not fileoperation:
raise OSError
except (OSError, IOError):
logger.fdebug(module + ' Failed to ' + mylar.FILE_OPTS + ' ' + src + ' - check directories and manually re-run.')
logger.fdebug(module + '[ONE-OFF MODE][' + mylar.ARC_FILEOPS.upper() + '] Failure ' + src + ' - check directories and manually re-run.')
return
#tidyup old path
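The one_off=True flag routes the file operation through the story-arc setting rather than the global one. A minimal sketch of that dispatch, assuming helpers.file_ops selects the operation from ARC_FILEOPS for one-offs; the real helper is not part of this diff:

import os
import shutil
import mylar

def file_ops_sketch(src, dst, one_off=False):
    # hypothetical illustration: one-offs honour ARC_FILEOPS
    # (move/copy/hardlink/softlink), everything else uses FILE_OPTS
    op = mylar.ARC_FILEOPS if one_off else mylar.FILE_OPTS
    if op == 'copy':
        shutil.copy(src, dst)
    elif op == 'move':
        shutil.move(src, dst)
    elif op == 'hardlink':
        os.link(src, dst)
    elif op == 'softlink':
        os.symlink(src, dst)
    else:
        return False
    return True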
@ -1738,6 +1738,7 @@ class PostProcessor(object):
#src = os.path.join(self.nzb_folder, str(nfilename + ext))
src = os.path.join(odir, ofilename)
try:
self._log("[" + mylar.FILE_OPTS + "] " + src + " - to - " + dst)
fileoperation = helpers.file_ops(src, dst)
if not fileoperation:
raise OSError
@ -1824,23 +1825,21 @@ class PostProcessor(object):
try:
if ml['IssueArcID']:
logger.info('Watchlist Story Arc match detected.')
logger.info(ml)
arcinfo = myDB.selectone('SELECT * FROM readinglist where IssueArcID=?', [ml['IssueArcID']]).fetchone()
if arcinfo is None:
logger.warn('Unable to locate IssueID within given Story Arc. Ensure everything is up-to-date (refreshed) for the Arc.')
else:
arcdir = helpers.filesafe(arcinfo['StoryArc'])
if mylar.REPLACE_SPACES:
arcdir = arcdir.replace(' ', mylar.REPLACE_CHAR)
if mylar.STORYARCDIR:
storyarcd = os.path.join(mylar.DESTINATION_DIR, "StoryArcs", arcdir)
logger.fdebug(module + ' Story Arc Directory set to : ' + storyarcd)
grdst = storyarcd
logger.info('here')
if arcinfo['Publisher'] is None:
arcpub = arcinfo['IssuePublisher']
else:
logger.fdebug(module + ' Story Arc Directory not configured. Setting to grabbag directory: ' + mylar.GRABBAG_DIR)
storyarcd = mylar.GRABBAG_DIR
grdst = mylar.GRABBAG_DIR
arcpub = arcinfo['Publisher']
grdst = helpers.arcformat(arcinfo['StoryArc'], helpers.spantheyears(arcinfo['StoryArcID']), arcpub)
logger.info('grdst:' + grdst)
logger.info('there')
checkdirectory = filechecker.validateAndCreateDirectory(grdst, True, module=module)
if not checkdirectory:
logger.warn(module + ' Error trying to validate/create directory. Aborting this process at this time.')
@ -1848,9 +1847,7 @@ class PostProcessor(object):
"mode": 'stop'})
return self.queue.put(self.valreturn)
if mylar.READ2FILENAME:
logger.fdebug(module + ' readingorder#: ' + str(arcinfo['ReadingOrder']))
if int(arcinfo['ReadingOrder']) < 10: readord = "00" + str(arcinfo['ReadingOrder'])
elif int(arcinfo['ReadingOrder']) >= 10 and int(arcinfo['ReadingOrder']) <= 99: readord = "0" + str(arcinfo['ReadingOrder'])
@ -1864,7 +1861,7 @@ class PostProcessor(object):
logger.fdebug(module + ' Destination Path : ' + grab_dst)
grab_src = dst
logger.fdebug(module + ' Source Path : ' + grab_src)
logger.info(module + ' Copying ' + str(dst) + ' into directory : ' + str(grab_dst))
logger.info(module + '[' + mylar.ARC_FILEOPS.upper() + '] ' + str(dst) + ' into directory : ' + str(grab_dst))
try:
#need to ensure that src is pointing to the series in order to do a soft/hard-link properly
@ -1873,7 +1870,7 @@ class PostProcessor(object):
raise OSError
#shutil.copy(grab_src, grab_dst)
except (OSError, IOError):
logger.fdebug(module + ' Failed to ' + mylar.FILE_OPTS + ' ' + src + ' - check directories and manually re-run.')
logger.fdebug(module + '[' + mylar.ARC_FILEOPS.upper() + '] Failure ' + src + ' - check directories and manually re-run.')
return
#delete entry from nzblog table in case it was forced via the Story Arc Page

@ -116,7 +116,7 @@ DONATEBUTTON = True
PULLNEW = None
ALT_PULL = 0
PULLBYFILE = None
PULLBYFILE = False
LOCAL_IP = None
EXT_IP = None
@ -322,6 +322,8 @@ TAB_PASS = None
TAB_DIRECTORY = None
STORYARCDIR = 0
COPY2ARCDIR = 0
ARC_FOLDERFORMAT = None
ARC_FILEOPS = 'copy'
CVURL = None
CURRENT_WEEKNUMBER = None
@ -329,6 +331,7 @@ CURRENT_YEAR = None
PULL_REFRESH = None
WEEKFOLDER = 0
WEEKFOLDER_LOC = None
WEEKFOLDER_FORMAT = 0
LOCMOVE = 0
NEWCOM_DIR = None
FFTONEWCOM_DIR = 0
@ -397,6 +400,11 @@ SNATCHEDTORRENT_NOTIFY = 0
USE_RTORRENT = False
RTORRENT_HOST = None
RTORRENT_AUTHENTICATION = 'basic'
RTORRENT_RPC_URL = None
RTORRENT_SSL = 0
RTORRENT_VERIFY = 0
RTORRENT_CA_BUNDLE = None
RTORRENT_USERNAME = None
RTORRENT_PASSWORD = None
RTORRENT_STARTONLOAD = 0
@ -472,8 +480,8 @@ def initialize():
DOWNLOAD_SCAN_INTERVAL, FOLDER_SCAN_LOG_VERBOSE, IMPORTLOCK, NZB_DOWNLOADER, USE_SABNZBD, SAB_HOST, SAB_USERNAME, SAB_PASSWORD, SAB_APIKEY, SAB_CATEGORY, SAB_PRIORITY, SAB_TO_MYLAR, SAB_DIRECTORY, USE_BLACKHOLE, BLACKHOLE_DIR, ADD_COMICS, COMIC_DIR, IMP_MOVE, IMP_RENAME, IMP_METADATA, \
USE_NZBGET, NZBGET_HOST, NZBGET_PORT, NZBGET_USERNAME, NZBGET_PASSWORD, NZBGET_CATEGORY, NZBGET_PRIORITY, NZBGET_DIRECTORY, NZBSU, NZBSU_UID, NZBSU_APIKEY, NZBSU_VERIFY, DOGNZB, DOGNZB_APIKEY, DOGNZB_VERIFY, \
NEWZNAB, NEWZNAB_NAME, NEWZNAB_HOST, NEWZNAB_APIKEY, NEWZNAB_VERIFY, NEWZNAB_UID, NEWZNAB_ENABLED, EXTRA_NEWZNABS, NEWZNAB_EXTRA, \
ENABLE_TORZNAB, TORZNAB_NAME, TORZNAB_HOST, TORZNAB_APIKEY, TORZNAB_CATEGORY, TORZNAB_VERIFY, \
EXPERIMENTAL, ALTEXPERIMENTAL, USE_RTORRENT, RTORRENT_HOST, RTORRENT_USERNAME, RTORRENT_PASSWORD, RTORRENT_STARTONLOAD, RTORRENT_LABEL, RTORRENT_DIRECTORY, \
ENABLE_TORZNAB, TORZNAB_NAME, TORZNAB_HOST, TORZNAB_APIKEY, TORZNAB_CATEGORY, TORZNAB_VERIFY, EXPERIMENTAL, ALTEXPERIMENTAL, \
USE_RTORRENT, RTORRENT_HOST, RTORRENT_AUTHENTICATION, RTORRENT_RPC_URL, RTORRENT_SSL, RTORRENT_VERIFY, RTORRENT_CA_BUNDLE, RTORRENT_USERNAME, RTORRENT_PASSWORD, RTORRENT_STARTONLOAD, RTORRENT_LABEL, RTORRENT_DIRECTORY, \
USE_UTORRENT, UTORRENT_HOST, UTORRENT_USERNAME, UTORRENT_PASSWORD, UTORRENT_LABEL, USE_TRANSMISSION, TRANSMISSION_HOST, TRANSMISSION_USERNAME, TRANSMISSION_PASSWORD, TRANSMISSION_DIRECTORY, \
ENABLE_META, CMTAGGER_PATH, CBR2CBZ_ONLY, CT_TAG_CR, CT_TAG_CBL, CT_CBZ_OVERWRITE, UNRAR_CMD, CT_SETTINGSPATH, CMTAG_START_YEAR_AS_VOLUME, UPDATE_ENDED, INDIE_PUB, BIGGIE_PUB, IGNORE_HAVETOTAL, SNATCHED_HAVETOTAL, PROVIDER_ORDER, TMP_PROV, \
dbUpdateScheduler, searchScheduler, RSSScheduler, WeeklyScheduler, VersionScheduler, FolderMonitorScheduler, \
@ -481,9 +489,12 @@ def initialize():
ENABLE_RSS, RSS_CHECKINTERVAL, RSS_LASTRUN, FAILED_DOWNLOAD_HANDLING, FAILED_AUTO, ENABLE_TORRENT_SEARCH, ENABLE_TPSE, TPSE_PROXY, TPSE_VERIFY, ENABLE_32P, SEARCH_32P, MODE_32P, KEYS_32P, RSSFEED_32P, USERNAME_32P, PASSWORD_32P, AUTHKEY_32P, PASSKEY_32P, FEEDINFO_32P, VERIFY_32P, SNATCHEDTORRENT_NOTIFY, \
PROWL_ENABLED, PROWL_PRIORITY, PROWL_KEYS, PROWL_ONSNATCH, NMA_ENABLED, NMA_APIKEY, NMA_PRIORITY, NMA_ONSNATCH, PUSHOVER_ENABLED, PUSHOVER_PRIORITY, PUSHOVER_APIKEY, PUSHOVER_USERKEY, PUSHOVER_ONSNATCH, BOXCAR_ENABLED, BOXCAR_ONSNATCH, BOXCAR_TOKEN, \
PUSHBULLET_ENABLED, PUSHBULLET_APIKEY, PUSHBULLET_DEVICEID, PUSHBULLET_ONSNATCH, LOCMOVE, NEWCOM_DIR, FFTONEWCOM_DIR, \
PREFERRED_QUALITY, MOVE_FILES, RENAME_FILES, LOWERCASE_FILENAMES, USE_MINSIZE, MINSIZE, USE_MAXSIZE, MAXSIZE, CORRECT_METADATA, FOLDER_FORMAT, FILE_FORMAT, REPLACE_CHAR, REPLACE_SPACES, ADD_TO_CSV, CVINFO, LOG_LEVEL, POST_PROCESSING, POST_PROCESSING_SCRIPT, FILE_OPTS, SEARCH_DELAY, GRABBAG_DIR, READ2FILENAME, SEND2READ, TAB_ENABLE, TAB_HOST, TAB_USER, TAB_PASS, TAB_DIRECTORY, STORYARCDIR, COPY2ARCDIR, CVURL, CHECK_FOLDER, ENABLE_CHECK_FOLDER, \
PREFERRED_QUALITY, MOVE_FILES, RENAME_FILES, LOWERCASE_FILENAMES, USE_MINSIZE, MINSIZE, USE_MAXSIZE, MAXSIZE, CORRECT_METADATA, \
FOLDER_FORMAT, FILE_FORMAT, REPLACE_CHAR, REPLACE_SPACES, ADD_TO_CSV, CVINFO, LOG_LEVEL, POST_PROCESSING, POST_PROCESSING_SCRIPT, \
FILE_OPTS, SEARCH_DELAY, GRABBAG_DIR, READ2FILENAME, SEND2READ, TAB_ENABLE, TAB_HOST, TAB_USER, TAB_PASS, TAB_DIRECTORY, \
STORYARCDIR, COPY2ARCDIR, ARC_FOLDERFORMAT, ARC_FILEOPS, CVURL, CHECK_FOLDER, ENABLE_CHECK_FOLDER, \
COMIC_LOCATION, QUAL_ALTVERS, QUAL_SCANNER, QUAL_TYPE, QUAL_QUALITY, ENABLE_EXTRA_SCRIPTS, EXTRA_SCRIPTS, ENABLE_PRE_SCRIPTS, PRE_SCRIPTS, PULLNEW, ALT_PULL, PULLBYFILE, COUNT_ISSUES, COUNT_HAVES, COUNT_COMICS, \
SYNO_FIX, ENFORCE_PERMS, CHMOD_FILE, CHMOD_DIR, CHOWNER, CHGROUP, ANNUALS_ON, CV_ONLY, CV_ONETIMER, CURRENT_WEEKNUMBER, CURRENT_YEAR, PULL_REFRESH, WEEKFOLDER, WEEKFOLDER_LOC, UMASK, \
SYNO_FIX, ENFORCE_PERMS, CHMOD_FILE, CHMOD_DIR, CHOWNER, CHGROUP, ANNUALS_ON, CV_ONLY, CV_ONETIMER, CURRENT_WEEKNUMBER, CURRENT_YEAR, PULL_REFRESH, WEEKFOLDER, WEEKFOLDER_LOC, WEEKFOLDER_FORMAT, UMASK, \
TELEGRAM_ENABLED, TELEGRAM_TOKEN, TELEGRAM_USERID
if __INITIALIZED__:
@ -609,6 +620,7 @@ def initialize():
GRABBAG_DIR = os.path.join(DESTINATION_DIR, 'GrabBag')
WEEKFOLDER = bool(check_setting_int(CFG, 'General', 'weekfolder', 0))
WEEKFOLDER_LOC = check_setting_str(CFG, 'General', 'weekfolder_loc', '')
WEEKFOLDER_FORMAT = bool(check_setting_int(CFG, 'General', 'weekfolder_format', 0))
LOCMOVE = bool(check_setting_int(CFG, 'General', 'locmove', 0))
if LOCMOVE is None:
LOCMOVE = 0
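Per the commit notes, weekfolder_format toggles between the two weekly folder styles. A minimal sketch of the naming, assuming 0 = YYYY-WN and 1 = YYYY-MM-DD (the function name is illustrative):

import datetime

def weekly_folder_sketch(weekfolder_format, year, weeknumber, pulldate=None):
    # 0 -> '2016-48' (year-weeknumber), 1 -> '2016-12-02' (pull date)
    if not weekfolder_format:
        return '%s-%s' % (year, weeknumber)
    return (pulldate or datetime.date.today()).strftime('%Y-%m-%d')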
@ -627,6 +639,10 @@ def initialize():
TAB_DIRECTORY = check_setting_str(CFG, 'General', 'tab_directory', '')
STORYARCDIR = bool(check_setting_int(CFG, 'General', 'storyarcdir', 0))
COPY2ARCDIR = bool(check_setting_int(CFG, 'General', 'copy2arcdir', 0))
ARC_FOLDERFORMAT = check_setting_str(CFG, 'General', 'arc_folderformat', '$arc ($spanyears)')
if any([ARC_FOLDERFORMAT is None, ARC_FOLDERFORMAT == 'None']):
ARC_FOLDERFORMAT = '$arc ($spanyears)'
ARC_FILEOPS = check_setting_str(CFG,'General', 'arc_fileops', 'copy')
PROWL_ENABLED = bool(check_setting_int(CFG, 'Prowl', 'prowl_enabled', 0))
PROWL_KEYS = check_setting_str(CFG, 'Prowl', 'prowl_keys', '')
PROWL_ONSNATCH = bool(check_setting_int(CFG, 'Prowl', 'prowl_onsnatch', 0))
@ -756,6 +772,11 @@ def initialize():
SNATCHEDTORRENT_NOTIFY = bool(check_setting_int(CFG, 'Torrents', 'snatchedtorrent_notify', 0))
RTORRENT_HOST = check_setting_str(CFG, 'Torrents', 'rtorrent_host', '')
RTORRENT_AUTHENTICATION = check_setting_str(CFG, 'Torrents', 'rtorrent_authentication', 'basic')
RTORRENT_RPC_URL = check_setting_str(CFG, 'Torrents', 'rtorrent_rpc_url', '')
RTORRENT_SSL = bool(check_setting_int(CFG, 'Torrents', 'rtorrent_ssl', 0))
RTORRENT_VERIFY = bool(check_setting_int(CFG, 'Torrents', 'rtorrent_verify', 0))
RTORRENT_CA_BUNDLE = check_setting_str(CFG, 'Torrents', 'rtorrent_ca_bundle', '')
RTORRENT_USERNAME = check_setting_str(CFG, 'Torrents', 'rtorrent_username', '')
RTORRENT_PASSWORD = check_setting_str(CFG, 'Torrents', 'rtorrent_password', '')
RTORRENT_STARTONLOAD = bool(check_setting_int(CFG, 'Torrents', 'rtorrent_startonload', 0))
@ -877,7 +898,7 @@ def initialize():
NEWZNAB = bool(check_setting_int(CFG, 'Newznab', 'newznab', 0))
if CONFIG_VERSION:
NEWZNAB_HOST = check_setting_str(CFG, 'Newznab', 'newznab_host', '')
NEWZNAB_HOST = helpers.clean_url(check_setting_str(CFG, 'Newznab', 'newznab_host', ''))
NEWZNAB_APIKEY = check_setting_str(CFG, 'Newznab', 'newznab_apikey', '')
NEWZNAB_UID = 1
NEWZNAB_ENABLED = bool(check_setting_int(CFG, 'Newznab', 'newznab_enabled', 1))
@ -928,7 +949,7 @@ def initialize():
#to counteract the loss of the 1st newznab entry because of a switch, let's rewrite to the tuple
if NEWZNAB_HOST and CONFIG_VERSION:
EXTRA_NEWZNABS.append((NEWZNAB_NAME, NEWZNAB_HOST, NEWZNAB_VERIFY, NEWZNAB_APIKEY, NEWZNAB_UID, int(NEWZNAB_ENABLED)))
EXTRA_NEWZNABS.append((NEWZNAB_NAME, helpers.clean_url(NEWZNAB_HOST), NEWZNAB_VERIFY, NEWZNAB_APIKEY, NEWZNAB_UID, int(NEWZNAB_ENABLED)))
#PR_NUM +=1
# Need to rewrite config here and bump up config version
CONFIG_VERSION = '6'
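helpers.clean_url itself is not shown in this diff; per the commit notes it guards newznab host entries against stray whitespace. A minimal sketch of what it might do:

def clean_url_sketch(host):
    # hypothetical illustration: strip leading/trailing spaces so a pasted
    # host such as ' https://api.nzb.example ' still resolves
    if host is None:
        return host
    return host.strip()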
@ -1427,6 +1448,8 @@ def config_write():
new_config['General']['tab_directory'] = TAB_DIRECTORY
new_config['General']['storyarcdir'] = int(STORYARCDIR)
new_config['General']['copy2arcdir'] = int(COPY2ARCDIR)
new_config['General']['arc_folderformat'] = ARC_FOLDERFORMAT
new_config['General']['arc_fileops'] = ARC_FILEOPS
new_config['General']['use_minsize'] = int(USE_MINSIZE)
new_config['General']['minsize'] = MINSIZE
new_config['General']['use_maxsize'] = int(USE_MAXSIZE)
@ -1443,6 +1466,7 @@ def config_write():
new_config['General']['file_opts'] = FILE_OPTS
new_config['General']['weekfolder'] = int(WEEKFOLDER)
new_config['General']['weekfolder_loc'] = WEEKFOLDER_LOC
new_config['General']['weekfolder_format'] = int(WEEKFOLDER_FORMAT)
new_config['General']['locmove'] = int(LOCMOVE)
new_config['General']['newcom_dir'] = NEWCOM_DIR
new_config['General']['fftonewcom_dir'] = int(FFTONEWCOM_DIR)
@ -1504,6 +1528,11 @@ def config_write():
new_config['Torrents']['verify_32p'] = int(VERIFY_32P)
new_config['Torrents']['snatchedtorrent_notify'] = int(SNATCHEDTORRENT_NOTIFY)
new_config['Torrents']['rtorrent_host'] = RTORRENT_HOST
new_config['Torrents']['rtorrent_authentication'] = RTORRENT_AUTHENTICATION
new_config['Torrents']['rtorrent_rpc_url'] = RTORRENT_RPC_URL
new_config['Torrents']['rtorrent_ssl'] = int(RTORRENT_SSL)
new_config['Torrents']['rtorrent_verify'] = int(RTORRENT_VERIFY)
new_config['Torrents']['rtorrent_ca_bundle'] = RTORRENT_CA_BUNDLE
new_config['Torrents']['rtorrent_username'] = RTORRENT_USERNAME
new_config['Torrents']['rtorrent_password'] = RTORRENT_PASSWORD
new_config['Torrents']['rtorrent_startonload'] = int(RTORRENT_STARTONLOAD)
@ -1697,12 +1726,13 @@ def dbcheck():
# c.execute('CREATE TABLE IF NOT EXISTS sablog (nzo_id TEXT, ComicName TEXT, ComicYEAR TEXT, ComicIssue TEXT, name TEXT, nzo_complete TEXT)')
c.execute('CREATE TABLE IF NOT EXISTS importresults (impID TEXT, ComicName TEXT, ComicYear TEXT, Status TEXT, ImportDate TEXT, ComicFilename TEXT, ComicLocation TEXT, WatchMatch TEXT, DisplayName TEXT, SRID TEXT, ComicID TEXT, IssueID TEXT, Volume TEXT, IssueNumber TEXT, DynamicName TEXT)')
c.execute('CREATE TABLE IF NOT EXISTS readlist (IssueID TEXT, ComicName TEXT, Issue_Number TEXT, Status TEXT, DateAdded TEXT, Location TEXT, inCacheDir TEXT, SeriesYear TEXT, ComicID TEXT, StatusChange TEXT)')
c.execute('CREATE TABLE IF NOT EXISTS readinglist(StoryArcID TEXT, ComicName TEXT, IssueNumber TEXT, SeriesYear TEXT, IssueYEAR TEXT, StoryArc TEXT, TotalIssues TEXT, Status TEXT, inCacheDir TEXT, Location TEXT, IssueArcID TEXT, ReadingOrder INT, IssueID TEXT, ComicID TEXT, StoreDate TEXT, IssueDate TEXT, Publisher TEXT, IssuePublisher TEXT, IssueName TEXT, CV_ArcID TEXT, Int_IssueNumber INT, DynamicComicName TEXT)')
c.execute('CREATE TABLE IF NOT EXISTS readinglist(StoryArcID TEXT, ComicName TEXT, IssueNumber TEXT, SeriesYear TEXT, IssueYEAR TEXT, StoryArc TEXT, TotalIssues TEXT, Status TEXT, inCacheDir TEXT, Location TEXT, IssueArcID TEXT, ReadingOrder INT, IssueID TEXT, ComicID TEXT, StoreDate TEXT, IssueDate TEXT, Publisher TEXT, IssuePublisher TEXT, IssueName TEXT, CV_ArcID TEXT, Int_IssueNumber INT, DynamicComicName TEXT, Volume TEXT)')
c.execute('CREATE TABLE IF NOT EXISTS annuals (IssueID TEXT, Issue_Number TEXT, IssueName TEXT, IssueDate TEXT, Status TEXT, ComicID TEXT, GCDComicID TEXT, Location TEXT, ComicSize TEXT, Int_IssueNumber INT, ComicName TEXT, ReleaseDate TEXT, ReleaseComicID TEXT, ReleaseComicName TEXT, IssueDate_Edit TEXT)')
c.execute('CREATE TABLE IF NOT EXISTS rssdb (Title TEXT UNIQUE, Link TEXT, Pubdate TEXT, Site TEXT, Size TEXT)')
c.execute('CREATE TABLE IF NOT EXISTS futureupcoming (ComicName TEXT, IssueNumber TEXT, ComicID TEXT, IssueID TEXT, IssueDate TEXT, Publisher TEXT, Status TEXT, DisplayComicName TEXT, weeknumber TEXT, year TEXT)')
c.execute('CREATE TABLE IF NOT EXISTS failed (ID TEXT, Status TEXT, ComicID TEXT, IssueID TEXT, Provider TEXT, ComicName TEXT, Issue_Number TEXT, NZBName TEXT, DateFailed TEXT)')
c.execute('CREATE TABLE IF NOT EXISTS searchresults (SRID TEXT, results Numeric, Series TEXT, publisher TEXT, haveit TEXT, name TEXT, deck TEXT, url TEXT, description TEXT, comicid TEXT, comicimage TEXT, issues TEXT, comicyear TEXT, ogcname TEXT)')
c.execute('CREATE TABLE IF NOT EXISTS ref32p (ComicID TEXT UNIQUE, ID TEXT, Series TEXT)')
conn.commit
c.close
#new
@ -2116,6 +2146,11 @@ def dbcheck():
c.execute('ALTER TABLE readinglist ADD COLUMN DynamicComicName TEXT')
dynamic_upgrade = True
try:
c.execute('SELECT Volume from readinglist')
except sqlite3.OperationalError:
c.execute('ALTER TABLE readinglist ADD COLUMN Volume TEXT')
## -- searchresults Table --
try:
c.execute('SELECT SRID from searchresults')
@ -2180,6 +2215,7 @@ def dbcheck():
#let's delete errant comics that are stranded (ie. Comicname = Comic ID: )
c.execute("DELETE from comics WHERE ComicName='None' OR ComicName LIKE 'Comic ID%' OR ComicName is NULL")
c.execute("DELETE from issues WHERE ComicName='None' OR ComicName LIKE 'Comic ID%' OR ComicName is NULL")
c.execute("DELETE from issues WHERE ComicID is NULL")
c.execute("DELETE from annuals WHERE ComicName='None' OR ComicName is NULL or Issue_Number is NULL")
c.execute("DELETE from upcoming WHERE ComicName='None' OR ComicName is NULL or IssueNumber is NULL")
c.execute("DELETE from importresults WHERE ComicName='None' OR ComicName is NULL")

@ -10,7 +10,7 @@ from cookielib import LWPCookieJar
from operator import itemgetter
import mylar
from mylar import logger, filechecker
from mylar import logger, filechecker, helpers
class info32p(object):
@ -152,6 +152,10 @@ class info32p(object):
def searchit(self):
#self.searchterm is a tuple containing series name, issue number, volume and publisher.
series_search = self.searchterm['series']
comic_id = self.searchterm['id']
if comic_id:
chk_id = helpers.checkthe_id(comic_id)
annualize = False
if 'Annual' in series_search:
series_search = re.sub(' Annual', '', series_search).strip()
@ -162,140 +166,167 @@ class info32p(object):
spl = [x for x in self.publisher_list if x in publisher_search]
for x in spl:
publisher_search = re.sub(x, '', publisher_search).strip()
logger.info('publisher search set to : ' + publisher_search)
#generate the dynamic name of the series here so we can match it up
as_d = filechecker.FileChecker()
as_dinfo = as_d.dynamic_replace(series_search)
mod_series = as_dinfo['mod_seriesname']
as_puinfo = as_d.dynamic_replace(publisher_search)
pub_series = as_puinfo['mod_seriesname']
chk_id = None
# lookup the ComicID in the 32p sqlite3 table to pull the series_id to use.
if comic_id:
chk_id = helpers.checkthe_id(comic_id)
if not chk_id:
#generate the dynamic name of the series here so we can match it up
as_d = filechecker.FileChecker()
as_dinfo = as_d.dynamic_replace(series_search)
mod_series = re.sub('\|','', as_dinfo['mod_seriesname']).strip()
as_puinfo = as_d.dynamic_replace(publisher_search)
pub_series = as_puinfo['mod_seriesname']
logger.info('series_search: ' + series_search)
logger.info('series_search: ' + series_search)
if '/' in series_search:
series_search = series_search[:series_search.find('/')]
if ':' in series_search:
series_search = series_search[:series_search.find(':')]
if ',' in series_search:
series_search = series_search[:series_search.find(',')]
if '/' in series_search:
series_search = series_search[:series_search.find('/')]
if ':' in series_search:
series_search = series_search[:series_search.find(':')]
if ',' in series_search:
series_search = series_search[:series_search.find(',')]
if not mylar.SEARCH_32P:
url = 'https://walksoftly.itsaninja.party/serieslist.php'
params = {'series': series_search}
try:
t = requests.get(url, params=params, verify=True)
except requests.exceptions.RequestException as e:
logger.warn(e)
return "no results"
if not mylar.SEARCH_32P:
url = 'https://walksoftly.itsaninja.party/serieslist.php'
params = {'series': re.sub('\|','', mod_series.lower()).strip()} #series_search}
try:
t = requests.get(url, params=params, verify=True, headers={'USER-AGENT': mylar.USER_AGENT[:mylar.USER_AGENT.find('/')+7] + mylar.USER_AGENT[mylar.USER_AGENT.find('(')+1]})
except requests.exceptions.RequestException as e:
logger.warn(e)
return "no results"
if t.status_code == '619':
logger.warn('[' + str(t.status_code) + '] Unable to retrieve data from site.')
return "no results"
elif t.status_code == '999':
logger.warn('[' + str(t.status_code) + '] No series title was provided to the search query.')
return "no results"
if t.status_code == '619':
logger.warn('[' + str(t.status_code) + '] Unable to retrieve data from site.')
return "no results"
elif t.status_code == '999':
logger.warn('[' + str(t.status_code) + '] No series title was provided to the search query.')
return "no results"
try:
results = t.json()
except:
results = t.text
try:
results = t.json()
except:
results = t.text
if len(results) == 0:
logger.warn('No results found for search on 32P.')
return "no results"
if len(results) == 0:
logger.warn('No results found for search on 32P.')
return "no results"
with requests.Session() as s:
s.headers = self.headers
cj = LWPCookieJar(os.path.join(mylar.CACHE_DIR, ".32p_cookies.dat"))
cj.load()
s.cookies = cj
if mylar.SEARCH_32P:
url = 'https://32pag.es/torrents.php' #?action=serieslist&filter=' + series_search #&filter=F
params = {'action': 'serieslist', 'filter': series_search}
time.sleep(1) #just to make sure we don't hammer, 1s pause.
t = s.get(url, params=params, verify=True)
soup = BeautifulSoup(t.content, "html.parser")
results = soup.find_all("a", {"class":"object-qtip"},{"data-type":"torrentgroup"})
data = []
pdata = []
pubmatch = False
for r in results:
if not chk_id:
if mylar.SEARCH_32P:
torrentid = r['data-id']
torrentname = r.findNext(text=True)
torrentname = torrentname.strip()
else:
torrentid = r['id']
torrentname = r['series']
url = 'https://32pag.es/torrents.php' #?action=serieslist&filter=' + series_search #&filter=F
params = {'action': 'serieslist', 'filter': series_search}
time.sleep(1) #just to make sure we don't hammer, 1s pause.
t = s.get(url, params=params, verify=True)
soup = BeautifulSoup(t.content, "html.parser")
results = soup.find_all("a", {"class":"object-qtip"},{"data-type":"torrentgroup"})
as_d = filechecker.FileChecker()
as_dinfo = as_d.dynamic_replace(torrentname)
seriesresult = as_dinfo['mod_seriesname']
logger.info('searchresult: ' + seriesresult + ' --- ' + mod_series + '[' + publisher_search + ']')
if seriesresult == mod_series:
logger.info('[MATCH] ' + torrentname + ' [' + str(torrentid) + ']')
data.append({"id": torrentid,
"series": torrentname})
elif publisher_search in seriesresult:
tmp_torrentname = re.sub(publisher_search, '', seriesresult).strip()
as_t = filechecker.FileChecker()
as_tinfo = as_t.dynamic_replace(tmp_torrentname)
if as_tinfo['mod_seriesname'] == mod_series:
for r in results:
if mylar.SEARCH_32P:
torrentid = r['data-id']
torrentname = r.findNext(text=True)
torrentname = torrentname.strip()
else:
torrentid = r['id']
torrentname = r['series']
as_d = filechecker.FileChecker()
as_dinfo = as_d.dynamic_replace(torrentname)
seriesresult = as_dinfo['mod_seriesname']
logger.info('searchresult: ' + seriesresult + ' --- ' + mod_series + '[' + publisher_search + ']')
if seriesresult == mod_series:
logger.info('[MATCH] ' + torrentname + ' [' + str(torrentid) + ']')
pdata.append({"id": torrentid,
"series": torrentname})
pubmatch = True
data.append({"id": torrentid,
"series": torrentname})
elif publisher_search in seriesresult:
logger.info('publisher match.')
tmp_torrentname = re.sub(publisher_search, '', seriesresult).strip()
as_t = filechecker.FileChecker()
as_tinfo = as_t.dynamic_replace(tmp_torrentname)
logger.info('tmp_torrentname:' + tmp_torrentname)
logger.info('as_tinfo:' + as_tinfo['mod_seriesname'])
if as_tinfo['mod_seriesname'] == mod_series:
logger.info('[MATCH] ' + torrentname + ' [' + str(torrentid) + ']')
pdata.append({"id": torrentid,
"series": torrentname})
pubmatch = True
logger.info(str(len(data)) + ' series listed for searching that match.')
else:
logger.info('Exact series ID already discovered previously. Setting to :' + chk_id['series'] + '[' + str(chk_id['id']) + ']')
pdata.append({"id": chk_id['id'],
"series": chk_id['series']})
pubmatch = True
logger.info(str(len(data)) + ' series listed for searching that match.')
if all([len(data) == 0, len(pdata) == 0]):
return "no results"
if len(data) == 1 or len(pdata) == 1:
logger.info(str(len(data)) + ' series match the title being searched for')
if len(pdata) == 1:
dataset = pdata[0]['id']
else:
dataset = data[0]['id']
payload = {'action': 'groupsearch',
'id': dataset,
'issue': issue_search}
#in order to match up against 0-day stuff, volume has to be none at this point
#when doing other searches tho, this should be allowed to go through
#if all([volume_search != 'None', volume_search is not None]):
# payload.update({'volume': re.sub('v', '', volume_search).strip()})
logger.info('payload: ' + str(payload))
url = 'https://32pag.es/ajax.php'
time.sleep(1) #just to make sure we don't hammer, 1s pause.
d = s.get(url, params=payload, verify=True)
logger.info(str(len(pdata)) + ' series match the title being searched for')
dataset = pdata
searchid = pdata[0]['id']
elif len(data) == 1:
logger.info(str(len(data)) + ' series match the title being searched for')
dataset = data
searchid = data[0]['id']
if chk_id is None:
#update the 32p_reference so we avoid doing a url lookup next time
helpers.checkthe_id(comic_id, dataset)
results32p = []
resultlist = {}
try:
searchResults = d.json()
except:
searchResults = d.text
logger.info(searchResults)
if searchResults['status'] == 'success' and searchResults['count'] > 0:
logger.info('successfully retrieved ' + str(searchResults['count']) + ' search results.')
for a in searchResults['details']:
results32p.append({'link': a['id'],
'title': self.searchterm['series'] + ' v' + a['volume'] + ' #' + a['issues'],
'filesize': a['size'],
'issues': a['issues'],
'pack': a['pack'],
'format': a['format'],
'language': a['language'],
'seeders': a['seeders'],
'leechers': a['leechers'],
'scanner': a['scanner'],
'pubdate': datetime.datetime.fromtimestamp(float(a['upload_time'])).strftime('%c')})
for x in dataset:
payload = {'action': 'groupsearch',
'id': x['id'], #searchid,
'issue': issue_search}
#in order to match up against 0-day stuff, volume has to be none at this point
#when doing other searches tho, this should be allowed to go through
#if all([volume_search != 'None', volume_search is not None]):
# payload.update({'volume': re.sub('v', '', volume_search).strip()})
logger.info('payload: ' + str(payload))
url = 'https://32pag.es/ajax.php'
time.sleep(1) #just to make sure we don't hammer, 1s pause.
d = s.get(url, params=payload, verify=True)
try:
searchResults = d.json()
except:
searchResults = d.text
logger.info(searchResults)
if searchResults['status'] == 'success' and searchResults['count'] > 0:
logger.info('successfully retrieved ' + str(searchResults['count']) + ' search results.')
for a in searchResults['details']:
results32p.append({'link': a['id'],
'title': self.searchterm['series'] + ' v' + a['volume'] + ' #' + a['issues'],
'filesize': a['size'],
'issues': a['issues'],
'pack': a['pack'],
'format': a['format'],
'language': a['language'],
'seeders': a['seeders'],
'leechers': a['leechers'],
'scanner': a['scanner'],
'pubdate': datetime.datetime.fromtimestamp(float(a['upload_time'])).strftime('%c')})
if len(results32p) > 0:
resultlist['entries'] = sorted(results32p, key=itemgetter('pack','title'), reverse=False)
else:
resultlist = 'no results'
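The rewritten search boils down to one groupsearch ajax call per candidate group id, where the candidates come either from the cached ref32p entry or from the serieslist scrape above. A minimal sketch of that loop, assuming an already-authenticated requests.Session:
import time
import requests
def groupsearch(s, dataset, issue_search):
    # 's' is a logged-in requests.Session; 'dataset' holds the candidate
    # torrent groups, either cached via ref32p or scraped from serieslist
    results = []
    for x in dataset:
        payload = {'action': 'groupsearch', 'id': x['id'], 'issue': issue_search}
        time.sleep(1)  # 1s pause between requests so the site isn't hammered
        r = s.get('https://32pag.es/ajax.php', params=payload, verify=True)
        data = r.json()
        if data['status'] == 'success' and data['count'] > 0:
            results.extend(data['details'])
    return results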

View File

@ -72,7 +72,7 @@ def pulldetails(comicid, type, issueid=None, offset=1, arclist=None, comicidlist
elif type == 'storyarc':
PULLURL = mylar.CVURL + 'story_arcs/?api_key=' + str(comicapi) + '&format=xml&filter=name:' + str(issueid) + '&field_list=cover_date'
elif type == 'comicyears':
PULLURL = mylar.CVURL + 'volumes/?api_key=' + str(comicapi) + '&format=xml&filter=id:' + str(comicidlist) + '&field_list=name,id,start_year,publisher&offset=' + str(offset)
PULLURL = mylar.CVURL + 'volumes/?api_key=' + str(comicapi) + '&format=xml&filter=id:' + str(comicidlist) + '&field_list=name,id,start_year,publisher,description,deck&offset=' + str(offset)
elif type == 'import':
PULLURL = mylar.CVURL + 'issues/?api_key=' + str(comicapi) + '&format=xml&filter=id:' + (comicidlist) + '&field_list=cover_date,id,issue_number,name,date_last_updated,store_date,volume' + '&offset=' + str(offset)
elif type == 'update_dates':
@ -159,7 +159,7 @@ def getComic(comicid, type, issueid=None, arc=None, arcid=None, arclist=None, co
dom = pulldetails(arc, 'storyarc', None, 1)
return GetComicInfo(issueid, dom)
elif type == 'comicyears':
#used by the story arc searcher when adding a given arc to poll each ComicID in order to populate the Series Year.
#used by the story arc searcher when adding a given arc to poll each ComicID in order to populate the Series Year & volume (hopefully).
#this grabs each issue based on issueid, and then subsets the comicid for each to be used later.
#set the offset to 0, since we're doing a filter.
dom = pulldetails(arcid, 'comicyears', offset=0, comicidlist=comicidlist)
@ -572,10 +572,100 @@ def GetSeriesYears(dom):
logger.warn('There was a problem retrieving the start year for a particular series within the story arc.')
tempseries['SeriesYear'] = '0000'
desdeck = 0
tempseries['Volume'] = 'None'
#the description field actually holds the Volume# - so let's grab it
try:
descchunk = dm.getElementsByTagName('description')[0].firstChild.wholeText
comic_desc = drophtml(descchunk)
desdeck +=1
except:
comic_desc = 'None'
#sometimes the deck has volume labels
try:
deckchunk = dm.getElementsByTagName('deck')[0].firstChild.wholeText
comic_deck = deckchunk
desdeck +=1
except:
comic_deck = 'None'
while (desdeck > 0):
if desdeck == 1:
if comic_desc == 'None':
comicDes = comic_deck[:30]
else:
#extract the first 60 characters
comicDes = comic_desc[:60].replace('New 52', '')
elif desdeck == 2:
#extract the characters from the deck
comicDes = comic_deck[:30].replace('New 52', '')
else:
break
i = 0
while (i < 2):
if 'volume' in comicDes.lower():
#found volume - let's grab it.
v_find = comicDes.lower().find('volume')
#arbitrarily grab the next 10 chars (6 for volume + 1 for space + 3 for the actual vol #)
#increased to 10 to allow for text numbering (+5 max)
#sometimes it's volume 5 and occasionally it's fifth volume.
if i == 0:
vfind = comicDes[v_find:v_find +15] #if it's volume 5 format
basenums = {'zero': '0', 'one': '1', 'two': '2', 'three': '3', 'four': '4', 'five': '5', 'six': '6', 'seven': '7', 'eight': '8', 'nine': '9', 'ten': '10', 'i': '1', 'ii': '2', 'iii': '3', 'iv': '4', 'v': '5'}
logger.fdebug('volume X format - ' + str(i) + ': ' + vfind)
else:
vfind = comicDes[:v_find] # if it's fifth volume format
basenums = {'zero': '0', 'first': '1', 'second': '2', 'third': '3', 'fourth': '4', 'fifth': '5', 'sixth': '6', 'seventh': '7', 'eighth': '8', 'ninth': '9', 'tenth': '10', 'i': '1', 'ii': '2', 'iii': '3', 'iv': '4', 'v': '5'}
logger.fdebug('X volume format - ' + str(i) + ': ' + vfind)
volconv = ''
for nums in basenums:
if nums in vfind.lower():
sconv = basenums[nums]
vfind = re.sub(nums, sconv, vfind.lower())
break
#logger.info('volconv: ' + str(volconv))
if i == 0:
volthis = vfind.lower().find('volume')
volthis = volthis + 6 # skip past the word 'volume' itself so we can grab the digits that follow
vfind = vfind[volthis:volthis + 4] # grab the next 4 characters ;)
elif i == 1:
volthis = vfind.lower().find('volume')
vfind = vfind[volthis - 4:volthis] # grab the 4 characters preceding the word
if '(' in vfind:
#bracket detected in versioning
vfindit = re.findall('[^()]+', vfind)
vfind = vfindit[0]
vf = re.findall('[^<>]+', vfind)
try:
ledigit = re.sub("[^0-9]", "", vf[0])
if ledigit != '':
tempseries['Volume'] = ledigit
logger.fdebug("Volume information found! Adding to series record : volume " + tempseries['Volume'])
break
except:
pass
i += 1
else:
i += 1
if tempseries['Volume'] == 'None':
logger.fdebug('tempseries[Volume]:' + str(tempseries['Volume']))
desdeck -= 1
else:
break
serieslist.append({"ComicID": tempseries['ComicID'],
"ComicName": tempseries['Series'],
"SeriesYear": tempseries['SeriesYear'],
"Publisher": tempseries['Publisher']})
"Publisher": tempseries['Publisher'],
"Volume": tempseries['Volume']})
return serieslist
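As a worked example of the heuristic above: for a description like 'The fifth volume of the series', the 'volume 5' pass finds no digit after the word, so the second pass maps the word-number before it through basenums and keeps only the digits. A condensed sketch of just that second pass (the committed code also handles the 'volume 5' form and roman numerals):
import re
basenums = {'first': '1', 'second': '2', 'third': '3', 'fourth': '4', 'fifth': '5'}
def volume_from_desc(desc):
    low = desc.lower()
    v_find = low.find('volume')
    if v_find == -1:
        return None
    before = low[:v_find]
    for word, digit in basenums.items():
        before = before.replace(word, digit)
    # keep only the digits immediately preceding 'volume'
    digits = re.sub('[^0-9]', '', before[-5:])
    return digits or None
print(volume_from_desc('The fifth volume of the series'))   # -> 5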

View File

@ -91,7 +91,7 @@ class FileChecker(object):
self.failed_files = []
self.dynamic_handlers = ['/','-',':','\'',',','&','?','!','+']
self.dynamic_handlers = ['/','-',':','\'',',','&','?','!','+','(',')']
self.dynamic_replacements = ['and','the']
self.rippers = ['-empire','-empire-hd','minutemen-','-dcp']

View File

@ -1747,6 +1747,32 @@ def listLibrary():
library[row['ReleaseComicId']] = row['ComicID']
return library
def listIssues(weeknumber, year):
import db
library = []
myDB = db.DBConnection()
# Get individual issues
list = myDB.select("SELECT issues.Status, issues.ComicID, issues.IssueID, issues.ComicName, weekly.publisher, issues.Issue_Number from weekly, issues where weekly.IssueID = issues.IssueID and weeknumber = ? and year = ?", [weeknumber, year])
for row in list:
library.append({'ComicID': row['ComicID'],
'Status': row['Status'],
'IssueID': row['IssueID'],
'ComicName': row['ComicName'],
'Publisher': row['publisher'],
'Issue_Number': row['Issue_Number']})
# Add the annuals
if mylar.ANNUALS_ON:
list = myDB.select("SELECT annuals.Status, annuals.ComicID, annuals.ReleaseComicID, annuals.IssueID, annuals.ComicName, weekly.publisher, annuals.Issue_Number from weekly, annuals where weekly.IssueID = annuals.IssueID and weeknumber = ? and year = ?", [weeknumber, year])
for row in list:
library.append({'ComicID': row['ComicID'],
'Status': row['Status'],
'IssueID': row['IssueID'],
'ComicName': row['ComicName'],
'Publisher': row['publisher'],
'Issue_Number': row['Issue_Number']})
return library
def incr_snatched(ComicID):
import db, logger
myDB = db.DBConnection()
@ -2117,20 +2143,157 @@ def conversion(value):
value = value.decode('windows-1252')
return value
def clean_url(url):
leading = len(url) - len(url.lstrip(' '))
ending = len(url) - len(url.rstrip(' '))
if leading >= 1:
url = url[leading:]
if ending >=1:
url = url[:-ending]
return url
def chunker(seq, size):
#returns a list from a large group of tuples by size (ie. for group in chunker(seq, 3))
return [seq[pos:pos + size] for pos in xrange(0, len(seq), size)]
def file_ops(path,dst,arc=False):
def cleanHost(host, protocol = True, ssl = False, username = None, password = None):
""" Return a cleaned up host with given url options set
taken verbatim from CouchPotato
Changes protocol to https if ssl is set to True and http if ssl is set to false.
>>> cleanHost("localhost:80", ssl=True)
'https://localhost:80/'
>>> cleanHost("localhost:80", ssl=False)
'http://localhost:80/'
Username and password is managed with the username and password variables
>>> cleanHost("localhost:80", username="user", password="passwd")
'http://user:passwd@localhost:80/'
Output without scheme (protocol) can be forced with protocol=False
>>> cleanHost("localhost:80", protocol=False)
'localhost:80'
"""
if not '://' in host and protocol:
host = ('https://' if ssl else 'http://') + host
if not protocol:
host = host.split('://', 1)[-1]
if protocol and username and password:
try:
auth = re.findall('^(?:.+?//)(.+?):(.+?)@(?:.+)$', host)
if auth:
logger.error('Cleanhost error: auth already defined in url: %s, please remove BasicAuth from url.' % host)
else:
host = host.replace('://', '://%s:%s@' % (username, password), 1)
except:
pass
host = host.rstrip('/ ')
if protocol:
host += '/'
return host
def checkthe_id(comicid=None, up_vals=None):
import db, logger
myDB = db.DBConnection()
if not up_vals:
chk = myDB.selectone("SELECT * from ref32p WHERE ComicID=?", [comicid]).fetchone()
if chk is None:
return None
else:
return {'id': chk['ID'],
'series': chk['Series']}
else:
ctrlVal = {'ComicID': comicid}
newVal = {'Series': up_vals[0]['series'],
'ID': up_vals[0]['id']}
myDB.upsert("ref32p", newVal, ctrlVal)
def spantheyears(storyarcid):
import db
myDB = db.DBConnection()
totalcnt = myDB.select("SELECT * FROM readinglist WHERE StoryArcID=?", [storyarcid])
lowyear = 9999
maxyear = 0
for la in totalcnt:
if la['IssueDate'] is None:
continue
else:
if int(la['IssueDate'][:4]) > maxyear:
maxyear = int(la['IssueDate'][:4])
if int(la['IssueDate'][:4]) < lowyear:
lowyear = int(la['IssueDate'][:4])
if maxyear == 0:
spanyears = la['SeriesYear']
elif lowyear == maxyear:
spanyears = str(maxyear)
else:
spanyears = str(lowyear) + ' - ' + str(maxyear) #la['SeriesYear'] + ' - ' + str(maxyear)
return spanyears
def arcformat(arc, spanyears, publisher):
arcdir = filesafe(arc)
if publisher is None:
publisher = 'None'
values = {'$arc': arcdir,
'$spanyears': spanyears,
'$publisher': publisher}
tmp_folderformat = mylar.ARC_FOLDERFORMAT
if publisher == 'None':
chunk_f_f = re.sub('\$publisher', '', tmp_folderformat)
chunk_f = re.compile(r'\s+')
tmp_folderformat = chunk_f.sub(' ', chunk_f_f)
if any([tmp_folderformat == '', tmp_folderformat is None]):
arcpath = arcdir
else:
arcpath = replace_all(tmp_folderformat, values)
if mylar.REPLACE_SPACES:
arcpath = arcpath.replace(' ', mylar.REPLACE_CHAR)
if arcpath.startswith('//'):
arcpath = arcpath[2:]
elif arcpath.startswith('/'):
arcpath = arcpath[1:]
if mylar.STORYARCDIR:
logger.info(mylar.DESTINATION_DIR)
logger.info('StoryArcs')
logger.info(arcpath)
dstloc = os.path.join(mylar.DESTINATION_DIR, 'StoryArcs', arcpath)
else:
logger.warn('Story arc directory is not configured. Defaulting to grabbag directory: ' + mylar.GRABBAG_DIR)
dstloc = mylar.GRABBAG_DIR
return dstloc
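With the new ARC_FOLDERFORMAT option, a format such as '$publisher/$arc ($spanyears)' resolves by straight token substitution before being joined under DESTINATION_DIR/StoryArcs. A sketch, assuming the replace_all helper behaves as shown (the format string and arc are examples):
def replace_all(text, values):
    # same contract as the replace_all helper used above
    for token, val in values.items():
        text = text.replace(token, val)
    return text
values = {'$arc': 'Civil War', '$spanyears': '2006 - 2007', '$publisher': 'Marvel'}
print(replace_all('$publisher/$arc ($spanyears)', values))
# -> Marvel/Civil War (2006 - 2007)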
def file_ops(path,dst,arc=False,one_off=False):
# # path = source path + filename
# # dst = destination path + filename
# # arc = to denote if the file_operation is being performed as part of a story arc or not
# # arc = to denote if the file_operation is being performed as part of a story arc or not where the series exists on the watchlist already
# # one_off = denotes if the file operation is for a one-off download going into either the grabbag directory or a story arc folder
# #get the crc of the file prior to the operation and then compare after to ensure it's complete.
# crc_check = mylar.filechecker.crc(path)
# #will be either copy / move
if mylar.FILE_OPTS == 'copy' or (arc is True and any([mylar.FILE_OPTS == 'copy', mylar.FILE_OPTS == 'move'])):
if any([one_off, arc]):
action_op = mylar.ARC_FILEOPS
else:
action_op = mylar.FILE_OPTS
if action_op == 'copy' or (arc is True and any([action_op == 'copy', action_op == 'move'])):
try:
shutil.copy( path , dst )
# if crc_check == mylar.filechecker.crc(dst):
@ -2138,7 +2301,7 @@ def file_ops(path,dst,arc=False):
return False
return True
elif mylar.FILE_OPTS == 'move':
elif action_op == 'move':
try:
shutil.move( path , dst )
# if crc_check == mylar.filechecker.crc(dst):
@ -2146,10 +2309,10 @@ def file_ops(path,dst,arc=False):
return False
return True
elif any([mylar.FILE_OPTS == 'hardlink', mylar.FILE_OPTS == 'softlink']):
elif any([action_op == 'hardlink', action_op == 'softlink']):
if 'windows' not in mylar.OS_DETECT.lower():
# if it's an arc, then it needs to go in reverse since we want to keep the src files (in the series directory)
if mylar.FILE_OPTS == 'hardlink':
if action_op == 'hardlink':
import sys
# Open a file
@ -2181,7 +2344,7 @@ def file_ops(path,dst,arc=False):
return True
elif mylar.FILE_OPTS == 'softlink':
elif action_op == 'softlink':
try:
#first we need to copy the file to the new location, then create the symlink pointing from new -> original
if not arc:
@ -2189,9 +2352,10 @@ def file_ops(path,dst,arc=False):
if os.path.lexists( path ):
os.remove( path )
os.symlink( dst, path )
logger.fdebug('Successfully created softlink [' + dst + ' --> ' + path + ']')
else:
os.symlink ( path, dst )
logger.fdebug('Successfully created softlink [' + dst + ' --> ' + path + ']')
logger.fdebug('Successfully created softlink [' + path + ' --> ' + dst + ']')
except OSError, e:
#if e.errno == errno.EEXIST:
# os.remove(dst)
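The branch above reverses link direction for story arcs: a regular softlink moves the file to dst and leaves path as a symlink back to it, while an arc keeps the original in the series directory and links from the arc folder instead. A minimal sketch of the arc case (paths are illustrative):
import os
def arc_softlink(path, dst):
    # arc: keep the source file in the series dir, link from the arc folder
    if os.path.lexists(dst):
        os.remove(dst)
    os.symlink(path, dst)
# arc_softlink('/comics/Invincible/Invincible 001.cbz',
#              '/comics/StoryArcs/Civil War/Invincible 001.cbz')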

View File

@ -53,7 +53,7 @@ def locg(pulldate=None,weeknumber=None,year=None):
url = 'https://walksoftly.itsaninja.party/newcomics.php'
try:
r = requests.get(url, params=params, verify=True)
r = requests.get(url, params=params, verify=True, headers={'User-Agent': mylar.USER_AGENT[:mylar.USER_AGENT.find('/')+7] + mylar.USER_AGENT[mylar.USER_AGENT.find('(')+1]})
except requests.exceptions.RequestException as e:
logger.warn(e)
return {'status': 'failure'}
@ -114,7 +114,6 @@ def locg(pulldate=None,weeknumber=None,year=None):
'COMIC': comicname,
'COMICID': comicid,
'ISSUEID': issueid,
#'DYNAMICNAME': dynamic_name,
'WEEKNUMBER': x['weeknumber'],
'YEAR': x['year']}
myDB.upsert("weekly", newValueDict, controlValueDict)

View File

@ -404,8 +404,8 @@ def storyarcinfo(xmlid):
comicapi = mylar.COMICVINE_API
#respawn to the exact id for the story arc and count the # of issues present.
ARCPULL_URL = mylar.CVURL + 'story_arc/4045-' + str(xmlid) + '/?api_key=' + str(comicapi) + '&field_list=issues,name,first_appeared_in_issue,deck,image&format=xml&offset=0'
logger.fdebug('arcpull_url:' + str(ARCPULL_URL))
ARCPULL_URL = mylar.CVURL + 'story_arc/4045-' + str(xmlid) + '/?api_key=' + str(comicapi) + '&field_list=issues,publisher,name,first_appeared_in_issue,deck,image&format=xml&offset=0'
#logger.fdebug('arcpull_url:' + str(ARCPULL_URL))
#new CV API restriction - one api request / second.
if mylar.CVAPI_RATE is None or mylar.CVAPI_RATE < 2:
@ -487,6 +487,11 @@ def storyarcinfo(xmlid):
except:
xmldesc = "None"
try:
xmlpub = arcdom.getElementsByTagName('publisher')[0].firstChild.wholeText
except:
xmlpub = "None"
try:
xmldeck = arcdom.getElementsByTagName('deck')[0].firstChild.wholeText
except:
@ -508,7 +513,8 @@ def storyarcinfo(xmlid):
'description': xmldesc,
'deck': xmldeck,
'arclist': arclist,
'haveit': haveit
'haveit': haveit,
'publisher': xmlpub
}
return arcinfo

View File

@ -91,8 +91,11 @@ class NMA:
self._session = requests.Session()
def _send(self, data, module):
r = self._session.post(self.NMA_URL, data=data, verify=True)
try:
r = self._session.post(self.NMA_URL, data=data, verify=True)
except requests.exceptions.RequestException as e:
logger.error(module + '[' + str(e) + '] Unable to send via NMA. Aborting notification for this item.')
return False
logger.fdebug('[NMA] Status code returned: ' + str(r.status_code))
if r.status_code == 200:

View File

@ -305,7 +305,7 @@ def search_init(ComicName, IssueNumber, ComicYear, SeriesYear, Publisher, IssueD
def NZB_SEARCH(ComicName, IssueNumber, ComicYear, SeriesYear, Publisher, IssueDate, StoreDate, nzbprov, prov_count, IssDateFix, IssueID, UseFuzzy, newznab_host=None, ComicVersion=None, SARC=None, IssueArcID=None, RSS=None, ComicID=None, issuetitle=None, unaltered_ComicName=None, allow_packs=None):
if any([allow_packs is None, allow_packs == 'None', allow_packs == 0]):
if any([allow_packs is None, allow_packs == 'None', allow_packs == 0]) and all([mylar.ENABLE_TORRENT_SEARCH, mylar.ENABLE_32P]):
allow_packs = False
logger.info('allow_packs set to :' + str(allow_packs))
@ -533,7 +533,7 @@ def NZB_SEARCH(ComicName, IssueNumber, ComicYear, SeriesYear, Publisher, IssueDa
rss = "no"
if nzbprov == '32P':
if all([mylar.MODE_32P == 1,mylar.ENABLE_32P]):
searchterm = {'series': ComicName, 'issue': findcomiciss, 'volume': ComicVersion, 'publisher': Publisher}
searchterm = {'series': ComicName, 'id': ComicID, 'issue': findcomiciss, 'volume': ComicVersion, 'publisher': Publisher}
#first we find the id on the serieslist of 32P
#then we call the ajax against the id and issue# and volume (if exists)
a = auth32p.info32p(searchterm=searchterm)
@ -629,28 +629,34 @@ def NZB_SEARCH(ComicName, IssueNumber, ComicYear, SeriesYear, Publisher, IssueDa
try:
r = requests.get(findurl, params=payload, verify=verify, headers=headers)
except Exception, e:
logger.warn('Error fetching data from %s: %s' % (nzbprov, e))
if 'HTTP Error 503' in e:
except requests.exceptions.Timeout as e:
logger.warn('Timeout occurred fetching data from %s: %s' % (nzbprov, e))
break
except requests.exceptions.ConnectionError as e:
logger.warn('Connection error trying to retrieve data from %s: %s' % (nzbprov, e))
break
except requests.exceptions.RequestException as e:
logger.warn('General Error fetching data from %s: %s' % (nzbprov, e))
if e.response is not None and e.response.status_code == 503:
#HTTP Error 503
logger.warn('Aborting search due to Provider unavailability')
foundc = "no"
break
data = False
#logger.fdebug('status code: ' + str(r.status_code))
if str(r.status_code) != '200':
logger.warn('Unable to retrieve search results from ' + tmpprov + ' [Status Code returned: ' + str(r.status_code) + ']')
try:
if str(r.status_code) != '200':
logger.warn('Unable to retrieve search results from ' + tmpprov + ' [Status Code returned: ' + str(r.status_code) + ']')
data = False
else:
data = r.content
except:
data = False
else:
data = r.content
if data:
bb = feedparser.parse(data)
else:
bb = "no results"
#logger.info('Search results:' + str(bb))
try:
if bb['feed']['error']:
logger.error('[ERROR CODE: ' + str(bb['feed']['error']['code']) + '] ' + str(bb['feed']['error']['description']))
@ -665,7 +671,7 @@ def NZB_SEARCH(ComicName, IssueNumber, ComicYear, SeriesYear, Publisher, IssueDa
done = True
break
except:
#logger.info('no errors on data retrieval...proceeding')
logger.info('no errors on data retrieval...proceeding')
pass
elif nzbprov == 'experimental':
#bb = parseit.MysterBinScrape(comsearch[findloop], comyear)
@ -1219,7 +1225,7 @@ def NZB_SEARCH(ComicName, IssueNumber, ComicYear, SeriesYear, Publisher, IssueDa
#find the pack range.
pack_issuelist = entry['issues']
issueid_info = helpers.issue_find_ids(ComicName,ComicID, pack_issuelist, IssueNumber)
issueid_info = helpers.issue_find_ids(ComicName, ComicID, pack_issuelist, IssueNumber)
logger.info('issueid_info:' + str(issueid_info))
if issueid_info['valid'] == True:
logger.info('Issue Number ' + IssueNumber + ' exists within pack. Continuing.')
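The provider fetch now separates timeouts and connection failures from other request errors instead of one blanket except. A standalone sketch of the same laddering (the URL handling is reduced to a placeholder):
import requests
def fetch(url, params):
    try:
        r = requests.get(url, params=params, timeout=30)
    except requests.exceptions.Timeout as e:
        print('timeout fetching results: %s' % e)
        return None
    except requests.exceptions.ConnectionError as e:
        print('connection error: %s' % e)
        return None
    except requests.exceptions.RequestException as e:
        print('general request error: %s' % e)
        return None
    if r.status_code != 200:
        print('bad status: %d' % r.status_code)
        return None
    return r.content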

View File

@ -36,7 +36,8 @@ class RTorrent(object):
self.client = TorClient.TorrentClient()
if not self.client.connect(mylar.RTORRENT_HOST,
mylar.RTORRENT_USERNAME,
mylar.RTORRENT_PASSWORD):
mylar.RTORRENT_PASSWORD,
mylar.RTORRENT_AUTHENTICATION):
logger.error('could not connect to %s, exiting', mylar.RTORRENT_HOST)
sys.exit(-1)

View File

@ -1,4 +1,5 @@
import os
from urlparse import urlparse
from lib.rtorrent import RTorrent
@ -9,21 +10,53 @@ class TorrentClient(object):
def __init__(self):
self.conn = None
def connect(self, host, username, password):
def getVerifySsl(self):
# Ensure verification has been enabled
if not mylar.RTORRENT_VERIFY:
return False
# Use ca bundle if defined
if mylar.RTORRENT_CA_BUNDLE and os.path.exists(mylar.RTORRENT_CA_BUNDLE):
return mylar.RTORRENT_CA_BUNDLE
# Use default ssl verification
return True
def connect(self, host, username, password, auth):
if self.conn is not None:
return self.conn
if not host:
return False
url = helpers.cleanHost(host, protocol = True, ssl = mylar.RTORRENT_SSL)
# Automatically add '+https' to 'httprpc' protocol if SSL is enabled
if mylar.RTORRENT_SSL and url.startswith('httprpc://'):
url = url.replace('httprpc://', 'httprpc+https://')
parsed = urlparse(url)
# rpc_url is only used on http/https scgi pass-through
if parsed.scheme in ['http', 'https']:
url += mylar.RTORRENT_RPC_URL
logger.info(url)
if username and password:
self.conn = RTorrent(
host,
username,
password
try:
self.conn = RTorrent(
url, (auth, username, password),
verify_server=True,
verify_ssl=self.getVerifySsl()
)
except:
return False
else:
self.conn = RTorrent(host)
try:
self.conn = RTorrent(host)
except:
return False
return self.conn
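Putting the new options together: an rtorrent_host of 'seed.example.com' with rtorrent_ssl enabled and an rtorrent_rpc_url of 'user/RPC2' builds the connection URL roughly like this (host and path are examples):
try:
    from urlparse import urlparse            # python 2, as above
except ImportError:
    from urllib.parse import urlparse
ssl = True                                    # RTORRENT_SSL
host = 'seed.example.com'                     # RTORRENT_HOST, protocol optional
url = ('https://' if ssl else 'http://') + host + '/'   # cleanHost(host, ssl=ssl)
if urlparse(url).scheme in ('http', 'https'):
    url += 'user/RPC2'                        # RTORRENT_RPC_URL appended
print(url)                                    # https://seed.example.com/user/RPC2
# the client then calls RTorrent(url, ('digest', username, password), ...)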

View File

@ -34,7 +34,7 @@ def dbUpdate(ComicIDList=None, calledfrom=None):
if mylar.UPDATE_ENDED:
logger.info('Updating only Continuing Series (option enabled) - this might cause problems with the pull-list matching for rebooted series')
comiclist = []
completelist = myDB.select('SELECT LatestDate, ComicPublished, ForceContinuing, NewPublish, LastUpdated, ComicID, ComicName, Corrected_SeriesYear from comics WHERE Status="Active" or Status="Loading" order by LatestDate DESC, LastUpdated ASC')
completelist = myDB.select('SELECT LatestDate, ComicPublished, ForceContinuing, NewPublish, LastUpdated, ComicID, ComicName, Corrected_SeriesYear, ComicYear from comics WHERE Status="Active" or Status="Loading" order by LatestDate DESC, LastUpdated ASC')
for comlist in completelist:
if comlist['LatestDate'] is None:
recentstatus = 'Loading'
@ -62,10 +62,11 @@ def dbUpdate(ComicIDList=None, calledfrom=None):
"LastUpdated": comlist['LastUpdated'],
"ComicID": comlist['ComicID'],
"ComicName": comlist['ComicName'],
"ComicYear": comlist['ComicYear'],
"Corrected_SeriesYear": comlist['Corrected_SeriesYear']})
else:
comiclist = myDB.select('SELECT LatestDate, LastUpdated, ComicID, ComicName from comics WHERE Status="Active" or Status="Loading" order by LatestDate DESC, LastUpdated ASC')
comiclist = myDB.select('SELECT LatestDate, LastUpdated, ComicID, ComicName, ComicYear, Corrected_SeriesYear from comics WHERE Status="Active" or Status="Loading" order by LatestDate DESC, LastUpdated ASC')
else:
comiclist = []
comiclisting = ComicIDList
@ -78,6 +79,15 @@ def dbUpdate(ComicIDList=None, calledfrom=None):
cnt = 1
for comic in comiclist:
dspyear = comic['ComicYear']
csyear = None
if comic['Corrected_SeriesYear'] is not None:
csyear = comic['Corrected_SeriesYear']
if int(csyear) != int(comic['ComicYear']):
comic['ComicYear'] = csyear
dspyear = csyear
if ComicIDList is None:
ComicID = comic['ComicID']
ComicName = comic['ComicName']
@ -93,20 +103,10 @@ def dbUpdate(ComicIDList=None, calledfrom=None):
logger.info(ComicName + '[' + str(ComicID) + '] Was refreshed less than 5 hours ago. Skipping Refresh at this time.')
cnt +=1
continue
logger.info('[' + str(cnt) + '/' + str(len(comiclist)) + '] Refreshing :' + ComicName + ' [' + str(ComicID) + ']')
logger.info('[' + str(cnt) + '/' + str(len(comiclist)) + '] Refreshing :' + ComicName + ' (' + str(dspyear) + ') [' + str(ComicID) + ']')
else:
ComicID = comic['ComicID']
ComicName = comic['ComicName']
logger.info('csyear: ' + str(comic['Corrected_SeriesYear']))
dspyear = comic['ComicYear']
csyear = None
if comic['Corrected_SeriesYear'] is not None:
csyear = comic['Corrected_SeriesYear']
if int(csyear) != int(comic['ComicYear']):
comic['ComicYear'] = csyear
dspyear = csyear
logger.fdebug('Refreshing: ' + ComicName + ' (' + str(dspyear) + ') [' + str(ComicID) + ']')
@ -356,6 +356,7 @@ def upcoming_update(ComicID, ComicName, IssueNumber, IssueDate, forcecheck=None,
if 'annual' in ComicName.lower():
if mylar.ANNUALS_ON:
logger.info('checking: ' + str(ComicID) + ' -- issue#: ' + str(IssueNumber))
issuechk = myDB.selectone("SELECT * FROM annuals WHERE ComicID=? AND Issue_Number=?", [ComicID, IssueNumber]).fetchone()
else:
logger.fdebug('Annual detected, but annuals not enabled. Ignoring result.')

View File

@ -512,6 +512,7 @@ class WebInterface(object):
for AD in issuedata:
seriesYear = 'None'
issuePublisher = 'None'
seriesVolume = 'None'
if AD['IssueName'] is None:
IssueName = 'None'
@ -522,15 +523,19 @@ class WebInterface(object):
if cid['ComicID'] == AD['ComicID']:
seriesYear = cid['SeriesYear']
issuePublisher = cid['Publisher']
seriesVolume = cid['Volume']
if storyarcpublisher is None:
#assume that the arc is the same
storyarcpublisher = issuePublisher
break
newCtrl = {"IssueID": AD['IssueID'],
"StoryArcID": AD['StoryArcID']}
newVals = {"ComicID": AD['ComicID'],
"IssueID": AD['IssueID'],
"IssueArcID": AD['IssueArcID'],
"StoryArc": storyarcname,
"ComicName": AD['ComicName'],
"Volume": seriesVolume,
"DynamicComicName": AD['DynamicName'],
"IssueName": IssueName,
"IssueNumber": AD['Issue_Number'],
@ -546,6 +551,7 @@ class WebInterface(object):
myDB.upsert("readinglist", newVals, newCtrl)
logger.info(newVals)
#run the Search for Watchlist matches now.
logger.fdebug(module + ' Now searching your watchlist for matches belonging to this story arc.')
self.ArcWatchlist(storyarcid)
@ -1292,12 +1298,15 @@ class WebInterface(object):
SeriesYear = dateload['SeriesYear']
if ComicYear is None: ComicYear = SeriesYear
logger.info('Marking ' + ComicName + ' #' + ComicIssue + ' as wanted...')
if dateload['Volume'] is None:
logger.info('Marking ' + ComicName + ' #' + ComicIssue + ' as wanted...')
else:
logger.info('Marking ' + ComicName + ' (' + dateload['Volume'] + ') #' + ComicIssue + ' as wanted...')
logger.fdebug('publisher: ' + Publisher)
controlValueDict = {"IssueArcID": IssueArcID}
newStatus = {"Status": "Wanted"}
myDB.upsert("readinglist", newStatus, controlValueDict)
foundcom, prov = search.search_init(ComicName=ComicName, IssueNumber=ComicIssue, ComicYear=ComicYear, SeriesYear=None, Publisher=Publisher, IssueDate=IssueDate, StoreDate=StoreDate, IssueID=None, AlternateSearch=None, UseFuzzy=None, ComicVersion=None, SARC=SARC, IssueArcID=IssueArcID)
foundcom, prov = search.search_init(ComicName=ComicName, IssueNumber=ComicIssue, ComicYear=ComicYear, SeriesYear=None, Publisher=Publisher, IssueDate=IssueDate, StoreDate=StoreDate, IssueID=None, AlternateSearch=None, UseFuzzy=None, ComicVersion=dateload['Volume'], SARC=SARC, IssueArcID=IssueArcID)
if foundcom == "yes":
logger.info(u"Downloaded " + ComicName + " #" + ComicIssue + " (" + str(ComicYear) + ")")
controlValueDict = {"IssueArcID": IssueArcID}
@ -1544,10 +1553,12 @@ class WebInterface(object):
prev_week = int(weeknumber) - 1
next_week = int(weeknumber) + 1
date_fmt = "%B %d, %Y"
weekinfo = {'weeknumber': weeknumber,
'startweek': startweek.strftime('%B %d, %Y'),
'startweek': u"" + startweek.strftime(date_fmt).decode('utf-8'),
'midweek': midweek.strftime('%Y-%m-%d'),
'endweek': endweek.strftime('%B %d, %Y'),
'endweek': u"" + endweek.strftime(date_fmt).decode('utf-8'),
'year': year,
'prev_weeknumber': prev_week,
'next_weeknumber': next_week,
@ -1557,14 +1568,17 @@ class WebInterface(object):
weekdst = mylar.WEEKFOLDER_LOC
else:
weekdst = mylar.DESTINATION_DIR
weekfold = os.path.join(weekdst, str( str(weekinfo['year']) + '-' + str(weeknumber) ))
logger.info(weekinfo)
if mylar.WEEKFOLDER_FORMAT == 0:
weekfold = os.path.join(weekdst, str( str(weekinfo['year']) + '-' + str(weeknumber) ))
else:
weekfold = os.path.join(weekdst, str( str(weekinfo['midweek']) ))
popit = myDB.select("SELECT * FROM sqlite_master WHERE name='weekly' and type='table'")
if popit:
w_results = myDB.select("SELECT * from weekly WHERE weeknumber=?", [str(weeknumber)])
if len(w_results) == 0:
logger.info('trying to repopulate to different week')
logger.info('trying to repopulate to week: ' + str(weeknumber) + '-' + str(year))
repoll = self.manualpull(weeknumber=weeknumber,year=year)
if repoll['status'] == 'success':
w_results = myDB.select("SELECT * from weekly WHERE weeknumber=?", [str(weeknumber)])
@ -1578,14 +1592,27 @@ class WebInterface(object):
else:
logger.warn('Unable to populate the pull-list. Not continuing at this time (will try again in abit)')
if len(w_results) == 0:
if w_results is None:
return serve_template(templatename="weeklypull.html", title="Weekly Pull", weeklyresults=weeklyresults, pullfilter=True, weekfold=weekfold, wantedcount=0, weekinfo=weekinfo)
watchlibrary = helpers.listLibrary()
issueLibrary = helpers.listIssues(weeknumber, year)
for weekly in w_results:
xfound = False
tmp_status = weekly['Status']
if weekly['ComicID'] in watchlibrary:
haveit = watchlibrary[weekly['ComicID']]
if all([week >= weeknumber, mylar.AUTOWANT_UPCOMING, tmp_status == 'Skipped']):
tmp_status = 'Wanted'
for x in issueLibrary:
if weekly['IssueID'] == x['IssueID']:
xfound = True
tmp_status = x['Status']
break
else:
haveit = "No"
@ -1606,7 +1633,7 @@ class WebInterface(object):
"PUBLISHER": weekly['PUBLISHER'],
"ISSUE": weekly['ISSUE'],
"COMIC": weekly['COMIC'],
"STATUS": weekly['STATUS'],
"STATUS": tmp_status,
"COMICID": weekly['ComicID'],
"ISSUEID": weekly['IssueID'],
"HAVEIT": haveit,
@ -1619,7 +1646,7 @@ class WebInterface(object):
"PUBLISHER": weekly['PUBLISHER'],
"ISSUE": weekly['ISSUE'],
"COMIC": weekly['COMIC'],
"STATUS": weekly['STATUS'],
"STATUS": tmp_status,
"COMICID": weekly['ComicID'],
"ISSUEID": weekly['IssueID'],
"HAVEIT": haveit,
@ -1631,7 +1658,7 @@ class WebInterface(object):
"PUBLISHER": weekly['PUBLISHER'],
"ISSUE": weekly['ISSUE'],
"COMIC": weekly['COMIC'],
"STATUS": weekly['STATUS'],
"STATUS": tmp_status,
"COMICID": weekly['ComicID'],
"ISSUEID": weekly['IssueID'],
"HAVEIT": haveit,
@ -1639,7 +1666,7 @@ class WebInterface(object):
"AUTOWANT": False
})
if weekly['STATUS'] == 'Wanted':
if tmp_status == 'Wanted':
wantedcount +=1
weeklyresults = sorted(weeklyresults, key=itemgetter('PUBLISHER', 'COMIC'), reverse=False)
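The overlay above means a pull row's stored status is superseded by the live issue/annual status whenever its IssueID is on the watchlist. Condensed to the per-row decision:
def effective_status(weekly_row, issue_library):
    # issue_library comes from helpers.listIssues(weeknumber, year)
    for x in issue_library:
        if weekly_row['IssueID'] == x['IssueID']:
            return x['Status']   # watchlist status wins
    return weekly_row['Status']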
@ -1784,6 +1811,7 @@ class WebInterface(object):
filterpull.exposed = True
def manualpull(self,weeknumber=None,year=None):
logger.info('ALT_PULL: ' + str(mylar.ALT_PULL) + ' PULLBYFILE: ' + str(mylar.PULLBYFILE) + ' week: ' + str(weeknumber) + ' year: ' + str(year))
if all([mylar.ALT_PULL == 2, mylar.PULLBYFILE is False]) and weeknumber:
return mylar.locg.locg(weeknumber=weeknumber,year=year)
#raise cherrypy.HTTPRedirect("pullist?week=" + str(weeknumber) + "&year=" + str(year))
@ -2342,25 +2370,7 @@ class WebInterface(object):
arclist = []
alist = myDB.select("SELECT * from readinglist WHERE ComicName is not Null group by StoryArcID") #COLLATE NOCASE")
for al in alist:
totalcnt = myDB.select("SELECT * FROM readinglist WHERE StoryArcID=?", [al['StoryArcID']])
totalissues = myDB.select("SELECT COUNT(*) as count from readinglist WHERE StoryARcID=?", [al['StoryArcID']])
lowyear = 9999
maxyear = 0
for la in totalcnt:
if la['IssueDate'] is None:
continue
else:
if int(la['IssueDate'][:4]) > maxyear:
maxyear = int(la['IssueDate'][:4])
if int(la['IssueDate'][:4]) < lowyear:
lowyear = int(la['IssueDate'][:4])
if maxyear == 0:
spanyears = la['SeriesYear']
elif lowyear == maxyear:
spanyears = str(maxyear)
else:
spanyears = str(lowyear) + ' - ' + str(maxyear) #la['SeriesYear'] + ' - ' + str(maxyear)
havecnt = myDB.select("SELECT COUNT(*) as count FROM readinglist WHERE StoryArcID=? AND (Status='Downloaded' or Status='Archived')", [al['StoryArcID']])
havearc = havecnt[0][0]
@ -2375,16 +2385,18 @@ class WebInterface(object):
percent = 0
totalarc = '?'
arclist.append({"StoryArcID": al['StoryArcID'],
"StoryArc": al['StoryArc'],
"TotalIssues": al['TotalIssues'],
"SeriesYear": al['SeriesYear'],
"Status": al['Status'],
"percent": percent,
"Have": havearc,
"SpanYears": spanyears,
"Total": totalarc,
"CV_ArcID": al['CV_ArcID']})
arclist.append({"StoryArcID": al['StoryArcID'],
"StoryArc": al['StoryArc'],
"TotalIssues": al['TotalIssues'],
"SeriesYear": al['SeriesYear'],
"StoryArcDir": al['StoryArc'],
"Status": al['Status'],
"percent": percent,
"Have": havearc,
"SpanYears": helpers.spantheyears(al['StoryArcID']),
"Total": totalarc,
"CV_ArcID": al['CV_ArcID']})
return serve_template(templatename="storyarc.html", title="Story Arcs", arclist=arclist, delete_type=0)
storyarc_main.exposed = True
@ -2393,15 +2405,27 @@ class WebInterface(object):
arcinfo = myDB.select("SELECT * from readinglist WHERE StoryArcID=? order by ReadingOrder ASC", [StoryArcID])
try:
cvarcid = arcinfo[0]['CV_ArcID']
arcdir = helpers.filesafe(arcinfo[0]['StoryArc'])
if mylar.REPLACE_SPACES:
arcdir = arcdir.replace(' ', mylar.REPLACE_CHAR)
arcpub = arcinfo[0]['Publisher']
lowyear = 9999
maxyear = 0
for la in arcinfo:
if la['IssueDate'] is None:
continue
else:
if int(la['IssueDate'][:4]) > maxyear:
maxyear = int(la['IssueDate'][:4])
if int(la['IssueDate'][:4]) < lowyear:
lowyear = int(la['IssueDate'][:4])
if mylar.STORYARCDIR:
sdir = os.path.join(mylar.DESTINATION_DIR, 'StoryArcs', arcdir)
if maxyear == 0:
spanyears = la['SeriesYear']
elif lowyear == maxyear:
spanyears = str(maxyear)
else:
logger.warn('Story arc directory is not configured. Defaulting to grabbag directory: ' + mylar.GRABBAG_DIR)
sdir = mylar.GRABBAG_DIR
spanyears = str(lowyear) + ' - ' + str(maxyear)
sdir = helpers.arcformat(arcinfo[0]['StoryArc'], spanyears, arcpub)
except:
cvarcid = None
sdir = mylar.GRABBAG_DIR
@ -2673,13 +2697,29 @@ class WebInterface(object):
else:
#cycle through the story arcs here for matches on the watchlist
arcdir = helpers.filesafe(ArcWatch[0]['StoryArc'])
if mylar.REPLACE_SPACES:
arcdir = arcdir.replace(' ', mylar.REPLACE_CHAR)
if mylar.STORYARCDIR:
dstloc = os.path.join(mylar.DESTINATION_DIR, 'StoryArcs', arcdir)
arcpub = ArcWatch[0]['Publisher']
if arcpub is None:
arcpub = ArcWatch[0]['IssuePublisher']
lowyear = 9999
maxyear = 0
for la in ArcWatch:
if la['IssueDate'] is None:
continue
else:
if int(la['IssueDate'][:4]) > maxyear:
maxyear = int(la['IssueDate'][:4])
if int(la['IssueDate'][:4]) < lowyear:
lowyear = int(la['IssueDate'][:4])
if maxyear == 0:
spanyears = la['SeriesYear']
elif lowyear == maxyear:
spanyears = str(maxyear)
else:
logger.warn('Story arc directory is not configured. Defaulting to grabbag directory: ' + mylar.GRABBAG_DIR)
dstloc = mylar.GRABBAG_DIR
spanyears = str(lowyear) + ' - ' + str(maxyear)
logger.info('arcpub: ' + arcpub)
dstloc = helpers.arcformat(arcdir, spanyears, arcpub)
if not os.path.isdir(dstloc):
logger.info('Story Arc Directory [' + dstloc + '] does not exist! - attempting to create now.')
@ -2698,6 +2738,8 @@ class WebInterface(object):
cvinfo_arcid = ArcWatch[0]['CV_ArcID']
text_file.write('https://comicvine.gamespot.com/storyarc/4045-' + str(cvinfo_arcid))
if mylar.ENFORCE_PERMS:
filechecker.setperms(os.path.join(dstloc, 'cvinfo'))
#get the list of files within the storyarc directory, if any.
fchk = filechecker.FileChecker(dir=dstloc, watchcomic=None, Publisher=None, sarc='true', justparse=True)
@ -2869,8 +2911,6 @@ class WebInterface(object):
logger.fdebug(module + ' Failed to ' + mylar.FILE_OPTS + ' ' + issloc + ' - check directories and manually re-run.')
else:
logger.fdebug('Destination file exists: ' + dstloc)
else:
logger.fdebug('Source file does not exist: ' + issloc)
else:
logger.fdebug("We don't have " + issue['ComicName'] + " :# " + issue['Issue_Number'])
@ -3146,7 +3186,8 @@ class WebInterface(object):
except IOError as e:
logger.error("Could not copy " + str(issuePATH) + " to " + str(dstPATH) + ". Copy to Cache terminated.")
raise cherrypy.HTTPRedirect("comicDetails?ComicID=%s" % comicid)
logger.debug("sucessfully copied to cache...Enabling Download link")
#logger.debug("sucessfully copied to cache...Enabling Download link")
controlValueDict = {'IssueID': IssueID}
RLnewValueDict = {'inCacheDIR': 'True',
@ -3184,42 +3225,35 @@ class WebInterface(object):
import ast
pulldate = ast.literal_eval(pulldate)
logger.info('pulldate: ' + str(pulldate))
yr = pulldate['year']
wk = pulldate['weeknumber']
desdir = os.path.join(dstdir, str(yr) + '-' + str(wk))
if os.path.isdir(desdir):
logger.info(u"Directory (" + desdir + ") already exists! Continuing...")
else:
logger.info("Directory doesn't exist!")
try:
os.makedirs(desdir)
logger.info(u"Directory successfully created at: " + desdir)
except OSError:
logger.error(u"Could not create comicdir : " + desdir)
logger.error(u"Defaulting to : " + mylar.DESTINATION_DIR)
desdir = mylar.DESTINATION_DIR
if mylar.WEEKFOLDER_FORMAT == 0:
#0 = YYYY-WN (year - week number)
desdir = os.path.join(dstdir, str(pulldate['year']) + '-' + str(pulldate['weeknumber']))
elif mylar.WEEKFOLDER_FORMAT == 1:
#1 = YYYY-mm-dd (midweek)
desdir = os.path.join(dstdir, str(pulldate['midweek']))
chkdir = filechecker.validateAndCreateDirectory(desdir, create=True, module='WEEKLY-FOLDER')
if not chkdir:
logger.warn('Unable to create weekly directory. Check location & permissions. Aborting Copy.')
return
else:
desdir = mylar.GRABBAG_DIR
clist = myDB.select("SELECT * FROM weekly WHERE weeknumber=? AND Status='Downloaded'", [pulldate['weeknumber']])
if clist is None: # nothing on the list, just go go gone
issuelist = helpers.listIssues(pulldate['weeknumber'],pulldate['year'])
if issuelist is None: # nothing on the list, just go go gone
logger.info("There aren't any issues downloaded from this week yet.")
else:
iscount = 0
for cl in clist:
isslist = myDB.select("SELECT * FROM Issues WHERE ComicID=? AND Status='Downloaded'", [cl['ComicID']])
if isslist is None: pass # no issues found for comicid - boo/boo
else:
for iss in isslist:
#go through issues downloaded until we find the one we want.
if iss['Issue_Number'] == cl['ISSUE']:
self.downloadLocal(iss['IssueID'], dir=desdir)
logger.info("Copied " + iss['ComicName'] + " #" + str(iss['Issue_Number']) + " to " + desdir.encode('utf-8').strip())
iscount+=1
break
logger.info("I have copied " + str(iscount) + " issues from this Week's pullist as requested.")
raise cherrypy.HTTPRedirect("pullist")
for issue in issuelist:
#logger.fdebug('Checking status of ' + issue['ComicName'] + ' #' + str(issue['Issue_Number']))
if issue['Status'] == 'Downloaded':
logger.info('Status Downloaded.')
self.downloadLocal(issue['IssueID'], dir=desdir)
logger.info("Copied " + issue['ComicName'] + " #" + str(issue['Issue_Number']) + " to " + desdir.encode('utf-8').strip())
iscount+=1
logger.info('I have copied ' + str(iscount) + ' issues from week #' + str(pulldate['weeknumber']) + ' pullist as requested.')
raise cherrypy.HTTPRedirect("pullist?week=%s&year=%s" % (pulldate['weeknumber'], pulldate['year']))
MassWeeklyDownload.exposed = True
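Under WEEKFOLDER_FORMAT, week 48 of 2016 with a midweek of 2016-11-30 lands in either '2016-48' (format 0) or '2016-11-30' (format 1). A sketch of the folder choice (the sample pulldate values are illustrative):
import os
def week_folder(dstdir, pulldate, weekfolder_format):
    if weekfolder_format == 0:
        # 0 = YYYY-WN (year - week number)
        return os.path.join(dstdir, '%s-%s' % (pulldate['year'], pulldate['weeknumber']))
    # 1 = YYYY-MM-DD (the midweek date)
    return os.path.join(dstdir, str(pulldate['midweek']))
print(week_folder('/comics/weekly', {'year': 2016, 'weeknumber': 48, 'midweek': '2016-11-30'}, 0))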
def idirectory(self):
@ -3903,6 +3937,10 @@ class WebInterface(object):
"utorrent_password": mylar.UTORRENT_PASSWORD,
"utorrent_label": mylar.UTORRENT_LABEL,
"rtorrent_host": mylar.RTORRENT_HOST,
"rtorrent_rpc_url": mylar.RTORRENT_RPC_URL,
"rtorrent_authentication": mylar.RTORRENT_AUTHENTICATION,
"rtorrent_ssl": helpers.checked(mylar.RTORRENT_SSL),
"rtorrent_verify": helpers.checked(mylar.RTORRENT_VERIFY),
"rtorrent_username": mylar.RTORRENT_USERNAME,
"rtorrent_password": mylar.RTORRENT_PASSWORD,
"rtorrent_directory": mylar.RTORRENT_DIRECTORY,
@ -4218,23 +4256,26 @@ class WebInterface(object):
readlistOptions.exposed = True
def arcOptions(self, StoryArcID=None, StoryArcName=None, read2filename=0, storyarcdir=0, copy2arcdir=0):
def arcOptions(self, StoryArcID=None, StoryArcName=None, read2filename=0, storyarcdir=0, arc_folderformat=None, copy2arcdir=0, arc_fileops='copy'):
mylar.READ2FILENAME = int(read2filename)
mylar.STORYARCDIR = int(storyarcdir)
mylar.ARC_FOLDERFORMAT = arc_folderformat
mylar.COPY2ARCDIR = int(copy2arcdir)
mylar.ARC_FILEOPS = arc_fileops
mylar.config_write()
logger.info(mylar.ARC_FOLDERFORMAT)
#force the check/creation of directory com_location here
if mylar.STORYARCDIR:
arcdir = os.path.join(mylar.DESTINATION_DIR, 'StoryArcs')
if os.path.isdir(str(arcdir)):
logger.info(u"Validating Directory (" + str(arcdir) + "). Already exists! Continuing...")
else:
logger.fdebug("Updated Directory doesn't exist! - attempting to create now.")
checkdirectory = filechecker.validateAndCreateDirectory(arcdir, True)
if not checkdirectory:
logger.warn('Error trying to validate/create directory. Aborting this process at this time.')
return
#if mylar.STORYARCDIR:
# arcdir = os.path.join(mylar.DESTINATION_DIR, 'StoryArcs')
# if os.path.isdir(str(arcdir)):
# logger.info(u"Validating Directory (" + str(arcdir) + "). Already exists! Continuing...")
# else:
# logger.fdebug("Updated Directory doesn't exist! - attempting to create now.")
# checkdirectory = filechecker.validateAndCreateDirectory(arcdir, True)
# if not checkdirectory:
# logger.warn('Error trying to validate/create directory. Aborting this process at this time.')
# return
if StoryArcID is not None:
raise cherrypy.HTTPRedirect("detailStoryArc?StoryArcID=%s&StoryArcName=%s" % (StoryArcID, StoryArcName))
else:
@ -4251,7 +4292,7 @@ class WebInterface(object):
enable_torrents=0, minseeds=0, local_watchdir=None, seedbox_watchdir=None, seedbox_user=None, seedbox_pass=None, seedbox_host=None, seedbox_port=None,
prowl_enabled=0, prowl_onsnatch=0, prowl_keys=None, prowl_priority=None, nma_enabled=0, nma_apikey=None, nma_priority=0, nma_onsnatch=0, pushover_enabled=0, pushover_onsnatch=0, pushover_apikey=None, pushover_userkey=None, pushover_priority=None, boxcar_enabled=0, boxcar_onsnatch=0, boxcar_token=None,
pushbullet_enabled=0, pushbullet_apikey=None, pushbullet_deviceid=None, pushbullet_onsnatch=0, telegram_enabled=0, telegram_token=None, telegram_userid=None, telegram_onsnatch=0, torrent_downloader=0, torrent_local=0, torrent_seedbox=0, utorrent_host=None, utorrent_username=None, utorrent_password=None, utorrent_label=None,
rtorrent_host=None, rtorrent_username=None, rtorrent_password=None, rtorrent_directory=None, rtorrent_label=None, rtorrent_startonload=0, transmission_host=None, transmission_username=None, transmission_password=None, transmission_directory=None,
rtorrent_host=None, rtorrent_ssl=0, rtorrent_verify=0, rtorrent_authentication='basic', rtorrent_rpc_url=None, rtorrent_username=None, rtorrent_password=None, rtorrent_directory=None, rtorrent_label=None, rtorrent_startonload=0, transmission_host=None, transmission_username=None, transmission_password=None, transmission_directory=None,
preferred_quality=0, move_files=0, rename_files=0, add_to_csv=1, cvinfo=0, lowercase_filenames=0, folder_format=None, file_format=None, enable_extra_scripts=0, extra_scripts=None, enable_pre_scripts=0, pre_scripts=None, post_processing=0, file_opts=None, syno_fix=0, search_delay=None, enforce_perms=1, chmod_dir=0777, chmod_file=0660, chowner=None, chgroup=None,
tsab=None, destination_dir=None, create_folders=1, replace_spaces=0, replace_char=None, use_minsize=0, minsize=None, use_maxsize=0, maxsize=None, autowant_all=0, autowant_upcoming=0, comic_cover_local=0, zero_level=0, zero_level_n=None, interface=None, dupeconstraint=None, ddump=0, duplicate_dump=None, **kwargs):
mylar.COMICVINE_API = comicvine_api
@ -4330,6 +4371,10 @@ class WebInterface(object):
mylar.UTORRENT_PASSWORD = utorrent_password
mylar.UTORRENT_LABEL = utorrent_label
mylar.RTORRENT_HOST = rtorrent_host
mylar.RTORRENT_AUTHENTICATION = rtorrent_authentication
mylar.RTORRENT_SSL = rtorrent_ssl
mylar.RTORRENT_VERIFY = rtorrent_verify
mylar.RTORRENT_RPC_URL = rtorrent_rpc_url
mylar.RTORRENT_USERNAME = rtorrent_username
mylar.RTORRENT_PASSWORD = rtorrent_password
mylar.RTORRENT_DIRECTORY = rtorrent_directory
@ -4428,7 +4473,7 @@ class WebInterface(object):
#changing this for simplicty - adding all newznabs into extra_newznabs
if newznab_host is not None:
#this
mylar.EXTRA_NEWZNABS.append((newznab_name, newznab_host, newznab_verify, newznab_apikey, newznab_uid, int(newznab_enabled)))
mylar.EXTRA_NEWZNABS.append((newznab_name, helpers.clean_url(newznab_host), newznab_verify, newznab_apikey, newznab_uid, int(newznab_enabled)))
for kwarg in kwargs:
if kwarg.startswith('newznab_name'):
@ -4439,7 +4484,7 @@ class WebInterface(object):
if newznab_name == "":
logger.fdebug('Blank newznab provider has been entered - removing.')
continue
newznab_host = kwargs['newznab_host' + newznab_number]
newznab_host = helpers.clean_url(kwargs['newznab_host' + newznab_number])
try:
newznab_verify = kwargs['newznab_verify' + newznab_number]
except:

View File

@ -55,7 +55,7 @@ def pullit(forcecheck=None):
#only for pw-file or ALT_PULL = 1
newrl = os.path.join(mylar.CACHE_DIR, 'newreleases.txt')
mylar.PULLBYFILE = None
mylar.PULLBYFILE = False
if mylar.ALT_PULL == 1:
#logger.info('[PULL-LIST] The Alt-Pull method is currently broken. Defaulting back to the normal method of grabbing the pull-list.')
@ -953,7 +953,10 @@ def new_pullcheck(weeknumber, pullyear, comic1off_name=None, comic1off_id=None,
elif annualidmatch:
comicname = annualidmatch[0]['AnnualIDs'][0]['ComicName'].strip()
latestiss = annualidmatch[0]['latestIssue'].strip()
comicid = annualidmatch[0]['AnnualIDs'][0]['ComicID'].strip()
if mylar.ANNUALS_ON:
comicid = annualidmatch[0]['ComicID'].strip()
else:
comicid = annualidmatch[0]['AnnualIDs'][0]['ComicID'].strip()
logger.fdebug('[WEEKLY-PULL] Series Match to ID --- ' + comicname + ' [' + comicid + ']')
else:
#if it's a name match, it means that CV hasn't been populated yet with the necessary data
@ -1112,12 +1115,16 @@ def new_pullcheck(weeknumber, pullyear, comic1off_name=None, comic1off_id=None,
#if the issueid exists on the pull, but not in the series issue list, we need to forcibly refresh the series so it's in line
if issueid:
logger.info('issue id check passed.')
isschk = myDB.selectone('SELECT * FROM issues where IssueID=?', [issueid]).fetchone()
#logger.info('issue id check passed.')
if annualidmatch:
isschk = myDB.selectone('SELECT * FROM annuals where IssueID=?', [issueid]).fetchone()
else:
isschk = myDB.selectone('SELECT * FROM issues where IssueID=?', [issueid]).fetchone()
if isschk is None:
isschk = myDB.selectone('SELECT * FROM annuals where IssueID=?', [issueid]).fetchone()
if isschk is None:
logger.fdebug('REFRESH THE SERIES.')
logger.fdebug('[WEEKLY-PULL] Forcing a refresh of the series to ensure it is current.')
cchk = mylar.importer.updateissuedata(comicid, comicname, calledfrom='weeklycheck')
#refresh series.
else:
@ -1133,7 +1140,10 @@ def new_pullcheck(weeknumber, pullyear, comic1off_name=None, comic1off_id=None,
#make sure the status is Wanted and that the issue status is identical if not.
newStat = {'Status': 'Wanted'}
ctrlStat = {'IssueID': issueid}
myDB.upsert("issues", newStat, ctrlStat)
if all([annualidmatch, mylar.ANNUALS_ON]):
myDB.upsert("annuals", newStat, ctrlStat)
else:
myDB.upsert("issues", newStat, ctrlStat)
else:
continue
# else: