Mirror of https://github.com/evilhero/mylar, synced 2024-12-21 23:32:23 +00:00

Merge branch 'development'

Commit d1ff6079a0: 19 changed files with 725 additions and 340 deletions
@@ -818,12 +818,14 @@
<input id="enable_torrent_search" type="checkbox" onclick="initConfigCheckbox($(this));" name="enable_torrent_search" value=1 ${config['enable_torrent_search']} /><legend>Torrents</legend>
</div>
<div class="config">
<!--
<div class="row checkbox left clearfix">
<input id="enable_public" title="Use Public Torrents" type="checkbox" name="enable_public" value=1 ${config['enable_public']} /><label>Enable Public Torrent Search</label>
<div align="left">
<small class="heading"><span style="float: left; margin-left: .3em; margin-top: 4px;" class="ui-icon ui-icon-info"></span>Search: WWT / RSS: WWT</small>
<small class="heading"><span style="float: left; margin-left: .3em; margin-top: 4px;" class="ui-icon ui-icon-info"></span>Search: None / RSS: None</small>
</div>
</div>
-->
<div class="row checkbox left clearfix">
<input type="checkbox" id="enable_32p" onclick="initConfigCheckbox($(this));" name="enable_32p" value=1 ${config['enable_32p']} /><label>Enable 32P</label>
<div align="left">
@@ -1212,7 +1214,11 @@
%>
<a href="#" title="${folder_options}"><img src="interfaces/default/images/info32.png" height="16" alt="" /></a>
<small>Use: $Publisher, $Series, $Year<br />
E.g.: $Publisher/$Series ($Year) = DC Comics/Action Comics (2011)</small>
%if 'windows' in mylar.OS_DETECT.lower():
E.g.: $Publisher\$Series ($Year) = DC Comics\Action Comics (2011)</small>
%else:
E.g.: $Publisher/$Series ($Year) = DC Comics/Action Comics (2011)</small>
%endif
</div>
<div class="row">
<label> File Format</label>
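Note: the $Publisher, $Series and $Year tokens above are plain substitutions into the folder-format string. A minimal sketch of the idea (hypothetical helper, not Mylar's actual renamer):

def build_folder_name(fmt, publisher, series, year, sep='/'):
    # Expand the tokens the way the help text describes; pass sep='\\' on Windows.
    out = fmt.replace('$Publisher', publisher).replace('$Series', series).replace('$Year', str(year))
    return out.replace('/', sep)

print(build_folder_name('$Publisher/$Series ($Year)', 'DC Comics', 'Action Comics', 2011))
# -> 'DC Comics/Action Comics (2011)'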
@@ -55,7 +55,7 @@ $.fn.dataTable.ext.search.push( function ( context, searchData ) {
if ( context.alphabetSearch.match('nonalpha') && !(searchData[1].charAt(0).match(/^[a-zA-Z]/)) ) {
return true;
}
if ( searchData[1].charAt(0) === context.alphabetSearch ) {
if ( searchData[1].charAt(0).toUpperCase() === context.alphabetSearch ) {
return true;
}

@@ -70,7 +70,7 @@ function bin ( data ) {
bins['nonalpha'] = 0;
for ( var i=0, ien=data.length ; i<ien ; i++ ) {
letter = data[i].charAt(13).toUpperCase();
if ( !letter.match(/^[A-Z]/) ) {
if ( !letter.match(/^[a-zA-Z]/) ) {
bins['nonalpha']++;
}
else if ( bins[letter] ) {
@@ -1,4 +1,13 @@
#!/usr/bin/env
### BEGIN INIT INFO
# Provides: mylar
# Required-Start: $all
# Required-Stop:
# Default-Start: 2 3 4 5
# Default-Stop:
# Short-Description: Mylar
### END INIT INFO

# Script name
NAME=mylar
@@ -1 +1 @@
from .client import DelugeRPCClient
from .client import DelugeRPCClient, FailedToReconnectException
@@ -2,6 +2,7 @@ import logging
import socket
import ssl
import struct
import warnings
import zlib

from .rencode import dumps, loads
@@ -10,102 +11,265 @@ RPC_RESPONSE = 1
RPC_ERROR = 2
RPC_EVENT = 3

#MESSAGE_HEADER_SIZE = 5
MESSAGE_HEADER_SIZE = 5
READ_SIZE = 10

logger = logging.getLogger(__name__)

class ConnectionLostException(Exception):

class DelugeClientException(Exception):
"""Base exception for all deluge client exceptions"""


class ConnectionLostException(DelugeClientException):
pass

class CallTimeoutException(Exception):

class CallTimeoutException(DelugeClientException):
pass


class InvalidHeaderException(DelugeClientException):
pass


class FailedToReconnectException(DelugeClientException):
pass


class RemoteException(DelugeClientException):
pass


class DelugeRPCClient(object):
timeout = 20

def __init__(self, host, port, username, password):

def __init__(self, host, port, username, password, decode_utf8=False, automatic_reconnect=True):
self.host = host
self.port = port
self.username = username
self.password = password

self.deluge_version = None
# This is only applicable if deluge_version is 2
self.deluge_protocol_version = None

self.decode_utf8 = decode_utf8
if not self.decode_utf8:
warnings.warn('Using `decode_utf8=False` is deprecated, please set it to True.'
'The argument will be removed in a future release where it will be always True', DeprecationWarning)

self.automatic_reconnect = automatic_reconnect

self.request_id = 1
self.connected = False
self._create_socket()


def _create_socket(self, ssl_version=None):
if ssl_version is not None:
self._socket = ssl.wrap_socket(socket.socket(socket.AF_INET, socket.SOCK_STREAM), ssl_version=ssl_version)
else:
self._socket = ssl.wrap_socket(socket.socket(socket.AF_INET, socket.SOCK_STREAM))
self._socket.settimeout(self.timeout)


def connect(self):
"""
Connects to the Deluge instance
"""
self._connect()
logger.debug('Connected to Deluge, detecting daemon version')
self._detect_deluge_version()
logger.debug('Daemon version {} detected, logging in'.format(self.deluge_version))
if self.deluge_version == 2:
result = self.call('daemon.login', self.username, self.password, client_version='deluge-client')
else:
result = self.call('daemon.login', self.username, self.password)
logger.debug('Logged in with value %r' % result)
self.connected = True

def _connect(self):
logger.info('Connecting to %s:%s' % (self.host, self.port))
try:
self._socket.connect((self.host, self.port))
except ssl.SSLError as e:
if e.reason != 'UNSUPPORTED_PROTOCOL' or not hasattr(ssl, 'PROTOCOL_SSLv3'):
# Note: have not verified that we actually get errno 258 for this error
if (hasattr(ssl, 'PROTOCOL_SSLv3') and
(getattr(e, 'reason', None) == 'UNSUPPORTED_PROTOCOL' or e.errno == 258)):
logger.warning('Was unable to ssl handshake, trying to force SSLv3 (insecure)')
self._create_socket(ssl_version=ssl.PROTOCOL_SSLv3)
self._socket.connect((self.host, self.port))
else:
raise

logger.warning('Was unable to ssl handshake, trying to force SSLv3 (insecure)')
self._create_socket(ssl_version=ssl.PROTOCOL_SSLv3)
self._socket.connect((self.host, self.port))

logger.debug('Connected to Deluge, logging in')
result = self.call('daemon.login', self.username, self.password)
logger.debug('Logged in with value %r' % result)
self.connected = True


def disconnect(self):
"""
Disconnect from deluge
"""
if self.connected:
self._socket.close()

def call(self, method, *args, **kwargs):
"""
Calls an RPC function
"""
self._socket = None
self.connected = False

def _detect_deluge_version(self):
if self.deluge_version is not None:
return

self._send_call(1, None, 'daemon.info')
self._send_call(2, None, 'daemon.info')
self._send_call(2, 1, 'daemon.info')
result = self._socket.recv(1)
if result[:1] == b'D':
# This is a protocol deluge 2.0 was using before release
self.deluge_version = 2
self.deluge_protocol_version = None
# If we need the specific version of deluge 2, this is it.
daemon_version = self._receive_response(2, None, partial_data=result)
elif ord(result[:1]) == 1:
self.deluge_version = 2
self.deluge_protocol_version = 1
# If we need the specific version of deluge 2, this is it.
daemon_version = self._receive_response(2, 1, partial_data=result)
else:
self.deluge_version = 1
# Deluge 1 doesn't recover well from the bad request. Re-connect the socket.
self._socket.close()
self._create_socket()
self._connect()

def _send_call(self, deluge_version, protocol_version, method, *args, **kwargs):
self.request_id += 1
logger.debug('Calling reqid %s method %r with args:%r kwargs:%r' % (self.request_id, method, args, kwargs))

if method == 'daemon.login':
debug_args = list(args)
if len(debug_args) >= 2:
debug_args[1] = '<password hidden>'
logger.debug('Calling reqid %s method %r with args:%r kwargs:%r' % (self.request_id, method, debug_args, kwargs))
else:
logger.debug('Calling reqid %s method %r with args:%r kwargs:%r' % (self.request_id, method, args, kwargs))

req = ((self.request_id, method, args, kwargs), )
req = zlib.compress(dumps(req))

#self._socket.send('D' + struct.pack("!i", len(req))) # seems to be for the future !

if deluge_version == 2:
if protocol_version is None:
# This was a protocol for deluge 2 before they introduced protocol version numbers
self._socket.send(b'D' + struct.pack("!i", len(req)))
elif protocol_version == 1:
self._socket.send(struct.pack('!BI', protocol_version, len(req)))
else:
raise Exception('Deluge protocol version {} is not (yet) supported.'.format(protocol_version))
self._socket.send(req)

data = b''
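# Editor's sketch (not part of the original file): the two wire framings used
# above, shown end to end. Deluge 2's pre-release protocol prefixed the
# zlib-compressed rencoded payload with b'D' plus a signed big-endian length,
# while protocol version 1 uses a one-byte version plus an unsigned length,
# which is the 5-byte header that MESSAGE_HEADER_SIZE refers to.
#
#   payload = zlib.compress(dumps(((request_id, method, args, kwargs),)))
#   pre_release_frame = b'D' + struct.pack('!i', len(payload)) + payload
#   v1_frame = struct.pack('!BI', 1, len(payload)) + payload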

def _receive_response(self, deluge_version, protocol_version, partial_data=b''):
expected_bytes = None
data = partial_data
while True:
try:
d = self._socket.recv(READ_SIZE)
except ssl.SSLError:
raise CallTimeoutException()

data += d
try:
data = zlib.decompress(data)
except zlib.error:
if not d:
raise ConnectionLostException()
continue
break

data = list(loads(data))
if deluge_version == 2:
if expected_bytes is None:
if len(data) < 5:
continue

header = data[:MESSAGE_HEADER_SIZE]
data = data[MESSAGE_HEADER_SIZE:]

if protocol_version is None:
if header[0] != b'D'[0]:
raise InvalidHeaderException('Expected D as first byte in reply')
elif ord(header[:1]) != protocol_version:
raise InvalidHeaderException(
'Expected protocol version ({}) as first byte in reply'.format(protocol_version)
)

if protocol_version is None:
expected_bytes = struct.unpack('!i', header[1:])[0]
else:
expected_bytes = struct.unpack('!I', header[1:])[0]

if len(data) >= expected_bytes:
data = zlib.decompress(data)
break
else:
try:
data = zlib.decompress(data)
except zlib.error:
if not d:
raise ConnectionLostException()
continue
break

data = list(loads(data, decode_utf8=self.decode_utf8))
msg_type = data.pop(0)
request_id = data.pop(0)

if msg_type == RPC_ERROR:
exception_type, exception_msg, traceback = data[0]
exception = type(str(exception_type), (Exception, ), {})
exception_msg = '%s\n\n%s' % (exception_msg, traceback)
if self.deluge_version == 2:
exception_type, exception_msg, _, traceback = data
# On deluge 2, exception arguments are sent as tuple
if self.decode_utf8:
exception_msg = ', '.join(exception_msg)
else:
exception_msg = b', '.join(exception_msg)
else:
exception_type, exception_msg, traceback = data[0]
if self.decode_utf8:
exception = type(str(exception_type), (RemoteException, ), {})
exception_msg = '%s\n%s' % (exception_msg,
traceback)
else:
exception = type(str(exception_type.decode('utf-8', 'ignore')), (RemoteException, ), {})
exception_msg = '%s\n%s' % (exception_msg.decode('utf-8', 'ignore'),
traceback.decode('utf-8', 'ignore'))
raise exception(exception_msg)
elif msg_type == RPC_RESPONSE:
retval = data[0]
return retval

def reconnect(self):
"""
Reconnect
"""
self.disconnect()
self._create_socket()
self.connect()

def call(self, method, *args, **kwargs):
"""
Calls an RPC function
"""
tried_reconnect = False
for _ in range(2):
try:
self._send_call(self.deluge_version, self.deluge_protocol_version, method, *args, **kwargs)
return self._receive_response(self.deluge_version, self.deluge_protocol_version)
except (socket.error, ConnectionLostException, CallTimeoutException):
if self.automatic_reconnect:
if tried_reconnect:
raise FailedToReconnectException()
else:
try:
self.reconnect()
except (socket.error, ConnectionLostException, CallTimeoutException):
raise FailedToReconnectException()

tried_reconnect = True
else:
raise

def __getattr__(self, item):
return RPCCaller(self.call, item)


class RPCCaller(object):
def __init__(self, caller, method=''):
self.caller = caller
self.method = method

def __getattr__(self, item):
return RPCCaller(self.caller, self.method+'.'+item)

def __call__(self, *args, **kwargs):
return self.caller(self.method, *args, **kwargs)
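Taken together, the reworked client can be driven like this. A minimal usage sketch; the host, port, and credentials below are placeholders (127.0.0.1:58846 is the daemon default, credentials normally come from Deluge's auth file), and decode_utf8=True opts out of the deprecated default:

from deluge_client import DelugeRPCClient, FailedToReconnectException

client = DelugeRPCClient('127.0.0.1', 58846, 'localuser', 'hunter2', decode_utf8=True)
client.connect()
try:
    print(client.call('core.get_free_space'))   # explicit call form
    print(client.core.get_free_space('/'))      # attribute form via RPCCaller
except FailedToReconnectException:
    print('daemon went away and the single automatic retry also failed')
finally:
    client.disconnect()

With automatic_reconnect=True (the default), a dropped connection triggers one reconnect-and-retry inside call() before FailedToReconnectException is raised.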
@@ -1,27 +1,3 @@

"""
rencode -- Web safe object pickling/unpickling.

Public domain, Connelly Barnes 2006-2007.

The rencode module is a modified version of bencode from the
BitTorrent project. For complex, heterogeneous data structures with
many small elements, r-encodings take up significantly less space than
b-encodings:

>>> len(rencode.dumps({'a':0, 'b':[1,2], 'c':99}))
13
>>> len(bencode.bencode({'a':0, 'b':[1,2], 'c':99}))
26

The rencode format is not standardized, and may change with different
rencode module versions, so you should check that you are using the
same rencode version throughout your project.
"""

__version__ = '1.0.2'
__all__ = ['dumps', 'loads']

# Original bencode module by Petru Paler, et al.
#
# Modifications by Connelly Barnes:
@@ -62,23 +38,50 @@ __all__ = ['dumps', 'loads']
# (The rencode module is licensed under the above license as well).
#

import sys
"""
rencode -- Web safe object pickling/unpickling.

Public domain, Connelly Barnes 2006-2007.

The rencode module is a modified version of bencode from the
BitTorrent project. For complex, heterogeneous data structures with
many small elements, r-encodings take up significantly less space than
b-encodings:

>>> len(rencode.dumps({'a':0, 'b':[1,2], 'c':99}))
13
>>> len(bencode.bencode({'a':0, 'b':[1,2], 'c':99}))
26

The rencode format is not standardized, and may change with different
rencode module versions, so you should check that you are using the
same rencode version throughout your project.
"""

py3 = False
if sys.version_info >= (3, 0):
py3 = True
long = int
unicode = str

def int2byte(c):
if py3:
return bytes([c])
else:
return chr(c)

import struct
import sys
from threading import Lock

try:
from future_builtins import zip
except ImportError:
# Ignore on Py3.
pass

__version__ = ('Python', 1, 0, 4)
__all__ = ['dumps', 'loads']

py3 = sys.version_info[0] >= 3
if py3:
long = int  # pylint: disable=redefined-builtin
unicode = str  # pylint: disable=redefined-builtin

def int2byte(c):
return bytes([c])
else:
def int2byte(c):
return chr(c)

# Default number of bits for serialized floats, either 32 or 64 (also a parameter for dumps()).
DEFAULT_FLOAT_BITS = 32
@@ -87,19 +90,19 @@ MAX_INT_LENGTH = 64

# The bencode 'typecodes' such as i, d, etc have been extended and
# relocated on the base-256 character set.
CHR_LIST = int2byte(59)
CHR_DICT = int2byte(60)
CHR_INT = int2byte(61)
CHR_INT1 = int2byte(62)
CHR_INT2 = int2byte(63)
CHR_INT4 = int2byte(64)
CHR_INT8 = int2byte(65)
CHR_FLOAT32 = int2byte(66)
CHR_FLOAT64 = int2byte(44)
CHR_TRUE = int2byte(67)
CHR_FALSE = int2byte(68)
CHR_NONE = int2byte(69)
CHR_TERM = int2byte(127)

# Positive integers with value embedded in typecode.
INT_POS_FIXED_START = 0
@@ -118,12 +121,13 @@ STR_FIXED_START = 128
STR_FIXED_COUNT = 64

# Lists with length embedded in typecode.
LIST_FIXED_START = STR_FIXED_START+STR_FIXED_COUNT
LIST_FIXED_START = STR_FIXED_START + STR_FIXED_COUNT
LIST_FIXED_COUNT = 64

# Whether strings should be decoded when loading
_decode_utf8 = False


def decode_int(x, f):
f += 1
newf = x.index(CHR_TERM, f)
@@ -133,39 +137,46 @@ def decode_int(x, f):
n = int(x[f:newf])
except (OverflowError, ValueError):
n = long(x[f:newf])
if x[f:f+1] == '-':
if x[f:f + 1] == '-':
if x[f + 1:f + 2] == '0':
raise ValueError
elif x[f:f+1] == '0' and newf != f+1:
elif x[f:f + 1] == '0' and newf != f + 1:
raise ValueError
return (n, newf+1)
return (n, newf + 1)


def decode_intb(x, f):
f += 1
return (struct.unpack('!b', x[f:f+1])[0], f+1)
return (struct.unpack('!b', x[f:f + 1])[0], f + 1)


def decode_inth(x, f):
f += 1
return (struct.unpack('!h', x[f:f+2])[0], f+2)
return (struct.unpack('!h', x[f:f + 2])[0], f + 2)


def decode_intl(x, f):
f += 1

return (struct.unpack('!l', x[f:f+4])[0], f+4)
return (struct.unpack('!l', x[f:f + 4])[0], f + 4)


def decode_intq(x, f):
f += 1
return (struct.unpack('!q', x[f:f+8])[0], f+8)
return (struct.unpack('!q', x[f:f + 8])[0], f + 8)


def decode_float32(x, f):
f += 1
n = struct.unpack('!f', x[f:f+4])[0]
return (n, f+4)
n = struct.unpack('!f', x[f:f + 4])[0]
return (n, f + 4)


def decode_float64(x, f):
f += 1
n = struct.unpack('!d', x[f:f+8])[0]
return (n, f+8)
n = struct.unpack('!d', x[f:f + 8])[0]
return (n, f + 8)


def decode_string(x, f):
colon = x.index(b':', f)
@@ -173,36 +184,42 @@ def decode_string(x, f):
n = int(x[f:colon])
except (OverflowError, ValueError):
n = long(x[f:colon])
if x[f] == '0' and colon != f+1:
if x[f] == '0' and colon != f + 1:
raise ValueError
colon += 1
s = x[colon:colon+n]
s = x[colon:colon + n]
if _decode_utf8:
s = s.decode('utf8')
return (s, colon+n)
return (s, colon + n)


def decode_list(x, f):
r, f = [], f+1
while x[f:f+1] != CHR_TERM:
v, f = decode_func[x[f:f+1]](x, f)
r, f = [], f + 1
while x[f:f + 1] != CHR_TERM:
v, f = decode_func[x[f:f + 1]](x, f)
r.append(v)
return (tuple(r), f + 1)


def decode_dict(x, f):
r, f = {}, f+1
while x[f:f+1] != CHR_TERM:
k, f = decode_func[x[f:f+1]](x, f)
r[k], f = decode_func[x[f:f+1]](x, f)
r, f = {}, f + 1
while x[f:f + 1] != CHR_TERM:
k, f = decode_func[x[f:f + 1]](x, f)
r[k], f = decode_func[x[f:f + 1]](x, f)
return (r, f + 1)


def decode_true(x, f):
return (True, f+1)
return (True, f + 1)


def decode_false(x, f):
return (False, f+1)
return (False, f + 1)


def decode_none(x, f):
return (None, f+1)
return (None, f + 1)


decode_func = {}
decode_func[b'0'] = decode_string
@@ -215,72 +232,81 @@ decode_func[b'6'] = decode_string
decode_func[b'7'] = decode_string
decode_func[b'8'] = decode_string
decode_func[b'9'] = decode_string
decode_func[CHR_LIST ] = decode_list
decode_func[CHR_DICT ] = decode_dict
decode_func[CHR_INT ] = decode_int
decode_func[CHR_INT1 ] = decode_intb
decode_func[CHR_INT2 ] = decode_inth
decode_func[CHR_INT4 ] = decode_intl
decode_func[CHR_INT8 ] = decode_intq
decode_func[CHR_LIST] = decode_list
decode_func[CHR_DICT] = decode_dict
decode_func[CHR_INT] = decode_int
decode_func[CHR_INT1] = decode_intb
decode_func[CHR_INT2] = decode_inth
decode_func[CHR_INT4] = decode_intl
decode_func[CHR_INT8] = decode_intq
decode_func[CHR_FLOAT32] = decode_float32
decode_func[CHR_FLOAT64] = decode_float64
decode_func[CHR_TRUE ] = decode_true
decode_func[CHR_FALSE ] = decode_false
decode_func[CHR_NONE ] = decode_none
decode_func[CHR_TRUE] = decode_true
decode_func[CHR_FALSE] = decode_false
decode_func[CHR_NONE] = decode_none


def make_fixed_length_string_decoders():
def make_decoder(slen):
def f(x, f):
s = x[f+1:f+1+slen]
s = x[f + 1:f + 1 + slen]
if _decode_utf8:
s = s.decode("utf8")
return (s, f+1+slen)
s = s.decode('utf8')
return (s, f + 1 + slen)
return f
for i in range(STR_FIXED_COUNT):
decode_func[int2byte(STR_FIXED_START+i)] = make_decoder(i)
decode_func[int2byte(STR_FIXED_START + i)] = make_decoder(i)


make_fixed_length_string_decoders()


def make_fixed_length_list_decoders():
def make_decoder(slen):
def f(x, f):
r, f = [], f+1
for i in range(slen):
v, f = decode_func[x[f:f+1]](x, f)
r, f = [], f + 1
for _ in range(slen):
v, f = decode_func[x[f:f + 1]](x, f)
r.append(v)
return (tuple(r), f)
return f
for i in range(LIST_FIXED_COUNT):
decode_func[int2byte(LIST_FIXED_START+i)] = make_decoder(i)
decode_func[int2byte(LIST_FIXED_START + i)] = make_decoder(i)


make_fixed_length_list_decoders()


def make_fixed_length_int_decoders():
def make_decoder(j):
def f(x, f):
return (j, f+1)
return (j, f + 1)
return f
for i in range(INT_POS_FIXED_COUNT):
decode_func[int2byte(INT_POS_FIXED_START+i)] = make_decoder(i)
decode_func[int2byte(INT_POS_FIXED_START + i)] = make_decoder(i)
for i in range(INT_NEG_FIXED_COUNT):
decode_func[int2byte(INT_NEG_FIXED_START+i)] = make_decoder(-1-i)
decode_func[int2byte(INT_NEG_FIXED_START + i)] = make_decoder(-1 - i)


make_fixed_length_int_decoders()


def make_fixed_length_dict_decoders():
def make_decoder(slen):
def f(x, f):
r, f = {}, f+1
for j in range(slen):
k, f = decode_func[x[f:f+1]](x, f)
r[k], f = decode_func[x[f:f+1]](x, f)
r, f = {}, f + 1
for _ in range(slen):
k, f = decode_func[x[f:f + 1]](x, f)
r[k], f = decode_func[x[f:f + 1]](x, f)
return (r, f)
return f
for i in range(DICT_FIXED_COUNT):
decode_func[int2byte(DICT_FIXED_START+i)] = make_decoder(i)
decode_func[int2byte(DICT_FIXED_START + i)] = make_decoder(i)


make_fixed_length_dict_decoders()


def loads(x, decode_utf8=False):
global _decode_utf8
_decode_utf8 = decode_utf8
@@ -292,11 +318,12 @@ def loads(x, decode_utf8=False):
raise ValueError
return r


def encode_int(x, r):
if 0 <= x < INT_POS_FIXED_COUNT:
r.append(int2byte(INT_POS_FIXED_START+x))
r.append(int2byte(INT_POS_FIXED_START + x))
elif -INT_NEG_FIXED_COUNT <= x < 0:
r.append(int2byte(INT_NEG_FIXED_START-1-x))
r.append(int2byte(INT_NEG_FIXED_START - 1 - x))
elif -128 <= x < 128:
r.extend((CHR_INT1, struct.pack('!b', x)))
elif -32768 <= x < 32768:
@@ -308,35 +335,42 @@ def encode_int(x, r):
else:
s = str(x)
if py3:
s = bytes(s, "ascii")

s = bytes(s, 'ascii')

if len(s) >= MAX_INT_LENGTH:
raise ValueError('overflow')
r.extend((CHR_INT, s, CHR_TERM))


def encode_float32(x, r):
r.extend((CHR_FLOAT32, struct.pack('!f', x)))


def encode_float64(x, r):
r.extend((CHR_FLOAT64, struct.pack('!d', x)))


def encode_bool(x, r):
r.append({False: CHR_FALSE, True: CHR_TRUE}[bool(x)])


def encode_none(x, r):
r.append(CHR_NONE)


def encode_string(x, r):
if len(x) < STR_FIXED_COUNT:
r.extend((int2byte(STR_FIXED_START + len(x)), x))
else:
s = str(len(x))
if py3:
s = bytes(s, "ascii")
s = bytes(s, 'ascii')
r.extend((s, b':', x))


def encode_unicode(x, r):
encode_string(x.encode("utf8"), r)
encode_string(x.encode('utf8'), r)


def encode_list(x, r):
if len(x) < LIST_FIXED_COUNT:
@@ -349,7 +383,8 @@ def encode_list(x, r):
encode_func[type(i)](i, r)
r.append(CHR_TERM)

def encode_dict(x,r):

def encode_dict(x, r):
if len(x) < DICT_FIXED_COUNT:
r.append(int2byte(DICT_FIXED_START + len(x)))
for k, v in x.items():
@@ -362,6 +397,7 @@ def encode_dict(x,r):
encode_func[type(v)](v, r)
r.append(CHR_TERM)


encode_func = {}
encode_func[int] = encode_int
encode_func[long] = encode_int
@@ -375,14 +411,14 @@ encode_func[bool] = encode_bool

lock = Lock()


def dumps(x, float_bits=DEFAULT_FLOAT_BITS):
"""
Dump data structure to str.

Here float_bits is either 32 or 64.
"""
lock.acquire()
try:
with lock:
if float_bits == 32:
encode_func[float] = encode_float32
elif float_bits == 64:

@@ -391,39 +427,41 @@ def dumps(x, float_bits=DEFAULT_FLOAT_BITS):
raise ValueError('Float bits (%d) is not 32 or 64' % float_bits)
r = []
encode_func[type(x)](x, r)
finally:
lock.release()

return b''.join(r)


def test():
f1 = struct.unpack('!f', struct.pack('!f', 25.5))[0]
f2 = struct.unpack('!f', struct.pack('!f', 29.3))[0]
f3 = struct.unpack('!f', struct.pack('!f', -0.6))[0]
L = (({b'a':15, b'bb':f1, b'ccc':f2, b'':(f3,(),False,True,b'')},(b'a',10**20),tuple(range(-100000,100000)),b'b'*31,b'b'*62,b'b'*64,2**30,2**33,2**62,2**64,2**30,2**33,2**62,2**64,False,False, True, -1, 2, 0),)
assert loads(dumps(L)) == L
d = dict(zip(range(-100000,100000),range(-100000,100000)))
d.update({b'a':20, 20:40, 40:41, f1:f2, f2:f3, f3:False, False:True, True:False})
L = (d, {}, {5:6}, {7:7,True:8}, {9:10, 22:39, 49:50, 44: b''})
assert loads(dumps(L)) == L
L = (b'', b'a'*10, b'a'*100, b'a'*1000, b'a'*10000, b'a'*100000, b'a'*1000000, b'a'*10000000)
assert loads(dumps(L)) == L
L = tuple([dict(zip(range(n),range(n))) for n in range(100)]) + (b'b',)
assert loads(dumps(L)) == L
L = tuple([dict(zip(range(n),range(-n,0))) for n in range(100)]) + (b'b',)
assert loads(dumps(L)) == L
L = tuple([tuple(range(n)) for n in range(100)]) + (b'b',)
assert loads(dumps(L)) == L
L = tuple([b'a'*n for n in range(1000)]) + (b'b',)
assert loads(dumps(L)) == L
L = tuple([b'a'*n for n in range(1000)]) + (None,True,None)
assert loads(dumps(L)) == L
assert loads(dumps(None)) == None
assert loads(dumps({None:None})) == {None:None}
assert 1e-10<abs(loads(dumps(1.1))-1.1)<1e-6
assert 1e-10<abs(loads(dumps(1.1,32))-1.1)<1e-6
assert abs(loads(dumps(1.1,64))-1.1)<1e-12
assert loads(dumps("Hello World!!"), decode_utf8=True)
ld = (({b'a': 15, b'bb': f1, b'ccc': f2, b'': (f3, (), False, True, b'')}, (b'a', 10**20),
tuple(range(-100000, 100000)), b'b' * 31, b'b' * 62, b'b' * 64, 2**30, 2**33, 2**62,
2**64, 2**30, 2**33, 2**62, 2**64, False, False, True, -1, 2, 0),)
assert loads(dumps(ld)) == ld
d = dict(zip(range(-100000, 100000), range(-100000, 100000)))
d.update({b'a': 20, 20: 40, 40: 41, f1: f2, f2: f3, f3: False, False: True, True: False})
ld = (d, {}, {5: 6}, {7: 7, True: 8}, {9: 10, 22: 39, 49: 50, 44: b''})
assert loads(dumps(ld)) == ld
ld = (b'', b'a' * 10, b'a' * 100, b'a' * 1000, b'a' * 10000, b'a' * 100000, b'a' * 1000000, b'a' * 10000000)
assert loads(dumps(ld)) == ld
ld = tuple([dict(zip(range(n), range(n))) for n in range(100)]) + (b'b',)
assert loads(dumps(ld)) == ld
ld = tuple([dict(zip(range(n), range(-n, 0))) for n in range(100)]) + (b'b',)
assert loads(dumps(ld)) == ld
ld = tuple([tuple(range(n)) for n in range(100)]) + (b'b',)
assert loads(dumps(ld)) == ld
ld = tuple([b'a' * n for n in range(1000)]) + (b'b',)
assert loads(dumps(ld)) == ld
ld = tuple([b'a' * n for n in range(1000)]) + (None, True, None)
assert loads(dumps(ld)) == ld
assert loads(dumps(None)) is None
assert loads(dumps({None: None})) == {None: None}
assert 1e-10 < abs(loads(dumps(1.1)) - 1.1) < 1e-6
assert 1e-10 < abs(loads(dumps(1.1, 32)) - 1.1) < 1e-6
assert abs(loads(dumps(1.1, 64)) - 1.1) < 1e-12
assert loads(dumps('Hello World!!'), decode_utf8=True)


try:
import psyco
psyco.bind(dumps)

@@ -433,4 +471,4 @@ except ImportError:


if __name__ == '__main__':
test()
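As the docstring's size comparison suggests, rencode trades standardization for compactness on small heterogeneous structures. A quick round-trip sketch against this module (with the default decode_utf8=False, so keys stay bytes; note that lists deliberately come back as tuples):

from deluge_client.rencode import dumps, loads

blob = dumps({b'a': 0, b'b': [1, 2], b'c': 99})
print(len(blob))   # 13 bytes on this input, versus 26 for the equivalent bencode
data = loads(blob)
assert data == {b'a': 0, b'b': (1, 2), b'c': 99}   # decode_list returns tuples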
@@ -1,41 +1,65 @@
import os
import sys

from unittest import TestCase
import pytest

from .client import DelugeRPCClient
from .client import DelugeRPCClient, RemoteException

class TestDelugeClient(TestCase):
def setUp(self):

if sys.version_info > (3,):
long = int


@pytest.fixture
def client(request):
if sys.platform.startswith('win'):
auth_path = os.path.join(os.getenv('APPDATA'), 'deluge', 'auth')
else:
auth_path = os.path.expanduser("~/.config/deluge/auth")

with open(auth_path, 'rb') as f:
filedata = f.read().decode("utf-8").split('\n')[0].split(':')

self.username, self.password = filedata[:2]
self.ip = '127.0.0.1'
self.port = 58846
self.client = DelugeRPCClient(self.ip, self.port, self.username, self.password)

def tearDown(self):
try:
self.client.disconnect()
except:
pass

def test_connect(self):
self.client.connect()

def test_call_method(self):
self.client.connect()
self.assertIsInstance(self.client.call('core.get_free_space'), int)

def test_call_method_arguments(self):
self.client.connect()
self.assertIsInstance(self.client.call('core.get_free_space', '/'), int)

def test_call_method_exception(self):
self.client.connect()
try:
self.client.call('core.get_free_space', '1', '2')
except Exception as e:
self.assertEqual('deluge_client.client', e.__module__)

with open(auth_path, 'rb') as f:
filedata = f.read().decode("utf-8").split('\n')[0].split(':')

username, password = filedata[:2]
ip = '127.0.0.1'
port = 58846
kwargs = {'decode_utf8': True}
if hasattr(request, 'param'):
kwargs.update(request.param)
client = DelugeRPCClient(ip, port, username, password, **kwargs)
client.connect()

yield client

try:
client.disconnect()
except:
pass


def test_connect(client):
assert client.connected


def test_call_method(client):
assert isinstance(client.call('core.get_free_space'), (int, long))


def test_call_method_arguments(client):
assert isinstance(client.call('core.get_free_space', '/'), (int, long))


@pytest.mark.parametrize('client',
[{'decode_utf8': True}, {'decode_utf8': False}],
ids=['decode_utf8_on', 'decode_utf8_off'],
indirect=True)
def test_call_method_exception(client):
with pytest.raises(RemoteException) as ex_info:
client.call('core.get_free_space', '1', '2')
assert ('takes at most 2 arguments' in str(ex_info.value) or
'takes from 1 to 2 positional arguments' in str(ex_info.value))  # deluge 2.0


def test_attr_caller(client):
assert isinstance(client.core.get_free_space(), (int, long))
assert isinstance(client.core.get_free_space('/'), (int, long))
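The indirect=True parametrization above is what feeds each kwargs dict into the client fixture through request.param. The same pattern in miniature, detached from Deluge (names here are illustrative only):

import pytest

@pytest.fixture
def widget(request):
    opts = {'color': 'red'}                 # fixture defaults
    if hasattr(request, 'param'):           # set only when parametrized indirectly
        opts.update(request.param)
    return opts

@pytest.mark.parametrize('widget', [{'color': 'blue'}], indirect=True)
def test_widget_override(widget):
    assert widget['color'] == 'blue'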
@@ -170,15 +170,21 @@ class FailedProcessor(object):
sandwich = issueid
elif 'G' in issueid or '-' in issueid:
sandwich = 1
if helpers.is_number(sandwich):
if sandwich < 900000:
try:
if helpers.is_number(sandwich):
if sandwich < 900000:
# if sandwich is less than 900000 it's a normal watchlist download. Bypass.
pass
else:
logger.info('Failed download handling for story-arcs and one-off\'s are not supported yet. Be patient!')
self._log(' Unable to locate downloaded file to rename. PostProcessing aborted.')
pass
else:
logger.info('Failed download handling for story-arcs and one-off\'s are not supported yet. Be patient!')
self._log(' Unable to locate downloaded file to rename. PostProcessing aborted.')
self.valreturn.append({"self.log": self.log,
"mode": 'stop'})
return self.queue.put(self.valreturn)
except NameError:
logger.info('sandwich was not defined. Post-processing aborted...')
self.valreturn.append({"self.log": self.log,
"mode": 'stop'})

return self.queue.put(self.valreturn)
@@ -644,6 +644,10 @@ class PostProcessor(object):
temploc = None
datematch = "False"

if temploc is None and all([cs['WatchValues']['Type'] != 'TPB', cs['WatchValues']['Type'] != 'One-Shot']):
logger.info('this should have an issue number to match to this particular series: %s' % cs['ComicID'])
continue

if temploc is not None and (any(['annual' in temploc.lower(), 'special' in temploc.lower()]) and mylar.CONFIG.ANNUALS_ON is True):
biannchk = re.sub('-', '', temploc.lower()).strip()
if 'biannual' in biannchk:
@@ -841,6 +841,9 @@ class Config(object):
self.ALT_PULL = 2
config.set('Weekly', 'alt_pull', str(self.ALT_PULL))

#force off public torrents usage as currently broken.
self.ENABLE_PUBLIC = False

try:
if not any([self.SAB_HOST is None, self.SAB_HOST == '', 'http://' in self.SAB_HOST[:7], 'https://' in self.SAB_HOST[:8]]):
self.SAB_HOST = 'http://' + self.SAB_HOST
@@ -344,7 +344,7 @@ def GetComicInfo(comicid, dom, safechk=None):
comic['Type'] = 'Print'

if comic_desc != 'None' and comic['Type'] == 'None':
if 'print' in comic_desc[:60].lower() and all(['print edition can be found' not in comic_desc.lower(), 'reprints' not in comic_desc.lower()]):
if 'print' in comic_desc[:60].lower() and all(['for the printed edition' not in comic_desc.lower(), 'print edition can be found' not in comic_desc.lower(), 'reprints' not in comic_desc.lower()]):
comic['Type'] = 'Print'
elif 'digital' in comic_desc[:60].lower() and 'digital edition can be found' not in comic_desc.lower():
comic['Type'] = 'Digital'

@@ -352,10 +352,10 @@ def GetComicInfo(comicid, dom, safechk=None):
comic['Type'] = 'TPB'
elif 'hardcover' in comic_desc[:60].lower() and 'hardcover can be found' not in comic_desc.lower():
comic['Type'] = 'HC'
elif any(['one-shot' in comic_desc[:60].lower(), 'one shot' in comic_desc[:60].lower()]) and any(['can be found' not in comic_desc.lower(), 'following the' not in comic_desc.lower()]):
elif any(['one-shot' in comic_desc[:60].lower(), 'one shot' in comic_desc[:60].lower()]) and any(['can be found' not in comic_desc.lower(), 'following the' not in comic_desc.lower(), 'after the' not in comic_desc.lower()]):
i = 0
comic['Type'] = 'One-Shot'
avoidwords = ['preceding', 'after the special', 'following the']
avoidwords = ['preceding', 'after the', 'following the']
while i < 2:
if i == 0:
cbd = 'one-shot'
@@ -101,7 +101,7 @@ class FileChecker(object):
self.failed_files = []
self.dynamic_handlers = ['/','-',':',';','\'',',','&','?','!','+','(',')','\u2014','\u2013']
self.dynamic_replacements = ['and','the']
self.rippers = ['-empire','-empire-hd','minutemen-','-dcp']
self.rippers = ['-empire','-empire-hd','minutemen-','-dcp','Glorith-HD']

#pre-generate the AS_Alternates now
AS_Alternates = self.altcheck()

@@ -143,7 +143,7 @@ class FileChecker(object):
if filename.startswith('.'):
continue

logger.debug('[FILENAME]: ' + filename)
logger.debug('[FILENAME]: %s' % filename)
runresults = self.parseit(self.dir, filename, filedir)
if runresults:
try:

@@ -208,10 +208,12 @@ class FileChecker(object):
watchmatch['comiccount'] = comiccnt
if len(comiclist) > 0:
watchmatch['comiclist'] = comiclist

else:
watchmatch['comiclist'] = []

if len(self.failed_files) > 0:
logger.info('FAILED FILES: %s' % self.failed_files)


return watchmatch

def parseit(self, path, filename, subpath=None):

@@ -241,7 +243,7 @@ class FileChecker(object):
if '/' == path_list[0] or '\\' == path_list[0]:
#need to remove any leading slashes so the os join can properly join the components
path_list = path_list[1:]
logger.fdebug('[SUB-PATH] subpath set to : ' + path_list)
logger.fdebug('[SUB-PATH] subpath set to : %s' % path_list)


#parse out the extension for type
@@ -261,14 +263,31 @@ class FileChecker(object):
if self.sarc and mylar.CONFIG.READ2FILENAME:
removest = modfilename.find('-') # the - gets removed above so we test for the first blank space...
if mylar.CONFIG.FOLDER_SCAN_LOG_VERBOSE:
logger.fdebug('[SARC] Checking filename for Reading Order sequence - Reading Sequence Order found #: ' + str(modfilename[:removest]))
logger.fdebug('[SARC] Checking filename for Reading Order sequence - Reading Sequence Order found #: %s' % modfilename[:removest])
if modfilename[:removest].isdigit() and removest <= 3:
reading_order = {'reading_sequence': str(modfilename[:removest]),
'filename': filename[removest+1:]}
modfilename = modfilename[removest+1:]
if mylar.CONFIG.FOLDER_SCAN_LOG_VERBOSE:
logger.fdebug('[SARC] Removed Reading Order sequence from subname. Now set to : ' + modfilename)
logger.fdebug('[SARC] Removed Reading Order sequence from subname. Now set to : %s' % modfilename)

#make sure all the brackets are properly spaced apart
m = re.findall('[^()]+', modfilename)
cnt = 1
#2019-12-24----fixed to accommodate naming convention like Amazing Mary Jane (2019) 002.cbr, and to account for brackets properly
try:
while cnt < len(m):
#logger.fdebug('[m=%s] modfilename.find: %s' % (m[cnt], modfilename[modfilename.find('('+m[cnt]+')')+len(m[cnt])+2]))
#logger.fdebug('mod_1: %s' % modfilename.find('('+m[cnt]+')'))
if modfilename[modfilename.find('('+m[cnt]+')')-1] != ' ' and modfilename.find('('+m[cnt]+')') != -1:
#logger.fdebug('before_space: %s' % modfilename[modfilename.find('('+m[cnt]+')')-1])
#logger.fdebug('after_space: %s' % modfilename[modfilename.find('('+m[cnt]+')')+len(m[cnt])+2])
modfilename = '%s%s%s' % (modfilename[:modfilename.find('('+m[cnt]+')')], ' ', modfilename[modfilename.find('('+m[cnt]+')'):])
cnt+=1
except Exception as e:
#logger.warn('[ERROR] %s' % e)
pass
#---end 2019-12-24

#grab the scanner tags here.
scangroup = None
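The bracket-spacing loop above only needs to guarantee a single space before each '(...)' group. Its effect on the filename named in the comment, rewritten as a one-line illustrative regex (a sketch, not the original implementation):

import re

def space_brackets(name):
    # insert a space before any '(' that is glued to the preceding character
    return re.sub(r'(\S)\(', r'\1 (', name)

print(space_brackets('Amazing Mary Jane(2019) 002.cbr'))
# -> 'Amazing Mary Jane (2019) 002.cbr'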
@@ -277,6 +296,23 @@ class FileChecker(object):
#it's always possible that this could grab something else since tags aren't unique. Try and figure it out.
if len(rippers) > 0:
m = re.findall('[^()]+', modfilename)
#--2019-11-30 needed for Glorith naming conventions when it's an nzb name with all formatting removed.
if len(m) == 1:
spf30 = re.compile(ur"[^.]+", re.UNICODE)
#logger.fdebug('spf30: %s' % spf30)
split_file30 = spf30.findall(modfilename)
#logger.fdebug('split_file30: %s' % split_file30)
if len(split_file30) > 3 and 'Glorith-HD' in modfilename:
scangroup = 'Glorith-HD'
sp_pos = 0
for x in split_file30:
if sp_pos+1 > len(split_file30):
break
if x[-1] == ',' and self.checkthedate(split_file30[sp_pos+1]):
modfilename = re.sub(x, x[:-1], modfilename, count=1)
break
sp_pos+=1
#-- end 2019-11-30
cnt = 1
for rp in rippers:
while cnt < len(m):
@@ -284,7 +320,7 @@ class FileChecker(object):
pass
elif rp.lower() in m[cnt].lower():
scangroup = re.sub('[\(\)]', '', m[cnt]).strip()
logger.fdebug('Scanner group tag discovered: ' + scangroup)
logger.fdebug('Scanner group tag discovered: %s' % scangroup)
modfilename = modfilename.replace(m[cnt],'').strip()
break
cnt +=1
@@ -321,11 +357,13 @@ class FileChecker(object):

sf3 = re.compile(ur"[^,\s_]+", re.UNICODE)
split_file3 = sf3.findall(modfilename)
if len(split_file3) == 1:
#--2019-11-30
if len(split_file3) == 1 or all([len(split_file3) == 2, scangroup == 'Glorith-HD']):
#--end 2019-11-30
logger.fdebug('Improperly formatted filename - there is no separation using appropriate characters between wording.')
sf3 = re.compile(ur"[^,\s_\.]+", re.UNICODE)
split_file3 = sf3.findall(modfilename)
logger.fdebug('NEW split_file3: ' + str(split_file3))
logger.fdebug('NEW split_file3: %s' % split_file3)

ret_sf2 = ' '.join(split_file3)
@@ -343,8 +381,9 @@ class FileChecker(object):
ret_sf1 = re.sub('\&', 'f11', ret_sf1).strip()
ret_sf1 = re.sub('\'', 'g11', ret_sf1).strip()

#split_file = re.findall('(?imu)\([\w\s-]+\)|[-+]?\d*\.\d+|\d+|[\w-]+|#?\d\.\d+|#(?<![\w\d])XCV(?![\w\d])+|\)', ret_sf1, re.UNICODE)
split_file = re.findall('(?imu)\([\w\s-]+\)|[-+]?\d*\.\d+|\d+[\s]COVERS+|\d{4}-\d{2}-\d{2}|\d+[(th|nd|rd|st)]+|\d+|[\w-]+|#?\d\.\d+|#[\.-]\w+|#[\d*\.\d+|\w+\d+]+|#(?<![\w\d])XCV(?![\w\d])+|#[\w+]|\)', ret_sf1, re.UNICODE)
#split_file = re.findall('(?imu)\([\w\s-]+\)|[-+]?\d*\.\d+|\d+[\s]COVERS+|\d{4}-\d{2}-\d{2}|\d+[(th|nd|rd|st)]+|\d+|[\w-]+|#?\d\.\d+|#[\.-]\w+|#[\d*\.\d+|\w+\d+]+|#(?<![\w\d])XCV(?![\w\d])+|#[\w+]|\)', ret_sf1, re.UNICODE)
split_file = re.findall('(?imu)\([\w\s-]+\)|[-+]?\d*\.\d+|\d+[\s]COVERS+|\d{4}-\d{2}-\d{2}|\d+[(th|nd|rd|st)]+|[\(^\)+]|\d+|[\w-]+|#?\d\.\d+|#[\.-]\w+|#[\d*\.\d+|\w+\d+]+|#(?<![\w\d])XCV(?![\w\d])+|#[\w+]|\)', ret_sf1, re.UNICODE)

#10-20-2018 ---START -- attempt to detect '01 (of 7.3)'
#10-20-2018 -- attempt to detect '36p ctc' as one element
spf = []
@@ -369,7 +408,7 @@ class FileChecker(object):
except Exception as e:
spf.append(x)

elif x == ')':
elif x == ')' or x == '(':
pass
elif x == 'p' or x == 'ctc':
try:
@@ -426,10 +465,10 @@ class FileChecker(object):
dtcheck = re.sub('[\(\)\,]', '', sf).strip()
#if there's more than one date, assume the right-most date is the actual issue date.
if any(['19' in dtcheck, '20' in dtcheck]) and not any([dtcheck.lower().startswith('v19'), dtcheck.lower().startswith('v20')]) and len(dtcheck) >=4:
logger.fdebug('checking date : ' + str(dtcheck))
logger.fdebug('checking date : %s' % dtcheck)
checkdate_response = self.checkthedate(dtcheck)
if checkdate_response:
logger.fdebug('date: ' + str(checkdate_response))
logger.fdebug('date: %s' % checkdate_response)
datecheck.append({'date': dtcheck,
'position': split_file.index(sf),
'mod_position': self.char_file_position(modfilename, sf, lastmod_position)})
@@ -437,10 +476,10 @@ class FileChecker(object):
#this handles the exceptions list in the match for alpha-numerics
test_exception = ''.join([i for i in sf if not i.isdigit()])
if any([x for x in exceptions if x.lower() == test_exception.lower()]):
logger.fdebug('Exception match: ' + test_exception)
logger.fdebug('Exception match: %s' % test_exception)
if lastissue_label is not None:
if lastissue_position == (split_file.index(sf) -1):
logger.fdebug('alphanumeric issue number detected as : ' + str(lastissue_label) + ' ' + str(sf))
logger.fdebug('alphanumeric issue number detected as : %s %s' % (lastissue_label,sf))
for x in possible_issuenumbers:
possible_issuenumbers = []
if int(x['position']) != int(lastissue_position):
@@ -449,7 +488,7 @@ class FileChecker(object):
'mod_position': x['mod_position'],
'validcountchk': x['validcountchk']})

possible_issuenumbers.append({'number': str(lastissue_label) + ' ' + str(sf),
possible_issuenumbers.append({'number': '%s %s' % (lastissue_label, sf),
'position': lastissue_position,
'mod_position': self.char_file_position(modfilename, sf, lastmod_position),
'validcountchk': validcountchk})
@@ -458,7 +497,7 @@ class FileChecker(object):
#test_exception is the alpha-numeric
logger.fdebug('Possible alpha numeric issue (or non-numeric only). Testing my theory.')
test_sf = re.sub(test_exception.lower(), '', sf.lower()).strip()
logger.fdebug('[' + test_exception + '] Removing possible alpha issue leaves: ' + test_sf + ' (Should be a numeric)')
logger.fdebug('[%s] Removing possible alpha issue leaves: %s (Should be a numeric)' % (test_exception, test_sf))
if test_sf.isdigit():
possible_issuenumbers.append({'number': sf,
'position': split_file.index(sf),
@@ -477,7 +516,7 @@ class FileChecker(object):
for x in list(wrds):
if x != '':
tmpissue_number = re.sub('XCV', x, split_file[split_file.index(sf)])
logger.info('[SPECIAL-CHARACTER ISSUE] Possible issue # : ' + tmpissue_number)
logger.info('[SPECIAL-CHARACTER ISSUE] Possible issue # : %s' % tmpissue_number)
possible_issuenumbers.append({'number': sf,
'position': split_file.index(sf),
'mod_position': self.char_file_position(modfilename, sf, lastmod_position),
@@ -501,10 +540,10 @@ class FileChecker(object):

if count:
# count = count.lstrip("0")
logger.fdebug('Mini-Series Count detected. Maximum issue # set to : ' + count.lstrip('0'))
logger.fdebug('Mini-Series Count detected. Maximum issue # set to : %s' % count.lstrip('0'))
# if the count was detected, then it's in a '(of 4)' or whatever pattern
# 95% of the time the digit immediately preceding the '(of 4)' is the actual issue #
logger.fdebug('Issue Number SHOULD BE: ' + str(lastissue_label))
logger.fdebug('Issue Number SHOULD BE: %s' % lastissue_label)
validcountchk = True

match2 = re.search('(\d+[\s])covers', sf, re.IGNORECASE)
@@ -516,9 +555,9 @@ class FileChecker(object):
if all([lastissue_position == (split_file.index(sf) -1), lastissue_label is not None, '#' not in sf, sf != 'p']):
#find it in the original file to see if there's a decimal between.
findst = lastissue_mod_position+1
if findst > len(modfilename):
if findst >= len(modfilename):
findst = len(modfilename) -1

if modfilename[findst] != '.' or modfilename[findst] != '#': #findst != '.' and findst != '#':
if sf.isdigit():
seper_num = False
@@ -549,7 +588,7 @@ class FileChecker(object):
#logger.fdebug('diff: ' + str(bb) + '[' + modfilename[bb] + ']')
if modfilename[bb] == '.':
#logger.fdebug('decimal detected.')
logger.fdebug('[DECiMAL-DETECTION] Issue being stored for validation as : ' + modfilename[findst:cf+len(sf)])
logger.fdebug('[DECiMAL-DETECTION] Issue being stored for validation as : %s' % modfilename[findst:cf+len(sf)])
for x in possible_issuenumbers:
possible_issuenumbers = []
#logger.fdebug('compare: ' + str(x['position']) + ' .. ' + str(lastissue_position))
@@ -583,12 +622,14 @@ class FileChecker(object):
lastissue_mod_position = file_length

elif '#' in sf:
logger.fdebug('Iissue number found: ' + sf)
logger.fdebug('Issue number found: %s' % sf)
#pound sign will almost always indicate an issue #, so just assume it's as such.
locateiss_st = modfilename.find('#')
locateiss_end = modfilename.find(' ', locateiss_st)
if locateiss_end == -1:
locateiss_end = len(modfilename)
if modfilename[locateiss_end-1] == ')':
locateiss_end = locateiss_end -1
possible_issuenumbers.append({'number': modfilename[locateiss_st:locateiss_end],
'position': split_file.index(sf), #locateiss_st})
'mod_position': self.char_file_position(modfilename, sf, lastmod_position),
@@ -597,11 +638,14 @@ class FileChecker(object):
#now we try to find the series title &/or volume label.
if any( [sf.lower().startswith('v'), sf.lower().startswith('vol'), volumeprior == True, 'volume' in sf.lower(), 'vol' in sf.lower(), 'part' in sf.lower()] ) and sf.lower() not in {'one','two','three','four','five','six'}:
if any([ split_file[split_file.index(sf)].isdigit(), split_file[split_file.index(sf)][3:].isdigit(), split_file[split_file.index(sf)][1:].isdigit() ]):
volume = re.sub("[^0-9]", "", sf)
if all(identifier in sf for identifier in ['.', 'v']):
volume = sf.split('.')[0]
else:
volume = re.sub("[^0-9]", "", sf)
if volumeprior:
try:
volume_found['position'] = split_file.index(volumeprior_label, current_pos -1) #if this passes, then we're ok, otherwise will try exception
logger.fdebug('volume_found: ' + str(volume_found['position']))
logger.fdebug('volume_found: %s' % volume_found['position'])
#remove volume numeric from split_file
split_file.pop(volume_found['position'])
split_file.pop(split_file.index(sf, current_pos-1))
@@ -662,13 +706,13 @@ class FileChecker(object):
lastissue_position = split_file.index(sf, current_pos)
lastissue_label = sf
lastissue_mod_position = file_length
#logger.fdebug('possible issue found: ' + str(sf)
#logger.fdebug('possible issue found: %s' % sf)
else:
try:
x = float(sf)
#validity check
if x < 0:
logger.fdebug('I have encountered a negative issue #: ' + str(sf))
logger.fdebug('I have encountered a negative issue #: %s' % sf)
possible_issuenumbers.append({'number': sf,
'position': split_file.index(sf, lastissue_position), #modfilename.find(sf)})
'mod_position': self.char_file_position(modfilename, sf, lastmod_position),
@@ -678,7 +722,7 @@ class FileChecker(object):
lastissue_label = sf
lastissue_mod_position = file_length
elif x > 0:
logger.fdebug('I have encountered a decimal issue #: ' + str(sf))
logger.fdebug('I have encountered a decimal issue #: %s' % sf)
possible_issuenumbers.append({'number': sf,
'position': split_file.index(sf, lastissue_position), #modfilename.find(sf)})
'mod_position': self.char_file_position(modfilename, sf, lastmod_position),
@@ -756,13 +800,13 @@ class FileChecker(object):
issue_year = None
possible_years = []
yearmodposition = None
logger.fdebug('datecheck: ' + str(datecheck))
logger.fdebug('datecheck: %s' % datecheck)
if len(datecheck) > 0:
for dc in sorted(datecheck, key=operator.itemgetter('position'), reverse=True):
a = self.checkthedate(dc['date'])
ab = str(a)
sctd = self.checkthedate(str(dt.datetime.now().year))
logger.fdebug('sctd: ' + str(sctd))
logger.fdebug('sctd: %s' % sctd)
# + 1 sctd so that we can allow for issue dates that cross over into the following year when it's nearer to the end of said year.
if int(ab) > int(sctd) + 1:
logger.fdebug('year is in the future, ignoring and assuming part of series title.')
@@ -771,19 +815,19 @@ class FileChecker(object):
continue
else:
issue_year = dc['date']
logger.fdebug('year verified as : ' + str(issue_year))
logger.fdebug('year verified as : %s' % issue_year)
if highest_series_pos > dc['position']: highest_series_pos = dc['position']
yearposition = dc['position']
yearmodposition = dc['mod_position']
if len(ab) == 4:
issue_year = ab
logger.fdebug('year verified as: ' + str(issue_year))
logger.fdebug('year verified as: %s' % issue_year)
possible_years.append({'year': issue_year,
'yearposition': dc['position'],
'yearmodposition': dc['mod_position']})
else:
issue_year = ab
logger.fdebug('date verified as: ' + str(issue_year))
logger.fdebug('date verified as: %s' % issue_year)

if len(possible_years) == 1:
issueyear = possible_years[0]['year']
@ -809,20 +853,35 @@ class FileChecker(object):
logger.fdebug('No year present within title - ignoring as a variable.')


logger.fdebug('highest_series_position: ' + str(highest_series_pos))
logger.fdebug('highest_series_position: %s' % highest_series_pos)
#---2019-11-30 account for scanner Glorith-HD stupid naming conventions
if len(possible_issuenumbers) == 0 and scangroup == 'Glorith-HD':
logger.fdebug('Abnormal formatting detected. Time to fix this shiet, yo.')
if any([yearposition == 0, yearposition is None]):
logger.fdebug('Too stupid of a format. Nope. Not gonna happen - just reinvent the wheel you fooker.')
else:
issposs = yearposition + 1
#logger.fdebug('split_file: %s' % split_file[issposs])
if '(' in split_file[issposs] and ')' in split_file[issposs]:
new_issuenumber = split_file[issposs]
possible_issuenumbers.append({'number': re.sub('[/(/)]', '', split_file[issposs]).strip(),
'position': split_file.index(new_issuenumber, yearposition),
'mod_position': self.char_file_position(modfilename, new_issuenumber, yearmodposition),
'validcountchk': False})
#---end 2019-11-30
issue_number = None
dash_numbers = []
issue_number_position = len(split_file)
if len(possible_issuenumbers) > 0:
logger.fdebug('possible_issuenumbers: ' + str(possible_issuenumbers))
logger.fdebug('possible_issuenumbers: %s' % possible_issuenumbers)
if len(possible_issuenumbers) >= 1:
p = 1
if '-' not in split_file[0]:
finddash = modfilename.find('-')
if finddash != -1:
logger.fdebug('hyphen located at position: ' + str(finddash))
logger.fdebug('hyphen located at position: %s' % finddash)
if yearposition:
logger.fdebug('yearposition: ' + str(yearposition))
logger.fdebug('yearposition: %s' % yearposition)
else:
finddash = -1
logger.fdebug('dash is in first word, not considering for determining issue number.')
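The Glorith-HD workaround above grabs the token immediately after the year and, when it is wrapped in parentheses, treats it as the issue number. A cut-down sketch of that extraction (made-up token list):

import re

split_file = ['Series', 'Name', '2019', '(003)']   # made-up tokens
yearposition = 2                                   # index of the year token
token = split_file[yearposition + 1]               # token right after the year

if '(' in token and ')' in token:
    print(re.sub('[()]', '', token).strip())       # 003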
@ -841,7 +900,7 @@ class FileChecker(object):
elif pis['validcountchk'] == True:
issue_number = pis['number']
issue_number_position = pis['position']
logger.fdebug('Issue verified and detected as part of a numeric count sequence: ' + issue_number)
logger.fdebug('Issue verified and detected as part of a numeric count sequence: %s' % issue_number)
if highest_series_pos > pis['position']: highest_series_pos = pis['position']
break
elif pis['mod_position'] > finddash and finddash != -1:

@ -851,13 +910,18 @@ class FileChecker(object):
'number': pis['number'],
'position': pis['position']})
continue
#2019-10-05 fix - if decimal-spaced filename has a series title with a hyphen will include issue # as part of series title
elif yearposition == pis['position']:
logger.info('Already validated year, ignoring as possible issue number: %s' % pis['number'])
continue
#end 2019-10-05
elif yearposition == pis['position']:
logger.fdebug('Already validated year, ignoring as possible issue number: ' + str(pis['number']))
logger.fdebug('Already validated year, ignoring as possible issue number: %s' % pis['number'])
continue
if p == 1:
issue_number = pis['number']
issue_number_position = pis['position']
logger.fdebug('issue number :' + issue_number) #(pis)
logger.fdebug('issue number :%s' % issue_number) #(pis)
if highest_series_pos > pis['position'] and issue2year is False: highest_series_pos = pis['position']
#else:
#logger.fdebug('numeric probably belongs to series title: ' + str(pis))
@ -881,12 +945,12 @@ class FileChecker(object):
fin_pos = dn['position']

if fin_num:
logger.fdebug('Issue number re-corrected to : ' + fin_num)
logger.fdebug('Issue number re-corrected to : %s' % fin_num)
issue_number = fin_num
if highest_series_pos > fin_pos: highest_series_pos = fin_pos

#--- this is new - 2016-09-18 /account for unicode in issue number when issue number is not detected above
logger.fdebug('issue_position: ' + str(issue_number_position))
logger.fdebug('issue_position: %s' % issue_number_position)
if all([issue_number_position == highest_series_pos, 'XCV' in split_file, issue_number is None]):
for x in list(wrds):
if x != '':
@ -903,23 +967,25 @@ class FileChecker(object):
else:
logger.info('No issue number present in filename.')
else:
logger.fdebug('issue verified as : ' + issue_number)
logger.fdebug('issue verified as : %s' % issue_number)
issue_volume = None
if len(volume_found) > 0:
issue_volume = 'v' + str(volume_found['volume'])
if all([highest_series_pos + 1 != volume_found['position'], highest_series_pos != volume_found['position'] + 1, sep_volume == False, booktype == 'issue', len(possible_issuenumbers) > 0]):
logger.fdebug('Extra item(s) are present between the volume label and the issue number. Checking..')
split_file.insert(int(issue_number_position), split_file.pop(volume_found['position'])) #highest_series_pos-1, split_file.pop(volume_found['position']))
logger.fdebug('new split: ' + str(split_file))
logger.fdebug('new split: %s' % split_file)
highest_series_pos = volume_found['position'] -1
issue_number_position -=1
#2019-10-02 - account for volume BEFORE issue number
if issue_number_position > highest_series_pos:
issue_number_position -=1
else:
if highest_series_pos > volume_found['position']:
if sep_volume:
highest_series_pos = volume_found['position'] - 1
else:
highest_series_pos = volume_found['position']
logger.fdebug('Volume detected as : ' + issue_volume)
logger.fdebug('Volume detected as : %s' % issue_volume)

if all([len(volume_found) == 0, booktype != 'issue']) or all([len(volume_found) == 0, issue_number_position == len(split_file)]):
issue_volume = 'v1'
@ -946,9 +1012,9 @@ class FileChecker(object):
if len(possible_years) > 1:
for x in sorted(possible_years, key=operator.itemgetter('yearposition'), reverse=False):
if x['yearposition'] <= highest_series_pos:
logger.fdebug('year ' + str(x['year']) + ' is within series title. Ignoring as YEAR value')
logger.fdebug('year %s is within series title. Ignoring as YEAR value' % x['year'])
else:
logger.fdebug('year ' + str(x['year']) + ' is outside of series title range. Accepting of year.')
logger.fdebug('year %s is outside of series title range. Accepting of year.' % x['year'])
issue_year = x['year']
highest_series_pos = x['yearposition']
break
@ -969,7 +1035,13 @@ class FileChecker(object):
alt_issue = None
try:
if yearposition is not None:
tmpval = yearposition - issue_number_position
try:
if volume_found['position'] >= issue_number_position:
tmpval = highest_series_pos + (issue_number_position - volume_found['position'])
else:
tmpval = yearposition - issue_number_position
except:
tmpval = yearposition - issue_number_position
else:
tmpval = 1
except:
@ -1075,7 +1147,7 @@ class FileChecker(object):
if '\?' in series_name:
series_name = re.sub('\?', '', series_name).strip()

logger.fdebug('series title possibly: ' + series_name)
logger.fdebug('series title possibly: %s' % series_name)
if splitvalue is not None:
logger.fdebug('[SPLITVALUE] possible issue title: %s' % splitvalue)
alt_series = '%s %s' % (series_name, splitvalue)
@ -1214,7 +1286,7 @@ class FileChecker(object):

try:
if self.AS_ALT[0] != '127372873872871091383 abdkhjhskjhkjdhakajhf':
logger.fdebug('Possible Alternate Names to match against (if necessary): ' + str(self.AS_Alt))
logger.fdebug('Possible Alternate Names to match against (if necessary): %s' % self.AS_Alt)
except:
pass

@ -1259,7 +1331,7 @@ class FileChecker(object):
loopchk = [x for x in self.AS_Alt if re.sub('[\|\s]','', x.lower()).strip() == re.sub('[\|\s]','', nspace_seriesname.lower()).strip()]
if len(loopchk) > 0 and loopchk[0] != '':
if mylar.CONFIG.FOLDER_SCAN_LOG_VERBOSE:
logger.fdebug('[FILECHECKER] This should be an alternate: ' + str(loopchk))
logger.fdebug('[FILECHECKER] This should be an alternate: %s' % loopchk)
if any(['annual' in series_name.lower(), 'special' in series_name.lower()]):
if mylar.CONFIG.FOLDER_SCAN_LOG_VERBOSE:
logger.fdebug('[FILECHECKER] Annual/Special detected - proceeding')
@ -1290,40 +1362,40 @@ class FileChecker(object):
enable_annual = False

if mylar.CONFIG.FOLDER_SCAN_LOG_VERBOSE:
logger.fdebug('[FILECHECKER] Complete matching list of names to this file [' + str(len(loopchk)) + '] : ' + str(loopchk))
logger.fdebug('[FILECHECKER] Complete matching list of names to this file [%s] : %s' % (len(loopchk), loopchk))

for loopit in loopchk:
#now that we have the list of all possible matches for the watchcomic + alternate search names, we go through the list until we find a match.
modseries_name = loopit
if mylar.CONFIG.FOLDER_SCAN_LOG_VERBOSE:
logger.fdebug('[FILECHECKER] AS_Tuple : ' + str(self.AS_Tuple))
logger.fdebug('[FILECHECKER] AS_Tuple : %s' % self.AS_Tuple)
for ATS in self.AS_Tuple:
if mylar.CONFIG.FOLDER_SCAN_LOG_VERBOSE:
logger.fdebug('[FILECHECKER] ' + str(ATS['AS_Alternate']) + ' comparing to ' + nspace_seriesname)
logger.fdebug('[FILECHECKER] %s comparing to %s' % (ATS['AS_Alternate'], nspace_seriesname))
if re.sub('\|','', ATS['AS_Alternate'].lower()).strip() == re.sub('\|','', nspace_seriesname.lower()).strip():
if mylar.CONFIG.FOLDER_SCAN_LOG_VERBOSE:
logger.fdebug('[FILECHECKER] Associating ComicID : ' + str(ATS['ComicID']))
logger.fdebug('[FILECHECKER] Associating ComicID : %s' % ATS['ComicID'])
annual_comicid = str(ATS['ComicID'])
modseries_name = ATS['AS_Alternate']
break

logger.fdebug('[FILECHECKER] ' + modseries_name + ' - watchlist match on : ' + filename)
logger.fdebug('[FILECHECKER] %s - watchlist match on : %s' % (modseries_name, filename))

if enable_annual:
if annual_comicid is not None:
if mylar.CONFIG.FOLDER_SCAN_LOG_VERBOSE:
logger.fdebug('enable annual is on')
logger.fdebug('annual comicid is ' + str(annual_comicid))
logger.fdebug('annual comicid is %s' % annual_comicid)
if 'biannual' in nspace_watchcomic.lower():
if mylar.CONFIG.FOLDER_SCAN_LOG_VERBOSE:
logger.fdebug('bi annual detected')
justthedigits = 'BiAnnual ' + justthedigits
justthedigits = 'BiAnnual %s' % justthedigits
elif 'annual' in nspace_watchcomic.lower():
if mylar.CONFIG.FOLDER_SCAN_LOG_VERBOSE:
logger.fdebug('annual detected')
justthedigits = 'Annual ' + justthedigits
justthedigits = 'Annual %s' % justthedigits
elif 'special' in nspace_watchcomic.lower():
justthedigits = 'Special ' + justthedigits
justthedigits = 'Special %s' % justthedigits

return {'process_status': 'match',
'sub': series_info['sub'],
@ -1405,7 +1477,7 @@ class FileChecker(object):
'filename': fname,
'comicsize': comicsize})

logger.info('there are ' + str(len(filelist)) + ' files.')
logger.info('there are %s files.' % len(filelist))

return filelist

@ -1507,15 +1579,15 @@ class FileChecker(object):
# extract the !!, store it and then remove it so things will continue.
as_start = AS_Alternate.find('!!')
if mylar.CONFIG.FOLDER_SCAN_LOG_VERBOSE:
logger.fdebug('as_start: ' + str(as_start) + ' --- ' + str(AS_Alternate[as_start:]))
logger.fdebug('as_start: %s --- %s' % (as_start, AS_Alternate[as_start:]))
as_end = AS_Alternate.find('##', as_start)
if as_end == -1:
as_end = len(AS_Alternate)
if mylar.CONFIG.FOLDER_SCAN_LOG_VERBOSE:
logger.fdebug('as_start: ' + str(as_end) + ' --- ' + str(AS_Alternate[as_start:as_end]))
logger.fdebug('as_start: %s --- %s' % (as_end, AS_Alternate[as_start:as_end]))
AS_ComicID = AS_Alternate[as_start +2:as_end]
if mylar.CONFIG.FOLDER_SCAN_LOG_VERBOSE:
logger.fdebug('[FILECHECKER] Extracted comicid for given annual : ' + str(AS_ComicID))
logger.fdebug('[FILECHECKER] Extracted comicid for given annual : %s' % AS_ComicID)
AS_Alternate = re.sub('!!' + str(AS_ComicID), '', AS_Alternate)
AS_tupled = True
as_dyninfo = self.dynamic_replace(AS_Alternate)
@ -1611,19 +1683,24 @@ class FileChecker(object):

return dateline

def validateAndCreateDirectory(dir, create=False, module=None):
def validateAndCreateDirectory(dir, create=False, module=None, dmode=None):
if module is None:
module = ''
module += '[DIRECTORY-CHECK]'
if dmode is None:
dirmode = 'comic'
else:
dirmode = dmode

try:
if os.path.exists(dir):
logger.info(module + ' Found comic directory: ' + dir)
logger.info('%s Found %s directory: %s' % (module, dirmode, dir))
return True
else:
logger.warn(module + ' Could not find comic directory: ' + dir)
logger.warn('%s Could not find %s directory: %s' % (module, dirmode, dir))
if create:
if dir.strip():
logger.info(module + ' Creating comic directory (' + str(mylar.CONFIG.CHMOD_DIR) + ') : ' + dir)
logger.info('%s Creating %s directory (%s) : %s' % (module, dirmode, mylar.CONFIG.CHMOD_DIR, dir))
try:
os.umask(0) # this is probably redundant, but it doesn't hurt to clear the umask here.
if mylar.CONFIG.ENFORCE_PERMS:

@ -1633,15 +1710,15 @@ def validateAndCreateDirectory(dir, create=False, module=None):
else:
os.makedirs(dir.rstrip())
except OSError as e:
logger.warn(module + ' Could not create directory: ' + dir + '[' + str(e) + ']. Aborting.')
logger.warn('%s Could not create directory: %s [%s]. Aborting.' % (module, dir, e))
return False
else:
return True
else:
logger.warn(module + ' Provided directory [' + dir + '] is blank. Aborting.')
logger.warn('%s Provided directory [%s] is blank. Aborting.' % (module, dir))
return False
except OSError as e:
logger.warn(module + ' Could not create directory: ' + dir + '[' + str(e) + ']. Aborting.')
logger.warn('%s Could not create directory: %s [%s]. Aborting.' % (module, dir, e))
return False
return False

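The new dmode argument only changes the log wording, with 'comic' remaining the default, so callers can say what kind of directory is being checked. A tiny sketch of just that defaulting (hypothetical helper name):

def directory_label(module, dmode=None):
    # hypothetical mirror of the new defaulting: 'comic' unless dmode given
    dirmode = 'comic' if dmode is None else dmode
    return '%s Found %s directory' % (module, dirmode)

print(directory_label('[DIRECTORY-CHECK]'))          # ... comic directory
print(directory_label('[DIRECTORY-CHECK]', 'ddl'))   # ... ddl directory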
@ -1704,7 +1781,7 @@ def setperms(path, dir=False):
logger.fdebug('Successfully changed permissions [' + str(mylar.CONFIG.CHMOD_DIR) + ' / ' + str(mylar.CONFIG.CHMOD_FILE) + ']')

except OSError:
logger.error('Could not change permissions : ' + path + '. Exiting...')
logger.error('Could not change permissions : %s. Exiting...' % path)

return

@ -375,6 +375,14 @@ class GC(object):
#write the filename to the db for tracking purposes...
myDB.upsert('ddl_info', {'filename': filename, 'remote_filesize': remote_filesize}, {'id': id})

if mylar.CONFIG.DDL_LOCATION is not None and not os.path.isdir(mylar.CONFIG.DDL_LOCATION):
checkdirectory = mylar.filechecker.validateAndCreateDirectory(mylar.CONFIG.DDL_LOCATION, True)
if not checkdirectory:
logger.warn('[ABORTING] Error trying to validate/create DDL download directory: %s.' % mylar.CONFIG.DDL_LOCATION)
return ({"success": False,
"filename": filename,
"path": None})

path = os.path.join(mylar.CONFIG.DDL_LOCATION, filename)

if t.headers.get('content-encoding') == 'gzip': #.get('Content-Encoding') == 'gzip':
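Net effect of the added block: the DDL target directory is validated (and created if needed) before any bytes are fetched, and the grab aborts with a structured failure instead of dying on the file write. In isolation the guard looks roughly like this (placeholder path; os.makedirs standing in for the validateAndCreateDirectory call):

import os

def ensure_download_dir(location, filename):
    # placeholder stand-in for mylar.filechecker.validateAndCreateDirectory
    if location is not None and not os.path.isdir(location):
        try:
            os.makedirs(location)
        except OSError:
            return {'success': False, 'filename': filename, 'path': None}
    return {'success': True, 'filename': filename,
            'path': os.path.join(location, filename)}

print(ensure_download_dir('/tmp/mylar-ddl', 'issue.cbz'))   # placeholder path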
@ -2476,7 +2476,8 @@ def issue_find_ids(ComicName, ComicID, pack, IssueNumber):
tmp_annuals = pack[pack.find('Annual'):]
tmp_ann = re.sub('[annual/annuals/+]', '', tmp_annuals.lower()).strip()
tmp_pack = re.sub('[annual/annuals/+]', '', pack.lower()).strip()
pack_issues = range(int(tmp_pack[:tmp_pack.find('-')]),int(tmp_pack[tmp_pack.find('-')+1:])+1)
pack_issues_numbers = re.findall(r'\d+', tmp_pack)
pack_issues = range(int(pack_issues_numbers[0]),int(pack_issues_numbers[1])+1)
annualize = True

issues = {}
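The replacement is more tolerant of pack labels: rather than slicing on the first hyphen, it regexes out the first two numbers. A sketch with a made-up pack string that the old slicing would choke on:

import re

tmp_pack = '1 - 12 (2019)'   # made-up label; int() on the old slice raises ValueError
nums = re.findall(r'\d+', tmp_pack)
pack_issues = range(int(nums[0]), int(nums[1]) + 1)
print(list(pack_issues))     # [1, 2, ..., 12]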
@ -2939,6 +2940,14 @@ def weekly_info(week=None, year=None, current=None):
weeknumber = 51
year = 2018

#monkey patch for 2019/2020 - week 52/week 0
if all([weeknumber == 52, c_weeknumber == 51, c_weekyear == 2019]):
weeknumber = 0
year = 2020
elif all([weeknumber == 52, c_weeknumber == 0, c_weekyear == 2020]):
weeknumber = 51
year = 2019

#view specific week (prev_week, next_week)
startofyear = date(year,1,1)
week0 = startofyear - timedelta(days=startofyear.isoweekday())
@ -2959,6 +2968,14 @@ def weekly_info(week=None, year=None, current=None):
weeknumber = 51
year = 2018

#monkey patch for 2019/2020 - week 52/week 0
if all([weeknumber == 52, c_weeknumber == 51, c_weekyear == 2019]) or all([weeknumber == '52', year == '2019']):
weeknumber = 0
year = 2020
elif all([weeknumber == 52, c_weeknumber == 0, c_weekyear == 2020]):
weeknumber = 51
year = 2019

stweek = datetime.datetime.strptime(todaydate.strftime('%Y-%m-%d'), '%Y-%m-%d')
startweek = stweek - timedelta(days = (stweek.weekday() + 1) % 7)
midweek = startweek + timedelta(days = 3)
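Both copies of the monkey patch compensate for the same calendar quirk: 2020 started mid-week, so the Sunday-based pull-list week 0 of 2020 overlaps ISO week 52 of 2019. The underlying behaviour, straight from the standard library:

import datetime

# 2019-12-29 was a Sunday: ISO still calls it week 52 of 2019, while a
# Sunday-start pull-list calendar already counts it toward 2020.
print(datetime.date(2019, 12, 29).isocalendar())   # (2019, 52, 7)
print(datetime.date(2019, 12, 30).isocalendar())   # (2020, 1, 1)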
@ -3110,6 +3127,7 @@ def postprocess_main(queue):
time.sleep(5)

elif mylar.APILOCK is False and queue.qsize() >= 1: #len(queue) > 1:
pp = None
item = queue.get(True)
logger.info('Now loading from post-processing queue: %s' % item)
if item == 'exit':

@ -3124,6 +3142,11 @@ def postprocess_main(queue):
pp = pprocess.post_process()
time.sleep(5) #arbitrary sleep to let the process attempt to finish pp'ing

if pp is not None:
if pp['mode'] == 'stop':
#reset the lock so any subsequent items can pp and not keep the queue locked up.
mylar.APILOCK = False

if mylar.APILOCK is True:
logger.info('Another item is post-processing still...')
time.sleep(15)
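The reworked loop threads the pp result through so the API lock is released when a run reports a 'stop' mode. The overall shape is a standard sentinel-terminated queue consumer, roughly (stand-ins for mylar.APILOCK and pprocess.post_process()):

try:
    import queue as qmod        # Python 3
except ImportError:
    import Queue as qmod        # Python 2

def consume(q):
    # Shape of the loop above: block on the queue, bail on the 'exit'
    # sentinel, and clear the lock whenever a worker reports 'stop'.
    apilock = False             # stand-in for mylar.APILOCK
    while True:
        item = q.get(True)
        if item == 'exit':
            return
        apilock = True
        pp = {'mode': 'stop'}   # stand-in for pprocess.post_process()
        if pp is not None and pp['mode'] == 'stop':
            apilock = False     # reset so subsequent items can post-process

q = qmod.Queue()
q.put({'nzb_name': 'example'})  # hypothetical queue payload
q.put('exit')
consume(q)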
@ -30,6 +30,7 @@ import requests
import smtplib
from email.mime.multipart import MIMEMultipart
from email.mime.text import MIMEText
from email.utils import formatdate, make_msgid

# This was obviously all taken from headphones with great appreciation :)

@ -383,6 +384,8 @@ class EMAIL:
msg['From'] = str(self.emailfrom)
msg['To'] = str(self.emailto)
msg['Subject'] = subject
msg['Date'] = formatdate()
msg['Message-ID'] = make_msgid('mylar')
msg.attach(MIMEText(message, 'plain'))

if self.emailenc == 1:
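The two added headers matter for deliverability, since messages without Date or Message-ID headers are easy spam-filter bait. A minimal sketch of the assembled message (placeholder addresses):

from email.mime.multipart import MIMEMultipart
from email.mime.text import MIMEText
from email.utils import formatdate, make_msgid

msg = MIMEMultipart()
msg['From'] = 'mylar@example.com'          # placeholder
msg['To'] = 'you@example.com'              # placeholder
msg['Subject'] = 'Mylar notification'
msg['Date'] = formatdate()                 # RFC 2822 date, added by this patch
msg['Message-ID'] = make_msgid('mylar')    # unique id, added by this patch
msg.attach(MIMEText('issue snatched', 'plain'))
print(msg.as_string())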
@ -1122,6 +1122,9 @@ def NZB_SEARCH(ComicName, IssueNumber, ComicYear, SeriesYear, Publisher, IssueDa
continue
else:
logger.fdebug('match_check: %s' % filecomic)
if filecomic['process_status'] == 'fail':
logger.fdebug('%s was not a match to %s (%s)' % (cleantitle, ComicName, SeriesYear))
continue
elif booktype != parsed_comic['booktype']:
logger.fdebug('Booktypes do not match. Looking for %s, this is a %s. Ignoring this result.' % (booktype, parsed_comic['booktype']))
continue
@ -1148,39 +1151,38 @@ def NZB_SEARCH(ComicName, IssueNumber, ComicYear, SeriesYear, Publisher, IssueDa
fndcomicversion = None

if parsed_comic['series_volume'] is not None:
versionfound = "yes"
if len(parsed_comic['series_volume'][1:]) == 4 and parsed_comic['series_volume'][1:].isdigit(): #v2013
logger.fdebug("[Vxxxx] Version detected as %s" % (parsed_comic['series_volume']))
vers4year = "yes" #re.sub("[^0-9]", " ", str(ct)) #remove the v
versionfound = "yes"
if len(parsed_comic['series_volume'][1:]) == 4 and parsed_comic['series_volume'][1:].isdigit(): #v2013
logger.fdebug("[Vxxxx] Version detected as %s" % (parsed_comic['series_volume']))
vers4year = "yes" #re.sub("[^0-9]", " ", str(ct)) #remove the v
fndcomicversion = parsed_comic['series_volume']
elif len(parsed_comic['series_volume'][1:]) == 1 and parsed_comic['series_volume'][1:].isdigit(): #v2
logger.fdebug("[Vx] Version detected as %s" % parsed_comic['series_volume'])
vers4vol = parsed_comic['series_volume']
fndcomicversion = parsed_comic['series_volume']
elif parsed_comic['series_volume'][1:].isdigit() and len(parsed_comic['series_volume']) < 4:
logger.fdebug('[Vxxx] Version detected as %s' % parsed_comic['series_volume'])
vers4vol = parsed_comic['series_volume']
fndcomicversion = parsed_comic['series_volume']
elif parsed_comic['series_volume'].isdigit() and len(parsed_comic['series_volume']) <=4:
# this stuff is necessary for 32P volume manipulation
if len(parsed_comic['series_volume']) == 4:
vers4year = "yes"
fndcomicversion = parsed_comic['series_volume']
elif len(parsed_comic['series_volume'][1:]) == 1 and parsed_comic['series_volume'][1:].isdigit(): #v2
logger.fdebug("[Vx] Version detected as %s" % parsed_comic['series_volume'])
elif len(parsed_comic['series_volume']) == 1:
vers4vol = parsed_comic['series_volume']
fndcomicversion = parsed_comic['series_volume']
elif parsed_comic['series_volume'][1:].isdigit() and len(parsed_comic['series_volume']) < 4:
logger.fdebug('[Vxxx] Version detected as %s' % parsed_comic['series_volume'])
elif len(parsed_comic['series_volume']) < 4:
vers4vol = parsed_comic['series_volume']
fndcomicversion = parsed_comic['series_volume']
elif parsed_comic['series_volume'].isdigit() and len(parsed_comic['series_volume']) <=4:
# this stuff is necessary for 32P volume manipulation
if len(parsed_comic['series_volume']) == 4:
vers4year = "yes"
fndcomicversion = parsed_comic['series_volume']
elif len(parsed_comic['series_volume']) == 1:
vers4vol = parsed_comic['series_volume']
fndcomicversion = parsed_comic['series_volume']
elif len(parsed_comic['series_volume']) < 4:
vers4vol = parsed_comic['series_volume']
fndcomicversion = parsed_comic['series_volume']
else:
logger.fdebug("error - unknown length for : %s" % parsed_comic['series_volume'])

else:
logger.fdebug("error - unknown length for : %s" % parsed_comic['series_volume'])

yearmatch = "false"
if vers4vol != "no" or vers4year != "no":
logger.fdebug("Series Year not provided but Series Volume detected of %s. Bypassing Year Match." % fndcomicversion)
yearmatch = "true"
elif ComVersChk == 0:
elif ComVersChk == 0 and parsed_comic['issue_year'] is None:
logger.fdebug("Series version detected as V1 (only series in existence with that title). Bypassing Year/Volume check")
yearmatch = "true"
elif any([UseFuzzy == "0", UseFuzzy == "2", UseFuzzy is None, IssDateFix != "no"]) and parsed_comic['issue_year'] is not None:
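The hunk above mostly re-indents and de-duplicates the version-classification chain; the classification itself keys off the shape of the parsed volume token. A condensed sketch (hypothetical helper; the real code also tracks vers4year/vers4vol flags for the 32P handling):

def classify_volume(vol):
    # 'v2013' -> volume-as-year, 'v2'/'v12' -> plain volume; bare digits
    # get the same treatment for 32P-style naming.
    body = vol[1:] if not vol.isdigit() else vol
    if body.isdigit():
        if len(body) == 4:
            return ('year', vol)
        return ('volume', vol)
    return ('unknown', vol)

for v in ('v2013', 'v2', 'v12', '2013', '1'):
    print(v, classify_volume(v))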
@ -1529,7 +1531,12 @@ def NZB_SEARCH(ComicName, IssueNumber, ComicYear, SeriesYear, Publisher, IssueDa
issinfo = mylar.COMICINFO['pack_issuelist']
if issinfo is not None:
#we need to get EVERY issue ID within the pack and update the log to reflect that they're being downloaded via a pack.
logger.fdebug('Found matching comic within pack...preparing to send to Updater with IssueIDs: %s and nzbname of %s' % (issueid_info, nzbname))

try:
logger.fdebug('Found matching comic within pack...preparing to send to Updater with IssueIDs: %s and nzbname of %s' % (issueid_info, nzbname))
except NameError:
logger.fdebug('Did not find issueid_info')

#because packs need to have every issue that's not already Downloaded in a Snatched status, throw it to the updater here as well.
for isid in issinfo['issues']:
updater.nzblog(isid['issueid'], nzbname, ComicName, SARC=SARC, IssueArcID=IssueArcID, id=nzbid, prov=tmpprov, oneoff=oneoff)

@ -856,8 +856,7 @@ def foundsearch(ComicID, IssueID, mode=None, down=None, provider=None, SARC=None
pass

myDB.upsert("oneoffhistory", newValue, ctlVal)

logger.info(module + ' Updated the status (Snatched) complete for ' + ComicName + ' Issue: ' + str(IssueNum))
logger.info('%s Updated the status (Snatched) complete for %s Issue: %s' % (module, ComicName, IssueNum))
else:
if down == 'PP':
logger.info(module + ' Setting status to Post-Processed in history.')

@ -917,7 +916,7 @@ def foundsearch(ComicID, IssueID, mode=None, down=None, provider=None, SARC=None
newVal['year'] = pullinfo['year']
myDB.upsert("oneoffhistory", newVal, ctlVal)

logger.info(module + ' Updating Status (' + downstatus + ') now complete for ' + ComicName + ' issue: ' + IssueNum)
logger.info('%s Updating Status (%s) now completed for %s issue: %s' % (module, downstatus, ComicName, IssueNum))
return

def forceRescan(ComicID, archive=None, module=None, recheck=False):

@ -2580,7 +2580,7 @@ class WebInterface(object):
interval = str(mylar.CONFIG.DOWNLOAD_SCAN_INTERVAL) + ' mins'
if 'version' in jb['JobName'].lower():
status = mylar.VERSION_STATUS
interval = str(mylar.CONFIG.CHECK_GITHUB_INTERVAL) + 'mins'
interval = str(mylar.CONFIG.CHECK_GITHUB_INTERVAL) + ' mins'

if status != jb['Status'] and not('rss' in jb['JobName'].lower()):
status = jb['Status']
@ -5370,14 +5370,14 @@ class WebInterface(object):
checked_configs = ['enable_https', 'launch_browser', 'syno_fix', 'auto_update', 'annuals_on', 'api_enabled', 'nzb_startup_search',
'enforce_perms', 'sab_to_mylar', 'torrent_local', 'torrent_seedbox', 'rtorrent_ssl', 'rtorrent_verify', 'rtorrent_startonload',
'enable_torrents', 'enable_rss', 'nzbsu', 'nzbsu_verify',
'dognzb', 'dognzb_verify', 'experimental', 'enable_torrent_search', 'enable_public', 'enable_32p', 'enable_torznab',
'dognzb', 'dognzb_verify', 'experimental', 'enable_torrent_search', 'enable_32p', 'enable_torznab',
'newznab', 'use_minsize', 'use_maxsize', 'ddump', 'failed_download_handling', 'sab_client_post_processing', 'nzbget_client_post_processing',
'failed_auto', 'post_processing', 'enable_check_folder', 'enable_pre_scripts', 'enable_snatch_script', 'enable_extra_scripts',
'enable_meta', 'cbr2cbz_only', 'ct_tag_cr', 'ct_tag_cbl', 'ct_cbz_overwrite', 'rename_files', 'replace_spaces', 'zero_level',
'lowercase_filenames', 'autowant_upcoming', 'autowant_all', 'comic_cover_local', 'alternate_latest_series_covers', 'cvinfo', 'snatchedtorrent_notify',
'prowl_enabled', 'prowl_onsnatch', 'pushover_enabled', 'pushover_onsnatch', 'boxcar_enabled',
'boxcar_onsnatch', 'pushbullet_enabled', 'pushbullet_onsnatch', 'telegram_enabled', 'telegram_onsnatch', 'slack_enabled', 'slack_onsnatch',
'email_enabled', 'email_enc', 'email_ongrab', 'email_onpost', 'opds_enable', 'opds_authentication', 'opds_metainfo', 'enable_ddl', 'deluge_pause']
'email_enabled', 'email_enc', 'email_ongrab', 'email_onpost', 'opds_enable', 'opds_authentication', 'opds_metainfo', 'enable_ddl', 'deluge_pause'] #enable_public

for checked_config in checked_configs:
if checked_config not in kwargs:
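Checkbox settings need this list because unchecked HTML checkboxes never appear in the POSTed kwargs at all; the handler walks the list and zeroes anything missing, so removing 'enable_public' stops it from being force-reset. Roughly (trimmed list, stand-in for the real handler):

checked_configs = ['enable_https', 'enable_torrent_search']   # trimmed example
kwargs = {'enable_https': '1'}   # what the browser actually POSTed

for checked_config in checked_configs:
    if checked_config not in kwargs:
        kwargs[checked_config] = 0   # unchecked boxes never arrive in the POST

print(kwargs)   # {'enable_https': '1', 'enable_torrent_search': 0}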
@ -3,14 +3,26 @@ import os.path
import ConfigParser
import urllib2
import urllib
import platform

try:
import requests
use_requests = True
except ImportError:
print "Requests module not found on system. I'll revert so this will work, but you probably should install "
print "requests to bypass this in the future (ie. pip install requests)"
print '''Requests module not found on system. I'll revert so this will work, but you probably should install
requests to bypass this in the future (i.e. pip install requests)'''
use_requests = False

use_win32api = False
if platform.system() == 'Windows':
try:
import win32api
use_win32api = True
except ImportError:
print '''The win32api module was not found on this system. While it's fine to run without it, you're
running a Windows-based OS, so it would benefit you to install it. It enables ComicRN to better
work with file paths beyond the 260 character limit. Run "pip install pypiwin32".'''

apc_version = "2.04"

def processEpisode(dirName, nzbName=None):

@ -18,6 +30,8 @@ def processEpisode(dirName, nzbName=None):
return processIssue(dirName, nzbName)

def processIssue(dirName, nzbName=None, failed=False, comicrn_version=None):
if use_win32api is True:
dirName = win32api.GetShortPathName(dirName)

config = ConfigParser.ConfigParser()
configFilename = os.path.join(os.path.dirname(sys.argv[0]), "autoProcessComics.cfg")
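win32api.GetShortPathName returns the DOS 8.3 alias of an existing path, which sidesteps the 260-character MAX_PATH limit in downstream file handling. A guarded sketch of the same fallback (runs anywhere; the short name is only produced on Windows with pypiwin32 installed):

import platform

def shorten_path(dirName):
    # Fall back to the original path on non-Windows systems or
    # when win32api is unavailable.
    if platform.system() == 'Windows':
        try:
            import win32api
            return win32api.GetShortPathName(dirName)
        except ImportError:
            pass
    return dirName

print(shorten_path('C:\\Program Files\\Mylar'))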