Mirror of https://github.com/evilhero/mylar
Commit 6ab1715c53: Merge branch 'development'

@@ -220,10 +220,14 @@
<input type="radio" style="vertical-align: bottom; margin: 3px; margin-top: -3px;" name="fuzzy_year" value="0" ${comicConfig['fuzzy_year0']} /> Default <input type="radio" style="vertical-align: bottom; margin: 3px; margin-top: -3px;" name="fuzzy_year" value="1" ${comicConfig['fuzzy_year1']} /> Year Removal <input type="radio" style="vertical-align: bottom; margin: 3px; margin-top: -3px;" name="fuzzy_year" value="2" ${comicConfig['fuzzy_year2']} /> Fuzzy the Year
|
||||
</div>
|
||||
|
||||
%if mylar.CONFIG.ENABLE_32P and mylar.CONFIG.MODE_32P == 1:
|
||||
%if all([mylar.CONFIG.ENABLE_32P is True, mylar.CONFIG.ENABLE_TORRENT_SEARCH is True, mylar.CONFIG.MODE_32P == 1]):
|
||||
<div class="row checkbox right clearfix">
|
||||
<input type="checkbox" style="vertical-align: bottom; margin: 3px; margin-top: -3px;" name="allow_packs" value="1" ${comicConfig['allow_packs']} /><label>Enable Pack Downloads<a href="#" title="Will allow downloading of multiple issues in one file (packs), but will search individual issues first"><img src="interfaces/default/images/info32.png" height="16" alt="" /></a></label>
|
||||
</div>
|
||||
<div class="row">
|
||||
<label>Manual specify series ID for 32p</label>
|
||||
<input type="text" name="torrentid_32p" placeholder="torrent id #" value="${comicConfig['torrentid_32p']}" size="40">
|
||||
</div>
|
||||
%endif
|
||||
</fieldset>
|
||||
<input type="submit" style="float:right;" value="Update"/>
|
||||
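The template change above tightens when the 32P pack options are shown: pack downloads and the manual series-ID field now require torrent searching to be enabled as well, not just 32P itself. A minimal sketch of the new gate, using the config names from the hunk:

    show_32p_pack_options = all([
        mylar.CONFIG.ENABLE_32P is True,
        mylar.CONFIG.ENABLE_TORRENT_SEARCH is True,
        mylar.CONFIG.MODE_32P == 1,
    ])

The torrentid_32p field lets a user pin a series to a known 32P series id, which the auth32p changes further down use to skip the name-based lookup.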

@@ -104,7 +104,15 @@
${weekly['COMIC']}
%endif
%else:
<a href="comicDetails?ComicID=${weekly['COMICID']}">${weekly['COMIC']}</a>
%if weekly['HAVEIT'] == 'OneOff':
%if all([weekly['COMICID'] != '', weekly['COMICID'] is not None]):
<a href="${weekly['LINK']}" target="_blank">${weekly['COMIC']}</a>
%else:
${weekly['COMIC']}
%endif
%else:
<a href="comicDetails?ComicID=${weekly['COMICID']}">${weekly['COMIC']}</a>
%endif
%endif
%if weekly['VOLUME'] is not None:
 V${weekly['VOLUME']}

@@ -21,8 +21,8 @@ http://www.crummy.com/software/BeautifulSoup/bs4/doc/
# found in the LICENSE file.

__author__ = "Leonard Richardson (leonardr@segfault.org)"
__version__ = "4.5.1"
__copyright__ = "Copyright (c) 2004-2016 Leonard Richardson"
__version__ = "4.6.0"
__copyright__ = "Copyright (c) 2004-2017 Leonard Richardson"
__license__ = "MIT"

__all__ = ['BeautifulSoup']

@ -82,7 +82,7 @@ class BeautifulSoup(Tag):
|
|||
|
||||
ASCII_SPACES = '\x20\x0a\x09\x0c\x0d'
|
||||
|
||||
NO_PARSER_SPECIFIED_WARNING = "No parser was explicitly specified, so I'm using the best available %(markup_type)s parser for this system (\"%(parser)s\"). This usually isn't a problem, but if you run this code on another system, or in a different virtual environment, it may use a different parser and behave differently.\n\nThe code that caused this warning is on line %(line_number)s of the file %(filename)s. To get rid of this warning, change code that looks like this:\n\n BeautifulSoup([your markup])\n\nto this:\n\n BeautifulSoup([your markup], \"%(parser)s\")\n"
|
||||
NO_PARSER_SPECIFIED_WARNING = "No parser was explicitly specified, so I'm using the best available %(markup_type)s parser for this system (\"%(parser)s\"). This usually isn't a problem, but if you run this code on another system, or in a different virtual environment, it may use a different parser and behave differently.\n\nThe code that caused this warning is on line %(line_number)s of the file %(filename)s. To get rid of this warning, change code that looks like this:\n\n BeautifulSoup(YOUR_MARKUP})\n\nto this:\n\n BeautifulSoup(YOUR_MARKUP, \"%(parser)s\")\n"
|
||||
|
||||
def __init__(self, markup="", features=None, builder=None,
|
||||
parse_only=None, from_encoding=None, exclude_encodings=None,
|
||||
|
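The reworded NO_PARSER_SPECIFIED_WARNING still carries the same advice: name the parser explicitly so behaviour does not change between systems. A quick illustration using the standard Beautiful Soup API:

    from bs4 import BeautifulSoup

    soup = BeautifulSoup("<p>hello</p>")                 # emits the warning above
    soup = BeautifulSoup("<p>hello</p>", "html.parser")  # explicit parser, no warning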
@ -215,8 +215,8 @@ class BeautifulSoup(Tag):
|
|||
markup = markup.encode("utf8")
|
||||
warnings.warn(
|
||||
'"%s" looks like a filename, not markup. You should'
|
||||
'probably open this file and pass the filehandle into'
|
||||
'Beautiful Soup.' % markup)
|
||||
' probably open this file and pass the filehandle into'
|
||||
' Beautiful Soup.' % markup)
|
||||
self._check_markup_is_url(markup)
|
||||
|
||||
for (self.markup, self.original_encoding, self.declared_html_encoding,
|
||||
|
|
|
@ -232,8 +232,13 @@ class HTMLTreeBuilder(TreeBuilder):
|
|||
"""
|
||||
|
||||
preserve_whitespace_tags = HTMLAwareEntitySubstitution.preserve_whitespace_tags
|
||||
empty_element_tags = set(['br' , 'hr', 'input', 'img', 'meta',
|
||||
'spacer', 'link', 'frame', 'base'])
|
||||
empty_element_tags = set([
|
||||
# These are from HTML5.
|
||||
'area', 'base', 'br', 'col', 'embed', 'hr', 'img', 'input', 'keygen', 'link', 'menuitem', 'meta', 'param', 'source', 'track', 'wbr',
|
||||
|
||||
# These are from HTML4, removed in HTML5.
|
||||
'spacer', 'frame'
|
||||
])
|
||||
|
||||
# The HTML standard defines these attributes as containing a
|
||||
# space-separated list of values, not a single value. That is,
|
||||
|
|
|
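empty_element_tags is rebuilt from the HTML5 void-element list plus the two HTML4 leftovers, so tags such as wbr, source and track are now recognised as self-closing. The new test_empty_element_tags test further down exercises exactly this; a small standalone check looks like:

    from bs4 import BeautifulSoup

    soup = BeautifulSoup("", "html.parser")
    print(soup.new_tag("wbr").is_empty_element)   # True with the expanded HTML5 list
    print(soup.new_tag("div").is_empty_element)   # False, div is a normal container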
@ -6,6 +6,7 @@ __all__ = [
|
|||
]
|
||||
|
||||
import warnings
|
||||
import re
|
||||
from bs4.builder import (
|
||||
PERMISSIVE,
|
||||
HTML,
|
||||
|
@ -17,7 +18,10 @@ from bs4.element import (
|
|||
whitespace_re,
|
||||
)
|
||||
import html5lib
|
||||
from html5lib.constants import namespaces
|
||||
from html5lib.constants import (
|
||||
namespaces,
|
||||
prefixes,
|
||||
)
|
||||
from bs4.element import (
|
||||
Comment,
|
||||
Doctype,
|
||||
|
@ -83,7 +87,7 @@ class HTML5TreeBuilder(HTMLTreeBuilder):
|
|||
|
||||
def create_treebuilder(self, namespaceHTMLElements):
|
||||
self.underlying_builder = TreeBuilderForHtml5lib(
|
||||
self.soup, namespaceHTMLElements)
|
||||
namespaceHTMLElements, self.soup)
|
||||
return self.underlying_builder
|
||||
|
||||
def test_fragment_to_document(self, fragment):
|
||||
|
@ -93,8 +97,12 @@ class HTML5TreeBuilder(HTMLTreeBuilder):
|
|||
|
||||
class TreeBuilderForHtml5lib(treebuilder_base.TreeBuilder):
|
||||
|
||||
def __init__(self, soup, namespaceHTMLElements):
|
||||
self.soup = soup
|
||||
def __init__(self, namespaceHTMLElements, soup=None):
|
||||
if soup:
|
||||
self.soup = soup
|
||||
else:
|
||||
from bs4 import BeautifulSoup
|
||||
self.soup = BeautifulSoup("", "html.parser")
|
||||
super(TreeBuilderForHtml5lib, self).__init__(namespaceHTMLElements)
|
||||
|
||||
def documentClass(self):
|
||||
|
@ -117,7 +125,8 @@ class TreeBuilderForHtml5lib(treebuilder_base.TreeBuilder):
|
|||
return TextNode(Comment(data), self.soup)
|
||||
|
||||
def fragmentClass(self):
|
||||
self.soup = BeautifulSoup("")
|
||||
from bs4 import BeautifulSoup
|
||||
self.soup = BeautifulSoup("", "html.parser")
|
||||
self.soup.name = "[document_fragment]"
|
||||
return Element(self.soup, self.soup, None)
|
||||
|
||||
|
@ -131,6 +140,56 @@ class TreeBuilderForHtml5lib(treebuilder_base.TreeBuilder):
|
|||
def getFragment(self):
|
||||
return treebuilder_base.TreeBuilder.getFragment(self).element
|
||||
|
||||
def testSerializer(self, element):
|
||||
from bs4 import BeautifulSoup
|
||||
rv = []
|
||||
doctype_re = re.compile(r'^(.*?)(?: PUBLIC "(.*?)"(?: "(.*?)")?| SYSTEM "(.*?)")?$')
|
||||
|
||||
def serializeElement(element, indent=0):
|
||||
if isinstance(element, BeautifulSoup):
|
||||
pass
|
||||
if isinstance(element, Doctype):
|
||||
m = doctype_re.match(element)
|
||||
if m:
|
||||
name = m.group(1)
|
||||
if m.lastindex > 1:
|
||||
publicId = m.group(2) or ""
|
||||
systemId = m.group(3) or m.group(4) or ""
|
||||
rv.append("""|%s<!DOCTYPE %s "%s" "%s">""" %
|
||||
(' ' * indent, name, publicId, systemId))
|
||||
else:
|
||||
rv.append("|%s<!DOCTYPE %s>" % (' ' * indent, name))
|
||||
else:
|
||||
rv.append("|%s<!DOCTYPE >" % (' ' * indent,))
|
||||
elif isinstance(element, Comment):
|
||||
rv.append("|%s<!-- %s -->" % (' ' * indent, element))
|
||||
elif isinstance(element, NavigableString):
|
||||
rv.append("|%s\"%s\"" % (' ' * indent, element))
|
||||
else:
|
||||
if element.namespace:
|
||||
name = "%s %s" % (prefixes[element.namespace],
|
||||
element.name)
|
||||
else:
|
||||
name = element.name
|
||||
rv.append("|%s<%s>" % (' ' * indent, name))
|
||||
if element.attrs:
|
||||
attributes = []
|
||||
for name, value in element.attrs.items():
|
||||
if isinstance(name, NamespacedAttribute):
|
||||
name = "%s %s" % (prefixes[name.namespace], name.name)
|
||||
if isinstance(value, list):
|
||||
value = " ".join(value)
|
||||
attributes.append((name, value))
|
||||
|
||||
for name, value in sorted(attributes):
|
||||
rv.append('|%s%s="%s"' % (' ' * (indent + 2), name, value))
|
||||
indent += 2
|
||||
for child in element.children:
|
||||
serializeElement(child, indent)
|
||||
serializeElement(element, 0)
|
||||
|
||||
return "\n".join(rv)
|
||||
|
||||
class AttrList(object):
|
||||
def __init__(self, element):
|
||||
self.element = element
|
||||
|
@ -182,8 +241,10 @@ class Element(treebuilder_base.Node):
|
|||
child = node
|
||||
elif node.element.__class__ == NavigableString:
|
||||
string_child = child = node.element
|
||||
node.parent = self
|
||||
else:
|
||||
child = node.element
|
||||
node.parent = self
|
||||
|
||||
if not isinstance(child, basestring) and child.parent is not None:
|
||||
node.element.extract()
|
||||
|
@ -221,6 +282,8 @@ class Element(treebuilder_base.Node):
|
|||
most_recent_element=most_recent_element)
|
||||
|
||||
def getAttributes(self):
|
||||
if isinstance(self.element, Comment):
|
||||
return {}
|
||||
return AttrList(self.element)
|
||||
|
||||
def setAttributes(self, attributes):
|
||||
|
@ -248,11 +311,11 @@ class Element(treebuilder_base.Node):
|
|||
attributes = property(getAttributes, setAttributes)
|
||||
|
||||
def insertText(self, data, insertBefore=None):
|
||||
text = TextNode(self.soup.new_string(data), self.soup)
|
||||
if insertBefore:
|
||||
text = TextNode(self.soup.new_string(data), self.soup)
|
||||
self.insertBefore(data, insertBefore)
|
||||
self.insertBefore(text, insertBefore)
|
||||
else:
|
||||
self.appendChild(data)
|
||||
self.appendChild(text)
|
||||
|
||||
def insertBefore(self, node, refNode):
|
||||
index = self.element.index(refNode.element)
|
||||
|
@ -274,6 +337,7 @@ class Element(treebuilder_base.Node):
|
|||
# print "MOVE", self.element.contents
|
||||
# print "FROM", self.element
|
||||
# print "TO", new_parent.element
|
||||
|
||||
element = self.element
|
||||
new_parent_element = new_parent.element
|
||||
# Determine what this tag's next_element will be once all the children
|
||||
|
@ -292,7 +356,6 @@ class Element(treebuilder_base.Node):
|
|||
new_parents_last_descendant_next_element = new_parent_element.next_element
|
||||
|
||||
to_append = element.contents
|
||||
append_after = new_parent_element.contents
|
||||
if len(to_append) > 0:
|
||||
# Set the first child's previous_element and previous_sibling
|
||||
# to elements within the new parent
|
||||
|
@ -309,12 +372,19 @@ class Element(treebuilder_base.Node):
|
|||
if new_parents_last_child:
|
||||
new_parents_last_child.next_sibling = first_child
|
||||
|
||||
# Fix the last child's next_element and next_sibling
|
||||
last_child = to_append[-1]
|
||||
last_child.next_element = new_parents_last_descendant_next_element
|
||||
# Find the very last element being moved. It is now the
|
||||
# parent's last descendant. It has no .next_sibling and
|
||||
# its .next_element is whatever the previous last
|
||||
# descendant had.
|
||||
last_childs_last_descendant = to_append[-1]._last_descendant(False, True)
|
||||
|
||||
last_childs_last_descendant.next_element = new_parents_last_descendant_next_element
|
||||
if new_parents_last_descendant_next_element:
|
||||
new_parents_last_descendant_next_element.previous_element = last_child
|
||||
last_child.next_sibling = None
|
||||
# TODO: This code has no test coverage and I'm not sure
|
||||
# how to get html5lib to go through this path, but it's
|
||||
# just the other side of the previous line.
|
||||
new_parents_last_descendant_next_element.previous_element = last_childs_last_descendant
|
||||
last_childs_last_descendant.next_sibling = None
|
||||
|
||||
for child in to_append:
|
||||
child.parent = new_parent_element
|
||||
@ -52,7 +52,31 @@ from bs4.builder import (
|
|||
HTMLPARSER = 'html.parser'
|
||||
|
||||
class BeautifulSoupHTMLParser(HTMLParser):
|
||||
def handle_starttag(self, name, attrs):
|
||||
|
||||
def __init__(self, *args, **kwargs):
|
||||
HTMLParser.__init__(self, *args, **kwargs)
|
||||
|
||||
# Keep a list of empty-element tags that were encountered
|
||||
# without an explicit closing tag. If we encounter a closing tag
|
||||
# of this type, we'll associate it with one of those entries.
|
||||
#
|
||||
# This isn't a stack because we don't care about the
|
||||
# order. It's a list of closing tags we've already handled and
|
||||
# will ignore, assuming they ever show up.
|
||||
self.already_closed_empty_element = []
|
||||
|
||||
def handle_startendtag(self, name, attrs):
|
||||
# This is only called when the markup looks like
|
||||
# <tag/>.
|
||||
|
||||
# is_startend() tells handle_starttag not to close the tag
|
||||
# just because its name matches a known empty-element tag. We
|
||||
# know that this is an empty-element tag and we want to call
|
||||
# handle_endtag ourselves.
|
||||
tag = self.handle_starttag(name, attrs, handle_empty_element=False)
|
||||
self.handle_endtag(name)
|
||||
|
||||
def handle_starttag(self, name, attrs, handle_empty_element=True):
|
||||
# XXX namespace
|
||||
attr_dict = {}
|
||||
for key, value in attrs:
|
||||
|
@ -62,10 +86,34 @@ class BeautifulSoupHTMLParser(HTMLParser):
|
|||
value = ''
|
||||
attr_dict[key] = value
|
||||
attrvalue = '""'
|
||||
self.soup.handle_starttag(name, None, None, attr_dict)
|
||||
#print "START", name
|
||||
tag = self.soup.handle_starttag(name, None, None, attr_dict)
|
||||
if tag and tag.is_empty_element and handle_empty_element:
|
||||
# Unlike other parsers, html.parser doesn't send separate end tag
|
||||
# events for empty-element tags. (It's handled in
|
||||
# handle_startendtag, but only if the original markup looked like
|
||||
# <tag/>.)
|
||||
#
|
||||
# So we need to call handle_endtag() ourselves. Since we
|
||||
# know the start event is identical to the end event, we
|
||||
# don't want handle_endtag() to cross off any previous end
|
||||
# events for tags of this name.
|
||||
self.handle_endtag(name, check_already_closed=False)
|
||||
|
||||
def handle_endtag(self, name):
|
||||
self.soup.handle_endtag(name)
|
||||
# But we might encounter an explicit closing tag for this tag
|
||||
# later on. If so, we want to ignore it.
|
||||
self.already_closed_empty_element.append(name)
|
||||
|
||||
def handle_endtag(self, name, check_already_closed=True):
|
||||
#print "END", name
|
||||
if check_already_closed and name in self.already_closed_empty_element:
|
||||
# This is a redundant end tag for an empty-element tag.
|
||||
# We've already called handle_endtag() for it, so just
|
||||
# check it off the list.
|
||||
# print "ALREADY CLOSED", name
|
||||
self.already_closed_empty_element.remove(name)
|
||||
else:
|
||||
self.soup.handle_endtag(name)
|
||||
|
||||
def handle_data(self, data):
|
||||
self.soup.handle_data(data)
|
||||
|
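The rewritten html.parser glue synthesises an end-tag event whenever a known empty-element tag is opened, and records the name in already_closed_empty_element so a later explicit closer is quietly dropped instead of being passed through twice. The effect is visible from the public API and mirrors the new test_redundant_empty_element_closing_tags test near the end of the diff:

    from bs4 import BeautifulSoup

    print(BeautifulSoup('<br></br><br></br><br></br>', 'html.parser'))  # <br/><br/><br/>
    print(BeautifulSoup('</br></br></br>', 'html.parser'))              # empty document: stray closers are ignored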
@ -169,6 +217,7 @@ class HTMLParserTreeBuilder(HTMLTreeBuilder):
|
|||
warnings.warn(RuntimeWarning(
|
||||
"Python's built-in HTMLParser cannot parse the given document. This is not a bug in Beautiful Soup. The best solution is to install an external parser (lxml or html5lib), and use Beautiful Soup with that parser. See http://www.crummy.com/software/BeautifulSoup/bs4/doc/#installing-a-parser for help."))
|
||||
raise e
|
||||
parser.already_closed_empty_element = []
|
||||
|
||||
# Patch 3.2 versions of HTMLParser earlier than 3.2.3 to use some
|
||||
# 3.2.3 code. This ensures they don't treat markup like <p></p> as a
|
||||
|
|
|
@ -310,7 +310,7 @@ class EncodingDetector:
|
|||
else:
|
||||
xml_endpos = 1024
|
||||
html_endpos = max(2048, int(len(markup) * 0.05))
|
||||
|
||||
|
||||
declared_encoding = None
|
||||
declared_encoding_match = xml_encoding_re.search(markup, endpos=xml_endpos)
|
||||
if not declared_encoding_match and is_html:
|
||||
|
@ -736,7 +736,7 @@ class UnicodeDammit:
|
|||
0xde : b'\xc3\x9e', # Þ
|
||||
0xdf : b'\xc3\x9f', # ß
|
||||
0xe0 : b'\xc3\xa0', # à
|
||||
0xe1 : b'\xa1', # á
|
||||
0xe1 : b'\xa1', # á
|
||||
0xe2 : b'\xc3\xa2', # â
|
||||
0xe3 : b'\xc3\xa3', # ã
|
||||
0xe4 : b'\xc3\xa4', # ä
|
||||
|
|
|
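The dammit.py hunk header says one line of the Windows-1252 fallback map changed, but the extracted old and new lines both read b'\xa1' for 0xe1, so the actual fix is not visible here. For reference, the UTF-8 encoding of á is the two-byte sequence C3 A1, matching the pattern of the neighbouring entries, so the single-byte value looks like a truncated mapping. A plain Python check of that fact (not taken from the diff):

    assert u'\xe1'.encode('utf-8') == b'\xc3\xa1'   # á encodes to two bytes in UTF-8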
@ -131,8 +131,8 @@ class PageElement(object):
|
|||
# to methods like encode() and prettify():
|
||||
#
|
||||
# "html" - All Unicode characters with corresponding HTML entities
|
||||
# are converted to those entities on output.
|
||||
# "minimal" - Bare ampersands and angle brackets are converted to
|
||||
# are converted to those entities on output.
|
||||
# "minimal" - Bare ampersands and angle brackets are converted to
|
||||
# XML entities: & < >
|
||||
# None - The null formatter. Unicode characters are never
|
||||
# converted to entities. This is not recommended, but it's
|
||||
|
@ -535,9 +535,16 @@ class PageElement(object):
|
|||
return ResultSet(strainer, result)
|
||||
elif isinstance(name, basestring):
|
||||
# Optimization to find all tags with a given name.
|
||||
if name.count(':') == 1:
|
||||
# This is a name with a prefix.
|
||||
prefix, name = name.split(':', 1)
|
||||
else:
|
||||
prefix = None
|
||||
result = (element for element in generator
|
||||
if isinstance(element, Tag)
|
||||
and element.name == name)
|
||||
and element.name == name
|
||||
and (prefix is None or element.prefix == prefix)
|
||||
)
|
||||
return ResultSet(strainer, result)
|
||||
results = ResultSet(strainer)
|
||||
while True:
|
||||
|
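find_all() with a string name can now carry a namespace prefix: 'ns1:tag' is split into prefix and local name and both must match. The new XML smoke test later in the diff covers this; a self-contained example (assumes lxml is installed, since the xml features need it):

    from bs4 import BeautifulSoup

    doc = """<?xml version="1.0" encoding="utf-8"?>
    <Document xmlns:ns1="http://example.com/ns1">
      <ns1:tag>foo</ns1:tag>
      <ns1:tag>bar</ns1:tag>
    </Document>"""
    soup = BeautifulSoup(doc, "xml")
    print(len(soup.find_all("tag")))        # 2 - a bare name still matches any prefix
    print(len(soup.find_all("ns1:tag")))    # 2 - a prefixed name now matches as well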
@ -863,7 +870,7 @@ class Tag(PageElement):
|
|||
Its contents are a copy of the old Tag's contents.
|
||||
"""
|
||||
clone = type(self)(None, self.builder, self.name, self.namespace,
|
||||
self.nsprefix, self.attrs, is_xml=self._is_xml)
|
||||
self.prefix, self.attrs, is_xml=self._is_xml)
|
||||
for attr in ('can_be_empty_element', 'hidden'):
|
||||
setattr(clone, attr, getattr(self, attr))
|
||||
for child in self.contents:
|
||||
|
@ -985,6 +992,13 @@ class Tag(PageElement):
|
|||
attribute."""
|
||||
return self.attrs.get(key, default)
|
||||
|
||||
def get_attribute_list(self, key, default=None):
|
||||
"""The same as get(), but always returns a list."""
|
||||
value = self.get(key, default)
|
||||
if not isinstance(value, list):
|
||||
value = [value]
|
||||
return value
|
||||
|
||||
def has_attr(self, key):
|
||||
return key in self.attrs
|
||||
|
||||
|
@ -1698,7 +1712,7 @@ class SoupStrainer(object):
|
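get_attribute_list() is a small convenience over get(): it always hands back a list, whether or not the attribute is one of the multi-valued (CDATA-list) ones. Mirroring the new test_get_attribute_list test further down:

    from bs4 import BeautifulSoup

    soup = BeautifulSoup("<a id='abc def' class='foo bar'>", "html.parser")
    print(soup.a.get("id"))                    # 'abc def' - id is a single-valued attribute
    print(soup.a.get_attribute_list("id"))     # ['abc def']
    print(soup.a.get_attribute_list("class"))  # ['foo', 'bar'] - already list-valued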
|||
"I don't know how to match against a %s" % markup.__class__)
|
||||
return found
|
||||
|
||||
def _matches(self, markup, match_against):
|
||||
def _matches(self, markup, match_against, already_tried=None):
|
||||
# print u"Matching %s against %s" % (markup, match_against)
|
||||
result = False
|
||||
if isinstance(markup, list) or isinstance(markup, tuple):
|
||||
|
@ -1713,7 +1727,7 @@ class SoupStrainer(object):
|
|||
if self._matches(' '.join(markup), match_against):
|
||||
return True
|
||||
return False
|
||||
|
||||
|
||||
if match_against is True:
|
||||
# True matches any non-None value.
|
||||
return markup is not None
|
||||
|
@ -1723,6 +1737,7 @@ class SoupStrainer(object):
|
|||
|
||||
# Custom callables take the tag as an argument, but all
|
||||
# other ways of matching match the tag name as a string.
|
||||
original_markup = markup
|
||||
if isinstance(markup, Tag):
|
||||
markup = markup.name
|
||||
|
||||
|
@ -1733,18 +1748,51 @@ class SoupStrainer(object):
|
|||
# None matches None, False, an empty string, an empty list, and so on.
|
||||
return not match_against
|
||||
|
||||
if isinstance(match_against, unicode):
|
||||
if (hasattr(match_against, '__iter__')
|
||||
and not isinstance(match_against, basestring)):
|
||||
# We're asked to match against an iterable of items.
|
||||
# The markup must be match at least one item in the
|
||||
# iterable. We'll try each one in turn.
|
||||
#
|
||||
# To avoid infinite recursion we need to keep track of
|
||||
# items we've already seen.
|
||||
if not already_tried:
|
||||
already_tried = set()
|
||||
for item in match_against:
|
||||
if item.__hash__:
|
||||
key = item
|
||||
else:
|
||||
key = id(item)
|
||||
if key in already_tried:
|
||||
continue
|
||||
else:
|
||||
already_tried.add(key)
|
||||
if self._matches(original_markup, item, already_tried):
|
||||
return True
|
||||
else:
|
||||
return False
|
||||
|
||||
# Beyond this point we might need to run the test twice: once against
|
||||
# the tag's name and once against its prefixed name.
|
||||
match = False
|
||||
|
||||
if not match and isinstance(match_against, unicode):
|
||||
# Exact string match
|
||||
return markup == match_against
|
||||
match = markup == match_against
|
||||
|
||||
if hasattr(match_against, 'match'):
|
||||
if not match and hasattr(match_against, 'search'):
|
||||
# Regexp match
|
||||
return match_against.search(markup)
|
||||
|
||||
if hasattr(match_against, '__iter__'):
|
||||
# The markup must be an exact match against something
|
||||
# in the iterable.
|
||||
return markup in match_against
|
||||
if (not match
|
||||
and isinstance(original_markup, Tag)
|
||||
and original_markup.prefix):
|
||||
# Try the whole thing again with the prefixed tag name.
|
||||
return self._matches(
|
||||
original_markup.prefix + ':' + original_markup.name, match_against
|
||||
)
|
||||
|
||||
return match
|
||||
|
||||
|
||||
class ResultSet(list):
|
||||
|
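_matches() is restructured: iterables of candidates are walked with an already_tried set so a self-referencing iterable cannot recurse forever, regular expressions only need a .search method, and when nothing matched the comparison is retried against the tag's prefixed name (prefix:name). For the common cases the observable behaviour is unchanged, for example:

    import re
    from bs4 import BeautifulSoup

    soup = BeautifulSoup("<b>one</b><i>two</i><u>three</u>", "html.parser")
    print([t.name for t in soup.find_all(re.compile("^[bi]$"))])  # ['b', 'i'] - regex name match
    print([t.name for t in soup.find_all(["b", "u"])])            # ['b', 'u'] - iterable of names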
@ -1753,3 +1801,8 @@ class ResultSet(list):
|
|||
def __init__(self, source, result=()):
|
||||
super(ResultSet, self).__init__(result)
|
||||
self.source = source
|
||||
|
||||
def __getattr__(self, key):
|
||||
raise AttributeError(
|
||||
"ResultSet object has no attribute '%s'. You're probably treating a list of items like a single item. Did you call find_all() when you meant to call find()?" % key
|
||||
)
|
||||
@ -69,6 +69,18 @@ class HTMLTreeBuilderSmokeTest(object):
|
|||
markup in these tests, there's not much room for interpretation.
|
||||
"""
|
||||
|
||||
def test_empty_element_tags(self):
|
||||
"""Verify that all HTML4 and HTML5 empty element (aka void element) tags
|
||||
are handled correctly.
|
||||
"""
|
||||
for name in [
|
||||
'area', 'base', 'br', 'col', 'embed', 'hr', 'img', 'input', 'keygen', 'link', 'menuitem', 'meta', 'param', 'source', 'track', 'wbr',
|
||||
'spacer', 'frame'
|
||||
]:
|
||||
soup = self.soup("")
|
||||
new_tag = soup.new_tag(name)
|
||||
self.assertEqual(True, new_tag.is_empty_element)
|
||||
|
||||
def test_pickle_and_unpickle_identity(self):
|
||||
# Pickling a tree, then unpickling it, yields a tree identical
|
||||
# to the original.
|
||||
|
@ -330,6 +342,13 @@ Hello, world!
|
|||
self.assertEqual("p", soup.p.name)
|
||||
self.assertConnectedness(soup)
|
||||
|
||||
def test_empty_element_tags(self):
|
||||
"""Verify consistent handling of empty-element tags,
|
||||
no matter how they come in through the markup.
|
||||
"""
|
||||
self.assertSoupEquals('<br/><br/><br/>', "<br/><br/><br/>")
|
||||
self.assertSoupEquals('<br /><br /><br />', "<br/><br/><br/>")
|
||||
|
||||
def test_head_tag_between_head_and_body(self):
|
||||
"Prevent recurrence of a bug in the html5lib treebuilder."
|
||||
content = """<html><head></head>
|
||||
|
@ -669,6 +688,40 @@ class XMLTreeBuilderSmokeTest(object):
|
|||
soup = self.soup(markup)
|
||||
self.assertEqual(unicode(soup.foo), markup)
|
||||
|
||||
def test_find_by_prefixed_name(self):
|
||||
doc = """<?xml version="1.0" encoding="utf-8"?>
|
||||
<Document xmlns="http://example.com/ns0"
|
||||
xmlns:ns1="http://example.com/ns1"
|
||||
xmlns:ns2="http://example.com/ns2"
|
||||
<ns1:tag>foo</ns1:tag>
|
||||
<ns1:tag>bar</ns1:tag>
|
||||
<ns2:tag key="value">baz</ns2:tag>
|
||||
</Document>
|
||||
"""
|
||||
soup = self.soup(doc)
|
||||
|
||||
# There are three <tag> tags.
|
||||
self.assertEqual(3, len(soup.find_all('tag')))
|
||||
|
||||
# But two of them are ns1:tag and one of them is ns2:tag.
|
||||
self.assertEqual(2, len(soup.find_all('ns1:tag')))
|
||||
self.assertEqual(1, len(soup.find_all('ns2:tag')))
|
||||
|
||||
self.assertEqual(1, len(soup.find_all('ns2:tag', key='value')))
|
||||
self.assertEqual(3, len(soup.find_all(['ns1:tag', 'ns2:tag'])))
|
||||
|
||||
def test_copy_tag_preserves_namespace(self):
|
||||
xml = """<?xml version="1.0" encoding="UTF-8" standalone="yes"?>
|
||||
<w:document xmlns:w="http://example.com/ns0"/>"""
|
||||
|
||||
soup = self.soup(xml)
|
||||
tag = soup.document
|
||||
duplicate = copy.copy(tag)
|
||||
|
||||
# The two tags have the same namespace prefix.
|
||||
self.assertEqual(tag.prefix, duplicate.prefix)
|
||||
|
||||
|
||||
class HTML5TreeBuilderSmokeTest(HTMLTreeBuilderSmokeTest):
|
||||
"""Smoke test for a tree builder that supports HTML5."""
|
||||
|
||||
|
|
|
@ -95,6 +95,22 @@ class HTML5LibBuilderSmokeTest(SoupTest, HTML5TreeBuilderSmokeTest):
|
|||
assert space1.next_element is tbody1
|
||||
assert tbody2.next_element is space2
|
||||
|
||||
def test_reparented_markup_containing_children(self):
|
||||
markup = '<div><a>aftermath<p><noscript>target</noscript>aftermath</a></p></div>'
|
||||
soup = self.soup(markup)
|
||||
noscript = soup.noscript
|
||||
self.assertEqual("target", noscript.next_element)
|
||||
target = soup.find(string='target')
|
||||
|
||||
# The 'aftermath' string was duplicated; we want the second one.
|
||||
final_aftermath = soup.find_all(string='aftermath')[-1]
|
||||
|
||||
# The <noscript> tag was moved beneath a copy of the <a> tag,
|
||||
# but the 'target' string within is still connected to the
|
||||
# (second) 'aftermath' string.
|
||||
self.assertEqual(final_aftermath, target.next_element)
|
||||
self.assertEqual(target, final_aftermath.previous_element)
|
||||
|
||||
def test_processing_instruction(self):
|
||||
"""Processing instructions become comments."""
|
||||
markup = b"""<?PITarget PIContent?>"""
|
||||
|
@ -107,3 +123,8 @@ class HTML5LibBuilderSmokeTest(SoupTest, HTML5TreeBuilderSmokeTest):
|
|||
a1, a2 = soup.find_all('a')
|
||||
self.assertEqual(a1, a2)
|
||||
assert a1 is not a2
|
||||
|
||||
def test_foster_parenting(self):
|
||||
markup = b"""<table><td></tbody>A"""
|
||||
soup = self.soup(markup)
|
||||
self.assertEqual(u"<body>A<table><tbody><tr><td></td></tr></tbody></table></body>", soup.body.decode())
|
||||
|
|
|
@ -29,4 +29,6 @@ class HTMLParserTreeBuilderSmokeTest(SoupTest, HTMLTreeBuilderSmokeTest):
|
|||
loaded = pickle.loads(dumped)
|
||||
self.assertTrue(isinstance(loaded.builder, type(tree.builder)))
|
||||
|
||||
|
||||
def test_redundant_empty_element_closing_tags(self):
|
||||
self.assertSoupEquals('<br></br><br></br><br></br>', "<br/><br/><br/>")
|
||||
self.assertSoupEquals('</br></br></br>', "")
|
||||
|
|
|
@ -1,3 +1,4 @@
|
|||
|
||||
# -*- coding: utf-8 -*-
|
||||
"""Tests for Beautiful Soup's tree traversal methods.
|
||||
|
||||
|
@ -234,6 +235,7 @@ class TestFindAllByName(TreeTest):
|
|||
self.assertEqual('1', r3.string)
|
||||
self.assertEqual('3', r4.string)
|
||||
|
||||
|
||||
class TestFindAllByAttribute(TreeTest):
|
||||
|
||||
def test_find_all_by_attribute_name(self):
|
||||
|
@ -1284,6 +1286,10 @@ class TestCDAtaListAttributes(SoupTest):
|
|||
soup = self.soup("<a class='foo\tbar'>")
|
||||
self.assertEqual(b'<a class="foo bar"></a>', soup.a.encode())
|
||||
|
||||
def test_get_attribute_list(self):
|
||||
soup = self.soup("<a id='abc def'>")
|
||||
self.assertEqual(['abc def'], soup.a.get_attribute_list('id'))
|
||||
|
||||
def test_accept_charset(self):
|
||||
soup = self.soup('<form accept-charset="ISO-8859-1 UTF-8">')
|
||||
self.assertEqual(['ISO-8859-1', 'UTF-8'], soup.form['accept-charset'])
|
||||
|
|
|
@ -138,7 +138,7 @@ DOWNLOAD_APIKEY = None
|
|||
APILOCK = False
|
||||
CMTAGGER_PATH = None
|
||||
STATIC_COMICRN_VERSION = "1.01"
|
||||
STATIC_APC_VERSION = "2.02"
|
||||
STATIC_APC_VERSION = "2.04"
|
||||
SAB_PARAMS = None
|
||||
COMICINFO = []
|
||||
SCHED = BackgroundScheduler({
|
||||
|
@ -412,6 +412,7 @@ def start():
|
|||
SCHED.add_job(func=ws.run, id='weekly', name='Weekly Pullist', next_run_time=weekly_diff, trigger=IntervalTrigger(hours=weektimer, minutes=0, timezone='UTC'))
|
||||
|
||||
#initiate startup rss feeds for torrents/nzbs here...
|
||||
rs = rsscheckit.tehMain()
|
||||
if CONFIG.ENABLE_RSS:
|
||||
logger.info('[RSS-FEEDS] Initiating startup-RSS feed checks.')
|
||||
if SCHED_RSS_LAST is not None:
|
||||
|
@ -419,7 +420,6 @@ def start():
|
|||
logger.info('[RSS-FEEDS] RSS last run @ %s' % datetime.datetime.utcfromtimestamp(rss_timestamp))
|
||||
else:
|
||||
rss_timestamp = helpers.utctimestamp() + (int(CONFIG.RSS_CHECKINTERVAL) *60)
|
||||
rs = rsscheckit.tehMain()
|
||||
duration_diff = (helpers.utctimestamp() - rss_timestamp)/60
|
||||
if duration_diff >= int(CONFIG.RSS_CHECKINTERVAL):
|
||||
SCHED.add_job(func=rs.run, id='rss', name='RSS Feeds', args=[True], next_run_time=datetime.datetime.utcnow(), trigger=IntervalTrigger(hours=0, minutes=int(CONFIG.RSS_CHECKINTERVAL), timezone='UTC'))
|
||||
|
@ -427,6 +427,9 @@ def start():
|
|||
rss_diff = datetime.datetime.utcfromtimestamp(helpers.utctimestamp() + (int(CONFIG.RSS_CHECKINTERVAL) * 60) - (duration_diff * 60))
|
||||
logger.fdebug('[RSS-FEEDS] Scheduling next run for @ %s every %s minutes' % (rss_diff, CONFIG.RSS_CHECKINTERVAL))
|
||||
SCHED.add_job(func=rs.run, id='rss', name='RSS Feeds', args=[True], next_run_time=rss_diff, trigger=IntervalTrigger(hours=0, minutes=int(CONFIG.RSS_CHECKINTERVAL), timezone='UTC'))
|
||||
#else:
|
||||
# SCHED.add_job(func=rs.run, id='rss', name='RSS Feeds', args=[True], trigger=IntervalTrigger(hours=0, minutes=int(CONFIG.RSS_CHECKINTERVAL), timezone='UTC'))
|
||||
# SCHED.pause_job('rss')
|
||||
|
||||
if CONFIG.CHECK_GITHUB:
|
||||
vs = versioncheckit.CheckVersion()
|
||||
|
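The scheduler block above re-arms the RSS job relative to the last completed run rather than simply "now plus interval". A simplified sketch of the arithmetic, with names borrowed from the hunk (duration_diff is minutes since the last run):

    import datetime

    def next_rss_run(utc_now, last_run_stamp, check_interval_minutes):
        duration_diff = (utc_now - last_run_stamp) / 60
        next_stamp = utc_now + (check_interval_minutes * 60) - (duration_diff * 60)
        # equivalent to last_run_stamp + interval, expressed the way the hunk does it
        return datetime.datetime.utcfromtimestamp(next_stamp)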
@ -468,7 +471,7 @@ def dbcheck():
|
|||
except sqlite3.OperationalError:
|
||||
logger.warn('Unable to update readinglist table to new storyarc table format.')
|
||||
|
||||
c.execute('CREATE TABLE IF NOT EXISTS comics (ComicID TEXT UNIQUE, ComicName TEXT, ComicSortName TEXT, ComicYear TEXT, DateAdded TEXT, Status TEXT, IncludeExtras INTEGER, Have INTEGER, Total INTEGER, ComicImage TEXT, ComicPublisher TEXT, ComicLocation TEXT, ComicPublished TEXT, NewPublish TEXT, LatestIssue TEXT, LatestDate TEXT, Description TEXT, QUALalt_vers TEXT, QUALtype TEXT, QUALscanner TEXT, QUALquality TEXT, LastUpdated TEXT, AlternateSearch TEXT, UseFuzzy TEXT, ComicVersion TEXT, SortOrder INTEGER, DetailURL TEXT, ForceContinuing INTEGER, ComicName_Filesafe TEXT, AlternateFileName TEXT, ComicImageURL TEXT, ComicImageALTURL TEXT, DynamicComicName TEXT, AllowPacks TEXT, Type TEXT, Corrected_SeriesYear TEXT)')
|
||||
c.execute('CREATE TABLE IF NOT EXISTS comics (ComicID TEXT UNIQUE, ComicName TEXT, ComicSortName TEXT, ComicYear TEXT, DateAdded TEXT, Status TEXT, IncludeExtras INTEGER, Have INTEGER, Total INTEGER, ComicImage TEXT, ComicPublisher TEXT, ComicLocation TEXT, ComicPublished TEXT, NewPublish TEXT, LatestIssue TEXT, LatestDate TEXT, Description TEXT, QUALalt_vers TEXT, QUALtype TEXT, QUALscanner TEXT, QUALquality TEXT, LastUpdated TEXT, AlternateSearch TEXT, UseFuzzy TEXT, ComicVersion TEXT, SortOrder INTEGER, DetailURL TEXT, ForceContinuing INTEGER, ComicName_Filesafe TEXT, AlternateFileName TEXT, ComicImageURL TEXT, ComicImageALTURL TEXT, DynamicComicName TEXT, AllowPacks TEXT, Type TEXT, Corrected_SeriesYear TEXT, TorrentID_32P TEXT, LatestIssueID TEXT)')
|
||||
c.execute('CREATE TABLE IF NOT EXISTS issues (IssueID TEXT, ComicName TEXT, IssueName TEXT, Issue_Number TEXT, DateAdded TEXT, Status TEXT, Type TEXT, ComicID TEXT, ArtworkURL Text, ReleaseDate TEXT, Location TEXT, IssueDate TEXT, Int_IssueNumber INT, ComicSize TEXT, AltIssueNumber TEXT, IssueDate_Edit TEXT, ImageURL TEXT, ImageURL_ALT TEXT)')
|
||||
c.execute('CREATE TABLE IF NOT EXISTS snatched (IssueID TEXT, ComicName TEXT, Issue_Number TEXT, Size INTEGER, DateAdded TEXT, Status TEXT, FolderName TEXT, ComicID TEXT, Provider TEXT, Hash TEXT, crc TEXT)')
|
||||
c.execute('CREATE TABLE IF NOT EXISTS upcoming (ComicName TEXT, IssueNumber TEXT, ComicID TEXT, IssueID TEXT, IssueDate TEXT, Status TEXT, DisplayComicName TEXT)')
|
||||
|
@ -589,6 +592,16 @@ def dbcheck():
|
|||
except sqlite3.OperationalError:
|
||||
c.execute('ALTER TABLE comics ADD COLUMN Corrected_SeriesYear TEXT')
|
||||
|
||||
try:
|
||||
c.execute('SELECT TorrentID_32P from comics')
|
||||
except sqlite3.OperationalError:
|
||||
c.execute('ALTER TABLE comics ADD COLUMN TorrentID_32P TEXT')
|
||||
|
||||
try:
|
||||
c.execute('SELECT LatestIssueID from comics')
|
||||
except sqlite3.OperationalError:
|
||||
c.execute('ALTER TABLE comics ADD COLUMN LatestIssueID TEXT')
|
||||
|
||||
try:
|
||||
c.execute('SELECT DynamicComicName from comics')
|
||||
if CONFIG.DYNAMIC_UPDATE < 3:
|
||||
|
|
|
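dbcheck() grows two more probe-and-ALTER migrations, for TorrentID_32P and LatestIssueID, following the same idiom as the existing columns: select the column and add it when sqlite raises OperationalError. A generic sketch of that idiom (standard-library sqlite3 only; the helper name is illustrative, not from the codebase):

    import sqlite3

    def add_column_if_missing(conn, table, column, coltype="TEXT"):
        c = conn.cursor()
        try:
            c.execute("SELECT %s FROM %s" % (column, table))   # probe: raises if the column is absent
        except sqlite3.OperationalError:
            c.execute("ALTER TABLE %s ADD COLUMN %s %s" % (table, column, coltype))
        conn.commit()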
@ -169,7 +169,7 @@ class Api(object):
|
|||
if mylar.CONFIG.ANNUALS_ON:
|
||||
annuals = self._dic_from_query('SELECT * FROM annuals WHERE ComicID="' + self.id + '"')
|
||||
else:
|
||||
annuals = None
|
||||
annuals = []
|
||||
|
||||
self.data = {'comic': comic, 'issues': issues, 'annuals': annuals}
|
||||
return
|
||||
|
|
|
@ -170,23 +170,28 @@ class info32p(object):
|
|||
def searchit(self):
|
||||
chk_id = None
|
||||
#logger.info('searchterm: %s' % self.searchterm)
|
||||
series_search = self.searchterm['series']
|
||||
#self.searchterm is a tuple containing series name, issue number, volume and publisher.
|
||||
series_search = self.searchterm['series']
|
||||
issue_search = self.searchterm['issue']
|
||||
volume_search = self.searchterm['volume']
|
||||
|
||||
if series_search.startswith('0-Day Comics Pack'):
|
||||
#issue = '21' = WED, #volume='2' = 2nd month
|
||||
torrentid = 22247 #2018
|
||||
issue_search = self.searchterm['issue'] #'21' #Wed
|
||||
volume_search = self.searchterm['volume'] #'2' #2nd month
|
||||
publisher_search = None #'2' #2nd month
|
||||
comic_id = None
|
||||
elif self.searchterm['torrentid_32p'] is not None:
|
||||
torrentid = self.searchterm['torrentid_32p']
|
||||
comic_id = self.searchterm['id']
|
||||
publisher_search = self.searchterm['publisher']
|
||||
else:
|
||||
torrentid = None
|
||||
comic_id = self.searchterm['id']
|
||||
|
||||
annualize = False
|
||||
if 'annual' in series_search.lower():
|
||||
series_search = re.sub(' annual', '', series_search.lower()).strip()
|
||||
annualize = True
|
||||
issue_search = self.searchterm['issue']
|
||||
volume_search = self.searchterm['volume']
|
||||
publisher_search = self.searchterm['publisher']
|
||||
spl = [x for x in self.publisher_list if x in publisher_search]
|
||||
for x in spl:
|
||||
|
@ -250,7 +255,7 @@ class info32p(object):
|
|||
pdata = []
|
||||
pubmatch = False
|
||||
|
||||
if series_search.startswith('0-Day Comics Pack'):
|
||||
if any([series_search.startswith('0-Day Comics Pack'), torrentid is not None]):
|
||||
data.append({"id": torrentid,
|
||||
"series": series_search})
|
||||
else:
|
||||
|
@ -308,11 +313,14 @@ class info32p(object):
|
|||
dataset += pdata
|
||||
logger.fdebug(str(len(dataset)) + ' series match the title being searched for on 32P...')
|
||||
|
||||
if all([chk_id is None, not series_search.startswith('0-Day Comics Pack')]) and any([len(data) == 1, len(pdata) == 1]):
|
||||
if all([chk_id is None, not series_search.startswith('0-Day Comics Pack'), self.searchterm['torrentid_32p'] is not None]) and any([len(data) == 1, len(pdata) == 1]):
|
||||
#update the 32p_reference so we avoid doing a url lookup next time
|
||||
helpers.checkthe_id(comic_id, dataset)
|
||||
else:
|
||||
logger.debug('Unable to properly verify reference on 32P - will update the 32P reference point once the issue has been successfully matched against.')
|
||||
if all([not series_search.startswith('0-Day Comics Pack'), self.searchterm['torrentid_32p'] is not None]):
|
||||
pass
|
||||
else:
|
||||
logger.debug('Unable to properly verify reference on 32P - will update the 32P reference point once the issue has been successfully matched against.')
|
||||
|
||||
results32p = []
|
||||
resultlist = {}
|
||||
|
|
|
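searchit() now distinguishes three cases: the 0-Day pack feed, a stored or manually supplied 32P series id (torrentid_32p), and the plain name search. Despite the comment calling self.searchterm a tuple, it is indexed by string keys; the keys below are the ones the hunk reads, with illustrative values only:

    searchterm = {
        'series': 'Some Series',       # series name
        'issue': '21',                 # issue number
        'volume': '2',                 # volume
        'publisher': 'Some Publisher',
        'id': '12345',                 # Mylar/ComicVine comic id
        'torrentid_32p': None,         # set when a 32P series id is already known
    }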
@ -500,6 +500,17 @@ class Config(object):
|
|||
self.provider_sequence()
|
||||
|
||||
if startup is True:
|
||||
if self.LOG_DIR is None:
|
||||
self.LOG_DIR = os.path.join(mylar.DATA_DIR, 'logs')
|
||||
|
||||
if not os.path.exists(self.LOG_DIR):
|
||||
try:
|
||||
os.makedirs(self.LOG_DIR)
|
||||
except OSError:
|
||||
if not mylar.QUIET:
|
||||
self.LOG_DIR = None
|
||||
print('Unable to create the log directory. Logging to screen only.')
|
||||
|
||||
# Start the logger, silence console logging if we need to
|
||||
if logger.LOG_LANG.startswith('en'):
|
||||
logger.initLogger(console=not mylar.QUIET, log_dir=self.LOG_DIR, max_logsize=self.MAX_LOGSIZE, max_logfiles=self.MAX_LOGFILES, loglevel=mylar.LOG_LEVEL)
|
||||
|
|
|
@ -139,7 +139,7 @@ class FileChecker(object):
|
|||
if filename.startswith('.'):
|
||||
continue
|
||||
|
||||
#logger.info('[FILENAME]: ' + filename)
|
||||
logger.debug('[FILENAME]: ' + filename)
|
||||
runresults = self.parseit(self.dir, filename, filedir)
|
||||
if runresults:
|
||||
try:
|
||||
|
|
|
@ -11,37 +11,44 @@ import unicodedata
|
|||
import urllib
|
||||
|
||||
def Startit(searchName, searchIssue, searchYear, ComicVersion, IssDateFix):
|
||||
#searchName = "Uncanny Avengers"
|
||||
#searchIssue = "01"
|
||||
#searchYear = "2012"
|
||||
if searchName.lower().startswith('the '):
|
||||
searchName = searchName[4:]
|
||||
cName = searchName
|
||||
#clean up searchName due to webparse.
|
||||
searchName = searchName.replace("%20", " ")
|
||||
if "," in searchName:
|
||||
searchName = searchName.replace(",", "")
|
||||
#logger.fdebug("name:" + str(searchName))
|
||||
#logger.fdebug("issue:" + str(searchIssue))
|
||||
#logger.fdebug("year:" + str(searchYear))
|
||||
|
||||
#clean up searchName due to webparse/redundant naming that would return too specific of results.
|
||||
commons = ['and', 'the', '&', '-']
|
||||
for x in commons:
|
||||
cnt = 0
|
||||
for m in re.finditer(x, searchName.lower()):
|
||||
cnt +=1
|
||||
tehstart = m.start()
|
||||
tehend = m.end()
|
||||
if any([x == 'the', x == 'and']):
|
||||
if len(searchName) == tehend:
|
||||
tehend =-1
|
||||
if all([tehstart == 0, searchName[tehend] == ' ']) or all([tehstart != 0, searchName[tehstart-1] == ' ', searchName[tehend] == ' ']):
|
||||
searchName = searchName.replace(x, ' ', cnt)
|
||||
else:
|
||||
continue
|
||||
else:
|
||||
searchName = searchName.replace(x, ' ', cnt)
|
||||
|
||||
searchName = re.sub('\s+', ' ', searchName)
|
||||
searchName = re.sub("[\,\:|'%20']", "", searchName).strip()
|
||||
logger.fdebug("searchname: %s" % searchName)
|
||||
logger.fdebug("issue: %s" % searchIssue)
|
||||
logger.fdebug("year: %s" % searchYear)
|
||||
encodeSearch = urllib.quote_plus(searchName)
|
||||
splitSearch = encodeSearch.split(" ")
|
||||
|
||||
joinSearch = "+".join(splitSearch) +"+" +searchIssue
|
||||
searchIsOne = "0" +searchIssue
|
||||
searchIsTwo = "00" +searchIssue
|
||||
|
||||
if mylar.CONFIG.PREFERRED_QUALITY == 1: joinSearch = joinSearch + " .cbr"
|
||||
elif mylar.CONFIG.PREFERRED_QUALITY == 2: joinSearch = joinSearch + " .cbz"
|
||||
if len(searchIssue) == 1:
|
||||
loop = 3
|
||||
elif len(searchIssue) == 2:
|
||||
loop = 2
|
||||
else:
|
||||
loop = 1
|
||||
|
||||
if "-" in searchName:
|
||||
searchName = searchName.replace("-", '((\\s)?[-:])?(\\s)?')
|
||||
|
||||
regexName = searchName.replace(" ", '((\\s)?[-:])?(\\s)?')
|
||||
|
||||
|
||||
#logger.fdebug('searchName:' + searchName)
|
||||
#logger.fdebug('regexName:' + regexName)
|
||||
regexName = searchName.replace(" ", '((\\s)?[-:])?(\\s)?')
|
||||
|
||||
if mylar.CONFIG.USE_MINSIZE:
|
||||
size_constraints = "minsize=" + str(mylar.CONFIG.MINSIZE)
|
||||
|
@ -55,13 +62,29 @@ def Startit(searchName, searchIssue, searchYear, ComicVersion, IssDateFix):
|
|||
max_age = "&age=" + str(mylar.CONFIG.USENET_RETENTION)
|
||||
|
||||
feeds = []
|
||||
feed1 = "http://nzbindex.nl/rss/alt.binaries.comics.dcp/?sort=agedesc&" + str(size_constraints) + str(max_age) + "&dq=%s&max=50&more=1" %joinSearch
|
||||
feeds.append(feedparser.parse("http://nzbindex.nl/rss/alt.binaries.comics.dcp/?sort=agedesc&" + str(size_constraints) + str(max_age) + "&dq=%s&max=50&more=1" %joinSearch))
|
||||
time.sleep(3)
|
||||
if mylar.CONFIG.ALTEXPERIMENTAL:
|
||||
feed2 = "http://nzbindex.nl/rss/?dq=%s&g[]=41&g[]=510&sort=agedesc&hidespam=0&max=&more=1" %joinSearch
|
||||
feeds.append(feedparser.parse("http://nzbindex.nl/rss/?dq=%s&g[]=41&g[]=510&sort=agedesc&hidespam=0&max=&more=1" %joinSearch))
|
||||
i = 1
|
||||
while (i <= loop):
|
||||
if i == 1:
|
||||
searchmethod = searchIssue
|
||||
elif i == 2:
|
||||
searchmethod = '0' + searchIssue
|
||||
elif i == 3:
|
||||
searchmethod = '00' + searchIssue
|
||||
else:
|
||||
break
|
||||
logger.fdebug('Now searching experimental for issue number: %s to try and ensure all the bases are covered' % searchmethod)
|
||||
joinSearch = "+".join(splitSearch) + "+" +searchmethod
|
||||
|
||||
if mylar.CONFIG.PREFERRED_QUALITY == 1: joinSearch = joinSearch + " .cbr"
|
||||
elif mylar.CONFIG.PREFERRED_QUALITY == 2: joinSearch = joinSearch + " .cbz"
|
||||
|
||||
feeds.append(feedparser.parse("http://nzbindex.nl/rss/alt.binaries.comics.dcp/?sort=agedesc&" + str(size_constraints) + str(max_age) + "&dq=%s&max=50&more=1" %joinSearch))
|
||||
time.sleep(3)
|
||||
if mylar.CONFIG.ALTEXPERIMENTAL:
|
||||
feeds.append(feedparser.parse("http://nzbindex.nl/rss/?dq=%s&g[]=41&g[]=510&sort=agedesc&hidespam=0&max=&more=1" %joinSearch))
|
||||
time.sleep(3)
|
||||
|
||||
i+=1
|
||||
|
||||
entries = []
|
||||
mres = {}
|
||||
|
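The new loop retries the experimental nzbindex search with zero-padded issue numbers: a one-digit issue is queried as n, 0n and 00n, a two-digit issue gets one padded form, and anything longer is queried as-is. A compact sketch of the variants it walks through:

    def padded_issue_numbers(search_issue):
        loop = 3 if len(search_issue) == 1 else 2 if len(search_issue) == 2 else 1
        return [('0' * i) + search_issue for i in range(loop)]

    print(padded_issue_numbers('5'))    # ['5', '05', '005']
    print(padded_issue_numbers('21'))   # ['21', '021']
    print(padded_issue_numbers('133'))  # ['133']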
@ -76,8 +99,6 @@ def Startit(searchName, searchIssue, searchYear, ComicVersion, IssDateFix):
|
|||
regList = []
|
||||
countUp = 0
|
||||
|
||||
#logger.fdebug(str(totNum) + " results")
|
||||
|
||||
while countUp < totNum:
|
||||
urlParse = feed.entries[countUp].enclosures[0]
|
||||
#keyPair[feed.entries[countUp].title] = feed.entries[countUp].link
|
||||
|
@ -87,8 +108,6 @@ def Startit(searchName, searchIssue, searchYear, ComicVersion, IssDateFix):
|
|||
"length": urlParse["length"],
|
||||
"pubdate": feed.entries[countUp].updated})
|
||||
countUp=countUp +1
|
||||
#logger.fdebug('keypair: ' + str(keyPair))
|
||||
|
||||
|
||||
# thanks to SpammyHagar for spending the time in compiling these regEx's!
|
||||
|
||||
|
@ -105,7 +124,7 @@ def Startit(searchName, searchIssue, searchYear, ComicVersion, IssDateFix):
|
|||
|
||||
regexList=[regEx, regExOne, regExTwo, regExThree, regExFour, regExFive]
|
||||
|
||||
except_list=['releases', 'gold line', 'distribution', '0-day', '0 day']
|
||||
except_list=['releases', 'gold line', 'distribution', '0-day', '0 day', '0day']
|
||||
|
||||
for entry in keyPair:
|
||||
title = entry['title']
|
||||
|
|
|
@ -2075,11 +2075,12 @@ def incr_snatched(ComicID):
|
|||
myDB.upsert("comics", newVal, newCtrl)
|
||||
return
|
||||
|
||||
def duplicate_filecheck(filename, ComicID=None, IssueID=None, StoryArcID=None):
|
||||
def duplicate_filecheck(filename, ComicID=None, IssueID=None, StoryArcID=None, rtnval=None):
|
||||
#filename = the filename in question that's being checked against
|
||||
#comicid = the comicid of the series that's being checked for duplication
|
||||
#issueid = the issueid of the issue that's being checked for duplication
|
||||
#storyarcid = the storyarcid of the issue that's being checked for duplication.
|
||||
#rtnval = the return value of a previous duplicate_filecheck that's re-running against new values
|
||||
#
|
||||
import db
|
||||
myDB = db.DBConnection()
|
||||
|
@ -2124,14 +2125,17 @@ def duplicate_filecheck(filename, ComicID=None, IssueID=None, StoryArcID=None):
|
|||
mylar.updater.dbUpdate(ComicIDList=cid, calledfrom='dupechk')
|
||||
return duplicate_filecheck(filename, ComicID, IssueID, StoryArcID)
|
||||
else:
|
||||
rtnval = {'action': "dont_dupe"}
|
||||
#file is Archived, but no entry exists in the db for the location. Assume Archived, and don't post-process.
|
||||
#quick rescan of files in dir, then rerun the dup check again...
|
||||
mylar.updater.forceRescan(ComicID)
|
||||
chk1 = duplicate_filecheck(filename, ComicID, IssueID, StoryArcID)
|
||||
if chk1['action'] == 'dont_dupe':
|
||||
logger.fdebug('[DUPECHECK] File is Archived but no file can be located within the db at the specified location. Assuming this was a manual archival and will not post-process this issue.')
|
||||
rtnval = chk1
|
||||
if rtnval is not None:
|
||||
if rtnval['action'] == 'dont_dupe':
|
||||
logger.fdebug('[DUPECHECK] File is Archived but no file can be located within the db at the specified location. Assuming this was a manual archival and will not post-process this issue.')
|
||||
return rtnval
|
||||
else:
|
||||
rtnval = {'action': "dont_dupe"}
|
||||
#file is Archived, but no entry exists in the db for the location. Assume Archived, and don't post-process.
|
||||
#quick rescan of files in dir, then rerun the dup check again...
|
||||
mylar.updater.forceRescan(ComicID)
|
||||
chk1 = duplicate_filecheck(filename, ComicID, IssueID, StoryArcID, rtnval)
|
||||
rtnval = chk1
|
||||
else:
|
||||
rtnval = {'action': "dupe_file",
|
||||
'to_dupe': os.path.join(series['ComicLocation'], dupchk['Location'])}
|
||||
|
@ -3357,6 +3361,11 @@ def job_management(write=False, job=None, last_run_completed=None, current_run=N
|
|||
nextrun_date = datetime.datetime.utcfromtimestamp(nextrun_stamp)
|
||||
jobstore.modify(next_run_time=nextrun_date)
|
||||
nextrun_date = nextrun_date.replace(microsecond=0)
|
||||
else:
|
||||
# if the rss is enabled after startup, we have to re-set it up...
|
||||
nextrun_stamp = utctimestamp() + (int(mylar.CONFIG.RSS_CHECKINTERVAL) * 60)
|
||||
nextrun_date = datetime.datetime.utcfromtimestamp(nextrun_stamp)
|
||||
mylar.SCHED_RSS_LAST = last_run_completed
|
||||
|
||||
logger.fdebug('ReScheduled job: %s to %s' % (job, nextrun_date))
|
||||
lastrun_comp = datetime.datetime.utcfromtimestamp(last_run_completed)
|
||||
@ -60,6 +60,7 @@ def addComictoDB(comicid, mismatch=None, pullupd=None, imported=None, ogcname=No
|
|||
comlocation = None
|
||||
oldcomversion = None
|
||||
series_status = 'Loading'
|
||||
lastissueid = None
|
||||
else:
|
||||
if chkwant is not None:
|
||||
logger.fdebug('ComicID: ' + str(comicid) + ' already exists. Not adding from the future pull list at this time.')
|
||||
|
@ -73,6 +74,8 @@ def addComictoDB(comicid, mismatch=None, pullupd=None, imported=None, ogcname=No
|
|||
|
||||
newValueDict = {"Status": "Loading"}
|
||||
comlocation = dbcomic['ComicLocation']
|
||||
lastissueid = dbcomic['LatestIssueID']
|
||||
|
||||
if not latestissueinfo:
|
||||
latestissueinfo = []
|
||||
latestissueinfo.append({"latestiss": dbcomic['LatestIssue'],
|
||||
|
@ -224,12 +227,20 @@ def addComictoDB(comicid, mismatch=None, pullupd=None, imported=None, ogcname=No
|
|||
'$VolumeN': comicVol.upper(),
|
||||
'$Annual': 'Annual'
|
||||
}
|
||||
|
||||
if mylar.CONFIG.FOLDER_FORMAT == '':
|
||||
comlocation = os.path.join(mylar.CONFIG.DESTINATION_DIR, comicdir, " (" + SeriesYear + ")")
|
||||
else:
|
||||
comlocation = os.path.join(mylar.CONFIG.DESTINATION_DIR, helpers.replace_all(chunk_folder_format, values))
|
||||
|
||||
try:
|
||||
if mylar.CONFIG.FOLDER_FORMAT == '':
|
||||
comlocation = os.path.join(mylar.CONFIG.DESTINATION_DIR, comicdir, " (" + SeriesYear + ")")
|
||||
else:
|
||||
comlocation = os.path.join(mylar.CONFIG.DESTINATION_DIR, helpers.replace_all(chunk_folder_format, values))
|
||||
except Exception as e:
|
||||
if 'TypeError' in e:
|
||||
if mylar.CONFIG.DESTINATION_DIR is None:
|
||||
logger.error('[ERROR] %s' % e)
|
||||
logger.error('No Comic Location specified. This NEEDS to be set before anything can be added successfully.')
|
||||
return
|
||||
logger.error('[ERROR] %s' % e)
|
||||
logger.error('Cannot determine Comic Location path properly. Check your Comic Location and Folder Format for any errors.')
|
||||
return
|
||||
|
||||
#comlocation = mylar.CONFIG.DESTINATION_DIR + "/" + comicdir + " (" + comic['ComicYear'] + ")"
|
||||
if mylar.CONFIG.DESTINATION_DIR == "":
|
||||
|
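The new try/except around the comlocation join reports a missing DESTINATION_DIR cleanly, but the guard "if 'TypeError' in e:" tests for the literal string among the exception's args rather than checking the exception's type, so it will rarely be true. A hedged sketch of what appears to be intended, reusing the names from the hunk:

    try:
        comlocation = os.path.join(mylar.CONFIG.DESTINATION_DIR, helpers.replace_all(chunk_folder_format, values))
    except TypeError as e:
        # a TypeError here almost always means DESTINATION_DIR is unset (None)
        logger.error('[ERROR] %s' % e)
        if mylar.CONFIG.DESTINATION_DIR is None:
            logger.error('No Comic Location specified. This NEEDS to be set before anything can be added successfully.')
        else:
            logger.error('Cannot determine Comic Location path properly. Check your Comic Location and Folder Format for any errors.')
        return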
@ -260,23 +271,28 @@ def addComictoDB(comicid, mismatch=None, pullupd=None, imported=None, ogcname=No
|
|||
comicIssues = str(int(comic['ComicIssues']) + 1)
|
||||
|
||||
if mylar.CONFIG.ALTERNATE_LATEST_SERIES_COVERS is False:
|
||||
covercheck = helpers.getImage(comicid, comic['ComicImage'])
|
||||
if covercheck == 'retry':
|
||||
logger.info('Attempting to retrieve alternate comic image for the series.')
|
||||
covercheck = helpers.getImage(comicid, comic['ComicImageALT'])
|
||||
|
||||
PRComicImage = os.path.join('cache', str(comicid) + ".jpg")
|
||||
ComicImage = helpers.replacetheslash(PRComicImage)
|
||||
if os.path.isfile(os.path.join(comlocation, 'cover.jpg')) is True:
|
||||
logger.fdebug('Cover already exists for series. Not redownloading.')
|
||||
else:
|
||||
covercheck = helpers.getImage(comicid, comic['ComicImage'])
|
||||
if covercheck == 'retry':
|
||||
logger.info('Attempting to retrieve alternate comic image for the series.')
|
||||
covercheck = helpers.getImage(comicid, comic['ComicImageALT'])
|
||||
|
||||
#if the comic cover local is checked, save a cover.jpg to the series folder.
|
||||
if mylar.CONFIG.COMIC_COVER_LOCAL and os.path.isdir(comlocation):
|
||||
try:
|
||||
comiclocal = os.path.join(comlocation, 'cover.jpg')
|
||||
shutil.copyfile(os.path.join(mylar.CONFIG.CACHE_DIR, str(comicid) + '.jpg'), comiclocal)
|
||||
if mylar.CONFIG.ENFORCE_PERMS:
|
||||
filechecker.setperms(comiclocal)
|
||||
except IOError as e:
|
||||
logger.error('Unable to save cover (' + str(coverfile) + ') into series directory (' + str(comiclocal) + ') at this time.')
|
||||
PRComicImage = os.path.join('cache', str(comicid) + ".jpg")
|
||||
ComicImage = helpers.replacetheslash(PRComicImage)
|
||||
|
||||
#if the comic cover local is checked, save a cover.jpg to the series folder.
|
||||
if all([mylar.CONFIG.COMIC_COVER_LOCAL is True, os.path.isdir(comlocation) is True]):
|
||||
try:
|
||||
comiclocal = os.path.join(comlocation, 'cover.jpg')
|
||||
shutil.copyfile(os.path.join(mylar.CONFIG.CACHE_DIR, str(comicid) + '.jpg'), comiclocal)
|
||||
if mylar.CONFIG.ENFORCE_PERMS:
|
||||
filechecker.setperms(comiclocal)
|
||||
except IOError as e:
|
||||
logger.error('Unable to save cover (' + str(coverfile) + ') into series directory (' + str(comiclocal) + ') at this time.')
|
||||
else:
|
||||
ComicImage = None
|
||||
|
||||
|
@ -346,36 +362,13 @@ def addComictoDB(comicid, mismatch=None, pullupd=None, imported=None, ogcname=No
|
|||
if anndata:
|
||||
manualAnnual(annchk=anndata)
|
||||
|
||||
#let's download the image...
|
||||
if mylar.CONFIG.ALTERNATE_LATEST_SERIES_COVERS is True:
|
||||
ls = helpers.issuedigits(importantdates['LatestIssue'])
|
||||
imagetopull = myDB.selectone('SELECT issueid from issues where ComicID=? AND Int_IssueNumber=?', [comicid, ls]).fetchone()
|
||||
imageurl = mylar.cv.getComic(comicid, 'image', issueid=imagetopull[0])
|
||||
covercheck = helpers.getImage(comicid, imageurl['image'])
|
||||
if covercheck == 'retry':
|
||||
logger.fdebug('Attempting to retrieve a different comic image for this particular issue.')
|
||||
if imageurl['image_alt'] is not None:
|
||||
covercheck = helpers.getImage(comicid, imageurl['image_alt'])
|
||||
else:
|
||||
if not os.path.isfile(os.path.join(mylar.CACHE_DIR, str(comicid) + '.jpg')):
|
||||
logger.fdebug('Failed to retrieve issue image, possibly because not available. Reverting back to series image.')
|
||||
covercheck = helpers.getImage(comicid, comic['ComicImage'])
|
||||
PRComicImage = os.path.join('cache', str(comicid) + ".jpg")
|
||||
ComicImage = helpers.replacetheslash(PRComicImage)
|
||||
|
||||
#if the comic cover local is checked, save a cover.jpg to the series folder.
|
||||
if mylar.CONFIG.COMIC_COVER_LOCAL and os.path.isdir(comlocation):
|
||||
try:
|
||||
comiclocal = os.path.join(comlocation, 'cover.jpg')
|
||||
shutil.copyfile(os.path.join(mylar.CONFIG.CACHE_DIR, str(comicid) + '.jpg'), comiclocal)
|
||||
if mylar.CONFIG.ENFORCE_PERMS:
|
||||
filechecker.setperms(comiclocal)
|
||||
except IOError as e:
|
||||
logger.error('Unable to save cover into series directory (%s) at this time' % comiclocal)
|
||||
myDB.upsert('comics', {'ComicImage': ComicImage}, {'ComicID': comicid})
|
||||
if all([mylar.CONFIG.ALTERNATE_LATEST_SERIES_COVERS is True, lastissueid != importantdates['LatestIssueID']]):
|
||||
image_it(comicid, importantdates['LatestIssueID'], comlocation, comic['ComicImage'])
|
||||
else:
|
||||
logger.fdebug('no update required - lastissueid [%s] = latestissueid [%s]' % (lastissueid, importantdates['LatestIssueID']))
|
||||
|
||||
if (mylar.CONFIG.CVINFO or (mylar.CONFIG.CV_ONLY and mylar.CONFIG.CVINFO)) and os.path.isdir(comlocation):
|
||||
if not os.path.exists(os.path.join(comlocation, "cvinfo")) or mylar.CONFIG.CV_ONETIMER:
|
||||
if os.path.isfile(os.path.join(comlocation, "cvinfo")) is False:
|
||||
with open(os.path.join(comlocation, "cvinfo"), "w") as text_file:
|
||||
text_file.write(str(comic['ComicURL']))
|
||||
|
||||
|
@ -1086,6 +1079,7 @@ def updateissuedata(comicid, comicname=None, issued=None, comicIssues=None, call
|
|||
#let's start issue #'s at 0 -- thanks to DC for the new 52 reboot! :)
|
||||
latestiss = "0"
|
||||
latestdate = "0000-00-00"
|
||||
latestissueid = None
|
||||
firstiss = "10000000"
|
||||
firstdate = "2099-00-00"
|
||||
#print ("total issues:" + str(iscnt))
|
||||
|
@ -1095,7 +1089,6 @@ def updateissuedata(comicid, comicname=None, issued=None, comicIssues=None, call
|
|||
while (n <= iscnt):
|
||||
try:
|
||||
firstval = issued['issuechoice'][n]
|
||||
#print firstval
|
||||
except IndexError:
|
||||
break
|
||||
try:
|
||||
|
@ -1104,7 +1097,6 @@ def updateissuedata(comicid, comicname=None, issued=None, comicIssues=None, call
|
|||
cleanname = 'None'
|
||||
issid = str(firstval['Issue_ID'])
|
||||
issnum = firstval['Issue_Number']
|
||||
#logger.info("issnum: " + str(issnum))
|
||||
issname = cleanname
|
||||
issdate = str(firstval['Issue_Date'])
|
||||
storedate = str(firstval['Store_Date'])
|
||||
|
@ -1125,7 +1117,7 @@ def updateissuedata(comicid, comicname=None, issued=None, comicIssues=None, call
|
|||
int_issnum = (int(issnum[:-3]) * 1000) + ord('m') + ord('u')
|
||||
elif u'\xbd' in issnum:
|
||||
int_issnum = .5 * 1000
|
||||
logger.info('1/2 issue detected :' + issnum + ' === ' + str(int_issnum))
|
||||
logger.fdebug('1/2 issue detected :' + issnum + ' === ' + str(int_issnum))
|
||||
elif u'\xbc' in issnum:
|
||||
int_issnum = .25 * 1000
|
||||
elif u'\xbe' in issnum:
|
||||
|
@ -1170,7 +1162,7 @@ def updateissuedata(comicid, comicname=None, issued=None, comicIssues=None, call
|
|||
x = float(issnum)
|
||||
#validity check
|
||||
if x < 0:
|
||||
logger.info('I have encountered a negative issue #: ' + str(issnum) + '. Trying to accomodate.')
|
||||
logger.fdebug('I have encountered a negative issue #: ' + str(issnum) + '. Trying to accomodate.')
|
||||
logger.fdebug('value of x is : ' + str(x))
|
||||
int_issnum = (int(x) *1000) - 1
|
||||
else: raise ValueError
|
||||
|
@ -1239,12 +1231,18 @@ def updateissuedata(comicid, comicname=None, issued=None, comicIssues=None, call
|
|||
#logger.fdebug('latest date: ' + str(latestdate))
|
||||
#logger.fdebug('first date: ' + str(firstdate))
|
||||
#logger.fdebug('issue date: ' + str(firstval['Issue_Date']))
|
||||
if firstval['Issue_Date'] >= latestdate:
|
||||
#logger.fdebug('issue date: ' + storedate)
|
||||
if any([firstval['Issue_Date'] >= latestdate, storedate >= latestdate]):
|
||||
#logger.fdebug('date check hit for issue date > latestdate')
|
||||
if int_issnum > helpers.issuedigits(latestiss):
|
||||
#logger.fdebug('assigning latest issue to : ' + str(issnum))
|
||||
latestiss = issnum
|
||||
latestdate = str(firstval['Issue_Date'])
|
||||
latestissueid = issid
|
||||
if firstval['Issue_Date'] != '0000-00-00':
|
||||
latestdate = str(firstval['Issue_Date'])
|
||||
else:
|
||||
latestdate = storedate
|
||||
|
||||
if firstval['Issue_Date'] < firstdate and firstval['Issue_Date'] != '0000-00-00':
|
||||
firstiss = issnum
|
||||
firstdate = str(firstval['Issue_Date'])
|
||||
|
@ -1341,6 +1339,7 @@ def updateissuedata(comicid, comicname=None, issued=None, comicIssues=None, call
"ComicPublished": publishfigure,
"NewPublish": newpublish,
"LatestIssue": latestiss,
"LatestIssueID": latestissueid,
"LatestDate": latestdate,
"LastUpdated": helpers.now()
}

@ -1350,6 +1349,7 @@ def updateissuedata(comicid, comicname=None, issued=None, comicIssues=None, call
importantdates = {}
importantdates['LatestIssue'] = latestiss
importantdates['LatestIssueID'] = latestissueid
importantdates['LatestDate'] = latestdate
importantdates['LastPubDate'] = lastpubdate
importantdates['SeriesStatus'] = 'Active'

@ -1537,3 +1537,32 @@ def annual_check(ComicName, SeriesYear, comicid, issuetype, issuechk, annualslis
logger.fdebug('[IMPORTER-ANNUAL] - Issue count is wrong')

#if this is called from the importer module, return the weeklyissue_check

def image_it(comicid, latestissueid, comlocation, ComicImage):
#alternate series covers download latest image...
imageurl = mylar.cv.getComic(comicid, 'image', issueid=latestissueid)
covercheck = helpers.getImage(comicid, imageurl['image'])
if covercheck == 'retry':
logger.fdebug('Attempting to retrieve a different comic image for this particular issue.')
if imageurl['image_alt'] is not None:
covercheck = helpers.getImage(comicid, imageurl['image_alt'])
else:
if not os.path.isfile(os.path.join(mylar.CACHE_DIR, str(comicid) + '.jpg')):
logger.fdebug('Failed to retrieve issue image, possibly because not available. Reverting back to series image.')
covercheck = helpers.getImage(comicid, ComicImage)
PRComicImage = os.path.join('cache', str(comicid) + ".jpg")
ComicImage = helpers.replacetheslash(PRComicImage)

#if the comic cover local is checked, save a cover.jpg to the series folder.
if all([mylar.CONFIG.COMIC_COVER_LOCAL is True, os.path.isdir(comlocation) is True]):
try:
comiclocal = os.path.join(comlocation, 'cover.jpg')
shutil.copyfile(os.path.join(mylar.CONFIG.CACHE_DIR, str(comicid) + '.jpg'), comiclocal)
if mylar.CONFIG.ENFORCE_PERMS:
filechecker.setperms(comiclocal)
except IOError as e:
logger.error('Unable to save cover into series directory (%s) at this time' % comiclocal)

myDB = db.DBConnection()
myDB.upsert('comics', {'ComicImage': ComicImage}, {'ComicID': comicid})
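
The new image_it helper tries the latest issue's cover first, falls back to an alternate scan, then to the already-cached series image, and finally copies a cover.jpg into the series folder when the local-cover option is set. A rough sketch of the same retry ladder, with a generic stand-in for Mylar's helpers.getImage and no Mylar imports:

    import os
    import shutil

    def fetch_cover(primary_url, alt_url, series_url, cache_path, series_dir, download):
        """download(url, dest) -> bool is a stand-in for the project's image fetcher."""
        # 1. latest issue cover, 2. alternate scan, 3. series cover as last resort
        for url in (primary_url, alt_url, series_url):
            if url and download(url, cache_path):
                break
        # optionally mirror the cached image into the series folder as cover.jpg
        if os.path.isdir(series_dir) and os.path.isfile(cache_path):
            try:
                shutil.copyfile(cache_path, os.path.join(series_dir, 'cover.jpg'))
            except IOError:
                pass  # the cached copy is still usable even if the local copy fails
        return cache_path
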
@ -77,37 +77,38 @@ if not LOG_LANG.startswith('en'):
lg = logging.getLogger('mylar')
lg.setLevel(logging.DEBUG)

self.filename = os.path.join(log_dir, self.filename)
if log_dir is not None:
self.filename = os.path.join(log_dir, self.filename)

#concurrentLogHandler/0.8.7 (to deal with windows locks)
#since this only happens on windows boxes, if it's nix/mac use the default logger.
if mylar.OS_DETECT == 'Windows':
#set the path to the lib here - just to make sure it can detect cloghandler & portalocker.
import sys
sys.path.append(os.path.join(mylar.PROG_DIR, 'lib'))
#concurrentLogHandler/0.8.7 (to deal with windows locks)
#since this only happens on windows boxes, if it's nix/mac use the default logger.
if mylar.OS_DETECT == 'Windows':
#set the path to the lib here - just to make sure it can detect cloghandler & portalocker.
import sys
sys.path.append(os.path.join(mylar.PROG_DIR, 'lib'))

try:
from ConcurrentLogHandler.cloghandler import ConcurrentRotatingFileHandler as RFHandler
mylar.LOGTYPE = 'clog'
except ImportError:
try:
from ConcurrentLogHandler.cloghandler import ConcurrentRotatingFileHandler as RFHandler
mylar.LOGTYPE = 'clog'
except ImportError:
mylar.LOGTYPE = 'log'
from logging.handlers import RotatingFileHandler as RFHandler
else:
mylar.LOGTYPE = 'log'
from logging.handlers import RotatingFileHandler as RFHandler
else:
mylar.LOGTYPE = 'log'
from logging.handlers import RotatingFileHandler as RFHandler

filehandler = RFHandler(
self.filename,
maxBytes=max_logsize,
backupCount=max_logfiles)
filehandler = RFHandler(
self.filename,
maxBytes=max_logsize,
backupCount=max_logfiles)

filehandler.setLevel(logging.DEBUG)
filehandler.setLevel(logging.DEBUG)

fileformatter = logging.Formatter('%(asctime)s - %(levelname)-7s :: %(message)s', '%d-%b-%Y %H:%M:%S')
fileformatter = logging.Formatter('%(asctime)s - %(levelname)-7s :: %(message)s', '%d-%b-%Y %H:%M:%S')

filehandler.setFormatter(fileformatter)
lg.addHandler(filehandler)
self.filehandler = filehandler
filehandler.setFormatter(fileformatter)
lg.addHandler(filehandler)
self.filehandler = filehandler

if loglevel:
consolehandler = logging.StreamHandler()
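
The logger hunk is mostly a re-indent of the handler setup, but the behaviour it preserves is: on Windows, use ConcurrentRotatingFileHandler (the bundled cloghandler, which tolerates Windows file locks), otherwise fall back to the stdlib RotatingFileHandler. A compact sketch of that selection, assuming only the standard library plus an optional cloghandler module on the path (as the copy vendored under lib/ provides):

    import logging
    import platform
    from logging.handlers import RotatingFileHandler as RFHandler

    if platform.system() == 'Windows':
        try:
            # the concurrent handler copes with Windows file locking
            from cloghandler import ConcurrentRotatingFileHandler as RFHandler
        except ImportError:
            pass  # stdlib rotating handler remains the fallback

    log = logging.getLogger('mylar')
    log.setLevel(logging.DEBUG)
    handler = RFHandler('mylar.log', maxBytes=1000000, backupCount=5)
    handler.setFormatter(logging.Formatter(
        '%(asctime)s - %(levelname)-7s :: %(message)s', '%d-%b-%Y %H:%M:%S'))
    log.addHandler(handler)
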
@ -74,7 +74,12 @@ def pullsearch(comicapi, comicquery, offset, type):
logger.warn('Error fetching data from ComicVine: %s' % (e))
return

dom = parseString(r.content) #(data)
try:
dom = parseString(r.content) #(data)
except ExpatError:
logger.warn('[WARNING] ComicVine is not responding correctly at the moment. This is usually due to some problems on their end. If you re-try things again in a few moments, it might work properly.')
return

return dom

def findComic(name, mode, issue, limityear=None, type=None):
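
pullsearch now guards the minidom parse against ExpatError, which is what ComicVine's HTML error pages trigger when its API is struggling; the caller simply gets nothing back and can retry later. The same guard in isolation:

    from xml.dom.minidom import parseString
    from xml.parsers.expat import ExpatError

    def parse_cv_response(content):
        """Return a DOM for a ComicVine XML payload, or None if it is not valid XML."""
        try:
            return parseString(content)
        except ExpatError:
            # ComicVine occasionally returns an HTML error page instead of XML
            return None
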
@ -38,7 +38,7 @@ from base64 import b16encode, b32decode
from operator import itemgetter
from wsgiref.handlers import format_date_time

def search_init(ComicName, IssueNumber, ComicYear, SeriesYear, Publisher, IssueDate, StoreDate, IssueID, AlternateSearch=None, UseFuzzy=None, ComicVersion=None, SARC=None, IssueArcID=None, mode=None, rsscheck=None, ComicID=None, manualsearch=None, filesafe=None, allow_packs=None, oneoff=False, manual=False):
def search_init(ComicName, IssueNumber, ComicYear, SeriesYear, Publisher, IssueDate, StoreDate, IssueID, AlternateSearch=None, UseFuzzy=None, ComicVersion=None, SARC=None, IssueArcID=None, mode=None, rsscheck=None, ComicID=None, manualsearch=None, filesafe=None, allow_packs=None, oneoff=False, manual=False, torrentid_32p=None):

mylar.COMICINFO = []
unaltered_ComicName = None

@ -307,7 +307,7 @@ def search_init(ComicName, IssueNumber, ComicYear, SeriesYear, Publisher, IssueD
#sure it's not disabled (it gets auto-disabled on maxing out the API hits)
prov_count+=1
continue
elif all([searchprov == '32P', checked_once is True]) or all ([searchprov == 'Public Torrents', checked_once is True]):
elif all([searchprov == '32P', checked_once is True]) or all ([searchprov == 'Public Torrents', checked_once is True]) or all([searchprov == 'experimental', checked_once is True]):
prov_count+=1
continue
if searchmode == 'rss':

@ -331,7 +331,7 @@ def search_init(ComicName, IssueNumber, ComicYear, SeriesYear, Publisher, IssueD
break

else:
findit = NZB_SEARCH(ComicName, IssueNumber, ComicYear, SeriesYear, Publisher, IssueDate, StoreDate, searchprov, send_prov_count, IssDateFix, IssueID, UseFuzzy, newznab_host, ComicVersion=ComicVersion, SARC=SARC, IssueArcID=IssueArcID, RSS="no", ComicID=ComicID, issuetitle=issuetitle, unaltered_ComicName=unaltered_ComicName, allow_packs=allow_packs, oneoff=oneoff, cmloopit=cmloopit, manual=manual, torznab_host=torznab_host)
findit = NZB_SEARCH(ComicName, IssueNumber, ComicYear, SeriesYear, Publisher, IssueDate, StoreDate, searchprov, send_prov_count, IssDateFix, IssueID, UseFuzzy, newznab_host, ComicVersion=ComicVersion, SARC=SARC, IssueArcID=IssueArcID, RSS="no", ComicID=ComicID, issuetitle=issuetitle, unaltered_ComicName=unaltered_ComicName, allow_packs=allow_packs, oneoff=oneoff, cmloopit=cmloopit, manual=manual, torznab_host=torznab_host, torrentid_32p=torrentid_32p)
if all([searchprov == '32P', checked_once is False]) or all([searchprov == 'Public Torrents', checked_once is False]):
checked_once = True
if findit['status'] is False:

@ -343,7 +343,7 @@ def search_init(ComicName, IssueNumber, ComicYear, SeriesYear, Publisher, IssueD
for calt in chkthealt:
AS_Alternate = re.sub('##', '', calt)
logger.info(u"Alternate Search pattern detected...re-adjusting to : " + str(AS_Alternate))
findit = NZB_SEARCH(AS_Alternate, IssueNumber, ComicYear, SeriesYear, Publisher, IssueDate, StoreDate, searchprov, send_prov_count, IssDateFix, IssueID, UseFuzzy, newznab_host, ComicVersion=ComicVersion, SARC=SARC, IssueArcID=IssueArcID, RSS="no", ComicID=ComicID, issuetitle=issuetitle, unaltered_ComicName=unaltered_ComicName, allow_packs=allow_packs, oneoff=oneoff, cmloopit=cmloopit, manual=manual, torznab_host=torznab_host)
findit = NZB_SEARCH(AS_Alternate, IssueNumber, ComicYear, SeriesYear, Publisher, IssueDate, StoreDate, searchprov, send_prov_count, IssDateFix, IssueID, UseFuzzy, newznab_host, ComicVersion=ComicVersion, SARC=SARC, IssueArcID=IssueArcID, RSS="no", ComicID=ComicID, issuetitle=issuetitle, unaltered_ComicName=unaltered_ComicName, allow_packs=allow_packs, oneoff=oneoff, cmloopit=cmloopit, manual=manual, torznab_host=torznab_host, torrentid_32p=torrentid_32p)
if findit['status'] is True:
break
if findit['status'] is True:

@ -403,7 +403,7 @@ def search_init(ComicName, IssueNumber, ComicYear, SeriesYear, Publisher, IssueD
return findit, 'None'

def NZB_SEARCH(ComicName, IssueNumber, ComicYear, SeriesYear, Publisher, IssueDate, StoreDate, nzbprov, prov_count, IssDateFix, IssueID, UseFuzzy, newznab_host=None, ComicVersion=None, SARC=None, IssueArcID=None, RSS=None, ComicID=None, issuetitle=None, unaltered_ComicName=None, allow_packs=None, oneoff=False, cmloopit=None, manual=False, torznab_host=None):
def NZB_SEARCH(ComicName, IssueNumber, ComicYear, SeriesYear, Publisher, IssueDate, StoreDate, nzbprov, prov_count, IssDateFix, IssueID, UseFuzzy, newznab_host=None, ComicVersion=None, SARC=None, IssueArcID=None, RSS=None, ComicID=None, issuetitle=None, unaltered_ComicName=None, allow_packs=None, oneoff=False, cmloopit=None, manual=False, torznab_host=None, torrentid_32p=None):

if any([allow_packs is None, allow_packs == 'None', allow_packs == 0, allow_packs == '0']) and all([mylar.CONFIG.ENABLE_TORRENT_SEARCH, mylar.CONFIG.ENABLE_32P]):
allow_packs = False

@ -597,11 +597,11 @@ def NZB_SEARCH(ComicName, IssueNumber, ComicYear, SeriesYear, Publisher, IssueDa
if nzbprov == '':
bb = "no results"
if nzbprov == '32P':
if all([mylar.CONFIG.MODE_32P == 1,mylar.CONFIG.ENABLE_32P]):
if all([mylar.CONFIG.MODE_32P == 1, mylar.CONFIG.ENABLE_32P is True]):
if ComicName[:17] == '0-Day Comics Pack':
searchterm = {'series': ComicName, 'issue': StoreDate[8:10], 'volume': StoreDate[5:7]}
else:
searchterm = {'series': ComicName, 'id': ComicID, 'issue': findcomiciss, 'volume': ComicVersion, 'publisher': Publisher}
searchterm = {'series': ComicName, 'id': ComicID, 'issue': findcomiciss, 'volume': ComicVersion, 'publisher': Publisher, 'torrentid_32p': torrentid_32p}
#first we find the id on the serieslist of 32P
#then we call the ajax against the id and issue# and volume (if exists)
a = auth32p.info32p(searchterm=searchterm)

@ -954,7 +954,8 @@ def NZB_SEARCH(ComicName, IssueNumber, ComicYear, SeriesYear, Publisher, IssueDa
continue

#convert it to a Thu, 06 Feb 2014 00:00:00 format
issue_convert = datetime.datetime.strptime(stdate.rstrip(), '%Y-%m-%d')
issue_converted = datetime.datetime.strptime(stdate.rstrip(), '%Y-%m-%d')
issue_convert = issue_converted + datetime.timedelta(days=-1)
# to get past different locale's os-dependent dates, let's convert it to a generic datetime format
try:
stamp = time.mktime(issue_convert.timetuple())
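
The hunk above shifts the store date back by one day before converting it to an epoch stamp, so a posting that appears a few hours "early" in another timezone is not rejected as predating the store date. A small sketch of the conversion, assuming store dates arrive as YYYY-MM-DD strings:

    import datetime
    import time

    def store_date_stamp(stdate):
        """Epoch seconds for the day before the store date (one-day timezone grace)."""
        converted = datetime.datetime.strptime(stdate.rstrip(), '%Y-%m-%d')
        grace = converted + datetime.timedelta(days=-1)
        return time.mktime(grace.timetuple())
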
@ -980,6 +981,7 @@ def NZB_SEARCH(ComicName, IssueNumber, ComicYear, SeriesYear, Publisher, IssueDa
try:
#try new method to get around issues populating in a diff timezone thereby putting them in a different day.
if dateconv2.date() < econv2.date():
logger.fdebug('[CONV]pubdate: %s < storedate: %s' % (dateconv2.date(), econv2.date()))
logger.fdebug(str(pubdate) + ' is before store date of ' + str(stdate) + '. Ignoring search result as this is not the right issue.')
continue
else:

@ -987,6 +989,7 @@ def NZB_SEARCH(ComicName, IssueNumber, ComicYear, SeriesYear, Publisher, IssueDa
except:
#if the above fails, drop down to the integer compare method as a failsafe.
if postdate_int < issuedate_int:
logger.fdebug('[INT]pubdate: %s < storedate: %s' % (postdate_int, issuedate_int))
logger.fdebug(str(pubdate) + ' is before store date of ' + str(stdate) + '. Ignoring search result as this is not the right issue.')
continue
else:

@ -1986,6 +1989,7 @@ def searchforissue(issueid=None, new=False, rsscheck=None, manual=False):
AlternateSearch = None
UseFuzzy = None
ComicVersion = comic['Volume']
TorrentID_32p = None
else:
Comicname_filesafe = comic['ComicName_Filesafe']
SeriesYear = comic['ComicYear']

@ -1993,6 +1997,7 @@ def searchforissue(issueid=None, new=False, rsscheck=None, manual=False):
AlternateSearch = comic['AlternateSearch']
UseFuzzy = comic['UseFuzzy']
ComicVersion = comic['ComicVersion']
TorrentID_32p = comic['TorrentID_32P']
if any([comic['AllowPacks'] == 1, comic['AllowPacks'] == '1']):
AllowPacks = True

@ -2005,7 +2010,7 @@ def searchforissue(issueid=None, new=False, rsscheck=None, manual=False):
ComicYear = str(result['IssueDate'])[:4]

mode = result['mode']
foundNZB, prov = search_init(comic['ComicName'], result['Issue_Number'], str(ComicYear), SeriesYear, Publisher, IssueDate, StoreDate, result['IssueID'], AlternateSearch, UseFuzzy, ComicVersion, SARC=result['SARC'], IssueArcID=result['IssueArcID'], mode=mode, rsscheck=rsscheck, ComicID=result['ComicID'], filesafe=Comicname_filesafe, allow_packs=AllowPacks, oneoff=OneOff)
foundNZB, prov = search_init(comic['ComicName'], result['Issue_Number'], str(ComicYear), SeriesYear, Publisher, IssueDate, StoreDate, result['IssueID'], AlternateSearch, UseFuzzy, ComicVersion, SARC=result['SARC'], IssueArcID=result['IssueArcID'], mode=mode, rsscheck=rsscheck, ComicID=result['ComicID'], filesafe=Comicname_filesafe, allow_packs=AllowPacks, oneoff=OneOff, torrentid_32p=TorrentID_32p)
if foundNZB['status'] is True:
#logger.info(foundNZB)
updater.foundsearch(result['ComicID'], result['IssueID'], mode=mode, provider=prov, SARC=result['SARC'], IssueArcID=result['IssueArcID'], hash=foundNZB['info']['t_hash'])

@ -2045,7 +2050,7 @@ def searchforissue(issueid=None, new=False, rsscheck=None, manual=False):
SARC = result['StoryArc']
IssueArcID = issueid
actissueid = None
TorrentID_32p = None
else:
comic = myDB.selectone('SELECT * FROM comics where ComicID=?', [ComicID]).fetchone()
Comicname_filesafe = comic['ComicName_Filesafe']

@ -2059,6 +2064,7 @@ def searchforissue(issueid=None, new=False, rsscheck=None, manual=False):
SARC = None
IssueArcID = None
actissueid = issueid
TorrentID_32p = comic['TorrentID_32P']
if any([comic['AllowPacks'] == 1, comic['AllowPacks'] == '1']):
allow_packs = True

@ -2070,12 +2076,12 @@ def searchforissue(issueid=None, new=False, rsscheck=None, manual=False):
else:
IssueYear = str(result['IssueDate'])[:4]

foundNZB, prov = search_init(ComicName, IssueNumber, str(IssueYear), SeriesYear, Publisher, IssueDate, StoreDate, actissueid, AlternateSearch, UseFuzzy, ComicVersion, SARC=SARC, IssueArcID=IssueArcID, mode=mode, rsscheck=rsscheck, ComicID=ComicID, filesafe=Comicname_filesafe, allow_packs=allow_packs, oneoff=oneoff, manual=manual)
foundNZB, prov = search_init(ComicName, IssueNumber, str(IssueYear), SeriesYear, Publisher, IssueDate, StoreDate, actissueid, AlternateSearch, UseFuzzy, ComicVersion, SARC=SARC, IssueArcID=IssueArcID, mode=mode, rsscheck=rsscheck, ComicID=ComicID, filesafe=Comicname_filesafe, allow_packs=allow_packs, oneoff=oneoff, manual=manual, torrentid_32p=TorrentID_32p)
if manual is True:
return foundNZB
if foundNZB['status'] is True:
logger.fdebug("I found " + comic['ComicName'] + ' #:' + str(result['Issue_Number']))
updater.foundsearch(result['ComicID'], result['IssueID'], mode=mode, provider=prov, SARC=result['SARC'], IssueArcID=result['IssueArcID'], hash=foundNZB['info']['t_hash'])
updater.foundsearch(ComicID, actissueid, mode=mode, provider=prov, SARC=SARC, IssueArcID=IssueArcID, hash=foundNZB['info']['t_hash'])

else:
if rsscheck:

@ -2111,6 +2117,7 @@ def searchIssueIDList(issuelist):
Publisher = comic['ComicPublisher']
UseFuzzy = comic['UseFuzzy']
ComicVersion = comic['ComicVersion']
TorrentID_32p = comic['TorrentID_32P']
if issue['IssueDate'] == None:
IssueYear = comic['ComicYear']
else:

@ -2120,7 +2127,7 @@ def searchIssueIDList(issuelist):
else:
AllowPacks = False

foundNZB, prov = search_init(comic['ComicName'], issue['Issue_Number'], str(IssueYear), comic['ComicYear'], Publisher, issue['IssueDate'], issue['ReleaseDate'], issue['IssueID'], AlternateSearch, UseFuzzy, ComicVersion, SARC=None, IssueArcID=None, mode=mode, ComicID=issue['ComicID'], filesafe=comic['ComicName_Filesafe'], allow_packs=AllowPacks)
foundNZB, prov = search_init(comic['ComicName'], issue['Issue_Number'], str(IssueYear), comic['ComicYear'], Publisher, issue['IssueDate'], issue['ReleaseDate'], issue['IssueID'], AlternateSearch, UseFuzzy, ComicVersion, SARC=None, IssueArcID=None, mode=mode, ComicID=issue['ComicID'], filesafe=comic['ComicName_Filesafe'], allow_packs=AllowPacks, torrentid_32p=TorrentID_32p)
if foundNZB['status'] is True:
updater.foundsearch(ComicID=issue['ComicID'], IssueID=issue['IssueID'], mode=mode, provider=prov, hash=foundNZB['info']['t_hash'])
logger.info('Completed search request.')
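
The bulk of the search.py changes thread a new torrentid_32p value, read from the TorrentID_32P column of the comics table, through search_init and NZB_SEARCH down into the 32P search term so a manually specified series id can override the normal lookup. A condensed sketch of that plumbing with the intermediate layers collapsed; the column and key names come from the diff, everything else (the function and the db object, assumed to behave like Mylar's db.DBConnection) is illustrative:

    def build_32p_searchterm(db, comic_id, issue_number):
        comic = db.selectone('SELECT * FROM comics WHERE ComicID=?', [comic_id]).fetchone()
        torrentid_32p = comic['TorrentID_32P']        # may be None / empty
        # the real code carries this through search_init() -> NZB_SEARCH()
        return {'series': comic['ComicName'],
                'id': comic_id,
                'issue': issue_number,
                'torrentid_32p': torrentid_32p}       # lets 32P skip the series lookup
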
@ -1556,6 +1556,7 @@ def totals(ComicID, havefiles=None, totalfiles=None, module=None, issueid=None,
if module is None:
module = '[FILE-RESCAN]'
myDB = db.DBConnection()
filetable = 'issues'
if any([havefiles is None, havefiles == '+1']):
if havefiles is None:
hf = myDB.selectone("SELECT Have, Total FROM comics WHERE ComicID=?", [ComicID]).fetchone()

@ -1565,6 +1566,7 @@ def totals(ComicID, havefiles=None, totalfiles=None, module=None, issueid=None,
hf = myDB.selectone("SELECT a.Have, a.Total, b.Status as IssStatus FROM comics AS a INNER JOIN issues as b ON a.ComicID=b.ComicID WHERE b.IssueID=?", [issueid]).fetchone()
if hf is None:
hf = myDB.selectone("SELECT a.Have, a.Total, b.Status as IssStatus FROM comics AS a INNER JOIN annuals as b ON a.ComicID=b.ComicID WHERE b.IssueID=?", [issueid]).fetchone()
filetable = 'annuals'
totalfiles = int(hf['Total'])
logger.fdebug('totalfiles: %s' % totalfiles)
logger.fdebug('status: %s' % hf['IssStatus'])

@ -1585,4 +1587,4 @@ def totals(ComicID, havefiles=None, totalfiles=None, module=None, issueid=None,
controlValueStat = {"IssueID": issueid,
"ComicID": ComicID}
newValueStat = {"ComicSize": os.path.getsize(file)}
myDB.upsert("issues", newValueStat, controlValueStat)
myDB.upsert(filetable, newValueStat, controlValueStat)
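
totals() now remembers which table the issue row actually came from (issues vs annuals) and upserts the file size into that table rather than always writing to issues. A reduced sketch of the same decision, assuming a DB wrapper with selectone/upsert like Mylar's db.DBConnection:

    import os

    def record_filesize(myDB, issueid, comicid, filepath):
        filetable = 'issues'
        row = myDB.selectone("SELECT a.Have, a.Total, b.Status as IssStatus FROM comics AS a "
                             "INNER JOIN issues as b ON a.ComicID=b.ComicID WHERE b.IssueID=?",
                             [issueid]).fetchone()
        if row is None:
            # not a regular issue - fall back to the annuals table
            row = myDB.selectone("SELECT a.Have, a.Total, b.Status as IssStatus FROM comics AS a "
                                 "INNER JOIN annuals as b ON a.ComicID=b.ComicID WHERE b.IssueID=?",
                                 [issueid]).fetchone()
            filetable = 'annuals'
        myDB.upsert(filetable,
                    {"ComicSize": os.path.getsize(filepath)},
                    {"IssueID": issueid, "ComicID": comicid})
        return row
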
@ -187,6 +187,7 @@ class WebInterface(object):
"delete_dir": helpers.checked(mylar.CONFIG.DELETE_REMOVE_DIR),
"allow_packs": helpers.checked(int(allowpacks)),
"corrected_seriesyear": comic['ComicYear'],
"torrentid_32p": comic['TorrentID_32P'],
"totalissues": totalissues,
"haveissues": haveissues,
"percent": percent,

@ -285,7 +286,13 @@ class WebInterface(object):
logger.error('Unable to perform required story-arc search for : [arc: ' + name + '][mode: ' + mode + ']')
return

searchresults = sorted(searchresults, key=itemgetter('comicyear', 'issues'), reverse=True)
try:
searchresults = sorted(searchresults, key=itemgetter('comicyear', 'issues'), reverse=True)
except Exception as e:
logger.error('Unable to retrieve results from ComicVine: %s' % e)
if mylar.COMICVINE_API is None:
logger.error('You NEED to set a ComicVine API key prior to adding anything. It\'s Free - Go get one!')
return
return serve_template(templatename="searchresults.html", title='Search Results for: "' + name + '"', searchresults=searchresults, type=type, imported=None, ogcname=None, name=name, serinfo=serinfo)
searchit.exposed = True
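
searchit now wraps the result sort in a try/except because a failed ComicVine query (most often a missing API key) leaves searchresults in a state that itemgetter cannot sort; the error is logged and, when no API key is configured, the user is told so. A minimal sketch of the same guard, with the logger passed in rather than Mylar's module-level one:

    from operator import itemgetter

    def sort_results(searchresults, api_key, log):
        try:
            return sorted(searchresults, key=itemgetter('comicyear', 'issues'), reverse=True)
        except Exception as e:
            log('Unable to retrieve results from ComicVine: %s' % e)
            if api_key is None:
                log('A ComicVine API key must be set before anything can be added.')
            return None
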
@ -1371,6 +1378,7 @@ class WebInterface(object):
AllowPacks= cdname['AllowPacks']
ComicVersion = cdname['ComicVersion']
ComicName = cdname['ComicName']
TorrentID_32p = cdname['TorrentID_32P']
controlValueDict = {"IssueID": IssueID}
newStatus = {"Status": "Wanted"}
if mode == 'want':

@ -1416,7 +1424,7 @@ class WebInterface(object):
#Publisher = miy['ComicPublisher']
#UseAFuzzy = miy['UseFuzzy']
#ComicVersion = miy['ComicVersion']
foundcom, prov = search.search_init(ComicName, ComicIssue, ComicYear, SeriesYear, Publisher, issues['IssueDate'], storedate, IssueID, AlternateSearch, UseAFuzzy, ComicVersion, mode=mode, ComicID=ComicID, manualsearch=manualsearch, filesafe=ComicName_Filesafe, allow_packs=AllowPacks)
foundcom, prov = search.search_init(ComicName, ComicIssue, ComicYear, SeriesYear, Publisher, issues['IssueDate'], storedate, IssueID, AlternateSearch, UseAFuzzy, ComicVersion, mode=mode, ComicID=ComicID, manualsearch=manualsearch, filesafe=ComicName_Filesafe, allow_packs=AllowPacks, torrentid_32p=TorrentID_32p)
if foundcom['status'] is True:
# file check to see if issue exists and update 'have' count
if IssueID is not None:

@ -4670,8 +4678,13 @@ class WebInterface(object):
raise cherrypy.HTTPRedirect("comicDetails?ComicID=%s" % comicid)
manual_annual_add.exposed = True

def comic_config(self, com_location, ComicID, alt_search=None, fuzzy_year=None, comic_version=None, force_continuing=None, alt_filename=None, allow_packs=None, corrected_seriesyear=None):
def comic_config(self, com_location, ComicID, alt_search=None, fuzzy_year=None, comic_version=None, force_continuing=None, alt_filename=None, allow_packs=None, corrected_seriesyear=None, torrentid_32p=None):
myDB = db.DBConnection()
chk1 = myDB.selectone('SELECT ComicLocation FROM comics WHERE ComicID=?', [ComicID]).fetchone()
if chk1 is None:
orig_location = com_location
else:
orig_location = chk1['ComicLocation']
#--- this is for multiple search terms............
#--- works, just need to redo search.py to accomodate multiple search terms
ffs_alt = []

@ -4732,22 +4745,31 @@ class WebInterface(object):
else:
newValues['AllowPacks'] = 1

newValues['TorrentID_32P'] = torrentid_32p

if alt_filename is None or alt_filename == 'None':
newValues['AlternateFileName'] = "None"
else:
newValues['AlternateFileName'] = str(alt_filename)

#force the check/creation of directory com_location here
if mylar.CONFIG.CREATE_FOLDERS is True:
if any([mylar.CONFIG.CREATE_FOLDERS is True, os.path.isdir(orig_location)]):
if os.path.isdir(str(com_location)):
logger.info(u"Validating Directory (" + str(com_location) + "). Already exists! Continuing...")
else:
logger.fdebug("Updated Directory doesn't exist! - attempting to create now.")
checkdirectory = filechecker.validateAndCreateDirectory(com_location, True)
if not checkdirectory:
logger.warn('Error trying to validate/create directory. Aborting this process at this time.')
return

if orig_location != com_location:
logger.fdebug('Renaming existing location [%s] to new location: %s' % (orig_location, com_location))
try:
os.rename(orig_location, com_location)
except Exception as e:
logger.warn('Unable to rename existing directory: %s' % e)
return
else:
logger.fdebug("Updated Directory doesn't exist! - attempting to create now.")
checkdirectory = filechecker.validateAndCreateDirectory(com_location, True)
if not checkdirectory:
logger.warn('Error trying to validate/create directory. Aborting this process at this time.')
return
myDB.upsert("comics", newValues, controlValueDict)
logger.fdebug('Updated Series options!')
raise cherrypy.HTTPRedirect("comicDetails?ComicID=%s" % ComicID)
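
comic_config previously only created a missing directory; it now looks up the location already stored in the comics table and, when the path submitted in the form differs from it, renames the existing folder instead of creating an empty one. A sketch of that decision tree (the structure is simplified relative to the diff; validate_and_create stands in for filechecker.validateAndCreateDirectory and log for the logger):

    import os

    def apply_location_change(orig_location, com_location, validate_and_create, log):
        if os.path.isdir(com_location):
            log('Directory already exists - nothing to do.')
        elif orig_location != com_location and os.path.isdir(orig_location):
            # the series folder moved: carry the existing files along
            try:
                os.rename(orig_location, com_location)
            except OSError as e:
                log('Unable to rename existing directory: %s' % e)
                return False
        else:
            # brand new location: create it from scratch
            if not validate_and_create(com_location, True):
                log('Error trying to validate/create directory.')
                return False
        return True
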
@ -4900,82 +4922,77 @@ class WebInterface(object):
if sabapikey is None:
sabapikey = mylar.CONFIG.SAB_APIKEY
logger.fdebug('Now attempting to test SABnzbd connection')
if mylar.USE_SABNZBD:

#if user/pass given, we can auto-fill the API ;)
if sabusername is None or sabpassword is None:
logger.error('No Username / Password provided for SABnzbd credentials. Unable to test API key')
return "Invalid Username/Password provided"
logger.fdebug('testing connection to SABnzbd @ ' + sabhost)
if sabhost.endswith('/'):
sabhost = sabhost
else:
sabhost = sabhost + '/'
#if user/pass given, we can auto-fill the API ;)
if sabusername is None or sabpassword is None:
logger.error('No Username / Password provided for SABnzbd credentials. Unable to test API key')
return "Invalid Username/Password provided"
logger.fdebug('testing connection to SABnzbd @ ' + sabhost)
if sabhost.endswith('/'):
sabhost = sabhost
else:
sabhost = sabhost + '/'

querysab = sabhost + 'api'
payload = {'mode': 'get_config',
'section': 'misc',
'output': 'json',
'keyword': 'api_key',
'apikey': sabapikey}
querysab = sabhost + 'api'
payload = {'mode': 'get_config',
'section': 'misc',
'output': 'json',
'keyword': 'api_key',
'apikey': sabapikey}

if sabhost.startswith('https'):
verify = True
else:
verify = False

try:
r = requests.get(querysab, params=payload, verify=verify)
except Exception, e:
logger.warn('Error fetching data from %s: %s' % (querysab, e))
if requests.exceptions.SSLError:
logger.warn('Cannot verify ssl certificate. Attempting to authenticate with no ssl-certificate verification.')
try:
from requests.packages.urllib3 import disable_warnings
disable_warnings()
except:
logger.warn('Unable to disable https warnings. Expect some spam if using https nzb providers.')

if sabhost.startswith('https'):
verify = True
else:
verify = False

try:
r = requests.get(querysab, params=payload, verify=verify)
except Exception, e:
logger.warn('Error fetching data from %s: %s' % (querysab, e))
if requests.exceptions.SSLError:
logger.warn('Cannot verify ssl certificate. Attempting to authenticate with no ssl-certificate verification.')
try:
from requests.packages.urllib3 import disable_warnings
disable_warnings()
except:
logger.warn('Unable to disable https warnings. Expect some spam if using https nzb providers.')

verify = False

try:
r = requests.get(querysab, params=payload, verify=verify)
except Exception, e:
logger.warn('Error fetching data from %s: %s' % (sabhost, e))
return 'Unable to retrieve data from SABnzbd'
else:
try:
r = requests.get(querysab, params=payload, verify=verify)
except Exception, e:
logger.warn('Error fetching data from %s: %s' % (sabhost, e))
return 'Unable to retrieve data from SABnzbd'

logger.info('status code: ' + str(r.status_code))

if str(r.status_code) != '200':
logger.warn('Unable to properly query SABnzbd @' + sabhost + ' [Status Code returned: ' + str(r.status_code) + ']')
data = False
else:
data = r.json()
return 'Unable to retrieve data from SABnzbd'

try:
q_apikey = data['config']['misc']['api_key']
except:
logger.error('Error detected attempting to retrieve SAB data using FULL APIKey')
if all([sabusername is not None, sabpassword is not None]):
try:
sp = sabparse.sabnzbd(sabhost, sabusername, sabpassword)
q_apikey = sp.sab_get()
except Exception, e:
logger.warn('Error fetching data from %s: %s' % (sabhost, e))
if q_apikey is None:
return "Invalid APIKey provided"

mylar.CONFIG.SAB_APIKEY = q_apikey
logger.info('APIKey provided is the FULL APIKey which is the correct key. You still need to SAVE the config for the changes to be applied.')
logger.info('status code: ' + str(r.status_code))

logger.info('Connection to SABnzbd tested sucessfully')
return "Successfully verified APIkey"
if str(r.status_code) != '200':
logger.warn('Unable to properly query SABnzbd @' + sabhost + ' [Status Code returned: ' + str(r.status_code) + ']')
data = False
else:
logger.error('You do not have anything stated for SAB Host. Please correct and try again.')
return "Invalid SABnzbd host specified"
data = r.json()

try:
q_apikey = data['config']['misc']['api_key']
except:
logger.error('Error detected attempting to retrieve SAB data using FULL APIKey')
if all([sabusername is not None, sabpassword is not None]):
try:
sp = sabparse.sabnzbd(sabhost, sabusername, sabpassword)
q_apikey = sp.sab_get()
except Exception, e:
logger.warn('Error fetching data from %s: %s' % (sabhost, e))
if q_apikey is None:
return "Invalid APIKey provided"

mylar.CONFIG.SAB_APIKEY = q_apikey
logger.info('APIKey provided is the FULL APIKey which is the correct key. You still need to SAVE the config for the changes to be applied.')
logger.info('Connection to SABnzbd tested sucessfully')
return "Successfully verified APIkey"
SABtest.exposed = True

def NZBGet_test(self, nzbhost=None, nzbport=None, nzbusername=None, nzbpassword=None):
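
The SABtest rewrite above drops the outer `if mylar.USE_SABNZBD:` guard and dedents the body; functionally it still asks SABnzbd's api endpoint for its configured api_key via mode=get_config and compares it with what the user supplied. A trimmed sketch of that verification call, using only requests (host and key are placeholders, and the SSL/credential fallbacks from the real code are omitted):

    import requests

    def verify_sab_apikey(sabhost, sabapikey):
        """Ask SABnzbd for its configured API key and compare it with the one supplied."""
        if not sabhost.endswith('/'):
            sabhost += '/'
        payload = {'mode': 'get_config',
                   'section': 'misc',
                   'keyword': 'api_key',
                   'output': 'json',
                   'apikey': sabapikey}
        r = requests.get(sabhost + 'api', params=payload, verify=sabhost.startswith('https'))
        if r.status_code != 200:
            return False
        data = r.json()
        try:
            return data['config']['misc']['api_key'] == sabapikey
        except KeyError:
            # SABnzbd answered, but without the config section - the key is wrong
            return False
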
@ -57,6 +57,8 @@ class wwt(object):
pagelist = resultpages.findAll("a")
except:
logger.info('No results found for %s' % self.query)
return

pages = []
for p in pagelist:
if p['href'] not in pages:
@ -11,7 +11,7 @@ except ImportError:
print "requests to bypass this in the future (ie. pip install requests)"
use_requests = False

apc_version = "2.02"
apc_version = "2.04"

def processEpisode(dirName, nzbName=None):
print "Your ComicRN.py script is outdated. I'll force this through, but Failed Download Handling and possible enhancements/fixes will not work and could cause errors."

@ -93,7 +93,13 @@ def processIssue(dirName, nzbName=None, failed=False, comicrn_version=None):
for line in result:
print line

if any("Post Processing SUCCESSFUL" in s for s in result.split('\n')):
return 0
if type(result) == list:
if any("Post Processing SUCCESSFUL" in s for s in result):
return 0
else:
return 1
else:
return 1
if any("Post Processing SUCCESSFUL" in s for s in result.split('\n')):
return 0
else:
return 1
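
The autoProcessComics change handles the post-processing result being either a list of lines or a single newline-joined string; success is still detected by the "Post Processing SUCCESSFUL" marker. Equivalent logic in isolation (a sketch, not the script's exact control flow):

    def exit_code(result):
        """0 on success, 1 otherwise; result may be a list of lines or one string."""
        lines = result if isinstance(result, list) else result.split('\n')
        return 0 if any("Post Processing SUCCESSFUL" in line for line in lines) else 1
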