diff --git a/bs4/builder/_lxml.py b/bs4/builder/_lxml.py deleted file mode 100644 index c78fdff6..00000000 --- a/bs4/builder/_lxml.py +++ /dev/null @@ -1,179 +0,0 @@ -__all__ = [ - 'LXMLTreeBuilderForXML', - 'LXMLTreeBuilder', - ] - -from StringIO import StringIO -import collections -from lxml import etree -from bs4.element import Comment, Doctype, NamespacedAttribute -from bs4.builder import ( - FAST, - HTML, - HTMLTreeBuilder, - PERMISSIVE, - TreeBuilder, - XML) -from bs4.dammit import UnicodeDammit - -LXML = 'lxml' - -class LXMLTreeBuilderForXML(TreeBuilder): - DEFAULT_PARSER_CLASS = etree.XMLParser - - is_xml = True - - # Well, it's permissive by XML parser standards. - features = [LXML, XML, FAST, PERMISSIVE] - - CHUNK_SIZE = 512 - - @property - def default_parser(self): - # This can either return a parser object or a class, which - # will be instantiated with default arguments. - return etree.XMLParser(target=self, strip_cdata=False, recover=True) - - def __init__(self, parser=None, empty_element_tags=None): - if empty_element_tags is not None: - self.empty_element_tags = set(empty_element_tags) - if parser is None: - # Use the default parser. - parser = self.default_parser - if isinstance(parser, collections.Callable): - # Instantiate the parser with default arguments - parser = parser(target=self, strip_cdata=False) - self.parser = parser - self.soup = None - self.nsmaps = None - - def _getNsTag(self, tag): - # Split the namespace URL out of a fully-qualified lxml tag - # name. Copied from lxml's src/lxml/sax.py. - if tag[0] == '{': - return tuple(tag[1:].split('}', 1)) - else: - return (None, tag) - - def prepare_markup(self, markup, user_specified_encoding=None, - document_declared_encoding=None): - """ - :return: A 3-tuple (markup, original encoding, encoding - declared within markup). - """ - if isinstance(markup, unicode): - return markup, None, None, False - - try_encodings = [user_specified_encoding, document_declared_encoding] - dammit = UnicodeDammit(markup, try_encodings, is_html=True) - return (dammit.markup, dammit.original_encoding, - dammit.declared_html_encoding, - dammit.contains_replacement_characters) - - def feed(self, markup): - if isinstance(markup, basestring): - markup = StringIO(markup) - # Call feed() at least once, even if the markup is empty, - # or the parser won't be initialized. - data = markup.read(self.CHUNK_SIZE) - self.parser.feed(data) - while data != '': - # Now call feed() on the rest of the data, chunk by chunk. - data = markup.read(self.CHUNK_SIZE) - if data != '': - self.parser.feed(data) - self.parser.close() - - def close(self): - self.nsmaps = None - - def start(self, name, attrs, nsmap={}): - # Make sure attrs is a mutable dict--lxml may send an immutable dictproxy. - attrs = dict(attrs) - - nsprefix = None - # Invert each namespace map as it comes in. - if len(nsmap) == 0 and self.nsmaps != None: - # There are no new namespaces for this tag, but namespaces - # are in play, so we need a separate tag stack to know - # when they end. - self.nsmaps.append(None) - elif len(nsmap) > 0: - # A new namespace mapping has come into play. - if self.nsmaps is None: - self.nsmaps = [] - inverted_nsmap = dict((value, key) for key, value in nsmap.items()) - self.nsmaps.append(inverted_nsmap) - # Also treat the namespace mapping as a set of attributes on the - # tag, so we can recreate it later. 
- attrs = attrs.copy() - for prefix, namespace in nsmap.items(): - attribute = NamespacedAttribute( - "xmlns", prefix, "http://www.w3.org/2000/xmlns/") - attrs[attribute] = namespace - namespace, name = self._getNsTag(name) - if namespace is not None: - for inverted_nsmap in reversed(self.nsmaps): - if inverted_nsmap is not None and namespace in inverted_nsmap: - nsprefix = inverted_nsmap[namespace] - break - self.soup.handle_starttag(name, namespace, nsprefix, attrs) - - def end(self, name): - self.soup.endData() - completed_tag = self.soup.tagStack[-1] - namespace, name = self._getNsTag(name) - nsprefix = None - if namespace is not None: - for inverted_nsmap in reversed(self.nsmaps): - if inverted_nsmap is not None and namespace in inverted_nsmap: - nsprefix = inverted_nsmap[namespace] - break - self.soup.handle_endtag(name, nsprefix) - if self.nsmaps != None: - # This tag, or one of its parents, introduced a namespace - # mapping, so pop it off the stack. - self.nsmaps.pop() - if len(self.nsmaps) == 0: - # Namespaces are no longer in play, so don't bother keeping - # track of the namespace stack. - self.nsmaps = None - - def pi(self, target, data): - pass - - def data(self, content): - self.soup.handle_data(content) - - def doctype(self, name, pubid, system): - self.soup.endData() - doctype = Doctype.for_name_and_ids(name, pubid, system) - self.soup.object_was_parsed(doctype) - - def comment(self, content): - "Handle comments as Comment objects." - self.soup.endData() - self.soup.handle_data(content) - self.soup.endData(Comment) - - def test_fragment_to_document(self, fragment): - """See `TreeBuilder`.""" - return u'\n%s' % fragment - - -class LXMLTreeBuilder(HTMLTreeBuilder, LXMLTreeBuilderForXML): - - features = [LXML, HTML, FAST, PERMISSIVE] - is_xml = False - - @property - def default_parser(self): - return etree.HTMLParser - - def feed(self, markup): - self.parser.feed(markup) - self.parser.close() - - def test_fragment_to_document(self, fragment): - """See `TreeBuilder`.""" - return u'%s' % fragment diff --git a/cache/blankcover.jpg b/cache/blankcover.jpg deleted file mode 100644 index 43472a43..00000000 Binary files a/cache/blankcover.jpg and /dev/null differ diff --git a/data/interfaces/default/artistredone.html b/data/interfaces/default/artistredone.html deleted file mode 100755 index 6d40a0cf..00000000 --- a/data/interfaces/default/artistredone.html +++ /dev/null @@ -1,623 +0,0 @@ -<%inherit file="base.html"/> -<%! - import os - from mylar import db - import mylar -%> - -<%def name="headerIncludes()"> -
-
- Refresh Comic - Delete Comic - %if mylar.RENAME_FILES: - Rename Files - %endif - Recheck Files - %if comic['Status'] == 'Paused': - Resume Comic - %else: - Pause Comic - %endif - -
-
- -<%def name="body()"> - -
-

- %if comic['Status'] == 'Loading': - loading - %endif -
- - ${comic['ComicName']} (${comic['ComicYear']}) - %if comic['Status'] == 'Loading': -

(Comic information is currently being loaded)

- %endif -
-

-
- - -
- -
- -
- -
- - - - - - - -
-
-
- -
-
-
- %if comic['ComicPublisher'] == 'DC Comics': - DC - %elif comic['ComicPublisher'] == 'Marvel': - Marvel - %elif comic['ComicPublisher'] == 'Image': - Image - %elif comic['ComicPublisher'] == 'Dark Horse Comics' or comic['ComicPublisher'] == 'Dark Horse': - Darkhorse - %elif comic['ComicPublisher'] == 'IDW Publishing': - IDW - %elif comic['ComicPublisher'] == 'Icon': - Icon - %elif comic['ComicPublisher'] == 'Red5': - Red5 - %elif comic['ComicPublisher'] == 'Vertigo': - Vertigo - %elif comic['ComicPublisher'] == 'ShadowLine': - Shadowline - %elif comic['ComicPublisher'] == 'Archie Comics': - Archie - %elif comic['ComicPublisher'] == 'Oni Press': - Oni Press - %elif comic['ComicPublisher'] == 'Tokyopop': - Tokyopop - %elif comic['ComicPublisher'] == 'Midtown Comics': - Midtown - %elif comic['ComicPublisher'] == 'Boom! Studios': - Boom! - %elif comic['ComicPublisher'] == 'Skybound': - Skybound - %elif comic['ComicPublisher'] == 'Vertigo': - Dynamite - %elif comic['ComicPublisher'] == 'Top Cow': - Top Cow - %elif comic['ComicPublisher'] == 'Dynamite Entertainment': - Dynamite - %elif comic['ComicPublisher'] == 'Cartoon Books': - Cartoon Books - %endif -
-
- -
-
- -
-
- -
-
- -
-
- -
-
- -
-
-
- -
-
- -
-
-
- - - - - -
-
-
- -
-
-
- %if comic['ComicPublisher'] == 'DC Comics': - DC - %elif comic['ComicPublisher'] == 'Marvel': - Marvel - %elif comic['ComicPublisher'] == 'Image': - Image - %elif comic['ComicPublisher'] == 'Dark Horse Comics': - Darkhorse - %elif comic['ComicPublisher'] == 'IDW Publishing': - IDW - %endif -
-
- -
-
- -
-
- -
-
- -
-
- -
- <% - if comic['UseFuzzy'] == "0" or comic['UseFuzzy'] is None: - fuzzy = "None" - fuzzy_year = "0" - elif comic['UseFuzzy'] == "1": - fuzzy = "Remove Year" - fuzzy_year = "1" - elif comic['UseFuzzy'] == "2": - fuzzy = "Fuzzy Year" - fuzzy_year = "2" - - %> - -
- -
- -
-
-
-
- - - - - -
-
-
- -
-
-
- %if comic['ComicPublisher'] == 'DC Comics': - DC - %elif comic['ComicPublisher'] == 'Marvel': - Marvel - %elif comic['ComicPublisher'] == 'Image': - Image - %elif comic['ComicPublisher'] == 'Dark Horse Comics': - Darkhorse - %elif comic['ComicPublisher'] == 'IDW Publishing': - IDW - %endif - -
- -


-
-
- -
-
the directory where all the comics are located for this particular comic
-
-
- -
-
Alternate comic names to be searched in case naming is different (ie. Hack/Slash = hack-slash)
-
- -
-
-
- -
- - - -
- - -
-
-
-
-
- - %if annuals: - -
- - %endif - - - -
-
- - - - -
-
- - %if annuals: -
- %endif -
-
-
Mark selected issues as - - selected issues - - -
- - - - - - - - - - - - - - - %for issue in issues: - <% - if issue['Status'] == 'Skipped': - grade = 'Z' - elif issue['Status'] == 'Wanted': - grade = 'X' - elif issue['Status'] == 'Snatched': - grade = 'C' - elif issue['Status'] == 'Downloaded': - grade = 'A' - elif issue['Status'] == 'Archived': - grade = 'A' - else: - grade = 'A' - %> - - - - - - - - - - - %endfor - -
IntIssNumNumberNameDateStatusOptions
${issue['Int_IssueNumber']}${issue['Issue_Number']}${issue['IssueName']}${issue['IssueDate']}${issue['Status']} - %if issue['Status'] == 'Downloaded' or issue['Status'] == 'Archived': - <%Csize = mylar.helpers.human_size(issue['ComicSize'])%> - - %endif - - %if issue['Status'] == 'Skipped': - - %elif (issue['Status'] == 'Wanted'): - - %elif (issue['Status'] == 'Snatched'): - - - %elif (issue['Status'] == 'Downloaded'): - - %if issue['inCacheDIR']: - <% - try: - with open(os.path.join(mylar.CACHE_DIR,issue['Location'])) as f: - linky = issue['Location'] - except IOError as e: - linky = None - %> - %if linky: - - %else: - - %endif - %else: - - %endif - - %else: - - - %endif - -
-
-
- %if annuals: -
-
- - - - - - - - - - - - - - - - %for annual in annuals: - <% - if annual['Status'] == 'Skipped': - grade = 'Z' - elif annual['Status'] == 'Wanted': - grade = 'X' - elif annual['Status'] == 'Snatched': - grade = 'C' - elif annual['Status'] == 'Downloaded': - grade = 'A' - elif annual['Status'] == 'Archived': - grade = 'A' - else: - grade = 'A' - %> - - - - - - - - - - - - %endfor - -
NumberNameDateStatusOptions
${annual['Issue_Number']}${annual['IssueName']}${annual['IssueDate']}${annual['Status']} - %if annual['Status'] == 'Downloaded' or annual['Status'] == 'Archived': - <%Csize = mylar.helpers.human_size(annual['ComicSize'])%> - - %endif - - %if annual['Status'] == 'Skipped': - - %elif (annual['Status'] == 'Wanted'): - - %elif (annual['Status'] == 'Snatched'): - - %elif (annual['Status'] == 'Read'): - - %else: - - - %endif - -
-
-
- %endif - - -<%def name="headIncludes()"> - - - %if comic['Status'] == 'Loading': - - %endif - - - -<%def name="javascriptIncludes()"> - - - - - - - diff --git a/data/interfaces/default/comicdetails.html b/data/interfaces/default/comicdetails.html index 5fcf8134..7d3930ef 100644 --- a/data/interfaces/default/comicdetails.html +++ b/data/interfaces/default/comicdetails.html @@ -180,7 +180,25 @@
- +
<% if comic['UseFuzzy'] == "0" or comic['UseFuzzy'] is None: @@ -238,13 +256,14 @@
-
+ +
Alternate comic names to be searched in case naming is different (i.e. Hack/Slash = hack-slash)
+

-
@@ -267,6 +286,7 @@ +
@@ -314,6 +334,8 @@ grade = 'A' elif issue['Status'] == 'Archived': grade = 'A' + elif issue['Status'] == 'Ignored': + grade = 'A' else: grade = 'A' %> @@ -331,7 +353,7 @@ %endif - %if issue['Status'] == 'Skipped': + %if issue['Status'] == 'Skipped' or issue['Status'] == 'Ignored': %elif (issue['Status'] == 'Wanted'): @@ -358,8 +380,8 @@ %endif %else: - - + + %endif @@ -367,13 +389,13 @@ %endfor - + %if annuals:

Annuals

-
Mark selected issues as +
Mark selected annuals as - selected issues + selected annuals
@@ -446,19 +468,12 @@
 %endif

- %if annual['Status'] == 'Skipped':
-
- %elif (annual['Status'] == 'Wanted'):
+ <% amode = 'want_ann' %>
+
- %elif (annual['Status'] == 'Snatched'):
-
- %elif (annual['Status'] == 'Read'):
-
- %else:
-
-
- %endif
-
+
+
+
@@ -467,14 +482,14 @@
 %endif

-
+
<%def name="headIncludes()">

 %if comic['Status'] == 'Loading':
-
+
 %endif

@@ -637,8 +652,8 @@
 }
 $(document).ready(function() {
- $('#issue_table').dataTable();
- $('#annual_table').dataTable();
+ $("#issue_table").dataTable();
+ $("#annual_table").dataTable();
 initThisPage();
 });
diff --git a/data/interfaces/default/config.html b/data/interfaces/default/config.html
index 6c3a26d1..30ba215f 100755
--- a/data/interfaces/default/config.html
+++ b/data/interfaces/default/config.html
@@ -66,7 +66,7 @@
-
Coming Soon...
+
http://forum.mylarcomics.com
@@ -215,7 +215,7 @@
-
+
Where your SAB downloads go... (optional) @@ -307,6 +307,51 @@
+ Torrents +
+
+ +
+
+
+ +
+
+
+ +
+ Local Folder your torrent client watches +
+
+
+ +
+
+
+ + +
+
+ + +
+
+ + +
+
+ + +
+
+ +
+ Folder path your torrent seedbox client watches +
+
+
+
+ @@ -316,10 +361,18 @@ - + %endfor diff --git a/data/interfaces/default/images/torrent-icon.png b/data/interfaces/default/images/torrent-icon.png new file mode 100755 index 00000000..594fd020 Binary files /dev/null and b/data/interfaces/default/images/torrent-icon.png differ diff --git a/data/interfaces/default/logs.html b/data/interfaces/default/logs.html index 6a82809b..7ecadce8 100755 --- a/data/interfaces/default/logs.html +++ b/data/interfaces/default/logs.html @@ -9,21 +9,23 @@

LogsLogs

+
+
+ +
+
+ + (Mins) +
+
- NZB.SU -
- +
+ NZB.SU
@@ -330,9 +383,8 @@
- DOGNZB -
- +
+ DOGNZB
@@ -343,20 +395,31 @@
- NZBX -
- +
+ Use Experimental Search +
+
+ Note: this is an experimental search - results may be better/worse.
- EXPERIMENTAL -
- Note: this is an experimental search - results may be better/worse. +
+ Torrents
-
- +
+
+ +
+
+ +
+
+ + +
+
@@ -374,19 +437,23 @@ %> %for newznab in config['extra_newznabs']: <% - if newznab[2] == '1' or newznab[2] == 1: + if newznab[3] == '1' or newznab[3] == 1: newznab_enabled = "checked" else: newznab_enabled = "" %>
+
+ + +
- +
- +
@@ -427,7 +494,7 @@
- +
@@ -442,7 +509,13 @@ +
+ Duplicate Handling +
+ +
+
@@ -789,7 +862,7 @@ $("#add_newznab").click(function() { var intId = $("#newznab_providers > div").size() + deletedNewznabs + 1; - var formfields = $("
"); + var formfields = $("
"); var removeButton = $("
"); removeButton.click(function() { $(this).parent().remove(); @@ -812,6 +885,12 @@ initConfigCheckbox("#usenewznab"); initConfigCheckbox("#usenzbsu"); initConfigCheckbox("#usedognzb"); + initConfigCheckbox("#enable_torrents"); + initConfigCheckbox("#torrent_local"); + initConfigCheckbox("#torrent_seedbox"); + initConfigCheckbox("#enable_torrent_search"); + initConfigCheckbox("#enable_cbt"); + initConfigCheckbox("#enable_rss"); initConfigCheckbox("#useexperimental"); initConfigCheckbox("#useraw"); initConfigCheckbox("#replace_spaces"); diff --git a/data/interfaces/default/history.html b/data/interfaces/default/history.html index ae57248a..7ae9679d 100755 --- a/data/interfaces/default/history.html +++ b/data/interfaces/default/history.html @@ -61,7 +61,11 @@
${item['DateAdded']} ${item['ComicName']} ${item['Issue_Number']}${item['Status']}${item['Status']} + %if item['Provider'] == 'ComicBT' or item['Provider'] == 'KAT': + + %endif + [retry]
-
+
+
- + %for loglevel in ['Info', 'Warning', 'Debug']: <% - if interface == mylar.INTERFACE: + if loglevel == mylar.LOG_LEVEL: selected = 'selected="selected"' else: selected = '' %> - + %endfor -
- +
+ + diff --git a/data/interfaces/default/readlist.html b/data/interfaces/default/readlist.html index ecb92a2a..8166c3a0 100755 --- a/data/interfaces/default/readlist.html +++ b/data/interfaces/default/readlist.html @@ -70,7 +70,7 @@ %> - + @@ -111,8 +111,10 @@ "aaSorting": [] }).rowReordering({ - sURL:"/reOrder", - sRequestType: "GET" + sAjax: "reOrder", + fnAlert: function(text){ + alert("Order cannot be changed.\n" + text); + } }); resetFilters("item"); } diff --git a/data/interfaces/default/upcoming.html b/data/interfaces/default/upcoming.html index ca4178c0..5aa8a910 100755 --- a/data/interfaces/default/upcoming.html +++ b/data/interfaces/default/upcoming.html @@ -38,8 +38,16 @@ %for issue in issues: - - + + %endfor @@ -64,8 +72,8 @@ %for upcome in upcoming: - - + + diff --git a/mylar/PostProcessor.py b/mylar/PostProcessor.py index a5fd88d5..8d6a0a21 100755 --- a/mylar/PostProcessor.py +++ b/mylar/PostProcessor.py @@ -307,17 +307,21 @@ class PostProcessor(object): logger.fdebug("issueid:" + str(issueid)) sarc = nzbiss['SARC'] #use issueid to get publisher, series, year, issue number + annchk = "no" if 'annual' in nzbname.lower(): logger.info("annual detected.") annchk = "yes" - issuenzb = myDB.action("SELECT * from annuals WHERE IssueID=?", [issueid]).fetchone() + issuenzb = myDB.action("SELECT * from annuals WHERE IssueID=? AND ComicName NOT NULL", [issueid]).fetchone() else: - issuenzb = myDB.action("SELECT * from issues WHERE issueid=?", [issueid]).fetchone() + issuenzb = myDB.action("SELECT * from issues WHERE IssueID=? AND ComicName NOT NULL", [issueid]).fetchone() + if issuenzb is not None: + logger.info("issuenzb found.") if helpers.is_number(issueid): sandwich = int(issuenzb['IssueID']) else: + logger.info("issuenzb not found.") #if it's non-numeric, it contains a 'G' at the beginning indicating it's a multi-volume #using GCD data. Set sandwich to 1 so it will bypass and continue post-processing. if 'S' in issueid: @@ -418,8 +422,8 @@ class PostProcessor(object): return self.log - comicid = issuenzb['ComicID'] - issuenumOG = issuenzb['Issue_Number'] + comicid = issuenzb['ComicID'] + issuenumOG = issuenzb['Issue_Number'] if self.nzb_name == 'Manual Run': #loop through the hits here. @@ -440,10 +444,15 @@ class PostProcessor(object): extensions = ('.cbr', '.cbz') myDB = db.DBConnection() comicnzb = myDB.action("SELECT * from comics WHERE comicid=?", [comicid]).fetchone() - issuenzb = myDB.action("SELECT * from issues WHERE issueid=?", [issueid]).fetchone() + issuenzb = myDB.action("SELECT * from issues WHERE issueid=? AND comicid=? AND ComicName NOT NULL", [issueid,comicid]).fetchone() + print "issueid: " + str(issueid) + print "issuenumOG: " + str(issuenumOG) if issuenzb is None: - issuenzb = myDB.action("SELECT * from annuals WHERE issueid=?", [issueid]).fetchone() + print "chk1" + issuenzb = myDB.action("SELECT * from annuals WHERE issueid=? and comicid=?", [issueid,comicid]).fetchone() + print "chk2" annchk = "yes" + print issuenzb #issueno = str(issuenum).split('.')[0] #new CV API - removed all decimals...here we go AGAIN! 
issuenum = issuenzb['Issue_Number'] @@ -533,7 +542,7 @@ class PostProcessor(object): if annchk == "yes": prettycomiss = "Annual " + str(prettycomiss) - + self._log("Annual detected.") logger.fdebug("Pretty Comic Issue is : " + str(prettycomiss)) issueyear = issuenzb['IssueDate'][:4] self._log("Issue Year: " + str(issueyear), logger.DEBUG) diff --git a/mylar/__init__.py b/mylar/__init__.py index 51994852..243b528a 100755 --- a/mylar/__init__.py +++ b/mylar/__init__.py @@ -29,7 +29,7 @@ from lib.configobj import ConfigObj import cherrypy -from mylar import versioncheck, logger, version +from mylar import versioncheck, logger, version, rsscheck FULL_PATH = None PROG_DIR = None @@ -118,6 +118,7 @@ REPLACE_CHAR = None ZERO_LEVEL = False ZERO_LEVEL_N = None LOWERCASE_FILENAME = False +IGNORE_HAVETOTAL = False USE_MINSIZE = False MINSIZE = 10 USE_MAXSIZE = False @@ -223,6 +224,27 @@ BIGGIE_PUB = 55 ENABLE_META = 0 CMTAGGER_PATH = None +ENABLE_RSS = 1 +RSS_CHECKINTERVAL = 20 +RSS_LASTRUN = None + +ENABLE_TORRENTS = 0 +TORRENT_LOCAL = 0 +LOCAL_WATCHDIR = None +TORRENT_SEEDBOX = 0 +SEEDBOX_HOST = None +SEEDBOX_PORT = None +SEEDBOX_USER = None +SEEDBOX_PASS = None +SEEDBOX_WATCHDIR = None + +ENABLE_TORRENT_SEARCH = 0 +ENABLE_KAT = 0 +ENABLE_CBT = 0 +CBT_PASSKEY = None + + + def CheckSection(sec): """ Check if INI section exists, if not create it """ try: @@ -281,7 +303,9 @@ def initialize(): USE_NZBGET, NZBGET_HOST, NZBGET_PORT, NZBGET_USERNAME, NZBGET_PASSWORD, NZBGET_CATEGORY, NZBGET_PRIORITY, NZBSU, NZBSU_APIKEY, DOGNZB, DOGNZB_APIKEY, NZBX,\ NEWZNAB, NEWZNAB_NAME, NEWZNAB_HOST, NEWZNAB_APIKEY, NEWZNAB_ENABLED, EXTRA_NEWZNABS, NEWZNAB_EXTRA, \ RAW, RAW_PROVIDER, RAW_USERNAME, RAW_PASSWORD, RAW_GROUPS, EXPERIMENTAL, \ - ENABLE_META, CMTAGGER_PATH, INDIE_PUB, BIGGIE_PUB, \ + ENABLE_META, CMTAGGER_PATH, INDIE_PUB, BIGGIE_PUB, IGNORE_HAVETOTAL, \ + ENABLE_TORRENTS, TORRENT_LOCAL, LOCAL_WATCHDIR, TORRENT_SEEDBOX, SEEDBOX_HOST, SEEDBOX_PORT, SEEDBOX_USER, SEEDBOX_PASS, SEEDBOX_WATCHDIR, \ + ENABLE_RSS, RSS_CHECKINTERVAL, RSS_LASTRUN, ENABLE_TORRENT_SEARCH, ENABLE_KAT, ENABLE_CBT, CBT_PASSKEY, \ PROWL_ENABLED, PROWL_PRIORITY, PROWL_KEYS, PROWL_ONSNATCH, NMA_ENABLED, NMA_APIKEY, NMA_PRIORITY, NMA_ONSNATCH, PUSHOVER_ENABLED, PUSHOVER_PRIORITY, PUSHOVER_APIKEY, PUSHOVER_USERKEY, PUSHOVER_ONSNATCH, LOCMOVE, NEWCOM_DIR, FFTONEWCOM_DIR, \ PREFERRED_QUALITY, MOVE_FILES, RENAME_FILES, LOWERCASE_FILENAMES, USE_MINSIZE, MINSIZE, USE_MAXSIZE, MAXSIZE, CORRECT_METADATA, FOLDER_FORMAT, FILE_FORMAT, REPLACE_CHAR, REPLACE_SPACES, ADD_TO_CSV, CVINFO, LOG_LEVEL, POST_PROCESSING, SEARCH_DELAY, GRABBAG_DIR, READ2FILENAME, STORYARCDIR, CVURL, CVAPIFIX, \ COMIC_LOCATION, QUAL_ALTVERS, QUAL_SCANNER, QUAL_TYPE, QUAL_QUALITY, ENABLE_EXTRA_SCRIPTS, EXTRA_SCRIPTS, ENABLE_PRE_SCRIPTS, PRE_SCRIPTS, PULLNEW, COUNT_ISSUES, COUNT_HAVES, COUNT_COMICS, SYNO_FIX, CHMOD_FILE, CHMOD_DIR, ANNUALS_ON, CV_ONLY, CV_ONETIMER, WEEKFOLDER @@ -298,6 +322,7 @@ def initialize(): CheckSection('Raw') CheckSection('Experimental') CheckSection('Newznab') + CheckSection('Torrents') # Set global variables based on config file or use defaults try: HTTP_PORT = check_setting_int(CFG, 'General', 'http_port', 8090) @@ -355,6 +380,7 @@ def initialize(): ZERO_LEVEL = bool(check_setting_int(CFG, 'General', 'zero_level', 0)) ZERO_LEVEL_N = check_setting_str(CFG, 'General', 'zero_level_n', '') LOWERCASE_FILENAMES = bool(check_setting_int(CFG, 'General', 'lowercase_filenames', 0)) + IGNORE_HAVETOTAL = bool(check_setting_int(CFG, 'General', 'ignore_havetotal', 0)) 
SYNO_FIX = bool(check_setting_int(CFG, 'General', 'syno_fix', 0)) SEARCH_DELAY = check_setting_int(CFG, 'General', 'search_delay', 1) GRABBAG_DIR = check_setting_str(CFG, 'General', 'grabbag_dir', '') @@ -423,6 +449,25 @@ def initialize(): INDIE_PUB = check_setting_str(CFG, 'General', 'indie_pub', '75') BIGGIE_PUB = check_setting_str(CFG, 'General', 'biggie_pub', '55') + ENABLE_RSS = bool(check_setting_int(CFG, 'General', 'enable_rss', 1)) + RSS_CHECKINTERVAL = check_setting_str(CFG, 'General', 'rss_checkinterval', '20') + RSS_LASTRUN = check_setting_str(CFG, 'General', 'rss_lastrun', '') + + ENABLE_TORRENTS = bool(check_setting_int(CFG, 'Torrents', 'enable_torrents', 0)) + TORRENT_LOCAL = bool(check_setting_int(CFG, 'Torrents', 'torrent_local', 0)) + LOCAL_WATCHDIR = check_setting_str(CFG, 'Torrents', 'local_watchdir', '') + TORRENT_SEEDBOX = bool(check_setting_int(CFG, 'Torrents', 'torrent_seedbox', 0)) + SEEDBOX_HOST = check_setting_str(CFG, 'Torrents', 'seedbox_host', '') + SEEDBOX_PORT = check_setting_str(CFG, 'Torrents', 'seedbox_port', '') + SEEDBOX_USER = check_setting_str(CFG, 'Torrents', 'seedbox_user', '') + SEEDBOX_PASS = check_setting_str(CFG, 'Torrents', 'seedbox_pass', '') + SEEDBOX_WATCHDIR = check_setting_str(CFG, 'Torrents', 'seedbox_watchdir', '') + + ENABLE_TORRENT_SEARCH = bool(check_setting_int(CFG, 'Torrents', 'enable_torrent_search', 0)) + ENABLE_KAT = bool(check_setting_int(CFG, 'Torrents', 'enable_kat', 0)) + ENABLE_CBT = bool(check_setting_int(CFG, 'Torrents', 'enable_cbt', 0)) + CBT_PASSKEY = check_setting_str(CFG, 'Torrents', 'cbt_passkey', '') + USE_SABNZBD = bool(check_setting_int(CFG, 'SABnzbd', 'use_sabnzbd', 0)) SAB_HOST = check_setting_str(CFG, 'SABnzbd', 'sab_host', '') SAB_USERNAME = check_setting_str(CFG, 'SABnzbd', 'sab_username', '') @@ -469,16 +514,41 @@ def initialize(): NEWZNAB_HOST = check_setting_str(CFG, 'Newznab', 'newznab_host', '') NEWZNAB_APIKEY = check_setting_str(CFG, 'Newznab', 'newznab_apikey', '') NEWZNAB_ENABLED = bool(check_setting_int(CFG, 'Newznab', 'newznab_enabled', 1)) + NEWZNAB_NAME = NEWZNAB_HOST + if CONFIG_VERSION == '4': + NEWZNAB_NAME = check_setting_str(CFG, 'Newznab', 'newznab_name', '') + + # this gets nasty + # if configv is != 4, then the NewznabName doesn't exist so we need to create and add it and + # then rewrite + # if configv == 4, Newznab name exists and let it go through.... # Need to pack the extra newznabs back into a list of tuples flattened_newznabs = check_setting_str(CFG, 'Newznab', 'extra_newznabs', [], log=False) - EXTRA_NEWZNABS = list(itertools.izip(*[itertools.islice(flattened_newznabs, i, None, 3) for i in range(3)])) + if CONFIG_VERSION == '4': + EN_NUM = 4 #EN_NUM is the number of iterations of itertools to use + else: + EN_NUM = 3 + + EXTRA_NEWZNABS = list(itertools.izip(*[itertools.islice(flattened_newznabs, i, None, EN_NUM) for i in range(EN_NUM)])) + + #if ConfigV3 add the nzb_name to it.. + if CONFIG_VERSION != '4': + ENABS = [] + for en in EXTRA_NEWZNABS: + #set newznabname to newznab address initially so doesn't bomb. + ENABS.append((en[0], en[0], en[1], en[2])) + #now we hammer the EXTRA_NEWZNABS with the corrected version + EXTRA_NEWZNABS = ENABS + #update the configV and write the config. 
+ CONFIG_VERSION = '4' + config_write() #to counteract the loss of the 1st newznab entry because of a switch, let's rewrite to the tuple if NEWZNAB_HOST and CONFIG_VERSION: - EXTRA_NEWZNABS.append((NEWZNAB_HOST, NEWZNAB_APIKEY, int(NEWZNAB_ENABLED))) + EXTRA_NEWZNABS.append((NEWZNAB_NAME, NEWZNAB_HOST, NEWZNAB_APIKEY, int(NEWZNAB_ENABLED))) # Need to rewrite config here and bump up config version - CONFIG_VERSION = '3' + CONFIG_VERSION = '4' config_write() # update folder formats in the config & bump up config version @@ -734,6 +804,7 @@ def config_write(): new_config['General']['zero_level'] = int(ZERO_LEVEL) new_config['General']['zero_level_n'] = ZERO_LEVEL_N new_config['General']['lowercase_filenames'] = int(LOWERCASE_FILENAMES) + new_config['General']['ignore_havetotal'] = int(IGNORE_HAVETOTAL) new_config['General']['syno_fix'] = int(SYNO_FIX) new_config['General']['search_delay'] = SEARCH_DELAY new_config['General']['grabbag_dir'] = GRABBAG_DIR @@ -761,6 +832,27 @@ def config_write(): new_config['General']['indie_pub'] = INDIE_PUB new_config['General']['biggie_pub'] = BIGGIE_PUB + new_config['General']['enable_rss'] = int(ENABLE_RSS) + new_config['General']['rss_checkinterval'] = RSS_CHECKINTERVAL + new_config['General']['rss_lastrun'] = RSS_LASTRUN + + new_config['Torrents'] = {} + new_config['Torrents']['enable_torrents'] = int(ENABLE_TORRENTS) + new_config['Torrents']['torrent_local'] = int(TORRENT_LOCAL) + new_config['Torrents']['local_watchdir'] = LOCAL_WATCHDIR + new_config['Torrents']['torrent_seedbox'] = int(TORRENT_SEEDBOX) + new_config['Torrents']['seedbox_host'] = SEEDBOX_HOST + new_config['Torrents']['seedbox_port'] = SEEDBOX_PORT + new_config['Torrents']['seedbox_user'] = SEEDBOX_USER + new_config['Torrents']['seedbox_pass'] = SEEDBOX_PASS + new_config['Torrents']['seedbox_watchdir'] = SEEDBOX_WATCHDIR + + new_config['Torrents']['enable_torrent_search'] = int(ENABLE_TORRENT_SEARCH) + new_config['Torrents']['enable_kat'] = int(ENABLE_KAT) + new_config['Torrents']['enable_cbt'] = int(ENABLE_CBT) + new_config['Torrents']['cbt_passkey'] = CBT_PASSKEY + + new_config['SABnzbd'] = {} new_config['SABnzbd']['use_sabnzbd'] = int(USE_SABNZBD) new_config['SABnzbd']['sab_host'] = SAB_HOST @@ -847,6 +939,13 @@ def start(): SCHED.add_interval_job(updater.dbUpdate, hours=48) SCHED.add_interval_job(search.searchforissue, minutes=SEARCH_INTERVAL) + + #initiate startup rss feeds for torrents/nzbs here... + SCHED.add_interval_job(rsscheck.tehMain, minutes=int(RSS_CHECKINTERVAL)) + + logger.info("Initiating startup-RSS feed checks.") + rsscheck.tehMain() + #SCHED.add_interval_job(librarysync.libraryScan, minutes=LIBRARYSCAN_INTERVAL) #weekly pull list gets messed up if it's not populated first, so let's populate it then set the scheduler. 
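One note on the newznab config migration above: the flat `extra_newznabs` value read back from config.ini is regrouped into fixed-width tuples by zipping EN_NUM offset slices of the same list. A minimal sketch of that round trip, with made-up provider values (EN_NUM = 4 corresponds to the version-4 tuple of name, host, apikey, enabled):

```python
import itertools

# hypothetical flattened 'extra_newznabs' value as read back from config.ini
flattened = ['SiteA', 'http://a.example', 'key-a', '1',
             'SiteB', 'http://b.example', 'key-b', '0']
EN_NUM = 4  # values per newznab entry under config version 4

# islice(flattened, i, None, EN_NUM) yields every EN_NUM-th element starting
# at offset i; izip of the four offset streams regroups the list into 4-tuples.
entries = list(itertools.izip(*[itertools.islice(flattened, i, None, EN_NUM)
                                for i in range(EN_NUM)]))
# [('SiteA', 'http://a.example', 'key-a', '1'),
#  ('SiteB', 'http://b.example', 'key-b', '0')]
```

The version-3 upgrade path widens each old (host, apikey, enabled) tuple by reusing the host as the display name, which is why ENABS appends (en[0], en[0], en[1], en[2]).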
@@ -877,14 +976,16 @@ def dbcheck(): c.execute('CREATE TABLE IF NOT EXISTS comics (ComicID TEXT UNIQUE, ComicName TEXT, ComicSortName TEXT, ComicYear TEXT, DateAdded TEXT, Status TEXT, IncludeExtras INTEGER, Have INTEGER, Total INTEGER, ComicImage TEXT, ComicPublisher TEXT, ComicLocation TEXT, ComicPublished TEXT, LatestIssue TEXT, LatestDate TEXT, Description TEXT, QUALalt_vers TEXT, QUALtype TEXT, QUALscanner TEXT, QUALquality TEXT, LastUpdated TEXT, AlternateSearch TEXT, UseFuzzy TEXT, ComicVersion TEXT, SortOrder INTEGER)') c.execute('CREATE TABLE IF NOT EXISTS issues (IssueID TEXT, ComicName TEXT, IssueName TEXT, Issue_Number TEXT, DateAdded TEXT, Status TEXT, Type TEXT, ComicID, ArtworkURL Text, ReleaseDate TEXT, Location TEXT, IssueDate TEXT, Int_IssueNumber INT, ComicSize TEXT)') c.execute('CREATE TABLE IF NOT EXISTS snatched (IssueID TEXT, ComicName TEXT, Issue_Number TEXT, Size INTEGER, DateAdded TEXT, Status TEXT, FolderName TEXT, ComicID TEXT, Provider TEXT)') - c.execute('CREATE TABLE IF NOT EXISTS upcoming (ComicName TEXT, IssueNumber TEXT, ComicID TEXT, IssueID TEXT, IssueDate TEXT, Status TEXT)') + c.execute('CREATE TABLE IF NOT EXISTS upcoming (ComicName TEXT, IssueNumber TEXT, ComicID TEXT, IssueID TEXT, IssueDate TEXT, Status TEXT, DisplayComicName TEXT)') c.execute('CREATE TABLE IF NOT EXISTS nzblog (IssueID TEXT, NZBName TEXT, SARC TEXT)') c.execute('CREATE TABLE IF NOT EXISTS weekly (SHIPDATE text, PUBLISHER text, ISSUE text, COMIC VARCHAR(150), EXTRA text, STATUS text)') # c.execute('CREATE TABLE IF NOT EXISTS sablog (nzo_id TEXT, ComicName TEXT, ComicYEAR TEXT, ComicIssue TEXT, name TEXT, nzo_complete TEXT)') c.execute('CREATE TABLE IF NOT EXISTS importresults (impID TEXT, ComicName TEXT, ComicYear TEXT, Status TEXT, ImportDate TEXT, ComicFilename TEXT, ComicLocation TEXT, WatchMatch TEXT)') c.execute('CREATE TABLE IF NOT EXISTS readlist (IssueID TEXT, ComicName TEXT, Issue_Number TEXT, Status TEXT, DateAdded TEXT, Location TEXT, inCacheDir TEXT, SeriesYear TEXT, ComicID TEXT)') c.execute('CREATE TABLE IF NOT EXISTS readinglist(StoryArcID TEXT, ComicName TEXT, IssueNumber TEXT, SeriesYear TEXT, IssueYEAR TEXT, StoryArc TEXT, TotalIssues TEXT, Status TEXT, inCacheDir TEXT, Location TEXT, IssueArcID TEXT, ReadingOrder INT, IssueID TEXT)') - c.execute('CREATE TABLE IF NOT EXISTS annuals (IssueID TEXT, Issue_Number TEXT, IssueName TEXT, IssueDate TEXT, Status TEXT, ComicID TEXT, GCDComicID TEXT, Location TEXT, ComicSize TEXT, Int_IssueNumber INT)') + c.execute('CREATE TABLE IF NOT EXISTS annuals (IssueID TEXT, Issue_Number TEXT, IssueName TEXT, IssueDate TEXT, Status TEXT, ComicID TEXT, GCDComicID TEXT, Location TEXT, ComicSize TEXT, Int_IssueNumber INT, ComicName TEXT)') + c.execute('CREATE TABLE IF NOT EXISTS rssdb (Title TEXT UNIQUE, Link TEXT, Pubdate TEXT, Site TEXT, Size TEXT)') + conn.commit c.close #new @@ -1030,11 +1131,28 @@ def dbcheck(): except: c.execute('ALTER TABLE annuals ADD COLUMN Int_IssueNumber INT') + try: + c.execute('SELECT ComicName from annuals') + annual_update = "no" + except: + c.execute('ALTER TABLE annuals ADD COLUMN ComicName TEXT') + annual_update = "yes" + + if annual_update == "yes": + logger.info("Updating Annuals table for new fields - one-time update.") + helpers.annual_update() + try: c.execute('SELECT Provider from snatched') except: c.execute('ALTER TABLE snatched ADD COLUMN Provider TEXT') + try: + c.execute('SELECT DisplayComicName from upcoming') + except: + c.execute('ALTER TABLE upcoming ADD COLUMN DisplayComicName TEXT') + + 
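Each schema change in dbcheck() above uses the same probe-then-alter idiom: SELECT the new column and, when SQLite rejects it, ALTER the table. A self-contained sketch of the pattern (database path hypothetical; dbcheck() itself uses a bare except, and sqlite3.OperationalError is the narrower equivalent):

```python
import sqlite3

conn = sqlite3.connect('mylar.db')  # hypothetical path
c = conn.cursor()
try:
    # probe: raises OperationalError while the column is still missing
    c.execute('SELECT DisplayComicName from upcoming')
except sqlite3.OperationalError:
    c.execute('ALTER TABLE upcoming ADD COLUMN DisplayComicName TEXT')
conn.commit()
```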
#if it's prior to Wednesday, the issue counts will be inflated by one as the online db's everywhere #prepare for the next 'new' release of a series. It's caught in updater.py, so let's just store the #value in the sql so we can display it in the details screen for everyone to wonder at. diff --git a/mylar/cmtagmylar.py b/mylar/cmtagmylar.py index 373b850d..bb033ac5 100644 --- a/mylar/cmtagmylar.py +++ b/mylar/cmtagmylar.py @@ -7,6 +7,7 @@ import sys import glob import platform import shutil +import time import zipfile import subprocess import mylar @@ -14,36 +15,42 @@ import mylar from mylar import logger def run (dirName, nzbName=None, issueid=None, manual=None, filename=None): - #print "manual:" + manual - #print "filename: " + filename logger.fdebug("dirName:" + dirName) - #print "issueid:" + issueid ## Set the directory in which comictagger and other external commands are located - IMPORTANT - ## # ( User may have to modify, depending on their setup, but these are some guesses for now ) - #check for dependencies here - configparser - try: - import configparser - except ImportError: - logger.fdebug("configparser not found on system. Please install manually in order to write metadata") - logger.fdebug("continuing with PostProcessing, but I'm not using metadata.") - return "fail" - if platform.system() == "Windows": (x, y) = platform.architecture() if x == "64bit": comictagger_cmd = os.path.join(mylar.CMTAGGER_PATH, 'comictagger.exe') - # http://www.win-rar.com/download.html else: comictagger_cmd = os.path.join(mylar.CMTAGGER_PATH, 'comictagger.exe') unrar_cmd = "C:\Program Files\WinRAR\UnRAR.exe" + + # test for UnRAR + if not os.path.isfile(unrar_cmd): + unrar_cmd = "C:\Program Files (x86)\WinRAR\UnRAR.exe" + if not os.path.isfile(unrar_cmd): + logger.fdebug("Unable to locate UnRAR.exe - make sure it's installed.") + logger.fdebug("Aborting meta-tagging.") + return "fail" + elif platform.system() == "Darwin": #Mac OS X comictagger_cmd = os.path.join(mylar.CMTAGGER_PATH) unrar_cmd = "/usr/local/bin/unrar" else: + #for the 'nix + #check for dependencies here - configparser + try: + import configparser + except ImportError: + logger.fdebug("configparser not found on system. Please install manually in order to write metadata") + logger.fdebug("continuing with PostProcessing, but I'm not using metadata.") + return "fail" + #set this to the lib path (ie. 
'/lib') comictagger_cmd = os.path.join(mylar.CMTAGGER_PATH, 'comictagger.py') unrar_cmd = "/usr/bin/unrar" @@ -179,15 +186,16 @@ def run (dirName, nzbName=None, issueid=None, manual=None, filename=None): if filename.endswith('.cbz'): f = os.path.join( comicpath, filename ) - try: - rar_test_cmd_output = "is not RAR archive" #default, in case of error - rar_test_cmd_output = subprocess.check_output( [ unrar_cmd, "t", f ] ) - except: - pass - if not "is not RAR archive" in rar_test_cmd_output: - base = os.path.splitext( f )[0] - shutil.move( f, base + ".cbr" ) - logger.fdebug("{0}: renaming {1} to be a cbr".format( scriptname, os.path.basename( f ) )) + if os.path.isfile( f ): + try: + rar_test_cmd_output = "is not RAR archive" #default, in case of error + rar_test_cmd_output = subprocess.check_output( [ unrar_cmd, "t", f ] ) + except: + pass + if not "is not RAR archive" in rar_test_cmd_output: + base = os.path.splitext( f )[0] + shutil.move( f, base + ".cbr" ) + logger.fdebug("{0}: renaming {1} to be a cbr".format( scriptname, os.path.basename( f ) )) # Now rename all CBR files to RAR if filename.endswith('.cbr'): @@ -219,7 +227,7 @@ def run (dirName, nzbName=None, issueid=None, manual=None, filename=None): ## Changes zip to cbz f = os.path.join( comicpath, os.path.splitext(filename)[0] + ".zip" ) - print f + print "zipfile" + f try: with open(f): pass except: @@ -231,7 +239,12 @@ def run (dirName, nzbName=None, issueid=None, manual=None, filename=None): else: nfilename = filename - file_dir, file_n = os.path.split(nfilename) + if os.path.isfile( nfilename): + file_dir, file_n = os.path.split(nfilename) + else: + #remove the IssueID from the path + file_dir = re.sub(issueid, '', comicpath) + file_n = os.path.split(nfilename)[1] logger.fdebug("converted directory: " + str(file_dir)) logger.fdebug("converted filename: " + str(file_n)) logger.fdebug("destination path: " + os.path.join(dirName,file_n)) @@ -249,7 +262,19 @@ def run (dirName, nzbName=None, issueid=None, manual=None, filename=None): logger.fdebug("Unable to move - file already exists.") else: shutil.move( nfilename, os.path.join(os.path.abspath(dirName),file_n)) - shutil.rmtree( comicpath ) + logger.fdebug("Sucessfully moved file from temporary path.") + i = 0 + + while i < 10: + try: + shutil.rmtree( comicpath ) + except: + time.sleep(.1) + else: + return os.path.join(os.path.abspath(dirName), file_n) + i+=1 + + logger.fdebug("Failed to remove temporary path : " + str(comicpath)) return os.path.join(os.path.abspath(dirName),file_n) diff --git a/mylar/filechecker.py b/mylar/filechecker.py index 9ba63d6c..acdbbbf7 100755 --- a/mylar/filechecker.py +++ b/mylar/filechecker.py @@ -57,6 +57,7 @@ def listFiles(dir,watchcomic,AlternateSearch=None): for item in os.listdir(basedir): + if item == 'cover.jpg' or item == 'cvinfo': continue #print item #subname = os.path.join(basedir, item) subname = item @@ -104,6 +105,8 @@ def listFiles(dir,watchcomic,AlternateSearch=None): logger.fdebug("possible negative issue detected.") nonocount = nonocount + subcnt - 1 detneg = "yes" + if '-' in watchcomic and i < len(watchcomic): + logger.fdebug("- appears in series title.") i+=1 if detneg == "no": subname = re.sub(str(nono), ' ', subname) @@ -131,25 +134,42 @@ def listFiles(dir,watchcomic,AlternateSearch=None): #subname = re.sub('[\_\#\,\/\:\;\.\-\!\$\%\+\'\?\@]',' ', subname) modwatchcomic = re.sub('[\_\#\,\/\:\;\.\-\!\$\%\'\?\@]', ' ', u_watchcomic) detectand = False + detectthe = False modwatchcomic = re.sub('\&', ' and ', modwatchcomic) + if ' 
the ' in modwatchcomic.lower(): + modwatchcomic = re.sub("\\bthe\\b", "", modwatchcomic.lower()) + logger.fdebug("new modwatchcomic: " + str(modwatchcomic)) + detectthe = True modwatchcomic = re.sub('\s+', ' ', str(modwatchcomic)).strip() if '&' in subname: subname = re.sub('\&', ' and ', subname) detectand = True + if ' the ' in subname.lower(): + subname = re.sub("\\bthe\\b", "", subname.lower()) + detectthe = True subname = re.sub('\s+', ' ', str(subname)).strip() + + AS_Alt = [] if AlternateSearch is not None: - #same = encode. - u_altsearchcomic = AlternateSearch.encode('ascii', 'ignore').strip() - altsearchcomic = re.sub('[\_\#\,\/\:\;\.\-\!\$\%\+\'\?\@]', ' ', u_altsearchcomic) - altseachcomic = re.sub('\&', ' and ', altsearchcomic) - altsearchcomic = re.sub('\s+', ' ', str(altsearchcomic)).strip() + chkthealt = AlternateSearch.split('##') + if chkthealt == 0: + AS_Alternate = AlternateSearch + for calt in chkthealt: + AS_Alternate = re.sub('##','',calt) + #same = encode. + u_altsearchcomic = AS_Alternate.encode('ascii', 'ignore').strip() + altsearchcomic = re.sub('[\_\#\,\/\:\;\.\-\!\$\%\+\'\?\@]', ' ', u_altsearchcomic) + altseachcomic = re.sub('\&', ' and ', altsearchcomic) + altsearchcomic = re.sub('\s+', ' ', str(altsearchcomic)).strip() + AS_Alt.append(altsearchcomic) else: #create random characters so it will never match. altsearchcomic = "127372873872871091383 abdkhjhskjhkjdhakajhf" + AS_Alt.append(altsearchcomic) #if '_' in subname: # subname = subname.replace('_', ' ') logger.fdebug("watchcomic:" + str(modwatchcomic) + " ..comparing to found file: " + str(subname)) - if modwatchcomic.lower() in subname.lower() or altsearchcomic.lower() in subname.lower(): + if modwatchcomic.lower() in subname.lower() or any(x in subname.lower() for x in AS_Alt):#altsearchcomic.lower() in subname.lower(): comicpath = os.path.join(basedir, item) logger.fdebug( modwatchcomic + " - watchlist match on : " + comicpath) comicsize = os.path.getsize(comicpath) @@ -160,52 +180,55 @@ def listFiles(dir,watchcomic,AlternateSearch=None): if 'annual' in subname.lower(): logger.fdebug("Annual detected - proceeding") jtd_len = subname.lower().find('annual') + cchk = modwatchcomic else: if modwatchcomic.lower() in subname.lower(): - logger.fdebug("we should remove " + str(nonocount) + " characters") + cchk = modwatchcomic + else: + cchk_ls = [x for x in AS_Alt if x in subname.lower()] + cchk = cchk_ls[0] + #print "something: " + str(cchk) - findtitlepos = subname.find('-') - if charpos != 0: - logger.fdebug("detected " + str(len(charpos)) + " special characters") - i=0 - while (i < len(charpos)): - for i,j in enumerate(charpos): - #print i,j - #print subname - #print "digitchk: " + str(subname[j:]) - if j >= len(subname): - logger.fdebug("end reached. 
ignoring remainder.") - break - elif subname[j:] == '-': - if i <= len(subname) and subname[i+1].isdigit(): - logger.fdebug("negative issue detected.") - #detneg = "yes" - elif j > findtitlepos: - if subname[j:] == '#': - if subname[i+1].isdigit(): - logger.fdebug("# detected denoting issue#, ignoring.") - else: - nonocount-=1 - else: - logger.fdebug("special character appears outside of title - ignoring @ position: " + str(charpos[i])) + logger.fdebug("we should remove " + str(nonocount) + " characters") + + findtitlepos = subname.find('-') + if charpos != 0: + logger.fdebug("detected " + str(len(charpos)) + " special characters") + i=0 + while (i < len(charpos)): + for i,j in enumerate(charpos): + #print i,j + #print subname + #print "digitchk: " + str(subname[j:]) + if j >= len(subname): + logger.fdebug("end reached. ignoring remainder.") + break + elif subname[j:] == '-': + if i <= len(subname) and subname[i+1].isdigit(): + logger.fdebug("negative issue detected.") + #detneg = "yes" + elif j > findtitlepos: + if subname[j:] == '#': + if subname[i+1].isdigit(): + logger.fdebug("# detected denoting issue#, ignoring.") + else: nonocount-=1 - i+=1 + elif '-' in watchcomic and i < len(watchcomic): + logger.fdebug("- appears in series title, ignoring.") + else: + logger.fdebug("special character appears outside of title - ignoring @ position: " + str(charpos[i])) + nonocount-=1 + i+=1 - #remove versioning here - if volrem != None: - jtd_len = len(modwatchcomic) + len(volrem) + nonocount + 1 #1 is to account for space btwn comic and vol # - else: - jtd_len = len(modwatchcomic) + nonocount - if detectand: - jtd_len = jtd_len - 2 # char substitution diff between & and 'and' = 2 chars - elif altsearchcomic.lower() in subname.lower(): - #remove versioning here - if volrem != None: - jtd_len = len(altsearchcomic) + len(volrem) + nonocount + 1 - else: - jtd_len = len(altsearchcomic) + nonocount - if detectand: - jtd_len = jtd_len - 2 + #remove versioning here + if volrem != None: + jtd_len = len(cchk) + len(volrem) + nonocount + 1 #1 is to account for space btwn comic and vol # + else: + jtd_len = len(cchk) + nonocount + if detectand: + jtd_len = jtd_len - 2 # char substitution diff between & and 'and' = 2 chars + if detectthe: + jtd_len = jtd_len - 3 # char subsitiution diff between 'the' and '' = 3 chars justthedigits = item[jtd_len:] diff --git a/mylar/findcomicfeed.py b/mylar/findcomicfeed.py index 70ef48d5..cfd0c5c8 100755 --- a/mylar/findcomicfeed.py +++ b/mylar/findcomicfeed.py @@ -81,7 +81,7 @@ def Startit(searchName, searchIssue, searchYear, ComicVersion): except_list=['releases', 'gold line', 'distribution', '0-day', '0 day'] for title, link in keyPair.items(): - logger.fdebug("titlesplit: " + str(title.split("\""))) + #logger.fdebug("titlesplit: " + str(title.split("\""))) splitTitle = title.split("\"") for subs in splitTitle: diff --git a/mylar/ftpsshup.py b/mylar/ftpsshup.py new file mode 100644 index 00000000..0cc54b00 --- /dev/null +++ b/mylar/ftpsshup.py @@ -0,0 +1,48 @@ +#!/usr/local/bin/python + +#import paramiko +import os + +import mylar +from mylar import logger + +def putfile(localpath,file): #localpath=full path to .torrent (including filename), file=filename of torrent + + try: + import paramiko + except ImportError: + logger.fdebug("paramiko not found on system. 
Please install manually in order to use seedbox option")
+ logger.fdebug("get it at https://github.com/paramiko/paramiko")
+ logger.fdebug("to install: python setup.py install")
+ logger.fdebug("aborting send.")
+ return "fail"
+
+ host = mylar.SEEDBOX_HOST
+ port = int(mylar.SEEDBOX_PORT) #this is usually 22
+ transport = paramiko.Transport((host, port))
+
+ logger.fdebug("Sending file: " + str(file))
+ logger.fdebug("destination: " + str(host))
+ logger.fdebug("Using SSH port : " + str(port))
+ password = mylar.SEEDBOX_PASS
+ username = mylar.SEEDBOX_USER
+ transport.connect(username = username, password = password)
+
+ sftp = paramiko.SFTPClient.from_transport(transport)
+
+ import sys
+ if file[-7:] != "torrent":
+ file += ".torrent"
+ rempath = os.path.join(mylar.SEEDBOX_WATCHDIR, file) #this will default to the OS running mylar for slashes.
+ logger.fdebug("remote path set to " + str(rempath))
+ logger.fdebug("local path set to " + str(localpath))
+ sftp.put(localpath, rempath)
+
+ sftp.close()
+ transport.close()
+ logger.fdebug("Upload complete to seedbox.")
+ return "pass"
+
+if __name__ == '__main__':
+ import sys
+ putfile(sys.argv[1], os.path.basename(sys.argv[1]))
+
diff --git a/mylar/helpers.py b/mylar/helpers.py
index d635dc9f..5ee8a589 100755
--- a/mylar/helpers.py
+++ b/mylar/helpers.py
@@ -17,6 +17,7 @@ import time
 from operator import itemgetter
 import datetime
 import re
+import platform
 import itertools
 import os
 import mylar
@@ -239,7 +240,7 @@ def decimal_issue(iss):
 return deciss, dec_except
 def rename_param(comicid, comicname, issue, ofilename, comicyear=None, issueid=None):
- from mylar import db, logger
+ import db, logger
 myDB = db.DBConnection()
 print ("comicid: " + str(comicid))
 print ("issue#: " + str(issue))
@@ -684,3 +685,53 @@ def checkthepub(ComicID):
 logger.fdebug("Indie publisher detected - " + str(pubchk['ComicPublisher']))
 return mylar.INDIE_PUB
+
+def annual_update():
+ import db, logger
+ myDB = db.DBConnection()
+ annuallist = myDB.action('SELECT * FROM annuals')
+ if annuallist is None:
+ logger.info("no annuals to update.")
+ return
+
+ cnames = []
+ #populate the ComicName field with the corresponding series name from the comics table.
+ for ann in annuallist:
+ coms = myDB.action('SELECT * FROM comics WHERE ComicID=?', [ann['ComicID']]).fetchone()
+ cnames.append({'ComicID': ann['ComicID'],
+ 'ComicName': coms['ComicName']
+ })
+
+ #write in a separate loop to avoid db locks
+ i=0
+ for cns in cnames:
+ ctrlVal = {"ComicID": cns['ComicID']}
+ newVal = {"ComicName": cns['ComicName']}
+ myDB.upsert("annuals", newVal, ctrlVal)
+ i+=1
+
+ logger.info(str(i) + " series have been updated in the annuals table.")
+ return
+
+def replacetheslash(data):
+ import logger
+ # this is necessary for the cache directory to display properly in IE/FF.
+ # os.path.join will pipe in the '\' in windows, which won't resolve
+ # when viewing through cherrypy - so convert it and voila.
+ if platform.system() == "Windows":
+ slashreplaced = data.replace('\\', '/')
+ else:
+ slashreplaced = data
+ return slashreplaced
+
+def urlretrieve(urlfile, fpath):
+ chunk = 4096
+ f = open(fpath, "w")
+ while 1:
+ data = urlfile.read(chunk)
+ if not data:
+ print "done."
+ break
+ f.write(data)
+ print "Read %s bytes" % len(data)
+ f.close()
+
diff --git a/mylar/importer.py b/mylar/importer.py
index c11d075b..895dd5a8 100755
--- a/mylar/importer.py
+++ b/mylar/importer.py
@@ -218,6 +218,7 @@ def addComictoDB(comicid,mismatch=None,pullupd=None,imported=None,ogcname=None):
 "IssueDate": issdate,
 "IssueName": issname,
 "ComicID": comicid,
+ "ComicName": comic['ComicName'],
 "Status": "Skipped"}
 myDB.upsert("annuals", newVals, newCtrl)
 n+=1
@@ -356,7 +357,8 @@
 urllib.urlretrieve(str(comic['ComicImage']), str(coverfile))
 try:
 with open(str(coverfile)) as f:
- ComicImage = os.path.join('cache',str(comicid) + ".jpg")
+ PRComicImage = os.path.join('cache',str(comicid) + ".jpg")
+ ComicImage = helpers.replacetheslash(PRComicImage)
 #this is for Firefox when outside the LAN...it works, but I don't know how to implement it
 #without breaking the normal flow for inside the LAN (above)
@@ -740,9 +742,9 @@
 # lets' check the pullist for anything at this time as well since we're here.
 # do this for only Present comics....
 if mylar.AUTOWANT_UPCOMING and lastpubdate == 'Present': #and 'Present' in gcdinfo['resultPublished']:
- print ("latestissue: #" + str(latestiss))
+ logger.fdebug("latestissue: #" + str(latestiss))
 chkstats = myDB.action("SELECT * FROM issues WHERE ComicID=? AND Issue_Number=?", [comicid,str(latestiss)]).fetchone()
- print chkstats['Status']
+ logger.fdebug(chkstats['Status'])
 if chkstats['Status'] == 'Skipped' or chkstats['Status'] == 'Wanted' or chkstats['Status'] == 'Snatched':
 logger.info(u"Checking this week's pullist for new issues of " + comic['ComicName'])
 updater.newpullcheck(comic['ComicName'], comicid)
@@ -754,8 +756,8 @@
 logger.info(u"Attempting to grab wanted issues for : " + comic['ComicName'])
 for result in results:
- print "Searching for : " + str(result['Issue_Number'])
- print "Status of : " + str(result['Status'])
+ logger.fdebug("Searching for : " + str(result['Issue_Number']))
+ logger.fdebug("Status of : " + str(result['Status']))
 search.searchforissue(result['IssueID'])
 else:
 logger.info(u"No issues marked as wanted for " + comic['ComicName'])
diff --git a/mylar/rsscheck.py b/mylar/rsscheck.py
new file mode 100755
index 00000000..42f4ca79
--- /dev/null
+++ b/mylar/rsscheck.py
@@ -0,0 +1,519 @@
+#!/usr/bin/python
+
+import os, sys
+import re
+import lib.feedparser as feedparser
+import urllib2
+import datetime
+
+import mylar
+from mylar import db, logger, ftpsshup, helpers
+
+def tehMain():
+ logger.info("RSS Feed Check was last run at : " + str(mylar.RSS_LASTRUN))
+ firstrun = "no"
+ #check the last run of rss to make sure it's not hammering.
+ if mylar.RSS_LASTRUN is None or mylar.RSS_LASTRUN == '' or mylar.RSS_LASTRUN == '0':
+ logger.info("RSS Feed Check First Ever Run.")
+ firstrun = "yes"
+ mins = 0
+ else:
+ c_obj_date = datetime.datetime.strptime(mylar.RSS_LASTRUN, "%Y-%m-%d %H:%M:%S")
+ n_date = datetime.datetime.now()
+ absdiff = abs(n_date - c_obj_date)
+ mins = (absdiff.days * 24 * 60 * 60 + absdiff.seconds) / 60.0 #divide by 3600.0 for hours
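The throttle in tehMain() hinges on that one line of timedelta arithmetic, so here it is in isolation, with hypothetical timestamps:

```python
import datetime

last = datetime.datetime.strptime('2013-02-16 12:00:00', '%Y-%m-%d %H:%M:%S')
now = datetime.datetime(2013, 2, 16, 12, 14, 30)
absdiff = abs(now - last)
# timedelta stores whole days plus leftover seconds; fold both into seconds,
# then divide by 60.0 for minutes (3600.0 would give hours)
mins = (absdiff.days * 24 * 60 * 60 + absdiff.seconds) / 60.0  # 14.5
```

When mins comes in under RSS_CHECKINTERVAL the run bails out, so restarts cannot hammer the feeds.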
+ + if firstrun == "no" and mins < int(mylar.RSS_CHECKINTERVAL): + logger.fdebug("RSS Check has taken place less than the threshold - not initiating at this time.") + return + + mylar.RSS_LASTRUN = helpers.now() + logger.fdebug("Updating RSS Run time to : " + str(mylar.RSS_LASTRUN)) + mylar.config_write() + + #function for looping through nzbs/torrent feeds + if mylar.ENABLE_TORRENTS: + logger.fdebug("[RSS] Initiating Torrent RSS Check.") + if mylar.ENABLE_KAT: + logger.fdebug("[RSS] Initiating Torrent RSS Feed Check on KAT.") + torrents(pickfeed='3') + if mylar.ENABLE_CBT: + logger.fdebug("[RSS] Initiating Torrent RSS Feed Check on CBT.") + torrents(pickfeed='1') + torrents(pickfeed='4') + logger.fdebug("[RSS] Initiating RSS Feed Check for NZB Providers.") + nzbs() + logger.fdebug("[RSS] RSS Feed Check/Update Complete") + logger.fdebug("[RSS] Watchlist Check for new Releases") + #if mylar.ENABLE_TORRENTS: + # if mylar.ENABLE_KAT: + # search.searchforissue(rsscheck='yes') + # if mylar.ENABLE_CBT: + mylar.search.searchforissue(rsscheck='yes') + #nzbcheck here + #nzbs(rsscheck='yes') + logger.fdebug("[RSS] Watchlist Check complete.") + return + +def torrents(pickfeed=None): + if pickfeed is None: + pickfeed = 1 + #else: + # print "pickfeed is " + str(pickfeed) + passkey = mylar.CBT_PASSKEY + if pickfeed == "1": # comicbt rss feed based on followlist + feed = "http://comicbt.com/rss.php?action=browse&passkey=" + str(passkey) + "&type=dl" + elif pickfeed == "2": # kat.ph search + feed = "http://kat.ph/usearch/" + str(seriesname) + "%20category%3Acomics%20seeds%3A1/?rss=1" + elif pickfeed == "3": # kat.ph rss feed + feed = "http://kat.ph/usearch/category%3Acomics%20seeds%3A1/?rss=1" + elif pickfeed == "4": #comicbt follow link + feed = "http://comicbt.com/rss.php?action=follow&passkey=" + str(passkey) + "&type=dl" + elif pickfeed == "5": # comicbt series link +# seriespage = "http://comicbt.com/series.php?passkey=" + str(passkey) + feed = "http://comicbt.com/rss.php?action=series&series=" + str(seriesno) + "&passkey=" + str(passkey) + else: + logger.error("invalid pickfeed denoted...") + return + logger.fdebug("feed #" + str(pickfeed) + " chosen: " + str(feed)) + title = [] + link = [] + description = [] + seriestitle = [] + + if pickfeed == "5": # we need to get the series # first + seriesSearch(seriespage, seriesname) + feedme = feedparser.parse(feed) + i = 0 + + feeddata = [] + + myDB = db.DBConnection() + + for entry in feedme['entries']: + if pickfeed == "2" or pickfeed == "3": + tmpsz = feedme.entries[i].enclosures[0] + feeddata.append({ + 'Site': 'KAT', + 'Title': feedme.entries[i].title, + 'Link': tmpsz['url'], + 'Pubdate': feedme.entries[i].updated, + 'Size': tmpsz['length'] + }) + + #print ("Site: KAT") + #print ("Title: " + str(feeddata[i]['Title'])) + #print ("Link: " + str(feeddata[i]['Link'])) + #print ("pubdate: " + str(feeddata[i]['Pubdate'])) + + elif pickfeed == "1" or pickfeed == "4": +# tmpsz = feedme.entries[i].enclosures[0] + feeddata.append({ + 'Site': 'comicBT', + 'Title': feedme.entries[i].title, + 'Link': feedme.entries[i].link, + 'Pubdate': feedme.entries[i].updated +# 'Size': tmpsz['length'] + }) + #print ("Site: ComicBT") + #print ("Title: " + str(feeddata[i]['Title'])) + #print ("Link: " + str(feeddata[i]['Link'])) + #print ("pubdate: " + str(feeddata[i]['Pubdate'])) + i+=1 + logger.fdebug("there were " + str(i) + " results..") + rssdbupdate(feeddata,i,'torrent') + return + +def nzbs(provider=None): + nzbprovider = [] + nzbp = 0 + if mylar.NZBSU == 1: + 
nzbprovider.append('nzb.su')
+ nzbp+=1
+ if mylar.DOGNZB == 1:
+ nzbprovider.append('dognzb')
+ nzbp+=1
+ # --------
+ # Xperimental
+ if mylar.EXPERIMENTAL == 1:
+ nzbprovider.append('experimental')
+ nzbp+=1
+
+ newznabs = 0
+
+ newznab_hosts = []
+
+ if mylar.NEWZNAB == 1:
+
+ for newznab_host in mylar.EXTRA_NEWZNABS:
+ if newznab_host[3] == '1' or newznab_host[3] == 1:
+ newznab_hosts.append(newznab_host)
+ nzbprovider.append('newznab')
+ newznabs+=1
+ logger.fdebug("newznab name:" + str(newznab_host[0]) + " - enabled: " + str(newznab_host[3]))
+
+ # --------
+ providercount = int(nzbp + newznabs)
+ logger.fdebug("there are : " + str(providercount) + " RSS search providers you have enabled.")
+ nzbpr = providercount - 1
+ if nzbpr < 0:
+ nzbpr = 0
+
+ feeddata = []
+ feedthis = []
+ ft = 0
+ totNum = 0
+ nonexp = "no"
+
+ while (nzbpr >= 0 ):
+ if nzbprovider[nzbpr] == 'experimental':
+ feed = feedparser.parse("http://nzbindex.nl/rss/alt.binaries.comics.dcp/?sort=agedesc&max=50&more=1")
+
+ totNum = len(feed.entries)
+ site = 'experimental'
+ keyPair = {}
+ regList = []
+ entries = []
+ mres = {}
+ countUp = 0
+
+ i = 0
+ for entry in feed['entries']:
+ tmpsz = feed.entries[i].enclosures[0]
+ feeddata.append({
+ 'Site': site,
+ 'Title': feed.entries[i].title,
+ 'Link': feed.entries[i].link,
+ 'Pubdate': feed.entries[i].updated,
+ 'Size': tmpsz['length']
+ })
+# print ("Site:" + str(site))
+# print ("Title:" + str(feed.entries[i].title))
+# print ("Link:" + str(feed.entries[i].link))
+# print ("Pubdate:" + str(feed.entries[i].updated))
+# print ("Size:" + str(tmpsz['length']))
+ i+=1
+ logger.info(str(i) + " results from Experimental feed indexed.")
+ else:
+ if nzbprovider[nzbpr] == 'newznab':
+ for newznab_host in newznab_hosts:
+ feed = newznab_host[1].rstrip() + "/rss?t=7030&dl=1&i=1&r=" + newznab_host[2].rstrip()
+ feedme = feedparser.parse(feed)
+ site = newznab_host[0].rstrip()
+ feedthis.append({"feed": feedme,
+ "site": site})
+ totNum+=len(feedme.entries)
+ ft+=1
+ nonexp = "yes"
+ elif nzbprovider[nzbpr] == 'nzb.su':
+ feed = 'http://nzb.su/rss?t=7030&dl=1&i=1&r=' + mylar.NZBSU_APIKEY
+ feedme = feedparser.parse(feed)
+ site = nzbprovider[nzbpr]
+ feedthis.append({"feed": feedme,
+ "site": site })
+ totNum+=len(feedme.entries)
+ ft+=1
+ nonexp = "yes"
+ elif nzbprovider[nzbpr] == 'dognzb':
+ feed = 'http://dognzb.cr/rss?t=7030&dl=1&i=1&r=' + mylar.DOGNZB_APIKEY
+ feedme = feedparser.parse(feed)
+ site = nzbprovider[nzbpr]
+ ft+=1
+ nonexp = "yes"
+ feedthis.append({"feed": feedme,
+ "site": site })
+ totNum+=len(feedme.entries)
+
+ nzbpr-=1
+
+ i = 0
+ if nonexp == "yes":
+ #print str(ft) + " sites checked. There are " + str(totNum) + " entries to be updated."
+ #print feedme
+ #i = 0
+
+ for ft in feedthis:
+ site = ft['site']
+ #print str(site) + " now being updated..."
+ for entry in ft['feed'].entries: + #print "entry: " + str(entry) + tmpsz = entry.enclosures[0] + feeddata.append({ + 'Site': site, + 'Title': entry.title, + 'Link': entry.link, + 'Pubdate': entry.updated, + 'Size': tmpsz['length'] + }) + +# print ("Site: " + str(feeddata[i]['Site'])) +# print ("Title: " + str(feeddata[i]['Title'])) +# print ("Link: " + str(feeddata[i]['Link'])) +# print ("pubdate: " + str(feeddata[i]['Pubdate'])) +# print ("size: " + str(feeddata[i]['Size'])) + i+=1 + logger.info(str(site) + " : " + str(i) + " entries indexed.") + + rssdbupdate(feeddata,i,'usenet') + return + +def rssdbupdate(feeddata,i,type): + rsschktime = 15 + myDB = db.DBConnection() + + #let's add the entries into the db so as to save on searches + #also to build up the ID's ;) + x = 1 + while x <= i: + try: + dataval = feeddata[x] + except IndexError: + logger.fdebug("reached the end of populating. Exiting the process.") + break + #print "populating : " + str(dataval) + #remove passkey so it doesn't end up in db + if type == 'torrent': + newlink = dataval['Link'][:(dataval['Link'].find('&passkey'))] + newVal = {"Link": newlink, + "Pubdate": dataval['Pubdate'], + "Site": dataval['Site']} + else: + newlink = dataval['Link'] + newVal = {"Link": newlink, + "Pubdate": dataval['Pubdate'], + "Site": dataval['Site'], + "Size": dataval['Size']} + + ctrlVal = {"Title": dataval['Title']} + + myDB.upsert("rssdb", newVal,ctrlVal) + + x+=1 + + logger.fdebug("Completed adding new data to RSS DB. Next add in " + str(mylar.RSS_CHECKINTERVAL) + " minutes") + return + +def torrentdbsearch(seriesname,issue,comicid=None): + myDB = db.DBConnection() + seriesname_alt = None + if comicid is None or comicid == 'None': + pass + else: + snm = myDB.action("SELECT * FROM comics WHERE comicid=?", [comicid]).fetchone() + if snm is None: + logger.fdebug("Invalid ComicID of " + str(comicid) + ". Aborting search.") + return + else: + seriesname = snm['ComicName'] + seriesname_alt = snm['AlternateSearch'] + + + tsearch_seriesname = re.sub('[\'\!\@\#\$\%\:\;\/\\=\?\.\s]', '%',seriesname) + formatrem_seriesname = re.sub('[\'\!\@\#\$\%\:\;\/\\=\?\.]', '',seriesname) + if formatrem_seriesname[:1] == ' ': formatrem_seriesname = formatrem_seriesname[1:] + tsearch = tsearch_seriesname + "%" + #print tsearch + if mylar.ENABLE_CBT: + tresults = myDB.action("SELECT * FROM rssdb WHERE Title like ? AND Site='comicBT'", [tsearch]) + if mylar.ENABLE_KAT: + tresults += myDB.action("SELECT * FROM rssdb WHERE Title like ? AND Site='KAT'", [tsearch]) + if tresults is None: + logger.fdebug("torrent search returned no results for " + seriesname) + if seriesname_alt is None: + logger.fdebug("no Alternate name given. Aborting search.") + return "no results" + else: + chkthealt = seriesname_alt.split('##') + if chkthealt == 0: + AS_Alternate = AlternateSearch + for calt in chkthealt: + AS_Alternate = re.sub('##','',calt) + if mylar.ENABLE_CBT: + tresults += myDB.action("SELECT * FROM rssdb WHERE Title like ? AND Site='comicBT'", [AS_Alternate]) + if mylar.ENABLE_KAT: + tresults += myDB.action("SELECT * FROM rssdb WHERE Title like ? AND Site='KAT'", [AS_Alternate]) + if tresults is None: + logger.fdebug("torrent alternate name search returned no results.") + return "no results" + extensions = ('cbr', 'cbz') + tortheinfo = [] + torinfo = {} + + for tor in tresults: + torsplit = tor['Title'].split('/') + #print tor['Title'] + #print ("there are " + str(len(torsplit)) + " sections in this title") + i=0 + #0 holds the title/issue and format-type. 
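Aside on the upsert above: rssdbupdate keys every row on the entry Title, so re-running a feed check refreshes existing rows instead of duplicating them. A minimal sketch of that pattern against a bare sqlite3 table (column names mirror the rssdb schema used here; the upsert helper is a stand-in for mylar.db, not part of it):

import sqlite3

conn = sqlite3.connect(':memory:')
conn.execute("CREATE TABLE rssdb (Title TEXT UNIQUE, Link TEXT, Pubdate TEXT, Site TEXT, Size TEXT)")

def upsert(conn, newval, ctrlval):
    # update the row matching Title; insert it if no row matched
    cur = conn.execute("UPDATE rssdb SET Link=?, Pubdate=?, Site=? WHERE Title=?",
                       (newval['Link'], newval['Pubdate'], newval['Site'], ctrlval['Title']))
    if cur.rowcount == 0:
        conn.execute("INSERT INTO rssdb (Title, Link, Pubdate, Site) VALUES (?, ?, ?, ?)",
                     (ctrlval['Title'], newval['Link'], newval['Pubdate'], newval['Site']))

upsert(conn, {'Link': 'http://example/1', 'Pubdate': 'Mon', 'Site': 'KAT'}, {'Title': 'Example Comic 001'})
upsert(conn, {'Link': 'http://example/2', 'Pubdate': 'Tue', 'Site': 'KAT'}, {'Title': 'Example Comic 001'})
print(conn.execute("SELECT Title, Link FROM rssdb").fetchall())   # one row, with the refreshed link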
+def torrentdbsearch(seriesname,issue,comicid=None):
+    myDB = db.DBConnection()
+    seriesname_alt = None
+    if comicid is None or comicid == 'None':
+        pass
+    else:
+        snm = myDB.action("SELECT * FROM comics WHERE comicid=?", [comicid]).fetchone()
+        if snm is None:
+            logger.fdebug("Invalid ComicID of " + str(comicid) + ". Aborting search.")
+            return
+        else:
+            seriesname = snm['ComicName']
+            seriesname_alt = snm['AlternateSearch']
+
+    tsearch_seriesname = re.sub('[\'\!\@\#\$\%\:\;\/\\=\?\.\s]', '%',seriesname)
+    formatrem_seriesname = re.sub('[\'\!\@\#\$\%\:\;\/\\=\?\.]', '',seriesname)
+    if formatrem_seriesname[:1] == ' ': formatrem_seriesname = formatrem_seriesname[1:]
+    tsearch = tsearch_seriesname + "%"
+    #print tsearch
+    tresults = []
+    if mylar.ENABLE_CBT:
+        tresults += myDB.action("SELECT * FROM rssdb WHERE Title like ? AND Site='comicBT'", [tsearch]).fetchall()
+    if mylar.ENABLE_KAT:
+        tresults += myDB.action("SELECT * FROM rssdb WHERE Title like ? AND Site='KAT'", [tsearch]).fetchall()
+    if not tresults:
+        logger.fdebug("torrent search returned no results for " + seriesname)
+        if seriesname_alt is None:
+            logger.fdebug("no Alternate name given. Aborting search.")
+            return "no results"
+        else:
+            chkthealt = seriesname_alt.split('##')
+            if len(chkthealt) == 0:
+                AS_Alternate = seriesname_alt
+            for calt in chkthealt:
+                AS_Alternate = re.sub('##','',calt)
+                if mylar.ENABLE_CBT:
+                    tresults += myDB.action("SELECT * FROM rssdb WHERE Title like ? AND Site='comicBT'", [AS_Alternate]).fetchall()
+                if mylar.ENABLE_KAT:
+                    tresults += myDB.action("SELECT * FROM rssdb WHERE Title like ? AND Site='KAT'", [AS_Alternate]).fetchall()
+            if not tresults:
+                logger.fdebug("torrent alternate name search returned no results.")
+                return "no results"
+    extensions = ('cbr', 'cbz')
+    tortheinfo = []
+    torinfo = {}
+
+    for tor in tresults:
+        torsplit = tor['Title'].split('/')
+        #print tor['Title']
+        #print ("there are " + str(len(torsplit)) + " sections in this title")
+        i=0
+        #0 holds the title/issue and format-type.
+        while (i < len(torsplit)):
+            #print "section(" + str(i) + "): " + str(torsplit[i])
+            i+=1
+        formatrem_torsplit = re.sub('[\'\!\@\#\$\%\:\;\/\\=\?\.\-]', '',torsplit[0]).lower()
+        formatrem_torsplit = re.sub('\s+', ' ', formatrem_torsplit)
+        #print (str(len(formatrem_torsplit)) + " - formatrem_torsplit : " + formatrem_torsplit.lower())
+        #print (str(len(formatrem_seriesname)) + " - formatrem_seriesname :" + formatrem_seriesname.lower())
+        if formatrem_seriesname.lower() in formatrem_torsplit.lower():
+            logger.fdebug("matched to : " + tor['Title'])
+            logger.fdebug("matched on series title: " + seriesname)
+            titleend = formatrem_torsplit[len(formatrem_seriesname):]
+            titleend = re.sub('\-', '', titleend)   #remove the '-' which is unnecessary
+
+            titleend = re.sub('cbr', '', str(titleend))   #remove extensions
+            logger.fdebug("titleend: " + str(titleend))
+            sptitle = titleend.split()
+            extra = ''
+            for sp in sptitle:
+                if 'v' in sp.lower() and sp[1:].isdigit():
+                    volumeadd = sp
+                elif 'vol' in sp.lower() and sp[3:].isdigit():
+                    volumeadd = sp
+                if sp.isdigit():
+                    #print("issue # detected : " + str(issue))
+                    if int(issue) == int(sp):
+                        logger.fdebug("Issue matched for : " + str(issue))
+                        #the title on CBT has a mix-mash of crap...ignore everything after cbz/cbr to clean it
+                        ctitle = tor['Title'].find('cbr')
+                        if ctitle == -1:
+                            ctitle = tor['Title'].find('cbz')
+                            if ctitle == -1:
+                                logger.fdebug("cannot determine title properly - ignoring for now.")
+                                continue
+                        cttitle = tor['Title'][:ctitle]
+                        #print("change title to : " + str(cttitle))
+
+                        if extra == '':
+                            tortheinfo.append({
+                                'title': cttitle,   #tor['Title'],
+                                'link': tor['Link'],
+                                'pubdate': tor['Pubdate'],
+                                'site': tor['Site'],
+                                'length': tor['Size']
+                                })
+                            continue
+                            #torsend2client(formatrem_seriesname,tor['Link'])
+                        else:
+                            logger.fdebug("extra info given as :" + str(extra))
+                            logger.fdebug("extra information confirmed as a match")
+                            logger.fdebug("queuing link: " + str(tor['Link']))
+                            tortheinfo.append({
+                                'title': cttitle,   #tor['Title'],
+                                'link': tor['Link'],
+                                'pubdate': tor['Pubdate'],
+                                'site': tor['Site'],
+                                'length': tor['Size']
+                                })
+                            logger.fdebug("entered info.")
+                            continue
+                            #torsend2client(formatrem_seriesname,tor['Link'])
+                    else:
+                        logger.fdebug("invalid issue#: " + str(sp))
+                        #extra = str(extra) + " " + str(sp)
+                else:
+                    logger.fdebug("word detected - assuming continuation of title: " + str(sp))
+                    extra = str(extra) + " " + str(sp)
+
+    torinfo['entries'] = tortheinfo
+
+    return torinfo
+
+def nzbdbsearch(seriesname,issue,comicid=None):
+    myDB = db.DBConnection()
+    seriesname_alt = None
+    if comicid is None or comicid == 'None':
+        pass
+    else:
+        snm = myDB.action("SELECT * FROM comics WHERE comicid=?", [comicid]).fetchone()
+        if snm is None:
+            logger.info("Invalid ComicID of " + str(comicid) + ". Aborting search.")
+            return
+        else:
+            seriesname = snm['ComicName']
+            seriesname_alt = snm['AlternateSearch']
+
+    nsearch_seriesname = re.sub('[\'\!\@\#\$\%\:\;\/\\=\?\.\s]', '%',seriesname)
+    formatrem_seriesname = re.sub('[\'\!\@\#\$\%\:\;\/\\=\?\.]', '',seriesname)
+    nsearch = nsearch_seriesname + "%"
+    nresults = myDB.action("SELECT * FROM rssdb WHERE Title like ? AND Site != 'comicBT' AND Site != 'KAT'", [nsearch]).fetchall()
+    if not nresults:
+        logger.fdebug("nzb search returned no results for " + seriesname)
+        if seriesname_alt is None:
+            logger.fdebug("no nzb Alternate name given. Aborting search.")
+            return "no results"
+        else:
+            chkthealt = seriesname_alt.split('##')
+            if len(chkthealt) == 0:
+                AS_Alternate = seriesname_alt
+            for calt in chkthealt:
+                AS_Alternate = re.sub('##','',calt)
+                nresults += myDB.action("SELECT * FROM rssdb WHERE Title like ? AND Site != 'comicBT' AND Site != 'KAT'", [AS_Alternate]).fetchall()
+            if not nresults:
+                logger.fdebug("nzb alternate name search returned no results.")
+                return "no results"
+
+    nzbtheinfo = []
+    nzbinfo = {}
+
+    for nzb in nresults:
+        # no need to parse here, just compile and throw it back ....
+        nzbtheinfo.append({
+            'title': nzb['Title'],
+            'link': nzb['Link'],
+            'pubdate': nzb['Pubdate'],
+            'site': nzb['Site'],
+            'length': nzb['Size']
+            })
+        logger.fdebug("entered info for " + nzb['Title'])
+
+    nzbinfo['entries'] = nzbtheinfo
+    return nzbinfo
+
+def torsend2client(seriesname, linkit, site):
+    logger.info("matched on " + str(seriesname))
+    filename = re.sub('[\'\!\@\#\$\%\:\;\/\\=\?\.]', '',seriesname)
+    if site == 'ComicBT':
+        logger.info(linkit)
+        linkit = str(linkit) + '&passkey=' + str(mylar.CBT_PASSKEY)
+
+    if linkit[-7:] != "torrent":
+        filename += ".torrent"
+
+    request = urllib2.Request(linkit)
+    request.add_header('User-Agent', str(mylar.USER_AGENT))
+    if mylar.TORRENT_LOCAL and mylar.LOCAL_WATCHDIR is not None:
+        filepath = os.path.join(mylar.LOCAL_WATCHDIR, filename)
+        logger.fdebug("filename for torrent set to : " + filepath)
+    elif mylar.TORRENT_SEEDBOX and mylar.SEEDBOX_WATCHDIR is not None:
+        filepath = os.path.join(mylar.CACHE_DIR, filename)
+        logger.fdebug("filename for torrent set to : " + filepath)
+    else:
+        logger.error("No Local Watch Directory or Seedbox Watch Directory specified. Set it and try again.")
+        return "fail"
+
+    try:
+        opener = helpers.urlretrieve(urllib2.urlopen(request), filepath)
+    except Exception, e:
+        logger.warn('Error fetching data from %s: %s' % (site, e))
+        return "fail"
+
+    logger.fdebug("torrent file saved as : " + str(filepath))
+    if mylar.TORRENT_LOCAL:
+        return "pass"
+    #remote_file = urllib2.urlopen(linkit)
+    #if linkit[-7:] != "torrent":
+    #    filename += ".torrent"
+
+    #local_file = open('%s' % (os.path.join(mylar.CACHE_DIR,filename)), 'w')
+    #local_file.write(remote_file.read())
+    #local_file.close()
+    #remote_file.close()
+    elif mylar.TORRENT_SEEDBOX:
+        tssh = ftpsshup.putfile(filepath,filename)
+        return tssh
+
+if __name__ == '__main__':
+    #torrents(sys.argv[1])
+    #torrentdbsearch(sys.argv[1], sys.argv[2], sys.argv[3])
+    nzbs(sys.argv[1])
diff --git a/mylar/search.py b/mylar/search.py
index 78b67312..e6f4d731 100755
--- a/mylar/search.py
+++ b/mylar/search.py
@@ -1,3 +1,4 @@
+
 # This file is part of Mylar.
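Note on the interface the search.py changes below rely on: torrentdbsearch and nzbdbsearch (defined above) hand back either the literal string "no results" (or None for a bad ComicID), or a feedparser-shaped dict whose 'entries' list NZB_SEARCH can walk exactly like a live feed. A hedged usage sketch, assuming the module is importable as mylar.rsscheck:

from mylar import rsscheck

bb = rsscheck.torrentdbsearch('Batman', '12')
if bb is None or bb == "no results":
    print("nothing cached - fall back to the provider APIs")
else:
    for entry in bb['entries']:
        # key names match what the helpers above build
        print(entry['title'], entry['site'], entry['link'], entry['length'])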
# # Mylar is free software: you can redistribute it and/or modify @@ -16,7 +17,7 @@ from __future__ import division import mylar -from mylar import logger, db, updater, helpers, parseit, findcomicfeed, prov_nzbx, notifiers +from mylar import logger, db, updater, helpers, parseit, findcomicfeed, prov_nzbx, notifiers, rsscheck nzbsu_APIkey = mylar.NZBSU_APIKEY dognzb_APIkey = mylar.DOGNZB_APIKEY @@ -36,7 +37,7 @@ from xml.dom.minidom import parseString import urllib2 from datetime import datetime -def search_init(ComicName, IssueNumber, ComicYear, SeriesYear, IssueDate, IssueID, AlternateSearch=None, UseFuzzy=None, ComicVersion=None, SARC=None, IssueArcID=None, mode=None): +def search_init(ComicName, IssueNumber, ComicYear, SeriesYear, IssueDate, IssueID, AlternateSearch=None, UseFuzzy=None, ComicVersion=None, SARC=None, IssueArcID=None, mode=None, rsscheck=None): if ComicYear == None: ComicYear = '2013' else: ComicYear = str(ComicYear)[:4] @@ -57,6 +58,16 @@ def search_init(ComicName, IssueNumber, ComicYear, SeriesYear, IssueDate, IssueI print ("Story-ARC issue!") print ("Story-ARC: " + str(SARC)) print ("IssueArcID: " + str(IssueArcID)) + + torprovider = [] + torp = 0 + logger.fdebug("Checking for torrent enabled.") + if mylar.ENABLE_TORRENTS and mylar.ENABLE_TORRENT_SEARCH: + if mylar.ENABLE_CBT: + torprovider.append('cbt') + torp+=1 + #print torprovider[0] + ##nzb provider selection## ##'dognzb' or 'nzb.su' or 'experimental' nzbprovider = [] @@ -83,22 +94,25 @@ def search_init(ComicName, IssueNumber, ComicYear, SeriesYear, IssueDate, IssueI if mylar.NEWZNAB == 1: for newznab_host in mylar.EXTRA_NEWZNABS: - if newznab_host[2] == '1' or newznab_host[2] == 1: + if newznab_host[3] == '1' or newznab_host[3] == 1: newznab_hosts.append(newznab_host) - try: - if newznab_host[3] is None: - nzbprovider.append('newznab') - else: - nzbprovider.append(newznab_host[3]) - except: + if newznab_host[0] == newznab_host[1]: nzbprovider.append('newznab') - logger.error("newznab name not given for " + str(newznab_host[0]) + ". Defaulting name to newznab.") + else: + nzbprovider.append('newznab:' + str(newznab_host[0])) +# except: +# nzbprovider.append('newznab') +# logger.error("newznab name not given for " + str(newznab_host[0]) + ". 
Defaulting name to newznab.") newznabs+=1 - logger.fdebug("newznab host:" + str(newznab_host[0]) + " - enabled: " + str(newznab_host[2])) + logger.fdebug("newznab name:" + str(newznab_host[0]) + " @ " + str(newznab_host[1])) # -------- + logger.fdebug("there are : " + str(torp) + " torrent providers you have selected.") + torpr = torp - 1 + if torpr < 0: + torpr = -1 providercount = int(nzbp + newznabs) logger.fdebug("there are : " + str(providercount) + " search providers you have selected.") logger.fdebug("Usenet Retention : " + str(mylar.USENET_RETENTION) + " days") @@ -114,97 +128,159 @@ def search_init(ComicName, IssueNumber, ComicYear, SeriesYear, IssueDate, IssueI else: IssDateFix = "no" - while (nzbpr >= 0 ): - if nzbprovider[nzbpr] == 'newznab': - #this is for newznab - nzbprov = 'newznab' - for newznab_host in newznab_hosts: - findit = NZB_SEARCH(ComicName, IssueNumber, ComicYear, SeriesYear, nzbprov, nzbpr, IssDateFix, IssueID, UseFuzzy, newznab_host, ComicVersion, SARC=SARC, IssueArcID=IssueArcID) - if findit == 'yes': - logger.fdebug("findit = found!") - break - else: - if AlternateSearch is not None and AlternateSearch != "None": - logger.info(u"Alternate Search pattern detected...re-adjusting to : " + str(AlternateSearch) + " " + str(ComicYear)) - findit = NZB_SEARCH(AlternateSearch, IssueNumber, ComicYear, SeriesYear, nzbprov, nzbpr, IssDateFix, IssueID, UseFuzzy, newznab_host, ComicVersion, SARC=SARC, IssueArcID=IssueArcID) + while (torpr >=0 ): + if torprovider[torpr] == 'cbt': + # ComicBT + torprov = 'ComicBT' + findit = NZB_SEARCH(ComicName, IssueNumber, ComicYear, SeriesYear, torprov, torpr, IssDateFix, IssueID, UseFuzzy, ComicVersion=ComicVersion, SARC=SARC, IssueArcID=IssueArcID) + if findit == 'yes': + logger.fdebug("findit = found!") + break + else: + if AlternateSearch is not None and AlternateSearch != "None": + chkthealt = AlternateSearch.split('##') + if chkthealt == 0: + AS_Alternate = AlternateSearch + loopit = len(chkthealt) + for calt in chkthealt: + AS_Alternate = re.sub('##','',calt) + logger.info(u"Alternate Search pattern detected...re-adjusting to : " + str(AS_Alternate) + " " + str(ComicYear)) + findit = NZB_SEARCH(AS_Alternate, IssueNumber, ComicYear, SeriesYear, torprov, torp, IssDateFix, IssueID, UseFuzzy, ComicVersion=ComicVersion, SARC=SARC, IssueArcID=IssueArcID) if findit == 'yes': break - nzbpr-=1 - elif nzbprovider[nzbpr] == 'experimental': - #this is for experimental - nzbprov = 'experimental' - findit = NZB_SEARCH(ComicName, IssueNumber, ComicYear, SeriesYear, nzbprov, nzbpr, IssDateFix, IssueID, UseFuzzy, ComicVersion=ComicVersion, SARC=SARC, IssueArcID=IssueArcID) - if findit == 'yes': - logger.fdebug("findit = found!") - break + torpr-=1 + + if findit == 'yes': return findit, torprov + + searchcnt = 0 + nzbprov = None + if rsscheck: + searchcnt = 1 # rss-only + else: + searchcnt = 2 # rss first, then api on non-matches + + i = 1 + nzbsrchproviders = nzbpr + + while ( i <= searchcnt ): + #searchmodes: + # rss - will run through the built-cached db of entries + # api - will run through the providers via api (or non-api in the case of Experimental) + # the trick is if the search is done during an rss compare, it needs to exit when done. + # otherwise, the order of operations is rss feed check first, followed by api on non-results. + + if i == 1: searchmode = 'rss' #order of ops - this will be used first. 
+ elif i == 2: searchmode = 'api' + + nzbpr = nzbsrchproviders + logger.fdebug("Initiating Search via : " + str(searchmode)) + + while (nzbpr >= 0 ): + if 'newznab' in nzbprovider[nzbpr]: + #this is for newznab + nzbprov = 'newznab' + for newznab_host in newznab_hosts: + #if it's rss - search both seriesname/alternates via rss then return. + if searchmode == 'rss': + if mylar.ENABLE_RSS: + findit = NZB_SEARCH(ComicName, IssueNumber, ComicYear, SeriesYear, nzbprov, nzbpr, IssDateFix, IssueID, UseFuzzy, newznab_host, ComicVersion, SARC=SARC, IssueArcID=IssueArcID, RSS="yes") + if findit == 'yes': + logger.fdebug("Found via RSS.") + break + #findit = altdefine(AlternateSearch, searchmode='rss') + if AlternateSearch is not None and AlternateSearch != "None": + chkthealt = AlternateSearch.split('##') + if chkthealt == 0: + AS_Alternate = AlternateSearch + loopit = len(chkthealt) + for calt in chkthealt: + AS_Alternate = re.sub('##','',calt) + logger.info(u"Alternate Search pattern detected...re-adjusting to : " + str(AS_Alternate) + " " + str(ComicYear)) + findit = NZB_SEARCH(AS_Alternate, IssueNumber, ComicYear, SeriesYear, nzbprov, nzbpr, IssDateFix, IssueID, UseFuzzy, newznab_host, ComicVersion, SARC=SARC, IssueArcID=IssueArcID, RSS="yes") + if findit == 'yes': + break + if findit == 'yes': + logger.fdebug("Found via RSS Alternate Naming.") + break + else: + logger.fdebug("RSS search not enabled - using API only (Enable in the Configuration)") + break + else: + #normal api-search here. + findit = NZB_SEARCH(ComicName, IssueNumber, ComicYear, SeriesYear, nzbprov, nzbpr, IssDateFix, IssueID, UseFuzzy, newznab_host, ComicVersion, SARC=SARC, IssueArcID=IssueArcID) + if findit == 'yes': + logger.fdebug("Found via API.") + break + if AlternateSearch is not None and AlternateSearch != "None": + chkthealt = AlternateSearch.split('##') + if chkthealt == 0: + AS_Alternate = AlternateSearch + loopit = len(chkthealt) + for calt in chkthealt: + AS_Alternate = re.sub('##','',calt) + logger.info(u"Alternate Search pattern detected...re-adjusting to : " + str(AS_Alternate) + " " + str(ComicYear)) + findit = NZB_SEARCH(AS_Alternate, IssueNumber, ComicYear, SeriesYear, nzbprov, nzbpr, IssDateFix, IssueID, UseFuzzy, newznab_host, ComicVersion, SARC=SARC, IssueArcID=IssueArcID) + if findit == 'yes': + break + if findit == 'yes': + logger.fdebug("Found via API Alternate Naming.") + break else: - if AlternateSearch is not None and AlternateSearch != "None": - logger.info(u"Alternate Search pattern detected...re-adjusting to : " + str(AlternateSearch) + " " + str(ComicYear)) - findit = NZB_SEARCH(AlternateSearch, IssueNumber, ComicYear, SeriesYear, nzbprov, nzbpr, IssDateFix, IssueID, UseFuzzy, ComicVersion=ComicVersion, SARC=SARC, IssueArcID=IssueArcID) - if findit == 'yes': + nzbprov = nzbprovider[nzbpr] + if searchmode == 'rss': + if mylar.ENABLE_RSS: + findit = NZB_SEARCH(ComicName, IssueNumber, ComicYear, SeriesYear, nzbprov, nzbpr, IssDateFix, IssueID, UseFuzzy, newznab_host, ComicVersion, SARC=SARC, IssueArcID=IssueArcID, RSS='yes') + if findit == 'yes': + logger.fdebug("Found via RSS on " + nzbprov) + break + if AlternateSearch is not None and AlternateSearch != "None": + chkthealt = AlternateSearch.split('##') + if chkthealt == 0: + AS_Alternate = AlternateSearch + loopit = len(chkthealt) + for calt in chkthealt: + AS_Alternate = re.sub('##','',calt) + logger.info(u"Alternate Search pattern detected...re-adjusting to : " + str(AS_Alternate) + " " + str(ComicYear)) + findit = NZB_SEARCH(AS_Alternate, 
IssueNumber, ComicYear, SeriesYear, nzbprov, nzbpr, IssDateFix, IssueID, UseFuzzy, newznab_host, ComicVersion, SARC=SARC, IssueArcID=IssueArcID, RSS="yes") + if findit == 'yes': + logger.fdebug("Found via RSS Alternate Naming on " + nzbprov) + break + else: + logger.fdebug("RSS search not enabled - using API only (Enable in the Configuration)") break - + else: + #normal api-search here. + findit = NZB_SEARCH(ComicName, IssueNumber, ComicYear, SeriesYear, nzbprov, nzbpr, IssDateFix, IssueID, UseFuzzy, newznab_host, ComicVersion, SARC=SARC, IssueArcID=IssueArcID) + if findit == 'yes': + logger.fdebug("Found via API on " + nzbprov) + break + if AlternateSearch is not None and AlternateSearch != "None": + chkthealt = AlternateSearch.split('##') + if chkthealt == 0: + AS_Alternate = AlternateSearch + for calt in chkthealt: + AS_Alternate = re.sub('##','',calt) + logger.info(u"Alternate Search pattern detected...re-adjusting to : " + str(AS_Alternate)) + findit = NZB_SEARCH(AS_Alternate, IssueNumber, ComicYear, SeriesYear, nzbprov, nzbpr, IssDateFix, IssueID, UseFuzzy, newznab_host, ComicVersion, SARC=SARC, IssueArcID=IssueArcID) + if findit == 'yes': + break + if findit == 'yes': + logger.fdebug("Found via API Alternate Naming on " + nzbprov) + break nzbpr-=1 - - elif nzbprovider[nzbpr] == 'nzbx': - # this is for nzbx.co - nzbprov = 'nzbx' - findit = NZB_SEARCH(ComicName, IssueNumber, ComicYear, SeriesYear, nzbprov, nzbpr, IssDateFix, IssueID, UseFuzzy, ComicVersion=ComicVersion, SARC=SARC, IssueArcID=IssueArcID) - if findit == 'yes': - logger.fdebug("findit = found!") - break + if nzbpr >= 0 and findit != 'yes': + logger.info(u"More than one search provider given - trying next one.") else: - if AlternateSearch is not None and AlternateSearch != "None": - logger.info(u"Alternate Search pattern detected...re-adjusting to : " + str(AlternateSearch) + " " + str(ComicYear)) - findit = NZB_SEARCH(AlternateSearch, IssueNumber, ComicYear, SeriesYear, nzbprov, nzbpr, IssDateFix, IssueID, UseFuzzy, ComicVersion=ComicVersion, SARC=SARC, IssueArcID=IssueArcID) - if findit == 'yes': - break - - nzbpr-=1 - - elif nzbprovider[nzbpr] == 'nzb.su': - # this is for nzb.su - nzbprov = 'nzb.su' - findit = NZB_SEARCH(ComicName, IssueNumber, ComicYear, SeriesYear, nzbprov, nzbpr, IssDateFix, IssueID, UseFuzzy, ComicVersion=ComicVersion, SARC=SARC, IssueArcID=IssueArcID) - if findit == 'yes': - logger.fdebug("findit = found!") break - else: - if AlternateSearch is not None and AlternateSearch != "None": - logger.info(u"Alternate Search pattern detected...re-adjusting to : " + str(AlternateSearch) + " " + str(ComicYear)) - findit = NZB_SEARCH(AlternateSearch, IssueNumber, ComicYear, SeriesYear, nzbprov, nzbpr, IssDateFix, IssueID, UseFuzzy, ComicVersion=ComicVersion, SARC=SARC, IssueArcID=IssueArcID) - if findit == 'yes': - break - - nzbpr-=1 - - # ---- - - elif nzbprovider[nzbpr] == 'dognzb': - # this is for dognzb.com - nzbprov = 'dognzb' - findit = NZB_SEARCH(ComicName, IssueNumber, ComicYear, SeriesYear, nzbprov, nzbpr, IssDateFix, IssueID, UseFuzzy, ComicVersion=ComicVersion, SARC=SARC, IssueArcID=IssueArcID) - - if findit == 'yes': - logger.fdebug("findit = found!") - break - else: - if AlternateSearch is not None and AlternateSearch != "None": - logger.info(u"Alternate Search pattern detected...re-adjusting to : " + str(AlternateSearch) + " " + str(ComicYear)) - findit = NZB_SEARCH(AlternateSearch, IssueNumber, ComicYear, SeriesYear, nzbprov, nzbpr, IssDateFix, IssueID, UseFuzzy, ComicVersion=ComicVersion, 
SARC=SARC, IssueArcID=IssueArcID) - if findit == 'yes': - break - - nzbpr-=1 - - if nzbpr >= 0 and findit != 'yes': - logger.info(u"More than one search provider given - trying next one.") - # ---- if findit == 'yes': return findit, nzbprov + else: + logger.fdebug("Finished searching via : " + str(searchmode)) + i+=1 + return findit, nzbprov -def NZB_SEARCH(ComicName, IssueNumber, ComicYear, SeriesYear, nzbprov, nzbpr, IssDateFix, IssueID, UseFuzzy, newznab_host=None, ComicVersion=None, SARC=None, IssueArcID=None): +def NZB_SEARCH(ComicName, IssueNumber, ComicYear, SeriesYear, nzbprov, nzbpr, IssDateFix, IssueID, UseFuzzy, newznab_host=None, ComicVersion=None, SARC=None, IssueArcID=None, RSS=None): if nzbprov == 'nzb.su': apikey = mylar.NZBSU_APIKEY @@ -215,11 +291,17 @@ def NZB_SEARCH(ComicName, IssueNumber, ComicYear, SeriesYear, nzbprov, nzbpr, Is elif nzbprov == 'experimental': apikey = 'none' elif nzbprov == 'newznab': - host_newznab = newznab_host[0].rstrip() - apikey = newznab_host[1].rstrip() - logger.fdebug("using Newznab host of : " + str(host_newznab)) + #updated to include Newznab Name now + name_newznab = newznab_host[0].rstrip() + host_newznab = newznab_host[1].rstrip() + apikey = newznab_host[2].rstrip() + logger.fdebug("using Newznab host of : " + str(name_newznab)) - logger.info(u"Shhh be very quiet...I'm looking for " + ComicName + " issue: " + str(IssueNumber) + " (" + str(ComicYear) + ") using " + str(nzbprov)) + if RSS == "yes": + tmpprov = str(nzbprov) + " [RSS]" + else: + tmpprov = nzbprov + logger.info(u"Shhh be very quiet...I'm looking for " + ComicName + " issue: " + str(IssueNumber) + " (" + str(ComicYear) + ") using " + str(tmpprov)) if mylar.PREFERRED_QUALITY == 0: filetype = "" @@ -309,11 +391,12 @@ def NZB_SEARCH(ComicName, IssueNumber, ComicYear, SeriesYear, nzbprov, nzbpr, Is findcomiciss.append(iss) #print ("we need : " + str(findcomic[findcount]) + " issue: #" + str(findcomiciss[findcount])) + cm1 = re.sub("[\/]", " ", str(findcomic[findcount])) # replace whitespace in comic name with %20 for api search - cm1 = re.sub(" ", "%20", str(findcomic[findcount])) #cm = re.sub("\&", "%26", str(cm1)) - cm = re.sub("\\band\\b", "", str(cm1)) # remove 'and' & '&' from the search pattern entirely (broader results, will filter out later) + cm = re.sub("\\band\\b", "", cm1.lower()) # remove 'and' & '&' from the search pattern entirely (broader results, will filter out later) cm = re.sub("\\bthe\\b", "", cm.lower()) # remove 'the' from the search pattern to accomodate naming differences + cm = re.sub(" ", "%20", str(cm)) cm = re.sub("[\&\:\?\,]", "", str(cm)) #print (cmi) if '.' 
in findcomiciss[findcount]: @@ -356,77 +439,103 @@ def NZB_SEARCH(ComicName, IssueNumber, ComicYear, SeriesYear, nzbprov, nzbpr, Is # here we account for issue pattern variations if cmloopit == 3: comsearch[findloop] = comsrc + "%2000" + isssearch[findloop] + "%20" + str(issue_exc) + "%20" + str(filetype) + issdig = '00' elif cmloopit == 2: comsearch[findloop] = comsrc + "%200" + isssearch[findloop] + "%20" + str(issue_exc) + "%20" + str(filetype) + issdig = '0' elif cmloopit == 1: comsearch[findloop] = comsrc + "%20" + isssearch[findloop] + "%20" + str(issue_exc) + "%20" + str(filetype) + issdig = '' #logger.fdebug("comsearch: " + str(comsearch)) #logger.fdebug("cmloopit: " + str(cmloopit)) #logger.fdebug("done: " + str(done)) - if nzbprov != 'experimental': - if nzbprov == 'dognzb': - findurl = "http://dognzb.cr/api?t=search&q=" + str(comsearch[findloop]) + "&o=xml&cat=7030" - elif nzbprov == 'nzb.su': - findurl = "https://nzb.su/api?t=search&q=" + str(comsearch[findloop]) + "&o=xml&cat=7030" - elif nzbprov == 'newznab': - #let's make sure the host has a '/' at the end, if not add it. - if host_newznab[len(host_newznab)-1:len(host_newznab)] != '/': - host_newznab_fix = str(host_newznab) + "/" - else: host_newznab_fix = host_newznab - findurl = str(host_newznab_fix) + "api?t=search&q=" + str(comsearch[findloop]) + "&o=xml&cat=7030" - elif nzbprov == 'nzbx': - bb = prov_nzbx.searchit(comsearch[findloop]) - if nzbprov != 'nzbx': - # helper function to replace apikey here so we avoid logging it ;) - findurl = findurl + "&apikey=" + str(apikey) - logsearch = helpers.apiremove(str(findurl),'nzb') - logger.fdebug("search-url: " + str(logsearch)) + #--- this is basically for RSS Feeds --- + if RSS == "yes": + if nzbprov == 'ComicBT': + cmname = re.sub("%20", " ", str(comsrc)) + logger.fdebug("Sending request to [ComicBT] RSS for " + str(cmname) + " : " + str(issdig) + str(isssearch[findloop])) + bb = rsscheck.torrentdbsearch(cmname,isssearch[findloop]) + rss = "yes" + if bb is not None: logger.fdebug("bb results: " + str(bb)) + else: + cmname = re.sub("%20", " ", str(comsrc)) + logger.fdebug("Sending request to RSS for " + str(cmname) + " : " + str(issdig) + str(isssearch[findloop])) + bb = rsscheck.nzbdbsearch(cmname,isssearch[findloop]) + rss = "yes" + if bb is not None: logger.fdebug("bb results: " + str(bb)) + else: + #CBT is redudant now - just getting it ready for when it's not redudant :) + if nzbprov == 'ComicBT': + cmname = re.sub("%20", " ", str(comsrc)) + logger.fdebug("Sending request to [ComicBT] RSS for " + str(cmname) + " : " + str(issdig) + str(isssearch[findloop])) + bb = rsscheck.torrentdbsearch(cmname,isssearch[findloop]) + rss = "yes" + if bb is not None: logger.fdebug("results: " + str(bb)) - ### IF USENET_RETENTION is set, honour it - ### For newznab sites, that means appending "&maxage=" on the URL - if mylar.USENET_RETENTION != None: - findurl = findurl + "&maxage=" + str(mylar.USENET_RETENTION) + elif nzbprov != 'experimental': + if nzbprov == 'dognzb': + findurl = "http://dognzb.cr/api?t=search&q=" + str(comsearch[findloop]) + "&o=xml&cat=7030" + elif nzbprov == 'nzb.su': + findurl = "https://nzb.su/api?t=search&q=" + str(comsearch[findloop]) + "&o=xml&cat=7030" + elif nzbprov == 'newznab': + #let's make sure the host has a '/' at the end, if not add it. 
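The newznab branch just below normalizes the configured host before building the category-7030 query; in isolation the assembly looks like this (host and query values hypothetical):

host_newznab = "http://nzb.example.com"            # hypothetical user-configured host
if host_newznab[len(host_newznab)-1:len(host_newznab)] != '/':
    host_newznab_fix = str(host_newznab) + "/"
else:
    host_newznab_fix = host_newznab
findurl = str(host_newznab_fix) + "api?t=search&q=" + "batman%2012" + "&o=xml&cat=7030"
print(findurl)
# the apikey is appended next (and scrubbed from the log line), followed by
# an optional &maxage= cap when USENET_RETENTION is set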
+ if host_newznab[len(host_newznab)-1:len(host_newznab)] != '/': + host_newznab_fix = str(host_newznab) + "/" + else: host_newznab_fix = host_newznab + findurl = str(host_newznab_fix) + "api?t=search&q=" + str(comsearch[findloop]) + "&o=xml&cat=7030" + elif nzbprov == 'nzbx': + bb = prov_nzbx.searchit(comsearch[findloop]) + if nzbprov != 'nzbx': + # helper function to replace apikey here so we avoid logging it ;) + findurl = findurl + "&apikey=" + str(apikey) + logsearch = helpers.apiremove(str(findurl),'nzb') + logger.fdebug("search-url: " + str(logsearch)) - # Add a user-agent - #print ("user-agent:" + str(mylar.USER_AGENT)) - request = urllib2.Request(findurl) - request.add_header('User-Agent', str(mylar.USER_AGENT)) - opener = urllib2.build_opener() + ### IF USENET_RETENTION is set, honour it + ### For newznab sites, that means appending "&maxage=" on the URL + if mylar.USENET_RETENTION != None: + findurl = findurl + "&maxage=" + str(mylar.USENET_RETENTION) - #set a delay between searches here. Default is for 30 seconds... - if mylar.SEARCH_DELAY == 'None' or mylar.SEARCH_DELAY is None: - pause_the_search = 1 * 60 # (it's in seconds) - elif str(mylar.SEARCH_DELAY).isdigit(): - pause_the_search = mylar.SEARCH_DELAY * 60 - else: - logger.info("Check Search Delay - invalid numerical given. Force-setting to 1 minute.") - pause_the_search = 1 * 60 + # Add a user-agent + #print ("user-agent:" + str(mylar.USER_AGENT)) + request = urllib2.Request(findurl) + request.add_header('User-Agent', str(mylar.USER_AGENT)) + opener = urllib2.build_opener() - #bypass for local newznabs - if nzbprov == 'newznab': - if host_newznab_fix[:3] == '10.' or host_newznab_fix[:4] == '172.' or host_newznab_fix[:4] == '192.' or 'localhost' in str(host_newznab_fix): - pass + #set a delay between searches here. Default is for 30 seconds... + if mylar.SEARCH_DELAY == 'None' or mylar.SEARCH_DELAY is None: + pause_the_search = 1 * 60 # (it's in seconds) + elif str(mylar.SEARCH_DELAY).isdigit(): + pause_the_search = mylar.SEARCH_DELAY * 60 else: - logger.fdebug("pausing for " + str(pause_the_search) + " seconds before continuing to avoid hammering") - time.sleep(pause_the_search) + logger.info("Check Search Delay - invalid numerical given. Force-setting to 1 minute.") + pause_the_search = 1 * 60 - try: - data = opener.open(request).read() - except Exception, e: - logger.warn('Error fetching data from %s: %s' % (nzbprov, e)) - data = False + #bypass for local newznabs + if nzbprov == 'newznab': + if host_newznab_fix[:3] == '10.' or host_newznab_fix[:4] == '172.' or host_newznab_fix[:4] == '192.' 
or 'localhost' in str(host_newznab_fix): + pass + else: + logger.fdebug("pausing for " + str(pause_the_search) + " seconds before continuing to avoid hammering") + time.sleep(pause_the_search) - if data: - bb = feedparser.parse(data) - else: - bb = "no results" + try: + data = opener.open(request).read() + except Exception, e: + logger.warn('Error fetching data from %s: %s' % (nzbprov, e)) + data = False - elif nzbprov == 'experimental': - #bb = parseit.MysterBinScrape(comsearch[findloop], comyear) - bb = findcomicfeed.Startit(u_ComicName, isssearch[findloop], comyear, ComicVersion) - # since the regexs in findcomicfeed do the 3 loops, lets force the exit after - cmloopit == 1 + if data: + bb = feedparser.parse(data) + else: + bb = "no results" + + elif nzbprov == 'experimental': + #bb = parseit.MysterBinScrape(comsearch[findloop], comyear) + bb = findcomicfeed.Startit(u_ComicName, isssearch[findloop], comyear, ComicVersion) + # since the regexs in findcomicfeed do the 3 loops, lets force the exit after + cmloopit == 1 done = False foundc = "no" @@ -436,11 +545,18 @@ def NZB_SEARCH(ComicName, IssueNumber, ComicYear, SeriesYear, nzbprov, nzbpr, Is foundc = "no" else: for entry in bb['entries']: - logger.fdebug("checking search result: " + str(entry['title'])) - if nzbprov != "experimental": - #Experimental already has size constraints done. - tmpsz = entry.enclosures[0] - comsize_b = tmpsz['length'] + logger.fdebug("checking search result: " + entry['title']) + if nzbprov != "experimental" and nzbprov != "ComicBT": + if RSS == "yes": + comsize_b = entry['length'] + else: + #Experimental already has size constraints done. + if nzbprov == 'ComicBT': + comsize_b = 0 #CBT rss doesn't have sizes + else: + tmpsz = entry.enclosures[0] + comsize_b = tmpsz['length'] + if comsize_b is None: comsize_b = 0 comsize_m = helpers.human_size(comsize_b) logger.fdebug("size given as: " + str(comsize_m)) #----size constraints. @@ -461,10 +577,10 @@ def NZB_SEARCH(ComicName, IssueNumber, ComicYear, SeriesYear, nzbprov, nzbpr, Is # -- end size constaints. - thisentry = str(entry['title']) - logger.fdebug("Entry: " + str(thisentry)) - cleantitle = re.sub('[_/.]', ' ', str(entry['title'])) - cleantitle = helpers.cleanName(str(cleantitle)) + thisentry = entry['title'] + logger.fdebug("Entry: " + thisentry) + cleantitle = re.sub('[_/.]', ' ', entry['title']) + cleantitle = helpers.cleanName(cleantitle) # this is new - if title contains a '&' in the title it will assume the filename has ended at that point # which causes false positives (ie. wolverine & the x-men becomes the x-men, which matches on x-men. # 'the' is removed for comparisons later on @@ -472,6 +588,12 @@ def NZB_SEARCH(ComicName, IssueNumber, ComicYear, SeriesYear, nzbprov, nzbpr, Is nzbname = cleantitle + # if it's coming from CBT, remove the ' -' at the end as it screws it up. + if nzbprov == 'ComicBT': + if cleantitle.endswith(' - '): + cleantitle = cleantitle[:-3] + logger.fdebug("cleaned up title to : " + str(cleantitle)) + # if there are no () in the string, try to add them if it looks like a year (19xx or 20xx) if len(re.findall('[^()]+', cleantitle)): @@ -481,10 +603,34 @@ def NZB_SEARCH(ComicName, IssueNumber, ComicYear, SeriesYear, nzbprov, nzbpr, Is #adjust for covers only by removing them entirely... 
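The hunk that resumes below stops discarding titles that carry a bare version token (v2 / v2013) instead of a bracketed year. Reduced to a standalone loop, the classification it adds looks like this (the startswith test here is slightly stricter than the patch's 'v' in ct check):

for ct in "Invincible v2013 cbr".split():
    if ct.lower().startswith('v') and ct[1:].isdigit():
        if len(ct[1:]) == 4:
            print("volume year detected: " + ct)     # v2013 - treated like a (2013) year
        elif len(ct[1:]) == 1:
            print("volume number detected: " + ct)   # v2 - compared against ComicVersion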
logger.fdebug("Cleantitle: " + str(cleantitle)) + vers4year = "no" + vers4vol = "no" if len(re.findall('[^()]+', cleantitle)) == 1 or 'cover only' in cleantitle.lower(): - logger.fdebug("invalid nzb and/or cover only - skipping.") - cleantitle = "abcdefghijk 0 (1901).cbz" - continue + #some sites don't have (2013) or whatever..just v2 / v2013. Let's adjust: + ctchk = cleantitle.split() + for ct in ctchk: + if 'v' in ct.lower() and ct[1:].isdigit(): + logger.fdebug("possible versioning..checking") + #we hit a versioning # - account for it + if ct[1:].isdigit(): + if len(ct[1:]) == 4: #v2013 + logger.fdebug("Version detected as " + str(ct)) + vers4year = "yes" #re.sub("[^0-9]", " ", str(ct)) #remove the v + #cleantitle = re.sub(ct, "(" + str(vers4year) + ")", cleantitle) + #logger.fdebug("volumized cleantitle : " + cleantitle) + break + elif len(ct[1:]) == 1: #v2 + logger.fdebug("Version detected as " + str(ct)) + vers4vol = str(ct) + break + else: + logger.fdebug("error - unknown length for : " + str(ct)) + cleantitle = "abcdefghijk 0 (1901).cbz" + break + if vers4year == "no" and vers4vol == "no": + logger.fdebug("invalid nzb and/or cover only - skipping.") + cleantitle = "abcdefghijk 0 (1901).cbz" + continue if done: break @@ -513,7 +659,10 @@ def NZB_SEARCH(ComicName, IssueNumber, ComicYear, SeriesYear, nzbprov, nzbpr, Is comic_andiss = m[cnt] logger.fdebug("Comic: " + str(comic_andiss)) logger.fdebug("UseFuzzy is : " + str(UseFuzzy)) - if UseFuzzy == "0" or UseFuzzy == "2" or UseFuzzy is None or IssDateFix != "no": + if vers4vol != "no" or vers4year != "no": + logger.fdebug("Year not given properly formatted but Version detected.Bypassing Year Match.") + yearmatch = "true" + elif UseFuzzy == "0" or UseFuzzy == "2" or UseFuzzy is None or IssDateFix != "no": if m[cnt][:-2] == '19' or m[cnt][:-2] == '20': logger.fdebug("year detected: " + str(m[cnt])) result_comyear = m[cnt] @@ -590,8 +739,8 @@ def NZB_SEARCH(ComicName, IssueNumber, ComicYear, SeriesYear, nzbprov, nzbpr, Is findcomic_chksplit = re.sub('[\-\:\,\.\?]', ' ', findcomic[findloop]) chg_comic = re.sub('[\s]', '', chg_comic) findcomic_chksplit = re.sub('[\s]', '', findcomic_chksplit) - print chg_comic.upper() - print findcomic_chksplit.upper() + #print chg_comic.upper() + #print findcomic_chksplit.upper() if chg_comic.upper() == findcomic_chksplit.upper(): logger.fdebug("series contains numerics...adjusting..") else: @@ -609,6 +758,7 @@ def NZB_SEARCH(ComicName, IssueNumber, ComicYear, SeriesYear, nzbprov, nzbpr, Is #else: splitst = len(splitit) - 1 # make sure that things like - in watchcomic are accounted for when comparing to nzb. + findcomic[findloop] = re.sub('[\/]', ' ', findcomic[findloop]) watchcomic_split = helpers.cleanName(str(findcomic[findloop])) if '&' in watchcomic_split: watchcomic_split = re.sub('[/&]','and', watchcomic_split) watchcomic_nonsplit = re.sub('[\-\:\,\.\?]', ' ', watchcomic_split) @@ -632,20 +782,29 @@ def NZB_SEARCH(ComicName, IssueNumber, ComicYear, SeriesYear, nzbprov, nzbpr, Is initialchk = 'ok' if (splitst) != len(watchcomic_split): logger.fdebug("incorrect comic lengths...not a match") - if str(splitit[0]).lower() == "the" or str(watchcomic_split[0]).lower() == "the": - if str(splitit[0]).lower() == "the": - logger.fdebug("THE word detected...attempting to adjust pattern matching") - #print comic_iss - #print comic_iss[4:] - splitit = comic_iss_b4[4:].split(None) - #splitit = splitit[4:] + #because the word 'the' can appear anywhere and really mess up matches... 
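As the hunk continues below, the positional "starts with The" checks give way to a word-boundary strip, so the word is removed wherever it appears while words that merely contain it survive; for example:

import re
print(re.sub(r"\bthe\b", "", "the walking dead theory".lower()).split())
# -> ['walking', 'dead', 'theory']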
+#                    if str(splitit[0]).lower() == "the" or str(watchcomic_split[0]).lower() == "the":
+#                        if str(splitit[0]).lower() == "the":
+                    for tstsplit in splitit:
+                        if tstsplit.lower() == 'the':
+                            logger.fdebug("THE word detected in found comic...attempting to adjust pattern matching")
+                            #print comic_iss_b4
+                            #print comic_iss_b4[4:]
+                            #splitit = comic_iss_b4[4:].split(None)
+                            cissb4this = re.sub("\\bthe\\b", "", comic_iss_b4)
+                            splitit = cissb4this.split(None)
+                            splitst = splitst - 1 #remove 'the' from start
-                        logger.fdebug("comic is now : " + str(comic_iss_b4[4:]))
-                        if str(watchcomic_split[0]).lower() == "the":
-                            wtstart = watchcomic_nonsplit[4:]
+                            logger.fdebug("comic is now : " + str(splitit))#str(comic_iss[4:]))
+                    #if str(watchcomic_split[0]).lower() == "the":
+                    for tstsplit in watchcomic_split:
+                        if tstsplit.lower() == 'the':
+                            logger.fdebug("THE word detected in watchcomic - attempting to adjust match.")
+                            #wtstart = watchcomic_nonsplit[4:]
+                            #watchcomic_split = wtstart.split(None)
+                            wtstart = re.sub("\\bthe\\b", "", watchcomic_nonsplit)
                             watchcomic_split = wtstart.split(None)
                             logger.fdebug("new watchcomic string:" + str(watchcomic_split))
-                    initialchk = 'no'
+                        initialchk = 'no'
                 else:
                     initialchk = 'ok'
@@ -660,6 +819,7 @@ def NZB_SEARCH(ComicName, IssueNumber, ComicYear, SeriesYear, nzbprov, nzbpr, Is
                 if cvers == "true": splitst = splitst + 1
                 while ( n <= (splitst)-1 ):
                     logger.fdebug("splitit: " + str(splitit[n]))
+                    logger.fdebug("scount : " + str(scount))
                     if n < (splitst) and n < len(watchcomic_split):
                         logger.fdebug(str(n) + " Comparing: " + str(watchcomic_split[n]) + " .to. " + str(splitit[n]))
                         if '+' in watchcomic_split[n]:
@@ -677,11 +837,17 @@ def NZB_SEARCH(ComicName, IssueNumber, ComicYear, SeriesYear, nzbprov, nzbpr, Is
                         logger.fdebug("watch comicversion is " + str(ComicVersion))
                         fndcomicversion = str(splitit[n])
                         logger.fdebug("version found: " + str(fndcomicversion))
-                        if ComicVersion != "None" and ComicVersion is not None:
+                        if vers4year != "no" or vers4vol != "no":
+                            if ComicVersion != "None" and ComicVersion is not None:
+                                D_ComicVersion = re.sub("[^0-9]", "", ComicVersion)
+                            else:
+                                D_ComicVersion = 0
+
                             F_ComicVersion = re.sub("[^0-9]", "", fndcomicversion)
-                            D_ComicVersion = re.sub("[^0-9]", "", ComicVersion)
-                            if int(F_ComicVersion) == int(D_ComicVersion):
-                                logger.fdebug("We matched on versions...")
+                            S_ComicVersion = str(SeriesYear)
+
+                            if int(F_ComicVersion) == int(D_ComicVersion) or int(F_ComicVersion) == int(S_ComicVersion):
+                                logger.fdebug("We matched on versions..." + str(fndcomicversion))
                                 scount+=1
                             else:
                                 logger.fdebug("Versions wrong. Ignoring possible match.")
@@ -757,7 +923,7 @@ def NZB_SEARCH(ComicName, IssueNumber, ComicYear, SeriesYear, nzbprov, nzbpr, Is
                     #issue comparison now as well
                     if int(intIss) == int(comintIss):
                         logger.fdebug('issues match!')
-                        logger.info(u"Found " + ComicName + " (" + str(comyear) + ") issue: " + str(IssueNumber) + " using " + str(nzbprov) )
+                        logger.info(u"Found " + ComicName + " (" + str(comyear) + ") issue: " + str(IssueNumber) + " using " + str(tmpprov) )
                         ## -- inherit issue. Comic year is non-standard.
nzb year is the year ## -- comic was printed, not the start year of the comic series and ## -- thus the deciding component if matches are correct or not @@ -789,7 +955,7 @@ def NZB_SEARCH(ComicName, IssueNumber, ComicYear, SeriesYear, nzbprov, nzbpr, Is request = urllib2.Request(linkapi) #(str(mylar.BLACKHOLE_DIR) + str(filenamenzb)) request.add_header('User-Agent', str(mylar.USER_AGENT)) try: - opener = urlretrieve(urllib2.urlopen(request), str(mylar.BLACKHOLE_DIR) + str(filenamenzb)) + opener = helpers.urlretrieve(urllib2.urlopen(request), str(mylar.BLACKHOLE_DIR) + str(filenamenzb)) except Exception, e: logger.warn('Error fetching data from %s: %s' % (nzbprov, e)) return @@ -803,7 +969,24 @@ def NZB_SEARCH(ComicName, IssueNumber, ComicYear, SeriesYear, nzbprov, nzbpr, Is nzbname = re.sub(str(ext), '', str(filenamenzb)) logger.fdebug("nzb name to be used for post-processing is : " + str(nzbname)) #end blackhole + elif nzbprov == 'ComicBT': + logger.fdebug("sending .torrent to watchdir.") + logger.fdebug("ComicName:" + ComicName) + logger.fdebug("link:" + entry['link']) + logger.fdebug("Torrent Provider:" + nzbprov) + foundc = "yes" + #let's change all space to decimals for simplicity + nzbname = re.sub(" ", ".", str(entry['title'])) + #gotta replace & or escape it + nzbname = re.sub("\&", 'and', str(nzbname)) + nzbname = re.sub('[\,\:\?]', '', str(nzbname)) + if nzbname.lower().endswith('.torrent'): + nzbname = re.sub('.torrent', '', nzbname) + rcheck = rsscheck.torsend2client(ComicName, entry['link'], nzbprov) + if rcheck == "fail": + logger.error("Unable to send torrent - check logs and settings.") + return else: tmppath = mylar.CACHE_DIR if os.path.exists(tmppath): @@ -899,18 +1082,18 @@ def NZB_SEARCH(ComicName, IssueNumber, ComicYear, SeriesYear, nzbprov, nzbpr, Is logger.info(u"Successfully sent nzb file to SABnzbd") - if mylar.PROWL_ENABLED and mylar.PROWL_ONSNATCH: - logger.info(u"Sending Prowl notification") - prowl = notifiers.PROWL() - prowl.notify(nzbname,"Download started") - if mylar.NMA_ENABLED and mylar.NMA_ONSNATCH: - logger.info(u"Sending NMA notification") - nma = notifiers.NMA() - nma.notify(snatched_nzb=nzbname) - if mylar.PUSHOVER_ENABLED and mylar.PUSHOVER_ONSNATCH: - logger.info(u"Sending Pushover notification") - pushover = notifiers.PUSHOVER() - pushover.notify(nzbname,"Download started") + if mylar.PROWL_ENABLED and mylar.PROWL_ONSNATCH: + logger.info(u"Sending Prowl notification") + prowl = notifiers.PROWL() + prowl.notify(nzbname,"Download started") + if mylar.NMA_ENABLED and mylar.NMA_ONSNATCH: + logger.info(u"Sending NMA notification") + nma = notifiers.NMA() + nma.notify(snatched_nzb=nzbname) + if mylar.PUSHOVER_ENABLED and mylar.PUSHOVER_ONSNATCH: + logger.info(u"Sending Pushover notification") + pushover = notifiers.PUSHOVER() + pushover.notify(nzbname,"Download started") foundc = "yes" done = True @@ -932,25 +1115,49 @@ def NZB_SEARCH(ComicName, IssueNumber, ComicYear, SeriesYear, nzbprov, nzbpr, Is return foundc elif foundc == "no" and nzbpr == 0: foundcomic.append("no") - logger.fdebug("couldn't find a matching comic") + logger.fdebug("couldn't find a matching comic using " + str(tmpprov)) if IssDateFix == "no": logger.info(u"Couldn't find Issue " + str(IssueNumber) + " of " + ComicName + "(" + str(comyear) + "). Status kept as wanted." 
) break return foundc -def searchforissue(issueid=None, new=False): +def searchforissue(issueid=None, new=False, rsscheck=None): myDB = db.DBConnection() - if not issueid: + if not issueid or rsscheck: - logger.info(u"Initiating NZB Search scan at requested interval of " + str(mylar.SEARCH_INTERVAL) + " minutes.") + if rsscheck: + logger.info(u"Initiating RSS Search Scan at scheduled interval of " + str(mylar.RSS_CHECKINTERVAL) + " minutes.") + else: + logger.info(u"Initiating NZB Search scan at requested interval of " + str(mylar.SEARCH_INTERVAL) + " minutes.") myDB = db.DBConnection() - results = myDB.select('SELECT * from issues WHERE Status="Wanted"') + stloop = 1 + results = [] - # annuals include here... - #results += myDB.select('SELECT * from annuals WHERE Status="Wanted"') + if mylar.ANNUALS_ON: + stloop+=1 + while (stloop > 0): + if stloop == 1: + issues_1 = myDB.select('SELECT * from issues WHERE Status="Wanted"') + for iss in issues_1: + results.append({'ComicID': iss['ComicID'], + 'IssueID': iss['IssueID'], + 'Issue_Number': iss['Issue_Number'], + 'IssueDate': iss['IssueDate'], + 'mode': 'want' + }) + elif stloop == 2: + issues_2 = myDB.select('SELECT * from annuals WHERE Status="Wanted"') + for iss in issues_2: + results.append({'ComicID': iss['ComicID'], + 'IssueID': iss['IssueID'], + 'Issue_Number': iss['Issue_Number'], + 'IssueDate': iss['IssueDate'], + 'mode': 'want_ann' + }) + stloop-=1 new = True @@ -963,15 +1170,15 @@ def searchforissue(issueid=None, new=False): UseFuzzy = comic['UseFuzzy'] ComicVersion = comic['ComicVersion'] if result['IssueDate'] == None: - IssueYear = comic['ComicYear'] + ComicYear = comic['ComicYear'] else: - IssueYear = str(result['IssueDate'])[:4] - - if (mylar.NZBSU or mylar.DOGNZB or mylar.EXPERIMENTAL or mylar.NEWZNAB or mylar.NZBX) and (mylar.USE_SABNZBD or mylar.USE_NZBGET): - foundNZB = search_init(result['ComicName'], result['Issue_Number'], str(IssueYear), comic['ComicYear'], IssueDate, result['IssueID'], AlternateSearch, UseFuzzy, ComicVersion, SARC=None, IssueArcID=None, mode=mode) + ComicYear = str(result['IssueDate'])[:4] + mode = result['mode'] + if (mylar.NZBSU or mylar.DOGNZB or mylar.EXPERIMENTAL or mylar.NEWZNAB or mylar.NZBX or mylar.ENABLE_KAT or mylar.ENABLE_CBT) and (mylar.USE_SABNZBD or mylar.USE_NZBGET or mylar.ENABLE_TORRENTS): + foundNZB, prov = search_init(comic['ComicName'], result['Issue_Number'], str(ComicYear), comic['ComicYear'], IssueDate, result['IssueID'], AlternateSearch, UseFuzzy, ComicVersion, SARC=None, IssueArcID=None, mode=mode, rsscheck=rsscheck) if foundNZB == "yes": #print ("found!") - updater.foundsearch(result['ComicID'], result['IssueID'], mode=mode) + updater.foundsearch(result['ComicID'], result['IssueID'], mode=mode, provider=prov) else: pass #print ("not found!") @@ -981,6 +1188,9 @@ def searchforissue(issueid=None, new=False): if result is None: result = myDB.action('SELECT * FROM annuals where IssueID=?', [issueid]).fetchone() mode = 'want_ann' + if result is None: + logger.info("Unable to locate IssueID - you probably should delete/refresh the series.") + return ComicID = result['ComicID'] comic = myDB.action('SELECT * FROM comics where ComicID=?', [ComicID]).fetchone() SeriesYear = comic['ComicYear'] @@ -995,10 +1205,10 @@ def searchforissue(issueid=None, new=False): foundNZB = "none" if (mylar.NZBSU or mylar.DOGNZB or mylar.EXPERIMENTAL or mylar.NEWZNAB or mylar.NZBX) and (mylar.USE_SABNZBD or mylar.USE_NZBGET): - foundNZB = search_init(result['ComicName'], result['Issue_Number'], 
str(IssueYear), comic['ComicYear'], IssueDate, result['IssueID'], AlternateSearch, UseFuzzy, ComicVersion, mode=mode) + foundNZB, prov = search_init(result['ComicName'], result['Issue_Number'], str(IssueYear), comic['ComicYear'], IssueDate, result['IssueID'], AlternateSearch, UseFuzzy, ComicVersion, mode=mode) if foundNZB == "yes": logger.fdebug("I found " + result['ComicName'] + ' #:' + str(result['Issue_Number'])) - updater.foundsearch(ComicID=result['ComicID'], IssueID=result['IssueID'], mode=mode) + updater.foundsearch(ComicID=result['ComicID'], IssueID=result['IssueID'], mode=mode, provider=prov) else: pass #print ("not found!") @@ -1012,6 +1222,9 @@ def searchIssueIDList(issuelist): if issue is None: issue = myDB.action('SELECT * from annuals WHERE IssueID=?', [issueid]).fetchone() mode = 'want_ann' + if issue is None: + logger.info("unable to determine IssueID - perhaps you need to delete/refresh series?") + break comic = myDB.action('SELECT * from comics WHERE ComicID=?', [issue['ComicID']]).fetchone() print ("Checking for issue: " + str(issue['Issue_Number'])) foundNZB = "none" @@ -1023,22 +1236,12 @@ def searchIssueIDList(issuelist): IssueYear = comic['ComicYear'] else: IssueYear = str(issue['IssueDate'])[:4] - if (mylar.NZBSU or mylar.DOGNZB or mylar.EXPERIMENTAL or mylar.NEWZNAB or mylar.NZBX) and (mylar.USE_SABNZBD or mylar.USE_NZBGET): - foundNZB = search_init(comic['ComicName'], issue['Issue_Number'], str(IssueYear), comic['ComicYear'], issue['IssueDate'], issue['IssueID'], AlternateSearch, UseFuzzy, ComicVersion, mode=mode) + if (mylar.NZBSU or mylar.DOGNZB or mylar.EXPERIMENTAL or mylar.NEWZNAB or mylar.NZBX or mylar.ENABLE_CBT or mylar.ENABLE_KAT) and (mylar.USE_SABNZBD or mylar.USE_NZBGET or mylar.ENABLE_TORRENTS): + foundNZB, prov = search_init(comic['ComicName'], issue['Issue_Number'], str(IssueYear), comic['ComicYear'], issue['IssueDate'], issue['IssueID'], AlternateSearch, UseFuzzy, ComicVersion, SARC=None, IssueArcID=None, mode=mode) if foundNZB == "yes": #print ("found!") - updater.foundsearch(ComicID=issue['ComicID'], IssueID=issue['IssueID'], mode=mode) + updater.foundsearch(ComicID=issue['ComicID'], IssueID=issue['IssueID'], mode=mode, provider=prov) else: pass #print ("not found!") -def urlretrieve(urlfile, fpath): - chunk = 4096 - f = open(fpath, "w") - while 1: - data = urlfile.read(chunk) - if not data: - print "done." - break - f.write(data) - print "Read %s bytes"%len(data) diff --git a/mylar/updater.py b/mylar/updater.py index 862743ce..4709c58a 100755 --- a/mylar/updater.py +++ b/mylar/updater.py @@ -27,7 +27,7 @@ from mylar import db, logger, helpers, filechecker def dbUpdate(ComicIDList=None): myDB = db.DBConnection() - print "comicidlist:" + str(ComicIDList) + #print "comicidlist:" + str(ComicIDList) if ComicIDList is None: comiclist = myDB.select('SELECT ComicID, ComicName from comics WHERE Status="Active" or Status="Loading" order by LastUpdated ASC') else: @@ -37,7 +37,6 @@ def dbUpdate(ComicIDList=None): logger.info('Starting update for %i active comics' % len(comiclist)) for comic in comiclist: - print "comic" + comic if ComicIDList is None: comicid = comic[0] else: @@ -61,21 +60,37 @@ def dbUpdate(ComicIDList=None): #in order to update to JUST CV_ONLY, we need to delete the issues for a given series so it's a clean refresh. 
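The dbUpdate hunk below (mirrored in webserve.py further down) wipes a series and re-imports it from ComicVine, then walks the old and new issue rows to restore each previous status - unless the fresh row already reads Downloaded, which is never downgraded. The restore pass, reduced to plain lists:

old_rows = [{'IssueID': '100', 'Status': 'Wanted'},
            {'IssueID': '101', 'Status': 'Skipped'}]
new_rows = [{'IssueID': '100', 'Status': 'Downloaded'},
            {'IssueID': '101', 'Status': 'Wanted'}]
for issue in old_rows:
    for issuenew in new_rows:
        if issuenew['IssueID'] == issue['IssueID'] and issuenew['Status'] != issue['Status']:
            if issuenew['Status'] == 'Downloaded':
                break                                  # keep the completed status
            print("restoring " + issue['IssueID'] + " to " + issue['Status'])
            break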
 logger.fdebug("Gathering the status of all issues for the series.")
         issues = myDB.select('SELECT * FROM issues WHERE ComicID=?', [comicid])
+        if mylar.ANNUALS_ON:
+            issues += myDB.select('SELECT * FROM annuals WHERE ComicID=?', [comicid])
         #store the issues' status for a given comicid, after deleting and readding, flip the status back to what it is currently.
         logger.fdebug("Deleting all issue data.")
         myDB.select('DELETE FROM issues WHERE ComicID=?', [comicid])
+        myDB.select('DELETE FROM annuals WHERE ComicID=?', [comicid])
         logger.fdebug("Refreshing the series and pulling in new data using only CV.")
         mylar.importer.addComictoDB(comicid,mismatch)
         issues_new = myDB.select('SELECT * FROM issues WHERE ComicID=?', [comicid])
+        annuals = []
+        ann_list = []
+        if mylar.ANNUALS_ON:
+            annuals_list = myDB.select('SELECT * FROM annuals WHERE ComicID=?', [comicid])
+            ann_list += annuals_list
+            issues_new += annuals_list
+
         icount = 0
         logger.fdebug("Attempting to put the Status' back how they were.")
         for issue in issues:
             for issuenew in issues_new:
-                if issuenew['IssueID'] == issue['IssueID'] and issuenew['Status'] != issue['Status']:
+                if issuenew['IssueID'] == issue['IssueID'] and issuenew['Status'] != issue['Status']:
+                    #if the status is now Downloaded, keep status.
+                    if issuenew['Status'] == 'Downloaded': break
                     #change the status to the previous status
                     ctrlVAL = {'IssueID': issue['IssueID']}
                     newVAL = {'Status': issue['Status']}
-                    myDB.upsert("Issues", newVAL, ctrlVAL)
+                    if any(d['IssueID'] == str(issue['IssueID']) for d in ann_list):
+                        logger.fdebug("annual detected for " + str(issue['IssueID']) + " #: " + str(issue['Issue_Number']))
+                        myDB.upsert("Annuals", newVAL, ctrlVAL)
+                    else:
+                        myDB.upsert("Issues", newVAL, ctrlVAL)
                     icount+=1
                     break
         logger.info("In converting data to CV only, I changed the status of " + str(icount) + " issues.")
@@ -97,10 +112,16 @@ def latest_update(ComicID, LatestIssue, LatestDate):
 def upcoming_update(ComicID, ComicName, IssueNumber, IssueDate, forcecheck=None):
     # here we add to upcoming table...
     myDB = db.DBConnection()
-
+    dspComicName = ComicName #to make sure that the word 'annual' will be displayed on screen
+    if 'annual' in ComicName.lower():
+        adjComicName = re.sub("\\bannual\\b", "", ComicName.lower()) # for use with comparisons.
+        logger.fdebug("annual detected - adjusting name to : " + adjComicName)
+    else:
+        adjComicName = ComicName
     controlValue = {"ComicID": ComicID}
-    newValue = {"ComicName": str(ComicName),
+    newValue = {"ComicName": adjComicName,
                 "IssueNumber": str(IssueNumber),
+                "DisplayComicName": dspComicName,
                 "IssueDate": str(IssueDate)}
     #let's refresh the artist here just to make sure if an issue is available/not.
@@ -125,10 +146,14 @@ def upcoming_update(ComicID, ComicName, IssueNumber, IssueDate, forcecheck=None)
             # no need to hammer the refresh
             # let's check it every 5 hours (or more)
             #pullupd = "yes"
+    if 'annual' in ComicName.lower():
+        if mylar.ANNUALS_ON:
+            issuechk = myDB.action("SELECT * FROM annuals WHERE ComicID=? AND Issue_Number=?", [ComicID, IssueNumber]).fetchone()
+    else:
+        issuechk = myDB.action("SELECT * FROM issues WHERE ComicID=? AND Issue_Number=?", [ComicID, IssueNumber]).fetchone()
-    issuechk = myDB.action("SELECT * FROM issues WHERE ComicID=?
AND Issue_Number=?", [ComicID, IssueNumber]).fetchone() if issuechk is None: - logger.fdebug(ComicName + " Issue: " + str(IssueNumber) + " not present in listings to mark for download...updating comic and adding to Upcoming Wanted Releases.") + logger.fdebug(adjComicName + " Issue: " + str(IssueNumber) + " not present in listings to mark for download...updating comic and adding to Upcoming Wanted Releases.") # we need to either decrease the total issue count, OR indicate that an issue is upcoming. upco_results = myDB.action("SELECT COUNT(*) FROM UPCOMING WHERE ComicID=?",[ComicID]).fetchall() upco_iss = upco_results[0][0] @@ -154,7 +179,7 @@ def upcoming_update(ComicID, ComicName, IssueNumber, IssueDate, forcecheck=None) return elif issuechk['Issue_Number'] == IssueNumber: logger.fdebug("Comic series already up-to-date ... no need to refresh at this time.") - logger.fdebug("Available to be marked for download - checking..." + str(issuechk['ComicName']) + " Issue: " + str(issuechk['Issue_Number'])) + logger.fdebug("Available to be marked for download - checking..." + adjComicName + " Issue: " + str(issuechk['Issue_Number'])) logger.fdebug("...Existing status: " + str(issuechk['Status'])) control = {"IssueID": issuechk['IssueID']} newValue['IssueID'] = issuechk['IssueID'] @@ -206,7 +231,10 @@ def upcoming_update(ComicID, ComicName, IssueNumber, IssueDate, forcecheck=None) values = {"IssueDate": newValue['IssueDate']} #if ComicID[:1] == "G": mylar.importer.GCDimport(ComicID,pullupd='yes') #else: mylar.importer.addComictoDB(ComicID,mismatch,pullupd='yes') - myDB.upsert("issues", values, control) + if 'annual' in ComicName.lower(): + myDB.upsert("annuals", values, control) + else: + myDB.upsert("issues", values, control) if issuechk['Status'] == 'Downloaded': logger.fdebug("updating Pull-list to reflect status.") downstats = {"Status": issuechk['Status'], @@ -296,6 +324,7 @@ def foundsearch(ComicID, IssueID, mode=None, down=None, provider=None): if down is None: # update the status to Snatched (so it won't keep on re-downloading!) 
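The foundsearch hunk continues below by recording which provider produced the snatch on both the Snatched and Downloaded transitions, so the payload written to the snatched table ends up shaped roughly like this (values hypothetical):

snatchedupdate = {"IssueID": "123456",
                  "Status": "Snatched",
                  "Provider": "nzb.su [RSS]"}   # the [RSS] suffix marks hits that came from the cached feeds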
logger.fdebug("updating status to snatched") + logger.fdebug("provider is " + provider) controlValue = {"IssueID": IssueID} newValue = {"Status": "Snatched"} if mode == 'want_ann': @@ -305,9 +334,9 @@ def foundsearch(ComicID, IssueID, mode=None, down=None, provider=None): # update the snatched DB snatchedupdate = {"IssueID": IssueID, - "Status": "Snatched"}#, -# "Provider": Provider -# } + "Status": "Snatched", + "Provider": provider + } if mode == 'want_ann': IssueNum = "Annual " + issue['Issue_Number'] else: @@ -327,9 +356,9 @@ def foundsearch(ComicID, IssueID, mode=None, down=None, provider=None): IssueNum = issue['Issue_Number'] snatchedupdate = {"IssueID": IssueID, - "Status": "Downloaded"}#, -# "Provider": Provider -# } + "Status": "Downloaded", + "Provider": provider + } newsnatchValues = {"ComicName": comic['ComicName'], "ComicID": ComicID, "Issue_Number": IssueNum, @@ -383,7 +412,7 @@ def forceRescan(ComicID,archive=None): extensions = ('.cbr','.cbz') if temploc.lower().endswith(extensions): logger.fdebug("removed extension for issue:" + str(temploc)) - #temploc = temploc[:-4] + temploc = temploc[:-4] deccnt = str(temploc).count('.') if deccnt > 0: #logger.fdebug("decimal counts are :" + str(deccnt)) @@ -422,7 +451,10 @@ def forceRescan(ComicID,archive=None): #logger.fdebug("final filename to use is : " + str(tempreconstruct)) temploc = tempreconstruct #logger.fdebug("checking " + str(temploc)) - fcnew = shlex.split(str(temploc)) + #fcnew_b4 = shlex.split(str(temploc)) + fcnew_af = re.findall('[^\()]+', temploc) + fcnew = shlex.split(fcnew_af[0]) + fcn = len(fcnew) n = 0 while (n <= iscnt): @@ -483,10 +515,14 @@ def forceRescan(ComicID,archive=None): fcnew[som+1] = '93939999919190933' logger.info("AI Detected seperate from issue - combining and continuing") - fcdigit = helpers.issuedigits(fcnew[som]) + #sometimes scanners refuse to use spaces between () and lump the issue right at the start + #mylar assumes it's all one word in this case..let's dump the brackets. + fcredone = re.findall('[^\()]+', fcnew[som]) + + fcdigit = helpers.issuedigits(fcredone[0]) - #logger.fdebug("fcdigit: " + str(fcdigit)) - #logger.fdebug("int_iss: " + str(int_iss)) + logger.fdebug("fcdigit: " + str(fcdigit)) + logger.fdebug("int_iss: " + str(int_iss)) if int(fcdigit) == int_iss: logger.fdebug("issue match - fcdigit: " + str(fcdigit) + " ... int_iss: " + str(int_iss)) @@ -588,12 +624,13 @@ def forceRescan(ComicID,archive=None): if issuedupe == "yes": pass else: - logger.fdebug("issueID to write to db:" + str(reiss['IssueID'])) #we have the # of comics, now let's update the db. #even if we couldn't find the physical issue, check the status. if 'annual' in temploc.lower(): + logger.fdebug("issueID to write to db:" + str(reann['IssueID'])) controlValueDict = {"IssueID": str(reann['IssueID'])} else: + logger.fdebug("issueID to write to db:" + str(reiss['IssueID'])) controlValueDict = {"IssueID": reiss['IssueID']} #if Archived, increase the 'Have' count. 
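Both filename fixes in the forceRescan hunk above lean on the same trick: strip bracketed runs with re.findall('[^\()]+', ...) before tokenizing, so scanner naming like "54(c2c)(2013)" still yields a clean issue token. For instance:

import re
print(re.findall(r'[^\()]+', "54(c2c)(2013)")[0])   # -> '54'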
@@ -614,6 +651,8 @@ def forceRescan(ComicID,archive=None):
                     havefiles+=1
                 elif old_status == "Wanted":
                     issStatus = "Wanted"
+                elif old_status == "Ignored":
+                    issStatus = "Ignored"
                 else:
                     issStatus = "Skipped"
@@ -643,6 +682,13 @@ def forceRescan(ComicID,archive=None):
         havefiles = havefiles + arcfiles
         logger.fdebug("Adjusting have total to " + str(havefiles) + " because of this many archive files:" + str(arcfiles))
+    ignorecount = 0
+    if mylar.IGNORE_HAVETOTAL:  # if this is enabled, will increase Have total as if in Archived Status
+        ignores = myDB.action("SELECT count(*) FROM issues WHERE ComicID=? AND Status='Ignored'", [ComicID]).fetchall()
+        if int(ignores[0][0]) > 0:
+            ignorecount = ignores[0][0]
+            havefiles = havefiles + ignorecount
+            logger.fdebug("Adjusting have total to " + str(havefiles) + " because of this many Ignored files:" + str(ignorecount))
     #now that we are finished...
     #adjust for issues that have been marked as Downloaded, but aren't found/don't exist.
@@ -688,6 +734,6 @@ def forceRescan(ComicID,archive=None):
                          }
     myDB.upsert("comics", newValueStat, controlValueStat)
-    logger.info(u"I've physically found " + str(foundcount) + " issues, and accounted for " + str(totalarc) + " in an Archived state. Total Issue Count: " + str(havefiles) + " / " + str(rescan['Total']))
+    logger.info(u"I've physically found " + str(foundcount) + " issues, ignored " + str(ignorecount) + " issues, and accounted for " + str(totalarc) + " in an Archived state. Total Issue Count: " + str(havefiles) + " / " + str(rescan['Total']))
     return

diff --git a/mylar/webserve.py b/mylar/webserve.py
index 2ef0a694..4d4f7d35 100755
--- a/mylar/webserve.py
+++ b/mylar/webserve.py
@@ -109,10 +109,11 @@ class WebInterface(object):
         isCounts[2] = 0   #2 wanted
         isCounts[3] = 0   #3 archived
         isCounts[4] = 0   #4 downloaded
-        isCounts[5] = 0   #5 read
+        isCounts[5] = 0   #5 ignored
+        #isCounts[6] = 0  #6 read

         for curResult in issues:
-            baseissues = {'skipped':1,'wanted':2,'archived':3,'downloaded':4}
+            baseissues = {'skipped':1,'wanted':2,'archived':3,'downloaded':4,'ignored':5}
             for seas in baseissues:
                 if seas in curResult['Status'].lower():
                     sconv = baseissues[seas]
@@ -122,7 +123,8 @@ class WebInterface(object):
                     "Skipped" : str(isCounts[1]),
                     "Wanted" : str(isCounts[2]),
                     "Archived" : str(isCounts[3]),
-                    "Downloaded" : str(isCounts[4])
+                    "Downloaded" : str(isCounts[4]),
+                    "Ignored" : str(isCounts[5])
                     }
         usethefuzzy = comic['UseFuzzy']
         skipped2wanted = "0"
@@ -137,7 +139,6 @@ class WebInterface(object):
         if mylar.ANNUALS_ON:
             annuals = myDB.select("SELECT * FROM annuals WHERE ComicID=?", [ComicID])
         else: annuals = None
-        print "blah"
         return serve_template(templatename="comicdetails.html", title=comic['ComicName'], comic=comic, issues=issues, comicConfig=comicConfig, isCounts=isCounts, series=series, annuals=annuals)
     comicDetails.exposed = True
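The isCounts hunk above tallies issue rows into per-status buckets by substring-matching each row's Status, with the new 'ignored' bucket at index 5. A compact sketch of the same tally, assuming rows behave like dicts with a 'Status' key:

BASEISSUES = {'skipped': 1, 'wanted': 2, 'archived': 3, 'downloaded': 4, 'ignored': 5}

def tally_statuses(issues):
    # Index 0 is unused so bucket numbers line up with the patch's comments
    # (1=skipped ... 5=ignored).
    isCounts = [0] * (len(BASEISSUES) + 1)
    for row in issues:
        for status, idx in BASEISSUES.items():
            if status in row['Status'].lower():
                isCounts[idx] += 1
    return isCounts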
new data using only CV.") mylar.importer.addComictoDB(ComicID,mismatch) - issues_new = myDB.select('SELECT * FROM issues WHERE ComicID=?', [ComicID]) + issues_new = myDB.select('SELECT * FROM issues WHERE ComicID=?', [ComicID]) + annuals = [] + ann_list = [] + if mylar.ANNUALS_ON: + annuals_list = myDB.select('SELECT * FROM annuals WHERE ComicID=?', [ComicID]) + ann_list += annuals_list + issues_new += annuals_list + logger.fdebug("Attempting to put the Status' back how they were.") icount = 0 for issue in issues: for issuenew in issues_new: if issuenew['IssueID'] == issue['IssueID'] and issuenew['Status'] != issue['Status']: + #if the status is now Downloaded, keep status. + if issuenew['Status'] == 'Downloaded': break #change the status to the previous status ctrlVAL = {'IssueID': issue['IssueID']} newVAL = {'Status': issue['Status']} - myDB.upsert("Issues", newVAL, ctrlVAL) + if any(d['IssueID'] == str(issue['IssueID']) for d in ann_list): + logger.fdebug("annual detected for " + str(issue['IssueID']) + " #: " + str(issue['Issue_Number'])) + myDB.upsert("Annuals", newVAL, ctrlVAL) + else: + myDB.upsert("Issues", newVAL, ctrlVAL) icount+=1 break logger.info("In the process of converting the data to CV, I changed the status of " + str(icount) + " issues.") @@ -480,6 +497,11 @@ class WebInterface(object): continue else: mi = myDB.action("SELECT * FROM issues WHERE IssueID=?",[IssueID]).fetchone() + annchk = 'no' + if mi is None: + if mylar.ANNUALS_ON: + mi = myDB.action("SELECT * FROM annuals WHERE IssueID=?",[IssueID]).fetchone() + annchk = 'yes' miyr = myDB.action("SELECT ComicYear FROM comics WHERE ComicID=?", [mi['ComicID']]).fetchone() if action == 'Downloaded': if mi['Status'] == "Skipped" or mi['Status'] == "Wanted": @@ -499,8 +521,11 @@ class WebInterface(object): myDB.action("DELETE FROM snatched WHERE IssueID=?", [IssueID]) controlValueDict = {"IssueID": IssueID} newValueDict = {"Status": newaction} - myDB.upsert("issues", newValueDict, controlValueDict) - print "updated...to " + str(newaction) + if annchk == 'yes': + myDB.upsert("annuals", newValueDict, controlValueDict) + else: + myDB.upsert("issues", newValueDict, controlValueDict) + logger.fdebug("updated...to " + str(newaction)) if len(issuestoArchive) > 0: updater.forceRescan(mi['ComicID']) if len(issuesToAdd) > 0: @@ -610,20 +635,36 @@ class WebInterface(object): def unqueueissue(self, IssueID, ComicID): myDB = db.DBConnection() issue = myDB.action('SELECT * FROM issues WHERE IssueID=?', [IssueID]).fetchone() + annchk = 'no' + if issue is None: + if mylar.ANNUALS_ON: + issue = myDB.action('SELECT * FROM annuals WHERE IssueID=?', [IssueID]).fetchone() + annchk = 'yes' logger.info(u"Marking " + issue['ComicName'] + " issue # " + issue['Issue_Number'] + " as skipped...") controlValueDict = {'IssueID': IssueID} newValueDict = {'Status': 'Skipped'} - myDB.upsert("issues", newValueDict, controlValueDict) + if annchk == 'yes': + myDB.upsert("annuals", newValueDict, controlValueDict) + else: + myDB.upsert("issues", newValueDict, controlValueDict) raise cherrypy.HTTPRedirect("comicDetails?ComicID=%s" % ComicID) unqueueissue.exposed = True def archiveissue(self, IssueID): myDB = db.DBConnection() issue = myDB.action('SELECT * FROM issues WHERE IssueID=?', [IssueID]).fetchone() + annchk = 'no' + if issue is None: + if mylar.ANNUALS_ON: + issue = myDB.action('SELECT * FROM annuals WHERE IssueID=?', [IssueID]).fetchone() + annchk = 'yes' logger.info(u"Marking " + issue['ComicName'] + " issue # " + issue['Issue_Number'] + " as 
archived...") controlValueDict = {'IssueID': IssueID} newValueDict = {'Status': 'Archived'} - myDB.upsert("issues", newValueDict, controlValueDict) + if annchk == 'yes': + myDB.upsert("annuals", newValueDict, controlValueDict) + else: + myDB.upsert("issues", newValueDict, controlValueDict) raise cherrypy.HTTPRedirect("comicDetails?ComicID=%s" % issue['ComicID']) archiveissue.exposed = True @@ -683,6 +724,15 @@ class WebInterface(object): #upcoming = myDB.select("SELECT * from issues WHERE ReleaseDate > date('now') order by ReleaseDate DESC") upcoming = myDB.select("SELECT * from upcoming WHERE IssueDate > date('now') AND IssueID is NULL order by IssueDate DESC") issues = myDB.select("SELECT * from issues WHERE Status='Wanted'") + ann_list = [] + + if mylar.ANNUALS_ON: + #let's add the annuals to the wanted table so people can see them + #ComicName wasn't present in db initially - added on startup chk now. + annuals_list = myDB.select("SELECT * FROM annuals WHERE Status='Wanted'") + ann_list += annuals_list + issues += annuals_list + #let's move any items from the upcoming table into the wanted table if the date has already passed. #gather the list... mvupcome = myDB.select("SELECT * from upcoming WHERE IssueDate < date('now') order by IssueDate DESC") @@ -706,7 +756,7 @@ class WebInterface(object): deleteit = myDB.action("DELETE from upcoming WHERE ComicName=? AND IssueNumber=?", [mvup['ComicName'],mvup['IssueNumber']]) - return serve_template(templatename="upcoming.html", title="Upcoming", upcoming=upcoming, issues=issues) + return serve_template(templatename="upcoming.html", title="Upcoming", upcoming=upcoming, issues=issues, ann_list=ann_list) upcoming.exposed = True def skipped2wanted(self, comicid): @@ -886,13 +936,13 @@ class WebInterface(object): history.exposed = True def reOrder(request): - print ("I have reached the re-order!!!") - return serve_template(templatename="reorder.html", title="ReoRdered!", reorder=request) + return request +# return serve_template(templatename="reorder.html", title="ReoRdered!", reorder=request) reOrder.exposed = True def readlist(self): myDB = db.DBConnection() - readlist = myDB.select("SELECT * from readinglist group by StoryArcID COLLATE NOCASE") + readlist = myDB.select("SELECT * from readinglist WHERE ComicName is not Null group by StoryArcID COLLATE NOCASE") issuelist = myDB.select("SELECT * from readlist") readConfig = { "read2filename" : helpers.checked(mylar.READ2FILENAME), @@ -976,8 +1026,8 @@ class WebInterface(object): from xml.dom.minidom import parseString, Element import random myDB = db.DBConnection() - - file = open(str(filename)) + + file = open(filename) data = file.read() file.close() @@ -1472,8 +1522,8 @@ class WebInterface(object): def preSearchit(self, ComicName, comiclist=None, mimp=0): implog = '' - implog = implog + "imp_rename:" + str(imp_rename) + "\n" - implog = implog + "imp_move:" + str(imp_move) + "\n" + implog = implog + "imp_rename:" + str(mylar.IMP_RENAME) + "\n" + implog = implog + "imp_move:" + str(mylar.IMP_MOVE) + "\n" if mimp == 0: comiclist = [] comiclist.append(ComicName) @@ -1685,6 +1735,21 @@ class WebInterface(object): "newznab_api" : mylar.NEWZNAB_APIKEY, "newznab_enabled" : helpers.checked(mylar.NEWZNAB_ENABLED), "extra_newznabs" : mylar.EXTRA_NEWZNABS, + "enable_rss" : helpers.checked(mylar.ENABLE_RSS), + "rss_checkinterval" : mylar.RSS_CHECKINTERVAL, + "enable_torrents" : helpers.checked(mylar.ENABLE_TORRENTS), + "torrent_local" : helpers.checked(mylar.TORRENT_LOCAL), + "local_watchdir" : 
@@ -1685,6 +1735,21 @@ class WebInterface(object):
                     "newznab_api" : mylar.NEWZNAB_APIKEY,
                     "newznab_enabled" : helpers.checked(mylar.NEWZNAB_ENABLED),
                     "extra_newznabs" : mylar.EXTRA_NEWZNABS,
+                    "enable_rss" : helpers.checked(mylar.ENABLE_RSS),
+                    "rss_checkinterval" : mylar.RSS_CHECKINTERVAL,
+                    "enable_torrents" : helpers.checked(mylar.ENABLE_TORRENTS),
+                    "torrent_local" : helpers.checked(mylar.TORRENT_LOCAL),
+                    "local_watchdir" : mylar.LOCAL_WATCHDIR,
+                    "torrent_seedbox" : helpers.checked(mylar.TORRENT_SEEDBOX),
+                    "seedbox_watchdir" : mylar.SEEDBOX_WATCHDIR,
+                    "seedbox_host" : mylar.SEEDBOX_HOST,
+                    "seedbox_port" : mylar.SEEDBOX_PORT,
+                    "seedbox_user" : mylar.SEEDBOX_USER,
+                    "seedbox_pass" : mylar.SEEDBOX_PASS,
+                    "enable_torrent_search" : helpers.checked(mylar.ENABLE_TORRENT_SEARCH),
+                    "enable_kat" : helpers.checked(mylar.ENABLE_KAT),
+                    "enable_cbt" : helpers.checked(mylar.ENABLE_CBT),
+                    "cbt_passkey" : mylar.CBT_PASSKEY,
                     "destination_dir" : mylar.DESTINATION_DIR,
                     "chmod_dir" : mylar.CHMOD_DIR,
                     "chmod_file" : mylar.CHMOD_FILE,
@@ -1777,6 +1842,28 @@ class WebInterface(object):
         myDB = db.DBConnection()
         #--- this is for multipe search terms............
         #--- works, just need to redo search.py to accomodate multiple search terms
+        ffs_alt = []
+        if '##' in alt_search:
+            ffs = alt_search.find('##')
+            ffs_alt.append(alt_search[:ffs])
+            ffs_alt_st = str(ffs_alt[0])
+            print ("ffs_alt: " + str(ffs_alt[0]))
+
+            ffs_test = alt_search.split('##')
+            if len(ffs_test) > 0:
+                print("ffs_test names: " + str(len(ffs_test)))
+                ffs_count = len(ffs_test)
+                n=1
+                while (n < ffs_count):
+                    ffs_alt.append(ffs_test[n])
+                    print("adding : " + str(ffs_test[n]))
+                    #print("ffs_alt : " + str(ffs_alt))
+                    ffs_alt_st = str(ffs_alt_st) + "..." + str(ffs_test[n])
+                    n+=1
+            asearch = ffs_alt
+        else:
+            asearch = alt_search
+
 #        ffs_alt = []
 #        if '+' in alt_search:
             #find first +
@@ -1868,7 +1955,8 @@ class WebInterface(object):
         use_nzbget=0, nzbget_host=None, nzbget_port=None, nzbget_username=None, nzbget_password=None, nzbget_category=None, nzbget_priority=None, usenet_retention=None, nzbsu=0, nzbsu_apikey=None, dognzb=0, dognzb_apikey=None, nzbx=0, newznab=0, newznab_host=None, newznab_name=None, newznab_apikey=None, newznab_enabled=0,
         raw=0, raw_provider=None, raw_username=None, raw_password=None, raw_groups=None, experimental=0,
-        enable_meta=0, cmtagger_path=None,
+        enable_meta=0, cmtagger_path=None, enable_rss=0, rss_checkinterval=None, enable_torrent_search=0, enable_kat=0, enable_cbt=0, cbt_passkey=None,
+        enable_torrents=0, torrent_local=0, local_watchdir=None, torrent_seedbox=0, seedbox_watchdir=None, seedbox_user=None, seedbox_pass=None, seedbox_host=None, seedbox_port=None,
         prowl_enabled=0, prowl_onsnatch=0, prowl_keys=None, prowl_priority=None, nma_enabled=0, nma_apikey=None, nma_priority=0, nma_onsnatch=0, pushover_enabled=0, pushover_onsnatch=0, pushover_apikey=None, pushover_userkey=None, pushover_priority=None, preferred_quality=0, move_files=0, rename_files=0, add_to_csv=1, cvinfo=0, lowercase_filenames=0, folder_format=None, file_format=None, enable_extra_scripts=0, extra_scripts=None, enable_pre_scripts=0, pre_scripts=None, post_processing=0, syno_fix=0, search_delay=None, chmod_dir=0777, chmod_file=0660, cvapifix=0, destination_dir=None, replace_spaces=0, replace_char=None, use_minsize=0, minsize=None, use_maxsize=0, maxsize=None, autowant_all=0, autowant_upcoming=0, comic_cover_local=0, zero_level=0, zero_level_n=None, interface=None, **kwargs):
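The ffs_alt block above parses '##'-delimited alternate search names into a list, or passes a plain string through untouched; its find/while bookkeeping boils down to a split. A sketch of the equivalent behaviour (function name is illustrative):

def parse_alt_search(alt_search):
    # "Batman##Dark Knight" -> ['Batman', 'Dark Knight']; a string without
    # the '##' delimiter is returned unchanged, matching the patched branch.
    if '##' in alt_search:
        return alt_search.split('##')
    return alt_search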
@@ -1916,6 +2004,21 @@ class WebInterface(object):
         #mylar.NEWZNAB_HOST = newznab_host
         #mylar.NEWZNAB_APIKEY = newznab_apikey
         #mylar.NEWZNAB_ENABLED = newznab_enabled
+        mylar.ENABLE_RSS = int(enable_rss)
+        mylar.RSS_CHECKINTERVAL = rss_checkinterval
+        mylar.ENABLE_TORRENTS = int(enable_torrents)
+        mylar.TORRENT_LOCAL = int(torrent_local)
+        mylar.LOCAL_WATCHDIR = local_watchdir
+        mylar.TORRENT_SEEDBOX = int(torrent_seedbox)
+        mylar.SEEDBOX_WATCHDIR = seedbox_watchdir
+        mylar.SEEDBOX_HOST = seedbox_host
+        mylar.SEEDBOX_PORT = seedbox_port
+        mylar.SEEDBOX_USER = seedbox_user
+        mylar.SEEDBOX_PASS = seedbox_pass
+        mylar.ENABLE_TORRENT_SEARCH = int(enable_torrent_search)
+        mylar.ENABLE_KAT = int(enable_kat)
+        mylar.ENABLE_CBT = int(enable_cbt)
+        mylar.CBT_PASSKEY = cbt_passkey
         mylar.PREFERRED_QUALITY = int(preferred_quality)
         mylar.MOVE_FILES = move_files
         mylar.RENAME_FILES = rename_files
@@ -1969,11 +2072,12 @@ class WebInterface(object):
         #changing this for simplicty - adding all newznabs into extra_newznabs
         if newznab_host is not None:
             #this
-            mylar.EXTRA_NEWZNABS.append((newznab_host, newznab_apikey, int(newznab_enabled)))
+            mylar.EXTRA_NEWZNABS.append((newznab_host, newznab_apikey, int(newznab_enabled), newznab_name))
         for kwarg in kwargs:
-            if kwarg.startswith('newznab_host'):
+            if kwarg.startswith('newznab_name'):
                 newznab_number = kwarg[12:]
+                newznab_name = kwargs['newznab_name' + newznab_number]
                 newznab_host = kwargs['newznab_host' + newznab_number]
                 newznab_api = kwargs['newznab_api' + newznab_number]
                 try:
@@ -1981,7 +2085,7 @@ class WebInterface(object):
                 except KeyError:
                     newznab_enabled = 0
-                mylar.EXTRA_NEWZNABS.append((newznab_host, newznab_api, newznab_enabled))
+                mylar.EXTRA_NEWZNABS.append((newznab_name, newznab_host, newznab_api, newznab_enabled))

         # Sanity checking
         if mylar.SEARCH_INTERVAL < 360:
@@ -1992,6 +2096,10 @@ class WebInterface(object):
             logger.info("Minimum search delay set for 1 minute to avoid hammering.")
             mylar.SEARCH_DELAY = 1

+        if mylar.RSS_CHECKINTERVAL < 20:
+            logger.info("Minimum RSS Interval Check delay set for 20 minutes to avoid hammering.")
+            mylar.RSS_CHECKINTERVAL = 20
+
         if not helpers.is_number(mylar.CHMOD_DIR):
             logger.info("CHMOD Directory value is not a valid numeric - please correct. Defaulting to 0777")
             mylar.CHMOD_DIR = '0777'
diff --git a/mylar/weeklypull.py b/mylar/weeklypull.py
index 34699182..d6d88267 100755
--- a/mylar/weeklypull.py
+++ b/mylar/weeklypull.py
@@ -457,6 +457,7 @@ def pullitcheck(comic1off_name=None,comic1off_id=None,forcecheck=None):
         sqlsearch = re.sub("\\bTHE\\b", '', sqlsearch)
         if '+' in sqlsearch: sqlsearch = re.sub('\+', '%PLUS%', sqlsearch)
         sqlsearch = re.sub(r'\s', '%', sqlsearch)
+        sqlsearch = sqlsearch + '%'
         logger.fdebug("searchsql: " + sqlsearch)
         weekly = myDB.select('SELECT PUBLISHER, ISSUE, COMIC, EXTRA, SHIPDATE FROM weekly WHERE COMIC LIKE (?)', [sqlsearch])
         #cur.execute('SELECT PUBLISHER, ISSUE, COMIC, EXTRA, SHIPDATE FROM weekly WHERE COMIC LIKE (?)', [lines[cnt]])
@@ -505,6 +506,10 @@ def pullitcheck(comic1off_name=None,comic1off_id=None,forcecheck=None):
                 #logger.fdebug("modcomicnm:" + modcomicnm)
                 #logger.fdebug("modwatchcomic:" + modwatchcomic)

+                #annuals!
+                if 'ANNUAL' in comicnm.upper():
+                    modcomicnm = re.sub("\\bANNUAL\\b", "", modcomicnm.upper())
+
                 watchcomic = re.sub(r'\s', '', watchcomic)
                 comicnm = re.sub(r'\s', '', comicnm)
                 modwatchcomic = re.sub(r'\s', '', modwatchcomic)
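The @@ -457 weeklypull hunk widens the pull-list LIKE pattern: whitespace runs become '%' wildcards, and the new trailing '%' lets titles with extra suffixes still match. A sketch of the pattern builder, assuming the same preprocessing order as the hunk (the function name is illustrative):

import re

def build_weekly_like(name):
    s = re.sub(r"\bTHE\b", '', name)      # drop a standalone THE
    if '+' in s:
        s = re.sub(r'\+', '%PLUS%', s)    # '+' is meaningful downstream
    s = re.sub(r'\s', '%', s)             # each whitespace char -> wildcard
    return s + '%'                        # new: also match trailing suffixes

# build_weekly_like("THE WALKING DEAD") -> '%WALKING%DEAD%'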
@@ -514,9 +519,9 @@ def pullitcheck(comic1off_name=None,comic1off_id=None,forcecheck=None):
                 if comicnm == watchcomic.upper() or modcomicnm == modwatchcomic.upper():
                     logger.fdebug("matched on:" + comicnm + "..." + watchcomic.upper())
                     pass
-                elif ("ANNUAL" in week['EXTRA']):
-                    pass
-                    #print ( row[3] + " matched on ANNUAL")
+#                elif ("ANNUAL" in week['EXTRA']):
+#                    pass
+#                    print ( row[3] + " matched on ANNUAL")
                 else:
                     break
                 if ("NA" not in week['ISSUE']) and ("HC" not in week['ISSUE']):
@@ -529,9 +534,11 @@ def pullitcheck(comic1off_name=None,comic1off_id=None,forcecheck=None):
                     #kp.append(row[0])
                     #ki.append(row[1])
                     #kc.append(comicnm)
-                    if ("ANNUAL" in week['EXTRA']):
+                    if "ANNUAL" in comicnm.upper():
                         watchfndextra.append("annual")
+                        ComicName = str(unlines[cnt]) + " Annual"
                     else:
+                        ComicName = str(unlines[cnt])
                         watchfndextra.append("none")
                     watchfnd.append(comicnm)
                     watchfndiss.append(week['ISSUE'])
@@ -541,7 +548,7 @@ def pullitcheck(comic1off_name=None,comic1off_id=None,forcecheck=None):
                     else:
                         ComicIssue = str(watchfndiss[tot -1])
                     ComicDate = str(week['SHIPDATE'])
-                    ComicName = str(unlines[cnt])
+                    #ComicName = str(unlines[cnt])
                     logger.fdebug("Watchlist hit for : " + ComicName + " ISSUE: " + str(watchfndiss[tot -1]))
                     # here we add to comics.latest
                     updater.latest_update(ComicID=ComicID, LatestIssue=ComicIssue, LatestDate=ComicDate)
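The last two hunks move the ComicName assignment inside the match so annual hits report as "<series> Annual" rather than the bare series name. The naming rule in isolation, assuming unlines[cnt] holds the watched series name as it does in pullitcheck (the function name is illustrative):

def watchlist_display_name(watched_name, comicnm):
    # Pull-list entries whose name contains ANNUAL get an " Annual" suffix.
    if "ANNUAL" in comicnm.upper():
        return str(watched_name) + " Annual"
    return str(watched_name)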
-					${issue['ComicName']}${issue['Issue_Number']}
+					<%
+						if any(d['IssueID'] == str(issue['IssueID']) for d in ann_list):
+							adjcomicname = issue['ComicName'] + ' Annual'
+						else:
+							adjcomicname = issue['ComicName']
+						endif
+					%>
+					${adjcomicname}${issue['Issue_Number']}
 					${issue['IssueDate']}
-					${upcome['ComicName']}${upcome['IssueNumber']}
+					${upcome['DisplayComicName']}${upcome['IssueNumber']}
 					${upcome['IssueDate']}
 					${upcome['Status']}