fix: Experimental search; fix: dognzb not able to download NZBs; fix: nzb.su NZB filenames (broke renaming/post-processing); add: Newznab provider; add: explicit search.log

evilhero 2012-12-16 12:57:02 -05:00
parent 455b06bc2c
commit b0873994b9
7 changed files with 377 additions and 79 deletions

data/interfaces/default/config.html
@ -201,6 +201,60 @@
</td> </td>
<td> <td>
<fieldset>
<legend>Newznab</legend>
<div class="row checkbox">
<input id="usenewznab" type="checkbox" name="newznab" onclick="initConfigCheckbox($(this));" value="1" ${config['use_newznab']} /><label>Use Newznab</label>
</div>
<div id="newznab_providers">
<div class="config" id="newznab1">
<div class="row">
<label>Newznab Host</label>
<input type="text" name="newznab_host" value="${config['newznab_host']}" size="30">
<small>e.g. http://nzb.su</small>
</div>
<div class="row">
<label>Newznab API</label>
<input type="text" name="newznab_apikey" value="${config['newznab_api']}" size="36">
</div>
<div class="row checkbox">
<input id="newznab_enabled" type="checkbox" name="newznab_enabled" onclick="initConfigCheckbox($(this));" value="1" ${config['newznab_enabled']} /><label>Enabled</label>
</div>
</div>
<%
newznab_number = 2
%>
%for newznab in config['extra_newznabs']:
<%
if newznab[2] == '1' or newznab[2] == 1:
newznab_enabled = "checked"
else:
newznab_enabled = ""
%>
<div class="config" id="newznab${newznab_number}">
<div class="row">
<label>Newznab Host</label>
<input type="text" name="newznab_host${newznab_number}" value="${newznab[0]}" size="30">
</div>
<div class="row">
<label>Newznab API</label>
<input type="text" name="newznab_api${newznab_number}" value="${newznab[1]}" size="36">
</div>
<div class="row checkbox">
<input id="newznab_enabled" type="checkbox" name="newznab_enabled${newznab_number}" value="1" ${newznab_enabled} /><label>Enabled</label>
</div>
<div class="row">
<input type="button" class="remove" id="newznab${newznab_number}" value="Remove ${newznab[0]}">
</div>
</div>
<%
newznab_number += 1
%>
%endfor
<input type="button" value="Add Newznab" class="add_newznab" id="add_newznab" />
</div>
</fieldset>
</td> </td>
</tr> </tr>
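For reference, the %for loop above iterates config['extra_newznabs'], which webserve.py fills from mylar.EXTRA_NEWZNABS as a list of (host, apikey, enabled) tuples; the checkbox-style values come through helpers.checked(). A minimal sketch of the data this fieldset expects (all values hypothetical):

# Hypothetical shape of the template context consumed by the Newznab fieldset.
config = {
    'use_newznab': 'checked',                 # helpers.checked(mylar.NEWZNAB)
    'newznab_host': 'http://nzb.su',
    'newznab_api': 'abc123',
    'newznab_enabled': 'checked',             # helpers.checked(mylar.NEWZNAB_ENABLED)
    'extra_newznabs': [
        ('http://indexer.example.com', 'def456', '1'),   # rendered with Enabled ticked
        ('http://other.example.org', 'ghi789', '0'),     # rendered with Enabled unticked
    ],
}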
@ -224,7 +278,7 @@
<fieldset> <fieldset>
<legend>Post-Processing</legend> <legend>Post-Processing</legend>
<div class="row checkbox left clearfix"> <div class="row checkbox left clearfix">
<input type="checkbox" name="rename_files" value="1" ${config['rename_files']} /><label>Rename files <small>(edit sabnzbd/ComicRN.py)</small></label> <input type="checkbox" name="rename_files" value="1" ${config['rename_files']} /><label>Rename files </label>
</div> </div>
<div class="row"</div> <div class="row"</div>
</div> </div>
@ -370,12 +424,35 @@
function initThisPage() function initThisPage()
{ {
var deletedNewznabs = 0;
$(".remove").click(function() {
$(this).parent().parent().remove();
deletedNewznabs = deletedNewznabs + 1;
});
$("#add_newznab").click(function() {
var intId = $("#newznab_providers > div").size() + deletedNewznabs + 1;
var formfields = $("<div class=\"config\" id=\"newznab" + intId + "\"><div class=\"row\"><label>Newznab Host</label><input type=\"text\" name=\"newznab_host" + intId + "\" size=\"30\"></div><div class=\"row\"><label>Newznab API</label><input type=\"text\" name=\"newznab_api" + intId + "\" size=\"36\"></div><div class=\"row\"><label>Category Mapping</label><input type=\"text\" name=\"newznab_category" + intId + "\" size=\"36\"></div><div class=\"row checkbox\"><input type=\"checkbox\" name=\"newznab_enabled" + intId + "\" value=\"1\" checked /><label>Enabled</label></div>");
var removeButton = $("<div class=\"row\"><input type=\"button\" class=\"remove\" value=\"Remove\" /></div>");
removeButton.click(function() {
$(this).parent().remove();
deletedNewznabs = deletedNewznabs + 1;
});
formfields.append(removeButton);
formfields.append("</div>");
$("#add_newznab").before(formfields);
});
$(function() { $(function() {
$( "#tabs" ).tabs(); $( "#tabs" ).tabs();
}); });
initActions(); initActions();
initConfigCheckbox("#launch_browser"); initConfigCheckbox("#launch_browser");
initConfigCheckbox("#usenewznab");
initConfigCheckbox("#usenzbsu"); initConfigCheckbox("#usenzbsu");
initConfigCheckbox("#usedognzb"); initConfigCheckbox("#usedognzb");
initConfigCheckbox("#useexperimental"); initConfigCheckbox("#useexperimental");

mylar/PostProcessor.py (0): Normal file → Executable file
mylar/__init__.py
@ -20,6 +20,7 @@ import os, sys, subprocess
import threading import threading
import webbrowser import webbrowser
import sqlite3 import sqlite3
import itertools
import csv import csv
from lib.apscheduler.scheduler import Scheduler from lib.apscheduler.scheduler import Scheduler
@ -120,6 +121,12 @@ NZBSU_APIKEY = None
DOGNZB = False DOGNZB = False
DOGNZB_APIKEY = None DOGNZB_APIKEY = None
NEWZNAB = False
NEWZNAB_HOST = None
NEWZNAB_APIKEY = None
NEWZNAB_ENABLED = False
EXTRA_NEWZNABS = []
RAW = False RAW = False
RAW_PROVIDER = None RAW_PROVIDER = None
RAW_USERNAME = None RAW_USERNAME = None
@ -191,6 +198,7 @@ def initialize():
DOWNLOAD_DIR, USENET_RETENTION, SEARCH_INTERVAL, INTERFACE, AUTOWANT_ALL, AUTOWANT_UPCOMING, ZERO_LEVEL, ZERO_LEVEL_N, \ DOWNLOAD_DIR, USENET_RETENTION, SEARCH_INTERVAL, INTERFACE, AUTOWANT_ALL, AUTOWANT_UPCOMING, ZERO_LEVEL, ZERO_LEVEL_N, \
LIBRARYSCAN_INTERVAL, DOWNLOAD_SCAN_INTERVAL, SAB_HOST, SAB_USERNAME, SAB_PASSWORD, SAB_APIKEY, SAB_CATEGORY, SAB_PRIORITY, BLACKHOLE, BLACKHOLE_DIR, \ LIBRARYSCAN_INTERVAL, DOWNLOAD_SCAN_INTERVAL, SAB_HOST, SAB_USERNAME, SAB_PASSWORD, SAB_APIKEY, SAB_CATEGORY, SAB_PRIORITY, BLACKHOLE, BLACKHOLE_DIR, \
NZBSU, NZBSU_APIKEY, DOGNZB, DOGNZB_APIKEY, \ NZBSU, NZBSU_APIKEY, DOGNZB, DOGNZB_APIKEY, \
NEWZNAB, NEWZNAB_HOST, NEWZNAB_APIKEY, NEWZNAB_ENABLED, EXTRA_NEWZNABS,\
RAW, RAW_PROVIDER, RAW_USERNAME, RAW_PASSWORD, RAW_GROUPS, EXPERIMENTAL, \ RAW, RAW_PROVIDER, RAW_USERNAME, RAW_PASSWORD, RAW_GROUPS, EXPERIMENTAL, \
PREFERRED_QUALITY, MOVE_FILES, RENAME_FILES, CORRECT_METADATA, FOLDER_FORMAT, FILE_FORMAT, REPLACE_CHAR, REPLACE_SPACES, \ PREFERRED_QUALITY, MOVE_FILES, RENAME_FILES, CORRECT_METADATA, FOLDER_FORMAT, FILE_FORMAT, REPLACE_CHAR, REPLACE_SPACES, \
COMIC_LOCATION, QUAL_ALTVERS, QUAL_SCANNER, QUAL_TYPE, QUAL_QUALITY COMIC_LOCATION, QUAL_ALTVERS, QUAL_SCANNER, QUAL_TYPE, QUAL_QUALITY
@ -205,6 +213,7 @@ def initialize():
CheckSection('DOGnzb') CheckSection('DOGnzb')
CheckSection('Raw') CheckSection('Raw')
CheckSection('Experimental') CheckSection('Experimental')
CheckSection('Newznab')
# Set global variables based on config file or use defaults # Set global variables based on config file or use defaults
try: try:
HTTP_PORT = check_setting_int(CFG, 'General', 'http_port', 8090) HTTP_PORT = check_setting_int(CFG, 'General', 'http_port', 8090)
@ -239,8 +248,8 @@ def initialize():
CORRECT_METADATA = bool(check_setting_int(CFG, 'General', 'correct_metadata', 0)) CORRECT_METADATA = bool(check_setting_int(CFG, 'General', 'correct_metadata', 0))
MOVE_FILES = bool(check_setting_int(CFG, 'General', 'move_files', 0)) MOVE_FILES = bool(check_setting_int(CFG, 'General', 'move_files', 0))
RENAME_FILES = bool(check_setting_int(CFG, 'General', 'rename_files', 0)) RENAME_FILES = bool(check_setting_int(CFG, 'General', 'rename_files', 0))
FOLDER_FORMAT = check_setting_str(CFG, 'General', 'folder_format', 'Artist/Album [Year]') FOLDER_FORMAT = check_setting_str(CFG, 'General', 'folder_format', '$Series-($Year)')
FILE_FORMAT = check_setting_str(CFG, 'General', 'file_format', 'Track Artist - Album [Year]- Title') FILE_FORMAT = check_setting_str(CFG, 'General', 'file_format', '$Series $Issue ($Year)')
BLACKHOLE = bool(check_setting_int(CFG, 'General', 'blackhole', 0)) BLACKHOLE = bool(check_setting_int(CFG, 'General', 'blackhole', 0))
BLACKHOLE_DIR = check_setting_str(CFG, 'General', 'blackhole_dir', '') BLACKHOLE_DIR = check_setting_str(CFG, 'General', 'blackhole_dir', '')
REPLACE_SPACES = bool(check_setting_int(CFG, 'General', 'replace_spaces', 0)) REPLACE_SPACES = bool(check_setting_int(CFG, 'General', 'replace_spaces', 0))
@ -269,11 +278,21 @@ def initialize():
EXPERIMENTAL = bool(check_setting_int(CFG, 'Experimental', 'experimental', 0)) EXPERIMENTAL = bool(check_setting_int(CFG, 'Experimental', 'experimental', 0))
NEWZNAB = bool(check_setting_int(CFG, 'Newznab', 'newznab', 0))
NEWZNAB_HOST = check_setting_str(CFG, 'Newznab', 'newznab_host', '')
NEWZNAB_APIKEY = check_setting_str(CFG, 'Newznab', 'newznab_apikey', '')
NEWZNAB_ENABLED = bool(check_setting_int(CFG, 'Newznab', 'newznab_enabled', 1))
# Need to pack the extra newznabs back into a list of tuples
flattened_newznabs = check_setting_str(CFG, 'Newznab', 'extra_newznabs', [], log=False)
EXTRA_NEWZNABS = list(itertools.izip(*[itertools.islice(flattened_newznabs, i, None, 3) for i in range(3)]))
# update folder formats in the config & bump up config version # update folder formats in the config & bump up config version
if CONFIG_VERSION == '0': if CONFIG_VERSION == '0':
from mylar.helpers import replace_all from mylar.helpers import replace_all
file_values = { 'tracknumber': 'Track', 'title': 'Title','artist' : 'Artist', 'album' : 'Album', 'year' : 'Year' } file_values = { 'issue': 'Issue', 'title': 'Title', 'series' : 'Series', 'year' : 'Year' }
folder_values = { 'artist' : 'Artist', 'album':'Album', 'year' : 'Year', 'releasetype' : 'Type', 'first' : 'First', 'lowerfirst' : 'first' } folder_values = { 'series' : 'Series', 'publisher':'Publisher', 'year' : 'Year', 'first' : 'First', 'lowerfirst' : 'first' }
FILE_FORMAT = replace_all(FILE_FORMAT, file_values) FILE_FORMAT = replace_all(FILE_FORMAT, file_values)
FOLDER_FORMAT = replace_all(FOLDER_FORMAT, folder_values) FOLDER_FORMAT = replace_all(FOLDER_FORMAT, folder_values)
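The itertools expression above rebuilds EXTRA_NEWZNABS from the flat list stored in config.ini by slicing out every third element at offsets 0, 1 and 2 and zipping the slices back together. A minimal standalone sketch of the same regrouping (sample values are made up):

import itertools

# config.ini stores the extra newznabs as one flat sequence:
# host1, api1, enabled1, host2, api2, enabled2, ...
flattened = ['http://indexer.example.com', 'def456', '1',
             'http://other.example.org', 'ghi789', '0']

# Take every 3rd element starting at offsets 0, 1 and 2, then zip the
# slices back together to rebuild the (host, apikey, enabled) tuples.
extra_newznabs = list(itertools.izip(*[itertools.islice(flattened, i, None, 3)
                                       for i in range(3)]))

print extra_newznabs
# [('http://indexer.example.com', 'def456', '1'),
#  ('http://other.example.org', 'ghi789', '0')]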
@ -283,26 +302,21 @@ def initialize():
from mylar.helpers import replace_all from mylar.helpers import replace_all
file_values = { 'Track': '$Track', file_values = { 'Issue': '$Issue',
'Title': '$Title', 'Title': '$Title',
'Artist': '$Artist', 'Series': '$Series',
'Album': '$Album',
'Year': '$Year', 'Year': '$Year',
'track': '$track',
'title': '$title', 'title': '$title',
'artist': '$artist', 'series': '$series',
'album': '$album',
'year': '$year' 'year': '$year'
} }
folder_values = { 'Artist': '$Artist', folder_values = { 'Series': '$Series',
'Album': '$Album', 'Publisher': '$Publisher',
'Year': '$Year', 'Year': '$Year',
'Type': '$Type',
'First': '$First', 'First': '$First',
'artist': '$artist', 'series': '$series',
'album': '$album', 'publisher': '$publisher',
'year': '$year', 'year': '$year',
'type': '$type',
'first': '$first' 'first': '$first'
} }
FILE_FORMAT = replace_all(FILE_FORMAT, file_values) FILE_FORMAT = replace_all(FILE_FORMAT, file_values)
@ -335,6 +349,12 @@ def initialize():
except OSError: except OSError:
logger.error('Could not create cache dir. Check permissions of datadir: ' + DATA_DIR) logger.error('Could not create cache dir. Check permissions of datadir: ' + DATA_DIR)
# Sanity check for search interval. Set it to at least 6 hours
if SEARCH_INTERVAL < 360:
logger.info("Search interval too low. Resetting to 6 hour minimum")
SEARCH_INTERVAL = 360
# Initialize the database # Initialize the database
logger.info('Checking to see if the database has all tables....') logger.info('Checking to see if the database has all tables....')
try: try:
@ -477,6 +497,19 @@ def config_write():
new_config['Experimental'] = {} new_config['Experimental'] = {}
new_config['Experimental']['experimental'] = int(EXPERIMENTAL) new_config['Experimental']['experimental'] = int(EXPERIMENTAL)
new_config['Newznab'] = {}
new_config['Newznab']['newznab'] = int(NEWZNAB)
new_config['Newznab']['newznab_host'] = NEWZNAB_HOST
new_config['Newznab']['newznab_apikey'] = NEWZNAB_APIKEY
new_config['Newznab']['newznab_enabled'] = int(NEWZNAB_ENABLED)
# Need to unpack the extra newznabs for saving in config.ini
flattened_newznabs = []
for newznab in EXTRA_NEWZNABS:
for item in newznab:
flattened_newznabs.append(item)
new_config['Newznab']['extra_newznabs'] = flattened_newznabs
new_config['Raw'] = {} new_config['Raw'] = {}
new_config['Raw']['raw'] = int(RAW) new_config['Raw']['raw'] = int(RAW)
new_config['Raw']['raw_provider'] = RAW_PROVIDER new_config['Raw']['raw_provider'] = RAW_PROVIDER
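config_write() does the inverse: the tuples are flattened into a single list before being stored, so initialize() can regroup them on the next start. A small sketch of the write-side step; the final comment shows roughly how ConfigObj would serialise the list, assuming its usual comma-separated output:

# Inverse of the grouping done in initialize(): flatten the tuples so the
# whole set can be stored under one key in config.ini.
extra_newznabs = [('http://indexer.example.com', 'def456', '1'),
                  ('http://other.example.org', 'ghi789', '0')]

flattened = []
for newznab in extra_newznabs:
    for item in newznab:
        flattened.append(item)

# ConfigObj would then write something along the lines of:
# extra_newznabs = http://indexer.example.com, def456, 1, http://other.example.org, ghi789, 0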
@ -523,7 +556,7 @@ def dbcheck():
c.execute('CREATE TABLE IF NOT EXISTS snatched (IssueID TEXT, ComicName TEXT, Issue_Number TEXT, Size INTEGER, DateAdded TEXT, Status TEXT, FolderName TEXT, ComicID TEXT)') c.execute('CREATE TABLE IF NOT EXISTS snatched (IssueID TEXT, ComicName TEXT, Issue_Number TEXT, Size INTEGER, DateAdded TEXT, Status TEXT, FolderName TEXT, ComicID TEXT)')
c.execute('CREATE TABLE IF NOT EXISTS upcoming (ComicName TEXT, IssueNumber TEXT, ComicID TEXT, IssueID TEXT, IssueDate TEXT, Status TEXT)') c.execute('CREATE TABLE IF NOT EXISTS upcoming (ComicName TEXT, IssueNumber TEXT, ComicID TEXT, IssueID TEXT, IssueDate TEXT, Status TEXT)')
c.execute('CREATE TABLE IF NOT EXISTS nzblog (IssueID TEXT, NZBName TEXT)') c.execute('CREATE TABLE IF NOT EXISTS nzblog (IssueID TEXT, NZBName TEXT)')
# c.execute('CREATE TABLE IF NOT EXISTS weekly (SHIPDATE, PUBLISHER text, ISSUE text, COMIC VARCHAR(150), EXTRA text, STATUS text)') c.execute('CREATE TABLE IF NOT EXISTS weekly (SHIPDATE, PUBLISHER text, ISSUE text, COMIC VARCHAR(150), EXTRA text, STATUS text)')
# c.execute('CREATE TABLE IF NOT EXISTS sablog (nzo_id TEXT, ComicName TEXT, ComicYEAR TEXT, ComicIssue TEXT, name TEXT, nzo_complete TEXT)') # c.execute('CREATE TABLE IF NOT EXISTS sablog (nzo_id TEXT, ComicName TEXT, ComicYEAR TEXT, ComicIssue TEXT, name TEXT, nzo_complete TEXT)')
#new #new

mylar/findcomicfeed.py (+92): new Executable file
@ -0,0 +1,92 @@
#!/usr/bin/env python
import os
import sys
import lib.feedparser as feedparser
#import feedparser
import re
def Startit(searchName, searchIssue, searchYear):
#searchName = "Uncanny Avengers"
#searchIssue = "01"
#searchYear = "2012"
#clean up searchName due to webparse.
searchName = searchName.replace("%20", " ")
print "name:"+searchName
print "issue:"+searchIssue
print "year:"+searchYear
# searchName = input("Enter a Title: ")
# searchIssue =input("Enter an Issue #: ")
# searchYear = input("Enter a year: ")
splitSearch = searchName.split(" ")
joinSearch = "+".join(splitSearch)+"+"+searchIssue
searchIsOne = "0"+searchIssue
searchIsTwo = "00"+searchIssue
feed = feedparser.parse("http://nzbindex.nl/rss/alt.binaries.comics.dcp/?sort=agedesc&minsize=10&dq=%s&max=25&more=1" %joinSearch)
totNum = len(feed.entries)
keyPair = {}
regList = []
entries = []
mres = {}
countUp = 0
print (str(totNum))+" results"
while countUp < totNum:
urlParse = feed.entries[countUp].enclosures[0]
#keyPair[feed.entries[countUp].title] = feed.entries[countUp].link
keyPair[feed.entries[countUp].title] = urlParse["href"]
countUp=countUp+1
#print(keyPair)
# keyPair.keys()
#for title, link in keyPair.items():
# print(title, link)
for title, link in keyPair.items():
#print("titlesplit: " + str(title.split("\"")))
splitTitle = title.split("\"")
for subs in splitTitle:
print("looking at: " + str(subs))
regEx = re.findall("\\b%s\\b\\s*\\b%s\\b\\s*[(]\\b%s\\b[)]" %(searchName, searchIssue, searchYear), subs, flags=re.IGNORECASE)
regExOne = re.findall("\\b%s\\b\\s*\\b%s\\b\\s*[(]\\b%s\\b[)]" %(searchName, searchIsOne, searchYear), subs, flags=re.IGNORECASE)
regExTwo = re.findall("\\b%s\\b\\s*\\b%s\\b\\s*[(]\\b%s\\b[)]" %(searchName, searchIsTwo, searchYear), subs, flags=re.IGNORECASE)
#print("regex: " + str(regEx))
if regEx or regExOne or regExTwo:
print("name: " + str(title))
print("sub: " + str(subs))
print("-----")
print("url: " + str(link))
print("-----")
#regList.append(title)
#regList.append(subs)
entries.append({
'title': str(subs),
'link': str(link)
})
if len(entries) >= 1:
mres['entries'] = entries
return mres
# print("Title: "+regList[0])
# print("Link: "+keyPair[regList[0]])
else:
print("No Results Found")
return "no results"
#mres['entries'] = entries
#return mres
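A short, hypothetical usage sketch of the new module, mirroring the sample values in the comments at the top of Startit(): the function returns either the string "no results" or a dict whose 'entries' list looks like a feedparser result, which is the shape search.py consumes:

# Hypothetical call, reusing the test values shown in the comments above.
from mylar import findcomicfeed

result = findcomicfeed.Startit("Uncanny Avengers", "01", "2012")
if result == "no results":
    print "nothing found on nzbindex.nl"
else:
    for entry in result['entries']:
        print entry['title'], "->", entry['link']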

mylar/latest.py (0): Normal file → Executable file
mylar/search.py (166): Normal file → Executable file
@ -15,13 +15,14 @@
import mylar import mylar
from mylar import logger, db, updater, helpers, parseit from mylar import logger, db, updater, helpers, parseit, findcomicfeed
nzbsu_APIkey = mylar.NZBSU_APIKEY nzbsu_APIkey = mylar.NZBSU_APIKEY
dognzb_APIkey = mylar.DOGNZB_APIKEY dognzb_APIkey = mylar.DOGNZB_APIKEY
LOG = mylar.LOG_DIR LOG = mylar.LOG_DIR
import pickle
import lib.feedparser as feedparser import lib.feedparser as feedparser
import urllib import urllib
import os, errno import os, errno
@ -33,7 +34,6 @@ import re
import time import time
from xml.dom.minidom import parseString from xml.dom.minidom import parseString
import urllib2 import urllib2
from datetime import datetime from datetime import datetime
def search_init(ComicName, IssueNumber, ComicYear, SeriesYear, IssueDate, IssueID): def search_init(ComicName, IssueNumber, ComicYear, SeriesYear, IssueDate, IssueID):
@ -54,6 +54,23 @@ def search_init(ComicName, IssueNumber, ComicYear, SeriesYear, IssueDate, IssueI
if mylar.EXPERIMENTAL == 1: if mylar.EXPERIMENTAL == 1:
nzbprovider.append('experimental') nzbprovider.append('experimental')
nzbp+=1 nzbp+=1
if mylar.NEWZNAB == 1:
nzbprovider.append('newznab')
nzbp+=1
#newznabs = 0
newznab_hosts = [(mylar.NEWZNAB_HOST, mylar.NEWZNAB_APIKEY, mylar.NEWZNAB_ENABLED)]
for newznab_host in mylar.EXTRA_NEWZNABS:
if newznab_host[2] == '1' or newznab_host[2] == 1:
newznab_hosts.append(newznab_host)
newznabs = newznabs + 1
#categories = "7030"
#for newznab_host in newznab_hosts:
# mylar.NEWZNAB_APIKEY = newznab_host[1]
# mylar.NEWZNAB_HOST = newznab_host[0]
# -------- # --------
nzbpr = nzbp-1 nzbpr = nzbp-1
findit = 'no' findit = 'no'
@ -67,6 +84,22 @@ def search_init(ComicName, IssueNumber, ComicYear, SeriesYear, IssueDate, IssueI
IssDateFix = "no" IssDateFix = "no"
while (nzbpr >= 0 ): while (nzbpr >= 0 ):
if nzbprovider[nzbpr] == 'newznab':
#this is for newznab
nzbprov = 'newznab'
for newznab_host in newznab_hosts:
findit = NZB_SEARCH(ComicName, IssueNumber, ComicYear, SeriesYear, nzbprov, nzbpr, IssDateFix, IssueID, newznab_host)
if findit == 'yes':
break
else:
if IssDateFix == "yes":
logger.info(u"Hang on - this issue was published between Nov/Dec of " + str(ComicYear) + "...adjusting to " + str(ComicYearFix) + " and retrying...")
findit = NZB_SEARCH(ComicName, IssueNumber, ComicYearFix, SeriesYear, nzbprov, nzbpr, IssDateFix, IssueID, newznab_host)
if findit == 'yes':
break
nzbpr-=1
if nzbprovider[nzbpr] == 'experimental': if nzbprovider[nzbpr] == 'experimental':
#this is for experimental #this is for experimental
nzbprov = 'experimental' nzbprov = 'experimental'
@ -75,7 +108,7 @@ def search_init(ComicName, IssueNumber, ComicYear, SeriesYear, IssueDate, IssueI
break break
else: else:
if IssDateFix == "yes": if IssDateFix == "yes":
logger.info(u"Hang on - this issue was published between /NovDec of " + str(ComicYear) + "...adjusting to " + str(ComicYearFix) + " and retrying...") logger.info(u"Hang on - this issue was published between Nov/Dec of " + str(ComicYear) + "...adjusting to " + str(ComicYearFix) + " and retrying...")
findit = NZB_SEARCH(ComicName, IssueNumber, ComicYearFix, SeriesYear, nzbprov, nzbpr, IssDateFix, IssueID) findit = NZB_SEARCH(ComicName, IssueNumber, ComicYearFix, SeriesYear, nzbprov, nzbpr, IssDateFix, IssueID)
if findit == 'yes': if findit == 'yes':
break break
@ -122,15 +155,18 @@ def search_init(ComicName, IssueNumber, ComicYear, SeriesYear, IssueDate, IssueI
# ---- # ----
return findit return findit
def NZB_SEARCH(ComicName, IssueNumber, ComicYear, SeriesYear, nzbprov, nzbpr, IssDateFix, IssueID): def NZB_SEARCH(ComicName, IssueNumber, ComicYear, SeriesYear, nzbprov, nzbpr, IssDateFix, IssueID, newznab_host=None):
logger.info(u"Shhh be very quiet...I'm looking for " + ComicName + " issue: " + str(IssueNumber) + " using " + str(nzbprov)) logger.info(u"Shhh be very quiet...I'm looking for " + ComicName + " issue: " + str(IssueNumber) + "(" + str(ComicYear) + ") using " + str(nzbprov))
if nzbprov == 'nzb.su': if nzbprov == 'nzb.su':
apikey = mylar.NZBSU_APIKEY apikey = mylar.NZBSU_APIKEY
elif nzbprov == 'dognzb': elif nzbprov == 'dognzb':
apikey = mylar.DOGNZB_APIKEY apikey = mylar.DOGNZB_APIKEY
elif nzbprov == 'experimental': elif nzbprov == 'experimental':
apikey = 'none' apikey = 'none'
#print ("-------------------------") elif nzbprov == 'newznab':
host_newznab = newznab_host[0]
apikey = newznab_host[1]
print ("using Newznab of : " + str(host_newznab))
if mylar.PREFERRED_QUALITY == 0: filetype = "" if mylar.PREFERRED_QUALITY == 0: filetype = ""
elif mylar.PREFERRED_QUALITY == 1: filetype = ".cbr" elif mylar.PREFERRED_QUALITY == 1: filetype = ".cbr"
@ -202,22 +238,29 @@ def NZB_SEARCH(ComicName, IssueNumber, ComicYear, SeriesYear, nzbprov, nzbpr, Is
findurl = "http://dognzb.cr/api?t=search&apikey=" + str(apikey) + "&q=" + str(comsearch[findloop]) + "&o=xml&cat=7030" findurl = "http://dognzb.cr/api?t=search&apikey=" + str(apikey) + "&q=" + str(comsearch[findloop]) + "&o=xml&cat=7030"
elif nzbprov == 'nzb.su': elif nzbprov == 'nzb.su':
findurl = "http://nzb.su/api?t=search&q=" + str(comsearch[findloop]) + "&apikey=" + str(apikey) + "&o=xml&cat=7030" findurl = "http://nzb.su/api?t=search&q=" + str(comsearch[findloop]) + "&apikey=" + str(apikey) + "&o=xml&cat=7030"
elif nzbprov == 'newznab':
findurl = str(host_newznab) + "/api?t=search&q=" + str(comsearch[findloop]) + "&apikey=" + str(apikey) + "&o=xml&cat=7030"
bb = feedparser.parse(findurl) bb = feedparser.parse(findurl)
elif nzbprov == 'experimental': elif nzbprov == 'experimental':
bb = parseit.MysterBinScrape(comsearch[findloop], comyear) #bb = parseit.MysterBinScrape(comsearch[findloop], comyear)
bb = findcomicfeed.Startit(cm, isssearch[findloop], comyear)
# since the regexs in findcomicfeed do the 3 loops, lets force the exit after
cmloopit == 1
done = False done = False
foundc = "no" foundc = "no"
log2file = ""
if bb == "no results": if bb == "no results":
pass pass
foundc = "no" foundc = "no"
else: else:
for entry in bb['entries']: for entry in bb['entries']:
#print ("Entry:" + str(entry['title'])) thisentry = str(entry['title'])
logger.fdebug("Entry: " + str(thisentry))
cleantitle = re.sub('_', ' ', str(entry['title'])) cleantitle = re.sub('_', ' ', str(entry['title']))
cleantitle = helpers.cleanName(str(cleantitle)) cleantitle = helpers.cleanName(str(cleantitle))
nzbname = cleantitle nzbname = cleantitle
#print ("cleantitle:" + str(cleantitle)) logger.fdebug("Cleantitle: " + str(cleantitle))
if len(re.findall('[^()]+', cleantitle)) == 1: cleantitle = "abcdefghijk 0 (1901).cbz" if len(re.findall('[^()]+', cleantitle)) == 1: cleantitle = "abcdefghijk 0 (1901).cbz"
if done: if done:
break break
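For a Newznab host the query above is the same API call nzb.su answers, only the base URL changes; cat=7030 is the Newznab category used for comics. A hypothetical example of the assembled findurl (host, key and search term are placeholders):

# Hypothetical values; comsearch[findloop] is already a formatted query string.
host_newznab = "http://indexer.example.com"
apikey = "def456"
comsearch_term = "Uncanny%20Avengers%2001%20(2012)"

findurl = str(host_newznab) + "/api?t=search&q=" + str(comsearch_term) + \
          "&apikey=" + str(apikey) + "&o=xml&cat=7030"

print findurl
# http://indexer.example.com/api?t=search&q=Uncanny%20Avengers%2001%20(2012)&apikey=def456&o=xml&cat=7030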
@ -238,31 +281,31 @@ def NZB_SEARCH(ComicName, IssueNumber, ComicYear, SeriesYear, nzbprov, nzbpr, Is
while (cnt < lenm): while (cnt < lenm):
if m[cnt] is None: break if m[cnt] is None: break
#print (str(cnt) + ". Bracket Word: " + m[cnt] ) logger.fdebug(str(cnt) + ". Bracket Word: " + str(m[cnt]))
if cnt == 0: if cnt == 0:
comic_andiss = m[cnt] comic_andiss = m[cnt]
#print ("Comic:" + str(comic_andiss)) logger.fdebug("Comic: " + str(comic_andiss))
if m[cnt][:-2] == '19' or m[cnt][:-2] == '20': if m[cnt][:-2] == '19' or m[cnt][:-2] == '20':
#print ("year detected!") logger.fdebug("year detected: " + str(m[cnt]))
result_comyear = m[cnt] result_comyear = m[cnt]
if str(comyear) in result_comyear: if str(comyear) in result_comyear:
#print (str(comyear) + " - right - years match baby!") logger.fdebug(str(comyear) + " - right years match baby!")
yearmatch = "true" yearmatch = "true"
else: else:
#print (str(comyear) + " - not right - years don't match ") logger.fdebug(str(comyear) + " - not right - years do not match")
yearmatch = "false" yearmatch = "false"
if 'digital' in m[cnt] and len(m[cnt]) == 7: if 'digital' in m[cnt] and len(m[cnt]) == 7:
pass pass
#print ("digital edition") #print ("digital edition")
if ' of ' in m[cnt]: if ' of ' in m[cnt]:
#print ("mini-series detected : " + str(m[cnt])) logger.fdebug("mini-series detected : " + str(m[cnt]))
result_of = m[cnt] result_of = m[cnt]
if 'cover' in m[cnt]: if 'cover' in m[cnt]:
#print ("covers detected") logger.fdebug("covers detected: " + str(m[cnt]))
result_comcovers = m[cnt] result_comcovers = m[cnt]
for ripper in ripperlist: for ripper in ripperlist:
if ripper in m[cnt]: if ripper in m[cnt]:
#print ("Scanner detected:" + str(m[cnt])) logger.fdebug("Scanner detected: " + str(m[cnt]))
result_comscanner = m[cnt] result_comscanner = m[cnt]
cnt+=1 cnt+=1
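The m[cnt] values stepped through above come from the re.findall('[^()]+', cleantitle) split done earlier in this function: index 0 holds the comic name plus issue, and later indexes hold the year, any "1 of 5" style count, cover notes and scanner tags. A quick illustration with a made-up title:

import re

# Hypothetical cleaned title, shaped like a typical comic NZB name.
cleantitle = "Uncanny Avengers 01 (2012) (2 covers) (c2c) (Minutemen-DCP).cbr"

m = re.findall('[^()]+', cleantitle)
# m[0] -> 'Uncanny Avengers 01 '   (comic name + issue)
# m[1] -> '2012'                   (year, compared against comyear)
# m[3] -> '2 covers'               (covers tag)
# m[5] -> 'c2c'
# m[7] -> 'Minutemen-DCP'          (scanner tag, matched against ripperlist)
for cnt, word in enumerate(m):
    print cnt, ". Bracket Word:", word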
@ -270,73 +313,82 @@ def NZB_SEARCH(ComicName, IssueNumber, ComicYear, SeriesYear, nzbprov, nzbpr, Is
splitit = [] splitit = []
watchcomic_split = [] watchcomic_split = []
comic_iss = re.sub('[\-\:\,]', '', str(comic_andiss)) comic_iss_b4 = re.sub('[\-\:\,]', '', str(comic_andiss))
logger.fdebug("original nzb comic and issue: " + str(comic_iss_b4))
#log2file = log2file + "o.g.comic: " + str(comic_iss_b4) + "\n"
comic_iss = comic_iss_b4.replace('.',' ')
logger.fdebug("adjusted nzb comic and issue: " + str(comic_iss))
splitit = comic_iss.split(None) splitit = comic_iss.split(None)
#something happened to dognzb searches or results...added a '.' in place of spaces
#screwed up most search results with dognzb. Let's try to adjust.
watchcomic_split = findcomic[findloop].split(None) watchcomic_split = findcomic[findloop].split(None)
#log2file = log2file + "adjusting from: " + str(comic_iss_b4) + " to: " + str(comic_iss) + "\n"
bmm = re.findall('v\d', comic_iss) bmm = re.findall('v\d', comic_iss)
#print ("vers - " + str(bmm))
if len(bmm) > 0: splitst = len(splitit) - 2 if len(bmm) > 0: splitst = len(splitit) - 2
else: splitst = len(splitit) - 1 else: splitst = len(splitit) - 1
if (splitst) != len(watchcomic_split): if (splitst) != len(watchcomic_split):
#print ("incorrect comic lengths...not a match") logger.fdebug("incorrect comic lengths...not a match")
if str(splitit[0]).lower() == "the": if str(splitit[0]).lower() == "the":
#print ("THE word detected...attempting to adjust pattern matching") logger.fdebug("THE word detected...attempting to adjust pattern matching")
splitit[0] = splitit[4:] splitit[0] = splitit[4:]
else: else:
#print ("length match..proceeding") logger.fdebug("length match..proceeding")
n = 0 n = 0
scount = 0 scount = 0
#print ("search-length:" + str(len(splitit))) logger.fdebug("search-length: " + str(len(splitit)))
#print ("watchlist-length:" + str(len(watchcomic_split))) logger.fdebug("Watchlist-length: " + str(len(watchcomic_split)))
while ( n <= len(splitit)-1 ): while ( n <= len(splitit)-1 ):
#print ("splitit:" + str(splitit[n])) logger.fdebug("splitit: " + str(splitit[n]))
if n < len(splitit)-1 and n < len(watchcomic_split)-1: if n < len(splitit)-1 and n < len(watchcomic_split):
#print ( str(n) + ". Comparing: " + watchcomic_split[n] + " .to. " + splitit[n] ) logger.fdebug(str(n) + " Comparing: " + str(watchcomic_split[n]) + " .to. " + str(splitit[n]))
if str(watchcomic_split[n].lower()) in str(splitit[n].lower()): if str(watchcomic_split[n].lower()) in str(splitit[n].lower()):
#print ("word matched on : " + splitit[n]) logger.fdebug("word matched on : " + str(splitit[n]))
scount+=1 scount+=1
#elif ':' in splitit[n] or '-' in splitit[n]: #elif ':' in splitit[n] or '-' in splitit[n]:
# splitrep = splitit[n].replace('-', '') # splitrep = splitit[n].replace('-', '')
# print ("non-character keyword...skipped on " + splitit[n]) # print ("non-character keyword...skipped on " + splitit[n])
elif str(splitit[n].lower()).startswith('v'): elif str(splitit[n].lower()).startswith('v'):
#print ("possible verisoning..checking") logger.fdebug("possible verisoning..checking")
#we hit a versioning # - account for it #we hit a versioning # - account for it
if splitit[n][1:].isdigit(): if splitit[n][1:].isdigit():
comicversion = str(splitit[n]) comicversion = str(splitit[n])
#print ("version found:" + str(comicversion)) logger.fdebug("version found: " + str(comicversion))
else: else:
#print ("issue section") logger.fdebug("issue section")
if splitit[n].isdigit(): if splitit[n].isdigit():
#print ("issue detected") logger.fdebug("issue detected")
comiss = splitit[n] comiss = splitit[n]
comicNAMER = n - 1 comicNAMER = n - 1
comNAME = splitit[0] comNAME = splitit[0]
cmnam = 1 cmnam = 1
while (cmnam < comicNAMER): while (cmnam <= comicNAMER):
comNAME = str(comNAME) + " " + str(splitit[cmnam]) comNAME = str(comNAME) + " " + str(splitit[cmnam])
cmnam+=1 cmnam+=1
#print ("comic: " + str(comNAME)) logger.fdebug("comic: " + str(comNAME))
else: else:
#print ("non-match for: " + splitit[n]) logger.fdebug("non-match for: "+ str(splitit[n]))
pass pass
n+=1 n+=1
#set the match threshold to 80% (for now)
# if it's less than 80% consider it a non-match and discard.
spercent = ( scount/int(len(splitit)) ) * 100 spercent = ( scount/int(len(splitit)) ) * 100
#print (str(spercent) + "% match") logger.fdebug(str(spercent) + "% match")
#if spercent >= 75: print ("it's a go captain...") #if spercent >= 80:
#if spercent < 75: print ("failure - we only got " + str(spercent) + "% right!") # logger.fdebug("it's a go captain... - we matched " + str(spercent) + "%!")
#print ("this should be a match!") #if spercent < 80:
# logger.fdebug("failure - we only got " + str(spercent) + "% right!")
# continue
logger.fdebug("this should be a match!")
#issue comparison now as well #issue comparison now as well
if int(findcomiciss[findloop]) == int(comiss): if int(findcomiciss[findloop]) == int(comiss):
#print ("issues match!") logger.fdebug('issues match!')
logger.info(u"Found " + str(ComicName) + " (" + str(comyear) + ") issue: " + str(IssueNumber) + " using " + str(nzbprov) ) logger.info(u"Found " + str(ComicName) + " (" + str(comyear) + ") issue: " + str(IssueNumber) + " using " + str(nzbprov) )
## -- inherit issue. Comic year is non-standard. nzb year is the year ## -- inherit issue. Comic year is non-standard. nzb year is the year
## -- comic was printed, not the start year of the comic series and ## -- comic was printed, not the start year of the comic series and
## -- thus the deciding component if matches are correct or not ## -- thus the deciding component if matches are correct or not
linkstart = os.path.splitext(entry['link'])[0] linkstart = os.path.splitext(entry['link'])[0]
#following is JUST for nzb.su #following is JUST for nzb.su
if nzbprov == 'nzb.su': if nzbprov == 'nzb.su' or nzbprov == 'newznab':
linkit = os.path.splitext(entry['link'])[1] linkit = os.path.splitext(entry['link'])[1]
linkit = linkit.replace("&", "%26") linkit = linkit.replace("&", "%26")
linkapi = str(linkstart) + str(linkit) linkapi = str(linkstart) + str(linkit)
@ -371,10 +423,12 @@ def NZB_SEARCH(ComicName, IssueNumber, ComicYear, SeriesYear, nzbprov, nzbpr, Is
filenamenzb = os.path.split(linkapi)[1] filenamenzb = os.path.split(linkapi)[1]
#filenzb = os.path.join(tmppath,filenamenzb) #filenzb = os.path.join(tmppath,filenamenzb)
if nzbprov == 'nzb.su': if nzbprov == 'nzb.su' or nzbprov == 'newznab' or nzbprov == 'experimental':
filenzb = linkstart[21:] #filenzb = linkstart[21:]
elif nzbprov == 'experimental': #elif nzbprov == 'experimental':
filenzb = filenamenzb[6:] #let's send a clean copy to SAB because the name could be stupid.
filenzb = str(ComicName.replace(' ', '_')) + "_" + str(IssueNumber) + "_(" + str(comyear) + ")"
#filenzb = str(filenamenzb)
elif nzbprov == 'dognzb': elif nzbprov == 'dognzb':
filenzb = str(filenamenzb) filenzb = str(filenamenzb)
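With this change the file handed to SABnzbd is always named from the known series, issue and year instead of whatever the indexer called the NZB, which is what keeps renaming and ComicRN.py post-processing predictable. A tiny sketch with hypothetical values:

# Hypothetical inputs for the nzb.su / newznab / experimental branch above.
ComicName = "Uncanny Avengers"
IssueNumber = "1"
comyear = "2012"

filenzb = str(ComicName.replace(' ', '_')) + "_" + str(IssueNumber) + "_(" + str(comyear) + ")"
print filenzb
# Uncanny_Avengers_1_(2012)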
@ -429,11 +483,13 @@ def NZB_SEARCH(ComicName, IssueNumber, ComicYear, SeriesYear, nzbprov, nzbpr, Is
#else: #else:
#print "Queue already paused" #print "Queue already paused"
# END OF NOT NEEDED. # END OF NOT NEEDED.
if mylar.RENAME_FILES == 1: #redudant. if mylar.RENAME_FILES == 1:
tmpapi = str(mylar.SAB_HOST) + "/api?mode=addlocalfile&name=" + str(savefile) + "&pp=3&cat=" + str(mylar.SAB_CATEGORY) + "&script=ComicRN.py&apikey=" + str(mylar.SAB_APIKEY) tmpapi = str(mylar.SAB_HOST) + "/api?mode=addlocalfile&name=" + str(savefile) + "&pp=3&cat=" + str(mylar.SAB_CATEGORY) + "&script=ComicRN.py&apikey=" + str(mylar.SAB_APIKEY)
else: #outdated...
tmpapi = str(mylar.SAB_HOST) + "/api?mode=addurl&name=" + str(linkapi) + "&pp=3&cat=" + str(mylar.SAB_CATEGORY) + "&script=ComicRN.py&apikey=" + str(mylar.SAB_APIKEY) # else:
# tmpapi = str(mylar.SAB_HOST) + "/api?mode=addurl&name=" + str(linkapi) + "&pp=3&cat=" + str(mylar.SAB_CATEGORY) + "&script=ComicRN.py&apikey=" + str(mylar.SAB_APIKEY)
# time.sleep(5) # time.sleep(5)
#end outdated.
print "send-to-SAB:" + str(tmpapi) print "send-to-SAB:" + str(tmpapi)
try: try:
urllib2.urlopen(tmpapi) urllib2.urlopen(tmpapi)
@ -524,8 +580,15 @@ def NZB_SEARCH(ComicName, IssueNumber, ComicYear, SeriesYear, nzbprov, nzbpr, Is
done = True done = True
break break
else: else:
#print ("issues don't match..") log2file = log2file + "issues don't match.." + "\n"
foundc = "no" foundc = "no"
# write the log to file now so it logs / file found.
#newlog = mylar.CACHE_DIR + "/searchlog.txt"
#local_file = open(newlog, "a")
#pickle.dump(str(log2file), local_file)
#local_file.write(log2file)
#local_file.close
#log2file = ""
if done == True: break if done == True: break
cmloopit-=1 cmloopit-=1
findloop+=1 findloop+=1
@ -533,6 +596,7 @@ def NZB_SEARCH(ComicName, IssueNumber, ComicYear, SeriesYear, nzbprov, nzbpr, Is
print ("found-yes") print ("found-yes")
foundcomic.append("yes") foundcomic.append("yes")
updater.nzblog(IssueID, nzbname) updater.nzblog(IssueID, nzbname)
nzbpr == 0
break break
elif foundc == "no" and nzbpr <> 0: elif foundc == "no" and nzbpr <> 0:
logger.info(u"More than one search provider given - trying next one.") logger.info(u"More than one search provider given - trying next one.")

mylar/webserve.py
@ -329,8 +329,8 @@ class WebInterface(object):
if popit: if popit:
weeklyresults = myDB.select("SELECT * from weekly") weeklyresults = myDB.select("SELECT * from weekly")
pulldate = myDB.action("SELECT * from weekly").fetchone() pulldate = myDB.action("SELECT * from weekly").fetchone()
if pulldate is None: #if pulldate is None:
raise cherrypy.HTTPRedirect("home") # raise cherrypy.HTTPRedirect("home")
else: else:
return self.manualpull() return self.manualpull()
return serve_template(templatename="weeklypull.html", title="Weekly Pull", weeklyresults=weeklyresults, pulldate=pulldate['SHIPDATE'],pullfilter=False) return serve_template(templatename="weeklypull.html", title="Weekly Pull", weeklyresults=weeklyresults, pulldate=pulldate['SHIPDATE'],pullfilter=False)
@ -507,6 +507,11 @@ class WebInterface(object):
"use_dognzb" : helpers.checked(mylar.DOGNZB), "use_dognzb" : helpers.checked(mylar.DOGNZB),
"dognzb_api" : mylar.DOGNZB_APIKEY, "dognzb_api" : mylar.DOGNZB_APIKEY,
"use_experimental" : helpers.checked(mylar.EXPERIMENTAL), "use_experimental" : helpers.checked(mylar.EXPERIMENTAL),
"use_newznab" : helpers.checked(mylar.NEWZNAB),
"newznab_host" : mylar.NEWZNAB_HOST,
"newznab_api" : mylar.NEWZNAB_APIKEY,
"newznab_enabled" : helpers.checked(mylar.NEWZNAB_ENABLED),
"extra_newznabs" : mylar.EXTRA_NEWZNABS,
"destination_dir" : mylar.DESTINATION_DIR, "destination_dir" : mylar.DESTINATION_DIR,
"replace_spaces" : helpers.checked(mylar.REPLACE_SPACES), "replace_spaces" : helpers.checked(mylar.REPLACE_SPACES),
"replace_char" : mylar.REPLACE_CHAR, "replace_char" : mylar.REPLACE_CHAR,
@ -541,12 +546,12 @@ class WebInterface(object):
raise cherrypy.HTTPRedirect("artistPage?ComicID=%s" % ComicID) raise cherrypy.HTTPRedirect("artistPage?ComicID=%s" % ComicID)
comic_config.exposed = True comic_config.exposed = True
def configUpdate(self, http_host='0.0.0.0', http_username=None, http_port=8181, http_password=None, launch_browser=0, download_scan_interval=None, nzb_search_interval=None, libraryscan_interval=None, def configUpdate(self, http_host='0.0.0.0', http_username=None, http_port=8090, http_password=None, launch_browser=0, download_scan_interval=None, nzb_search_interval=None, libraryscan_interval=None,
sab_host=None, sab_username=None, sab_apikey=None, sab_password=None, sab_category=None, sab_priority=0, log_dir=None, blackhole=0, blackhole_dir=None, sab_host=None, sab_username=None, sab_apikey=None, sab_password=None, sab_category=None, sab_priority=0, log_dir=None, blackhole=0, blackhole_dir=None,
usenet_retention=None, nzbsu=0, nzbsu_apikey=None, dognzb=0, dognzb_apikey=None, usenet_retention=None, nzbsu=0, nzbsu_apikey=None, dognzb=0, dognzb_apikey=None, newznab=0, newznab_host=None, newznab_apikey=None, newznab_enabled=0,
raw=0, raw_provider=None, raw_username=None, raw_password=None, raw_groups=None, experimental=0, raw=0, raw_provider=None, raw_username=None, raw_password=None, raw_groups=None, experimental=0,
preferred_quality=0, move_files=0, rename_files=0, folder_format=None, file_format=None, preferred_quality=0, move_files=0, rename_files=0, folder_format=None, file_format=None,
destination_dir=None, replace_spaces=0, replace_char=None, autowant_all=0, autowant_upcoming=0, zero_level=0, zero_level_n=None, interface=None): destination_dir=None, replace_spaces=0, replace_char=None, autowant_all=0, autowant_upcoming=0, zero_level=0, zero_level_n=None, interface=None, **kwargs):
mylar.HTTP_HOST = http_host mylar.HTTP_HOST = http_host
mylar.HTTP_PORT = http_port mylar.HTTP_PORT = http_port
mylar.HTTP_USERNAME = http_username mylar.HTTP_USERNAME = http_username
@ -574,6 +579,10 @@ class WebInterface(object):
mylar.RAW_PASSWORD = raw_password mylar.RAW_PASSWORD = raw_password
mylar.RAW_GROUPS = raw_groups mylar.RAW_GROUPS = raw_groups
mylar.EXPERIMENTAL = experimental mylar.EXPERIMENTAL = experimental
mylar.NEWZNAB = newznab
mylar.NEWZNAB_HOST = newznab_host
mylar.NEWZNAB_APIKEY = newznab_apikey
mylar.NEWZNAB_ENABLED = newznab_enabled
mylar.PREFERRED_QUALITY = int(preferred_quality) mylar.PREFERRED_QUALITY = int(preferred_quality)
mylar.MOVE_FILES = move_files mylar.MOVE_FILES = move_files
mylar.RENAME_FILES = rename_files mylar.RENAME_FILES = rename_files
@ -588,6 +597,29 @@ class WebInterface(object):
mylar.AUTOWANT_UPCOMING = autowant_upcoming mylar.AUTOWANT_UPCOMING = autowant_upcoming
mylar.INTERFACE = interface mylar.INTERFACE = interface
mylar.LOG_DIR = log_dir mylar.LOG_DIR = log_dir
# Handle the variable config options. Note - keys with False values aren't getting passed
mylar.EXTRA_NEWZNABS = []
print ("here")
for kwarg in kwargs:
if kwarg.startswith('newznab_host'):
newznab_number = kwarg[12:]
newznab_host = kwargs['newznab_host' + newznab_number]
newznab_api = kwargs['newznab_api' + newznab_number]
try:
newznab_enabled = int(kwargs['newznab_enabled' + newznab_number])
except KeyError:
newznab_enabled = 0
mylar.EXTRA_NEWZNABS.append((newznab_host, newznab_api, newznab_enabled))
print ("there")
# Sanity checking
if mylar.SEARCH_INTERVAL < 360:
logger.info("Search interval too low. Resetting to 6 hour minimum")
mylar.SEARCH_INTERVAL = 360
print ("boo")
# Write the config
mylar.config_write() mylar.config_write()
raise cherrypy.HTTPRedirect("config") raise cherrypy.HTTPRedirect("config")
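Because the extra provider fields are generated dynamically (newznab_host2, newznab_api2, newznab_enabled2 and so on), they arrive in **kwargs rather than as named parameters, and an unticked Enabled checkbox is simply absent from the POST data, hence the KeyError fallback to 0. A minimal sketch of the same packing logic on a fake kwargs dict:

# Hypothetical POST data as configUpdate would receive it in **kwargs.
kwargs = {
    'newznab_host2': 'http://indexer.example.com',
    'newznab_api2': 'def456',
    'newznab_enabled2': '1',
    'newznab_host3': 'http://other.example.org',
    'newznab_api3': 'ghi789',
    # no 'newznab_enabled3' key: the checkbox was left unticked
}

extra_newznabs = []
for kwarg in kwargs:
    if kwarg.startswith('newznab_host'):
        number = kwarg[12:]                     # '2', '3', ...
        host = kwargs['newznab_host' + number]
        api = kwargs['newznab_api' + number]
        try:
            enabled = int(kwargs['newznab_enabled' + number])
        except KeyError:
            enabled = 0
        extra_newznabs.append((host, api, enabled))

print extra_newznabs   # dict iteration order may vary
# [('http://indexer.example.com', 'def456', 1),
#  ('http://other.example.org', 'ghi789', 0)]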