Fix: DogNZB issues and not being able to find any results
Fix: Newznab selection caused additional searching and duplicate sends
IMP: added verbose logging option in Config (currently only for searches and multiple-volume series adds)
commit 59f409eac4
parent 2b0c8cfffb
6 changed files with 85 additions and 37 deletions
@@ -57,6 +57,10 @@
 <div class="row checkbox">
     <input type="checkbox" name="launch_browser" value="1" ${config['launch_browser']} /> <label>Launch Browser on Startup</label>
 </div>
+<div class="row checkbox">
+    <input type="checkbox" name="logverbose" value="1" ${config['logverbose']} /> <label>Verbose Logging</label>
+    <br/><small>*Use this only when experiencing problems*</small>
+</div>
 
 </fieldset>
 </td>
@@ -68,7 +68,7 @@ HTTP_USERNAME = None
 HTTP_PASSWORD = None
 HTTP_ROOT = None
 LAUNCH_BROWSER = False
+LOGVERBOSE = 1
 GIT_PATH = None
 INSTALL_TYPE = None
 CURRENT_VERSION = None
@@ -192,7 +192,7 @@ def initialize():
 
     with INIT_LOCK:
 
-        global __INITIALIZED__, FULL_PATH, PROG_DIR, VERBOSE, DAEMON, DATA_DIR, CONFIG_FILE, CFG, CONFIG_VERSION, LOG_DIR, CACHE_DIR, \
+        global __INITIALIZED__, FULL_PATH, PROG_DIR, VERBOSE, DAEMON, DATA_DIR, CONFIG_FILE, CFG, CONFIG_VERSION, LOG_DIR, CACHE_DIR, LOGVERBOSE, \
                HTTP_PORT, HTTP_HOST, HTTP_USERNAME, HTTP_PASSWORD, HTTP_ROOT, LAUNCH_BROWSER, GIT_PATH, \
                CURRENT_VERSION, LATEST_VERSION, CHECK_GITHUB, CHECK_GITHUB_ON_STARTUP, CHECK_GITHUB_INTERVAL, MUSIC_DIR, DESTINATION_DIR, \
                DOWNLOAD_DIR, USENET_RETENTION, SEARCH_INTERVAL, INTERFACE, AUTOWANT_ALL, AUTOWANT_UPCOMING, ZERO_LEVEL, ZERO_LEVEL_N, \
@@ -228,6 +228,7 @@ def initialize():
         HTTP_PASSWORD = check_setting_str(CFG, 'General', 'http_password', '')
         HTTP_ROOT = check_setting_str(CFG, 'General', 'http_root', '/')
         LAUNCH_BROWSER = bool(check_setting_int(CFG, 'General', 'launch_browser', 1))
+        LOGVERBOSE = bool(check_setting_int(CFG, 'General', 'logverbose', 1))
         GIT_PATH = check_setting_str(CFG, 'General', 'git_path', '')
         LOG_DIR = check_setting_str(CFG, 'General', 'log_dir', '')
 
@@ -450,6 +451,7 @@ def config_write():
     new_config['General']['http_root'] = HTTP_ROOT
     new_config['General']['launch_browser'] = int(LAUNCH_BROWSER)
     new_config['General']['log_dir'] = LOG_DIR
+    new_config['General']['logverbose'] = int(LOGVERBOSE)
     new_config['General']['git_path'] = GIT_PATH
 
     new_config['General']['check_github'] = int(CHECK_GITHUB)
@@ -81,7 +81,7 @@ class RotatingLogger(object):
             logger.error(message)
 
 mylar_log = RotatingLogger('mylar.log', MAX_SIZE, MAX_FILES)
-search_log = RotatingLogger('search.log', MAX_SIZE, MAX_FILES)
+#search_log = RotatingLogger('search.log', MAX_SIZE, MAX_FILES)
 
 def debug(message):
     mylar_log.log(message, level='DEBUG')
@@ -96,5 +96,8 @@ def error(message):
     mylar_log.log(message, level='ERROR')
 
 def fdebug(message):
-    search_log.log(message, level='DEBUG')
+    #if mylar.LOGVERBOSE == 1:
+    mylar_log.log(message, level='DEBUG')
+    #else:
+    #    mylar_log.log(message, level='DEBUG')
 
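Reviewer note: with search.log retired, fdebug now writes every search message to mylar.log unconditionally; the commented-out lines sketch the gate the new Verbose Logging option is evidently meant to drive. A minimal sketch of that gated version (an assumption about intent, not what this commit ships; it presumes this module can see mylar.LOGVERBOSE without a circular import):

    import mylar

    def fdebug(message):
        # only emit the verbose search chatter when the config flag is on
        if mylar.LOGVERBOSE:
            mylar_log.log(message, level='DEBUG')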
@@ -407,40 +407,52 @@ def GCDdetails(comseries, resultURL, vari_loop, ComicID, TotalIssues, issvariati
 def GCDAdd(gcdcomicid):
     serieschoice = []
     series = {}
+    logger.fdebug("this one's for Ataribaby:")
+    logger.fdebug("I'm trying to find these GCD comicid's:" + str(gcdcomicid))
     for gcdid in gcdcomicid:
-        #print ("gcdid:" + str(gcdid))
+        logger.fdebug("looking at gcdid:" + str(gcdid))
         input2 = 'http://www.comics.org/series/' + str(gcdid)
+        logger.fdebug("---url: " + str(input2))
         resp = urllib2.urlopen ( input2 )
         soup = BeautifulSoup ( resp )
+        logger.fdebug("SeriesName section...")
         parsen = soup.find("span", {"id" : "series_name"})
+        logger.fdebug("series name (UNPARSED): " + str(parsen))
         subpar = parsen('a')[0]
+        logger.fdebug("series name parsed value: " + str(subpar))
         resultName = subpar.findNext(text=True)
-        #print ("ComicName: " + resultName)
+        logger.fdebug("ComicName: " + str(resultName))
         #covers-start
+        logger.fdebug("Covers section...")
         coverst = soup.find("div", {"id" : "series_cover"})
         if coverst < 0:
             gcdcover = "None"
+            logger.fdebug("unable to find any covers - setting to None")
         else:
             subcoverst = coverst('img',src=True)[0]
+            logger.fdebug("cover (UNPARSED) : " + str(subcoverst))
             gcdcover = subcoverst['src']
-        #print ("Cover: " + str(gcdcover))
+        logger.fdebug("Cover: " + str(gcdcover))
         #covers end
         #publisher start
+        logger.fdebug("Publisher section...")
         pubst = soup.find("div", {"class" : "item_data"})
+        logger.fdebug("publisher (UNPARSED): " + str(pubst))
         try:
             subpubst = pubst('a')[0]
+            logger.fdebug("publisher parsed value : " + str(subpubst))
             publisher = subpubst.findNext(text=True)
-        except IndexError,TypeError:
+        except IndexError:
             publisher = "Unknown"
-        #print ("Publisher: " + str(publisher))
+        logger.fdebug("Publisher: " + str(publisher))
         #publisher end
         parsed = soup.find("div", {"id" : "series_data"})
+        logger.fdebug("series_data: " + str(parsed))
         #print ("parse:" + str(parsed))
         subtxt3 = parsed.find("dd", {"id" : "publication_dates"})
+        logger.fdebug("publication_dates: " + str(subtxt3))
         pubdate = subtxt3.findNext(text=True).rstrip()
-        #print ("pubdate:" + str(pubdate))
+        logger.fdebug("pubdate:" + str(pubdate))
         subtxt4 = parsed.find("dd", {"id" : "issues_published"})
         noiss = subtxt4.findNext(text=True)
         lenwho = len(noiss)
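Reviewer note: one behavioural fix hides among the logging changes above. In Python 2, `except IndexError,TypeError:` does not catch two exception types; it catches IndexError only and binds the exception instance to the name TypeError, shadowing the builtin. The commit narrows the clause to IndexError alone; actually catching both types would require a tuple, as in this sketch:

    try:
        subpubst = pubst('a')[0]
        publisher = subpubst.findNext(text=True)
    except (IndexError, TypeError):
        # no anchor tag found, or pubst itself was None
        publisher = "Unknown"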
@@ -450,8 +462,8 @@ def GCDAdd(gcdcomicid):
         stringout = noiss[:lent]
         noissues = stringout.rstrip(' \t\r\n\0')
         numbering = stringit.rstrip(' \t\r\n\0')
-        #print ("noissues:" + str(noissues))
-        #print ("numbering:" + str(numbering))
+        logger.fdebug("noissues:" + str(noissues))
+        logger.fdebug("numbering:" + str(numbering))
         serieschoice.append({
                "ComicID": gcdid,
                "ComicName": resultName,
@@ -37,7 +37,7 @@ import urllib2
 from datetime import datetime
 
 def search_init(ComicName, IssueNumber, ComicYear, SeriesYear, IssueDate, IssueID):
-    if ComicYear == None: ComicYear = '2012'
+    if ComicYear == None: ComicYear = '2013'
     else: ComicYear = str(ComicYear)[:4]
     ##nzb provider selection##
     ##'dognzb' or 'nzb.su' or 'experimental'
@@ -72,6 +72,7 @@ def search_init(ComicName, IssueNumber, ComicYear, SeriesYear, IssueDate, IssueI
 #        mylar.NEWZNAB_HOST = newznab_host[0]
 
     # --------
+    logger.fdebug("there are : " + str(nzbp) + " search providers you have selected.")
     nzbpr = nzbp-1
     findit = 'no'
 
@@ -89,9 +90,11 @@ def search_init(ComicName, IssueNumber, ComicYear, SeriesYear, IssueDate, IssueI
         #this is for newznab
         nzbprov = 'newznab'
         for newznab_host in newznab_hosts:
+            logger.fdebug("newnzb_host: " + str(newznab_host))
             findit = NZB_SEARCH(ComicName, IssueNumber, ComicYear, SeriesYear, nzbprov, nzbpr, IssDateFix, IssueID, newznab_host)
             if findit == 'yes':
-                break
+                logger.fdebug("findit = found!")
+                continue
             else:
                 if IssDateFix == "yes":
                     logger.info(u"Hang on - this issue was published between Nov/Dec of " + str(ComicYear) + "...adjusting to " + str(ComicYearFix) + " and retrying...")
@@ -100,7 +103,7 @@ def search_init(ComicName, IssueNumber, ComicYear, SeriesYear, IssueDate, IssueI
                     break
         nzbpr-=1
 
-    if nzbprovider[nzbpr] == 'experimental':
+    elif nzbprovider[nzbpr] == 'experimental':
         #this is for experimental
         nzbprov = 'experimental'
         findit = NZB_SEARCH(ComicName, IssueNumber, ComicYear, SeriesYear, nzbprov, nzbpr, IssDateFix, IssueID)
@@ -114,7 +117,7 @@ def search_init(ComicName, IssueNumber, ComicYear, SeriesYear, IssueDate, IssueI
             break
         nzbpr-=1
 
-    if nzbprovider[nzbpr] == 'nzb.su':
+    elif nzbprovider[nzbpr] == 'nzb.su':
         # ----
         # this is for nzb.su
         #d = feedparser.parse("http://nzb.su/rss?t=7030&dl=1&i=" + str(nzbsu_APIID) + "&r=" + str(nzbsu_APIkey))
@@ -153,6 +156,7 @@ def search_init(ComicName, IssueNumber, ComicYear, SeriesYear, IssueDate, IssueI
         nzbpr-=1
 
     # ----
+    if findit == 'yes': return findit
     return findit
 
 def NZB_SEARCH(ComicName, IssueNumber, ComicYear, SeriesYear, nzbprov, nzbpr, IssDateFix, IssueID, newznab_host=None):
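Reviewer note: this early return, together with the if -> elif changes in the two hunks above, appears to be the fix for the "additional searching and duplicate sends" named in the commit message: previously, after one provider branch ran and decremented nzbpr, a following bare `if` could match again in the same pass and search (and send) a second time. A schematic of the corrected flow, with hypothetical helper names standing in for the NZB_SEARCH calls:

    provider = nzbprovider[nzbpr]
    if provider == 'dognzb':
        findit = search_dognzb()        # hypothetical stand-in
    elif provider == 'experimental':    # 'elif' keeps one pass to one provider
        findit = search_experimental()  # hypothetical stand-in
    if findit == 'yes':
        return findit                   # stop at the first hit; nothing is sent twice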
@@ -166,7 +170,7 @@ def NZB_SEARCH(ComicName, IssueNumber, ComicYear, SeriesYear, nzbprov, nzbpr, Is
     elif nzbprov == 'newznab':
         host_newznab = newznab_host[0]
         apikey = newznab_host[1]
-        print ("using Newznab of : " + str(host_newznab))
+        logger.fdebug("using Newznab host of : " + str(host_newznab))
 
     if mylar.PREFERRED_QUALITY == 0: filetype = ""
     elif mylar.PREFERRED_QUALITY == 1: filetype = ".cbr"
@@ -239,7 +243,8 @@ def NZB_SEARCH(ComicName, IssueNumber, ComicYear, SeriesYear, nzbprov, nzbpr, Is
         elif nzbprov == 'nzb.su':
             findurl = "http://nzb.su/api?t=search&q=" + str(comsearch[findloop]) + "&apikey=" + str(apikey) + "&o=xml&cat=7030"
         elif nzbprov == 'newznab':
-            findurl = str(host_newznab) + "/api?t=search&q=" + str(comsearch[findloop]) + "&apikey=" + str(apikey) + "&o=xml&cat=7030"
+            findurl = str(host_newznab) + "api?t=search&q=" + str(comsearch[findloop]) + "&apikey=" + str(apikey) + "&o=xml&cat=7030"
+            logger.fdebug("search-url: " + str(findurl))
             bb = feedparser.parse(findurl)
         elif nzbprov == 'experimental':
             #bb = parseit.MysterBinScrape(comsearch[findloop], comyear)
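Reviewer note: dropping the leading slash implies the configured newznab host is now expected to end with a trailing '/'; a host saved without one would yield a malformed URL along the lines of http://hostapi?t=search. A defensive construction (a sketch, not what the commit ships) tolerates either form:

    # normalize the host before appending the API path
    host = str(host_newznab).rstrip('/')
    findurl = host + "/api?t=search&q=" + str(comsearch[findloop]) + "&apikey=" + str(apikey) + "&o=xml&cat=7030"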
@@ -254,9 +259,10 @@ def NZB_SEARCH(ComicName, IssueNumber, ComicYear, SeriesYear, nzbprov, nzbpr, Is
             foundc = "no"
         else:
             for entry in bb['entries']:
+                logger.fdebug("checking search result: " + str(entry['title']))
                 thisentry = str(entry['title'])
                 logger.fdebug("Entry: " + str(thisentry))
-                cleantitle = re.sub('_', ' ', str(entry['title']))
+                cleantitle = re.sub('[_/.]', ' ', str(entry['title']))
                 cleantitle = helpers.cleanName(str(cleantitle))
                 nzbname = cleantitle
 
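Reviewer note: this is the DogNZB half of the fix. Result titles began arriving with dots in place of spaces, so the old substitution, which only converted underscores, no longer normalized them for matching. The widened character class [_/.] converts underscores, slashes and dots in one pass, e.g. (made-up title):

    import re
    title = "Some.Comic.Title.01.(2013)"
    print re.sub('[_/.]', ' ', title)   # -> 'Some Comic Title 01 (2013)'

One side effect to be aware of: legitimate dots, such as decimal issue numbers like 1.5, are spaced out as well.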
@@ -281,6 +287,9 @@ def NZB_SEARCH(ComicName, IssueNumber, ComicYear, SeriesYear, nzbprov, nzbpr, Is
 
                 while (cnt < lenm):
                     if m[cnt] is None: break
+                    if m[cnt] == ' ':
+                        pass
+                    else:
                     logger.fdebug(str(cnt) + ". Bracket Word: " + str(m[cnt]))
                     if cnt == 0:
                         comic_andiss = m[cnt]
@@ -295,8 +304,8 @@ def NZB_SEARCH(ComicName, IssueNumber, ComicYear, SeriesYear, nzbprov, nzbpr, Is
                         logger.fdebug(str(comyear) + " - not right - years do not match")
                         yearmatch = "false"
                     if 'digital' in m[cnt] and len(m[cnt]) == 7:
+                        logger.fdebug("digital edition detected")
                         pass
-                        #print ("digital edition")
                     if ' of ' in m[cnt]:
                         logger.fdebug("mini-series detected : " + str(m[cnt]))
                         result_of = m[cnt]
@@ -322,7 +331,7 @@ def NZB_SEARCH(ComicName, IssueNumber, ComicYear, SeriesYear, nzbprov, nzbpr, Is
                     #something happened to dognzb searches or results...added a '.' in place of spaces
                     #screwed up most search results with dognzb. Let's try to adjust.
                     watchcomic_split = findcomic[findloop].split(None)
-                    #log2file = log2file + "adjusting from: " + str(comic_iss_b4) + " to: " + str(comic_iss) + "\n"
+                    logger.fdebug("adjusting from: " + str(comic_iss_b4) + " to: " + str(comic_iss))
                     bmm = re.findall('v\d', comic_iss)
                     if len(bmm) > 0: splitst = len(splitit) - 2
                     else: splitst = len(splitit) - 1
@@ -396,23 +405,30 @@ def NZB_SEARCH(ComicName, IssueNumber, ComicYear, SeriesYear, nzbprov, nzbpr, Is
             # this should work for every other provider
             linkstart = linkstart.replace("&", "%26")
             linkapi = str(linkstart)
+            logger.fdebug("link given by: " + str(nzbprov))
+            logger.fdebug("link: " + str(linkstart))
+            logger.fdebug("linkforapi: " + str(linkapi))
             #here we distinguish between rename and not.
             #blackhole functinality---
             #let's download the file to a temporary cache.
 
             if mylar.BLACKHOLE:
+                logger.fdebug("using blackhole directory at : " + str(mylar.BLACKHOLE_DIR))
                 if os.path.exists(mylar.BLACKHOLE_DIR):
                     filenamenzb = str(ComicName) + " " + str(IssueNumber) + " (" + str(comyear) + ").nzb"
                     urllib.urlretrieve(linkapi, str(mylar.BLACKHOLE_DIR) + str(filenamenzb))
+                    logger.fdebug("filename saved to your blackhole as : " + str(filenamenzb))
                 logger.info(u"Successfully sent .nzb to your Blackhole directory : " + str(mylar.BLACKHOLE_DIR) + str(filenamenzb) )
             #end blackhole
 
             else:
                 tmppath = mylar.CACHE_DIR
                 if os.path.exists(tmppath):
+                    logger.fdebug("cache directory successfully found at : " + str(tmppath))
                     pass
                 else:
                     #let's make the dir.
+                    logger.fdebug("couldn't locate cache directory, attempting to create at : " + str(mylar.CACHE_DIR))
                     try:
                         os.makedirs(str(mylar.CACHE_DIR))
                         logger.info(u"Cache Directory successfully created at: " + str(mylar.CACHE_DIR))
@@ -423,32 +439,38 @@ def NZB_SEARCH(ComicName, IssueNumber, ComicYear, SeriesYear, nzbprov, nzbpr, Is
 
                 filenamenzb = os.path.split(linkapi)[1]
                 #filenzb = os.path.join(tmppath,filenamenzb)
-                if nzbprov == 'nzb.su' or nzbprov == 'newznab' or nzbprov == 'experimental':
-                    #filenzb = linkstart[21:]
-                #elif nzbprov == 'experimental':
-                    #let's send a clean copy to SAB because the name could be stupid.
+                logger.fdebug("unalterted nzb name: " + str(filenamenzb))
+                #let's send a clean copy to SAB because the names are random characters and/or could be stupid.
                 filenzb = str(ComicName.replace(' ', '_')) + "_" + str(IssueNumber) + "_(" + str(comyear) + ")"
-                #filenzb = str(filenamenzb)
-                elif nzbprov == 'dognzb':
-                    filenzb = str(filenamenzb)
+                logger.fdebug("prettified nzb name: " + str(filenzb))
 
                 if mylar.RENAME_FILES == 1:
+                    logger.fdebug("Rename Files enabled..")
                     filenzb = str(ComicName.replace(' ', '_')) + "_" + str(IssueNumber) + "_(" + str(comyear) + ")"
+                    logger.fdebug("this should be the same as prettified nzb name:" + str(filenzb))
                 if mylar.REPLACE_SPACES:
+                    logger.fdebug("Replace spaces option enabled")
+                    logger.fdebug("replace character: " + str(mylar.REPLACE_CHAR))
                     repchar = mylar.REPLACE_CHAR
                     repurlchar = mylar.REPLACE_CHAR
                 else:
+                    logger.fdebug("Replace spaces option NOT enabled")
                     repchar = ' '
                     repurlchar = "%20"
                 #let's make sure there's no crap in the ComicName since it's O.G.
+                logger.fdebug("original Name of comic: " + str(ComicName))
                 ComicNM = re.sub('[\:\,]', '', str(ComicName))
+                logger.fdebug("altered Name of comic: " + str(ComicNM))
                 renameit = str(ComicNM) + " " + str(IssueNumber) + " (" + str(SeriesYear) + ")" + " " + "(" + str(comyear) + ")"
+                logger.fdebug("altered Name with additional info: " + str(renameit))
                 renamethis = renameit.replace(' ', repchar)
+                logger.fdebug("...with replace spaces: " + str(renamethis))
                 renamer1 = renameit.replace(' ', repurlchar)
                 renamer = re.sub("\&", "%26", str(renamer1))
+                logger.fdebug("...adjusting for url restrictions: " + str(renamer))
 
                 savefile = str(tmppath) + "/" + str(filenzb) + ".nzb"
-                print "savefile:" + str(savefile)
+                logger.fdebug("nzb file to be saved: " + str(savefile))
 
                 try:
                     urllib.urlretrieve(linkapi, str(savefile))
@@ -462,7 +484,7 @@ def NZB_SEARCH(ComicName, IssueNumber, ComicYear, SeriesYear, nzbprov, nzbpr, Is
 
                 logger.info(u"Sucessfully retrieved nzb file using " + str(nzbprov))
                 nzbname = str(filenzb)
-                print "nzbname:" + str(nzbname)
+                logger.fdebug("nzbname used for post-processing:" + str(nzbname))
                 # NOT NEEDED ANYMORE.
                 #print (str(mylar.RENAME_FILES))
 
@@ -490,7 +512,7 @@ def NZB_SEARCH(ComicName, IssueNumber, ComicYear, SeriesYear, nzbprov, nzbpr, Is
             # tmpapi = str(mylar.SAB_HOST) + "/api?mode=addurl&name=" + str(linkapi) + "&pp=3&cat=" + str(mylar.SAB_CATEGORY) + "&script=ComicRN.py&apikey=" + str(mylar.SAB_APIKEY)
             # time.sleep(5)
             #end outdated.
-            print "send-to-SAB:" + str(tmpapi)
+            logger.fdebug("Completed Send-To-SAB URL:" + str(tmpapi))
             try:
                 urllib2.urlopen(tmpapi)
             except urllib2.URLError:
@@ -554,6 +576,7 @@ def NZB_SEARCH(ComicName, IssueNumber, ComicYear, SeriesYear, nzbprov, nzbpr, Is
             #---END OF NOT NEEDED.
             #delete the .nzb now.
             if mylar.PROG_DIR is not "/":
+                logger.fdebug("preparing to remove temporary nzb file at: " + str(savefile))
                 os.remove(savefile)
                 logger.info(u"Removed temporary save file")
             #--- NOT NEEDED.
@@ -595,13 +618,15 @@ def NZB_SEARCH(ComicName, IssueNumber, ComicYear, SeriesYear, nzbprov, nzbpr, Is
         if foundc == "yes":
             print ("found-yes")
             foundcomic.append("yes")
+            logger.fdebug("Found matching comic...preparing to send to Updater with IssueID: " + str(IssueID) + " and nzbname: " + str(nzbname))
             updater.nzblog(IssueID, nzbname)
             nzbpr == 0
-            break
+            continue
         elif foundc == "no" and nzbpr <> 0:
             logger.info(u"More than one search provider given - trying next one.")
         elif foundc == "no" and nzbpr == 0:
             foundcomic.append("no")
+            logger.fdebug("couldn't find a matching comic")
             if IssDateFix == "no":
                 logger.info(u"Couldn't find Issue " + str(IssueNumber) + " of " + str(ComicName) + "(" + str(comyear) + "). Status kept as wanted." )
                 break
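Reviewer note: the unchanged context line `nzbpr == 0` is a bare comparison whose result is discarded, not an assignment, so the provider counter is never actually reset on a successful find. If resetting was the intent, it would need to be (a guess at intent; not part of this commit):

    nzbpr = 0   # assignment resets the counter; '==' only compares and discards the result
    continue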
@@ -474,6 +474,7 @@ class WebInterface(object):
                     "http_port" : mylar.HTTP_PORT,
                     "http_pass" : mylar.HTTP_PASSWORD,
                     "launch_browser" : helpers.checked(mylar.LAUNCH_BROWSER),
+                    "logverbose" : helpers.checked(mylar.LOGVERBOSE),
                     "download_scan_interval" : mylar.DOWNLOAD_SCAN_INTERVAL,
                     "nzb_search_interval" : mylar.SEARCH_INTERVAL,
                     "libraryscan_interval" : mylar.LIBRARYSCAN_INTERVAL,
@@ -534,7 +535,7 @@ class WebInterface(object):
         raise cherrypy.HTTPRedirect("artistPage?ComicID=%s" % ComicID)
     comic_config.exposed = True
 
-    def configUpdate(self, http_host='0.0.0.0', http_username=None, http_port=8090, http_password=None, launch_browser=0, download_scan_interval=None, nzb_search_interval=None, libraryscan_interval=None,
+    def configUpdate(self, http_host='0.0.0.0', http_username=None, http_port=8090, http_password=None, launch_browser=0, logverbose=0, download_scan_interval=None, nzb_search_interval=None, libraryscan_interval=None,
         sab_host=None, sab_username=None, sab_apikey=None, sab_password=None, sab_category=None, sab_priority=0, log_dir=None, blackhole=0, blackhole_dir=None,
         usenet_retention=None, nzbsu=0, nzbsu_apikey=None, dognzb=0, dognzb_apikey=None, newznab=0, newznab_host=None, newznab_apikey=None, newznab_enabled=0,
         raw=0, raw_provider=None, raw_username=None, raw_password=None, raw_groups=None, experimental=0,
@@ -545,6 +546,7 @@ class WebInterface(object):
         mylar.HTTP_USERNAME = http_username
         mylar.HTTP_PASSWORD = http_password
         mylar.LAUNCH_BROWSER = launch_browser
+        mylar.LOGVERBOSE = logverbose
         mylar.DOWNLOAD_SCAN_INTERVAL = download_scan_interval
         mylar.SEARCH_INTERVAL = nzb_search_interval
         mylar.LIBRARYSCAN_INTERVAL = libraryscan_interval
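Taken together, the new flag round-trips like this: webserve.py passes helpers.checked(mylar.LOGVERBOSE) into the template, which renders "checked" into the logverbose checkbox; on save the browser posts logverbose=1 only when the box is ticked (an unticked checkbox is simply absent from the POST, which is why the keyword default logverbose=0 switches the option off); configUpdate stores it on mylar.LOGVERBOSE, config_write() persists it to the General section of config.ini, and initialize() reads it back on the next start. In sketch form, using only names from this diff:

    mylar.LOGVERBOSE = logverbose                                          # configUpdate: from the form POST
    new_config['General']['logverbose'] = int(LOGVERBOSE)                  # config_write: persist as 0/1
    LOGVERBOSE = bool(check_setting_int(CFG, 'General', 'logverbose', 1))  # initialize: reload (default on)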