Merge branch 'development'

evilhero 2013-01-21 13:27:13 -05:00
commit 1014f0c86c
13 changed files with 247 additions and 168 deletions

View File

@ -194,9 +194,9 @@
</div>
<label>Year Options</label>
<div class="row radio left clearfix">
<input type="radio" style="vertical-align: middle; margin: 3px; margin-top: -1px;" name="fuzzy_year" value="0" /><label>Keep the Year as is<small>Default</small></label>
<input type="radio" style="vertical-align: middle; margin: 3px; margin-top: -1px;" name="fuzzy_year" value="1" /><label>Year Removal<small>Remove issue publication year from searches (dangerous)</small></label>
<input type="radio" style="vertical-align: middle; margin: 3px; margin-top: -1px;" name="fuzzy_year" value="2" /><label>Fuzzy the Year<small>Increase & Decrease the issue publication year by one</small></label>
<input type="radio" style="vertical-align: middle; margin: 3px; margin-top: -1px;" name="fuzzy_year" value="0" ${comicConfig['fuzzy_year0']} /><label>Keep the Year as is<small>Default</small></label>
<input type="radio" style="vertical-align: middle; margin: 3px; margin-top: -1px;" name="fuzzy_year" value="1" ${comicConfig['fuzzy_year1']} /><label>Year Removal<small>Remove issue publication year from searches (dangerous)</small></label>
<input type="radio" style="vertical-align: middle; margin: 3px; margin-top: -1px;" name="fuzzy_year" value="2" ${comicConfig['fuzzy_year2']} /><label>Fuzzy the Year<small>Increase & Decrease the issue publication year by one</small></label>
</div>
<input type="submit" value="Update"/>
</div>
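
The three ${comicConfig['fuzzy_yearN']} substitutions above let webserve.py pre-select whichever radio button matches the stored UseFuzzy value (see the helpers.radio calls in the webserve.py hunk further down). A minimal sketch of such a helper; the exact attribute string it returns is an assumption, not confirmed by this diff:

def radio(variable, pos):
    # Emit the attribute that pre-selects the radio button whose
    # position matches the stored setting; all other buttons get ''.
    if variable == pos:
        return 'checked="checked"'
    return ''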

View File

@ -95,11 +95,17 @@
<td>
<fieldset>
<legend>Interval</legend>
<div class="row">
<div class="row">
<label>NZB Search Interval</label>
<input type="text" name="nzb_search_interval" value="${config['nzb_search_interval']}" size="4">mins
</div>
<div class="row checkbox">
<input type="checkbox" name="nzb_startup_search" value="1" ${config['nzb_startup_search']} /><label>NZB Search on startup</label>
</div>
</div>
</div>
<div class="row">
<label>Download Scan Interval</label>
<input type="text" name="download_scan_interval" value="${config['download_scan_interval']}" size="4">mins
@ -360,7 +366,7 @@
<small>enter in the absolute path to the script</small>
</div>
<div class="row checkbox left clearfix">
<input type="checkbox" name="enable_extra_scripts" value="1" ${config['enable_extra_scripts']} /><label>Use Extra Script AFTER Post-Processing</label>
<input type="checkbox" name="enable_extra_scripts" value="1" ${config['enable_extra_scripts']} /><label>Use Extra Script AFTER Post-Processing</label>
</div>
<div class="row">
<label>Extra Script Location</label>
@ -393,6 +399,7 @@
<input type="text" name="file_format" value="${config['file_format']}" size="43">
<small>Use: $Series, $Year, $Issue<br />
E.g.: $Series $Issue ($Year) = Animal Man 0 (2012) </small>
</fieldset>
<fieldset>
@ -432,6 +439,10 @@
</select>
</div>
<div class="row checkboxclearfix">
<input type="checkbox" name="lowercase_filenames" value="1" ${config['lowercase_filenames']} /><label>Lowercase the entire filename</label><br/>
<small>(will work regardless if Rename Files is enabled)</small>
</div>
</fieldset>
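
The checkbox templates above expand strings like ${config['lowercase_filenames']}, which webserve.py fills via helpers.checked. A plausible minimal form of that helper (the returned attribute string is an assumption):

def checked(variable):
    # Truthy settings render the checkbox pre-ticked; falsy settings
    # contribute nothing to the <input> tag.
    if variable:
        return 'checked="checked"'
    return ''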

Binary file not shown (new image added; 146 KiB).

View File

@ -1,5 +1,4 @@
<%inherit file="base.html" />
<%!
<%inherit file="base.html" /> <%!
import mylar
from mylar.helpers import checked
%>
@ -13,29 +12,29 @@
<%def name="body()">
<div id="paddingheader">
<h1 class="clearfix">Search Question</h1>
<h1 class="clearfix">Ultron Error-Checker</h1>
</div>
<div id="tabs">
<ul>
<li><a href="#tabs-1">More Information</a></li>
<li><a href="#tabs-1">Analysis Required</a></li>
</ul>
<div id="tabs-1" class="configtable">
<table>
<tr>
<td width="200">
<td width="250">
<fieldset>
<div>
<img src="${comicimage}" alt="" height="350" width="230" />
</div>
<div class="row checkbox">
<input type="checkbox" name="add_to_csv" value="1"/><label><small>Add the selected entry to the custom_exceptions.csv (default)</small></label>
</div>
</fieldset>
</td>
<td>
<fieldset>
<legend>Error-Checking...</legend>
<p><strong>I can't add the requested comic.</strong></p>
<td width="100%">
<span style="position:absolute">
<img src="interfaces/default/images/ultron.png" style="float:right" height="125" width="125">
<fieldset>
<center><legend>Error-Check.</legend></center>
<strong>I cannot add the requested comic.</strong>
<p>I've figured out that the Comic that you've selected to watch isn't listed
correctly on the other databases I need to query. This is most likely due to
an incorrect spelling, but sometimes it could be because the year is wrong, or even
@ -46,8 +45,8 @@
<legend>${comicname} (${comicyear})<br/>
${comicissues} Issues</legend></center>
<br/>
</fieldset>
</td>
</fieldset>
</span></td>
</tr>
</table>
<table class="display" id="searchmanage_table">
@ -69,7 +68,7 @@
<td id="comicname"><a href="${result['ComicURL']}" title="${result['ComicName']}" target="_blank">${result['ComicName']}</td>
<td id="comicissues"><title="${result['ComicYear']}">${result['ComicYear']}</td>
<td id="comicissues">${result['ComicIssues']}</td>
<td id="addcomic"><a href="from_Exceptions?comicid=${result['ComicID']}&comicname=${result['ComicName']}&comicyear=${result['ComicYear']}&comicissues=${result['ComicIssues']}&comicpublisher=${result['ComicPublisher']}&gcdid=${result['GCDID']}"><span class="ui-icon-plus"></span>Add Series</a></td>
<td id="addcomic"><a href="from_Exceptions?comicid=${result['ComicID']}&comicname=${result['ComicName'] |u}&comicyear=${result['ComicYear']}&comicissues=${result['ComicIssues']}&comicpublisher=${result['ComicPublisher']}&gcdid=${result['GCDID']}"><span class="ui-icon-plus"></span>Add Series</a></td>
</tr>
%endfor
%else:
@ -81,10 +80,9 @@
</div>
</tbody>
</table>
<form action="error_change" method="GET">
<form action="error_change" method="GET">
<input type="hidden" name="comicid" value=${comicid}>
<input type="hidden" name="comicyear" value=${comicyear}>
<input type="hidden" name="comicissues" value=${comicissues}>
<inptu type="hidden" name="comicname" value=${comicname}>
<div><br/>
<center><label><strong>Didn't get it right? Enter what it should be (or the GCD-ID) here:</strong></label></center>
<center><input type="text" name="errorgcd" size="30"><input type="submit" value="Update"/></center>
@ -99,7 +97,7 @@
function initThisPage() {
jQuery( "#tabs" ).tabs();
initActions();
initConfigCheckbox("#add_to_csv");
initConfigCheckbox("#addtocsv");
};
$(document).ready(function() {
initThisPage();

View File

@ -31,11 +31,8 @@
<td class="publisher">${result['publisher']}</a></td>
<td class="comicyear">${result['comicyear']}</a></td>
<td class="issues">${result['issues']}</td>
%if type == 'album':
<td class="add" nowrap="nowrap"><a href="addReleaseById?rid=${result['albumid']}"><span class="ui-icon ui-icon-plus"></span> Add this album</a></td>
%else:
<td class="add" nowrap="nowrap"><a href="addComic?comicid=${result['comicid']}&comicname=${result['name']}&comicyear=${result['comicyear']}&comicpublisher=${result['publisher']}&comicimage=${result['comicimage']}&comicissues=${result['issues']}"><span class="ui-icon ui-icon-plus"></span> Add this Comic</a></td>
%endif
<td class="add" nowrap="nowrap"><a href="addComic?comicid=${result['comicid']}&comicname=${result['name'] |u}&comicyear=${result['comicyear']}&comicpublisher=${result['publisher']}&comicimage=${result['comicimage']}&comicissues=${result['issues']}"><span class="ui-icon ui-icon-plus"></span> Add this Comic</a></td>
</tr>
%endfor
%endif

View File

@ -284,9 +284,14 @@ class PostProcessor(object):
#rename file and move to new path
#nfilename = series + " " + issueno + " (" + seriesyear + ")"
file_values = {'$Series': series,
'$Issue': prettycomiss,
'$Year': issueyear
'$Year': issueyear,
'$series': series.lower(),
'$Publisher': publisher,
'$publisher': publisher.lower(),
'$Volume': seriesyear
}
for root, dirnames, filenames in os.walk(self.nzb_folder):
@ -312,7 +317,10 @@ class PostProcessor(object):
self._log("New Filename: " + nfilename, logger.DEBUG)
src = self.nzb_folder + "/" + ofilename
dst = comlocation + "/" + nfilename + ext
if mylar.LOWERCASE_FILENAMES:
dst = (comlocation + "/" + nfilename + ext).lower()
else:
dst = comlocation + "/" + nfilename + ext.lower()
self._log("Source:" + src, logger.DEBUG)
self._log("Destination:" + dst, logger.DEBUG)
os.rename(self.nzb_folder + "/" + ofilename, self.nzb_folder + "/" + nfilename + ext)
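
The file_values map above drives token substitution against the configured file format (e.g. '$Series $Issue ($Year)' becomes 'Animal Man 0 (2012)'). A rough sketch of that substitution pass, assuming a simple replace-all loop rather than Mylar's actual helper:

def apply_format(file_format, file_values):
    # Swap each $Token for its value; longest tokens first guards
    # against one token being a prefix of another.
    for token in sorted(file_values, key=len, reverse=True):
        file_format = file_format.replace(token, str(file_values[token]))
    return file_format

# apply_format('$Series $Issue ($Year)',
#              {'$Series': 'Animal Man', '$Issue': '0', '$Year': '2012'})
# returns 'Animal Man 0 (2012)'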

View File

@ -22,6 +22,7 @@ import webbrowser
import sqlite3
import itertools
import csv
import shutil
from lib.apscheduler.scheduler import Scheduler
from lib.configobj import ConfigObj
@ -85,6 +86,7 @@ USENET_RETENTION = None
ADD_COMICS = False
SEARCH_INTERVAL = 360
NZB_STARTUP_SEARCH = False
LIBRARYSCAN_INTERVAL = 300
DOWNLOAD_SCAN_INTERVAL = 5
INTERFACE = None
@ -104,6 +106,7 @@ REPLACE_SPACES = False
REPLACE_CHAR = None
ZERO_LEVEL = False
ZERO_LEVEL_N = None
LOWERCASE_FILENAMES = False
USE_MINSIZE = False
MINSIZE = None
USE_MAXSIZE = False
@ -111,6 +114,7 @@ MAXSIZE = None
AUTOWANT_UPCOMING = True
AUTOWANT_ALL = False
COMIC_COVER_LOCAL = False
ADD_TO_CSV = True
SAB_HOST = None
SAB_USERNAME = None
@ -206,12 +210,12 @@ def initialize():
global __INITIALIZED__, FULL_PATH, PROG_DIR, VERBOSE, DAEMON, DATA_DIR, CONFIG_FILE, CFG, CONFIG_VERSION, LOG_DIR, CACHE_DIR, LOGVERBOSE, \
HTTP_PORT, HTTP_HOST, HTTP_USERNAME, HTTP_PASSWORD, HTTP_ROOT, LAUNCH_BROWSER, GIT_PATH, \
CURRENT_VERSION, LATEST_VERSION, CHECK_GITHUB, CHECK_GITHUB_ON_STARTUP, CHECK_GITHUB_INTERVAL, MUSIC_DIR, DESTINATION_DIR, \
DOWNLOAD_DIR, USENET_RETENTION, SEARCH_INTERVAL, INTERFACE, AUTOWANT_ALL, AUTOWANT_UPCOMING, ZERO_LEVEL, ZERO_LEVEL_N, COMIC_COVER_LOCAL, \
DOWNLOAD_DIR, USENET_RETENTION, SEARCH_INTERVAL, NZB_STARTUP_SEARCH, INTERFACE, AUTOWANT_ALL, AUTOWANT_UPCOMING, ZERO_LEVEL, ZERO_LEVEL_N, COMIC_COVER_LOCAL, \
LIBRARYSCAN_INTERVAL, DOWNLOAD_SCAN_INTERVAL, SAB_HOST, SAB_USERNAME, SAB_PASSWORD, SAB_APIKEY, SAB_CATEGORY, SAB_PRIORITY, BLACKHOLE, BLACKHOLE_DIR, \
NZBSU, NZBSU_APIKEY, DOGNZB, DOGNZB_APIKEY, NZBX,\
NEWZNAB, NEWZNAB_HOST, NEWZNAB_APIKEY, NEWZNAB_ENABLED, EXTRA_NEWZNABS,\
RAW, RAW_PROVIDER, RAW_USERNAME, RAW_PASSWORD, RAW_GROUPS, EXPERIMENTAL, \
PREFERRED_QUALITY, MOVE_FILES, RENAME_FILES, USE_MINSIZE, MINSIZE, USE_MAXSIZE, MAXSIZE, CORRECT_METADATA, FOLDER_FORMAT, FILE_FORMAT, REPLACE_CHAR, REPLACE_SPACES, \
PREFERRED_QUALITY, MOVE_FILES, RENAME_FILES, LOWERCASE_FILENAMES, USE_MINSIZE, MINSIZE, USE_MAXSIZE, MAXSIZE, CORRECT_METADATA, FOLDER_FORMAT, FILE_FORMAT, REPLACE_CHAR, REPLACE_SPACES, ADD_TO_CSV, \
COMIC_LOCATION, QUAL_ALTVERS, QUAL_SCANNER, QUAL_TYPE, QUAL_QUALITY, ENABLE_EXTRA_SCRIPTS, EXTRA_SCRIPTS, ENABLE_PRE_SCRIPTS, PRE_SCRIPTS
if __INITIALIZED__:
@ -243,6 +247,8 @@ def initialize():
LOGVERBOSE = bool(check_setting_int(CFG, 'General', 'logverbose', 1))
GIT_PATH = check_setting_str(CFG, 'General', 'git_path', '')
LOG_DIR = check_setting_str(CFG, 'General', 'log_dir', '')
if not CACHE_DIR:
CACHE_DIR = check_setting_str(CFG, 'General', 'cache_dir', '')
CHECK_GITHUB = bool(check_setting_int(CFG, 'General', 'check_github', 1))
CHECK_GITHUB_ON_STARTUP = bool(check_setting_int(CFG, 'General', 'check_github_on_startup', 1))
@ -252,13 +258,14 @@ def initialize():
USENET_RETENTION = check_setting_int(CFG, 'General', 'usenet_retention', '1500')
SEARCH_INTERVAL = check_setting_int(CFG, 'General', 'search_interval', 360)
NZB_STARTUP_SEARCH = bool(check_setting_int(CFG, 'General', 'nzb_startup_search', 0))
LIBRARYSCAN_INTERVAL = check_setting_int(CFG, 'General', 'libraryscan_interval', 300)
DOWNLOAD_SCAN_INTERVAL = check_setting_int(CFG, 'General', 'download_scan_interval', 5)
INTERFACE = check_setting_str(CFG, 'General', 'interface', 'default')
AUTOWANT_ALL = bool(check_setting_int(CFG, 'General', 'autowant_all', 0))
AUTOWANT_UPCOMING = bool(check_setting_int(CFG, 'General', 'autowant_upcoming', 1))
COMIC_COVER_LOCAL = bool(check_setting_int(CFG, 'General', 'comic_cover_local', 0))
PREFERRED_QUALITY = check_setting_int(CFG, 'General', 'preferred_quality', 0)
PREFERRED_QUALITY = bool(check_setting_int(CFG, 'General', 'preferred_quality', 0))
CORRECT_METADATA = bool(check_setting_int(CFG, 'General', 'correct_metadata', 0))
MOVE_FILES = bool(check_setting_int(CFG, 'General', 'move_files', 0))
RENAME_FILES = bool(check_setting_int(CFG, 'General', 'rename_files', 0))
@ -270,10 +277,12 @@ def initialize():
REPLACE_CHAR = check_setting_str(CFG, 'General', 'replace_char', '')
ZERO_LEVEL = bool(check_setting_int(CFG, 'General', 'zero_level', 0))
ZERO_LEVEL_N = check_setting_str(CFG, 'General', 'zero_level_n', '')
LOWERCASE_FILENAMES = bool(check_setting_int(CFG, 'General', 'lowercase_filenames', 0))
USE_MINSIZE = bool(check_setting_int(CFG, 'General', 'use_minsize', 0))
MINSIZE = check_setting_str(CFG, 'General', 'minsize', '')
USE_MAXSIZE = bool(check_setting_int(CFG, 'General', 'use_maxsize', 0))
MAXSIZE = check_setting_str(CFG, 'General', 'maxsize', '')
ADD_TO_CSV = bool(check_setting_int(CFG, 'General', 'add_to_csv', 1))
ENABLE_EXTRA_SCRIPTS = bool(check_setting_int(CFG, 'General', 'enable_extra_scripts', 0))
EXTRA_SCRIPTS = check_setting_str(CFG, 'General', 'extra_scripts', '')
@ -374,7 +383,9 @@ def initialize():
logger.mylar_log.initLogger(verbose=VERBOSE)
# Put the cache dir in the data dir for now
CACHE_DIR = os.path.join(DATA_DIR, 'cache')
if not CACHE_DIR:
CACHE_DIR = os.path.join(str(DATA_DIR), 'cache')
logger.info("cache set to : " + str(CACHE_DIR))
if not os.path.exists(CACHE_DIR):
try:
os.makedirs(CACHE_DIR)
@ -473,8 +484,6 @@ def config_write():
new_config = ConfigObj()
new_config.filename = CONFIG_FILE
print ("falalal")
new_config['General'] = {}
new_config['General']['config_version'] = CONFIG_VERSION
new_config['General']['http_port'] = HTTP_PORT
@ -486,6 +495,7 @@ def config_write():
new_config['General']['log_dir'] = LOG_DIR
new_config['General']['logverbose'] = int(LOGVERBOSE)
new_config['General']['git_path'] = GIT_PATH
new_config['General']['cache_dir'] = CACHE_DIR
new_config['General']['check_github'] = int(CHECK_GITHUB)
new_config['General']['check_github_on_startup'] = int(CHECK_GITHUB_ON_STARTUP)
@ -495,13 +505,14 @@ def config_write():
new_config['General']['usenet_retention'] = USENET_RETENTION
new_config['General']['search_interval'] = SEARCH_INTERVAL
new_config['General']['nzb_startup_search'] = int(NZB_STARTUP_SEARCH)
new_config['General']['libraryscan_interval'] = LIBRARYSCAN_INTERVAL
new_config['General']['download_scan_interval'] = DOWNLOAD_SCAN_INTERVAL
new_config['General']['interface'] = INTERFACE
new_config['General']['autowant_all'] = AUTOWANT_ALL
new_config['General']['autowant_upcoming'] = AUTOWANT_UPCOMING
new_config['General']['preferred_quality'] = PREFERRED_QUALITY
new_config['General']['comic_cover_local'] = COMIC_COVER_LOCAL
new_config['General']['autowant_all'] = int(AUTOWANT_ALL)
new_config['General']['autowant_upcoming'] = int(AUTOWANT_UPCOMING)
new_config['General']['preferred_quality'] = int(PREFERRED_QUALITY)
new_config['General']['comic_cover_local'] = int(COMIC_COVER_LOCAL)
new_config['General']['correct_metadata'] = int(CORRECT_METADATA)
new_config['General']['move_files'] = int(MOVE_FILES)
new_config['General']['rename_files'] = int(RENAME_FILES)
@ -513,10 +524,12 @@ def config_write():
new_config['General']['replace_char'] = REPLACE_CHAR
new_config['General']['zero_level'] = int(ZERO_LEVEL)
new_config['General']['zero_level_n'] = ZERO_LEVEL_N
new_config['General']['lowercase_filenames'] = int(LOWERCASE_FILENAMES)
new_config['General']['use_minsize'] = int(USE_MINSIZE)
new_config['General']['minsize'] = MINSIZE
new_config['General']['use_maxsize'] = int(USE_MAXSIZE)
new_config['General']['maxsize'] = MAXSIZE
new_config['General']['add_to_csv'] = int(ADD_TO_CSV)
new_config['General']['enable_extra_scripts'] = int(ENABLE_EXTRA_SCRIPTS)
new_config['General']['extra_scripts'] = EXTRA_SCRIPTS
@ -588,8 +601,9 @@ def start():
#now the scheduler (check every 24 hours)
SCHED.add_interval_job(weeklypull.pullit, hours=24)
#let's do a run at the Wanted issues here (on startup).
threading.Thread(target=search.searchforissue).start()
#let's do a run at the Wanted issues here (on startup) if enabled.
if NZB_STARTUP_SEARCH:
threading.Thread(target=search.searchforissue).start()
if CHECK_GITHUB:
SCHED.add_interval_job(versioncheck.checkGithub, minutes=CHECK_GITHUB_INTERVAL)
@ -690,7 +704,12 @@ def csv_load():
csvfile = open(str(EXCEPTIONS_FILE), "rb")
except (OSError,IOError):
if i == 1:
logger.error("No Custom Exceptions found. Using base exceptions only.")
logger.info("No Custom Exceptions found - Using base exceptions only. Creating blank custom_exceptions for your personal use.")
try:
shutil.copy(os.path.join(DATA_DIR,"custom_exceptions_sample.csv"), EXCEPTIONS_FILE)
except (OSError,IOError):
logger.error("Cannot create custom_exceptions.csv in " + str(DATA_DIR) + ". Make sure _sample.csv is present and/or check permissions.")
return
else:
logger.error("Could not locate " + str(EXCEPTIONS[i]) + " file. Make sure it's in datadir: " + DATA_DIR)
break
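
Every new setting above goes through the same check_setting_* pattern: read the key from the ConfigObj section, fall back to the supplied default, and coerce the type. A sketch of how check_setting_int likely behaves; seeding the config with the default is an assumption:

def check_setting_int(config, cfg_name, item_name, default):
    # Fetch an integer setting; fall back to (and store) the default
    # when the key is absent or not parseable as an int.
    try:
        value = int(config[cfg_name][item_name])
    except (KeyError, ValueError, TypeError):
        value = default
        try:
            config[cfg_name][item_name] = value
        except KeyError:
            config[cfg_name] = {item_name: value}
    return value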

View File

@ -42,7 +42,7 @@ def is_exists(comicid):
def addComictoDB(comicid,mismatch=None,pullupd=None):
# Putting this here to get around the circular import. Will try to use this to update images at later date.
from mylar import cache
# from mylar import cache
myDB = db.DBConnection()
@ -133,8 +133,11 @@ def addComictoDB(comicid,mismatch=None,pullupd=None):
#do work to generate folder path
values = {'$Series': series,
'$Publisher': publisher,
'$Year': year
'$Publisher': publisher,
'$Year': year,
'$series': series.lower(),
'$publisher': publisher.lower(),
'$Volume': year
}
#print mylar.FOLDER_FORMAT
@ -190,7 +193,7 @@ def addComictoDB(comicid,mismatch=None,pullupd=None):
urllib.urlretrieve(str(comic['ComicImage']), str(coverfile))
try:
with open(str(coverfile)) as f:
ComicImage = "cache/" + str(comicid) + ".jpg"
ComicImage = os.path.join('cache',str(comicid) + ".jpg")
logger.info(u"Sucessfully retrieved cover for " + str(comic['ComicName']))
#if the comic cover local is checked, save a cover.jpg to the series folder.
if mylar.COMIC_COVER_LOCAL:
@ -364,8 +367,9 @@ def addComictoDB(comicid,mismatch=None,pullupd=None):
logger.info(u"Updating complete for: " + comic['ComicName'])
if pullupd is None:
# lets' check the pullist for anyting at this time as well since we're here.
if mylar.AUTOWANT_UPCOMING:
# let's check the pullist for anything at this time as well since we're here.
# do this for only Present comics....
if mylar.AUTOWANT_UPCOMING and 'Present' in gcdinfo['resultPublished']:
logger.info(u"Checking this week's pullist for new issues of " + str(comic['ComicName']))
updater.newpullcheck(comic['ComicName'], comicid)
@ -377,7 +381,7 @@ def addComictoDB(comicid,mismatch=None,pullupd=None):
for result in results:
foundNZB = "none"
if (mylar.NZBSU or mylar.DOGNZB or mylar.EXPERIMENTAL) and (mylar.SAB_HOST):
if (mylar.NZBSU or mylar.DOGNZB or mylar.EXPERIMENTAL or mylar.NEWZNAB or mylar.NZBX) and (mylar.SAB_HOST):
foundNZB = search.searchforissue(result['IssueID'])
if foundNZB == "yes":
updater.foundsearch(result['ComicID'], result['IssueID'])
@ -406,6 +410,7 @@ def GCDimport(gcomicid, pullupd=None):
ComicName = comic[0]
ComicYear = comic[1]
ComicIssues = comic[2]
ComicPublished = comic[3]
comlocation = comic[5]
ComicPublisher = comic[6]
#ComicImage = comic[4]
@ -469,10 +474,12 @@ def GCDimport(gcomicid, pullupd=None):
year = ComicYear
#do work to generate folder path
values = {'$Series': series,
'$Publisher': publisher,
'$Year': year
'$Publisher': publisher,
'$Year': year,
'$series': series.lower(),
'$publisher': publisher.lower(),
'$Volume': year
}
if mylar.FOLDER_FORMAT == '':
@ -654,7 +661,7 @@ def GCDimport(gcomicid, pullupd=None):
if pullupd is None:
# let's check the pullist for anything at this time as well since we're here.
if mylar.AUTOWANT_UPCOMING:
if mylar.AUTOWANT_UPCOMING and 'Present' in ComicPublished:
logger.info(u"Checking this week's pullist for new issues of " + str(ComicName))
updater.newpullcheck(comic['ComicName'], gcomicid)
@ -666,7 +673,7 @@ def GCDimport(gcomicid, pullupd=None):
for result in results:
foundNZB = "none"
if (mylar.NZBSU or mylar.DOGNZB or mylar.EXPERIMENTAL or mylar.NEWZNAB) and (mylar.SAB_HOST):
if (mylar.NZBSU or mylar.DOGNZB or mylar.EXPERIMENTAL or mylar.NEWZNAB or mylar.NZBX) and (mylar.SAB_HOST):
foundNZB = search.searchforissue(result['IssueID'])
if foundNZB == "yes":
updater.foundsearch(result['ComicID'], result['IssueID'])
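
The widened provider gate above (now including newznab and nzbx) reduces to a simple predicate: at least one indexer enabled, plus a SABnzbd host to hand any hit to. As a standalone sketch against the module globals shown earlier:

import mylar

def can_search():
    # A search attempt needs at least one enabled indexer and a
    # configured SABnzbd host to send the result to.
    providers = (mylar.NZBSU, mylar.DOGNZB, mylar.EXPERIMENTAL,
                 mylar.NEWZNAB, mylar.NZBX)
    return any(providers) and bool(mylar.SAB_HOST)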

View File

@ -32,10 +32,10 @@ def GCDScraper(ComicName, ComicYear, Total, ComicID):
comicyr = ComicYear
comicis = Total
comicid = ComicID
print ( "comicname: " + str(comicnm) )
print ( "comicyear: " + str(comicyr) )
print ( "comichave: " + str(comicis) )
print ( "comicid: " + str(comicid) )
#print ( "comicname: " + str(comicnm) )
#print ( "comicyear: " + str(comicyr) )
#print ( "comichave: " + str(comicis) )
#print ( "comicid: " + str(comicid) )
comicnm = re.sub(' ', '+', comicnm)
input = 'http://www.comics.org/search/advanced/process/?target=series&method=icontains&logic=False&order2=date&order3=&start_date=' + str(comicyr) + '-01-01&end_date=' + str(NOWyr) + '-12-31&series=' + str(comicnm) + '&is_indexed=None'
response = urllib2.urlopen ( input )
@ -331,7 +331,21 @@ def GCDdetails(comseries, resultURL, vari_loop, ComicID, TotalIssues, issvariati
gcdinfo['gcdchoice'] = gcdchoice
else:
pass
#--if 2 identical issue numbers legitimately exist, but have different
#--publication dates, try to distinguish
logger.fdebug("2 identical issue #'s have been found...determining if it's intentional.")
#get current issue & publication date.
logger.fdebug("Issue #:" + str(gcdinfo['ComicIssue']))
logger.fdebug("IssueDate: " + str(gcdinfo['ComicDate']))
#get conflicting issue from tuple
for d in gcdchoice:
if str(d['GCDIssue']) == str(gcdinfo['ComicIssue']):
logger.fdebug("Issue # already in tuple - checking IssueDate:" + str(d['GCDDate']) )
if str(d['GCDDate']) == str(gcdinfo['ComicDate']):
logger.fdebug("Issue #'s and dates match...skipping.")
else:
logger.fdebug("Issue#'s match but different publication dates, not skipping.")
#pass
#logger.fdebug("Duplicate issue detected in DB - ignoring subsequent issue # " + str(gcdinfo['ComicIssue']))
PI = ParseIssue
@ -462,9 +476,9 @@ def ComChk(ComicName, ComicYear, ComicPublisher, Total, ComicID):
if pb in comicpub:
#keep publisher in url if a biggie.
uhuh = "yes"
print (" publisher match : " + str(comicpub))
#print (" publisher match : " + str(comicpub))
conv_pub = comicpub.split()[0]
print (" converted publisher to : " + str(conv_pub))
#print (" converted publisher to : " + str(conv_pub))
#1st run setup - leave it all as it is.
comicrun.append(comicnm)
cruncnt = 0
@ -479,40 +493,27 @@ def ComChk(ComicName, ComicYear, ComicPublisher, Total, ComicID):
cruncnt+=1
totalcount = 0
cr = 0
print ("cruncnt is " + str(cruncnt))
#print ("cruncnt is " + str(cruncnt))
while (cr <= cruncnt):
print ("cr is " + str(cr))
#print ("cr is " + str(cr))
comicnm = comicrun[cr]
#leaving spaces in will screw up the search...let's take care of it
comicnm = re.sub(' ', '+', comicnm)
print ("comicnm: " + str(comicnm))
#input = 'http://www.comics.org/series/name/' + str(comicnm) + '/sort/alpha'
#print ("comicnm: " + str(comicnm))
if uhuh == "yes":
publink = "&pub_name=" + str(conv_pub)
if uhuh == "no":
publink = "&pub_name="
# input = 'http://www.comics.org/search/advanced/process/?target=series&method=icontains&logic=False&order2=date&order3=&start_date=' + str(comicyr) + '-01-01&end_date=' + str(NOWyr) + '-12-31&series=' + str(comicnm) + str(publink) + '&is_indexed=None'
input = 'http://www.comics.org/search/advanced/process/?target=series&method=icontains&logic=False&keywords=&order1=series&order2=date&order3=&start_date=' + str(comicyr) + '-01-01&end_date=' + str(NOWyr) + '-12-31' + '&title=&feature=&job_number=&pages=&script=&pencils=&inks=&colors=&letters=&story_editing=&genre=&characters=&synopsis=&reprint_notes=&story_reprinted=None&notes=' + str(publink) + '&pub_notes=&brand=&brand_notes=&indicia_publisher=&is_surrogate=None&ind_pub_notes=&series=' + str(comicnm) + '&series_year_began=&series_notes=&tracking_notes=&issue_count=&is_comics=None&format=&color=&dimensions=&paper_stock=&binding=&publishing_format=&issues=&volume=&issue_title=&variant_name=&issue_date=&indicia_frequency=&price=&issue_pages=&issue_editing=&isbn=&barcode=&issue_notes=&issue_reprinted=None&is_indexed=None'
print ("input: " + str(input))
response = urllib2.urlopen ( input )
soup = BeautifulSoup ( response)
cnt1 = len(soup.findAll("tr", {"class" : "listing_even"}))
cnt2 = len(soup.findAll("tr", {"class" : "listing_odd"}))
try:
cntit = soup.find("div", {"class" : "item_data"})
# catchit = pubst('a')[0]
except (IndexError, TypeError):
cntit = soup.findAll("div", {"class" : "left"})[1]
# catchit = pubst.find("a")
truecnt = cntit.findNext(text=True)
cnt = int(cnt1 + cnt2)
print ("truecnt: " + str(truecnt))
print ("cnt1: " + str(cnt1))
print ("cnt2: " + str(cnt2))
print (str(cnt) + " results")
# print ("cnt1: " + str(cnt1))
# print ("cnt2: " + str(cnt2))
# print (str(cnt) + " results")
resultName = []
resultID = []
@ -532,11 +533,11 @@ def ComChk(ComicName, ComicYear, ComicPublisher, Total, ComicID):
resultp = soup.findAll("tr", {"class" : "listing_odd"})[n_odd]
rtp = resultp('a')[1]
resultName.append(helpers.cleanName(rtp.findNext(text=True)))
print ( "Comic Name: " + str(resultName[n]) )
# print ( "Comic Name: " + str(resultName[n]) )
pub = resultp('a')[0]
resultPublisher.append(pub.findNext(text=True))
print ( "Publisher: " + str(resultPublisher[n]) )
# print ( "Publisher: " + str(resultPublisher[n]) )
fip = resultp('a',href=True)[1]
resultID.append(fip['href'])
@ -553,19 +554,9 @@ def ComChk(ComicName, ComicYear, ComicPublisher, Total, ComicID):
resultIssues[n] = resultIssues[n].replace(' ','')
# print ( "Year: " + str(resultYear[n]) )
# print ( "Issues: " + str(resultIssues[n]) )
print ("comchkchoice: " + str(comchkchoice))
# if (cr == 0 and n == 0) or (comchkchoice is None):
# print ("initial add.")
# comchkchoice.append({
# "ComicID": str(comicid),
# "ComicName": str(resultName[n]),
# "GCDID": str(resultID[n]),
# "ComicYear" : str(resultYear[n]),
# "ComicPublisher" : str(resultPublisher[n]),
# "ComicIssues" : str(resultIssues[n])
# })
# print ("comchkchoice: " + str(comchkchoice))
if not any(d.get('GCDID', None) == str(resultID[n]) for d in comchkchoice):
print ( str(resultID[n]) + " not in DB...adding.")
#print ( str(resultID[n]) + " not in DB...adding.")
comchkchoice.append({
"ComicID": str(comicid),
"ComicName": str(resultName[n]),
@ -575,8 +566,8 @@ def ComChk(ComicName, ComicYear, ComicPublisher, Total, ComicID):
"ComicURL" : "http://www.comics.org" + str(resultID[n]),
"ComicIssues" : str(resultIssues[n])
})
else:
print ( str(resultID[n]) + " already in DB...skipping" )
#else:
#print ( str(resultID[n]) + " already in DB...skipping" )
n+=1
cr+=1
totalcount= totalcount + cnt
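
The duplicate-issue logic added above distinguishes a true duplicate (same issue number and same publication date) from a legitimately re-used number, such as a relaunched volume. Condensed into a standalone check, using the same tuple layout the loop assumes:

def is_reused_number(gcdchoice, issue, issue_date):
    # Same issue number but a different publication date means the
    # number was legitimately re-used, so the entry is kept rather
    # than skipped as a duplicate.
    for d in gcdchoice:
        if str(d['GCDIssue']) == str(issue):
            return str(d['GCDDate']) != str(issue_date)
    return False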

View File

@ -334,6 +334,8 @@ def NZB_SEARCH(ComicName, IssueNumber, ComicYear, SeriesYear, nzbprov, nzbpr, Is
elif nzbprov == 'nzb.su':
findurl = "http://www.nzb.su/api?t=search&q=" + str(comsearch[findloop]) + "&apikey=" + str(apikey) + "&o=xml&cat=7030"
elif nzbprov == 'newznab':
#let's make sure the host has a '/' at the end, if not add it.
if host_newznab[-1] != "/": host_newznab = str(host_newznab) + "/"
findurl = str(host_newznab) + "api?t=search&q=" + str(comsearch[findloop]) + "&apikey=" + str(apikey) + "&o=xml&cat=7030"
logger.fdebug("search-url: " + str(findurl))
elif nzbprov == 'nzbx':
@ -390,8 +392,8 @@ def NZB_SEARCH(ComicName, IssueNumber, ComicYear, SeriesYear, nzbprov, nzbpr, Is
if cnt == 0:
comic_andiss = m[cnt]
logger.fdebug("Comic: " + str(comic_andiss))
if UseFuzzy == "0" or UseFuzzy == "2" or IssDateFix == "yes":
logger.fdebug("UseFuzzy is : " + str(UseFuzzy))
if UseFuzzy == "0" or UseFuzzy == "2" or UseFuzzy is None or IssDateFix == "yes":
if m[cnt][:-2] == '19' or m[cnt][:-2] == '20':
logger.fdebug("year detected: " + str(m[cnt]))
result_comyear = m[cnt]
@ -499,6 +501,8 @@ def NZB_SEARCH(ComicName, IssueNumber, ComicYear, SeriesYear, nzbprov, nzbpr, Is
logger.fdebug("splitit: " + str(splitit[n]))
if n < (splitst) and n < len(watchcomic_split):
logger.fdebug(str(n) + " Comparing: " + str(watchcomic_split[n]) + " .to. " + str(splitit[n]))
if '+' in watchcomic_split[n]:
watchcomic_split[n] = re.sub('\+', '', str(watchcomic_split[n]))
if str(watchcomic_split[n].lower()) in str(splitit[n].lower()):
logger.fdebug("word matched on : " + str(splitit[n]))
scount+=1
@ -599,11 +603,10 @@ def NZB_SEARCH(ComicName, IssueNumber, ComicYear, SeriesYear, nzbprov, nzbpr, Is
if os.path.exists(mylar.BLACKHOLE_DIR):
#pretty this biatch up.
Bl_ComicName = re.sub('[/:/,\/]', '', str(ComicName))
filenamenzb = str(Bl_ComicName) + " " + str(IssueNumber) + " (" + str(comyear) + ").nzb"
filenamenzb = str(re.sub(" ", ".", str(Bl_ComicName))) + "." + str(IssueNumber) + ".(" + str(comyear) + ").nzb"
urllib.urlretrieve(linkapi, str(mylar.BLACKHOLE_DIR) + str(filenamenzb))
logger.fdebug("filename saved to your blackhole as : " + str(filenamenzb))
logger.info(u"Successfully sent .nzb to your Blackhole directory : " + str(mylar.BLACKHOLE_DIR) + str(filenamenzb) )
nzbname = str(Bl_ComicName) + " " + str(IssueNumber) + " (" + str(comyear) + ")"
#end blackhole
else:
@ -624,17 +627,6 @@ def NZB_SEARCH(ComicName, IssueNumber, ComicYear, SeriesYear, nzbprov, nzbpr, Is
logger.fdebug("link to retrieve via api:" + str(linkapi))
#let's change all space to decimals for simplicity
nzbname = re.sub(" ", ".", str(entry['title']))
nzbname = re.sub('[\,\:]', '', str(nzbname))
extensions = ('.cbr', '.cbz')
if nzbname.lower().endswith(extensions):
fd, ext = os.path.splitext(nzbname)
logger.fdebug("Removed extension from nzb: " + ext)
nzbname = re.sub(str(ext), '', str(nzbname))
logger.fdebug("nzbname used for post-processing:" + str(nzbname))
#we need to change the nzbx string now to allow for the nzbname rename.
if nzbprov == 'nzbx':
@ -680,6 +672,24 @@ def NZB_SEARCH(ComicName, IssueNumber, ComicYear, SeriesYear, nzbprov, nzbpr, Is
# os.remove(savefile)
# logger.info(u"Removed temporary save file")
#raise an exception to break out of loop
#let's change all space to decimals for simplicity
if mylar.BLACKHOLE:
bhole_cname = re.sub('[/:/,\/]', '', str(ComicName))
nzbname = str(re.sub(" ", ".", str(bhole_cname))) + "." + str(IssueNumber) + ".(" + str(comyear) + ")"
else:
nzbname = re.sub(" ", ".", str(entry['title']))
nzbname = re.sub('[\,\:]', '', str(nzbname))
extensions = ('.cbr', '.cbz')
if nzbname.lower().endswith(extensions):
fd, ext = os.path.splitext(nzbname)
logger.fdebug("Removed extension from nzb: " + ext)
nzbname = re.sub(str(ext), '', str(nzbname))
logger.fdebug("nzbname used for post-processing:" + str(nzbname))
foundc = "yes"
done = True
break
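
Both branches above normalize nzbname the same way: spaces become dots, commas and colons are dropped, and a trailing .cbr/.cbz extension is stripped so post-processing can match on the bare name. The same cleanup as one function (a sketch; it uses os.path.splitext instead of the regex strip):

import os
import re

def clean_nzbname(title):
    # Spaces to dots, strip commas/colons, then drop a trailing
    # comic-archive extension if one slipped into the title.
    nzbname = re.sub(r'[\,\:]', '', title.replace(' ', '.'))
    root, ext = os.path.splitext(nzbname)
    if ext.lower() in ('.cbr', '.cbz'):
        nzbname = root
    return nzbname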

View File

@ -93,6 +93,7 @@ def upcoming_update(ComicID, ComicName, IssueNumber, IssueDate):
logger.fdebug("Available to be marked for download - checking..." + str(issuechk['ComicName']) + " Issue: " + str(issuechk['Issue_Number']))
logger.fdebug("...Existing status: " + str(issuechk['Status']))
control = {"IssueID": issuechk['IssueID']}
newValue['IssueID'] = issuechk['IssueID']
if issuechk['Status'] == "Snatched":
values = { "Status": "Snatched"}
newValue['Status'] = "Snatched"
@ -106,7 +107,7 @@ def upcoming_update(ComicID, ComicName, IssueNumber, IssueDate):
values = { "Status": "Skipped"}
newValue['Status'] = "Skipped"
#was in wrong place :(
if mylar.AUTOWANT_UPCOMING:
if mylar.AUTOWANT_UPCOMING:
#for issues not in db - to be added to Upcoming table.
if issuechk is None:
newValue['Status'] = "Wanted"
@ -256,11 +257,11 @@ def forceRescan(ComicID):
while (som < fcn):
#counts get buggered up when the issue is the last field in the filename - ie. '50.cbr'
#logger.fdebug("checking word - " + str(fcnew[som]))
if ".cbr" in fcnew[som]:
if ".cbr" in fcnew[som].lower():
fcnew[som] = fcnew[som].replace(".cbr", "")
elif ".cbz" in fcnew[som]:
elif ".cbz" in fcnew[som].lower():
fcnew[som] = fcnew[som].replace(".cbz", "")
if "(c2c)" in fcnew[som]:
if "(c2c)" in fcnew[som].lower():
fcnew[som] = fcnew[som].replace("(c2c)", " ")
get_issue = shlex.split(str(fcnew[som]))
if fcnew[som] != " ":
@ -272,7 +273,7 @@ def forceRescan(ComicID):
except (ValueError, TypeError):
#not numeric
fcnew[som] = fcnew[som].replace(".", "")
#logger.fdebug("new word: " + str(fcnew[som]))
#logger.fdebug("NOT NUMERIC - new word: " + str(fcnew[som]))
else:
#numeric
pass
@ -354,7 +355,6 @@ def forceRescan(ComicID):
#else:
# if the issue # matches, but there is no year present - still match.
# determine a way to match on year if present, or no year (currently).
som+=1
if haveissue == "yes": break
n+=1
@ -378,13 +378,16 @@ def forceRescan(ComicID):
issStatus = "Wanted"
else:
issStatus = "Skipped"
controlValueDict = {"IssueID": reiss['IssueID']}
newValueDict = {"Status": issStatus }
elif haveissue == "yes":
issStatus = "Downloaded"
controlValueDict = {"IssueID": reiss['IssueID']}
newValueDict = {"Location": isslocation,
"ComicSize": issSize,
"Status": issStatus
}
controlValueDict = {"IssueID": reiss['IssueID']}
newValueDict = {"Location": isslocation,
"ComicSize": issSize,
"Status": issStatus
}
myDB.upsert("issues", newValueDict, controlValueDict)
fn+=1

View File

@ -13,6 +13,8 @@
# You should have received a copy of the GNU General Public License
# along with Mylar. If not, see <http://www.gnu.org/licenses/>.
from __future__ import with_statement
import os
import cherrypy
import datetime
@ -67,9 +69,13 @@ class WebInterface(object):
issues = myDB.select('SELECT * from issues WHERE ComicID=? order by Int_IssueNumber DESC', [ComicID])
if comic is None:
raise cherrypy.HTTPRedirect("home")
usethefuzzy = comic['UseFuzzy']
if usethefuzzy is None: usethefuzzy = "0"
comicConfig = {
"comiclocation" : mylar.COMIC_LOCATION,
"use_fuzzy" : comic['UseFuzzy']
"fuzzy_year0" : helpers.radio(int(usethefuzzy), 0),
"fuzzy_year1" : helpers.radio(int(usethefuzzy), 1),
"fuzzy_year2" : helpers.radio(int(usethefuzzy), 2)
}
return serve_template(templatename="artistredone.html", title=comic['ComicName'], comic=comic, issues=issues, comicConfig=comicConfig)
artistPage.exposed = True
@ -99,11 +105,11 @@ class WebInterface(object):
sresults = []
cresults = []
mismatch = "no"
print ("comicid: " + str(comicid))
print ("comicname: " + str(comicname))
print ("comicyear: " + str(comicyear))
print ("comicissues: " + str(comicissues))
print ("comicimage: " + str(comicimage))
#print ("comicid: " + str(comicid))
#print ("comicname: " + str(comicname))
#print ("comicyear: " + str(comicyear))
#print ("comicissues: " + str(comicissues))
#print ("comicimage: " + str(comicimage))
#here we test for exception matches (ie. comics spanning more than one volume, known mismatches, etc).
CV_EXcomicid = myDB.action("SELECT * from exceptions WHERE ComicID=?", [comicid]).fetchone()
if CV_EXcomicid is None: # pass #
@ -117,7 +123,7 @@ class WebInterface(object):
logger.info(u"I couldn't find an exact match for " + str(comicname) + " (" + str(comicyear) + ") - gathering data for Error-Checking screen (this could take a minute)..." )
i = 0
loopie, cnt = parseit.ComChk(comicname, comicyear, comicpublisher, comicissues, comicid)
print ("total count : " + str(cnt))
#print ("total count : " + str(cnt))
while (i < cnt):
try:
stoopie = loopie['comchkchoice'][i]
@ -133,7 +139,7 @@ class WebInterface(object):
'GCDID' : stoopie['GCDID']
})
i+=1
return serve_template(templatename="searchfix.html", title="Error Check", comicname=comicname, comicid=comicid, comicyear=comicyear, comicimage=comicimage, comicissues=comicissues,cresults=cresults)
return serve_template(templatename="searchfix.html", title="Error Check", comicname=comicname, comicid=comicid, comicyear=comicyear, comicimage=comicimage, comicissues=comicissues, cresults=cresults)
else:
nomatch = "false"
logger.info(u"Quick match success..continuing.")
@ -177,9 +183,17 @@ class WebInterface(object):
#99, (comicid), (gcdid), none
logger.info("saving new information into custom_exceptions.csv...")
except_info = "none #" + str(comicname) + "-(" + str(comicyear) + ")"
with open('custom_exceptions.csv', 'a') as f:
except_file = os.path.join(mylar.DATA_DIR,"custom_exceptions.csv")
if not os.path.exists(except_file):
try:
csvfile = open(str(except_file), 'rb')
csvfile.close()
except (OSError,IOError):
logger.error("Could not locate " + str(except_file) + " file. Make sure it's in datadir: " + mylar.DATA_DIR + " with proper permissions.")
return
with open(str(except_file), 'a') as f:
f.write('%s,%s,%s,%s\n' % ("99", str(comicid), str(gcdid), str(except_info)) )
logger.info("re-loading csv file so it's all nice and current.")
mylar.csv_load()
@ -404,7 +418,7 @@ class WebInterface(object):
def upcoming(self):
myDB = db.DBConnection()
#upcoming = myDB.select("SELECT * from issues WHERE ReleaseDate > date('now') order by ReleaseDate DESC")
upcoming = myDB.select("SELECT * from upcoming WHERE IssueDate > date('now') order by IssueDate DESC")
upcoming = myDB.select("SELECT * from upcoming WHERE IssueDate > date('now') AND IssueID is NULL order by IssueDate DESC")
issues = myDB.select("SELECT * from issues WHERE Status='Wanted'")
#let's move any items from the upcoming table into the wanted table if the date has already passed.
#gather the list...
@ -541,6 +555,7 @@ class WebInterface(object):
"logverbose" : helpers.checked(mylar.LOGVERBOSE),
"download_scan_interval" : mylar.DOWNLOAD_SCAN_INTERVAL,
"nzb_search_interval" : mylar.SEARCH_INTERVAL,
"nzb_startup_search" : helpers.checked(mylar.NZB_STARTUP_SEARCH),
"libraryscan_interval" : mylar.LIBRARYSCAN_INTERVAL,
"sab_host" : mylar.SAB_HOST,
"sab_user" : mylar.SAB_USERNAME,
@ -565,9 +580,9 @@ class WebInterface(object):
"destination_dir" : mylar.DESTINATION_DIR,
"replace_spaces" : helpers.checked(mylar.REPLACE_SPACES),
"replace_char" : mylar.REPLACE_CHAR,
"use_minsize" : mylar.USE_MINSIZE,
"use_minsize" : helpers.checked(mylar.USE_MINSIZE),
"minsize" : mylar.MINSIZE,
"use_maxsize" : mylar.USE_MAXSIZE,
"use_maxsize" : helpers.checked(mylar.USE_MAXSIZE),
"maxsize" : mylar.MAXSIZE,
"interface_list" : interface_list,
"autowant_all" : helpers.checked(mylar.AUTOWANT_ALL),
@ -583,6 +598,8 @@ class WebInterface(object):
"file_format" : mylar.FILE_FORMAT,
"zero_level" : helpers.checked(mylar.ZERO_LEVEL),
"zero_level_n" : mylar.ZERO_LEVEL_N,
"add_to_csv" : helpers.checked(mylar.ADD_TO_CSV),
"lowercase_filenames" : helpers.checked(mylar.LOWERCASE_FILENAMES),
"enable_extra_scripts" : helpers.checked(mylar.ENABLE_EXTRA_SCRIPTS),
"extra_scripts" : mylar.EXTRA_SCRIPTS,
"branch" : version.MYLAR_VERSION,
@ -603,23 +620,19 @@ class WebInterface(object):
def error_change(self, comicid, errorgcd):
if errorgcd[:5].isdigit():
print ("GCD-ID detected : + str(errorgcd)[:5]")
print ("GCD-ID detected : " + str(errorgcd)[:5])
print ("I'm assuming you know what you're doing - going to force-match.")
self.from_Exceptions(comicid=comicid,gcdid=errorgcd)
else:
print ("Assuming rewording of Comic - adjusting to : " + str(errorgcd))
self.addComic(errorgcd)
Err_Info = mylar.cv.getComic(comicid,'comic')
self.addComic(comicid=comicid,comicname=str(errorgcd), comicyear=Err_Info['ComicYear'], comicissues=Err_Info['ComicIssues'], comicpublisher=Err_Info['ComicPublisher'])
error_change.exposed = True
def comic_config(self, com_location, alt_search, fuzzy_year, ComicID):
def comic_config(self, com_location, ComicID, alt_search=None, fuzzy_year=None):
myDB = db.DBConnection()
print ("fuzzy:" + fuzzy_year)
if fuzzy_year == '0': fuzzy_string = "None"
elif fuzzy_year == '1': fuzzy_string = "Remove Year"
elif fuzzy_year == '2': fuzzy_string = "Fuzzy Year"
#--- this is for multiple search terms............
#--- works, just need to redo search.py to accommodate multiple search terms
# ffs_alt = []
@ -648,14 +661,22 @@ class WebInterface(object):
asearch = str(alt_search)
controlValueDict = {'ComicID': ComicID}
newValues = {"ComicLocation": com_location,
"AlternateSearch": str(asearch),
"UseFuzzy": fuzzy_year }
newValues = {"ComicLocation": com_location }
#"QUALalt_vers": qual_altvers,
#"QUALScanner": qual_scanner,
#"QUALtype": qual_type,
#"QUALquality": qual_quality
#}
if asearch is not None:
if asearch == '':
newValues['AlternateSearch'] = "None"
else:
newValues['AlternateSearch'] = str(asearch)
if fuzzy_year is None:
newValues['UseFuzzy'] = "0"
else:
newValues['UseFuzzy'] = str(fuzzy_year)
#force the check/creation of directory com_location here
if os.path.isdir(str(com_location)):
@ -672,11 +693,11 @@ class WebInterface(object):
raise cherrypy.HTTPRedirect("artistPage?ComicID=%s" % ComicID)
comic_config.exposed = True
def configUpdate(self, http_host='0.0.0.0', http_username=None, http_port=8090, http_password=None, launch_browser=0, logverbose=0, download_scan_interval=None, nzb_search_interval=None, libraryscan_interval=None,
def configUpdate(self, http_host='0.0.0.0', http_username=None, http_port=8090, http_password=None, launch_browser=0, logverbose=0, download_scan_interval=None, nzb_search_interval=None, nzb_startup_search=0, libraryscan_interval=None,
sab_host=None, sab_username=None, sab_apikey=None, sab_password=None, sab_category=None, sab_priority=None, log_dir=None, blackhole=0, blackhole_dir=None,
usenet_retention=None, nzbsu=0, nzbsu_apikey=None, dognzb=0, dognzb_apikey=None, nzbx=0, newznab=0, newznab_host=None, newznab_apikey=None, newznab_enabled=0,
raw=0, raw_provider=None, raw_username=None, raw_password=None, raw_groups=None, experimental=0,
preferred_quality=0, move_files=0, rename_files=0, folder_format=None, file_format=None, enable_extra_scripts=0, extra_scripts=None, enable_pre_scripts=0, pre_scripts=None,
preferred_quality=0, move_files=0, rename_files=0, add_to_csv=1, lowercase_filenames=0, folder_format=None, file_format=None, enable_extra_scripts=0, extra_scripts=None, enable_pre_scripts=0, pre_scripts=None,
destination_dir=None, replace_spaces=0, replace_char=None, use_minsize=0, minsize=None, use_maxsize=0, maxsize=None, autowant_all=0, autowant_upcoming=0, comic_cover_local=0, zero_level=0, zero_level_n=None, interface=None, **kwargs):
mylar.HTTP_HOST = http_host
mylar.HTTP_PORT = http_port
@ -686,6 +707,7 @@ class WebInterface(object):
mylar.LOGVERBOSE = logverbose
mylar.DOWNLOAD_SCAN_INTERVAL = download_scan_interval
mylar.SEARCH_INTERVAL = nzb_search_interval
mylar.NZB_STARTUP_SEARCH = nzb_startup_search
mylar.LIBRARYSCAN_INTERVAL = libraryscan_interval
mylar.SAB_HOST = sab_host
mylar.SAB_USERNAME = sab_username
@ -718,6 +740,8 @@ class WebInterface(object):
mylar.REPLACE_CHAR = replace_char
mylar.ZERO_LEVEL = zero_level
mylar.ZERO_LEVEL_N = zero_level_n
mylar.ADD_TO_CSV = add_to_csv
mylar.LOWERCASE_FILENAMES = lowercase_filenames
mylar.USE_MINSIZE = use_minsize
mylar.MINSIZE = minsize
mylar.USE_MAXSIZE = use_maxsize
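
comic_config now handles its optional fields defensively: a missing fuzzy_year persists as "0" and an empty alternate-search string collapses to "None". The mapping in isolation (a hypothetical helper, just to show the rules):

def normalize_comic_config(alt_search=None, fuzzy_year=None):
    # Map raw form values to the strings the comics table expects.
    values = {}
    if alt_search is not None:
        values['AlternateSearch'] = str(alt_search) if alt_search else "None"
    values['UseFuzzy'] = "0" if fuzzy_year is None else str(fuzzy_year)
    return values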

View File

@ -34,7 +34,9 @@ def pullit():
popit = myDB.select("SELECT count(*) FROM sqlite_master WHERE name='weekly' and type='table'")
if popit:
try:
pulldate = myDB.action("SELECT SHIPDATE from weekly").fetchone()
pull_date = myDB.action("SELECT SHIPDATE from weekly").fetchone()
logger.info(u"Weekly pull list present - checking if it's up-to-date..")
pulldate = pull_date['SHIPDATE']
except sqlite3.OperationalError, msg:
conn=sqlite3.connect(mylar.DB_FILE)
c=conn.cursor()
@ -130,6 +132,8 @@ def pullit():
logger.info(u"No new pull-list available - will re-check again in 24 hours.")
pullitcheck()
return
else:
logger.info(u"Preparing to update to the new listing.")
break
else:
for yesyes in checkit:
@ -273,6 +277,7 @@ def pullit():
newtxtfile.write(str(shipdate) + '\t' + str(pub) + '\t' + str(issue) + '\t' + str(comicnm) + '\t' + str(comicrm) + '\tSkipped' + '\n')
prevcomic = str(comicnm)
previssue = str(issue)
logger.info(u"Populating the NEW Weekly Pull list into Mylar.")
newtxtfile.close()
mylardb = os.path.join(mylar.DATA_DIR, "mylar.db")
@ -304,6 +309,7 @@ def pullit():
csvfile.close()
connection.commit()
connection.close()
logger.info(u"Weekly Pull List successfully loaded.")
#let's delete the files
pullpath = str(mylar.CACHE_DIR) + "/"
os.remove( str(pullpath) + "Clean-newreleases.txt" )
@ -311,6 +317,7 @@ def pullit():
pullitcheck()
def pullitcheck(comic1off_name=None,comic1off_id=None):
logger.info(u"Checking the Weekly Releases list for comics I'm watching...")
myDB = db.DBConnection()
not_t = ['TP',
@ -326,6 +333,7 @@ def pullitcheck(comic1off_name=None,comic1off_id=None):
unlines = []
llen = []
ccname = []
pubdate = []
w = 0
tot = 0
chkout = []
@ -353,25 +361,28 @@ def pullitcheck(comic1off_name=None,comic1off_id=None):
w = 1
else:
#let's read in the comic.watchlist from the db here
cur.execute("SELECT ComicID, ComicName, ComicYear, ComicPublisher from comics")
cur.execute("SELECT ComicID, ComicName, ComicYear, ComicPublisher, ComicPublished from comics")
while True:
watchd = cur.fetchone()
#print ("watchd: " + str(watchd))
if watchd is None:
break
a_list.append(watchd[1])
b_list.append(watchd[2])
comicid.append(watchd[0])
#print ( "Comic:" + str(a_list[w]) + " Year: " + str(b_list[w]) )
#if "WOLVERINE AND THE X-MEN" in str(a_list[w]): a_list[w] = "WOLVERINE AND X-MEN"
lines.append(a_list[w].strip())
unlines.append(a_list[w].strip())
llen.append(a_list[w].splitlines())
ccname.append(a_list[w].strip())
tmpwords = a_list[w].split(None)
ltmpwords = len(tmpwords)
ltmp = 1
w+=1
if 'Present' in watchd[4]:
# only bother with comics whose run is still in the 'Present'.
a_list.append(watchd[1])
b_list.append(watchd[2])
comicid.append(watchd[0])
pubdate.append(watchd[4])
#print ( "Comic:" + str(a_list[w]) + " Year: " + str(b_list[w]) )
#if "WOLVERINE AND THE X-MEN" in str(a_list[w]): a_list[w] = "WOLVERINE AND X-MEN"
lines.append(a_list[w].strip())
unlines.append(a_list[w].strip())
llen.append(a_list[w].splitlines())
ccname.append(a_list[w].strip())
tmpwords = a_list[w].split(None)
ltmpwords = len(tmpwords)
ltmp = 1
w+=1
cnt = int(w-1)
cntback = int(w-1)
kp = []
@ -473,6 +484,6 @@ def pullitcheck(comic1off_name=None,comic1off_id=None):
logger.fdebug("There are " + str(otot) + " comics this week to get!")
#print ("However I've already grabbed " + str(btotal) )
#print ("I need to get " + str(tot) + " comic(s)!" )
logger.info(u"Finished checking for comics on my watchlist.")
#con.close()
return
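
With the new guard, only series whose ComicPublished run is still open (the string contains 'Present') are loaded into the weekly check. The filter in isolation, assuming the five-column tuple from the SELECT above:

def watchlist_filter(rows):
    # rows of (ComicID, ComicName, ComicYear, ComicPublisher, ComicPublished);
    # a closed run like '1990 - 1994' can never be on this week's list,
    # while 'May 2011 - Present' stays in play.
    return [row for row in rows if 'Present' in row[4]]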