FIX: on/off option for searching for NZBs on startup, FIX: updating the weekly pull list would always happen (now only on new lists), IMP: a bit more info logged on startup, FIX: a newznab provider without a trailing slash would always return 0 results, FIX: directly adding a GCD ID would fail, IMP: removed some unnecessary code

This commit is contained in:
evilhero 2013-01-15 12:32:08 -05:00
parent 7403e7bcf2
commit 45e42bea2c
8 changed files with 89 additions and 81 deletions

View File

@@ -95,11 +95,17 @@
<td>
<fieldset>
<legend>Interval</legend>
<div class="row">
<div class="row">
<label>NZB Search Interval</label>
<input type="text" name="nzb_search_interval" value="${config['nzb_search_interval']}" size="4">mins
</div>
<div class="row checkbox">
<input type="checkbox" name="nzb_startup_search" value="1" ${config['nzb_startup_search']} /><label>NZB Search on startup</label>
</div>
</div>
</div>
<div class="row">
<label>Download Scan Interval</label>
<input type="text" name="download_scan_interval" value="${config['download_scan_interval']}" size="4">mins

Binary file not shown (new image added, 146 KiB).

View File

@@ -1,5 +1,4 @@
<%inherit file="base.html" />
<%!
<%inherit file="base.html" /> <%!
import mylar
from mylar.helpers import checked
%>
@@ -13,29 +12,31 @@
<%def name="body()">
<div id="paddingheader">
<h1 class="clearfix">Search Question</h1>
<h1 class="clearfix">Ultron Error-Checker</h1>
</div>
<div id="tabs">
<ul>
<li><a href="#tabs-1">More Information</a></li>
<li><a href="#tabs-1">Analysis Required</a></li>
</ul>
<div id="tabs-1" class="configtable">
<table>
<tr>
<td width="200">
<td width="250">
<fieldset>
<div>
<img src="${comicimage}" alt="" height="350" width="230" />
</div>
<div class="row checkbox">
<input type="checkbox" name="add_to_csv" value="1"/><label><small>Add the selected entry to the custom_exceptions.csv (default)</small></label>
<input type="checkbox" style="vertical-align: middle; margin: 3px; margin-top: -1px;" name="addtocsv" value="1" ${mylar.ADD_TO_CSV} /><label><small>Add the selected entry to the custom_exceptions.csv (default)</small></label>
</div>
</fieldset>
</td>
<td>
<fieldset>
<legend>Error-Checking...</legend>
<p><strong>I can't add the requested comic.</strong></p>
<td width="100%">
<span style="position:absolute">
<img src="interfaces/default/images/ultron.png" style="float:right" height="125" width="125">
<fieldset>
<center><legend>Error-Check.</legend></center>
<strong>I cannot add the requested comic.</strong>
<p>I've figured out that the comic you've selected to watch isn't listed
correctly on the other databases I need to query. This is most likely due to
an incorrect spelling, but sometimes it could be because the year is wrong, or even
@@ -46,8 +47,8 @@
<legend>${comicname} (${comicyear})<br/>
${comicissues} Issues</legend></center>
<br/>
</fieldset>
</td>
</fieldset>
</span></td>
</tr>
</table>
<table class="display" id="searchmanage_table">
@@ -83,6 +84,7 @@
</table>
<form action="error_change" method="GET">
<input type="hidden" name="comicid" value=${comicid}>
<input type="hidden" name="add_to_csv" value=addtocsv>
<div><br/>
<center><label><strong>Didn't get it right? Enter what it should be (or the GCD-ID) here:</strong></label></center>
<center><input type="text" name="errorgcd" size="30"><input type="submit" value="Update"/></center>
@@ -97,7 +99,7 @@
function initThisPage() {
jQuery( "#tabs" ).tabs();
initActions();
initConfigCheckbox("#add_to_csv");
initConfigCheckbox("#addtocsv");
};
$(document).ready(function() {
initThisPage();

View File

@@ -85,6 +85,7 @@ USENET_RETENTION = None
ADD_COMICS = False
SEARCH_INTERVAL = 360
NZB_STARTUP_SEARCH = False
LIBRARYSCAN_INTERVAL = 300
DOWNLOAD_SCAN_INTERVAL = 5
INTERFACE = None
@@ -112,6 +113,7 @@ MAXSIZE = None
AUTOWANT_UPCOMING = True
AUTOWANT_ALL = False
COMIC_COVER_LOCAL = False
ADD_TO_CSV = True
SAB_HOST = None
SAB_USERNAME = None
@@ -207,12 +209,12 @@ def initialize():
global __INITIALIZED__, FULL_PATH, PROG_DIR, VERBOSE, DAEMON, DATA_DIR, CONFIG_FILE, CFG, CONFIG_VERSION, LOG_DIR, CACHE_DIR, LOGVERBOSE, \
HTTP_PORT, HTTP_HOST, HTTP_USERNAME, HTTP_PASSWORD, HTTP_ROOT, LAUNCH_BROWSER, GIT_PATH, \
CURRENT_VERSION, LATEST_VERSION, CHECK_GITHUB, CHECK_GITHUB_ON_STARTUP, CHECK_GITHUB_INTERVAL, MUSIC_DIR, DESTINATION_DIR, \
DOWNLOAD_DIR, USENET_RETENTION, SEARCH_INTERVAL, INTERFACE, AUTOWANT_ALL, AUTOWANT_UPCOMING, ZERO_LEVEL, ZERO_LEVEL_N, COMIC_COVER_LOCAL, \
DOWNLOAD_DIR, USENET_RETENTION, SEARCH_INTERVAL, NZB_STARTUP_SEARCH, INTERFACE, AUTOWANT_ALL, AUTOWANT_UPCOMING, ZERO_LEVEL, ZERO_LEVEL_N, COMIC_COVER_LOCAL, \
LIBRARYSCAN_INTERVAL, DOWNLOAD_SCAN_INTERVAL, SAB_HOST, SAB_USERNAME, SAB_PASSWORD, SAB_APIKEY, SAB_CATEGORY, SAB_PRIORITY, BLACKHOLE, BLACKHOLE_DIR, \
NZBSU, NZBSU_APIKEY, DOGNZB, DOGNZB_APIKEY, NZBX,\
NEWZNAB, NEWZNAB_HOST, NEWZNAB_APIKEY, NEWZNAB_ENABLED, EXTRA_NEWZNABS,\
RAW, RAW_PROVIDER, RAW_USERNAME, RAW_PASSWORD, RAW_GROUPS, EXPERIMENTAL, \
PREFERRED_QUALITY, MOVE_FILES, RENAME_FILES, LOWERCASE_FILENAMES, USE_MINSIZE, MINSIZE, USE_MAXSIZE, MAXSIZE, CORRECT_METADATA, FOLDER_FORMAT, FILE_FORMAT, REPLACE_CHAR, REPLACE_SPACES, \
PREFERRED_QUALITY, MOVE_FILES, RENAME_FILES, LOWERCASE_FILENAMES, USE_MINSIZE, MINSIZE, USE_MAXSIZE, MAXSIZE, CORRECT_METADATA, FOLDER_FORMAT, FILE_FORMAT, REPLACE_CHAR, REPLACE_SPACES, ADD_TO_CSV, \
COMIC_LOCATION, QUAL_ALTVERS, QUAL_SCANNER, QUAL_TYPE, QUAL_QUALITY, ENABLE_EXTRA_SCRIPTS, EXTRA_SCRIPTS, ENABLE_PRE_SCRIPTS, PRE_SCRIPTS
if __INITIALIZED__:
@@ -253,6 +255,7 @@ def initialize():
USENET_RETENTION = check_setting_int(CFG, 'General', 'usenet_retention', '1500')
SEARCH_INTERVAL = check_setting_int(CFG, 'General', 'search_interval', 360)
NZB_STARTUP_SEARCH = bool(check_setting_int(CFG, 'General', 'nzb_startup_search', 0))
LIBRARYSCAN_INTERVAL = check_setting_int(CFG, 'General', 'libraryscan_interval', 300)
DOWNLOAD_SCAN_INTERVAL = check_setting_int(CFG, 'General', 'download_scan_interval', 5)
INTERFACE = check_setting_str(CFG, 'General', 'interface', 'default')
@@ -276,6 +279,7 @@ def initialize():
MINSIZE = check_setting_str(CFG, 'General', 'minsize', '')
USE_MAXSIZE = bool(check_setting_int(CFG, 'General', 'use_maxsize', 0))
MAXSIZE = check_setting_str(CFG, 'General', 'maxsize', '')
ADD_TO_CSV = bool(check_setting_int(CFG, 'General', 'add_to_csv', 1))
ENABLE_EXTRA_SCRIPTS = bool(check_setting_int(CFG, 'General', 'enable_extra_scripts', 0))
EXTRA_SCRIPTS = check_setting_str(CFG, 'General', 'extra_scripts', '')
@@ -475,8 +479,6 @@ def config_write():
new_config = ConfigObj()
new_config.filename = CONFIG_FILE
print ("falalal")
new_config['General'] = {}
new_config['General']['config_version'] = CONFIG_VERSION
new_config['General']['http_port'] = HTTP_PORT
@@ -497,6 +499,7 @@ def config_write():
new_config['General']['usenet_retention'] = USENET_RETENTION
new_config['General']['search_interval'] = SEARCH_INTERVAL
new_config['General']['nzb_startup_search'] = int(NZB_STARTUP_SEARCH)
new_config['General']['libraryscan_interval'] = LIBRARYSCAN_INTERVAL
new_config['General']['download_scan_interval'] = DOWNLOAD_SCAN_INTERVAL
new_config['General']['interface'] = INTERFACE
@@ -520,6 +523,7 @@ def config_write():
new_config['General']['minsize'] = MINSIZE
new_config['General']['use_maxsize'] = int(USE_MAXSIZE)
new_config['General']['maxsize'] = MAXSIZE
new_config['General']['add_to_csv'] = int(ADD_TO_CSV)
new_config['General']['enable_extra_scripts'] = int(ENABLE_EXTRA_SCRIPTS)
new_config['General']['extra_scripts'] = EXTRA_SCRIPTS
@@ -591,8 +595,9 @@ def start():
#now the scheduler (check every 24 hours)
SCHED.add_interval_job(weeklypull.pullit, hours=24)
#let's do a run at the Wanted issues here (on startup).
threading.Thread(target=search.searchforissue).start()
#let's do a run at the Wanted issues here (on startup) if enabled.
if NZB_STARTUP_SEARCH:
threading.Thread(target=search.searchforissue).start()
if CHECK_GITHUB:
SCHED.add_interval_job(versioncheck.checkGithub, minutes=CHECK_GITHUB_INTERVAL)
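The NZB_STARTUP_SEARCH round-trip above follows Mylar's usual boolean-setting pattern: the checkbox arrives from the form as '1' or is absent, the value is stored in the INI as 0/1, and it is coerced to bool on load. A minimal sketch of that cycle, using a simplified stand-in for the check_setting_int helper:

    from configobj import ConfigObj

    def check_setting_int(config, section, key, default):
        # Simplified stand-in: return the stored value as an int, or the default.
        try:
            return int(config[section][key])
        except (KeyError, ValueError):
            return default

    cfg = ConfigObj('config.ini')
    if 'General' not in cfg:
        cfg['General'] = {}
    NZB_STARTUP_SEARCH = bool(check_setting_int(cfg, 'General', 'nzb_startup_search', 0))
    # config_write() persists it back as an int so the next read parses cleanly:
    cfg['General']['nzb_startup_search'] = int(NZB_STARTUP_SEARCH)
    cfg.write()

With the flag loaded, start() only spawns the searchforissue thread when the user opted in, instead of unconditionally on every launch.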

View File

@@ -441,11 +441,11 @@ def ComChk(ComicName, ComicYear, ComicPublisher, Total, ComicID):
comicis = Total
comicid = ComicID
comicpub = ComicPublisher
print ( "comicname: " + str(comicnm) )
print ( "comicyear: " + str(comicyr) )
print ( "comichave: " + str(comicis) )
print ( "comicpub: " + str(comicpub) )
print ( "comicid: " + str(comicid) )
#print ( "comicname: " + str(comicnm) )
#print ( "comicyear: " + str(comicyr) )
#print ( "comichave: " + str(comicis) )
#print ( "comicpub: " + str(comicpub) )
#print ( "comicid: " + str(comicid) )
# do 3 runs at the comics.org search to get the best results
comicrun = []
# &pub_name=DC
@@ -462,9 +462,9 @@ def ComChk(ComicName, ComicYear, ComicPublisher, Total, ComicID):
if pb in comicpub:
#keep publisher in url if a biggie.
uhuh = "yes"
print (" publisher match : " + str(comicpub))
#print (" publisher match : " + str(comicpub))
conv_pub = comicpub.split()[0]
print (" converted publisher to : " + str(conv_pub))
#print (" converted publisher to : " + str(conv_pub))
#1st run setup - leave it all as it is.
comicrun.append(comicnm)
cruncnt = 0
@@ -479,40 +479,27 @@ def ComChk(ComicName, ComicYear, ComicPublisher, Total, ComicID):
cruncnt+=1
totalcount = 0
cr = 0
print ("cruncnt is " + str(cruncnt))
#print ("cruncnt is " + str(cruncnt))
while (cr <= cruncnt):
print ("cr is " + str(cr))
#print ("cr is " + str(cr))
comicnm = comicrun[cr]
#leaving spaces in will screw up the search...let's take care of it
comicnm = re.sub(' ', '+', comicnm)
print ("comicnm: " + str(comicnm))
#input = 'http://www.comics.org/series/name/' + str(comicnm) + '/sort/alpha'
#print ("comicnm: " + str(comicnm))
if uhuh == "yes":
publink = "&pub_name=" + str(conv_pub)
if uhuh == "no":
publink = "&pub_name="
# input = 'http://www.comics.org/search/advanced/process/?target=series&method=icontains&logic=False&order2=date&order3=&start_date=' + str(comicyr) + '-01-01&end_date=' + str(NOWyr) + '-12-31&series=' + str(comicnm) + str(publink) + '&is_indexed=None'
input = 'http://www.comics.org/search/advanced/process/?target=series&method=icontains&logic=False&keywords=&order1=series&order2=date&order3=&start_date=' + str(comicyr) + '-01-01&end_date=' + str(NOWyr) + '-12-31' + '&title=&feature=&job_number=&pages=&script=&pencils=&inks=&colors=&letters=&story_editing=&genre=&characters=&synopsis=&reprint_notes=&story_reprinted=None&notes=' + str(publink) + '&pub_notes=&brand=&brand_notes=&indicia_publisher=&is_surrogate=None&ind_pub_notes=&series=' + str(comicnm) + '&series_year_began=&series_notes=&tracking_notes=&issue_count=&is_comics=None&format=&color=&dimensions=&paper_stock=&binding=&publishing_format=&issues=&volume=&issue_title=&variant_name=&issue_date=&indicia_frequency=&price=&issue_pages=&issue_editing=&isbn=&barcode=&issue_notes=&issue_reprinted=None&is_indexed=None'
print ("input: " + str(input))
response = urllib2.urlopen ( input )
soup = BeautifulSoup ( response)
cnt1 = len(soup.findAll("tr", {"class" : "listing_even"}))
cnt2 = len(soup.findAll("tr", {"class" : "listing_odd"}))
# try:
# cntit = soup.find("div", {"class" : "item_data"})
# catchit = pubst('a')[0]
# except (IndexError, TypeError):
# cntit = soup.findAll("div", {"class" : "left"})[1]
# catchit = pubst.find("a")
# truecnt = cntit.findNext(text=True)
cnt = int(cnt1 + cnt2)
# print ("truecnt: " + str(truecnt))
print ("cnt1: " + str(cnt1))
print ("cnt2: " + str(cnt2))
print (str(cnt) + " results")
# print ("cnt1: " + str(cnt1))
# print ("cnt2: " + str(cnt2))
# print (str(cnt) + " results")
resultName = []
resultID = []
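The result count above comes from summing comics.org's zebra-striped table rows, since each series hit is rendered as either a listing_even or listing_odd row. A minimal standalone sketch of that counting, assuming a BeautifulSoup import equivalent to the one this module already uses:

    from bs4 import BeautifulSoup  # assumption: the module's own soup import may differ

    def count_series_rows(html):
        # comics.org alternates row classes, so the hit count is even rows + odd rows.
        soup = BeautifulSoup(html)
        cnt1 = len(soup.findAll('tr', {'class': 'listing_even'}))
        cnt2 = len(soup.findAll('tr', {'class': 'listing_odd'}))
        return cnt1 + cnt2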
@@ -532,11 +519,11 @@ def ComChk(ComicName, ComicYear, ComicPublisher, Total, ComicID):
resultp = soup.findAll("tr", {"class" : "listing_odd"})[n_odd]
rtp = resultp('a')[1]
resultName.append(helpers.cleanName(rtp.findNext(text=True)))
print ( "Comic Name: " + str(resultName[n]) )
# print ( "Comic Name: " + str(resultName[n]) )
pub = resultp('a')[0]
resultPublisher.append(pub.findNext(text=True))
print ( "Publisher: " + str(resultPublisher[n]) )
# print ( "Publisher: " + str(resultPublisher[n]) )
fip = resultp('a',href=True)[1]
resultID.append(fip['href'])
@@ -553,19 +540,9 @@ def ComChk(ComicName, ComicYear, ComicPublisher, Total, ComicID):
resultIssues[n] = resultIssues[n].replace(' ','')
# print ( "Year: " + str(resultYear[n]) )
# print ( "Issues: " + str(resultIssues[n]) )
print ("comchkchoice: " + str(comchkchoice))
# if (cr == 0 and n == 0) or (comchkchoice is None):
# print ("initial add.")
# comchkchoice.append({
# "ComicID": str(comicid),
# "ComicName": str(resultName[n]),
# "GCDID": str(resultID[n]),
# "ComicYear" : str(resultYear[n]),
# "ComicPublisher" : str(resultPublisher[n]),
# "ComicIssues" : str(resultIssues[n])
# })
# print ("comchkchoice: " + str(comchkchoice))
if not any(d.get('GCDID', None) == str(resultID[n]) for d in comchkchoice):
print ( str(resultID[n]) + " not in DB...adding.")
#print ( str(resultID[n]) + " not in DB...adding.")
comchkchoice.append({
"ComicID": str(comicid),
"ComicName": str(resultName[n]),
@@ -575,8 +552,8 @@ def ComChk(ComicName, ComicYear, ComicPublisher, Total, ComicID):
"ComicURL" : "http://www.comics.org" + str(resultID[n]),
"ComicIssues" : str(resultIssues[n])
})
else:
print ( str(resultID[n]) + " already in DB...skipping" )
#else:
#print ( str(resultID[n]) + " already in DB...skipping" )
n+=1
cr+=1
totalcount= totalcount + cnt
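The any() guard in the hunk above is what deduplicates results across the three search runs: a candidate is appended only if no queued choice already carries its GCDID. A minimal standalone sketch of the same pattern, with hypothetical result values:

    comchkchoice = []
    results = [('/series/1234/', 'Spawn'),
               ('/series/1234/', 'Spawn'),      # duplicate hit from a later run
               ('/series/5678/', 'Hack Slash')]
    for gcdid, name in results:
        # Append only when this GCDID hasn't been queued by an earlier run.
        if not any(d.get('GCDID') == gcdid for d in comchkchoice):
            comchkchoice.append({'GCDID': gcdid, 'ComicName': name})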

View File

@@ -334,6 +334,8 @@ def NZB_SEARCH(ComicName, IssueNumber, ComicYear, SeriesYear, nzbprov, nzbpr, Is
elif nzbprov == 'nzb.su':
findurl = "http://www.nzb.su/api?t=search&q=" + str(comsearch[findloop]) + "&apikey=" + str(apikey) + "&o=xml&cat=7030"
elif nzbprov == 'newznab':
#let's make sure the host has a '/' at the end; if not, add it.
if host_newznab[-1] != "/": host_newznab = str(host_newznab) + "/"
findurl = str(host_newznab) + "api?t=search&q=" + str(comsearch[findloop]) + "&apikey=" + str(apikey) + "&o=xml&cat=7030"
logger.fdebug("search-url: " + str(findurl))
elif nzbprov == 'nzbx':
@@ -390,9 +392,8 @@ def NZB_SEARCH(ComicName, IssueNumber, ComicYear, SeriesYear, nzbprov, nzbpr, Is
if cnt == 0:
comic_andiss = m[cnt]
logger.fdebug("Comic: " + str(comic_andiss))
logger.fdebug("UseFuzzy is : " + str(UseFuzzy))
if UseFuzzy == "0" or UseFuzzy == "2" or IssDateFix == "yes":
#logger.fdebug("UseFuzzy is : " + str(UseFuzzy))
if m[cnt][:-2] == '19' or m[cnt][:-2] == '20':
logger.fdebug("year detected: " + str(m[cnt]))
result_comyear = m[cnt]
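The trailing-slash check earlier in this file matters because findurl is built by plain string concatenation: without the slash, 'http://host' + 'api?t=search...' yields 'http://hostapi?...', which resolves nowhere and returns 0 results. A minimal sketch of the normalization, with a hypothetical host value:

    host_newznab = 'http://indexer.example.com'   # hypothetical user-entered host
    if host_newznab[-1] != '/':
        host_newznab += '/'
    findurl = host_newznab + 'api?t=search&q=batman&apikey=KEY&o=xml&cat=7030'
    # -> http://indexer.example.com/api?t=search&q=batman&apikey=KEY&o=xml&cat=7030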

View File

@@ -541,6 +541,7 @@ class WebInterface(object):
"logverbose" : helpers.checked(mylar.LOGVERBOSE),
"download_scan_interval" : mylar.DOWNLOAD_SCAN_INTERVAL,
"nzb_search_interval" : mylar.SEARCH_INTERVAL,
"nzb_startup_search" : helpers.checked(mylar.NZB_STARTUP_SEARCH),
"libraryscan_interval" : mylar.LIBRARYSCAN_INTERVAL,
"sab_host" : mylar.SAB_HOST,
"sab_user" : mylar.SAB_USERNAME,
@@ -565,9 +566,9 @@ class WebInterface(object):
"destination_dir" : mylar.DESTINATION_DIR,
"replace_spaces" : helpers.checked(mylar.REPLACE_SPACES),
"replace_char" : mylar.REPLACE_CHAR,
"use_minsize" : mylar.USE_MINSIZE,
"use_minsize" : helpers.checked(mylar.USE_MINSIZE),
"minsize" : mylar.MINSIZE,
"use_maxsize" : mylar.USE_MAXSIZE,
"use_maxsize" : helpers.checked(mylar.USE_MAXSIZE),
"maxsize" : mylar.MAXSIZE,
"interface_list" : interface_list,
"autowant_all" : helpers.checked(mylar.AUTOWANT_ALL),
@@ -583,7 +584,8 @@ class WebInterface(object):
"file_format" : mylar.FILE_FORMAT,
"zero_level" : helpers.checked(mylar.ZERO_LEVEL),
"zero_level_n" : mylar.ZERO_LEVEL_N,
"lowercase_filenames" : mylar.LOWERCASE_FILENAMES,
"add_to_csv" : helpers.checked(mylar.ADD_TO_CSV),
"lowercase_filenames" : helpers.checked(mylar.LOWERCASE_FILENAMES),
"enable_extra_scripts" : helpers.checked(mylar.ENABLE_EXTRA_SCRIPTS),
"extra_scripts" : mylar.EXTRA_SCRIPTS,
"branch" : version.MYLAR_VERSION,
@@ -602,9 +604,10 @@ class WebInterface(object):
return serve_template(templatename="config.html", title="Settings", config=config)
config.exposed = True
def error_change(self, comicid, errorgcd):
def error_change(self, comicid, errorgcd, add_to_csv):
print ("addtocsv is : " + str(add_to_csv))
if errorgcd[:5].isdigit():
print ("GCD-ID detected : + str(errorgcd)[:5]")
print ("GCD-ID detected : " + str(errorgcd)[:5])
print ("I'm assuming you know what you're doing - going to force-match.")
self.from_Exceptions(comicid=comicid,gcdid=errorgcd)
else:
@@ -614,12 +617,17 @@ class WebInterface(object):
error_change.exposed = True
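error_change now receives the add_to_csv value posted by the searchfix template, and errorgcd[:5].isdigit() is the test that routes a pasted GCD ID straight to a force-match instead of another name search. Note that the template's hidden input (value=addtocsv, unquoted and uninterpolated) will submit the literal string 'addtocsv', so the value needs defensive coercion server-side. A minimal sketch of such coercion, with a hypothetical helper name:

    def as_flag(value, default=True):
        # Accept '1'/'0' from a well-formed form post; fall back to the
        # default for anything else (e.g. the literal string 'addtocsv').
        return value == '1' if value in ('0', '1') else default

    add_to_csv = as_flag('addtocsv')   # -> True (the default)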
def comic_config(self, com_location, alt_search, fuzzy_year, ComicID):
def comic_config(self, com_location, alt_search, ComicID, fuzzy_year=None):
myDB = db.DBConnection()
print ("fuzzy:" + fuzzy_year)
if fuzzy_year == '0': fuzzy_string = "None"
elif fuzzy_year == '1': fuzzy_string = "Remove Year"
elif fuzzy_year == '2': fuzzy_string = "Fuzzy Year"
if fuzzy_year is not None:
newValues['UseFuzzy'] = fuzzy_year
if fuzzy_year == '0':
fuzzy_string = "None"
elif fuzzy_year == '1':
fuzzy_string = "Remove Year"
elif fuzzy_year == '2':
fuzzy_string = "Fuzzy Year"
#--- this is for multiple search terms............
#--- works, just need to redo search.py to accommodate multiple search terms
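One caveat in the hunk above: newValues['UseFuzzy'] is assigned before the newValues dict is created in the following hunk, which would raise a NameError whenever fuzzy_year is supplied, unless newValues already exists earlier in code not shown here. A minimal sketch of the intended flow with the ordering fixed, assuming the same fields:

    def comic_config(self, com_location, alt_search, ComicID, fuzzy_year=None):
        newValues = {"ComicLocation": com_location,
                     "AlternateSearch": str(alt_search)}
        if fuzzy_year is not None:
            # Persist UseFuzzy only when the form actually sent a value.
            newValues['UseFuzzy'] = fuzzy_year
            fuzzy_string = {'0': 'None',
                            '1': 'Remove Year',
                            '2': 'Fuzzy Year'}.get(fuzzy_year, 'None')
            # fuzzy_string is used further down for the status message.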
@@ -650,8 +658,8 @@ class WebInterface(object):
controlValueDict = {'ComicID': ComicID}
newValues = {"ComicLocation": com_location,
"AlternateSearch": str(asearch),
"UseFuzzy": fuzzy_year }
"AlternateSearch": str(asearch) }
#"QUALalt_vers": qual_altvers,
#"QUALScanner": qual_scanner,
#"QUALtype": qual_type,
@@ -673,11 +681,11 @@ class WebInterface(object):
raise cherrypy.HTTPRedirect("artistPage?ComicID=%s" % ComicID)
comic_config.exposed = True
def configUpdate(self, http_host='0.0.0.0', http_username=None, http_port=8090, http_password=None, launch_browser=0, logverbose=0, download_scan_interval=None, nzb_search_interval=None, libraryscan_interval=None,
def configUpdate(self, http_host='0.0.0.0', http_username=None, http_port=8090, http_password=None, launch_browser=0, logverbose=0, download_scan_interval=None, nzb_search_interval=None, nzb_startup_search=0, libraryscan_interval=None,
sab_host=None, sab_username=None, sab_apikey=None, sab_password=None, sab_category=None, sab_priority=None, log_dir=None, blackhole=0, blackhole_dir=None,
usenet_retention=None, nzbsu=0, nzbsu_apikey=None, dognzb=0, dognzb_apikey=None, nzbx=0, newznab=0, newznab_host=None, newznab_apikey=None, newznab_enabled=0,
raw=0, raw_provider=None, raw_username=None, raw_password=None, raw_groups=None, experimental=0,
preferred_quality=0, move_files=0, rename_files=0, lowercase_filenames=0, folder_format=None, file_format=None, enable_extra_scripts=0, extra_scripts=None, enable_pre_scripts=0, pre_scripts=None,
preferred_quality=0, move_files=0, rename_files=0, add_to_csv=1, lowercase_filenames=0, folder_format=None, file_format=None, enable_extra_scripts=0, extra_scripts=None, enable_pre_scripts=0, pre_scripts=None,
destination_dir=None, replace_spaces=0, replace_char=None, use_minsize=0, minsize=None, use_maxsize=0, maxsize=None, autowant_all=0, autowant_upcoming=0, comic_cover_local=0, zero_level=0, zero_level_n=None, interface=None, **kwargs):
mylar.HTTP_HOST = http_host
mylar.HTTP_PORT = http_port
@@ -687,6 +695,7 @@ class WebInterface(object):
mylar.LOGVERBOSE = logverbose
mylar.DOWNLOAD_SCAN_INTERVAL = download_scan_interval
mylar.SEARCH_INTERVAL = nzb_search_interval
mylar.NZB_STARTUP_SEARCH = nzb_startup_search
mylar.LIBRARYSCAN_INTERVAL = libraryscan_interval
mylar.SAB_HOST = sab_host
mylar.SAB_USERNAME = sab_username
@@ -719,6 +728,7 @@ class WebInterface(object):
mylar.REPLACE_CHAR = replace_char
mylar.ZERO_LEVEL = zero_level
mylar.ZERO_LEVEL_N = zero_level_n
mylar.ADD_TO_CSV = add_to_csv
mylar.LOWERCASE_FILENAMES = lowercase_filenames
mylar.USE_MINSIZE = use_minsize
mylar.MINSIZE = minsize
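configUpdate receives every checkbox as a string ('1' when ticked) or not at all, which is why each new flag defaults to 0 (or 1 for add_to_csv) in the signature and is only int()-coerced later in config_write. A minimal sketch of that convention, with a hypothetical form payload:

    form = {'nzb_startup_search': '1'}   # hypothetical POST data; unticked boxes are absent

    def configUpdate(nzb_startup_search=0, add_to_csv=1, **kwargs):
        # Store as-is; config_write() casts with int() before persisting.
        return int(nzb_startup_search), int(add_to_csv)

    print(configUpdate(**form))          # -> (1, 1)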

View File

@@ -34,7 +34,9 @@ def pullit():
popit = myDB.select("SELECT count(*) FROM sqlite_master WHERE name='weekly' and type='table'")
if popit:
try:
pulldate = myDB.action("SELECT SHIPDATE from weekly").fetchone()
pull_date = myDB.action("SELECT SHIPDATE from weekly").fetchone()
logger.info(u"Weekly pull list present - checking if it's up-to-date..")
pulldate = pull_date['SHIPDATE']
except sqlite3.OperationalError, msg:
conn=sqlite3.connect(mylar.DB_FILE)
c=conn.cursor()
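fetchone() on the SHIPDATE query returns a row object (or None when the weekly table is empty), so the new code extracts the column by name rather than comparing the raw row; that is what lets the later check skip the refresh when the list hasn't changed. A minimal sketch of the lookup, assuming an sqlite3 connection with a name-addressable row factory like Mylar's DB wrapper provides:

    import sqlite3

    conn = sqlite3.connect('mylar.db')
    conn.row_factory = sqlite3.Row            # enables row['SHIPDATE'] access
    row = conn.execute('SELECT SHIPDATE from weekly').fetchone()
    pulldate = row['SHIPDATE'] if row is not None else None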
@@ -130,6 +132,8 @@ def pullit():
logger.info(u"No new pull-list available - will re-check again in 24 hours.")
pullitcheck()
return
else:
logger.info(u"Preparing to update to the new listing.")
break
else:
for yesyes in checkit:
@@ -273,6 +277,7 @@ def pullit():
newtxtfile.write(str(shipdate) + '\t' + str(pub) + '\t' + str(issue) + '\t' + str(comicnm) + '\t' + str(comicrm) + '\tSkipped' + '\n')
prevcomic = str(comicnm)
previssue = str(issue)
logger.info(u"Populating the NEW Weekly Pull list into Mylar.")
newtxtfile.close()
mylardb = os.path.join(mylar.DATA_DIR, "mylar.db")
@@ -304,6 +309,7 @@ def pullit():
csvfile.close()
connection.commit()
connection.close()
logger.info(u"Weekly Pull List successfully loaded.")
#let's delete the files
pullpath = str(mylar.CACHE_DIR) + "/"
os.remove( str(pullpath) + "Clean-newreleases.txt" )
@@ -311,6 +317,7 @@ def pullit():
pullitcheck()
def pullitcheck(comic1off_name=None,comic1off_id=None):
logger.info(u"Checking the Weekly Releases list for comics I'm watching...")
myDB = db.DBConnection()
not_t = ['TP',
@@ -473,6 +480,6 @@ def pullitcheck(comic1off_name=None,comic1off_id=None):
logger.fdebug("There are " + str(otot) + " comics this week to get!")
#print ("However I've already grabbed " + str(btotal) )
#print ("I need to get " + str(tot) + " comic(s)!" )
logger.info(u"Finished checking for comics on my watchlist.")
#con.close()
return