FIX:(#1336) Search would crash when attempting to search using a Torznab entry, FIX:(#1338) Added try/exception trap for mktime arguments that were trying to convert improperly formatted dates, FIX:(#1335) Manage Issue View shows annuals with incorrect Status in multiple status views, IMP: Alt_pull 2 method to check for new/updated pull-list every 4 hours instead of 24 (altpull 0/1), FIX: Fixed an error when parsing a filename that contained a numeric + alpha combination, FIX: On ComicDetails for a series would incorrectly indicate that every issue was a Digital Edition print, FIX: Watch action on weekly pull list fixed - will now either indicate as Add Series when the issue information is available on CV (altpull2), or will stay in a Watched status until the information is populated and then auto-add the given series, FIX: Removed Delete Arc dialog box, and redacted back to just 'Remove Arc' option, FIX:(#1332) Unable to Refresh comics via the Manage Comics tab, FIX:(#1135) IndexError when importing more than 100 comics (thnx razorsliph), FIX: When importing and comparing against years when no volume present, would use invalid years for comparisons resulting in manual intervention or wrong series, FIX: Manual Intervention linking during Import would return a blank page, FIX: Legacy mode for 32P would result in parse error, FIX: When Importing, and move files was not selected would pass invalid arguments and not update Import Results screen status, FIX: If copy mode enabled and Duplicate Dump Folder would attempt to reprocess/retag copied file repeatedly, FIX: Fixed some typos and removed some more unnecessary logging statements, IMP: Added option to export all issues in a Downloaded status (Manage / Advanced Options)

This commit is contained in:
evilhero 2016-08-09 21:21:08 -04:00
parent 95b9b10786
commit 06376bc81b
21 changed files with 184 additions and 112 deletions

View File

@ -135,7 +135,7 @@
<label><big>Status: </big><norm>${comic['Status']}</norm></label>
</div>
<%
if comic['Type'] == 'None' or comic['Type'] is None:
if comic['Type'] == 'None' or comic['Type'] is None or comic['Type'] == 'Print':
comictype = 'Print'
else:
comictype = 'Digital'

View File

@ -516,7 +516,7 @@
<input id="enable_rss" type="checkbox" onclick="initConfigCheckbox($(this));" name="enable_rss" value=1 ${config['enable_rss']} /><label>Enable RSS Feed Searches (nzbs & torrents)</label>
</div>
<div class="config">
<label>RSS Inteval Feed Check</label>
<label>RSS Interval Feed Check</label>
<input type="text" name="rss_checkinterval" value="${config['rss_checkinterval']}" size="6" /><small>(Mins)</small>
<a href="#" style="float:right" type="button" onclick="doAjaxCall('force_rss',$(this))" data-success="RSS Force now running" data-error="Error trying to retrieve RSS Feeds"><span class="ui-icon ui-icon-extlink"></span>Force RSS</a>
</br><small><% rss_last=mylar.RSS_LASTRUN %>last run: ${rss_last}</small>

View File

@ -36,8 +36,10 @@
grade = 'A'
else:
grade = 'Z'
if result['haveit'] != "No":
grade = 'H';
%>
<tr class="grade${grade}">
<td class="blank"></td>

View File

@ -138,7 +138,7 @@
</div>
<div id="tabs-3">
<table class="configtable" summary="Advanced Options">
<table summary="Advanced Options" class="configtable">
<tr>
<td>
@ -155,9 +155,12 @@
<fieldset>
<legend>Export</legend>
<div class="links">
<a href="#" onclick="doAjaxCall('wanted_Export',$(this))" data-sucess="Exported to Wanted list." data-error="Failed to export. Check logs"><span class="ui-icon ui-icon-refresh"></span>Export Wanted to CSV</a>
<a href="#" onclick="doAjaxCall('wanted_Export?mode=Wanted',$(this))" data-sucess="Exported to Wanted list." data-error="Failed to export. Check logs"><span class="ui-icon ui-icon-refresh"></span>Export Wanted to CSV</a>
<a href="#" onclick="doAjaxCall('wanted_Export?mode=Downloaded',$(this))" data-sucess="Exported to Downloaded list." data-error="Failed to export. Check logs"><span class="ui-icon ui-icon-refresh"></span>Export Downloaded to CSV</a>
</div>
<br/><br/>
</fieldset>
</br>
<fieldset>
<legend>Additional Options</legend>
<div classs="links">
<a href="readlist">Reading List Management</a><br/>

View File

@ -67,15 +67,16 @@
%for result in searchresults:
<%
if result['comicyear'] == '2016':
grade = 'A'
grade = 'A'
else:
grade = 'Z'
grade = 'Z'
if result['haveit'] != "No":
grade = 'H';
if result['type'] == 'Digital':
grade = 'H';
rtype = None
if type != 'story_arc':
if result['type'] == 'Digital':
rtype = '[Digital]'
else:
rtype = None
%>
<tr class="grade${grade}">
<td class="blank"></td>

View File

@ -96,20 +96,7 @@
<td id="years">${item['SpanYears']}</td>
<td id="have"><span title="${item['percent']}"></span>${css}<div style="width:${item['percent']}%"><span class="progressbar-front-text">${item['Have']}/${item['Total']}</span></div></td>
<td id="options">
<a href="#" id="remove_confirm" title="Remove Arc from Watchlist" onclick="openDelete(${item['StoryArc']| u},${item['StoryArcID']});"><img src="interfaces/default/images/skipped_icon.png" height="25" width="25" /></a>
<div id="dialogit" title="Delete Story Arc Confirmation" style="display:none" class="configtable">
<form action="removefromreadlist" method="GET" style="vertical-align: middle; text-align: center">
<div class="row checkbox left clearfix">
</br>
<h1><center>${['storyarc']}</center></h1></br>
<input type="checkbox" style="vertical-align: middle; margin: 3px; margin-top: -1px;" name="delete_type" id="deleteCheck" value="1" ${checked(delete_type)} /><label>Remove Story Arc based on Arc Name</br> (default is ID)</label>
</div>
</br><input type="submit" value="Delete Story Arc">
<input type="hidden" name="ArcName" value=${['storyarc']}>
<input type="hidden" name="StoryArcID" value=${['storyarcid']}>
</form>
</div>
<a title="Remove Arc from Watchlist" onclick="doAjaxCall('removefromreadlist?ArcName=${item['StoryArc']| u}&StoryArcID=${item['StoryArcID']}',$(this),'table')" data-success='Successfully deleted ${item['StoryArc']}'><img src="interfaces/default/images/skipped_icon.png" height="25" width="25" /></a>
%if item['CV_ArcID']:
<a title="Refresh Series" onclick="doAjaxCall('addStoryArc_thread?arcid=${item['StoryArcID']}&cvarcid=${item['CV_ArcID']}&storyarcname=${item['StoryArc']}&arcrefresh=True',$(this),'table')" data-success="Now refreshing ${item['StoryArc']}."><img src="interfaces/default/images/refresh.png" height="25" width="25" /></a>
%endif

View File

@ -149,7 +149,7 @@
%for f_nodata in future_nodata_upcoming:
<tr class="gradeZ">
<td id="delcolumn">
<a href="upcoming#upcoming_nodata" title="Delete series from auto-Want list" onclick="doAjaxCall('removeautowant?comicname=${f_nodata['ComicName']}&release=${f_nodata['IssueDate']}',$(this),'table')" data-success="${f_nodata['ComicName']} has been removed from the auto-want list"><img src="interfaces/default/images/skipped_icon.png" height="25" width="25" class="highqual" /></a>
<a href="upcoming#upcoming_nodata" title="Delete series from auto-Want list" onclick="doAjaxCall('removeautowant?comicname=${f_nodata['ComicName'] |u}&release=${f_nodata['IssueDate']}',$(this),'table')" data-success="${f_nodata['ComicName']} has been removed from the auto-want list"><img src="interfaces/default/images/skipped_icon.png" height="25" width="25" class="highqual" /></a>
</td>
%if f_nodata['ComicID'] is not None:
<td id="comicname"><a href="comicDetails?ComicID=${f_nodata['ComicID']}">${f_nodata['ComicName']}</a></td>

View File

@ -100,7 +100,7 @@
<a href="#" title="auto-add by ID available for this series" onclick="doAjaxCall('addbyid?comicid=${weekly['COMICID']}&calledby=True',$(this),'table')" data-success="${weekly['COMIC']} is now being added to your wachlist."><span class="ui-icon ui-icon-plus"></span>add series</a>
%else:
%if weekly['ISSUE'] == '1' or weekly['ISSUE'] == '0':
<a href="#" title="Watch for this series" onclick="doAjaxCall('add2futurewatchlist?ComicName=${weekly['COMIC'] |u}&Issue=${weekly['ISSUE']}&Publisher=${weekly['PUBLISHER']}&ShipDate=${weekinfo}', $(this),'table')" data-success="${weekly['COMIC']} is now on auto-watch/add."><span class="ui-icon ui-icon-plus"></span>Watch</a>
<a href="#" title="Watch for this series" onclick="doAjaxCall('add2futurewatchlist?ComicName=${weekly['COMIC'] |u}&Issue=${weekly['ISSUE']}&Publisher=${weekly['PUBLISHER']}&ShipDate=${weekinfo['midweek']}&weeknumber=${weekinfo['weeknumber']}&year=${weekinfo['year']}',$(this),'table')" data-success="${weekly['COMIC']} is now on auto-watch/add."><span class="ui-icon ui-icon-plus"></span>Watch</a>
%else:
<a href="searchit?name=${weekly['COMIC'] | u}&issue=${weekly['ISSUE']}&mode=pullseries" title="Add this series to your watchlist"><span class="ui-icon ui-icon-plus"></span>add series</a>
%endif

View File

@ -168,23 +168,27 @@ class PostProcessor(object):
path_to_move = dupeinfo[0]['to_dupe']
file_to_move = os.path.split(path_to_move)[1]
if dupeinfo[0]['action'] == 'dupe_src':
if dupeinfo[0]['action'] == 'dupe_src' and mylar.FILE_OPTS == 'move':
logger.info('[DUPLICATE-CLEANUP] New File will be post-processed. Moving duplicate [' + path_to_move + '] to Duplicate Dump Folder for manual intervention.')
else:
logger.info('[DUPLICATE-CLEANUP] New File will not be post-processed. Moving duplicate [' + path_to_move + '] to Duplicate Dump Folder for manual intervention.')
#check to make sure duplicate_dump directory exists:
checkdirectory = filechecker.validateAndCreateDirectory(mylar.DUPLICATE_DUMP, True, module='[DUPLICATE-CLEANUP]')
if mylar.FILE_OPTS == 'move':
logger.info('[DUPLICATE-CLEANUP][MOVE-MODE] New File will not be post-processed. Moving duplicate [' + path_to_move + '] to Duplicate Dump Folder for manual intervention.')
else:
logger.info('[DUPLICATE-CLEANUP][COPY-MODE] NEW File will not be post-processed. Retaining file in original location [' + path_to_move + ']')
return True
#this gets tricky depending on if it's the new filename or the existing filename, and whether or not 'copy' or 'move' has been selected.
try:
shutil.move(path_to_move, os.path.join(mylar.DUPLICATE_DUMP, file_to_move))
except (OSError, IOError):
logger.warn('[DUPLICATE-CLEANUP] Failed to move ' + path_to_move + ' ... to ... ' + os.path.join(mylar.DUPLICATE_DUMP, file_to_move))
return False
if mylar.FILE_OPTS == 'move':
#check to make sure duplicate_dump directory exists:
checkdirectory = filechecker.validateAndCreateDirectory(mylar.DUPLICATE_DUMP, True, module='[DUPLICATE-CLEANUP]')
try:
shutil.move(path_to_move, os.path.join(mylar.DUPLICATE_DUMP, file_to_move))
except (OSError, IOError):
logger.warn('[DUPLICATE-CLEANUP] Failed to move ' + path_to_move + ' ... to ... ' + os.path.join(mylar.DUPLICATE_DUMP, file_to_move))
return False
logger.warn('[DUPLICATE-CLEANUP] Successfully moved ' + path_to_move + ' ... to ... ' + os.path.join(mylar.DUPLICATE_DUMP, file_to_move))
return True
logger.warn('[DUPLICATE-CLEANUP] Successfully moved ' + path_to_move + ' ... to ... ' + os.path.join(mylar.DUPLICATE_DUMP, file_to_move))
return True
def Process(self):
module = self.module
@ -279,7 +283,7 @@ class PostProcessor(object):
if not any(re.sub('[\|\s]', '', cname.lower()) == x for x in loopchk):
loopchk.append(re.sub('[\|\s]', '', cname.lower()))
if 'annual' in mod_seriesname.lower():
if all([mylar.ANNUALS_ON, 'annual' in mod_seriesname.lower()]):
mod_seriesname = re.sub('annual', '', mod_seriesname, flags=re.I).strip()
#make sure we add back in the original parsed filename here.

View File

@ -1190,8 +1190,13 @@ def initialize():
runImmediately=True,
delay=30)
if ALT_PULL == 2:
weektimer = 4
else:
weektimer = 24
WeeklyScheduler = scheduler.Scheduler(weeklypullit.Weekly(),
cycleTime=datetime.timedelta(hours=24),
cycleTime=datetime.timedelta(hours=weektimer),
threadName="WEEKLYCHECK",
runImmediately=True,
delay=10)
@ -1654,7 +1659,7 @@ def dbcheck():
c.execute('CREATE TABLE IF NOT EXISTS readinglist(StoryArcID TEXT, ComicName TEXT, IssueNumber TEXT, SeriesYear TEXT, IssueYEAR TEXT, StoryArc TEXT, TotalIssues TEXT, Status TEXT, inCacheDir TEXT, Location TEXT, IssueArcID TEXT, ReadingOrder INT, IssueID TEXT, ComicID TEXT, StoreDate TEXT, IssueDate TEXT, Publisher TEXT, IssuePublisher TEXT, IssueName TEXT, CV_ArcID TEXT, Int_IssueNumber INT, DynamicComicName TEXT)')
c.execute('CREATE TABLE IF NOT EXISTS annuals (IssueID TEXT, Issue_Number TEXT, IssueName TEXT, IssueDate TEXT, Status TEXT, ComicID TEXT, GCDComicID TEXT, Location TEXT, ComicSize TEXT, Int_IssueNumber INT, ComicName TEXT, ReleaseDate TEXT, ReleaseComicID TEXT, ReleaseComicName TEXT, IssueDate_Edit TEXT)')
c.execute('CREATE TABLE IF NOT EXISTS rssdb (Title TEXT UNIQUE, Link TEXT, Pubdate TEXT, Site TEXT, Size TEXT)')
c.execute('CREATE TABLE IF NOT EXISTS futureupcoming (ComicName TEXT, IssueNumber TEXT, ComicID TEXT, IssueID TEXT, IssueDate TEXT, Publisher TEXT, Status TEXT, DisplayComicName TEXT)')
c.execute('CREATE TABLE IF NOT EXISTS futureupcoming (ComicName TEXT, IssueNumber TEXT, ComicID TEXT, IssueID TEXT, IssueDate TEXT, Publisher TEXT, Status TEXT, DisplayComicName TEXT, weeknumber TEXT, year TEXT)')
c.execute('CREATE TABLE IF NOT EXISTS failed (ID TEXT, Status TEXT, ComicID TEXT, IssueID TEXT, Provider TEXT, ComicName TEXT, Issue_Number TEXT, NZBName TEXT, DateFailed TEXT)')
c.execute('CREATE TABLE IF NOT EXISTS searchresults (SRID TEXT, results Numeric, Series TEXT, publisher TEXT, haveit TEXT, name TEXT, deck TEXT, url TEXT, description TEXT, comicid TEXT, comicimage TEXT, issues TEXT, comicyear TEXT, ogcname TEXT)')
conn.commit
@ -2086,6 +2091,17 @@ def dbcheck():
except sqlite3.OperationalError:
c.execute('ALTER TABLE searchresults ADD COLUMN ogcname TEXT')
## -- futureupcoming Table --
try:
c.execute('SELECT weeknumber from futureupcoming')
except sqlite3.OperationalError:
c.execute('ALTER TABLE futureupcoming ADD COLUMN weeknumber TEXT')
try:
c.execute('SELECT year from futureupcoming')
except sqlite3.OperationalError:
c.execute('ALTER TABLE futureupcoming ADD COLUMN year TEXT')
## -- Failed Table --
try:
c.execute('SELECT DateFailed from Failed')

View File

@ -169,11 +169,6 @@ def getComic(comicid, type, issueid=None, arc=None, arcid=None, arclist=None, co
#within the tagging (with CT). This compiles all of the IssueID's during a scan (in 100's), and returns the corresponding CV data
#related to the given IssueID's - namely ComicID, Name, Volume (more at some point, but those are the important ones).
offset = 1
if len(comicidlist) <= 100:
endcnt = len(comicidlist)
else:
endcnt = 100
id_count = 0
import_list = []
logger.fdebug('comicidlist:' + str(comicidlist))
@ -182,6 +177,11 @@ def getComic(comicid, type, issueid=None, arc=None, arcid=None, arclist=None, co
#break it up by 100 per api hit
#do the first 100 regardless
in_cnt = 0
if id_count + 100 <= len(comicidlist):
endcnt = id_count + 100
else:
endcnt = len(comicidlist)
for i in range(id_count, endcnt):
if in_cnt == 0:
tmpidlist = str(comicidlist[i])
@ -198,7 +198,6 @@ def getComic(comicid, type, issueid=None, arc=None, arcid=None, arclist=None, co
tGIL = GetImportList(searched)
import_list += tGIL
endcnt +=100
id_count +=100
return import_list

View File

@ -426,14 +426,10 @@ class FileChecker(object):
if lastissue_position == (split_file.index(sf) -1) and lastissue_label is not None and '#' not in sf:
#find it in the original file to see if there's a decimal between.
#logger.fdebug('lastissue_label: ' + str(lastissue_label))
#logger.fdebug('current sf: ' + str(sf))
#logger.fdebug('file_length: ' + str(file_length))
#logger.fdebug('search_file_length: ' + str(lastissue_mod_position))
#logger.fdebug('trunced_search_length: ' + modfilename[lastissue_mod_position+1:]
findst = lastissue_mod_position+1
#findst = modfilename.find(lastissue_label, lastissue_mod_position+1) #lastissue_mod_position) #file_length - len(lastissue_label))
#logger.fdebug('findst: ' + str(findst))
if findst > len(modfilename):
findst = len(modfilename) -1
if modfilename[findst] != '.' or modfilename[findst] != '#': #findst != '.' and findst != '#':
if sf.isdigit():
logger.fdebug('2 seperate numbers detected. Assuming 2nd number is the actual issue')

View File

@ -2031,6 +2031,19 @@ def issue_status(IssueID):
else:
return False
def crc(filename):
import hashlib
#memory in lieu of speed (line by line)
#prev = 0
#for eachLine in open(filename,"rb"):
# prev = zlib.crc32(eachLine, prev)
#return "%X"%(prev & 0xFFFFFFFF)
#speed in lieu of memory (file into memory entirely)
#return "%X" % (zlib.crc32(open(filename, "rb").read()) & 0xFFFFFFFF)
return hashlib.md5(filename).hexdigest()
def issue_find_ids(ComicName, ComicID, pack, IssueNumber):
import db, logger

View File

@ -582,7 +582,7 @@ def addComictoDB(comicid, mismatch=None, pullupd=None, imported=None, ogcname=No
lastpubdate = issuedata['LastPubDate']
series_status = issuedata['SeriesStatus']
#move the files...if imported is not empty & not futurecheck (meaning it's not from the mass importer.)
logger.info('imported is : ' + str(imported))
#logger.info('imported is : ' + str(imported))
if imported is None or imported == 'None' or imported == 'futurecheck':
pass
else:

View File

@ -67,6 +67,8 @@ def libraryScan(dir=None, append=False, ComicID=None, ComicName=None, cron=None,
comic = files
comicpath = os.path.join(r, files)
comicsize = os.path.getsize(comicpath)
logger.fdebug('Comic: ' + comic + ' [' + comicpath + '] - ' + str(comicsize) + ' bytes')
t = filechecker.FileChecker(dir=r, file=comic)
results = t.listFiles()
#logger.info(results)
@ -83,7 +85,6 @@ def libraryScan(dir=None, append=False, ComicID=None, ComicName=None, cron=None,
#'annualcomicid': annual_comicid,
#'scangroup': scangroup}
logger.fdebug('Comic: ' + comic + ' [' + comicpath + '] - ' + str(comicsize) + ' bytes')
if results:
resultline = '[PARSE-' + results['parse_status'].upper() + ']'

View File

@ -105,14 +105,16 @@ def locg(pulldate=None,weeknumber=None,year=None):
cl_dyninfo = cl_d.dynamic_replace(comicname)
dynamic_name = re.sub('[\|\s]','', cl_dyninfo['mod_seriesname'].lower()).strip()
controlValueDict = {'COMIC': comicname,
controlValueDict = {'DYNAMICNAME': dynamic_name,
'ISSUE': re.sub('#', '', x['issue']).strip()}
newValueDict = {'SHIPDATE': x['shipdate'],
'PUBLISHER': x['publisher'],
'STATUS': 'Skipped',
'COMIC': comicname,
'COMICID': comicid,
'ISSUEID': issueid,
'DYNAMICNAME': dynamic_name,
#'DYNAMICNAME': dynamic_name,
'WEEKNUMBER': x['weeknumber'],
'YEAR': x['year']}
myDB.upsert("weekly", newValueDict, controlValueDict)

View File

@ -300,11 +300,11 @@ def findComic(name, mode, issue, limityear=None, explicit=None, type=None):
if not any(int(x) == int(i) for x in yearRange):
yearRange.append(str(i))
logger.fdebug('[RESULT] ComicName:' + xmlTag + ' -- ' + str(xmlYr) + ' [Series years: ' + str(yearRange) + ']')
logger.fdebug('[RESULT][' + str(limityear) + '] ComicName:' + xmlTag + ' -- ' + str(xmlYr) + ' [Series years: ' + str(yearRange) + ']')
if tmpYr != xmlYr:
xmlYr = tmpYr
if any([limityear in yearRange, limityear == 'None']):
if any(map(lambda v: v in limityear, yearRange)) or limityear == 'None':
xmlurl = result.getElementsByTagName('site_detail_url')[0].firstChild.wholeText
idl = len (result.getElementsByTagName('id'))
idt = 0
@ -358,8 +358,8 @@ def findComic(name, mode, issue, limityear=None, explicit=None, type=None):
xmltype = 'Print'
elif 'digital' in xmldesc[:60].lower() and 'digital edition can be found' not in xmldesc.lower():
xmltype = 'Digital'
else:
xmltype = 'Print'
else:
xmltype = 'Print'
if xmlid in comicLibrary:
haveit = comicLibrary[xmlid]

View File

@ -2,12 +2,13 @@ import mylar
from mylar import db, logger, helpers, updater
import os
import shutil
import ast
def movefiles(comicid, comlocation, imported):
#comlocation is destination
#comicid is used for rename
files_moved = []
imported = ast.literal_eval(imported)
myDB = db.DBConnection()
@ -61,19 +62,22 @@ def movefiles(comicid, comlocation, imported):
myDB.upsert("importresults", newValue, controlValue)
return
def archivefiles(comicid, ogdir, ogcname):
def archivefiles(comicid, comlocation, imported):
myDB = db.DBConnection()
# if move files isn't enabled, let's set all found comics to Archive status :)
result = myDB.select("SELECT * FROM importresults WHERE ComicName=?", [ogcname])
if result is None:
pass
else:
imported = ast.literal_eval(imported)
ComicName = imported['ComicName']
impres = imported['filelisting']
if impres is not None:
scandir = []
for res in result:
if any([os.path.dirname(res['ComicLocation']) in x for x in scandir]):
pass
else:
scandir.append(os.path.dirname(res['ComicLocation']))
for impr in impres:
srcimp = impr['comiclocation']
orig_filename = impr['comicfilename']
if not any([os.path.abspath(os.path.join(srcimp, os.pardir)) == x for x in scandir]):
scandir.append(os.path.abspath(os.path.join(srcimp, os.pardir)))
for sdir in scandir:
logger.info('Updating issue information and setting status to Archived for location: ' + sdir)
@ -82,4 +86,22 @@ def archivefiles(comicid, ogdir, ogcname):
logger.info('Now scanning in files.')
updater.forceRescan(comicid)
for result in impres:
try:
res = result['import_id']
except:
#if it's an 'older' import that wasn't imported, just make it a basic match so things can move and update properly.
controlValue = {"ComicFilename": result['comicfilename'],
"SRID": imported['srid']}
newValue = {"Status": "Imported",
"ComicID": comicid}
else:
controlValue = {"impID": result['import_id'],
"ComicFilename": result['comicfilename']}
newValue = {"Status": "Imported",
"SRID": imported['srid'],
"ComicID": comicid}
myDB.upsert("importresults", newValue, controlValue)
return

View File

@ -286,13 +286,15 @@ def search_init(ComicName, IssueNumber, ComicYear, SeriesYear, Publisher, IssueD
logger.info('Finished searching via :' + str(searchmode) + '. Issue not found - status kept as Wanted.')
else:
logger.fdebug('Could not find issue doing a manual search via : ' + str(searchmode))
if searchprov == '32P' and mylar.MODE_32P == 0:
return findit, 'None'
i+=1
return findit, 'None'
def NZB_SEARCH(ComicName, IssueNumber, ComicYear, SeriesYear, Publisher, IssueDate, StoreDate, nzbprov, prov_count, IssDateFix, IssueID, UseFuzzy, newznab_host=None, ComicVersion=None, SARC=None, IssueArcID=None, RSS=None, ComicID=None, issuetitle=None, unaltered_ComicName=None, allow_packs=None):
if any([allow_packs is None, allow_packs == 'None']):
if any([allow_packs is None, allow_packs == 'None', allow_packs == 0]):
allow_packs = False
logger.info('allow_packs set to :' + str(allow_packs))
@ -305,6 +307,8 @@ def NZB_SEARCH(ComicName, IssueNumber, ComicYear, SeriesYear, Publisher, IssueDa
elif nzbprov == 'experimental':
apikey = 'none'
verify = False
elif nzbprov == 'Torznab':
verify = False
elif nzbprov == 'newznab':
#updated to include Newznab Name now
name_newznab = newznab_host[0].rstrip()
@ -515,13 +519,17 @@ def NZB_SEARCH(ComicName, IssueNumber, ComicYear, SeriesYear, Publisher, IssueDa
if nzbprov == '':
bb = "no results"
rss = "no"
elif nzbprov == '32P':
searchterm = {'series': ComicName, 'issue': findcomiciss, 'volume': ComicVersion, 'publisher': Publisher}
#first we find the id on the serieslist of 32P
#then we call the ajax against the id and issue# and volume (if exists)
a = auth32p.info32p(searchterm=searchterm)
bb = a.searchit()
rss = "no"
if nzbprov == '32P':
if all([mylar.MODE_32P == 1,mylar.ENABLE_32P]):
searchterm = {'series': ComicName, 'issue': findcomiciss, 'volume': ComicVersion, 'publisher': Publisher}
#first we find the id on the serieslist of 32P
#then we call the ajax against the id and issue# and volume (if exists)
a = auth32p.info32p(searchterm=searchterm)
bb = a.searchit()
rss = "no"
else:
bb = "no results"
rss = "no"
elif nzbprov == 'KAT':
cmname = re.sub("%20", " ", str(comsrc))
logger.fdebug("Sending request to [KAT] for " + str(cmname) + " : " + str(mod_isssearch))
@ -797,11 +805,15 @@ def NZB_SEARCH(ComicName, IssueNumber, ComicYear, SeriesYear, Publisher, IssueDa
else:
# convert it to a tuple
dateconv = email.utils.parsedate_tz(pubdate)
# convert it to a numeric time, then subtract the timezone difference (+/- GMT)
if dateconv[-1] is not None:
postdate_int = time.mktime(dateconv[:len(dateconv) -1]) - dateconv[-1]
else:
postdate_int = time.mktime(dateconv[:len(dateconv) -1])
try:
# convert it to a numeric time, then subtract the timezone difference (+/- GMT)
if dateconv[-1] is not None:
postdate_int = time.mktime(dateconv[:len(dateconv) -1]) - dateconv[-1]
else:
postdate_int = time.mktime(dateconv[:len(dateconv) -1])
except:
logger.warn('Unable to parse posting date from provider result set for :' + entry['title'])
continue
#convert it to a Thu, 06 Feb 2014 00:00:00 format
issue_convert = datetime.datetime.strptime(stdate.rstrip(), '%Y-%m-%d')

View File

@ -551,16 +551,16 @@ class WebInterface(object):
raise cherrypy.HTTPRedirect("detailStoryArc?StoryArcID=%s&StoryArcName=%s" % (storyarcid, storyarcname))
addStoryArc.exposed = True
def wanted_Export(self):
def wanted_Export(self,mode):
import unicodedata
myDB = db.DBConnection()
wantlist = myDB.select("SELECT * FROM issues WHERE Status='Wanted' AND ComicName NOT NULL")
wantlist = myDB.select("SELECT * FROM issues WHERE Status=? AND ComicName NOT NULL", [mode])
if wantlist is None:
logger.info("There aren't any issues marked as Wanted. Aborting Export.")
logger.info("There aren't any issues marked as " + mode + ". Aborting Export.")
return
#write it a wanted_list.csv
logger.info("gathered data - writing to csv...")
except_file = os.path.join(mylar.DATA_DIR, "wanted_list.csv")
except_file = os.path.join(mylar.DATA_DIR, str(mode) + "_list.csv")
if os.path.exists(except_file):
try:
os.remove(except_file)
@ -576,11 +576,11 @@ class WebInterface(object):
for want in wantlist:
wantcomic = myDB.selectone("SELECT * FROM comics WHERE ComicID=?", [want['ComicID']]).fetchone()
exceptln = wantcomic['ComicName'].encode('ascii', 'replace') + "," + str(wantcomic['ComicYear']) + "," + str(want['Issue_Number']) + "," + str(want['IssueDate']) + "," + str(want['ComicID']) + "," + str(want['IssueID'])
logger.fdebug(exceptln)
#logger.fdebug(exceptln)
wcount+=1
f.write('%s\n' % (exceptln.encode('ascii', 'replace').strip()))
logger.info("Successfully wrote to csv file " + str(wcount) + " entries from your Wanted list.")
logger.info("Successfully wrote to csv file " + str(wcount) + " entries from your " + mode + " list.")
raise cherrypy.HTTPRedirect("home")
wanted_Export.exposed = True
@ -1691,11 +1691,12 @@ class WebInterface(object):
futurepulllist.exposed = True
def add2futurewatchlist(self, ComicName, Issue, Publisher, ShipDate, FutureID=None):
#ShipDate is a tuple ('weeknumber','startweek','midweek','endweek','year')
def add2futurewatchlist(self, ComicName, Issue, Publisher, ShipDate, weeknumber, year, FutureID=None):
#ShipDate is just weekinfo['midweek'] #a tuple ('weeknumber','startweek','midweek','endweek','year')
myDB = db.DBConnection()
logger.info(ShipDate)
if FutureID is not None:
chkfuture = myDB.selectone('SELECT * FROM futureupcoming WHERE ComicName=? AND IssueNumber=? WHERE weeknumber=?', [ComicName, Issue, ShipDate['weeknumber']]).fetchone()
chkfuture = myDB.selectone('SELECT * FROM futureupcoming WHERE ComicName=? AND IssueNumber=? WHERE weeknumber=? AND year=?', [ComicName, Issue, weeknumber, year]).fetchone()
if chkfuture is not None:
logger.info('Already on Future Upcoming list - not adding at this time.')
return
@ -1706,7 +1707,9 @@ class WebInterface(object):
"Publisher": Publisher}
newVal = {"Status": "Wanted",
"IssueDate": ShipDate['midweek']}
"IssueDate": ShipDate,
"weeknumber": weeknumber,
"year": year}
myDB.upsert("futureupcoming", newVal, newCtrl)
@ -2040,10 +2043,14 @@ class WebInterface(object):
status = kwargs['status']
results = []
myDB = db.DBConnection()
issues = myDB.select('SELECT * from issues WHERE Status=?', [status])
if mylar.ANNUALS_ON:
issues = myDB.select("SELECT * from issues WHERE Status=? AND ComicName NOT LIKE '%Annual%'", [status])
annuals = myDB.select("SELECT * from annuals WHERE Status=?", [status])
else:
issues = myDB.select("SELECT * from issues WHERE Status=?", [status])
annuals = []
for iss in issues:
results.append(iss)
annuals = myDB.select('SELECT * from annuals WHERE Status=?', [status])
for ann in annuals:
results.append(ann)
@ -2176,10 +2183,13 @@ class WebInterface(object):
myDB.upsert("comics", newValueDict, controlValueDict)
logger.info('[MANAGE COMICS][RESUME] ' + ComicName + ' has now been put into a Resumed State.')
else:
logger.info('appending ' + str(ComicID) + ' to refresh list.')
comicsToAdd.append(ComicID)
logger.info(comicsToAdd)
if len(comicsToAdd) > 0:
logger.info('[MANAGE COMICS][REFRESH] Refreshing ' + len(comicsToAdd) + ' series')
logger.info('[MANAGE COMICS][REFRESH] Refreshing ' + str(len(comicsToAdd)) + ' series')
threading.Thread(target=updater.dbUpdate, args=[comicsToAdd]).start()
markComics.exposed = True
@ -2641,7 +2651,7 @@ class WebInterface(object):
else:
issue_int = helpers.issuedigits(arc['IssueNumber'])
logger.fdebug('int_issue = ' + str(issue_int))
isschk = myDB.selectone("SELECT * FROM issues WHERE Int_IssueNumber=? AND ComicID=? AND STATUS !='Snatched'", [issue_int, comic['ComicID']]).fetchone()
isschk = myDB.selectone("SELECT * FROM issues WHERE Int_IssueNumber=? AND ComicID=?", [issue_int, comic['ComicID']]).fetchone() #AND STATUS !='Snatched'", [issue_int, comic['ComicID']]).fetchone()
if isschk is None:
logger.fdebug("we matched on name, but issue " + arc['IssueNumber'] + " doesn't exist for " + comic['ComicName'])
else:
@ -3402,10 +3412,8 @@ class WebInterface(object):
#taking this outside of the transaction in an attempt to stop db locking.
if mylar.IMP_MOVE and movealreadyonlist == "yes":
# for md in movedata:
mylar.moveit.movefiles(movedata_comicid, movedata_comiclocation, movedata_comicname)
updater.forceRescan(comicid)
raise cherrypy.HTTPRedirect("importResults")
#figure out # of issues and the year range allowable
@ -3415,12 +3423,16 @@ class WebInterface(object):
if all([yearTOP != None, yearTOP != 'None']):
if int(str(yearTOP)) > 0:
minni = helpers.issuedigits(minISSUE)
#logger.info(minni)
logger.info(minni)
if minni < 1 or minni > 999999999:
logger.info('here')
maxyear = int(str(yearTOP))
else:
maxyear = int(str(yearTOP)) - (minni / 12)
logger.info('there')
maxyear = int(str(yearTOP)) - ( (minni/1000) / 12 )
if str(maxyear) not in yearRANGE:
logger.info('maxyear:' + str(maxyear))
logger.info('yeartop:' + str(yearTOP))
for i in range(maxyear, int(yearTOP),1):
if not any(int(x) == int(i) for x in yearRANGE):
yearRANGE.append(str(i))
@ -3570,8 +3582,8 @@ class WebInterface(object):
if len(search_matches) > 1:
# if we matched on more than one series above, just save those results instead of the entire search result set.
for sres in search_matches:
cVal = {"SRID": SRID,
"comicid": sres['comicid']}
cVal = {"SRID": SRID,
"comicid": sres['comicid']}
#should store ogcname in here somewhere to account for naming conversions above.
nVal = {"Series": ComicName,
"results": len(search_matches),
@ -3591,8 +3603,8 @@ class WebInterface(object):
# store the search results for series that returned more than one result for user to select later / when they want.
# should probably assign some random numeric for an id to reference back at some point.
for sres in sresults:
cVal = {"SRID": SRID,
"comicid": sres['comicid']}
cVal = {"SRID": SRID,
"comicid": sres['comicid']}
#should store ogcname in here somewhere to account for naming conversions above.
nVal = {"Series": ComicName,
"results": len(sresults),
@ -3652,7 +3664,9 @@ class WebInterface(object):
else:
if not Volume.lower().startswith('v'):
volume = 'v' + str(Volume)
results = myDB.select("SELECT * FROM importresults WHERE (WatchMatch is Null OR WatchMatch LIKE 'C%') AND DynamicName=? AND Volume=?",[DynamicName,Volume])
else:
volume = Volume
results = myDB.select("SELECT * FROM importresults WHERE (WatchMatch is Null OR WatchMatch LIKE 'C%') AND DynamicName=? AND Volume=?",[DynamicName,volume])
files = []
for result in results:
files.append({'comicfilename': result['ComicFilename'],
@ -3666,7 +3680,7 @@ class WebInterface(object):
'filelisting': files,
'srid': SRID}
return serve_template(templatename="importresults_popup.html", title="results", searchtext=ComicName, searchresults=results, imported=imported)
return serve_template(templatename="importresults_popup.html", title="results", searchtext=ComicName, searchresults=searchresults, imported=imported)
importresults_popup.exposed = True

View File

@ -64,7 +64,7 @@ def pullit(forcecheck=None):
newpull.newpull()
elif mylar.ALT_PULL == 2:
logger.info('[PULL-LIST] Populating & Loading pull-list data directly from alternate website')
chk_locg = locg.locg(pulldate)
chk_locg = locg.locg('00000000') #setting this to 00000000 will do a Recreate on every call instead of a Refresh
if chk_locg['status'] == 'up2date':
logger.info('[PULL-LIST] Pull-list is already up-to-date with ' + str(chk_locg['count']) + 'issues. Polling watchlist against it to see if anything is new.')
mylar.PULLNEW = 'no'