IMP: Ability to now use the 'future pull-list' to mark series that have no data as of yet to be 'watched', IMP: Added extra tabs to Upcoming page (Upcoming no data=series on the future pull-list that are to be watched and then auto-added and downloaded when information is available - usually for number 1's, future upcoming=up to 3+ months in advance showing what's coming up for series in watchlist), IMP: Improved date comparisons for determining when issue reboots in a given year are the correct series

This commit is contained in:
evilhero 2014-03-19 15:07:25 -04:00
parent d5d443d716
commit b7347a80ef
6 changed files with 333 additions and 61 deletions

View File

@ -59,28 +59,100 @@
<div class="title">
<h1 class="clearfix"><img src="interfaces/default/images/icon_upcoming.png" alt="Upcoming Issues"/>Upcoming Issues</h1>
</div>
<div id="tabs">
<ul>
<li><a href="#tabs-1">This Week's Upcoming</a></li>
<li><a href="#tabs-2">Upcoming nodata</a></li>
<li><a href="#tabs-3">Future Upcoming</a></li>
</ul>
<div id="tabs-1">
<div class="table_wrapper">
<table class="display_no_select" id="upcoming_table">
<thead>
<tr>
<th id="comicname">Comic</th>
<th id="issuenumber">Issue</th>
<th id="reldate">Release Date</th>
<th id="status">Status</th>
</tr>
</thead>
<tbody>
%for upcome in upcoming:
<tr class="gradeZ">
<td id="comicname"><a href="comicDetails?ComicID=${upcome['ComicID']}">${upcome['DisplayComicName']}</a></td>
<td id="issuenumber">${upcome['IssueNumber']}</td>
<td id="reldate">${upcome['IssueDate']}</td>
<td id="status">${upcome['Status']}</td>
</tr>
%endfor
</tbody>
%if upcoming:
<thead>
<tr>
<th id="comicname">Comic</th>
<th id="issuenumber">Issue</th>
<th id="reldate">Release Date</th>
<th id="status">Status</th>
</tr>
</thead>
<tbody>
%for upcome in upcoming:
<tr class="gradeZ">
<td id="comicname"><a href="comicDetails?ComicID=${upcome['ComicID']}">${upcome['DisplayComicName']}</a></td>
<td id="issuenumber">${upcome['IssueNumber']}</td>
<td id="reldate">${upcome['IssueDate']}</td>
<td id="status">${upcome['Status']}</td>
</tr>
%endfor
</tbody>
%else:
<tr><td align="center" width="100%"> no upcoming data to display</td></tr>
%endif
</table>
</div>
</div>
<div id="tabs-2">
<div class="table_wrapper">
<table class="display_no_select" id="upcoming_table">
%if future_nodata_upcoming:
<thead>
<tr>
<th id="comicname">Comic</th>
<th id="issuenumber">Issue</th>
<th id="reldate">Release Date</th>
<th id="status">Status</th>
</tr>
</thead>
<tbody>
%for f_nodata in future_nodata_upcoming:
<tr class="gradeZ">
<td id="comicname"><a href="comicDetails?ComicID=${f_nodata['ComicID']}">${f_nodata['ComicName']}</a></td>
<td id="issuenumber">${f_nodata['IssueNumber']}</td>
<td id="reldate">${f_nodata['IssueDate']}</td>
<td id="status">${f_nodata['Status']}</td>
</tr>
%endfor
</tbody>
%else:
<tr><td align="center" width="100%">no upcoming future data to display</td></tr>
%endif
</table>
</div>
</div>
<div id="tabs-3">
<div class="table_wrapper">
<table class="display_no_select" id="upcoming_table">
%if futureupcoming:
<thead>
<tr>
<th id="comicname">Comic</th>
<th id="issuenumber">Issue</th>
<th id="reldate">Release Date</th>
<th id="status">Status</th>
</tr>
</thead>
<tbody>
%for f_upcome in futureupcoming:
<tr class="gradeZ">
<td id="comicname">${f_upcome['DisplayComicName']}</td>
<td id="issuenumber">${f_upcome['IssueNumber']}</td>
<td id="reldate">${f_upcome['IssueDate']}</td>
<td id="status">${f_upcome['Status']}</td>
</tr>
%endfor
</tbody>
%else:
<tr><td align="center" width="100%">no upcoming future data to display</td></tr>
%endif
</table>
</div>
</div>
</%def>
<%def name="headIncludes()">
@ -92,6 +164,9 @@
<script>
function initThisPage() {
$(function() {
$( "#tabs" ).tabs();
});
$('#wanted_table').dataTable({
"bDestroy":true,
"bFilter": false,

View File

@ -43,7 +43,7 @@ def is_exists(comicid):
return False
def addComictoDB(comicid,mismatch=None,pullupd=None,imported=None,ogcname=None,calledfrom=None,annload=None):
def addComictoDB(comicid,mismatch=None,pullupd=None,imported=None,ogcname=None,calledfrom=None,annload=None,chkwant=None,issuechk=None,issuetype=None):
# Putting this here to get around the circular import. Will try to use this to update images at later date.
# from mylar import cache
@ -141,6 +141,9 @@ def addComictoDB(comicid,mismatch=None,pullupd=None,imported=None,ogcname=None,c
else:
SeriesYear = comic['ComicYear']
#since the weekly issue check could return either annuals or issues, let's initialize it here so it carries through properly.
weeklyissue_check = []
#let's do the Annual check here.
if mylar.ANNUALS_ON:
#we need to check first to see if there are pre-existing annuals that have been manually added, or else they'll get
@ -230,9 +233,10 @@ def addComictoDB(comicid,mismatch=None,pullupd=None,imported=None,ogcname=None,c
issname = cleanname
issdate = str(firstval['Issue_Date'])
stdate = str(firstval['Store_Date'])
int_issnum = helpers.issuedigits(issnum)
newCtrl = {"IssueID": issid}
newVals = {"Issue_Number": issnum,
"Int_IssueNumber": helpers.issuedigits(issnum),
"Int_IssueNumber": int_issnum,
"IssueDate": issdate,
"ReleaseDate": stdate,
"IssueName": issname,
@ -242,6 +246,15 @@ def addComictoDB(comicid,mismatch=None,pullupd=None,imported=None,ogcname=None,c
"ReleaseComicName": sr['name'],
"Status": "Skipped"}
myDB.upsert("annuals", newVals, newCtrl)
if issuechk is not None and issuetype == 'annual':
logger.fdebug('comparing annual ' + str(issuechk) + ' .. to .. ' + str(int_issnum))
if issuechk == int_issnum:
weeklyissue_check.append({"Int_IssueNumber": int_issnum,
"Issue_Number": issnum,
"IssueDate": issdate,
"ReleaseDate": stdate})
n+=1
num_res+=1
@ -739,6 +752,17 @@ def addComictoDB(comicid,mismatch=None,pullupd=None,imported=None,ogcname=None,c
if firstval['Issue_Date'] < firstdate:
firstiss = issnum
firstdate = str(firstval['Issue_Date'])
if issuechk is not None and issuetype == 'series':
logger.fdebug('comparing ' + str(issuechk) + ' .. to .. ' + str(int_issnum))
if issuechk == int_issnum:
weeklyissue_check.append({"Int_IssueNumber": int_issnum,
"Issue_Number": issnum,
"IssueDate": issdate,
"ReleaseDate": storedate})
#--moved to lower function.
# # check if the issue already exists
# iss_exists = myDB.action('SELECT * from issues WHERE IssueID=?', [issid]).fetchone()
@ -855,6 +879,11 @@ def addComictoDB(comicid,mismatch=None,pullupd=None,imported=None,ogcname=None,c
if calledfrom == 'dbupdate':
logger.info('returning to dbupdate module')
return
elif calledfrom == 'weekly':
logger.info('Successfully refreshed ' + comic['ComicName'] + ' (' + str(SeriesYear) + '). Returning to Weekly issue comparison.')
logger.info('Update issuedata for ' + str(issuechk) + ' of : ' + str(weeklyissue_check))
return weeklyissue_check
#check for existing files...
statbefore = myDB.action("SELECT * FROM issues WHERE ComicID=? AND Issue_Number=?", [comicid,str(latestiss)]).fetchone()
@ -890,11 +919,30 @@ def addComictoDB(comicid,mismatch=None,pullupd=None,imported=None,ogcname=None,c
else:
logger.info('Already have the latest issue : #' + str(latestiss))
if chkwant is not None:
#if this isn't None, this is being called from the futureupcoming list
#a new series was added automagically, but it has more than 1 issue (probably because it was a back-dated issue)
#the chkwant is a tuple containing all the data for the given series' issues that were marked as Wanted for futureupcoming dates.
chkresults = myDB.select("SELECT * FROM issues WHERE ComicID=? AND Status='Skipped'", [comicid])
if chkresults:
logger.info('[FROM THE FUTURE CHECKLIST] Attempting to grab wanted issues for : ' + comic['ComicName'])
for result in chkresults:
for chkit in chkwant:
logger.fdebug('checking ' + str(chkit['IssueNumber']) + ' against ' + str(result['Issue_Number']))
if chkit['IssueNumber'] == result['Issue_Number']:
logger.fdebug('Searching for : ' + str(result['Issue_Number']))
logger.fdebug('Status of : ' + str(result['Status']))
search.searchforissue(result['IssueID'])
else: logger.info('No issues marked as wanted for ' + comic['ComicName'])
logger.info('Finished grabbing what I could.')
if calledfrom == 'addbyid':
logger.info('Sucessfully added ' + comic['ComicName'] + ' (' + str(SeriesYear) + ') by directly using the ComicVine ID')
return
def GCDimport(gcomicid, pullupd=None,imported=None,ogcname=None):
# this is for importing via GCD only and not using CV.
# used when volume spanning is discovered for a Comic (and can't be added using CV).

View File

@ -29,7 +29,7 @@ def solicit(month, year):
mnloop = 0
upcoming = []
publishers = {'DC Comics':'DC Comics', 'Marvel':'Marvel Comics', 'Image':'Image Comics', 'IDW':'IDW Publishing', 'Dark Horse':'Dark Horse Comics'}
publishers = {'DC Comics':'DC Comics', 'DC\'s': 'DC Comics', 'Marvel':'Marvel Comics', 'Image':'Image Comics', 'IDW':'IDW Publishing', 'Dark Horse':'Dark Horse Comics'}
while (mnloop < 5):
if year == 2014:
@ -41,13 +41,19 @@ def solicit(month, year):
else:
datestring = str(month) + str(year)
pagelinks = "http://www.comicbookresources.com/tag/solicits" + str(datestring)
logger.info('datestring:' + datestring)
logger.info('checking:' + pagelinks)
#using the solicits+datestring leaves out some entries occasionally
#should use http://www.comicbookresources.com/tag/soliciations
#then just use the logic below but instead of datestring, find the month term and
#go ahead up to +5 months.
#logger.info('datestring:' + datestring)
#logger.info('checking:' + pagelinks)
pageresponse = urllib2.urlopen ( pagelinks )
soup = BeautifulSoup (pageresponse)
cntlinks = soup.findAll('h3')
lenlinks = len(cntlinks)
logger.info( str(lenlinks) + ' results' )
#logger.info( str(lenlinks) + ' results' )
publish = []
resultURL = []
@ -63,13 +69,15 @@ def solicit(month, year):
if ('Marvel' and 'DC' and 'Image' not in headName) and ('Solicitations' in headName or 'Solicits' in headName):
pubstart = headName.find('Solicitations')
for pub in publishers:
if pub in headName[:pubstart]:
if pub in headName[:pubstart]:
#print 'publisher:' + str(publishers[pub])
publish.append(publishers[pub])
break
#publish.append( headName[:pubstart].strip() )
abc = headt.findAll('a', href=True)[0]
ID_som = abc['href'] #first instance will have the right link...
resultURL.append( ID_som )
#print '[ ' + publish[cnt] + '] Link URL: ' + resultURL[cnt]
#print '(' + str(cnt) + ') [ ' + publish[cnt] + '] Link URL: ' + resultURL[cnt]
cnt+=1
x+=1
@ -82,6 +90,8 @@ def solicit(month, year):
#this loops through each 'found' solicit page
shipdate = str(month_string) + '-' + str(year)
while ( loopthis >= 0 ):
#print 'loopthis is : ' + str(loopthis)
#print 'resultURL is : ' + str(resultURL[loopthis])
upcoming += populate(resultURL[loopthis], publish[loopthis], shipdate)
loopthis -=1
@ -152,6 +162,7 @@ def solicit(month, year):
def populate(link,publisher,shipdate):
#this is the secondary url call to populate
input = 'http://www.comicbookresources.com/' + link
#print 'checking ' + str(input)
response = urllib2.urlopen ( input )
soup = BeautifulSoup (response)
abc = soup.findAll('p')
@ -183,6 +194,8 @@ def populate(link,publisher,shipdate):
get_next = False
if prev_chk == True:
tempName = titlet.findNext(text=True)
#logger.info('prev_chk: ' + str(prev_chk) + ' ... get_next: ' + str(get_next))
#logger.info('tempName:' + tempName)
if ' TPB' not in tempName and ' HC' not in tempName and 'GN-TPB' not in tempName and 'for $1' not in tempName.lower() and 'subscription variant' not in tempName.lower() and 'poster' not in tempName.lower():
#print publisher + ' found upcoming'
if '#' in tempName:

View File

@ -271,8 +271,10 @@ def upcoming_update(ComicID, ComicName, IssueNumber, IssueDate, forcecheck=None,
def weekly_update(ComicName,IssueNumber,CStatus,CID,futurepull=None):
logger.fdebug('weekly_update of table : ' + str(ComicName) + ' #:' + str(IssueNumber))
logger.fdebug('weekly_update of table : ' + str(CStatus))
if futurepull:
logger.fdebug('future_update of table : ' + str(ComicName) + ' #:' + str(IssueNumber) + ' to a status of ' + str(CStatus))
else:
logger.fdebug('weekly_update of table : ' + str(ComicName) + ' #:' + str(IssueNumber) + ' to a status of ' + str(CStatus))
# here we update status of weekly table...
# added Issue to stop false hits on series' that have multiple releases in a week
# added CStatus to update status flags on Pullist screen
@ -296,10 +298,12 @@ def weekly_update(ComicName,IssueNumber,CStatus,CID,futurepull=None):
if futurepull is None:
myDB.upsert("weekly", newValue, controlValue)
else:
if issuecheck['ComicID'] is not None:
logger.info('checking ' + str(issuecheck['ComicID']) + ' status of : ' + str(CStatus))
if issuecheck['ComicID'] is not None and CStatus != None:
newValue = {"STATUS": "Wanted",
"ComicID": issuecheck['ComicID']}
logger.info('updating value: ' + str(newValue))
logger.info('updating control: ' + str(controlValue))
myDB.upsert("future", newValue, controlValue)
def newpullcheck(ComicName, ComicID):

View File

@ -936,6 +936,66 @@ class WebInterface(object):
add2futurewatchlist.exposed = True
def future_check(self):
    """Poll ComicVine for series on the futureupcoming table that have no data yet.

    This is the function that will check the futureupcoming table for series
    that have yet to be released and have no CV data associated with them
    (ie. #1 issues fall into this, as there is no series data to poll against
    until release). For each #1 found, Mylar will:
      - check ComicVine to see if series and/or issue data has been released
      - automatically import the series (Add A Series) upon finding a match
      - gather any other issues of that series already marked Wanted on the
        futureupcoming list and hand them to the importer so they get
        searched for once the series is added.
    Always redirects back to the home page when finished (raises
    cherrypy.HTTPRedirect, per CherryPy convention).
    """
    myDB = db.DBConnection()
    # fetchall() returns a (possibly empty) list - it never returns None,
    # so an emptiness check is required for the "nothing to do" path to work.
    chkfuture = myDB.action("SELECT * FROM futureupcoming WHERE IssueNumber='1'").fetchall()
    if not chkfuture:
        logger.info("There are not any series on your future-list that I consider to be a NEW series")
        raise cherrypy.HTTPRedirect("home")
    cflist = []
    # load the values on an entry-by-entry basis into a list of dicts, so that
    # we can re-query sql cleanly while iterating.
    for cf in chkfuture:
        cflist.append({"ComicName": cf['ComicName'],
                       "IssueDate": cf['IssueDate'],
                       "IssueNumber": cf['IssueNumber'],  # all #1's - the sql above limits the hits.
                       "Publisher": cf['Publisher'],
                       "Status": cf['Status']})
    logger.info('I will be looking to see if any information has been released for ' + str(len(cflist)) + ' series that are NEW series')
    # limit the search to the issue's own year - anything but a #1 should
    # already have associated data.
    for ser in cflist:
        logger.info('looking for new data for ' + ser['ComicName'] + '[#' + str(ser['IssueNumber']) + '] (' + str(ser['IssueDate'][-4:]) + ')')
        searchresults = mb.findComic(ser['ComicName'], mode='pullseries', issue=ser['IssueNumber'], limityear=ser['IssueDate'][-4:])
        logger.fdebug('searchresults: ' + str(searchresults))
        if len(searchresults) > 1:
            # ambiguous match - don't guess which series is the right one.
            logger.info('More than one result returned - this may have to be a manual add')
        else:
            for sr in searchresults:
                # load all additional issues of the same series from the
                # futureupcoming list that are marked as Wanted, and throw
                # them to the importer as a list of dicts so that, once
                # imported, it can run the search against them.
                chkthewanted = []
                chkwant = myDB.action("SELECT * FROM futureupcoming WHERE ComicName=? AND IssueNumber != '1' AND Status='Wanted'", [ser['ComicName']]).fetchall()
                if not chkwant:
                    logger.info('No extra issues to mark at this time for ' + ser['ComicName'])
                else:
                    for chk in chkwant:
                        chkthewanted.append({"ComicName": chk['ComicName'],
                                             "IssueDate": chk['IssueDate'],
                                             "IssueNumber": chk['IssueNumber'],
                                             "Publisher": chk['Publisher'],
                                             "Status": chk['Status']})
                    logger.info('Marking ' + str(len(chkthewanted)) + ' additional issues as Wanted from ' + ser['ComicName'] + ' series as requested')
                # chkthewanted is always bound here (possibly empty) so the
                # importer call cannot hit a NameError.
                importer.addComictoDB(sr['comicid'], "no", chkwant=chkthewanted)
                logger.info('Sucessfully imported ' + ser['ComicName'] + ' (' + str(ser['IssueDate'][-4:]) + ')')
    raise cherrypy.HTTPRedirect("home")
future_check.exposed = True
def filterpull(self):
myDB = db.DBConnection()
weeklyresults = myDB.select("SELECT * from weekly")
@ -968,6 +1028,7 @@ class WebInterface(object):
if upcomingdata is None:
logger.info('No upcoming data as of yet...')
else:
futureupcoming = []
upcoming = []
for upc in upcomingdata:
@ -984,13 +1045,13 @@ class WebInterface(object):
#logger.fdebug('comparing pubdate of: ' + str(tmpdate) + ' to now date of: ' + str(timenow))
if int(tmpdate) >= int(timenow):
if upc['Status'] == 'Wanted':
upcoming.append({"ComicName": upc['ComicName'],
"IssueNumber": upc['IssueNumber'],
"IssueDate": upc['IssueDate'],
"ComicID": upc['ComicID'],
"IssueID": upc['IssueID'],
"Status": upc['Status'],
"DisplayComicName": upc['DisplayComicName']})
futureupcoming.append({"ComicName": upc['ComicName'],
"IssueNumber": upc['IssueNumber'],
"IssueDate": upc['IssueDate'],
"ComicID": upc['ComicID'],
"IssueID": upc['IssueID'],
"Status": upc['Status'],
"DisplayComicName": upc['DisplayComicName']})
else:
#if it's greater than 7 it's a full date, and shouldn't be displayed ;)
timenow = datetime.datetime.now().strftime('%Y%m%d') #convert to yyyymmdd
@ -1018,6 +1079,9 @@ class WebInterface(object):
ann_list += annuals_list
issues += annuals_list
#let's straightload the series that have no issue data associated as of yet (ie. new series) form the futurepulllist
future_nodata_upcoming = myDB.select('SELECT * FROM futureupcoming')
#let's move any items from the upcoming table into the wanted table if the date has already passed.
#gather the list...
mvupcome = myDB.select("SELECT * from upcoming WHERE IssueDate < date('now') order by IssueDate DESC")
@ -1053,7 +1117,7 @@ class WebInterface(object):
deleteit = myDB.action("DELETE from upcoming WHERE ComicName=? AND IssueNumber=?", [mvup['ComicName'],mvup['IssueNumber']])
return serve_template(templatename="upcoming.html", title="Upcoming", upcoming=upcoming, issues=issues, ann_list=ann_list)
return serve_template(templatename="upcoming.html", title="Upcoming", upcoming=upcoming, issues=issues, ann_list=ann_list, futureupcoming=futureupcoming, future_nodata_upcoming=future_nodata_upcoming)
upcoming.exposed = True
def skipped2wanted(self, comicid, fromupdate=None):
@ -2605,14 +2669,12 @@ class WebInterface(object):
mylar.CMTAGGER_PATH = re.sub(os.path.basename(mylar.CMTAGGER_PATH), '', mylar.CMTAGGER_PATH)
logger.fdebug("Removed application name from ComicTagger path")
logger.info('nzb_downloader')
#legacy support of older config - reload into old values for consistency.
if mylar.NZB_DOWNLOADER == 0: mylar.USE_SABNZBD = True
elif mylar.NZB_DOWNLOADER == 1: mylar.USE_NZBGET = True
elif mylar.NZB_DOWNLOADER == 2: mylar.USE_BLACKHOLE = True
# Write the config
logger.info('sending to config..')
mylar.config_write()
raise cherrypy.HTTPRedirect("config")

View File

@ -611,7 +611,7 @@ def pullitcheck(comic1off_name=None,comic1off_id=None,forcecheck=None, futurepul
#would identify the difference between two #1 titles within the same series year, but have different publishing dates.
#Wolverine (2013) & Wolverine (2014) are good examples of this situation.
#of course initially, the issue data for the newer series wouldn't have any issue data associated with it so it would be
#a null value, but given that the 2013 series (as an exmaple) would be from 2013-05-01, it obviously wouldn't be a match to
#a null value, but given that the 2013 series (as an example) would be from 2013-05-01, it obviously wouldn't be a match to
#the current date & year (2014). Throwing out that, we could just assume that the 2014 would match the #1.
#get the issue number of the 'weeklypull' series.
@ -622,18 +622,48 @@ def pullitcheck(comic1off_name=None,comic1off_id=None,forcecheck=None, futurepul
### week['ISSUE'] #issue # from pullist
### week['SHIPDATE'] #weeklypull-list date
### comicid[cnt] #comicid of matched series
datecheck = loaditup(watchcomic, comicid[cnt], week['ISSUE'])
logger.fdebug('Now checking date comparison using an issue store date of ' + str(datecheck))
if datecheck == 'no results':
## if it's a futurepull, the dates get mixed up when two titles exist of the same name
## ie. Wolverine-2011 & Wolverine-2014
## we need to set the compare date to today's date ( Now() ) in this case.
if futurepull:
usedate = datetime.datetime.now().strftime('%Y%m%d') #convert to yyyymmdd
else:
usedate = re.sub("[^0-9]", "", week['SHIPDATE'])
if 'ANNUAL' in comicnm.upper():
chktype = 'annual'
else:
chktype = 'series'
datevalues = loaditup(watchcomic, comicid[cnt], week['ISSUE'], chktype)
date_downloaded = None
if datevalues == 'no results':
pass
elif datecheck >= week['SHIPDATE']:
#logger.info('The issue date of issue #' + str(week['ISSUE']) + ' was on ' + str(datecheck) + ' which is on/ahead of ' + str(week['SHIPDATE']))
logger.fdebug('Store Date falls within acceptable range - series MATCH')
pass
elif datecheck < week['SHIPDATE']:
logger.fdebug('The issue date of issue #' + str(week['ISSUE']) + ' was on ' + str(datecheck) + ' which is prior to ' + str(week['SHIPDATE']))
break
else:
datecheck = datevalues[0]['issuedate']
datestatus = datevalues[0]['status']
logger.fdebug('Now checking date comparison using an issue store date of ' + str(datecheck))
logger.fdebug('Using a compare date (usedate) of ' + str(usedate))
#logger.fdebug('Status of ' + str(datestatus))
if int(datecheck) >= int(usedate):
#logger.info('The issue date of issue #' + str(week['ISSUE']) + ' was on ' + str(datecheck) + ' which is on/ahead of ' + str(week['SHIPDATE']))
logger.fdebug('Store Date falls within acceptable range - series MATCH')
elif int(datecheck) < int(usedate):
logger.fdebug('The issue date of issue #' + str(week['ISSUE']) + ' was on ' + str(datecheck) + ' which is prior to ' + str(week['SHIPDATE']))
if date_downloaded is None:
break
if datestatus != 'Downloaded' and datestatus != 'Archived':
pass
else:
logger.fdebug('Issue #' + str(week['ISSUE']) + ' already downloaded.')
date_downloaded = datestatus
if ("NA" not in week['ISSUE']) and ("HC" not in week['ISSUE']):
if ("COMBO PACK" not in week['EXTRA']) and ("2ND PTG" not in week['EXTRA']) and ("3RD PTG" not in week['EXTRA']):
@ -684,7 +714,11 @@ def pullitcheck(comic1off_name=None,comic1off_id=None,forcecheck=None, futurepul
else:
cstatusid = ComicID
fp = "yes"
updater.weekly_update(ComicName=week['COMIC'], IssueNumber=ComicIssue, CStatus=cstatus, CID=cstatusid, futurepull=fp)
if date_downloaded is None:
updater.weekly_update(ComicName=week['COMIC'], IssueNumber=ComicIssue, CStatus=cstatus, CID=cstatusid, futurepull=fp)
else:
updater.weekly_update(ComicName=week['COMIC'], IssueNumber=ComicIssue, CStatus=date_downloaded, CID=cstatusid, futurepull=fp)
break
break
break
@ -703,17 +737,53 @@ def check(fname, txt):
return any(txt in line for line in dataf)
def loaditup(comicname, comicid, issue):
def loaditup(comicname, comicid, issue, chktype):
myDB = db.DBConnection()
issue_number = helpers.issuedigits(issue)
logger.fdebug('[' + comicname + '] trying to locate issue ' + str(issue) + ' to do comparitive issue analysis for pull-list')
issueload = myDB.action('SELECT * FROM issues WHERE ComicID=? AND Int_IssueNumber=?', [comicid, issue_number]).fetchone()
if chktype == 'annual':
typedisplay = 'annual issue'
logger.fdebug('[' + comicname + '] trying to locate ' + str(typedisplay) + ' ' + str(issue) + ' to do comparitive issue analysis for pull-list')
issueload = myDB.action('SELECT * FROM annuals WHERE ComicID=? AND Int_IssueNumber=?', [comicid, issue_number]).fetchone()
else:
typedisplay = 'issue'
logger.fdebug('[' + comicname + '] trying to locate ' + str(typedisplay) + ' ' + str(issue) + ' to do comparitive issue analysis for pull-list')
issueload = myDB.action('SELECT * FROM issues WHERE ComicID=? AND Int_IssueNumber=?', [comicid, issue_number]).fetchone()
if issueload is None:
logger.fdebug('No results matched for Issue number - either this is a NEW series with no data yet, or something is wrong')
return 'no results'
if issueload['ReleaseDate'] is not None or issueload['ReleaseDate'] is not 'None':
logger.fdebug('Returning Release Date for issue # ' + str(issue) + ' of ' + str(issueload['ReleaseDate']))
return issueload['ReleaseDate']
dataissue = []
releasedate = issueload['ReleaseDate']
storedate = issueload['IssueDate']
status = issueload['Status']
if releasedate == '0000-00-00':
logger.fdebug('Store date of 0000-00-00 returned for ' + str(typedisplay) + ' # ' + str(issue) + '. Refreshing series to see if valid date present')
mismatch = 'no'
issuerecheck = mylar.importer.addComictoDB(comicid,mismatch,calledfrom='weekly',issuechk=issue_number,issuetype=chktype)
if issuerecheck is not None:
for il in issuerecheck:
#this is only one record..
releasedate = il['IssueDate']
storedate = il['ReleaseDate']
status = il['Status']
logger.fdebug('issue-recheck releasedate is : ' + str(releasedate))
logger.fdebug('issue-recheck storedate of : ' + str(storedate))
if releasedate is not None and releasedate != "None" and releasedate != "":
logger.fdebug('Returning Release Date for ' + str(typedisplay) + ' # ' + str(issue) + ' of ' + str(releasedate))
thedate = re.sub("[^0-9]", "", releasedate) #convert date to numerics only (should be in yyyymmdd)
#return releasedate
else:
logger.fdebug('Returning Publication Date for issue # ' + str(issue) + ' of ' + str(issueload['PublicationDate']))
return issueload['PublicationDate']
logger.fdebug('Returning Publication Date for issue ' + str(typedisplay) + ' # ' + str(issue) + ' of ' + str(storedate))
if storedate is None and storedate != "None" and storedate != "":
logger.fdebug('no issue data available - both release date & store date. Returning no results')
return 'no results'
thedate = re.sub("[^0-9]", "", storedate) #convert date to numerics only (should be in yyyymmdd)
#return storedate
dataissue.append({"issuedate": thedate,
"status": status})
return dataissue