Added exception listing for GCD imports; multi-volume series selection; fixed multiple-issue selection; the Upcoming tab now works; the pull list marks upcoming issues on the comic details page; and a bunch of other stuff

This commit is contained in:
evilhero 2012-10-16 04:16:29 -04:00
parent 1c338d0a9b
commit 964338f0b2
14 changed files with 902 additions and 240 deletions

View File

@ -27,7 +27,7 @@
<img src="interfaces/default/images/loader_black.gif" alt="loading" style="float:left; margin-right: 5px;"/>
%endif
<div class="row">
<a href="http://comicvine.com/volume/${comic['ComicID']}">${comic['ComicName']} (${comic['ComicYear']})</a>
<a href="http://comicvine.com/volume/49-${comic['ComicID']}" target="_blank">${comic['ComicName']} (${comic['ComicYear']})</a>
%if comic['Status'] == 'Loading':
<h3><i>(Comic information is currently being loaded)</i></h3>
%endif
@ -178,16 +178,17 @@
</table>
</div>
</div>
<div class="table_wrapper">
<form action="markissues" method="get" id="markissues">
<input type="hidden" name="ComicID" value=${comic['ComicID']}>
<div id="markissue">Mark selected issues as
<select name="action" onChange="doAjaxCall('markissues',$(this),'table',true);" data-error="You didn't select any issues">
<select name="action" onChange="doAjaxCall('markissues',$(this),'table',true);">
<option disabled="disabled" selected="selected">Choose...</option>
<option value="Wanted">Wanted</option>
<option value="Skipped">Skipped</option>
<option value="Downloaded">Downloaded</option>
</select>
selected issues
<input type="hidden" value="Go">
</div>
<table class="display" id="issue_table">
@ -197,7 +198,6 @@
<th id="issuenumber">Number</th>
<th id="issuename">Name</th>
<th id="reldate">Date</th>
<th id="type">Type</th>
<th id="status">Status</th>
</tr>
</thead>
@ -218,7 +218,6 @@
<td id="issuenumber">${issue['Issue_Number']}</td>
<td id="issuename"><a href="issuePage?IssueID=${issue['IssueID']}">${issue['IssueName']}</a></td>
<td id="reldate">${issue['IssueDate']}</td>
<td id="type">${issue['Type']}</td>
<td id="status">${issue['Status']}
%if issue['Status'] == 'Skipped':
[<a href="#" onclick="doAjaxCall('queueissue?ComicID=${issue['ComicID']}&IssueID=${issue['IssueID']}&ComicIssue=${issue['Issue_Number']}&ComicYear=${issue['IssueDate']}&mode=want',$(this),'table')">want</a>]
@ -233,7 +232,7 @@
</tbody>
</table>
</form>
</div>
</%def>
<%def name="headIncludes()">
@ -283,7 +282,6 @@
null,
null,
null,
null,
null
],

View File

@ -21,6 +21,8 @@
<link rel="apple-touch-icon" href="images/mylarlogo.png">
<link rel="stylesheet" href="interfaces/default/css/style.css">
<link rel="stylesheet" href="interfaces/default/css/jquery-ui.css">
<link rel="icon" href="images/favicon.ico" type="image/x-icon">
<link rel="shortcut icon" href="images/favicon.ico" type="image/x-icon">
${next.headIncludes()}
<script src="js/libs/modernizr-1.7.min.js"></script>

View File

@ -1220,6 +1220,42 @@ div#artistheader h2 a {
filter: progid:dximagetransform.microsoft.gradient(startColorstr=#fafafa, endColorstr=#eaeaea) !important;
-ms-filter: progid:dximagetransform.microsoft.gradient(startColorstr=#fafafa, endColorstr=#eaeaea) !important;
}
#searchmanage_table th#comicname {
min-width: 325px;
text-align: left;
}
#searchmanage_table th#comicpub {
min-width: 100px;
text-align: left;
}
#searchmanage_table th#comicissues {
min-width: 75px;
text-align: left;
}
#searchmanage_table th#addcomic {
min-width: 95px;
text-align: left;
}
#searchmanage_table td#comicname {
min-width: 325px;
text-align: left;
vertical-align: middle;
}
#searchmanage_table td#comicpub {
min-width: 100px;
text-align: left;
vertical-align: middle;
}
#searchmanage_table td#comicissues {
min-width: 75px;
text-align: left;
vertical-align: middle;
}
#searchmanage_table td#addcomic {
min-width: 95px;
vertical-align: middle;
}
.progress-container {
background: #FFF;
border: 1px solid #ccc;

Binary file not shown.


View File

@ -0,0 +1,72 @@
<%inherit file="base.html" />
<%!
import mylar
from mylar.helpers import checked
%>
<%def name="headerIncludes()">
<div id="subhead_container">
<div id="subhead_menu">
<a id="menu_link_edit" href="manageComics">Some Button</a>
</div>
</div>
</%def>
<%def name="body()">
<div id="paddingheader">
<h1 class="clearfix">Search Question</h1>
</div>
<div id="tabs">
<ul>
<li><a href="#tabs-1">More Information</a></li>
</ul>
<div id="tabs-1" class="configtable">
<fieldset>
<form action="searchScan" method="GET" id="searchScan">
<legend>Volume-Spanning Comic Series</legend>
<p><strong>I've detected multiple series with the same name</strong></p>
<p>The comic you've selected to watch is actually part of a larger series
that spans multiple volumes. Because of the way the data is provided, I can't
reliably maintain an up-to-date listing across volumes, so please pick the
volume you want to add from the list below.<br/></p>
<br/><br/>
<table class="display" id="searchmanage_table">
<thead>
<tr>
<th id="comicname">Comic Name</th>
<th id="comicpub">Year</th>
<th id="comicissues">Issues</th>
<th id="addcomic"></th>
</tr>
</thead>
<tbody>
%if sresults:
%for result in sresults:
<tr>
<td id="comicname"><title="${result['ComicName']}">${result['ComicName']}</td>
<td id="comicpub"><title="${result['ComicYear']}">${result['ComicYear']}</td>
<td id="comicissues">${result['ComicIssues']}</td>
<td id="addcomic"><a href="GCDaddComic?comicid=${result['ComicID']}&comicname=${result['ComicName']}&comicyear=${result['ComicYear']}&comiccover=${result['ComicCover']}&comicissues=${result['ComicIssues']}&comicpublisher=${result['ComicPublisher']}"><span class="ui-icon-plus"></span>Add Series</a></td>
</tr>
%endfor
%endif
</tbody>
</table>
</form>
</fieldset>
</div>
</div>
</%def>
<%def name="javascriptIncludes()">
<script>
function initThisPage() {
jQuery( "#tabs" ).tabs();
initActions();
};
$(document).ready(function() {
initThisPage();
});
</script>
</%def>
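For reference, the template above iterates "sresults"; a sketch of the row shape it assumes, mirroring the dicts that webserve.addComic builds later in this commit (every field value below is a made-up placeholder):

sresults = [{
    'ComicID': '12345',                     # GCD series id (placeholder)
    'ComicName': 'Some Series',             # placeholder
    'ComicYear': '1988 - 2003',             # GCD publication-dates string
    'ComicIssues': '189',                   # placeholder count
    'ComicPublisher': 'Some Publisher',     # placeholder
    'ComicCover': 'http://example.org/cover.jpg'  # placeholder URL
}]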

View File

@ -58,14 +58,13 @@
<th id="comicname">Comic</th>
<th id="issuenumber">Issue</th>
<th id="reldate">Release Date</th>
<th id="type">Type</th>
<th id="status">Status</th>
</tr>
</thead>
<tbody>
%for upcome in upcoming:
<tr class="gradeZ">
<td id="comicname">${upcome['ComicName']}</td>
<td id="comicname"><a href="artistPage?ComicID=${upcome['ComicID']}">${upcome['ComicName']}</a></td>
<td id="issuenumber"><a href="albumPage?IssueID=${upcome['ComicID']}">${upcome['IssueNumber']}</a></td>
<td id="reldate">${upcome['IssueDate']}</td>
<td id="status">${upcome['Status']}</td>

exceptions.csv Normal file
View File

@ -0,0 +1,14 @@
4,2045,none,1482/10251/6029/11218/62349
2,2127,none,1570/7794/11288
1,18033,none,19531/25058
99,3092,2605,none
99,50389,66832,none
99,42947,61242,none
99,51382,68125,none
99,51622,68322,none
99,4937,4611,none
99,7300,7880,none
99,46744,67757,none
99,42821,60934,none
99,42947,61242,none
99,42322,60917,none

View File

@ -20,6 +20,7 @@ import os, sys, subprocess
import threading
import webbrowser
import sqlite3
import csv
from lib.apscheduler.scheduler import Scheduler
from lib.configobj import ConfigObj
@ -519,6 +520,24 @@ def dbcheck():
c.execute('CREATE TABLE IF NOT EXISTS upcoming (ComicName TEXT, IssueNumber TEXT, ComicID TEXT, IssueID TEXT, IssueDate TEXT, Status TEXT)')
# c.execute('CREATE TABLE IF NOT EXISTS weekly (SHIPDATE, PUBLISHER text, ISSUE text, COMIC VARCHAR(150), EXTRA text, STATUS text)')
#new
logger.info(u"Populating Exception listings into Mylar....")
c.execute('CREATE TABLE IF NOT EXISTS exceptions (variloop TEXT, ComicID TEXT, NewComicID TEXT, GComicID TEXT)')
csvfile = open('exceptions.csv', "rb")
creader = csv.reader(csvfile, delimiter=',')
for row in creader:
#print (row)
try:
c.execute("INSERT INTO exceptions VALUES (?,?,?,?);", row)
except Exception, e:
#print ("Error - invald arguments...-skipping")
pass
csvfile.close()
#c.executemany("INSERT INTO exceptions VALUES (?, ?);", to_db)
#add in the late players to the game....
try:
c.execute('SELECT LastUpdated from comics')
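For reference, each exceptions.csv row loaded above is (variloop, ComicID, NewComicID, GComicID): a variloop of 99 flags a known ComicVine/GCD name mismatch whose GCD series id sits in NewComicID, while the multi-volume rows carry a slash-separated list of candidate GCD series ids in GComicID. A minimal standalone sketch (not part of this commit) of how a row is interpreted, mirroring the handling in webserve.addComic further down:

import csv

def classify_exception(row):
    # row layout: variloop, ComicID (ComicVine), NewComicID (GCD), GComicID
    variloop, comicid, newcomicid, gcomicid = row
    if variloop == '99':
        # known name mismatch - scrape the GCD series id directly
        return ('mismatch', '/series/' + newcomicid + '/')
    # multi-volume series - GComicID holds the candidate GCD series ids
    return ('multi-volume', gcomicid.split('/'))

csvfile = open('exceptions.csv', "rb")
for row in csv.reader(csvfile, delimiter=','):
    print (classify_exception(row))
csvfile.close()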

View File

@ -14,11 +14,12 @@
# along with Mylar. If not, see <http://www.gnu.org/licenses/>.
import time
import os
import os, errno
import sys
import shlex
import datetime
import re
import urllib
import mylar
from mylar import logger, helpers, db, mb, albumart, cv, parseit, filechecker, search, updater
@ -38,8 +39,7 @@ def is_exists(comicid):
return False
def addComictoDB(comicid):
def addComictoDB(comicid,mismatch=None):
# Putting this here to get around the circular import. Will try to use this to update images at a later date.
from mylar import cache
@ -54,14 +54,16 @@ def addComictoDB(comicid):
if dbcomic is None:
newValueDict = {"ComicName": "Comic ID: %s" % (comicid),
"Status": "Loading"}
comlocation = None
else:
newValueDict = {"Status": "Loading"}
comlocation = dbcomic['ComicLocation']
myDB.upsert("comics", newValueDict, controlValueDict)
# we need to lookup the info for the requested ComicID in full now
comic = cv.getComic(comicid,'comic')
#comic = myDB.action('SELECT * FROM comics WHERE ComicID=?', [comicid]).fetchone()
if not comic:
logger.warn("Error fetching comic. ID for : " + comicid)
if dbcomic is None:
@ -82,39 +84,58 @@ def addComictoDB(comicid):
#--Now that we know ComicName, let's try some scraping
#--Start
# gcd will return issue details (most importantly publishing date)
gcdinfo=parseit.GCDScraper(comic['ComicName'], comic['ComicYear'], comic['ComicIssues'], comicid)
if gcdinfo == "No Match":
logger.warn("No matching result found for " + comic['ComicName'] + " (" + comic['ComicYear'] + ")" )
updater.no_searchresults(comicid)
nomatch = "true"
return nomatch
if mismatch == "no":
gcdinfo=parseit.GCDScraper(comic['ComicName'], comic['ComicYear'], comic['ComicIssues'], comicid)
mismatch_com = "no"
if gcdinfo == "No Match":
updater.no_searchresults(comicid)
nomatch = "true"
logger.info(u"There was an error when trying to add " + comic['ComicName'] + " (" + comic['ComicYear'] + ")" )
return nomatch
else:
mismatch_com = "yes"
#print ("gcdinfo:" + str(gcdinfo))
elif mismatch == "yes":
CV_EXcomicid = myDB.action("SELECT * from exceptions WHERE ComicID=?", [comicid]).fetchone()
if CV_EXcomicid['variloop'] is None: pass
else:
vari_loop = CV_EXcomicid['variloop']
NewComicID = CV_EXcomicid['NewComicID']
gcomicid = CV_EXcomicid['GComicID']
resultURL = "/series/" + str(NewComicID) + "/"
#print ("variloop" + str(CV_EXcomicid['variloop']))
#if vari_loop == '99':
gcdinfo = parseit.GCDdetails(comseries=None, resultURL=resultURL, vari_loop=0, ComicID=comicid, TotalIssues=0, issvariation="no", resultPublished=None)
logger.info(u"Sucessfully retrieved details for " + comic['ComicName'] )
# print ("Series Published" + parseit.resultPublished)
#--End
#comic book location on machine
# setup default location here
if ':' in comic['ComicName']:
comicdir = comic['ComicName'].replace(':','')
else: comicdir = comic['ComicName']
comlocation = mylar.DESTINATION_DIR + "/" + comicdir + " (" + comic['ComicYear'] + ")"
if mylar.DESTINATION_DIR == "":
logger.error(u"There is no general directory specified - please specify in Config/Post-Processing.")
return
if mylar.REPLACE_SPACES:
#mylar.REPLACE_CHAR ...determines what to replace spaces with underscore or dot
comlocation = comlocation.replace(' ', mylar.REPLACE_CHAR)
#if it doesn't exist - create it (otherwise will bugger up later on)
if os.path.isdir(str(comlocation)):
logger.info(u"Directory (" + str(comlocation) + ") already exists! Continuing...")
else:
#print ("Directory doesn't exist!")
try:
os.makedirs(str(comlocation))
logger.info(u"Directory successfully created at: " + str(comlocation))
except OSError as e:
if e.errno != errno.EEXIST:
raise
if comlocation is None:
if ':' in comic['ComicName']:
comicdir = comic['ComicName'].replace(':','')
else: comicdir = comic['ComicName']
comlocation = mylar.DESTINATION_DIR + "/" + comicdir + " (" + comic['ComicYear'] + ")"
if mylar.DESTINATION_DIR == "":
logger.error(u"There is no general directory specified - please specify in Config/Post-Processing.")
return
if mylar.REPLACE_SPACES:
#mylar.REPLACE_CHAR ...determines what to replace spaces with underscore or dot
comlocation = comlocation.replace(' ', mylar.REPLACE_CHAR)
#if it doesn't exist - create it (otherwise will bugger up later on)
if os.path.isdir(str(comlocation)):
logger.info(u"Directory (" + str(comlocation) + ") already exists! Continuing...")
else:
#print ("Directory doesn't exist!")
try:
os.makedirs(str(comlocation))
logger.info(u"Directory successfully created at: " + str(comlocation))
except OSError as e:
if e.errno != errno.EEXIST:
raise
#try to account for CV not updating new issues as fast as GCD
#seems CV doesn't update total counts
@ -124,20 +145,45 @@ def addComictoDB(comicid):
else:
comicIssues = comic['ComicIssues']
#let's download the image...
if os.path.exists(mylar.CACHE_DIR):pass
else:
#let's make the dir.
try:
os.makedirs(str(mylar.CACHE_DIR))
logger.info(u"Cache Directory successfully created at: " + str(mylar.CACHE_DIR))
except OSError as e:
if e.errno != errno.EEXIST:
raise
coverfile = mylar.CACHE_DIR + "/" + str(comicid) + ".jpg"
#try:
urllib.urlretrieve(str(comic['ComicImage']), str(coverfile))
try:
with open(str(coverfile)) as f:
ComicImage = "cache/" + str(comicid) + ".jpg"
logger.info(u"Sucessfully retrieved cover for " + str(comic['ComicName']))
except IOError as e:
logger.error(u"Unable to save cover locally at this time.")
controlValueDict = {"ComicID": comicid}
newValueDict = {"ComicName": comic['ComicName'],
"ComicSortName": sortname,
"ComicYear": comic['ComicYear'],
"ComicImage": comic['ComicImage'],
"ComicImage": ComicImage,
"Total": comicIssues,
"ComicLocation": comlocation,
"ComicPublisher": comic['ComicPublisher'],
"ComicPublished": parseit.resultPublished,
"ComicPublished": gcdinfo['resultPublished'],
"DateAdded": helpers.today(),
"Status": "Loading"}
myDB.upsert("comics", newValueDict, controlValueDict)
issued = cv.getComic(comicid,'issue')
logger.info(u"Sucessfully retrieved issue details for " + comic['ComicName'] )
n = 0
@ -152,7 +198,7 @@ def addComictoDB(comicid):
latestdate = "0000-00-00"
#print ("total issues:" + str(iscnt))
#---removed NEW code here---
logger.info(u"Now adding/updating issues for" + comic['ComicName'])
logger.info(u"Now adding/updating issues for " + comic['ComicName'])
# file check to see if issue exists
logger.info(u"Checking directory for existing issues.")
@ -278,9 +324,9 @@ def addComictoDB(comicid):
logger.info(u"Updating complete for: " + comic['ComicName'])
# let's check the pullist for anything at this time as well since we're here.
#if mylar.AUTOWANT_UPCOMING:
# logger.info(u"Checking this week's pullist for new issues of " + str(comic['ComicName']))
# updater.newpullcheck()
if mylar.AUTOWANT_UPCOMING:
logger.info(u"Checking this week's pullist for new issues of " + str(comic['ComicName']))
updater.newpullcheck()
#here we grab issues that have been marked as wanted above...
@ -297,3 +343,264 @@ def addComictoDB(comicid):
else: logger.info(u"No issues marked as wanted for " + comic['ComicName'])
logger.info(u"Finished grabbing what I could.")
def GCDimport(gcomicid):
# this is for importing via GCD only and not using CV.
# used when volume spanning is discovered for a Comic (and can't be added using CV).
# Issue Counts are wrong (and can't be added).
# because Comicvine ComicID and GCD ComicID could be identical at some random point, let's distinguish.
# CV = comicid, GCD = gcomicid :) (ie. CV=2740, GCD=G3719)
gcdcomicid = gcomicid
myDB = db.DBConnection()
# We need the current minimal info in the database instantly
# so we don't throw a 500 error when we redirect to the artistPage
controlValueDict = {"ComicID": gcdcomicid}
comic = myDB.action('SELECT ComicName, ComicYear, Total, ComicPublished, ComicImage, ComicLocation FROM comics WHERE ComicID=?', [gcomicid]).fetchone()
ComicName = comic[0]
ComicYear = comic[1]
ComicIssues = comic[2]
comlocation = comic[5]
#ComicImage = comic[4]
#print ("Comic:" + str(ComicName))
newValueDict = {"Status": "Loading"}
myDB.upsert("comics", newValueDict, controlValueDict)
# we need to lookup the info for the requested ComicID in full now
#comic = cv.getComic(comicid,'comic')
if not comic:
logger.warn("Error fetching comic. ID for : " + gcdcomicid)
if dbcomic is None:
newValueDict = {"ComicName": "Fetch failed, try refreshing. (%s)" % (gcdcomicid),
"Status": "Active"}
else:
newValueDict = {"Status": "Active"}
myDB.upsert("comics", newValueDict, controlValueDict)
return
if ComicName.startswith('The '):
sortname = ComicName[4:]
else:
sortname = ComicName
logger.info(u"Now adding/updating: " + ComicName)
#--Now that we know ComicName, let's try some scraping
#--Start
# gcd will return issue details (most importantly publishing date)
comicid = gcomicid[1:]
resultURL = "/series/" + str(comicid) + "/"
gcdinfo=parseit.GCDdetails(comseries=None, resultURL=resultURL, vari_loop=0, ComicID=gcdcomicid, TotalIssues=ComicIssues, issvariation=None, resultPublished=None)
if gcdinfo == "No Match":
logger.warn("No matching result found for " + ComicName + " (" + ComicYear + ")" )
updater.no_searchresults(gcomicid)
nomatch = "true"
return nomatch
logger.info(u"Sucessfully retrieved details for " + ComicName )
# print ("Series Published" + parseit.resultPublished)
#--End
ComicImage = gcdinfo['ComicImage']
#comic book location on machine
# setup default location here
if comlocation is None:
if ':' in ComicName:
comicdir = ComicName.replace(':','')
else: comicdir = ComicName
comlocation = mylar.DESTINATION_DIR + "/" + comicdir + " (" + ComicYear + ")"
if mylar.DESTINATION_DIR == "":
logger.error(u"There is no general directory specified - please specify in Config/Post-Processing.")
return
if mylar.REPLACE_SPACES:
#mylar.REPLACE_CHAR ...determines what to replace spaces with underscore or dot
comlocation = comlocation.replace(' ', mylar.REPLACE_CHAR)
#if it doesn't exist - create it (otherwise will bugger up later on)
if os.path.isdir(str(comlocation)):
logger.info(u"Directory (" + str(comlocation) + ") already exists! Continuing...")
else:
#print ("Directory doesn't exist!")
try:
os.makedirs(str(comlocation))
logger.info(u"Directory successfully created at: " + str(comlocation))
except OSError as e:
if e.errno != errno.EEXIST:
raise
comicIssues = gcdinfo['totalissues']
#let's download the image...
if os.path.exists(mylar.CACHE_DIR):pass
else:
#let's make the dir.
try:
os.makedirs(str(mylar.CACHE_DIR))
logger.info(u"Cache Directory successfully created at: " + str(mylar.CACHE_DIR))
except OSError as e:
if e.errno != errno.EEXIST:
raise
coverfile = mylar.CACHE_DIR + "/" + str(gcomicid) + ".jpg"
urllib.urlretrieve(str(ComicImage), str(coverfile))
try:
with open(str(coverfile)) as f:
ComicImage = "cache/" + str(gcomicid) + ".jpg"
logger.info(u"Sucessfully retrieved cover for " + str(ComicName))
except IOError as e:
logger.error(u"Unable to save cover locally at this time.")
controlValueDict = {"ComicID": gcomicid}
newValueDict = {"ComicName": ComicName,
"ComicSortName": sortname,
"ComicYear": ComicYear,
"Total": comicIssues,
"ComicLocation": comlocation,
"ComicImage": ComicImage,
#"ComicPublisher": comic['ComicPublisher'],
#"ComicPublished": comicPublished,
"DateAdded": helpers.today(),
"Status": "Loading"}
myDB.upsert("comics", newValueDict, controlValueDict)
logger.info(u"Sucessfully retrieved issue details for " + ComicName )
n = 0
iscnt = int(comicIssues)
issnum = []
issname = []
issdate = []
int_issnum = []
#let's start issue #'s at 0 -- thanks to DC for the new 52 reboot! :)
latestiss = "0"
latestdate = "0000-00-00"
#print ("total issues:" + str(iscnt))
#---removed NEW code here---
logger.info(u"Now adding/updating issues for " + ComicName)
bb = 0
while (bb <= iscnt):
#---NEW.code
try:
gcdval = gcdinfo['gcdchoice'][bb]
print ("gcdval: " + str(gcdval))
except IndexError:
#account for gcd variation here
if gcdinfo['gcdvariation'] == 'gcd':
#print ("gcd-variation accounted for.")
issdate = '0000-00-00'
int_issnum = int ( gcdis / 1000 )
break
if 'nn' in str(gcdval['GCDIssue']):
#no number detected - GN, TP or the like
logger.warn(u"Non Series detected (Graphic Novel, etc) - cannot proceed at this time.")
updater.no_searchresults(comicid)
return
elif '.' in str(gcdval['GCDIssue']):
issst = str(gcdval['GCDIssue']).find('.')
issb4dec = str(gcdval['GCDIssue'])[:issst]
#if the length of decimal is only 1 digit, assume it's a tenth
decis = str(gcdval['GCDIssue'])[issst+1:]
if len(decis) == 1:
decisval = int(decis) * 10
issaftdec = str(decisval)
if len(decis) == 2:
decisval = int(decis)
issaftdec = str(decisval)
if int(issaftdec) == 0: issaftdec = "00"
gcd_issue = issb4dec + "." + issaftdec
gcdis = (int(issb4dec) * 1000) + decisval
else:
gcdis = int(str(gcdval['GCDIssue'])) * 1000
gcd_issue = str(gcdval['GCDIssue'])
#get the latest issue / date using the date.
int_issnum = int( gcdis / 1000 )
issdate = str(gcdval['GCDDate'])
issid = "G" + str(gcdval['IssueID'])
if gcdval['GCDDate'] > latestdate:
latestiss = str(gcd_issue)
latestdate = str(gcdval['GCDDate'])
#print("(" + str(bb) + ") IssueID: " + str(issid) + " IssueNo: " + str(gcd_issue) + " Date" + str(issdate) )
#---END.NEW.
# check if the issue already exists
iss_exists = myDB.select('SELECT * from issues WHERE IssueID=?', [issid])
# Only change the status & add DateAdded if the issue is not already in the database
if not len(iss_exists):
newValueDict['DateAdded'] = helpers.today()
#adjust for inconsistencies in GCD date format - some dates have ? which borks up things.
if "?" in str(issdate):
issdate = "0000-00-00"
controlValueDict = {"IssueID": issid}
newValueDict = {"ComicID": gcomicid,
"ComicName": ComicName,
"Issue_Number": gcd_issue,
"IssueDate": issdate,
"Int_IssueNumber": int_issnum
}
#print ("issueid:" + str(controlValueDict))
#print ("values:" + str(newValueDict))
if mylar.AUTOWANT_ALL:
newValueDict['Status'] = "Wanted"
#elif release_dict['releasedate'] > helpers.today() and mylar.AUTOWANT_UPCOMING:
# newValueDict['Status'] = "Wanted"
else:
newValueDict['Status'] = "Skipped"
myDB.upsert("issues", newValueDict, controlValueDict)
bb+=1
# logger.debug(u"Updating comic cache for " + ComicName)
# cache.getThumb(ComicID=issue['issueid'])
# logger.debug(u"Updating cache for: " + ComicName)
# cache.getThumb(ComicIDcomicid)
#check for existing files...
updater.forceRescan(gcomicid)
controlValueStat = {"ComicID": gcomicid}
newValueStat = {"Status": "Active",
"LatestIssue": latestiss,
"LatestDate": latestdate,
"LastUpdated": helpers.now()
}
myDB.upsert("comics", newValueStat, controlValueStat)
logger.info(u"Updating complete for: " + ComicName)
# let's check the pullist for anything at this time as well since we're here.
if mylar.AUTOWANT_UPCOMING:
logger.info(u"Checking this week's pullist for new issues of " + str(ComicName))
updater.newpullcheck()
#here we grab issues that have been marked as wanted above...
results = myDB.select("SELECT * FROM issues where ComicID=? AND Status='Wanted'", [gcomicid])
if results:
logger.info(u"Attempting to grab wanted issues for : " + ComicName)
for result in results:
foundNZB = "none"
if (mylar.NZBSU or mylar.DOGNZB or mylar.EXPERIMENTAL) and (mylar.SAB_HOST):
foundNZB = search.searchforissue(result['IssueID'])
if foundNZB == "yes":
updater.foundsearch(result['ComicID'], result['IssueID'])
else: logger.info(u"No issues marked as wanted for " + ComicName)
logger.info(u"Finished grabbing what I could.")

View File

@ -33,17 +33,15 @@ def MysterBinScrape(comsearch, comyear):
# max is set high enough to include everything but collections/groups of cbr/cbz which confuse us.
# minsize = 9mb maxsize = 75mb (for now)
input = 'http://www.mysterbin.com/advsearch?q=' + str(searchterms) + '&match=normal&minSize=9&maxSize=75&group=alt.binaries.comics.dcp&maxAge=1269&complete=2'
#print (input)
response = urllib2.urlopen ( input )
try:
soup = BeautifulSoup ( response )
except HTMLParseError:
logger.info(u"Unable to decipher using Experimental Search. Parser problem.")
return "no results"
#print (soup)
cnt = len(soup.findAll("input", {"class" : "check4nzb"}))
logger.info(u"I found " + str(cnt) + " results doing my search...now I'm going to analyze the results.")
#print (str(cnt) + " results")
if cnt == 0: return "no results"
resultName = []
resultComic = []
@ -99,7 +97,6 @@ def GCDScraper(ComicName, ComicYear, Total, ComicID):
#print ( "comichave: " + str(comicis) )
#print ( "comicid: " + str(comicid) )
comicnm = re.sub(' ', '+', comicnm)
#input = 'http://www.comics.org/series/name/' + str(comicnm) + '/sort/alpha/'
input = 'http://www.comics.org/search/advanced/process/?target=series&method=icontains&logic=False&order2=date&order3=&start_date=' + str(comicyr) + '-01-01&end_date=' + str(NOWyr) + '-12-31&series=' + str(comicnm) + '&is_indexed=None'
response = urllib2.urlopen ( input )
soup = BeautifulSoup ( response)
@ -110,8 +107,6 @@ def GCDScraper(ComicName, ComicYear, Total, ComicID):
#print (str(cnt) + " results")
global resultPublished
resultName = []
resultID = []
resultYear = []
@ -145,7 +140,14 @@ def GCDScraper(ComicName, ComicYear, Total, ComicID):
resultIssues[n] = resultIssues[n].replace(' ','')
#print ( "Year: " + str(resultYear[n]) )
#print ( "Issues: " + str(resultIssues[n]) )
if resultName[n].lower() == str(ComicName).lower():
CleanComicName = re.sub('[\,\.\:\;\'\[\]\(\)\!\@\#\$\%\^\&\*\-\_\+\=\?\/]', '', comicnm)
CleanComicName = re.sub(' ', '', CleanComicName).lower()
CleanResultName = re.sub('[\,\.\:\;\'\[\]\(\)\!\@\#\$\%\^\&\*\-\_\+\=\?\/]', '', resultName[n])
CleanResultName = re.sub(' ', '', CleanResultName).lower()
#print ("CleanComicName: " + str(CleanComicName))
#print ("CleanResultName: " + str(CleanResultName))
if CleanResultName == CleanComicName or CleanResultName[3:] == CleanComicName:
#if resultName[n].lower() == helpers.cleanName(str(ComicName)).lower():
#print ("n:" + str(n) + "...matched by name to Mylar!")
#this has been seen in a few instances already, so trying to adjust.
#when the series year is 2011, in gcd it might be 2012 due to publication
@ -182,7 +184,7 @@ def GCDScraper(ComicName, ComicYear, Total, ComicID):
# has the wrong title and won't match 100%...
# (ie. The Flash-2011 on comicvine is Flash-2011 on gcd)
# this section is to account for variations in spelling, punctuation, etc.
basnumbs = {'one':1,'two':2,'three':3,'four':4,'five':5,'six':6,'seven':7,'eight':8,'nine':9,'ten':10}
basnumbs = {'one':1,'two':2,'three':3,'four':4,'five':5,'six':6,'seven':7,'eight':8,'nine':9,'ten':10,'eleven':11,'twelve':12}
if resultURL is None:
#search for number as text, and change to numeric
for numbs in basnumbs:
@ -194,7 +196,7 @@ def GCDScraper(ComicName, ComicYear, Total, ComicID):
#print ("comicname-reVISED:" + str(ComicNm))
return GCDScraper(ComicNm, ComicYear, Total, ComicID)
break
if ComicName.startswith('The '):
if ComicName.lower().startswith('the '):
ComicName = ComicName[4:]
return GCDScraper(ComicName, ComicYear, Total, ComicID)
if ':' in ComicName:
@ -207,112 +209,168 @@ def GCDScraper(ComicName, ComicYear, Total, ComicID):
ComicName = ComicName.replace('and', '&')
return GCDScraper(ComicName, ComicYear, Total, ComicID)
return 'No Match'
#vari_loop = 0
return GCDdetails(comseries=None, resultURL=resultURL, vari_loop=0, ComicID=ComicID, TotalIssues=TotalIssues, issvariation=issvariation, resultPublished=resultPublished)
def GCDdetails(comseries, resultURL, vari_loop, ComicID, TotalIssues, issvariation, resultPublished):
gcdinfo = {}
gcdchoice = []
gcount = 0
i = 0
if vari_loop > 1:
resultPublished = "Unknown"
input2 = 'http://www.comics.org' + str(resultURL) + 'details/'
resp = urllib2.urlopen ( input2 )
soup = BeautifulSoup ( resp )
if vari_loop == 99: vari_loop = 1
#for newer comics, on-sale date has complete date...
#for older comics, pub.date is to be used
while (i <= vari_loop):
if vari_loop > 0:
try:
boong = comseries['comseries'][i]
except IndexError:
break
resultURL = boong['comseriesID']
ComicID = boong['comicid']
TotalIssues+= int(boong['comseriesIssues'])
else:
resultURL = resultURL
# if we're here - it means it's a mismatched name.
# let's pull down the publication date as it'll be blank otherwise
inputMIS = 'http://www.comics.org' + str(resultURL)
resp = urllib2.urlopen ( inputMIS )
soup = BeautifulSoup ( resp )
type = soup.find(text=' On-sale date ')
if type:
#print ("on-sale date detected....adjusting")
datetype = "on-sale"
else:
#print ("pub date defaulting")
datetype = "pub"
cnt1 = len(soup.findAll("tr", {"class" : "row_even_False"}))
cnt2 = len(soup.findAll("tr", {"class" : "row_even_True"}))
cnt = int(cnt1 + cnt2)
#print (str(cnt) + " Issues in Total (this may be wrong due to alternate prints, etc")
n_odd = -1
n_even = -1
n = 0
PI = "1.00"
altcount = 0
while ( n < cnt ):
if n%2==0:
n_odd+=1
parsed = soup.findAll("tr", {"class" : "row_even_False"})[n_odd]
ntype = "odd"
else:
n_even+=1
ntype = "even"
parsed = soup.findAll("tr", {"class" : "row_even_True"})[n_even]
subtxt3 = parsed.find("a")
ParseIssue = subtxt3.findNext(text=True)
if ',' in ParseIssue: ParseIssue = re.sub("\,", "", ParseIssue)
isslen = ParseIssue.find(' ')
#if 'isslen' exists, it means that it's an alternative cover.
#however, if ONLY alternate covers exist of an issue it won't work.
#let's use the FIRST record, and ignore all other covers for the given issue.
isschk = ParseIssue[:isslen]
#check if decimal exists or not, and store decimal results
if '.' in isschk:
isschk_find = isschk.find('.')
isschk_b4dec = isschk[:isschk_find]
isschk_decval = isschk[isschk_find+1:]
else:
isschk_decval = ".00"
if isslen > 0:
isschk = ParseIssue[:isslen]
isschk2 = str(isschk) + isschk_decval
ParseIssue = str(isschk2)
#print ("Alt.cover found = " + str(isschk2))
if str(PI) == str(isschk2):
if altcount == 0:
#this handles the first occurrence.. print ("First occurrence detected - " + str(isschk))
ParseIssue = str(isschk2)
PI = str(isschk2)
altcount = 1
else:
#print ("Using only first record for issue - ignoring further alternate matches")
ParseIssue = "this is wrong"
altcount+=1
parsed = soup.find("div", {"id" : "series_data"})
subtxt3 = parsed.find("dd", {"id" : "publication_dates"})
resultPublished = subtxt3.findNext(text=True).rstrip()
#print ("pubdate:" + str(resultPublished))
coverst = soup.find("div", {"id" : "series_cover"})
if coverst is None:
gcdcover = "None"
else:
altcount = 1
ParseIssue = str(isschk) + isschk_decval
subcoverst = coverst('img',src=True)[0]
gcdcover = subcoverst['src']
#print ("resultURL:" + str(resultURL))
#print ("comicID:" + str(ComicID))
input2 = 'http://www.comics.org' + str(resultURL) + 'details/'
resp = urllib2.urlopen ( input2 )
soup = BeautifulSoup ( resp )
#for newer comics, on-sale date has complete date...
#for older comics, pub.date is to be used
type = soup.find(text=' On-sale date ')
if type:
#print ("on-sale date detected....adjusting")
datetype = "on-sale"
else:
ParseIssue = ParseIssue + isschk_decval
#print ("no alt.cover detected for - " + str(ParseIssue))
altcount = 1
if (altcount == 1):
# in order to get the compare right, let's decimalize the string to '.00'.
gcdinfo['ComicIssue'] = ParseIssue
#print ( "Issue : " + str(ParseIssue) )
#^^ will retrieve issue
#if datetype == "on-sale":
subtxt1 = parsed('td')[2]
ParseDate = subtxt1.findNext(text=True)
pdlen = len(ParseDate)
#print ("Parsed Date length: " + str(pdlen))
if len(ParseDate) < 7:
subtxt1 = parsed.find("td")
ParseDate = subtxt1.findNext(text=True)
if ParseDate == ' ':
ParseDate = "0000-00-00"
ParseDate = ParseDate.replace(' ','')
gcdinfo['ComicDate'] = ParseDate
#print ( "Date : " + str(ParseDate) )
#^^ will retrieve date #
#print ("pub date defaulting")
datetype = "pub"
cnt1 = len(soup.findAll("tr", {"class" : "row_even_False"}))
cnt2 = len(soup.findAll("tr", {"class" : "row_even_True"}))
gcdchoice.append({
'GCDid': ComicID,
'GCDIssue': gcdinfo['ComicIssue'],
'GCDDate': gcdinfo['ComicDate']
})
cnt = int(cnt1 + cnt2)
gcdinfo['gcdchoice'] = gcdchoice
PI = ParseIssue
#print (str(cnt) + " Issues in Total (this may be wrong due to alternate prints, etc")
n_odd = -1
n_even = -1
n = 0
PI = "1.00"
altcount = 0
while ( n < cnt ):
if n%2==0:
n_odd+=1
parsed = soup.findAll("tr", {"class" : "row_even_False"})[n_odd]
ntype = "odd"
else:
n_even+=1
ntype = "even"
parsed = soup.findAll("tr", {"class" : "row_even_True"})[n_even]
subtxt3 = parsed.find("a")
ParseIssue = subtxt3.findNext(text=True)
fid = parsed('a',href=True)[0]
resultGID = fid['href']
resultID = resultGID[7:-1]
#print ( "ID: " + str(resultID) )
if ',' in ParseIssue: ParseIssue = re.sub("\,", "", ParseIssue)
isslen = ParseIssue.find(' ')
#if 'isslen' exists, it means that it's an alternative cover.
#however, if ONLY alternate covers exist of an issue it won't work.
#let's use the FIRST record, and ignore all other covers for the given issue.
isschk = ParseIssue[:isslen]
#check if decimal exists or not, and store decimal results
if '.' in isschk:
isschk_find = isschk.find('.')
isschk_b4dec = isschk[:isschk_find]
isschk_decval = isschk[isschk_find+1:]
else:
isschk_decval = ".00"
if isslen > 0:
isschk = ParseIssue[:isslen]
isschk2 = str(isschk) + isschk_decval
if 'a' in isschk or 'b' in isschk or 'c' in isschk:
isschk2 = ParseIssue[:isslen-1] + isschk_decval
#altcount == 2
ParseIssue = str(isschk2)
#print ("Alt.cover found = " + str(isschk2))
if str(PI) == str(isschk2):
if altcount == 0:
#this handles the first occurrence.. print ("First occurrence detected - " + str(isschk))
ParseIssue = str(isschk2)
PI = str(isschk2)
altcount = 1
else:
#print ("Using only first record for issue - ignoring further alternate matches")
ParseIssue = "this is wrong"
altcount+=1
else:
altcount = 1
ParseIssue = str(isschk) + isschk_decval
else:
ParseIssue = ParseIssue + isschk_decval
#print ("no alt.cover detected for - " + str(ParseIssue))
altcount = 1
if (altcount == 1):
# in order to get the compare right, let's decimalize the string to '.00'.
gcdinfo['ComicIssue'] = ParseIssue
#^^ will retrieve issue
#if datetype == "on-sale":
subtxt1 = parsed('td')[2]
ParseDate = subtxt1.findNext(text=True)
pdlen = len(ParseDate)
#print ("Parsed Date length: " + str(pdlen))
if len(ParseDate) < 7:
subtxt1 = parsed.find("td")
ParseDate = subtxt1.findNext(text=True)
if ParseDate == ' ':
ParseDate = "0000-00-00"
#ParseDate = ParseDate.replace('?','')
ParseDate = ParseDate.replace(' ','')
gcdinfo['ComicDate'] = ParseDate
#^^ will retrieve date #
if ComicID[:1] == "G":
gcdchoice.append({
'GCDid': ComicID,
'IssueID': resultID,
'GCDIssue': gcdinfo['ComicIssue'],
'GCDDate': gcdinfo['ComicDate']
})
gcount+=1
else:
gcdchoice.append({
'GCDid': ComicID,
'GCDIssue': gcdinfo['ComicIssue'],
'GCDDate': gcdinfo['ComicDate']
})
gcdinfo['gcdchoice'] = gcdchoice
PI = ParseIssue
#else:
# -- this needs a rework --
# if issue only has alternative covers on comics.org, it won't match
@ -322,8 +380,71 @@ def GCDScraper(ComicName, ComicYear, Total, ComicID):
# altcount+=1
# print ("alternate issue - ignoring")
#altcount = 0
n+=1
n+=1
i+=1
gcdinfo['gcdvariation'] = issvariation
gcdinfo['totalissues'] = TotalIssues
if ComicID[:1] == "G":
gcdinfo['totalissues'] = gcount
else:
gcdinfo['totalissues'] = TotalIssues
gcdinfo['ComicImage'] = gcdcover
gcdinfo['resultPublished'] = resultPublished
#print ("gcdvariation: " + str(gcdinfo['gcdvariation']))
return gcdinfo
## -- end (GCD) -- ##
def GCDAdd(gcdcomicid):
serieschoice = []
series = {}
for gcdid in gcdcomicid:
#print ("gcdid:" + str(gcdid))
input2 = 'http://www.comics.org/series/' + str(gcdid)
resp = urllib2.urlopen ( input2 )
soup = BeautifulSoup ( resp )
parsen = soup.find("span", {"id" : "series_name"})
subpar = parsen('a')[0]
resultName = subpar.findNext(text=True)
#print ("ComicName: " + resultName)
#covers-start
coverst = soup.find("div", {"id" : "series_cover"})
if coverst is None:
gcdcover = "None"
else:
subcoverst = coverst('img',src=True)[0]
gcdcover = subcoverst['src']
#print ("Cover: " + str(gcdcover))
#covers end
#publisher start
pubst = soup.find("div", {"class" : "item_data"})
subpubst = pubst('a')[0]
publisher = subpubst.findNext(text=True)
#print ("Publisher: " + str(publisher))
#publisher end
parsed = soup.find("div", {"id" : "series_data"})
#print ("parse:" + str(parsed))
subtxt3 = parsed.find("dd", {"id" : "publication_dates"})
pubdate = subtxt3.findNext(text=True).rstrip()
#print ("pubdate:" + str(pubdate))
subtxt4 = parsed.find("dd", {"id" : "issues_published"})
noiss = subtxt4.findNext(text=True)
lenwho = len(noiss)
lent = noiss.find(' ',2)
lenf = noiss.find('(')
stringit = noiss[lenf:lenwho]
stringout = noiss[:lent]
noissues = stringout.rstrip(' \t\r\n\0')
numbering = stringit.rstrip(' \t\r\n\0')
#print ("noissues:" + str(noissues))
#print ("numbering:" + str(numbering))
serieschoice.append({
"ComicID": gcdid,
"ComicName": resultName,
"ComicYear" : pubdate,
"ComicIssues" : noissues,
"ComicPublisher" : publisher,
"ComicCover" : gcdcover
})
series['serieschoice'] = serieschoice
return series
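A sketch of the loosened title comparison added to GCDScraper above: punctuation and spaces are stripped, case is ignored, and the [3:] slice lets a GCD result carrying a leading "the" match a watch name without it:

import re

def clean(name):
    name = re.sub('[\,\.\:\;\'\[\]\(\)\!\@\#\$\%\^\&\*\-\_\+\=\?\/]', '', name)
    return re.sub(' ', '', name).lower()

def gcd_match(result_name, watch_name):
    cleanresult = clean(result_name)
    cleanwatch = clean(watch_name)
    # [3:] strips a three-character prefix such as "the"
    return cleanresult == cleanwatch or cleanresult[3:] == cleanwatch

assert gcd_match('The Flash', 'Flash')
assert gcd_match('Flash', 'Flash!')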

View File

@ -58,14 +58,14 @@ def search_init(ComicName, IssueNumber, ComicYear, SeriesYear, IssueDate):
nzbpr = nzbp-1
findit = 'no'
#fix for issue dates between Dec/Jan
#fix for issue dates between Nov-Dec/Jan
IssDt = str(IssueDate)[5:7]
if IssDt == "12":
if IssDt == "12" or IssDt == "11":
ComicYearFix = str(int(ComicYear) + 1)
IssDateFix = "yes"
else:
IssDateFix = "no"
while (nzbpr >= 0 ):
if nzbprovider[nzbpr] == 'experimental':
#this is for experimental
@ -75,7 +75,7 @@ def search_init(ComicName, IssueNumber, ComicYear, SeriesYear, IssueDate):
break
else:
if IssDateFix == "yes":
logger.info(u"Hang on - this issue was published between Dec/Jan of " + str(ComicYear) + "...adjusting to " + str(ComicYearFix) + " and retrying...")
logger.info(u"Hang on - this issue was published between /NovDec of " + str(ComicYear) + "...adjusting to " + str(ComicYearFix) + " and retrying...")
findit = NZB_SEARCH(ComicName, IssueNumber, ComicYearFix, SeriesYear, nzbprov, nzbpr, IssDateFix)
if findit == 'yes':
break
@ -93,12 +93,13 @@ def search_init(ComicName, IssueNumber, ComicYear, SeriesYear, IssueDate):
break
else:
if IssDateFix == "yes":
logger.info(u"Hang on - this issue was published between Dec/Jan of " + str(ComicYear) + "...adjusting to " + str(ComicYearFix) + " and retrying...")
logger.info(u"Hang on - this issue was published between Nov/Dec of " + str(ComicYear) + "...adjusting to " + str(ComicYearFix) + " and retrying...")
findit = NZB_SEARCH(ComicName, IssueNumber, ComicYearFix, SeriesYear, nzbprov, nzbpr, IssDateFix)
if findit == 'yes':
break
nzbpr-=1
# ----
elif nzbprovider[nzbpr] == 'dognzb':
@ -117,6 +118,7 @@ def search_init(ComicName, IssueNumber, ComicYear, SeriesYear, IssueDate):
break
nzbpr-=1
# ----
return findit
@ -413,12 +415,12 @@ def NZB_SEARCH(ComicName, IssueNumber, ComicYear, SeriesYear, nzbprov, nzbpr, Is
if nzbprov == 'experimental':
filenzb = filenamenzb[6:]
if nzbprov == 'dognzb':
filenzb == str(filenamenzb)
filenzb = str(filenamenzb)
savefile = str(tmppath) + "/" + str(filenzb) + ".nzb"
else:
#let's make the dir.
try:
os.makedirs(str(mylar.CACHE_DIR))
os.makedirs(str(mylar.CACHE_DIR))
logger.info(u"Cache Directory successfully created at: " + str(mylar.CACHE_DIR))
savefile = str(mylar.CACHE_DIR) + "/" + str(filenzb) + ".nzb"
@ -619,3 +621,25 @@ def searchforissue(issueid=None, new=False):
else:
pass
#print ("not found!")
def searchIssueIDList(issuelist):
myDB = db.DBConnection()
for issueid in issuelist:
issue = myDB.action('SELECT * from issues WHERE IssueID=?', [issueid]).fetchone()
comic = myDB.action('SELECT * from comics WHERE ComicID=?', [issue['ComicID']]).fetchone()
print ("Checking for issue: " + str(issue['Issue_Number']))
foundNZB = "none"
SeriesYear = comic['ComicYear']
if issue['IssueDate'] is None:
ComicYear = comic['ComicYear']
else:
ComicYear = str(issue['IssueDate'])[:4]
if (mylar.NZBSU or mylar.DOGNZB or mylar.EXPERIMENTAL) and (mylar.SAB_HOST):
foundNZB = search_init(comic['ComicName'], issue['Issue_Number'], str(ComicYear), comic['ComicYear'], issue['IssueDate'])
if foundNZB == "yes":
#print ("found!")
updater.foundsearch(ComicID=issue['ComicID'], IssueID=issue['IssueID'])
else:
pass
#print ("not found!")

View File

@ -41,28 +41,38 @@ def dbUpdate():
def latest_update(ComicID, LatestIssue, LatestDate):
# here we add to comics.latest
myDB = db.DBConnection()
controlValueDict = {"ComicID": ComicID}
newValueDict = {"LatestIssue": LatestIssue,
"LatestDate": LatestDate}
myDB.upsert("comics", newValueDict, controlValueDict)
latestCTRLValueDict = {"ComicID": ComicID}
newlatestDict = {"LatestIssue": str(LatestIssue),
"LatestDate": str(LatestDate)}
myDB.upsert("comics", newlatestDict, latestCTRLValueDict)
def upcoming_update(ComicID, ComicName, IssueNumber, IssueDate):
# here we add to upcoming table...
myDB = db.DBConnection()
controlValue = {"ComicID": ComicID}
newValue = {"ComicName": ComicName,
"IssueNumber": IssueNumber,
"IssueDate": IssueDate}
newValue = {"ComicName": str(ComicName),
"IssueNumber": str(IssueNumber),
"IssueDate": str(IssueDate)}
if mylar.AUTOWANT_UPCOMING:
newValue = {"STATUS": "Wanted"}
newValue['Status'] = "Wanted"
else:
newValue = {"STATUS": "Skipped"}
newValue['Status'] = "Skipped"
myDB.upsert("upcoming", newValue, controlValue)
issuechk = myDB.action("SELECT * FROM issues WHERE ComicID=? AND Issue_Number=?", [ComicID, IssueNumber]).fetchone()
if issuechk is None:
pass
#print ("not released yet...")
else:
control = {"IssueID": issuechk['IssueID']}
if mylar.AUTOWANT_UPCOMING: values = {"Status": "Wanted"}
else: values = {"Status": "Skipped"}
myDB.upsert("issues", values, control)
def weekly_update(ComicName):
# here we update status of weekly table...
myDB = db.DBConnection()
controlValue = { "COMIC": ComicName}
controlValue = { "COMIC": str(ComicName)}
if mylar.AUTOWANT_UPCOMING:
newValue = {"STATUS": "Wanted"}
else:
@ -71,7 +81,7 @@ def weekly_update(ComicName):
def newpullcheck():
# When adding a new comic, let's check for new issues on this week's pullist and update.
weeklypull.pullitcheck()
mylar.weeklypull.pullitcheck()
return
def no_searchresults(ComicID):
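The upcoming_update change above also fixes a subtle bug: rebinding newValue to a fresh {"STATUS": ...} dict discarded ComicName, IssueNumber and IssueDate and used the wrong key case, whereas newValue['Status'] = ... keeps the row intact. In miniature (values are made up):

newValue = {"ComicName": "Example Comic", "IssueNumber": "1", "IssueDate": "2012-10-17"}
#newValue = {"STATUS": "Wanted"}   # old: replaced the dict, losing every other field
newValue['Status'] = "Wanted"      # new: adds the key, the rest of the row survives
assert newValue["ComicName"] == "Example Comic" and newValue["Status"] == "Wanted"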

View File

@ -62,6 +62,19 @@ class WebInterface(object):
myDB = db.DBConnection()
comic = myDB.action('SELECT * FROM comics WHERE ComicID=?', [ComicID]).fetchone()
issues = myDB.select('SELECT * from issues WHERE ComicID=? order by Int_IssueNumber DESC', [ComicID])
# make sure comic dir exists..
comlocation = comic['ComicLocation']
if os.path.isdir(str(comlocation)): pass
#logger.info(u"Directory (" + str(comlocation) + ") already exists! Continuing...")
else:
print ("Directory doesn't exist!")
try:
os.makedirs(str(comlocation))
logger.info(u"No directory found - So I created one at: " + str(comlocation))
except OSError as e:
if e.errno != errno.EEXIST:
raise
if comic is None:
raise cherrypy.HTTPRedirect("home")
comicConfig = {
@ -85,18 +98,68 @@ class WebInterface(object):
searchresults = mb.findComic(name, mode, issue=None)
elif type == 'comic' and mode == 'want':
searchresults = mb.findComic(name, mode, issue)
#else:
#searchresults = mb.findRelease(name)
searchresults = sorted(searchresults, key=itemgetter('comicyear','issues'), reverse=True)
#print ("Results: " + str(searchresults))
return serve_template(templatename="searchresults.html", title='Search Results for: "' + name + '"', searchresults=searchresults, type=type)
searchit.exposed = True
def addComic(self, comicid):
threading.Thread(target=importer.addComictoDB, args=[comicid]).start()
def addComic(self, comicid, comicname=None, comicyear=None, comicissues=None):
myDB = db.DBConnection()
sresults = []
mismatch = "no"
#here we test for exception matches (ie. comics spanning more than one volume, known mismatches, etc).
CV_EXcomicid = myDB.action("SELECT * from exceptions WHERE ComicID=?", [comicid]).fetchone()
if CV_EXcomicid is None: pass
else:
if CV_EXcomicid['variloop'] == '99':
logger.info(u"mismatched name...autocorrecting to correct GID and auto-adding.")
mismatch = "yes"
if CV_EXcomicid['NewComicID'] == 'none':
logger.info(u"multi-volume series detected")
testspx = CV_EXcomicid['GComicID'].split('/')
fakeit = parseit.GCDAdd(testspx)
howmany = int(CV_EXcomicid['variloop'])
t = 0
while (t <= howmany):
try:
sres = fakeit['serieschoice'][t]
except IndexError:
break
sresults.append({
'ComicID' : sres['ComicID'],
'ComicName' : sres['ComicName'],
'ComicYear' : sres['ComicYear'],
'ComicIssues' : sres['ComicIssues'],
'ComicPublisher' : sres['ComicPublisher'],
'ComicCover' : sres['ComicCover']
})
t+=1
#searchfix(-1).html is for misnamed comics and wrong years.
#searchfix-2.html is for comics that span multiple volumes.
return serve_template(templatename="searchfix-2.html", title="In-Depth Results", sresults=sresults)
threading.Thread(target=importer.addComictoDB, args=[comicid,mismatch]).start()
raise cherrypy.HTTPRedirect("artistPage?ComicID=%s" % comicid)
addComic.exposed = True
def GCDaddComic(self, comicid, comicname=None, comicyear=None, comicissues=None, comiccover=None, comicpublisher=None):
#since we already know most of the info, let's add it to the db so we can reference it later.
myDB = db.DBConnection()
gcomicid = "G" + str(comicid)
comicyear_len = comicyear.find(' ', 2)
comyear = comicyear[comicyear_len+1:comicyear_len+5]
controlValueDict = { 'ComicID': gcomicid }
newValueDict = {'ComicName': comicname,
'ComicYear': comyear,
'ComicPublished': comicyear,
'ComicPublisher': comicpublisher,
'ComicImage': comiccover,
'Total' : comicissues }
myDB.upsert("comics", newValueDict, controlValueDict)
threading.Thread(target=importer.GCDimport, args=[gcomicid]).start()
raise cherrypy.HTTPRedirect("artistPage?ComicID=%s" % comicid)
GCDaddComic.exposed = True
def pauseArtist(self, ComicID):
logger.info(u"Pausing comic: " + ComicID)
myDB = db.DBConnection()
@ -127,7 +190,15 @@ class WebInterface(object):
deleteArtist.exposed = True
def refreshArtist(self, ComicID):
importer.addComictoDB(ComicID)
myDB = db.DBConnection()
mismatch = "no"
CV_EXcomicid = myDB.action("SELECT * from exceptions WHERE ComicID=?", [ComicID]).fetchone()
if CV_EXcomicid is None: pass
else:
if CV_EXcomicid['variloop'] == '99':
mismatch = "yes"
if ComicID[:1] == "G": threading.Thread(target=importer.GCDimport, args=[ComicID]).start()
else: threading.Thread(target=importer.addComictoDB, args=[ComicID,mismatch]).start()
raise cherrypy.HTTPRedirect("artistPage?ComicID=%s" % ComicID)
refreshArtist.exposed=True
@ -139,49 +210,27 @@ class WebInterface(object):
#raise cherrypy.HTTPRedirect("artistPage?ComicID=%s" & ComicID)
editIssue.exposed=True
def markissues(self, ComicID=None, action=None, **args):
def markissues(self, action=None, **args):
myDB = db.DBConnection()
issuesToAdd = []
if action == 'WantedNew':
newaction = 'Wanted'
else:
newaction = action
for IssueID in args:
if IssueID is None: break
#print("IssueID:" + IssueID)
mi = myDB.action("SELECT * FROM issues WHERE IssueID=?",[IssueID]).fetchone()
miyr = myDB.action("SELECT ComicYear FROM comics WHERE ComicID=?", [mi['ComicID']]).fetchone()
logger.info(u"Marking %s %s as %s" % (mi['ComicName'], mi['Issue_Number'], newaction))
controlValueDict = {"IssueID": IssueID}
newValueDict = {"Status": newaction}
myDB.upsert("issues", newValueDict, controlValueDict)
if action == 'Skipped': pass
elif action == 'Wanted':
foundcoms = search.search_init(mi['ComicName'], mi['Issue_Number'], mi['IssueDate'][:4], miyr['ComicYear'], mi['IssueDate'])
#searcher.searchforissue(mbid, new=False)
elif action == 'WantedNew':
foundcoms = search.search_init(mi['ComicName'], mi['Issue_Number'], mi['IssueDate'][:4], miyr['ComicYear'], mi['IssueDate'])
#searcher.searchforissue(mbid, new=True)
if foundcoms == "yes":
logger.info(u"Found " + mi['ComicName'] + " issue: " + mi['Issue_Number'] + " ! Marking as Snatched...")
# file check to see if issue exists and update 'have' count
if IssueID is not None:
ComicID = mi['ComicID']
#print ("ComicID: " + str(ComicID))
comic = myDB.action('SELECT * FROM comics WHERE ComicID=?', [ComicID]).fetchone()
controlValueDict = {'IssueID': IssueID}
newValueDict = {'Status': 'Snatched'}
myDB.upsert("issues", newValueDict, controlValueDict)
snatchedupdate = {"IssueID": IssueID}
newsnatchValues = {"ComicName": mi['ComicName'],
"ComicID": ComicID,
"Issue_Number": mi['Issue_Number'],
"DateAdded": helpers.today(),
"Status": "Snatched"
}
myDB.upsert("snatched", newsnatchValues, snatchedupdate)
else:
logger.info(u"Couldn't find " + mi['ComicName'] + " issue: " + mi['Issue_Number'] + " ! Status still wanted...")
mi = myDB.action("SELECT * FROM issues WHERE IssueID=?",[IssueID]).fetchone()
miyr = myDB.action("SELECT ComicYear FROM comics WHERE ComicID=?", [mi['ComicID']]).fetchone()
logger.info(u"Marking %s %s as %s" % (mi['ComicName'], mi['Issue_Number'], newaction))
controlValueDict = {"IssueID": IssueID}
newValueDict = {"Status": newaction}
myDB.upsert("issues", newValueDict, controlValueDict)
if action == 'Wanted':
issuesToAdd.append(IssueID)
if len(issuesToAdd) > 0:
logger.debug("Marking issues: %s" % issuesToAdd)
threading.Thread(target=search.searchIssueIDList, args=[issuesToAdd]).start()
if ComicID:
raise cherrypy.HTTPRedirect("artistPage?ComicID=%s" % ComicID)
else:
@ -197,7 +246,6 @@ class WebInterface(object):
myDB = db.DBConnection()
#mode dictates type of queue - either 'want' for individual comics, or 'series' for series watchlist.
if ComicID is None and mode == 'series':
#print (ComicName)
issue = None
raise cherrypy.HTTPRedirect("searchit?name=%s&issue=%s&mode=%s" % (ComicName, 'None', 'series'))
elif ComicID is None and mode == 'pullseries':
@ -233,12 +281,9 @@ class WebInterface(object):
miyr = myDB.action("SELECT ComicYear FROM comics WHERE ComicID=?", [ComicID]).fetchone()
SeriesYear = miyr['ComicYear']
foundcom = search.search_init(ComicName, ComicIssue, ComicYear, SeriesYear, issues['IssueDate'])
#print ("foundcom:" + str(foundcom))
if foundcom == "yes":
# file check to see if issue exists and update 'have' count
if IssueID is not None:
#print ("ComicID:" + str(ComicID))
#print ("IssueID:" + str(IssueID))
return updater.foundsearch(ComicID, IssueID)
if ComicID:
raise cherrypy.HTTPRedirect("artistPage?ComicID=%s" % ComicID)
@ -262,7 +307,6 @@ class WebInterface(object):
if popit:
weeklyresults = myDB.select("SELECT * from weekly")
pulldate = myDB.action("SELECT * from weekly").fetchone()
#imgstuff = parseit.PW()
if pulldate is None:
raise cherrypy.HTTPRedirect("home")
else:
@ -292,8 +336,23 @@ class WebInterface(object):
upcoming = myDB.select("SELECT * from upcoming WHERE IssueDate > date('now') order by IssueDate DESC")
issues = myDB.select("SELECT * from issues WHERE Status='Wanted'")
#let's move any items from the upcoming table into the wanted table if the date has already passed.
#mvupcome = myDB.select("SELECT * from upcoming WHERE IssueDate < date('now') order by IssueDate DESC")
#mvcontroldict = {"ComicID": mvupcome['ComicID']}
#gather the list...
mvupcome = myDB.select("SELECT * from upcoming WHERE IssueDate < date('now') order by IssueDate DESC")
#get the issue ID's
for mvup in mvupcome:
myissue = myDB.action("SELECT * FROM issues WHERE Issue_Number=?", [mvup['IssueNumber']]).fetchone()
if myissue is None: pass
else:
print ("ComicName: " + str(myissue['ComicName']))
print ("Issue number : " + str(myissue['Issue_Number']) )
mvcontroldict = {"IssueID": myissue['IssueID']}
mvvalues = {"ComicID": mvupcome['ComicID'],
"Status": "Wanted"}
myDB.upsert("wanted", mvvalues, mvcontroldict)
return serve_template(templatename="upcoming.html", title="Upcoming", upcoming=upcoming, issues=issues)
upcoming.exposed = True
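A sketch of the year extraction GCDaddComic performs above: the GCD publication-dates string leads with a month name, so the four characters after the first space (searched from index 2) are taken as the series' start year. The sample string is an assumption of mine:

def start_year(published):
    sp = published.find(' ', 2)
    return published[sp + 1:sp + 5]

assert start_year('January 2012 - Present') == '2012'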

View File

@ -303,6 +303,8 @@ def pullit():
pullitcheck()
def pullitcheck():
myDB = db.DBConnection()
not_t = ['TP',
'NA',
'HC',
@ -360,6 +362,7 @@ def pullitcheck():
ki = []
kc = []
otot = 0
#print ("You are watching for: " + str(w) + " comics")
#print ("----------THIS WEEK'S PUBLISHED COMICS------------")
if w > 0:
@ -367,57 +370,55 @@ def pullitcheck():
lines[cnt] = str(lines[cnt]).upper()
llen[cnt] = str(llen[cnt])
#print ("looking for : " + str(lines[cnt]))
cur.execute('SELECT PUBLISHER, ISSUE, COMIC, EXTRA, SHIPDATE FROM weekly WHERE COMIC LIKE (?)', [lines[cnt]])
while True:
row = cur.fetchone()
#print (row)
if row == None:
weekly = myDB.select('SELECT PUBLISHER, ISSUE, COMIC, EXTRA, SHIPDATE FROM weekly WHERE COMIC LIKE (?)', [lines[cnt]])
#cur.execute('SELECT PUBLISHER, ISSUE, COMIC, EXTRA, SHIPDATE FROM weekly WHERE COMIC LIKE (?)', [lines[cnt]])
for week in weekly:
if week == None:
break
for nono in not_t:
if nono in row[1]:
if nono in week['PUBLISHER']:
#print ("nono present")
break
for nothere in not_c:
if nothere in row[3]:
if nothere in week['EXTRA']:
#print ("nothere present")
break
else:
comicnm = row[2]
comicnm = week['COMIC']
#here's the tricky part, ie. BATMAN will match on
#every batman comic, not exact
#print ("comparing" + str(comicnm) + "..to.." + str(unlines[cnt]).upper())
if str(comicnm) == str(unlines[cnt]).upper():
#print ("matched on:")
pass
elif ("ANNUAL" in row[3]):
elif ("ANNUAL" in week['EXTRA']):
pass
#print ( row[3] + " matched on ANNUAL")
else:
#print ( row[2] + " not an EXACT match...")
break
break
if "WOLVERINE AND X-MEN" in str(comicnm):
comicnm = "WOLVERINE AND THE X-MEN"
#print ("changed wolvy")
if ("NA" not in row[1]) and ("HC" not in row[1]):
if ("COMBO PACK" not in row[3]) and ("2ND PTG" not in row[3]) and ("3RD PTG" not in row[3]):
if ("NA" not in week['ISSUE']) and ("HC" not in week['ISSUE']):
if ("COMBO PACK" not in week['EXTRA']) and ("2ND PTG" not in week['EXTRA']) and ("3RD PTG" not in week['EXTRA']):
otot+=1
dontadd = "no"
if dontadd == "no":
#print (row[0], row[1], row[2])
tot+=1
kp.append(row[0])
ki.append(row[1])
kc.append(comicnm)
if ("ANNUAL" in row[3]):
#kp.append(row[0])
#ki.append(row[1])
#kc.append(comicnm)
if ("ANNUAL" in week['EXTRA']):
watchfndextra.append("annual")
else:
watchfndextra.append("none")
watchfnd.append(comicnm)
watchfndiss.append(row[1])
watchfndiss.append(week['ISSUE'])
ComicID = comicid[cnt]
ComicIssue = str(watchfndiss[tot -1] + ".00")
ComicDate = str(row[4])
ComicDate = str(week['SHIPDATE'])
ComicName = str(unlines[cnt])
#print ("added: " + str(watchfnd[tot -1]) + " ISSUE: " + str(watchfndiss[tot -1]))
# here we add to comics.latest
@ -435,5 +436,5 @@ def pullitcheck():
#print ("However I've already grabbed " + str(btotal) )
#print ("I need to get " + str(tot) + " comic(s)!" )
con.close()
#con.close()
return
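A sketch of the pull-list match logic above: the SQL LIKE pre-filter can over-match (watching BATMAN would hit every Batman title), so a pre-filtered row is kept only on an exact uppercase name match, with annuals let through via the EXTRA column:

def keep_row(week_comic, week_extra, watch_name):
    # exact uppercase match, so BATMAN won't pull in BATMAN AND ROBIN...
    if week_comic == str(watch_name).upper():
        return True
    # ...unless the row is flagged as an annual
    return 'ANNUAL' in week_extra

assert keep_row('BATMAN', '', 'Batman')
assert not keep_row('BATMAN AND ROBIN', '', 'Batman')
assert keep_row('BATMAN ANNUAL', 'ANNUAL', 'Batman')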