Mirror of https://github.com/evilhero/mylar (synced 2024-12-24 00:32:47 +00:00)
FIX: Weekly pull list fix for limited series not being marked as Wanted. FIX: (#256) If no Series Year is provided, it defaults to 0000 and causes an error; the alternate Series Year is used instead in that case.
This commit is contained in: parent 8eeaa4e2a9, commit e7c05c5a73
5 changed files with 23 additions and 7 deletions
@@ -89,7 +89,10 @@ def GetComicInfo(comicid,dom):
     #that the parser finds with name tagName:
     comic['ComicName'] = dom.getElementsByTagName('name')[trackcnt].firstChild.wholeText
     comic['ComicName'] = comic['ComicName'].rstrip()
-    comic['ComicYear'] = dom.getElementsByTagName('start_year')[0].firstChild.wholeText
+    try:
+        comic['ComicYear'] = dom.getElementsByTagName('start_year')[0].firstChild.wholeText
+    except:
+        comic['ComicYear'] = '0000'
     comic['ComicURL'] = dom.getElementsByTagName('site_detail_url')[0].firstChild.wholeText
     #the description field actually holds the Volume# - so let's grab it
     try:
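To see the GetComicInfo guard in isolation: the sketch below runs the same try/except fallback against two stripped-down ComicVine-style XML snippets using xml.dom.minidom. The sample XML and the helper name get_start_year are illustrative only, not part of the commit.

from xml.dom.minidom import parseString

def get_start_year(dom):
    # Mirrors the patched logic: fall back to '0000' when the
    # <start_year> node is missing or has no text content.
    try:
        return dom.getElementsByTagName('start_year')[0].firstChild.wholeText
    except:
        return '0000'

with_year = parseString("<volume><start_year>1987</start_year></volume>")
without_year = parseString("<volume><start_year/></volume>")
print(get_start_year(with_year))     # 1987
print(get_start_year(without_year))  # 0000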
@@ -115,6 +115,12 @@ def addComictoDB(comicid,mismatch=None,pullupd=None,imported=None,ogcname=None):
     logger.info(u"Sucessfully retrieved details for " + comic['ComicName'] )
     # print ("Series Published" + parseit.resultPublished)
 
+    #if the SeriesYear returned by CV is blank or none (0000), let's use the gcd one.
+    if comic['ComicYear'] is None or comic['ComicYear'] == '0000':
+        SeriesYear = gcdinfo['SeriesYear']
+    else:
+        SeriesYear = comic['ComicYear']
+
     #comic book location on machine
     # setup default location here
 
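The fallback this hunk introduces is easy to isolate. A minimal sketch, assuming the GCD year arrives via gcdinfo['SeriesYear'] as populated by the GCDdetails change further down (the helper name pick_series_year is hypothetical):

def pick_series_year(cv_year, gcd_year):
    # Prefer ComicVine's start_year, but treat None or the '0000'
    # placeholder as unknown and fall back to the GCD-scraped year.
    if cv_year is None or cv_year == '0000':
        return gcd_year
    return cv_year

print(pick_series_year('1987', '1986'))  # 1987 -- CV year wins when present
print(pick_series_year('0000', '1986'))  # 1986 -- placeholder triggers fallback
print(pick_series_year(None, '1986'))    # 1986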
@@ -134,7 +140,7 @@ def addComictoDB(comicid,mismatch=None,pullupd=None,imported=None,ogcname=None):
 
     series = comicdir
     publisher = comic['ComicPublisher']
-    year = comic['ComicYear']
+    year = SeriesYear
 
     #do work to generate folder path
 
@@ -151,7 +157,7 @@ def addComictoDB(comicid,mismatch=None,pullupd=None,imported=None,ogcname=None):
     #print helpers.replace_all(mylar.FOLDER_FORMAT, values)
 
     if mylar.FOLDER_FORMAT == '':
-        comlocation = mylar.DESTINATION_DIR + "/" + comicdir + " (" + comic['ComicYear'] + ")"
+        comlocation = mylar.DESTINATION_DIR + "/" + comicdir + " (" + SeriesYear + ")"
     else:
         comlocation = mylar.DESTINATION_DIR + "/" + helpers.replace_all(mylar.FOLDER_FORMAT, values)
 
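This is presumably where the missing year bit hardest: if comic['ComicYear'] ever came back as None, the concatenation above raises a TypeError, and with the '0000' placeholder it silently builds a misleading folder name; switching to the resolved SeriesYear avoids both. A small sketch of the failure and the fix (the directory and series names below are made up, and DESTINATION_DIR stands in for mylar.DESTINATION_DIR):

DESTINATION_DIR = "/comics"      # stand-in for mylar.DESTINATION_DIR
comicdir = "Elementals"          # stand-in for the sanitized series name

try:
    broken = DESTINATION_DIR + "/" + comicdir + " (" + None + ")"
except TypeError as e:
    print("concatenating a missing year fails:", e)

SeriesYear = "1984"              # year resolved by the fallback above
comlocation = DESTINATION_DIR + "/" + comicdir + " (" + SeriesYear + ")"
print(comlocation)               # /comics/Elementals (1984)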
@@ -216,7 +222,7 @@ def addComictoDB(comicid,mismatch=None,pullupd=None,imported=None,ogcname=None):
     controlValueDict = {"ComicID": comicid}
     newValueDict = {"ComicName": comic['ComicName'],
                     "ComicSortName": sortname,
-                    "ComicYear": comic['ComicYear'],
+                    "ComicYear": SeriesYear,
                     "ComicImage": ComicImage,
                     "Total": comicIssues,
                     "ComicVersion": comicVol,
@@ -51,7 +51,7 @@ def findComic(name, mode, issue, limityear=None):
     if any((c in chars) for c in name):
         name = '"'+name+'"'
 
-    print ("limityear: " + str(limityear))
+    #print ("limityear: " + str(limityear))
     if limityear is None: limityear = 'None'
 
     comicquery=name.replace(" ", "%20")
@@ -49,7 +49,7 @@ def GCDScraper(ComicName, ComicYear, Total, ComicID, quickmatch=None):
 
     cnt = int(cnt1 + cnt2)
 
-    print (str(cnt) + " results")
+    #print (str(cnt) + " results")
 
     resultName = []
     resultID = []
@@ -206,6 +206,12 @@ def GCDdetails(comseries, resultURL, vari_loop, ComicID, TotalIssues, issvariati
     except UnicodeDecodeError:
         logger.info("not working...aborting. Tell Evilhero.")
         return
+    #If CV doesn't have the Series Year (Stupid)...Let's store the Comics.org stated year just in case.
+    pyearit = soup.find("div", {"class" : "item_data"})
+    pyeartxt = pyearit.find(text=re.compile(r"Series"))
+    pyearst = pyeartxt.index('Series')
+    ParseYear = pyeartxt[int(pyearst)-5:int(pyearst)]
+
     parsed = soup.find("div", {"id" : "series_data"})
     #recent structure changes - need to adjust now
     subtxt3 = parsed.find("dd", {"id" : "publication_dates"})
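The ParseYear slicing is easier to see on a plain string: on a comics.org series page the item_data block contains text along the lines of "... 1984 Series ...", so the five characters immediately before "Series" are the four-digit year plus the separating space. The sample text below is illustrative; the live markup may differ.

pyeartxt = "Elementals (Comico, 1984 Series)"    # illustrative comics.org-style text
pyearst = pyeartxt.index('Series')
ParseYear = pyeartxt[int(pyearst)-5:int(pyearst)]
print(repr(ParseYear))                           # '1984 ' -- year plus trailing space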
@@ -423,6 +429,7 @@ def GCDdetails(comseries, resultURL, vari_loop, ComicID, TotalIssues, issvariati
     gcdinfo['totalissues'] = TotalIssues
     gcdinfo['ComicImage'] = gcdcover
     gcdinfo['resultPublished'] = resultPublished
+    gcdinfo['SeriesYear'] = ParseYear
     return gcdinfo
     ## -- end (GCD) -- ##
 
@@ -381,7 +381,7 @@ def pullitcheck(comic1off_name=None,comic1off_id=None):
         #print ("watchd: " + str(watchd))
         if watchd is None:
             break
-        if 'Present' in watchd[4]:
+        if 'Present' in watchd[4] or (helpers.now()[:4] in watchd[4]):
             # let's not even bother with comics that are in the Present.
             a_list.append(watchd[1])
             b_list.append(watchd[2])
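The widened test treats a series as worth checking not only when its publication range reads "Present" but also when the range mentions the current year, which is presumably how limited series published this year get picked up for the pull list again. helpers.now() is assumed here to return a timestamp string that begins with the four-digit year, so helpers.now()[:4] is simply the current year; the sketch below reproduces the check with the standard library (the sample ranges are made up):

from datetime import datetime

current_year = str(datetime.now().year)   # stands in for helpers.now()[:4]

samples = ["1986 - Present",              # explicitly ongoing
           "2010 - " + current_year,      # limited series ending this year
           "1995 - 1998"]                 # long finished
for published in samples:
    ongoing = 'Present' in published or (current_year in published)
    print(published, "->", "ongoing" if ongoing else "finished")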