1
0
Fork 0
mirror of https://github.com/evilhero/mylar synced 2025-03-19 01:55:21 +00:00

FIX: (#1013) Story Arcs now have a Refresh option which will refresh the issue data from ComicVine. The Arc must be added via the search in order for the Refresh option to appear (using a cbl file won't allow for a refresh currently), FIX: (#1023) Filenames that contain the wording 'Vol.' together with an Issue Year, or some filenames in a vYEAR format, could not be scanned in, FIX: When doing an Import of a Directory on Windows OS while running with a suppressed console, bad file descriptor errors would occur,

FIX: Unicode filenames will now accept 1/2, 1/4, 3/4 (all unicoded) within the actual filename when scanning (usually during manual post-processing), FIX: Blackhole method for nzb's was failing when trying to send to SABnzbd
This commit is contained in:
evilhero 2015-05-27 01:40:38 -04:00
parent 9a4a6b4bc5
commit 0c1e864099
12 changed files with 327 additions and 228 deletions

Binary file not shown.

After

Width:  |  Height:  |  Size: 6.7 KiB

View file

@ -84,7 +84,7 @@
%if result['haveit'] == "No":
%if type == 'story_arc':
<td class="add" nowrap="nowrap"><a href="addStoryArc?arcid=${result['comicid']}&storyarcname=${result['name'] |u}&storyarcyear=${result['comicyear']}&storyarcpublisher=${result['publisher'] |u}&storyarcissues=${result['issues']}&arclist=${result['arclist']}&desc=${result['description']}&image=${result['comicimage']}"><span class="ui-icon ui-icon-plus"></span> Add this Story Arc</a></td>
<td class="add" nowrap="nowrap"><a href="addStoryArc?arcid=${result['comicid']}&cvarcid=${result['cvarcid']}&storyarcname=${result['name'] |u}&storyarcyear=${result['comicyear']}&storyarcpublisher=${result['publisher'] |u}&storyarcissues=${result['issues']}&arclist=${result['arclist']}&desc=${result['description']}&image=${result['comicimage']}"><span class="ui-icon ui-icon-plus"></span> Add this Story Arc</a></td>
%else:
<td class="add" nowrap="nowrap"><a href="addComic?comicid=${result['comicid']}&comicname=${result['name'] |u}&comicyear=${result['comicyear']}&comicpublisher=${result['publisher'] |u}&comicimage=${result['comicimage']}&comicissues=${result['issues']}&imported=${imported}&ogcname=${ogcname}&serinfo=${serinfo}"><span class="ui-icon ui-icon-plus"></span> Add this Comic</a></td>
%endif

View file

@ -97,6 +97,9 @@
<td id="have"><span title="${item['percent']}"></span>${css}<div style="width:${item['percent']}%"><span class="progressbar-front-text">${item['Have']}/${item['Total']}</span></div></td>
<td id="options">
<a title="Remove from Story Arc Watchlist" onclick="doAjaxCall('removefromreadlist?StoryArcID=${item['StoryArcID']}',$(this),'table')" data-success="Sucessfully removed ${item['StoryArc']} from list."><img src="interfaces/default/images/skipped_icon.png" height="25" width="25" /></a>
%if item['CV_ArcID']:
<a title="Refresh Series" onclick="doAjaxCall('addStoryArc_thread?arcid=${item['StoryArcID']}&cvarcid=${item['CV_ArcID']}&storyarcname=${item['StoryArc']}&arcrefresh=True',$(this),'table')" data-success="Now refreshing ${item['StoryArc']}."><img src="interfaces/default/images/refresh.png" height="25" width="25" /></a>
%endif
</td>
</tr>
%endfor

View file

@ -15,6 +15,9 @@
<a id="menu_link_delete" href="#">Clear File Cache</a>
<a id="menu_link_refresh" onclick="doAjaxCall('ReadGetWanted?StoryArcID=${storyarcid}',$(this),'table')" data-success="Searching for Missing StoryArc Issues">Search for Missing</a>
<a id="menu_link_refresh" onclick="doAjaxCall('ArcWatchlist?StoryArcID=${storyarcid}',$(this),'table')" data-success="Searching for matches on Watchlist">Search for Watchlist matches</a>
%if cvarcid:
<a id="menu_link_refresh" onclick="doAjaxCall('addStoryArc_thread?arcid=${storyarcid}&cvarcid=${cvarcid}&storyarcname=${storyarcname}&arcrefresh=True',$(this),'table')" data-success="Refreshed Story Arc">Refresh Story Arc</a>
%endif
</div>
</div>
</%def>

View file

@ -284,4 +284,3 @@ class FailedProcessor(object):
myDB.upsert("failed", Vals, ctrlVal)
logger.info(module + ' Successfully marked as Failed.')

View file

@ -146,8 +146,9 @@ def listFiles(dir, watchcomic, Publisher, AlternateSearch=None, manual=None, sar
volrem = subit
vers4vol = volrem
break
elif subit.lower()[:3] == 'vol':
elif subit.lower()[:3] == 'vol' or subit.lower()[:4] == 'vol.':
tsubit = re.sub('vol', '', subit.lower())
tsubit = re.sub('vol.', '', subit.lower())
try:
if any([tsubit.isdigit(), len(tsubit) > 5]):
#if in format vol.2013 etc
@ -679,8 +680,8 @@ def listFiles(dir, watchcomic, Publisher, AlternateSearch=None, manual=None, sar
#justthedigits = item[jtd_len:]
logger.fdebug('[FILECHECKER] final jtd_len to prune [' + str(jtd_len) + ']')
logger.fdebug('[FILECHECKER] before title removed from FILENAME [' + str(item) + ']')
logger.fdebug('[FILECHECKER] after title removed from FILENAME [' + str(item[jtd_len:]) + ']')
logger.fdebug('[FILECHECKER] before title removed from FILENAME [' + item + ']')
logger.fdebug('[FILECHECKER] after title removed from FILENAME [' + item[jtd_len:] + ']')
logger.fdebug('[FILECHECKER] creating just the digits using SUBNAME, pruning first [' + str(jtd_len) + '] chars from [' + subname + ']')
justthedigits_1 = re.sub('#', '', subname[jtd_len:]).strip()
@ -703,15 +704,15 @@ def listFiles(dir, watchcomic, Publisher, AlternateSearch=None, manual=None, sar
if digitchk:
try:
#do the issue title check here
logger.fdebug('[FILECHECKER] Possible issue title is : ' + str(digitchk))
logger.fdebug('[FILECHECKER] Possible issue title is : ' + digitchk)
# see if it can float the digits
try:
st = digitchk.find('.')
logger.fdebug('st:' + str(st))
st_d = digitchk[:st]
logger.fdebug('st_d:' + str(st_d))
logger.fdebug('st_d:' + st_d)
st_e = digitchk[st +1:]
logger.fdebug('st_e:' + str(st_e))
logger.fdebug('st_e:' + st_e)
#x = int(float(st_d))
#logger.fdebug('x:' + str(x))
#validity check
@ -725,8 +726,13 @@ def listFiles(dir, watchcomic, Publisher, AlternateSearch=None, manual=None, sar
if digitchk.startswith('.'):
pass
else:
if len(justthedigits_1) >= len(digitchk) and len(digitchk) > 3:
logger.fdebug('[FILECHECKER] Removing issue title.')
# account for series in the format of Series - Issue#
if digitchk.startswith('-') and digitchk[1] == ' ':
logger.fdebug('[FILECHECKER] Detected hyphen (-) as a separator. Removing for comparison.')
digitchk = digitchk[2:]
justthedigits_1 = re.sub('- ', '', justthedigits_1).strip()
elif len(justthedigits_1) >= len(digitchk) and len(digitchk) > 3:
logger.fdebug('[FILECHECKER][CATCH-1] Removing issue title.')
justthedigits_1 = re.sub(digitchk, '', justthedigits_1).strip()
logger.fdebug('[FILECHECKER] After issue title removed [' + justthedigits_1 + ']')
titlechk = True
@ -734,7 +740,7 @@ def listFiles(dir, watchcomic, Publisher, AlternateSearch=None, manual=None, sar
issue_firstword = digitchk.split()[0]
splitit = subname.split()
splitst = len(splitit)
logger.fdebug('[FILECHECKER] splitit :' + str(splitit))
logger.fdebug('[FILECHECKER] splitit :' + splitit)
logger.fdebug('[FILECHECKER] splitst :' + str(len(splitit)))
orignzb = item
except:
@ -754,8 +760,8 @@ def listFiles(dir, watchcomic, Publisher, AlternateSearch=None, manual=None, sar
issue_firstword = digitchk.split()[0]
splitit = subname.split()
splitst = len(splitit)
logger.info('[FILECHECKER] splitit :' + str(splitit))
logger.info('[FILECHECKER] splitst :' + str(len(splitit)))
logger.fdebug('[FILECHECKER] splitit :' + splitit)
logger.fdebug('[FILECHECKER] splitst :' + str(len(splitit)))
orignzb = item
except:
pass #(revert this back if above except doesn't work)
@ -844,12 +850,28 @@ def listFiles(dir, watchcomic, Publisher, AlternateSearch=None, manual=None, sar
x = float(justthedigits)
#validity check
if x < 0:
logger.fdebug("I've encountered a negative issue #: " + str(justthedigits) + ". Trying to accomodate.")
logger.fdebug("I've encountered a negative issue #: " + justthedigits + ". Trying to accomodate.")
digitsvalid = "true"
else: raise ValueError
except ValueError, e:
logger.fdebug('Probably due to an incorrect match - I cannot determine the issue number from given issue #: ' + str(justthedigits))
if u'\xbd' in justthedigits:
justthedigits = re.sub(u'\xbd', '0.5', justthedigits).strip()
logger.fdebug('[FILECHECKER][UNICODE DETECTED] issue detected :' + u'\xbd')
digitsvalid = "true"
elif u'\xbc' in justthedigits:
justthedigits = re.sub(u'\xbc', '0.25', justthedigits).strip()
logger.fdebug('[FILECHECKER][UNICODE DETECTED] issue detected :' + u'\xbc')
digitsvalid = "true"
elif u'\xbe' in justthedigits:
justthedigits = re.sub(u'\xbe', '0.75', justthedigits).strip()
logger.fdebug('[FILECHECKER][UNICODE DETECTED] issue detected :' + u'\xbe')
digitsvalid = "true"
elif u'\u221e' in justthedigits:
#issnum = utf-8 will encode the infinity symbol without any help
logger.fdebug('[FILECHECKER][UNICODE DETECTED] issue detected :' + u'\u221e')
digitsvalid = "true"
else:
logger.fdebug('Probably due to an incorrect match - I cannot determine the issue number from given issue #: ' + justthedigits)
logger.fdebug('[FILECHECKER] final justthedigits [' + justthedigits + ']')
if digitsvalid == "false":

View file

@ -82,7 +82,7 @@ def Startit(searchName, searchIssue, searchYear, ComicVersion, IssDateFix):
"link": urlParse["href"],
"length": urlParse["length"],
"pubdate": feed.entries[countUp].updated})
countUp=countUp +1
countUp=countUp +1
logger.fdebug('keypair: ' + str(keyPair))

View file

@ -48,19 +48,14 @@ def libraryScan(dir=None, append=False, ComicID=None, ComicName=None, cron=None)
comic_list = []
comiccnt = 0
extensions = ('cbr', 'cbz')
extensions = ('cbr','cbz')
for r, d, f in os.walk(dir):
#for directory in d[:]:
# if directory.startswith("."):
# d.remove(directory)
for files in f:
if any(files.lower().endswith('.' + x.lower()) for x in extensions):
comic = files
comicpath = os.path.join(r, files)
comicsize = os.path.getsize(comicpath)
print "Comic: " + comic
print "Comic Path: " + comicpath
print "Comic Size: " + str(comicsize)
logger.fdebug('Comic: ' + comic + ' [' + comicpath + '] - ' + str(comicsize) + ' bytes')
# We need the unicode path to use for logging, inserting into database
unicode_comic_path = comicpath.decode(mylar.SYS_ENCODING, 'replace')
@ -73,7 +68,7 @@ def libraryScan(dir=None, append=False, ComicID=None, ComicName=None, cron=None)
comic_list.append(comic_dict)
logger.info("I've found a total of " + str(comiccnt) + " comics....analyzing now")
logger.info("comiclist: " + str(comic_list))
#logger.info("comiclist: " + str(comic_list))
myDB = db.DBConnection()
#let's load in the watchlist to see if we have any matches.
@ -136,38 +131,38 @@ def libraryScan(dir=None, append=False, ComicID=None, ComicName=None, cron=None)
# datemonth = {'one':1,'two':2,'three':3,'four':4,'five':5,'six':6,'seven':7,'eight':8,'nine':9,'ten':10,'eleven':$
# #search for number as text, and change to numeric
# for numbs in basnumbs:
# #print ("numbs:" + str(numbs))
# #logger.fdebug("numbs:" + str(numbs))
# if numbs in ComicName.lower():
# numconv = basnumbs[numbs]
# #print ("numconv: " + str(numconv))
# #logger.fdebug("numconv: " + str(numconv))
for i in comic_list:
print i['ComicFilename']
#if mylar.IMP_METADATA:
#logger.info('metatagging checking enabled.')
#if read tags is enabled during import, check here.
#if i['ComicLocation'].endswith('.cbz'):
# logger.info('Attempting to read tags present in filename: ' + str(i['ComicLocation']))
# issueinfo = helpers.IssueDetails(i['ComicLocation'])
# if issueinfo is None:
# pass
# else:
# logger.info('Successfully retrieved some tags. Lets see what I can figure out.')
# comicname = issueinfo[0]['series']
# logger.fdebug('Series Name: ' + comicname)
# issue_number = issueinfo[0]['issue_number']
# logger.fdebug('Issue Number: ' + str(issue_number))
# issuetitle = issueinfo[0]['title']
# logger.fdebug('Issue Title: ' + issuetitle)
# issueyear = issueinfo[0]['year']
# logger.fdebug('Issue Year: ' + str(issueyear))
# # if used by ComicTagger, Notes field will have the IssueID.
# issuenotes = issueinfo[0]['notes']
# logger.fdebug('Notes: ' + issuenotes)
logger.fdebug('Analyzing : ' + i['ComicFilename'])
if mylar.IMP_METADATA:
logger.info('metatagging checking enabled.')
#if read tags is enabled during import, check here.
if i['ComicLocation'].endswith('.cbz'):
logger.info('Attempting to read tags present in filename: ' + i['ComicLocation'])
issueinfo = helpers.IssueDetails(i['ComicLocation'])
if issueinfo is None:
pass
else:
logger.info('Successfully retrieved some tags. Lets see what I can figure out.')
comicname = issueinfo[0]['series']
logger.fdebug('Series Name: ' + comicname)
issue_number = issueinfo[0]['issue_number']
logger.fdebug('Issue Number: ' + str(issue_number))
issuetitle = issueinfo[0]['title']
logger.fdebug('Issue Title: ' + issuetitle)
issueyear = issueinfo[0]['year']
logger.fdebug('Issue Year: ' + str(issueyear))
# if used by ComicTagger, Notes field will have the IssueID.
issuenotes = issueinfo[0]['notes']
logger.fdebug('Notes: ' + issuenotes)
else:
logger.info(i['ComicLocation'] + ' is not in a metatagged format (cbz). Bypassing reading of the metatags')
comfilename = i['ComicFilename']
comlocation = i['ComicLocation']
#let's clean up the filename for matching purposes
@ -294,16 +289,16 @@ def libraryScan(dir=None, append=False, ComicID=None, ComicName=None, cron=None)
while (findcn < len(cnsplit)):
cname = cname + cs[findcn] + " "
findcn+=1
cname = cname[:len(cname) -1] # drop the end space...
print ("assuming name is : " + cname)
cname = cname[:len(cname)-1] # drop the end space...
logger.fdebug('assuming name is : ' + cname)
com_NAME = cname
print ("com_NAME : " + com_NAME)
logger.fdebug('com_NAME : ' + com_NAME)
yearmatch = "True"
else:
logger.fdebug('checking ' + m[cnt])
# we're assuming that the year is in brackets (and it should be damnit)
if m[cnt][:-2] == '19' or m[cnt][:-2] == '20':
print ("year detected: " + str(m[cnt]))
logger.fdebug('year detected: ' + str(m[cnt]))
ydetected = 'yes'
result_comyear = m[cnt]
elif m[cnt][:3].lower() in datelist:
@ -388,7 +383,7 @@ def libraryScan(dir=None, append=False, ComicID=None, ComicName=None, cron=None)
scount+=1
#elif ':' in splitit[n] or '-' in splitit[n]:
# splitrep = splitit[n].replace('-', '')
# print ("non-character keyword...skipped on " + splitit[n])
# logger.fdebug("non-character keyword...skipped on " + splitit[n])
elif str(splitit[n]).lower().startswith('v'):
logger.fdebug("possible versioning..checking")
#we hit a versioning # - account for it
@ -460,11 +455,11 @@ def libraryScan(dir=None, append=False, ComicID=None, ComicName=None, cron=None)
"comfilename": comfilename,
"comlocation": comlocation.decode(mylar.SYS_ENCODING)
})
logger.fdebug('import_by_ids: ' + str(import_by_comicids))
#logger.fdebug('import_by_ids: ' + str(import_by_comicids))
if len(watch_kchoice) > 0:
watchchoice['watchlist'] = watch_kchoice
print ("watchchoice: " + str(watchchoice))
#logger.fdebug("watchchoice: " + str(watchchoice))
logger.info("I have found " + str(watchfound) + " out of " + str(comiccnt) + " comics for series that are being watched.")
wat = 0
@ -472,25 +467,25 @@ def libraryScan(dir=None, append=False, ComicID=None, ComicName=None, cron=None)
if watchfound > 0:
if mylar.IMP_MOVE:
logger.info("You checked off Move Files...so that's what I'm going to do")
logger.info('You checked off Move Files...so that\'s what I am going to do')
#check to see if Move Files is enabled.
#if not being moved, set the archive bit.
print("Moving files into appropriate directory")
while (wat < watchfound):
logger.fdebug('Moving files into appropriate directory')
while (wat < watchfound):
watch_the_list = watchchoice['watchlist'][wat]
watch_comlocation = watch_the_list['ComicLocation']
watch_comicid = watch_the_list['ComicID']
watch_comicname = watch_the_list['ComicName']
watch_comicyear = watch_the_list['ComicYear']
watch_comiciss = watch_the_list['ComicIssue']
print ("ComicLocation: " + str(watch_comlocation))
logger.fdebug('ComicLocation: ' + watch_comlocation)
orig_comlocation = watch_the_list['OriginalLocation']
orig_filename = watch_the_list['OriginalFilename']
print ("Orig. Location: " + str(orig_comlocation))
print ("Orig. Filename: " + str(orig_filename))
orig_filename = watch_the_list['OriginalFilename']
logger.fdebug('Orig. Location: ' + orig_comlocation)
logger.fdebug('Orig. Filename: ' + orig_filename)
#before moving check to see if Rename to Mylar structure is enabled.
if mylar.IMP_RENAME:
print("Renaming files according to configuration details : " + str(mylar.FILE_FORMAT))
logger.fdebug('Renaming files according to configuration details : ' + str(mylar.FILE_FORMAT))
renameit = helpers.rename_param(watch_comicid, watch_comicname, watch_comicyear, watch_comiciss)
nfilename = renameit['nfilename']
@ -498,12 +493,12 @@ def libraryScan(dir=None, append=False, ComicID=None, ComicName=None, cron=None)
if str(watch_comicid) not in comicids:
comicids.append(watch_comicid)
else:
print("Renaming files not enabled, keeping original filename(s)")
logger.fdebug('Renaming files not enabled, keeping original filename(s)')
dst_path = os.path.join(watch_comlocation, orig_filename)
#os.rename(os.path.join(self.nzb_folder, str(ofilename)), os.path.join(self.nzb_folder,str(nfilename + ext)))
#src = os.path.join(, str(nfilename + ext))
print ("I'm going to move " + str(orig_comlocation) + " to .." + str(dst_path))
logger.fdebug('I am going to move ' + orig_comlocation + ' to ' + dst_path)
try:
shutil.move(orig_comlocation, dst_path)
except (OSError, IOError):
@ -515,16 +510,16 @@ def libraryScan(dir=None, append=False, ComicID=None, ComicName=None, cron=None)
watch_the_list = watchchoice['watchlist'][wat]
watch_comicid = watch_the_list['ComicID']
watch_issue = watch_the_list['ComicIssue']
print ("ComicID: " + str(watch_comicid))
print ("Issue#: " + str(watch_issue))
logger.fdebug('ComicID: ' + str(watch_comicid))
logger.fdebug('Issue#: ' + str(watch_issue))
issuechk = myDB.selectone("SELECT * from issues where ComicID=? AND INT_IssueNumber=?", [watch_comicid, watch_issue]).fetchone()
if issuechk is None:
print ("no matching issues for this comic#")
logger.fdebug('No matching issues for this comic#')
else:
print("...Existing status: " + str(issuechk['Status']))
logger.fdebug('...Existing status: ' + str(issuechk['Status']))
control = {"IssueID": issuechk['IssueID']}
values = {"Status": "Archived"}
print ("...changing status of " + str(issuechk['Issue_Number']) + " to Archived ")
logger.fdebug('...changing status of ' + str(issuechk['Issue_Number']) + ' to Archived ')
myDB.upsert("issues", values, control)
if str(watch_comicid) not in comicids:
comicids.append(watch_comicid)
@ -533,14 +528,14 @@ def libraryScan(dir=None, append=False, ComicID=None, ComicName=None, cron=None)
else:
c_upd = len(comicids)
c = 0
while (c < c_upd):
print ("Rescanning.. " + str(c))
updater.forceRescan(c)
while (c < c_upd ):
logger.fdebug('Rescanning.. ' + str(c))
updater.forceRescan(c)
if not len(import_by_comicids):
return "Completed"
if len(import_by_comicids) > 0:
import_comicids['comic_info'] = import_by_comicids
print ("import comicids: " + str(import_by_comicids))
logger.fdebug('import comicids: ' + str(import_by_comicids))
return import_comicids, len(import_by_comicids)
@ -596,5 +591,3 @@ def scanLibrary(scan=None, queue=None):
valreturn.append({"somevalue": 'self.ie',
"result": 'success'})
return queue.put(valreturn)
#raise cherrypy.HTTPRedirect("importResults")

View file

@ -80,11 +80,12 @@ def pullsearch(comicapi, comicquery, offset, explicit, type):
def findComic(name, mode, issue, limityear=None, explicit=None, type=None):
#with mb_lock:
comiclist = []
#with mb_lock:
comicResults = None
comicLibrary = listLibrary()
comiclist = []
arcinfolist = []
chars = set('!?*')
if any((c in chars) for c in name):
name = '"' +name +'"'
@ -197,102 +198,37 @@ def findComic(name, mode, issue, limityear=None, explicit=None, type=None):
xmlid = result.getElementsByTagName('id')[0].firstChild.wholeText
if xmlid is not None:
#respawn to the exact id for the story arc and count the # of issues present.
ARCPULL_URL = mylar.CVURL + 'story_arc/4045-' + str(xmlid) + '/?api_key=' + str(comicapi) + '&field_list=issues,name,first_appeared_in_issue,deck,image&format=xml&offset=0'
logger.fdebug('arcpull_url:' + str(ARCPULL_URL))
if mylar.CVAPI_COUNT == 0 or mylar.CVAPI_COUNT >= mylar.CVAPI_MAX:
cvapi_check()
try:
file = urllib2.urlopen(ARCPULL_URL)
except urllib2.HTTPError, err:
logger.error('err : ' + str(err))
logger.error('There was a major problem retrieving data from ComicVine - on their end.')
return
mylar.CVAPI_COUNT +=1
arcdata = file.read()
file.close()
arcdom = parseString(arcdata)
try:
logger.fdebug('story_arc ascension')
issuecount = len(arcdom.getElementsByTagName('issue'))
issuedom = arcdom.getElementsByTagName('issue')
isc = 0
arclist = ''
for isd in issuedom:
zeline = isd.getElementsByTagName('id')
isdlen = len(zeline)
isb = 0
while (isb < isdlen):
if isc == 0:
arclist = str(zeline[isb].firstChild.wholeText).strip()
else:
arclist += '|' + str(zeline[isb].firstChild.wholeText).strip()
isb+=1
isc+=1
except:
logger.fdebug('unable to retrive issue count - nullifying value.')
issuecount = 0
try:
firstid = None
arcyear = None
fid = len (arcdom.getElementsByTagName('id'))
fi = 0
while (fi < fid):
if arcdom.getElementsByTagName('id')[fi].parentNode.nodeName == 'first_appeared_in_issue':
if not arcdom.getElementsByTagName('id')[fi].firstChild.wholeText == xmlid:
logger.fdebug('hit it.')
firstid = arcdom.getElementsByTagName('id')[fi].firstChild.wholeText
break # - dont' break out here as we want to gather ALL the issue ID's since it's here
fi+=1
logger.fdebug('firstid: ' + str(firstid))
if firstid is not None:
firstdom = cv.pulldetails(comicid=None, type='firstissue', issueid=firstid)
logger.fdebug('success')
arcyear = cv.GetFirstIssue(firstid, firstdom)
except:
logger.fdebug('Unable to retrieve first issue details. Not caclulating at this time.')
if (arcdom.getElementsByTagName('image')[0].childNodes[0].nodeValue) is None:
xmlimage = arcdom.getElementsByTagName('super_url')[0].firstChild.wholeText
else:
xmlimage = "cache/blankcover.jpg"
try:
xmldesc = arcdom.getElementsByTagName('desc')[0].firstChild.wholeText
except:
xmldesc = "None"
try:
xmldeck = arcdom.getElementsByTagName('deck')[0].firstChild.wholeText
except:
xmldeck = "None"
if xmlid in comicLibrary:
haveit = comicLibrary[xmlid]
else:
haveit = "No"
comiclist.append({
'name': xmlTag,
'comicyear': arcyear,
'comicid': xmlid,
'url': xmlurl,
'issues': issuecount,
'comicimage': xmlimage,
'publisher': xmlpub,
'description': xmldesc,
'deck': xmldeck,
'arclist': arclist,
'haveit': haveit
})
logger.fdebug('IssueID\'s that are a part of ' + xmlTag + ' : ' + str(arclist))
arcinfolist = storyarcinfo(xmlid)
comiclist.append({
'name': xmlTag,
'comicyear': arcinfolist['comicyear'],
'comicid': xmlid,
'cvarcid': xmlid,
'url': xmlurl,
'issues': arcinfolist['issues'],
'comicimage': arcinfolist['comicimage'],
'publisher': xmlpub,
'description': arcinfolist['description'],
'deck': arcinfolist['deck'],
'arclist': arcinfolist['arclist'],
'haveit': arcinfolist['haveit']
})
else:
comiclist.append({
'name': xmlTag,
'comicyear': arcyear,
'comicid': xmlid,
'url': xmlurl,
'issues': issuecount,
'comicimage': xmlimage,
'publisher': xmlpub,
'description': xmldesc,
'deck': xmldeck,
'arclist': arclist,
'haveit': haveit
})
logger.fdebug('IssueID\'s that are a part of ' + xmlTag + ' : ' + str(arclist))
else:
xmlcnt = result.getElementsByTagName('count_of_issues')[0].firstChild.wholeText
#here we can determine what called us, and either start gathering all issues or just limited ones.
@ -396,3 +332,111 @@ def findComic(name, mode, issue, limityear=None, explicit=None, type=None):
countResults = countResults + 100
return comiclist, explicit
def storyarcinfo(xmlid):
comicLibrary = listLibrary()
arcinfo = {}
if mylar.COMICVINE_API == 'None' or mylar.COMICVINE_API is None or mylar.COMICVINE_API == mylar.DEFAULT_CVAPI:
logger.warn('You have not specified your own ComicVine API key - alot of things will be limited. Get your own @ http://api.comicvine.com.')
comicapi = mylar.DEFAULT_CVAPI
else:
comicapi = mylar.COMICVINE_API
#respawn to the exact id for the story arc and count the # of issues present.
ARCPULL_URL = mylar.CVURL + 'story_arc/4045-' + str(xmlid) + '/?api_key=' + str(comicapi) + '&field_list=issues,name,first_appeared_in_issue,deck,image&format=xml&offset=0'
logger.fdebug('arcpull_url:' + str(ARCPULL_URL))
if mylar.CVAPI_COUNT == 0 or mylar.CVAPI_COUNT >= mylar.CVAPI_MAX:
cvapi_check()
try:
file = urllib2.urlopen(ARCPULL_URL)
except urllib2.HTTPError, err:
logger.error('err : ' + str(err))
logger.error('There was a major problem retrieving data from ComicVine - on their end.')
return
mylar.CVAPI_COUNT +=1
arcdata = file.read()
file.close()
arcdom = parseString(arcdata)
try:
logger.fdebug('story_arc ascension')
issuecount = len( arcdom.getElementsByTagName('issue') )
issuedom = arcdom.getElementsByTagName('issue')
isc = 0
arclist = ''
for isd in issuedom:
zeline = isd.getElementsByTagName('id')
isdlen = len( zeline )
isb = 0
while ( isb < isdlen):
if isc == 0:
arclist = str(zeline[isb].firstChild.wholeText).strip()
else:
arclist += '|' + str(zeline[isb].firstChild.wholeText).strip()
isb+=1
isc+=1
except:
logger.fdebug('unable to retrive issue count - nullifying value.')
issuecount = 0
try:
firstid = None
arcyear = None
fid = len ( arcdom.getElementsByTagName('id') )
fi = 0
while (fi < fid):
if arcdom.getElementsByTagName('id')[fi].parentNode.nodeName == 'first_appeared_in_issue':
if not arcdom.getElementsByTagName('id')[fi].firstChild.wholeText == xmlid:
logger.fdebug('hit it.')
firstid = arcdom.getElementsByTagName('id')[fi].firstChild.wholeText
break # - dont' break out here as we want to gather ALL the issue ID's since it's here
fi+=1
logger.fdebug('firstid: ' + str(firstid))
if firstid is not None:
firstdom = cv.pulldetails(comicid=None, type='firstissue', issueid=firstid)
logger.fdebug('success')
arcyear = cv.GetFirstIssue(firstid,firstdom)
except:
logger.fdebug('Unable to retrieve first issue details. Not caclulating at this time.')
if (arcdom.getElementsByTagName('image')[0].childNodes[0].nodeValue) is None:
xmlimage = arcdom.getElementsByTagName('super_url')[0].firstChild.wholeText
else:
xmlimage = "cache/blankcover.jpg"
try:
xmldesc = arcdom.getElementsByTagName('desc')[0].firstChild.wholeText
except:
xmldesc = "None"
try:
xmldeck = arcdom.getElementsByTagName('deck')[0].firstChild.wholeText
except:
xmldeck = "None"
if xmlid in comicLibrary:
haveit = comicLibrary[xmlid]
else:
haveit = "No"
arcinfo = {
#'name': xmlTag, #theese four are passed into it only when it's a new add
#'url': xmlurl, #needs to be modified for refreshing to work completely.
#'publisher': xmlpub,
'comicyear': arcyear,
'comicid': xmlid,
'issues': issuecount,
'comicimage': xmlimage,
'description': xmldesc,
'deck': xmldeck,
'arclist': arclist,
'haveit': haveit
}
return arcinfo

View file

@ -32,6 +32,7 @@ from xml.dom.minidom import parseString
import urllib2
import email.utils
import datetime
import shutil
from wsgiref.handlers import format_date_time
def search_init(ComicName, IssueNumber, ComicYear, SeriesYear, Publisher, IssueDate, StoreDate, IssueID, AlternateSearch=None, UseFuzzy=None, ComicVersion=None, SARC=None, IssueArcID=None, mode=None, rsscheck=None, ComicID=None, manualsearch=None, filesafe=None):
@ -1716,7 +1717,7 @@ def searcher(nzbprov, nzbname, comicinfo, link, IssueID, ComicID, tmpprov, direc
if payload is None:
logger.info('Download URL: ' + str(down_url) + ' [VerifySSL:' + str(verify) + ']')
else:
logger.info('Download URL: ' + down_url + urllib.urlencode(payload) + ' [VerifySSL:' + str(verify) + ']')
logger.info('Download URL: ' + down_url + '?' + urllib.urlencode(payload) + ' [VerifySSL:' + str(verify) + ']')
import lib.requests as requests

View file

@ -993,11 +993,11 @@ def forceRescan(ComicID, archive=None, module=None):
logger.fdebug(module + ' Matched...issue: ' + rescan['ComicName'] + '#' + reiss['Issue_Number'] + ' --- ' + str(int_iss))
havefiles+=1
haveissue = "yes"
isslocation = str(tmpfc['ComicFilename'])
isslocation = tmpfc['ComicFilename']
issSize = str(tmpfc['ComicSize'])
logger.fdebug(module + ' .......filename: ' + str(isslocation))
logger.fdebug(module + ' .......filesize: ' + str(tmpfc['ComicSize']))
# to avoid duplicate issues which screws up the count...let's store the filename issues then
logger.fdebug(module + ' .......filename: ' + isslocation)
logger.fdebug(module + ' .......filesize: ' + str(tmpfc['ComicSize']))
# to avoid duplicate issues which screws up the count...let's store the filename issues then
# compare earlier...
issuedupechk.append({'fcdigit': fcdigit,
'filename': tmpfc['ComicFilename'],

View file

@ -334,37 +334,66 @@ class WebInterface(object):
raise cherrypy.HTTPRedirect("comicDetails?ComicID=%s" % comicid)
addbyid.exposed = True
def addStoryArc(self, storyarcname, storyarcyear, storyarcpublisher, storyarcissues, arcid, arclist, desc, image):
def addStoryArc_thread(self, **kwargs):
threading.Thread(target=self.addStoryArc, kwargs=kwargs).start()
addStoryArc_thread.exposed = True
def addStoryArc(self, arcid, arcrefresh=False, cvarcid=None, arclist=None, storyarcname=None, storyarcyear=None, storyarcpublisher=None, storyarcissues=None, desc=None, image=None):
# used when a choice is selected to 'add story arc' via the searchresults screen (via the story arc search).
# arclist contains ALL the issueid's in sequence, along with the issue titles.
# call the function within cv.py to grab all the issueid's and return all the issue data
module = '[STORY ARC]'
myDB = db.DBConnection()
#check if it already exists.
arc_chk = myDB.selectone('SELECT * FROM readinglist WHERE CV_ArcID=?', [arcid]).fetchone()
if arc_chk is None:
logger.fdebug(module + ' No match in db based on ComicVine ID. Making sure and checking against Story Arc Name.')
arc_chk = myDB.selectone('SELECT * FROM readinglist WHERE StoryArc=?', [storyarcname]).fetchone()
if arc_chk is not None:
logger.warn(module + ' ' + storyarcname + ' already exists on your Story Arc Watchlist.')
raise cherrypy.HTTPRedirect("readlist")
if cvarcid is None:
arc_chk = myDB.select('SELECT * FROM readinglist WHERE StoryArcID=?', [arcid])
else:
logger.warn(module + ' ' + storyarcname + ' already exists on your Story Arc Watchlist.')
raise cherrypy.HTTPRedirect("readlist")
arc_chk = myDB.select('SELECT * FROM readinglist WHERE CV_ArcID=?', [cvarcid])
if arc_chk is None:
if arcrefresh:
logger.warn(module + ' Unable to retrieve Story Arc ComicVine ID from the db. Unable to refresh Story Arc at this time. You probably have to delete/readd the story arc this one time for Refreshing to work properly.')
return
else:
logger.fdebug(module + ' No match in db based on ComicVine ID. Making sure and checking against Story Arc Name.')
arc_chk = myDB.select('SELECT * FROM readinglist WHERE StoryArc=?', [storyarcname])
if arc_chk is None:
logger.warn(module + ' ' + storyarcname + ' already exists on your Story Arc Watchlist!')
raise cherrypy.HTTPRedirect("readlist")
else:
if arcrefresh: #cvarcid must be present here as well..
logger.info(module + '[' + str(arcid) + '] Successfully found Story Arc ComicVine ID [4045-' + str(cvarcid) + '] within db. Preparing to refresh Story Arc.')
# we need to store the existing arc values that are in the db, so we don't create duplicate entries or mess up items.
iss_arcids = []
for issarc in arc_chk:
iss_arcids.append({"IssueArcID": issarc['IssueArcID'],
"IssueID": issarc['IssueID']})
arcinfo = mb.storyarcinfo(cvarcid)
if len(arcinfo) > 1:
arclist = arcinfo['arclist']
else:
logger.warn(module + ' Unable to retrieve issue details at this time. Something is probably wrong.')
return
# else:
# logger.warn(module + ' ' + storyarcname + ' already exists on your Story Arc Watchlist.')
# raise cherrypy.HTTPRedirect("readlist")
arc_results = mylar.cv.getComic(comicid=None, type='issue', arcid=arcid, arclist=arclist)
logger.fdebug(module + ' Arcresults: ' + str(arc_results))
if len(arc_results) > 0:
import random
issuedata = []
storyarcid = str(random.randint(1000, 9999)) + str(storyarcissues)
if storyarcissues is None:
storyarcissues = len(arc_results)
if arcid is None:
storyarcid = str(random.randint(1000,9999)) + str(storyarcissues)
else:
storyarcid = arcid
n = 0
cidlist = ''
iscnt = int(storyarcissues)
while (n <= iscnt):
try:
arcval = arc_results['issuechoice'][n]
#print arcval
except IndexError:
break
comicname = arcval['ComicName']
@ -376,7 +405,14 @@ class WebInterface(object):
cidlist += str(comicid)
else:
cidlist += '|' + str(comicid)
st_issueid = str(storyarcid) + "_" + str(random.randint(1000, 9999))
#don't recreate the st_issueid if it's a refresh and the issueid already exists (will create duplicates otherwise)
st_issueid = None
if arcrefresh:
for aid in iss_arcids:
if aid['IssueID'] == issid:
st_issueid = aid['IssueArcID']
if st_issueid is None:
st_issueid = str(storyarcid) + "_" + str(random.randint(1000,9999))
issnum = arcval['Issue_Number']
issdate = str(arcval['Issue_Date'])
storedate = str(arcval['Store_Date'])
@ -491,7 +527,6 @@ class WebInterface(object):
n+=1
comicid_results = mylar.cv.getComic(comicid=None, type='comicyears', comicidlist=cidlist)
#logger.info('comicid_results: ' + str(comicid_results))
logger.fdebug(module + ' Initiating issue updating - just the info')
@ -524,15 +559,18 @@ class WebInterface(object):
"IssueDate": AD['IssueDate'],
"StoreDate": AD['ReleaseDate'],
"SeriesYear": seriesYear,
"IssuePublisher": issuePublisher}
"IssuePublisher": issuePublisher,
"CV_ArcID": arcid}
myDB.upsert("readinglist", newVals, newCtrl)
#run the Search for Watchlist matches now.
logger.fdebug(module + ' Now searching your watchlist for matches belonging to this story arc.')
self.ArcWatchlist(storyarcid)
raise cherrypy.HTTPRedirect("detailStoryArc?StoryArcID=%s&StoryArcName=%s" % (storyarcid, storyarcname))
if arcrefresh:
return
else:
raise cherrypy.HTTPRedirect("detailStoryArc?StoryArcID=%s&StoryArcName=%s" % (storyarcid, storyarcname))
addStoryArc.exposed = True
def wanted_Export(self):
@ -803,14 +841,12 @@ class WebInterface(object):
logger.fdebug('checking annual db')
for annthis in annual_load:
if not any(d['ReleaseComicID'] == annthis['ReleaseComicID'] for d in annload):
#print 'matched on annual'
annload.append({
'ReleaseComicID': annthis['ReleaseComicID'],
'ReleaseComicName': annthis['ReleaseComicName'],
'ComicID': annthis['ComicID'],
'ComicName': annthis['ComicName']
})
#print 'added annual'
issues += annual_load #myDB.select('SELECT * FROM annuals WHERE ComicID=?', [ComicID])
#store the issues' status for a given comicid, after deleting and readding, flip the status back to$
logger.fdebug("Deleting all issue data.")
@ -1383,12 +1419,10 @@ class WebInterface(object):
failed_handling.exposed = True
def archiveissue(self, IssueID, comicid):
print 'marking issue : ' + str(IssueID)
myDB = db.DBConnection()
issue = myDB.selectone('SELECT * FROM issues WHERE IssueID=?', [IssueID]).fetchone()
annchk = 'no'
if issue is None:
print 'issue is none'
if mylar.ANNUALS_ON:
issann = myDB.selectone('SELECT * FROM annuals WHERE IssueID=?', [IssueID]).fetchone()
comicname = issann['ReleaseComicName']
@ -1396,11 +1430,8 @@ class WebInterface(object):
annchk = 'yes'
comicid = issann['ComicID']
else:
print 'issue not none'
comicname = issue['ComicName']
print comicname
issue = issue['Issue_Number']
print issue
logger.info(u"Marking " + comicname + " issue # " + str(issue) + " as archived...")
controlValueDict = {'IssueID': IssueID}
newValueDict = {'Status': 'Archived'}
@ -1778,7 +1809,6 @@ class WebInterface(object):
for skippy in skipped2:
mvcontroldict = {"IssueID": skippy['IssueID']}
mvvalues = {"Status": "Wanted"}
#print ("Changing issue " + str(skippy['Issue_Number']) + " to Wanted.")
myDB.upsert("issues", mvvalues, mvcontroldict)
issuestowanted.append(skippy['IssueID'])
issuesnumwant.append(skippy['Issue_Number'])
@ -1875,7 +1905,6 @@ class WebInterface(object):
manageComics.exposed = True
def manageIssues(self, **kwargs):
#print kwargs
status = kwargs['status']
results = []
myDB = db.DBConnection()
@ -2052,14 +2081,20 @@ class WebInterface(object):
"percent": percent,
"Have": havearc,
"SpanYears": spanyears,
"Total": al['TotalIssues']})
"Total": al['TotalIssues'],
"CV_ArcID": al['CV_ArcID']})
return serve_template(templatename="storyarc.html", title="Story Arcs", arclist=arclist)
storyarc_main.exposed = True
def detailStoryArc(self, StoryArcID, StoryArcName):
myDB = db.DBConnection()
arcinfo = myDB.select("SELECT * from readinglist WHERE StoryArcID=? order by ReadingOrder ASC", [StoryArcID])
return serve_template(templatename="storyarc_detail.html", title="Detailed Arc list", readlist=arcinfo, storyarcname=StoryArcName, storyarcid=StoryArcID)
try:
cvarcid = arcinfo[0]['CV_ArcID']
except:
cvarcid = None
return serve_template(templatename="storyarc_detail.html", title="Detailed Arc list", readlist=arcinfo, storyarcname=StoryArcName, storyarcid=StoryArcID, cvarcid=cvarcid)
detailStoryArc.exposed = True
def markreads(self, action=None, **args):
@ -2309,14 +2344,15 @@ class WebInterface(object):
raise cherrypy.HTTPRedirect("detailStoryArc?StoryArcID=%s&StoryArcName=%s" % (storyarcid, storyarc))
importReadlist.exposed = True
#Story Arc Ascension...welcome to the next level :)
def ArcWatchlist(self, StoryArcID=None):
def ArcWatchlist(self,StoryArcID=None):
myDB = db.DBConnection()
if StoryArcID:
ArcWatch = myDB.select("SELECT * FROM readinglist WHERE StoryArcID=?", [StoryArcID])
else:
ArcWatch = myDB.select("SELECT * FROM readinglist")
if ArcWatch is None: logger.info("No Story Arcs to search")
if ArcWatch is None:
logger.info("No Story Arcs to search")
else:
Comics = myDB.select("SELECT * FROM comics")
@ -2324,15 +2360,15 @@ class WebInterface(object):
wantedlist = []
sarc_title = None
showonreadlist = 1 # 0 won't show storyarcissues on readinglist main page, 1 will show
showonreadlist = 1 # 0 won't show storyarcissues on readinglist main page, 1 will show
for arc in ArcWatch:
logger.fdebug("arc: " + arc['storyarc'] + " : " + arc['ComicName'] + " : " + arc['IssueNumber'])
sarc_title = arc['StoryArc']
logger.fdebug("arc: " + arc['StoryArc'] + " : " + arc['ComicName'] + " : " + arc['IssueNumber'])
#cycle through the story arcs here for matches on the watchlist
if sarc_title != arc['storyarc']:
if sarc_title != arc['StoryArc']:
if mylar.STORYARCDIR:
dstloc = os.path.join(mylar.DESTINATION_DIR, 'StoryArcs', arc['storyarc'])
dstloc = os.path.join(mylar.DESTINATION_DIR, 'StoryArcs', arc['StoryArc'])
else:
dstloc = os.path.join(mylar.DESTINATION_DIR, mylar.GRABBAG_DIR)
@ -2349,7 +2385,7 @@ class WebInterface(object):
mod_arc = re.sub(r'\s', '', mod_arc)
matcheroso = "no"
for comic in Comics:
logger.fdebug("comic: " + comic['ComicName'])
#logger.fdebug("comic: " + comic['ComicName'])
mod_watch = re.sub('[\:\,\'\/\-\&\%\$\#\@\!\*\+\.]', '', comic['ComicName'])
mod_watch = re.sub('\\bthe\\b', '', mod_watch.lower())
mod_watch = re.sub('\\band\\b', '', mod_watch.lower())
@ -2384,8 +2420,8 @@ class WebInterface(object):
logger.fdebug("Issue: " + str(arc['IssueNumber']))
logger.fdebug("IssueArcID: " + str(arc['IssueArcID']))
#gather the matches now.
arc_match.append({
"match_storyarc": arc['storyarc'],
arc_match.append({
"match_storyarc": arc['StoryArc'],
"match_name": arc['ComicName'],
"match_id": isschk['ComicID'],
"match_issue": arc['IssueNumber'],
@ -2403,7 +2439,7 @@ class WebInterface(object):
"IssueYear": arc['IssueYear']})
if mylar.STORYARCDIR:
dstloc = os.path.join(mylar.DESTINATION_DIR, 'StoryArcs', arc['storyarc'])
dstloc = os.path.join(mylar.DESTINATION_DIR, 'StoryArcs', arc['StoryArc'])
else:
dstloc = mylar.GRABBAG_DIR
logger.fdebug('destination location set to : ' + dstloc)
@ -2411,7 +2447,8 @@ class WebInterface(object):
filechk = filechecker.listFiles(dstloc, arc['ComicName'], Publisher=None, sarc='true')
fn = 0
fccnt = filechk['comiccount']
while (fn < fccnt):
logger.fdebug('files in directory: ' + str(fccnt))
while (fn < fccnt) and fccnt != 0:
haveissue = "no"
issuedupe = "no"
try:
@ -2436,10 +2473,7 @@ class WebInterface(object):
myDB.upsert("readinglist", newVal, ctrlVal)
fn+=1
sarc_title = arc['storyarc']
logger.fdebug("we matched on " + str(len(arc_match)) + " issues")
for m_arc in arc_match:
#now we cycle through the issues looking for a match.
issue = myDB.selectone("SELECT * FROM issues where ComicID=? and Issue_Number=?", [m_arc['match_id'], m_arc['match_issue']]).fetchone()
@ -2463,8 +2497,8 @@ class WebInterface(object):
"ComicID": m_arc['match_id']}
myDB.upsert("readlist", shownewVal, showctrlVal)
myDB.upsert("readinglist", newVal, ctrlVal)
logger.info("Already have " + issue['ComicName'] + " :# " + str(issue['Issue_Number']))
myDB.upsert("readinglist",newVal,ctrlVal)
logger.fdebug("Already have " + issue['ComicName'] + " :# " + str(issue['Issue_Number']))
if issue['Status'] == 'Downloaded':
issloc = os.path.join(m_arc['match_filedirectory'], issue['Location'])
logger.fdebug('source location set to : ' + issloc)