IMP: (#844) Annuals now deleted from the table on a Delete Series command
IMP: (#837) Better volume detection when searching for issues
IMP: (#842)(#808) Added some handling for issue titles when searching (it's not perfect, but it works for most)
IMP: (#836) Added .cb7 for filechecking purposes only
IMP: (#830) Issue numbers that are -1 can now be renamed
FIX: 'Bi-annual' handling now fixed, along with some other improvements to annual handling
IMP: Added a version check for autoProcessComics and ComicRN to help warn users of changes and the need to update these files, since they usually sit outside the mylar git directory when in use
FIX: (#840) Boxcar2 should be working again
FIX: (#845) If a search returned a filename with no distinction between the issue number and the issue title and/or extra information, it would error out if any of the words contained NOW
IMP: (#823) Added better detection of NZBGet parameters when using ComicRN.py

evilhero 2014-10-06 14:10:36 -04:00
parent 44f35de4f2
commit 9066ebb965
24 changed files with 746 additions and 280 deletions

View File

@ -148,16 +148,16 @@ div#main { margin: 0; padding: 80px 0 0 0; }
.comictable td#maindetails { width: 200px; padding: 10px; }
.comictable td#middle { vertical-align: middle; }
table#series_table { background-color: white; width: 100%; padding: 20px; }
table#series_table { background-color: black; width: 100%; padding: 10px; }
table#series_table th#publisher { text-align: left; min-width: 50px; }
table#series_table th#name { text-align: left; min-width: 250px; }
table#series_table th#year { text-align: left; min-width: 25px; }
table#series_table th#year { text-align: left; max-width: 25px; }
table#series_table th#issue { text-align: left; min-width: 100px; }
table#series_table th#published { vertical-align: middle; text-align: left; min-width:40px; }
table#series_table th#have { text-align: center; }
table#series_table th#status { vertical-align: middle; text-align: left; min-width: 25px; }
table#series_table th#active { vertical-align: middle; text-align: left; min-width: 20px; }
table#series_table th#active { vertical-align: middle; text-align: left; max-width: 20px; }
table#series_table td#publisher { text-align: left; max-width: 100px; }
table#series_table td#name { text-align: left; max-width: 250px; }

View File

@ -136,10 +136,10 @@
<div>
<label><big>Directory</big><br/><norm>${comic['ComicLocation']}</norm></label>
</div>
<div>
<!-- <div>
<label><big>Description:</big></label><br/>
<!-- <norm>${comic['Description']}</norm> -->
</div>
<small>${comic['Description']}</small>
--> </div>
</fieldset>
</td>
@ -298,7 +298,9 @@
<label for="Downloaded" class="checkbox inline Downloaded"><input type="checkbox" id="Downloaded" checked="checked" /> Downloaded: <b>${isCounts['Downloaded']}</b></label>
<label for="Skipped" class="checkbox inline Skipped"><input type="checkbox" id="Skipped" checked="checked" /> Skipped: <b>${isCounts['Skipped']}</b></label>
<label for="Ignored" class="checkbox inline Ignored"><input type="checkbox" id="Ignored" checked="checked" /> Ignored: <b>${isCounts['Ignored']}</b></label>
<label for="Failed" class="checkbox inline Failed"><input type="checkbox" id="Failed" checked="checked" /> Failed: <b>${isCounts['Failed']}</b></label>
%if mylar.FAILED_DOWNLOAD_HANDLING:
<label for="Failed" class="checkbox inline Failed"><input type="checkbox" id="Failed" checked="checked" /> Failed: <b>${isCounts['Failed']}</b></label>
%endif
</div>
</div>
@ -314,7 +316,9 @@
<option value="Downloaded">Downloaded</option>
<option value="Archived">Archived</option>
<option value="Ignored">Ignored</option>
<option value="Failed">Failed</option>
%if mylar.FAILED_DOWNLOAD_HANDLING:
<option value="Failed">Failed</option>
%endif
</select>
selected issues
<input type="hidden" value="Go">
@ -384,7 +388,9 @@
%elif (issue['Status'] == 'Snatched'):
<a href="#" onclick="doAjaxCall('retryit?ComicName=${issue['ComicName'] |u}&ComicID=${issue['ComicID']}&IssueID=${issue['IssueID']}&IssueNumber=${issue['Issue_Number']}&ComicYear=${issue['IssueDate']}', $(this),'table')" data-success="Retrying the same version of '${issue['ComicName']}' '${issue['Issue_Number']}'" title="Retry the same download again"><img src="interfaces/default/images/retry_icon.png" height="25" width="25" class="highqual" /></a>
<a href="#" title="Mark issue as Skipped" onclick="doAjaxCall('unqueueissue?IssueID=${issue['IssueID']}&ComicID=${issue['ComicID']}',$(this),'table')" data-success="'${issue['Issue_Number']}' has been marked as Skipped"><img src="interfaces/default/images/skipped_icon.png" height="25" width="25" class="highqual" /></a>
<a href="#" title="Mark issue as Failed" onclick="doAjaxCall('unqueueissue?IssueID=${issue['IssueID']}&ComicID=${issue['ComicID']}&mode="failed"',$(this),'table')" data-success="'${issue['Issue_Number']}' has been marked as Failed"><img src="interfaces/default/images/failed.png" height="25" width="25" class="highqual" /></a>
%if mylar.FAILED_DOWNLOAD_HANDLING:
<a href="#" title="Mark issue as Failed" onclick="doAjaxCall('unqueueissue?IssueID=${issue['IssueID']}&ComicID=${issue['ComicID']}&mode=failed',$(this),'table')" data-success="'${issue['Issue_Number']}' has been marked as Failed"><img src="interfaces/default/images/failed.png" height="25" width="25" class="highqual" /></a>
%endif
%elif (issue['Status'] == 'Downloaded'):
<%
if issue['Location'] is not None:
@ -520,7 +526,9 @@
%elif (annual['Status'] == 'Snatched'):
<a href="#" onclick="doAjaxCall('retryit?ComicName=${annual['ComicName'] |u}&ComicID=${annual['ComicID']}&IssueID=${annual['IssueID']}&IssueNumber=${annual['Issue_Number']}&ComicYear=${annual['IssueDate']}&ReleaseComicID=${annual['ReleaseComicID']}', $(this),'table')" data-success="Retrying the same version of '${issue['ComicName']}' '${issue['Issue_Number']}'" title="Retry the same download again"><img src="interfaces/default/images/retry_icon.png" height="25" width="25" class="highqual" /></a>
<a href="#" title="Mark annual as Skipped" onclick="doAjaxCall('unqueueissue?IssueID=${annual['IssueID']}&ComicID=${annual['ComicID']}&ReleaseComicID=${annual['ReleaseComicID']}',$(this),'table')" data-success="'${annual['Issue_Number']}' has been marked as skipped"><img src="interfaces/default/images/skipped_icon.png" height="25" width="25" class="highqual" /></a>
<a href="#" title="Mark annual as Failed" onclick="doAjaxCall('unqueueissue?IssueID=${issue['IssueID']}&ComicID=${issue['ComicID']}&mode="failed"',$(this),'table')" data-success="'${issue['Issue_Number']}' has been marked as Failed"><img src="interfaces/default/images/failed.png" height="25" width="25" class="highqual" /></a>
%if mylar.FAILED_DOWNLOAD_HANDLING:
<a href="#" title="Mark annual as Failed" onclick="doAjaxCall('unqueueissue?IssueID=${issue['IssueID']}&ComicID=${issue['ComicID']}&mode="failed"',$(this),'table')" data-success="'${issue['Issue_Number']}' has been marked as Failed"><img src="interfaces/default/images/failed.png" height="25" width="25" class="highqual" /></a>
%endif
%elif (annual['Status'] == 'Downloaded'):
<%
if annual['Location'] is not None:

View File

@ -905,6 +905,11 @@
</div>
<div class="row">
<label>Device ID</label><input type="text" name="pushbullet_deviceid" value="${config['pushbullet_deviceid']}" size="50">
<!-- <label>Pushbullet device list</label>
<select name="pushbullet_device_list" id="pushbullet_device_list"></select>
<input type="hidden" id="pushbullet_deviceid" value="${config['pushbullet_deviceid']}" />
<input type="button" class="btn" value="Update device list" id="getPushbulletDevices" />
-->
</div>
</div>
</fieldset>

View File

@ -1116,20 +1116,23 @@ div#artistheader h2 a {
width: 100%;
}
#series_table th#publisher {
min-width: 150px;
min-width: 100px;
text-align: left;
}
#series_table th#name {
min-width: 275px;
text-align: left;
}
#series_table th#year,
#series_table th#issue {
min-width: 25px;
#series_table th#year {
max-width: 25px;
text-align: left;
}
#series_table th#active {
max-width: 40px;
text-align: left;
}
#series_table th#status,
#series_table th#active,
#series_table th#issue,
#series_table th#published {
min-width: 50px;
text-align: left;
@ -1138,7 +1141,7 @@ div#artistheader h2 a {
text-align: center;
}
#series_table td#publisher {
min-width: 150px;
min-width: 100px;
text-align: left;
vertical-align: middle;
font-size: 12px;
@ -1148,14 +1151,18 @@ div#artistheader h2 a {
text-align: left;
vertical-align: middle;
}
#series_table th#year,
#series_table th#issue {
#series_table td#year {
max-width: 25px;
text-align: left;
vertical-align: middle;
}
#series_table td#active {
max-width: 40px;
text-align: left;
vertical-align: middle;
}
#series_table td#status,
#series_table td#active,
#series_table td#issue,
#series_table td#published {
max-width: 50px;
text-align: left;

View File

@ -95,14 +95,16 @@
</tr>
<tr>
<form action="readOptions" method="get">
<div class="row">
<label>Options</label><br/>
<input type="checkbox" /><label>Arcs in Grabbag Directory?</label><br/>
<input type="checkbox" name="storyarcdir" value="1" ${readConfig['storyarcdir']} /><label>Arcs in StoryArc Directory (off of ComicLocationRoot)?</label><br/>
<input type="checkbox" /><label>Show Downloaded Story Arc Issues on ReadingList tab</label><br/>
<input type="checkbox" name="read2filename" value="1" ${readConfig['read2filename']} /><label>Append Reading # to filename</label>
<fieldset>
<legend>Options</legend>
<div class="row checkbox left clearfix">
<input type="checkbox" style="vertical-align: middle; margin: 3px; margin-top: -1px;" /><label>Arcs in Grabbag Directory?</label><br/>
<input type="checkbox" style="vertical-align: middle; margin: 3px; margin-top: -1px;" name="storyarcdir" value="1" ${readConfig['storyarcdir']} /><label>Arcs in StoryArc Directory (off of ComicLocationRoot)?</label><br/>
<input type="checkbox" style="vertical-align: middle; margin: 3px; margin-top: -1px;" /><label>Show Downloaded Story Arc Issues on ReadingList tab</label><br/>
<input type="checkbox" style="vertical-align: middle; margin: 3px; margin-top: -1px;" name="read2filename" value="1" ${readConfig['read2filename']} /><label>Append Reading # to filename</label>
</div>
</fieldset>
</form>
</tr>
</tr>

View File

@ -26,14 +26,14 @@
<tr>
<form action="readOptions" method="get">
<fieldset>
<div class="row">
<label>Options</label><br/>
<input type="checkbox" name="storyarcdir" value="1" ${readConfig['storyarcdir']} /><label>Should I create a Story-Arc Directory?</label><br/>
<legend>Options</legend>
<div class="row checkbox left clearfix">
<input type="checkbox" style="vertical-align: middle; margin: 3px; margin-top: -1px;" name="storyarcdir" value="1" ${readConfig['storyarcdir']} /><label>Should I create a Story-Arc Directory?</label><br/>
<small>Arcs in StoryArc Directory: <% sdir = os.path.join(mylar.DESTINATION_DIR, "StoryArcs") %>${sdir}</small><br/>
<input type="checkbox" /><label>Show Downloaded Story Arc Issues on ReadingList tab</label><br/>
<input type="checkbox" name="read2filename" value="1" ${readConfig['read2filename']} /><label>Append Reading# to filename</label><br/>
<label>Enforce Renaming/MetaTagging options (if enabled)</label>
<label>Copy watchlisted issues to StoryArc Directory</label>
<input type="checkbox" style="vertical-align: middle; margin: 3px; margin-top: -1px;" /><label>Show Downloaded Story Arc Issues on ReadingList tab</label><br/>
<input type="checkbox" style="vertical-align: middle; margin: 3px; margin-top: -1px;" name="read2filename" value="1" ${readConfig['read2filename']} /><label>Append Reading# to filename</label><br/>
<input type="checkbox" style="vertical-align: middle; margin: 3px; margin-top: -1px;" /><label>Enforce Renaming/MetaTagging options (if enabled)</label>
<input type="checkbox" style="vertical-align: middle; margin: 3px; margin-top: -1px;" /><label>Copy watchlisted issues to StoryArc Directory</label>
</div>
</fieldset>
<div>
@ -48,7 +48,6 @@
<thead>
<tr>
<th id="readingorder"></th>
<th id="storyarc">Story Arc</th>
<th id="comicname">ComicName</th>
<th id="issue">Issue</th>
<th id="issueyear">Pub Year</th>
@ -74,7 +73,6 @@
<tr id="${item['ReadingOrder']}" class="grade${grade}">
<td id="readingorder">${item['ReadingOrder']}</td>
<td id="storyarc">${item['StoryArc']}</td>
<td id="comicname">${item['ComicName']} (${item['SeriesYear']})</td>
<td id="issue">${item['IssueNumber']}</td>
<td id="issueyear">${item['IssueYear']}</td>

View File

@ -343,25 +343,41 @@ class PostProcessor(object):
#use issueid to get publisher, series, year, issue number
annchk = "no"
if 'annual' in nzbname.lower():
logger.info(module + ' Annual detected.')
annchk = "yes"
# if 'annual' in nzbname.lower():
# logger.info(module + ' Annual detected.')
# annchk = "yes"
# issuenzb = myDB.selectone("SELECT * from annuals WHERE IssueID=? AND ComicName NOT NULL", [issueid]).fetchone()
# else:
# issuenzb = myDB.selectone("SELECT * from issues WHERE IssueID=? AND ComicName NOT NULL", [issueid]).fetchone()
issuenzb = myDB.selectone("SELECT * from issues WHERE IssueID=? AND ComicName NOT NULL", [issueid]).fetchone()
if issuenzb is None:
logger.info(module + ' Could not detect as a standard issue - checking against annuals.')
issuenzb = myDB.selectone("SELECT * from annuals WHERE IssueID=? AND ComicName NOT NULL", [issueid]).fetchone()
else:
issuenzb = myDB.selectone("SELECT * from issues WHERE IssueID=? AND ComicName NOT NULL", [issueid]).fetchone()
if issuenzb is None:
logger.info(module + ' issuenzb not found.')
#if it's non-numeric, it contains a 'G' at the beginning indicating it's a multi-volume
#using GCD data. Set sandwich to 1 so it will bypass and continue post-processing.
if 'S' in issueid:
sandwich = issueid
elif 'G' in issueid or '-' in issueid:
sandwich = 1
else:
logger.info(module + ' Successfully located issue as an annual. Continuing.')
annchk = "yes"
if issuenzb is not None:
logger.info(module + ' issuenzb found.')
if helpers.is_number(issueid):
sandwich = int(issuenzb['IssueID'])
else:
logger.info(module + ' issuenzb not found.')
#if it's non-numeric, it contains a 'G' at the beginning indicating it's a multi-volume
#using GCD data. Set sandwich to 1 so it will bypass and continue post-processing.
if 'S' in issueid:
sandwich = issueid
elif 'G' in issueid or '-' in issueid:
sandwich = 1
# else:
# logger.info(module + ' issuenzb not found.')
# #if it's non-numeric, it contains a 'G' at the beginning indicating it's a multi-volume
# #using GCD data. Set sandwich to 1 so it will bypass and continue post-processing.
# if 'S' in issueid:
# sandwich = issueid
# elif 'G' in issueid or '-' in issueid:
# sandwich = 1
if helpers.is_number(sandwich):
if sandwich < 900000:
# if sandwich is less than 900000 it's a normal watchlist download. Bypass.
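The rework above stops trusting the word 'annual' in the NZB name: the issues table is queried first and the annuals table is only consulted as a fallback. A minimal sketch of that fallback, using the same two queries as the hunk (locate_issue is a hypothetical helper name; myDB stands for the module's DBConnection):

def locate_issue(myDB, issueid):
    # Try a standard issue first; the NZB name alone is not a reliable
    # indicator of whether this is an annual.
    issuenzb = myDB.selectone(
        "SELECT * from issues WHERE IssueID=? AND ComicName NOT NULL",
        [issueid]).fetchone()
    annchk = "no"
    if issuenzb is None:
        # Nothing in issues - fall back to the annuals table before giving up.
        issuenzb = myDB.selectone(
            "SELECT * from annuals WHERE IssueID=? AND ComicName NOT NULL",
            [issueid]).fetchone()
        if issuenzb is not None:
            annchk = "yes"
    return issuenzb, annchk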
@ -498,7 +514,7 @@ class PostProcessor(object):
if annchk == "no":
logger.info(module + ' Starting Post-Processing for ' + issuenzb['ComicName'] + ' issue: ' + str(issuenzb['Issue_Number']))
else:
logger.info(module + ' Starting Post-Processing for ' + issuenzb['ComicName'] + ' Annual issue: ' + str(issuenzb['Issue_Number']))
logger.info(module + ' Starting Post-Processing for ' + issuenzb['ReleaseComicName'] + ' issue: ' + str(issuenzb['Issue_Number']))
logger.fdebug(module + ' issueid: ' + str(issueid))
logger.fdebug(module + ' issuenumOG: ' + str(issuenumOG))
@ -647,7 +663,7 @@ class PostProcessor(object):
chunk_f_f = re.sub('\$Annual','',chunk_file_format)
chunk_f = re.compile(r'\s+')
chunk_file_format = chunk_f.sub(' ', chunk_f_f)
logger.fdebug(module + ' Not an annual - removing from filename paramaters')
logger.fdebug(module + ' Not an annual - removing from filename parameters')
logger.fdebug(module + ' New format: ' + str(chunk_file_format))
else:
@ -903,9 +919,12 @@ class PostProcessor(object):
dispiss = 'issue: ' + str(issuenumOG)
else:
updater.foundsearch(comicid, issueid, mode='want_ann', down=downtype, module=module)
dispiss = 'annual issue: ' + str(issuenumOG)
if 'annual' not in series.lower():
dispiss = 'annual issue: ' + str(issuenumOG)
else:
dispiss = str(issuenumOG)
#force rescan of files
#force rescan of files
updater.forceRescan(comicid,module=module)
if mylar.WEEKFOLDER:
@ -952,7 +971,11 @@ class PostProcessor(object):
if annchk == "no":
prline = series + '(' + issueyear + ') - issue #' + issuenumOG
else:
prline = series + ' Annual (' + issueyear + ') - issue #' + issuenumOG
if 'annual' not in series.lower():
prline = series + ' Annual (' + issueyear + ') - issue #' + issuenumOG
else:
prline = series + ' (' + issueyear + ') - issue #' + issuenumOG
prline2 = 'Mylar has downloaded and post-processed: ' + prline
if mylar.PROWL_ENABLED:

View File

@ -34,7 +34,7 @@ from lib.configobj import ConfigObj
import cherrypy
from mylar import versioncheck, logger, versioncheck, rsscheck, search, PostProcessor, weeklypull, helpers #versioncheckit, searchit, weeklypullit, dbupdater, scheduler
from mylar import logger, versioncheck, rsscheck, search, PostProcessor, weeklypull, helpers #versioncheckit, searchit, weeklypullit, dbupdater, scheduler
FULL_PATH = None
PROG_DIR = None
@ -128,7 +128,7 @@ COMIC_DIR = None
LIBRARYSCAN = False
IMP_MOVE = False
IMP_RENAME = False
IMP_METADATA = False
IMP_METADATA = False # should default to False - this is enabled for testing only.
SEARCH_INTERVAL = 360
NZB_STARTUP_SEARCH = False
@ -281,6 +281,10 @@ ENABLE_RSS = 0
RSS_CHECKINTERVAL = 20
RSS_LASTRUN = None
#these are used to set the comparison against the post-processing scripts
STATIC_COMICRN_VERSION = "1.0"
STATIC_APC_VERSION = "1.0"
FAILED_DOWNLOAD_HANDLING = 0
FAILED_AUTO = 0
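The STATIC_COMICRN_VERSION / STATIC_APC_VERSION constants added here give Mylar a baseline to compare against, since ComicRN.py and autoProcessComics.py usually live outside the mylar git directory and don't get updated with it. A hedged sketch of how such a comparison might be used to warn the user (check_postprocess_version and the way the script reports its version are assumptions, not the actual implementation):

STATIC_COMICRN_VERSION = "1.0"
STATIC_APC_VERSION = "1.0"

def check_postprocess_version(script_name, reported_version, expected_version, logger):
    # Warn when the external post-processing script is older than the
    # version this Mylar build expects.
    if reported_version != expected_version:
        logger.warn('%s reports version %s but Mylar expects %s - please update '
                    'the copy that lives outside the mylar directory.'
                    % (script_name, reported_version, expected_version))
        return False
    return True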

View File

@ -138,7 +138,11 @@ def run (dirName, nzbName=None, issueid=None, manual=None, filename=None, module
# if the filename is identical to the parent folder, the entire subfolder gets copied since it's the first match, instead of just the file
shutil.move( filename, comicpath )
filename = os.path.split(filename)[1] # just the filename itself
try:
filename = os.path.split(filename)[1] # just the filename itself
except:
logger.warn('Unable to detect filename within directory - I am aborting the tagging. You best check things out.')
return "fail"
#print comicpath
#print os.path.join( comicpath, filename )
if filename.endswith('.cbr'):

View File

@ -189,6 +189,8 @@ def GetComicInfo(comicid,dom):
except:
comic_deck = 'None'
#comic['ComicDescription'] = comic_desc
try:
comic['Aliases'] = dom.getElementsByTagName('aliases')[0].firstChild.wholeText
#logger.fdebug('Aliases: ' + str(aliases))

View File

@ -69,7 +69,7 @@ def listFiles(dir,watchcomic,Publisher,AlternateSearch=None,manual=None,sarc=Non
'C',
'X']
extensions = ('.cbr', '.cbz')
extensions = ('.cbr', '.cbz', '.cb7')
# #get the entire tree here
dirlist = traverse_directories(basedir)
@ -538,8 +538,8 @@ def listFiles(dir,watchcomic,Publisher,AlternateSearch=None,manual=None,sarc=Non
logger.fdebug('[FILECHECKER] AS_Tuple : ' + str(AS_Tuple))
annual_comicid = None
for ATS in AS_Tuple:
logger.fdebug('[FILECHECKER] ' + str(ATS['AS_Alternate']) + ' comparing to ' + str(modwatchcomic))
if ATS['AS_Alternate'] == modwatchcomic:
logger.fdebug('[FILECHECKER] ' + str(ATS['AS_Alternate']) + ' comparing to ' + str(subname)) #str(modwatchcomic))
if ATS['AS_Alternate'] == subname: #modwatchcomic:
logger.fdebug('[FILECHECKER] Associating ComiciD : ' + str(ATS['ComicID']))
annual_comicid = str(ATS['ComicID'])
break

View File

@ -343,14 +343,29 @@ def rename_param(comicid, comicname, issue, ofilename, comicyear=None, issueid=N
if mylar.ZERO_LEVEL_N == "none": zeroadd = ""
elif mylar.ZERO_LEVEL_N == "0x": zeroadd = "0"
elif mylar.ZERO_LEVEL_N == "00x": zeroadd = "00"
logger.fdebug('Zero Suppression set to : ' + str(mylar.ZERO_LEVEL_N))
prettycomiss = None
if str(len(issueno)) > 1:
if int(issueno) < 0:
self._log("issue detected is a negative")
prettycomiss = '-' + str(zeroadd) + str(abs(issueno))
elif int(issueno) < 10:
try:
x = float(issueno)
#validity check
if x < 0:
logger.info('I\'ve encountered a negative issue #: ' + str(issueno) + '. Trying to accomodate.')
logger.info('abs is : ' + str(abs(iss)))
prettycomiss = '-' + str(zeroadd) + str(abs(iss))
logger.info('past.')
else: raise ValueError
except ValueError, e:
logger.warn('Unable to properly determine issue number [' + str(issueno) + '] - you should probably log this on github for help.')
return
if prettycomiss is None and len(str(issueno)) > 0:
logger.info('here')
#if int(issueno) < 0:
# self._log("issue detected is a negative")
# prettycomiss = '-' + str(zeroadd) + str(abs(issueno))
if int(issueno) < 10:
logger.fdebug('issue detected less than 10')
if '.' in iss:
if int(iss_decval) > 0:
@ -389,7 +404,7 @@ def rename_param(comicid, comicname, issue, ofilename, comicyear=None, issueid=N
if issue_except != 'None':
prettycomiss = str(prettycomiss) + issue_except
logger.fdebug('Zero level supplement set to ' + str(mylar.ZERO_LEVEL_N) + '. Issue will be set as : ' + str(prettycomiss))
else:
elif len(str(issueno)) == 0:
prettycomiss = str(issueno)
logger.fdebug('issue length error - cannot determine length. Defaulting to None: ' + str(prettycomiss))
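The float() validity check above is what lets a '-1' issue number survive renaming (#830), with mylar.ZERO_LEVEL_N ('none' / '0x' / '00x') deciding how much zero padding gets applied. A simplified, standalone sketch of that padding rule (pretty_issue is a hypothetical name; the logging and self._log plumbing are left out):

def pretty_issue(issueno, zero_level_n="00x"):
    # zero_level_n mirrors mylar.ZERO_LEVEL_N: "none", "0x" or "00x"
    zeroadd = {"none": "", "0x": "0", "00x": "00"}[zero_level_n]
    value = float(issueno)            # raises ValueError on input that isn't a number
    if value < 0:
        # negative issues such as "-1" keep their sign in front of the padding (#830)
        return '-' + zeroadd + str(abs(int(value)))
    if value < 10:
        return zeroadd + str(issueno)
    if value < 100:
        return zeroadd[:-1] + str(issueno)
    return str(issueno)

# pretty_issue("-1") -> "-001", pretty_issue("5") -> "005", pretty_issue("12") -> "012"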
@ -1414,6 +1429,12 @@ def get_issue_title(IssueID):
return None
return issue['IssueName']
def int_num(s):
try:
return int(s)
except ValueError:
return float(s)
from threading import Thread

View File

@ -61,10 +61,18 @@ def addComictoDB(comicid,mismatch=None,pullupd=None,imported=None,ogcname=None,c
"Status": "Loading"}
comlocation = None
oldcomversion = None
series_status = 'Loading'
else:
if chkwant is not None:
logger.fdebug('ComicID: ' + str(comicid) + ' already exists. Not adding from the future pull list at this time.')
return 'Exists'
if dbcomic['Status'] == 'Active':
series_status = 'Active'
elif dbcomic['Status'] == 'Paused':
series_status = 'Paused'
else:
series_status = 'Loading'
newValueDict = {"Status": "Loading"}
comlocation = dbcomic['ComicLocation']
if not latestissueinfo:
@ -89,7 +97,8 @@ def addComictoDB(comicid,mismatch=None,pullupd=None,imported=None,ogcname=None,c
newValueDict = {"ComicName": "Fetch failed, try refreshing. (%s)" % (comicid),
"Status": "Active"}
else:
newValueDict = {"Status": "Active"}
if series_status == 'Active' or series_status == 'Loading':
newValueDict = {"Status": "Active"}
myDB.upsert("comics", newValueDict, controlValueDict)
return
@ -449,7 +458,7 @@ def addComictoDB(comicid,mismatch=None,pullupd=None,imported=None,ogcname=None,c
#Cdesc = helpers.cleanhtml(comic['ComicDescription'])
#cdes_find = Cdesc.find("Collected")
#cdes_removed = Cdesc[:cdes_find]
#print cdes_removed
#logger.fdebug('description: ' + cdes_removed)
controlValueDict = {"ComicID": comicid}
newValueDict = {"ComicName": comic['ComicName'],
@ -461,7 +470,7 @@ def addComictoDB(comicid,mismatch=None,pullupd=None,imported=None,ogcname=None,c
"ComicVersion": comicVol,
"ComicLocation": comlocation,
"ComicPublisher": comic['ComicPublisher'],
#"Description": Cdesc.decode('utf-8', 'replace'),
# "Description": Cdesc, #.dencode('utf-8', 'replace'),
"DetailURL": comic['ComicURL'],
# "ComicPublished": gcdinfo['resultPublished'],
"ComicPublished": "Unknown",
@ -537,7 +546,7 @@ def addComictoDB(comicid,mismatch=None,pullupd=None,imported=None,ogcname=None,c
if pullupd is None:
# lets' check the pullist for anything at this time as well since we're here.
# do this for only Present comics....
if mylar.AUTOWANT_UPCOMING and lastpubdate == 'Present': #and 'Present' in gcdinfo['resultPublished']:
if mylar.AUTOWANT_UPCOMING and lastpubdate == 'Present' and series_status == 'Active': #and 'Present' in gcdinfo['resultPublished']:
logger.fdebug('latestissue: #' + str(latestiss))
chkstats = myDB.selectone("SELECT * FROM issues WHERE ComicID=? AND Issue_Number=?", [comicid,str(latestiss)]).fetchone()
logger.fdebug('latestissue status: ' + chkstats['Status'])
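The series_status bookkeeping above prevents a refresh from flipping a Paused series back to Active, and the AUTOWANT_UPCOMING check later in the hunk only fires for Active series. A one-function restatement of that rule (resolved_series_status is a hypothetical name):

def resolved_series_status(dbcomic_status):
    # Preserve Active/Paused across a refresh; anything else is treated as Loading.
    # Only Active (or freshly Loading) series get set back to Active afterwards.
    if dbcomic_status in ('Active', 'Paused'):
        return dbcomic_status
    return 'Loading'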

View File

@ -97,9 +97,9 @@ def libraryScan(dir=None, append=False, ComicID=None, ComicName=None, cron=None)
for watch in watchlist:
#use the comicname_filesafe to start
watchdisplaycomic = re.sub('[\_\#\,\/\:\;\!\$\%\&\+\'\?\@]', ' ', watch['ComicName']).encode('utf-8').strip()
watchdisplaycomic = watch['ComicName'].encode('utf-8').strip() #re.sub('[\_\#\,\/\:\;\!\$\%\&\+\'\?\@]', ' ', watch['ComicName']).encode('utf-8').strip()
# let's clean up the name, just in case for comparison purposes...
watchcomic = re.sub('[\_\#\,\/\:\;\.\-\!\$\%\&\+\'\?\@]', ' ', watch['ComicName_Filesafe']).encode('utf-8').strip()
watchcomic = re.sub('[\_\#\,\/\:\;\.\-\!\$\%\&\+\'\?\@]', '', watch['ComicName_Filesafe']).encode('utf-8').strip()
#watchcomic = re.sub('\s+', ' ', str(watchcomic)).strip()
if ' the ' in watchcomic.lower():
@ -110,7 +110,7 @@ def libraryScan(dir=None, append=False, ComicID=None, ComicName=None, cron=None)
# account for alternate names as well
if watch['AlternateSearch'] is not None and watch['AlternateSearch'] is not 'None':
altcomic = re.sub('[\_\#\,\/\:\;\.\-\!\$\%\&\+\'\?\@]', ' ', watch['AlternateSearch']).encode('utf-8').strip()
altcomic = re.sub('[\_\#\,\/\:\;\.\-\!\$\%\&\+\'\?\@]', '', watch['AlternateSearch']).encode('utf-8').strip()
#altcomic = re.sub('\s+', ' ', str(altcomic)).strip()
AltName.append(altcomic)
alt_chk = "yes" # alt-checker flag
@ -210,7 +210,7 @@ def libraryScan(dir=None, append=False, ComicID=None, ComicName=None, cron=None)
#we need to track the counter to make sure we are comparing the right array parts
#this takes care of the brackets :)
m = re.findall('[^()]+', cfilename)
m = re.findall('[^()]+', d_filename) #cfilename)
lenm = len(m)
logger.fdebug("there are " + str(lenm) + " words.")
cnt = 0
@ -316,22 +316,23 @@ def libraryScan(dir=None, append=False, ComicID=None, ComicName=None, cron=None)
cnt+=1
displength = len(cname)
print 'd_filename is : ' + d_filename
logger.fdebug('cname length : ' + str(displength) + ' --- ' + str(cname))
logger.fdebug('d_filename is : ' + d_filename)
charcount = d_filename.count('#')
print ('charcount is : ' + str(charcount))
logger.fdebug('charcount is : ' + str(charcount))
if charcount > 0:
print ('entering loop')
logger.fdebug('entering loop')
for i,m in enumerate(re.finditer('\#', d_filename)):
if m.end() <= displength:
print comfilename[m.start():m.end()]
logger.fdebug(comfilename[m.start():m.end()])
# find occurance in c_filename, then replace into d_filname so special characters are brought across
newchar = comfilename[m.start():m.end()]
print 'newchar:' + str(newchar)
logger.fdebug('newchar:' + str(newchar))
d_filename = d_filename[:m.start()] + str(newchar) + d_filename[m.end():]
print 'd_filename:' + str(d_filename)
logger.fdebug('d_filename:' + str(d_filename))
dispname = d_filename[:displength]
print ('dispname : ' + dispname)
logger.fdebug('dispname : ' + dispname)
splitit = []
watchcomic_split = []
@ -445,19 +446,20 @@ def libraryScan(dir=None, append=False, ComicID=None, ComicName=None, cron=None)
else:
vol_label = volno
print ("adding " + com_NAME + " to the import-queue!")
impid = com_NAME + "-" + str(result_comyear) + "-" + str(comiss)
print ("impid: " + str(impid))
logger.fdebug("adding " + com_NAME + " to the import-queue!")
impid = dispname + '-' + str(result_comyear) + '-' + str(comiss) #com_NAME + "-" + str(result_comyear) + "-" + str(comiss)
logger.fdebug("impid: " + str(impid))
import_by_comicids.append({
"impid" : impid,
"watchmatch" : watchmatch,
"displayname" : dispname,
"comicname" : com_NAME,
"comicname" : dispname, #com_NAME,
"comicyear" : result_comyear,
"volume" : vol_label,
"comfilename" : comfilename,
"comlocation" : comlocation.decode(mylar.SYS_ENCODING)
})
logger.fdebug('import_by_ids: ' + str(import_by_comicids))
if len(watch_kchoice) > 0:
watchchoice['watchlist'] = watch_kchoice
@ -550,9 +552,9 @@ def scanLibrary(scan=None, queue=None):
logger.error('Unable to complete the scan: %s' % e)
return
if soma == "Completed":
print ("sucessfully completed import.")
logger.info('Sucessfully completed import.')
else:
logger.info(u"Starting mass importing..." + str(noids) + " records.")
logger.info('Starting mass importing...' + str(noids) + ' records.')
#this is what it should do...
#store soma (the list of comic_details from importing) into sql table so import can be whenever
#display webpage showing results
@ -565,12 +567,12 @@ def scanLibrary(scan=None, queue=None):
#result = threadthis.main(soma)
myDB = db.DBConnection()
sl = 0
print ("number of records: " + str(noids))
logger.fdebug("number of records: " + str(noids))
while (sl < int(noids)):
soma_sl = soma['comic_info'][sl]
print ("soma_sl: " + str(soma_sl))
print ("comicname: " + soma_sl['comicname'].encode('utf-8'))
print ("filename: " + soma_sl['comfilename'].encode('utf-8'))
logger.fdebug("soma_sl: " + str(soma_sl))
logger.fdebug("comicname: " + soma_sl['comicname'].encode('utf-8'))
logger.fdebug("filename: " + soma_sl['comfilename'].encode('utf-8'))
controlValue = {"impID": soma_sl['impid']}
newValue = {"ComicYear": soma_sl['comicyear'],
"Status": "Not Imported",

View File

@ -39,10 +39,9 @@ def pullsearch(comicapi,comicquery,offset,explicit,type):
# 02/22/2014 use the volume filter label to get the right results.
# add the 's' to the end of type to pluralize the caption (it's needed)
if type == 'story_arc':
logger.info('redefining.')
u_comicquery = re.sub("%20AND%20", "%20", u_comicquery)
PULLURL = mylar.CVURL + str(type) + 's?api_key=' + str(comicapi) + '&filter=name:' + u_comicquery + '&field_list=id,name,start_year,site_detail_url,count_of_issues,image,publisher,description&format=xml&offset=' + str(offset) # 2012/22/02 - CVAPI flipped back to offset instead of page
#logger.info('PULLURL: ' + str(PULLURL))
#all these imports are standard on most modern python implementations
#CV API Check here.
if mylar.CVAPI_COUNT == 0 or mylar.CVAPI_COUNT >= mylar.CVAPI_MAX:
@ -135,53 +134,165 @@ def findComic(name, mode, issue, limityear=None, explicit=None, type=None):
for result in comicResults:
#retrieve the first xml tag (<tag>data</tag>)
#that the parser finds with name tagName:
xmlcnt = result.getElementsByTagName('count_of_issues')[0].firstChild.wholeText
#here we can determine what called us, and either start gathering all issues or just limited ones.
#print ("n: " + str(n) + "--xmcnt" + str(xmlcnt))
if issue is not None and str(issue).isdigit():
#this gets buggered up with NEW/ONGOING series because the db hasn't been updated
#to reflect the proper count. Drop it by 1 to make sure.
limiter = int(issue) - 1
else: limiter = 0
if int(xmlcnt) >= limiter:
xmlTag = result.getElementsByTagName('name')[0].firstChild.wholeText
if (result.getElementsByTagName('start_year')[0].firstChild) is not None:
xmlYr = result.getElementsByTagName('start_year')[0].firstChild.wholeText
else: xmlYr = "0000"
if xmlYr in limityear or limityear == 'None':
xmlurl = result.getElementsByTagName('site_detail_url')[0].firstChild.wholeText
xmlid = result.getElementsByTagName('id')[0].firstChild.wholeText
publishers = result.getElementsByTagName('publisher')
if len(publishers) > 0:
pubnames = publishers[0].getElementsByTagName('name')
if len(pubnames) >0:
xmlpub = pubnames[0].firstChild.wholeText
else:
xmlpub = "Unknown"
else:
xmlpub = "Unknown"
if (result.getElementsByTagName('name')[0].childNodes[0].nodeValue) is None:
xmlimage = result.getElementsByTagName('super_url')[0].firstChild.wholeText
else:
xmlimage = "cache/blankcover.jpg"
if type == 'story_arc':
#call cv.py here to find out issue count in story arc
try:
logger.fdebug('story_arc ascension')
names = len( result.getElementsByTagName('name') )
n = 0
logger.fdebug('length: ' + str(names))
while ( n < names ):
logger.fdebug(result.getElementsByTagName('name')[n].parentNode.nodeName)
if result.getElementsByTagName('name')[n].parentNode.nodeName == 'story_arc':
logger.fdebug('yes')
try:
xmlTag = result.getElementsByTagName('name')[n].firstChild.wholeText
xmlTag = xmlTag.rstrip()
logger.fdebug('name: ' + str(xmlTag))
except:
logger.error('There was a problem retrieving the given data from ComicVine. Ensure that www.comicvine.com is accessible.')
return
elif result.getElementsByTagName('name')[n].parentNode.nodeName == 'publisher':
logger.fdebug('publisher check.')
xmlpub = result.getElementsByTagName('name')[n].firstChild.wholeText
n+=1
except:
logger.warn('error retrieving story arc search results.')
return
siteurl = len( result.getElementsByTagName('site_detail_url') )
s = 0
logger.fdebug('length: ' + str(names))
xmlurl = None
while ( s < siteurl ):
logger.fdebug(result.getElementsByTagName('site_detail_url')[s].parentNode.nodeName)
if result.getElementsByTagName('site_detail_url')[s].parentNode.nodeName == 'story_arc':
try:
xmlurl = result.getElementsByTagName('site_detail_url')[s].firstChild.wholeText
except:
logger.error('There was a problem retrieving the given data from ComicVine. Ensure that www.comicvine.com is accessible.')
return
s+=1
xmlid = result.getElementsByTagName('id')[0].firstChild.wholeText
if xmlid is not None:
#respawn to the exact id for the story arc and count the # of issues present.
ARCPULL_URL = mylar.CVURL + 'story_arc/4045-' + str(xmlid) + '/?api_key=' + str(comicapi) + '&field_list=issues,name,first_appeared_in_issue&format=xml&offset=0'
logger.fdebug('arcpull_url:' + str(ARCPULL_URL))
if mylar.CVAPI_COUNT == 0 or mylar.CVAPI_COUNT >= mylar.CVAPI_MAX:
cvapi_check()
try:
file = urllib2.urlopen(ARCPULL_URL)
except urllib2.HTTPError, err:
logger.error('err : ' + str(err))
logger.error('There was a major problem retrieving data from ComicVine - on their end.')
return
mylar.CVAPI_COUNT +=1
arcdata = file.read()
file.close()
arcdom = parseString(arcdata)
try:
xmldesc = result.getElementsByTagName('description')[0].firstChild.wholeText
logger.fdebug('story_arc ascension')
issuecount = len( arcdom.getElementsByTagName('issue') )
except:
xmldesc = "None"
comiclist.append({
'name': xmlTag,
'comicyear': xmlYr,
'comicid': xmlid,
'url': xmlurl,
'issues': xmlcnt,
'comicimage': xmlimage,
'publisher': xmlpub,
'description': xmldesc
})
logger.fdebug('unable to retrive issue count - nullifying value.')
issuecount = 0
try:
firstid = None
arcyear = None
fid = len ( arcdom.getElementsByTagName('id') )
fi = 0
while (fi < fid):
if arcdom.getElementsByTagName('id')[fi].parentNode.nodeName == 'first_appeared_in_issue':
if not arcdom.getElementsByTagName('id')[fi].firstChild.wholeText == xmlid:
logger.fdebug('hit it.')
firstid = arcdom.getElementsByTagName('id')[fi].firstChild.wholeText
break
fi+=1
logger.fdebug('firstid: ' + str(firstid))
if firstID is not None:
firstdom = cv.pulldetails(comicid=None, type='firstissue', issueid=firstid)
logger.fdebug('success')
arcyear = cv.GetFirstIssue(firstid,firstdom)
except:
logger.fdebug('Unable to retrieve first issue details. Not caclulating at this time.')
if (result.getElementsByTagName('image')[0].childNodes[0].nodeValue) is None:
xmlimage = result.getElementsByTagName('super_url')[0].firstChild.wholeText
else:
logger.fdebug('year: ' + str(xmlYr) + ' - contraint not met. Has to be within ' + str(limityear))
xmlimage = "cache/blankcover.jpg"
try:
xmldesc = result.getElementsByTagName('deck')[0].firstChild.wholeText
except:
xmldesc = "None"
comiclist.append({
'name': xmlTag,
'comicyear': arcyear,
'comicid': xmlid,
'url': xmlurl,
'issues': issuecount,
'comicimage': xmlimage,
'publisher': xmlpub,
'description': xmldesc
})
else:
xmlcnt = result.getElementsByTagName('count_of_issues')[0].firstChild.wholeText
#here we can determine what called us, and either start gathering all issues or just limited ones.
#print ("n: " + str(n) + "--xmcnt" + str(xmlcnt))
if issue is not None and str(issue).isdigit():
#this gets buggered up with NEW/ONGOING series because the db hasn't been updated
#to reflect the proper count. Drop it by 1 to make sure.
limiter = int(issue) - 1
else: limiter = 0
if int(xmlcnt) >= limiter:
xmlTag = result.getElementsByTagName('name')[0].firstChild.wholeText
if (result.getElementsByTagName('start_year')[0].firstChild) is not None:
xmlYr = result.getElementsByTagName('start_year')[0].firstChild.wholeText
else: xmlYr = "0000"
if xmlYr in limityear or limityear == 'None':
xmlurl = result.getElementsByTagName('site_detail_url')[0].firstChild.wholeText
xmlid = result.getElementsByTagName('id')[0].firstChild.wholeText
publishers = result.getElementsByTagName('publisher')
if len(publishers) > 0:
pubnames = publishers[0].getElementsByTagName('name')
if len(pubnames) >0:
xmlpub = pubnames[0].firstChild.wholeText
else:
xmlpub = "Unknown"
else:
xmlpub = "Unknown"
if (result.getElementsByTagName('name')[0].childNodes[0].nodeValue) is None:
xmlimage = result.getElementsByTagName('super_url')[0].firstChild.wholeText
else:
xmlimage = "cache/blankcover.jpg"
try:
xmldesc = result.getElementsByTagName('description')[0].firstChild.wholeText
except:
xmldesc = "None"
comiclist.append({
'name': xmlTag,
'comicyear': xmlYr,
'comicid': xmlid,
'url': xmlurl,
'issues': xmlcnt,
'comicimage': xmlimage,
'publisher': xmlpub,
'description': xmldesc
})
else:
logger.fdebug('year: ' + str(xmlYr) + ' - contraint not met. Has to be within ' + str(limityear))
n+=1
#search results are limited to 100 and by pagination now...let's account for this.
countResults = countResults + 100
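Story-arc search results from ComicVine don't expose a usable issue count, so the new code above respawns to the arc's own endpoint (the 4045- prefix) and counts the <issue> elements it returns. A reduced sketch of that follow-up call, Python 2 like the rest of the codebase; the CVAPI_COUNT rate-limit bookkeeping and error handling from the hunk are omitted, and arc_issue_count is a hypothetical name (cvurl stands for mylar.CVURL):

import urllib2
from xml.dom.minidom import parseString

def arc_issue_count(cvurl, comicapi, arcid):
    # Query the story arc directly and count its <issue> elements.
    url = (cvurl + 'story_arc/4045-' + str(arcid) + '/?api_key=' + str(comicapi) +
           '&field_list=issues,name,first_appeared_in_issue&format=xml&offset=0')
    arcdata = urllib2.urlopen(url).read()
    return len(parseString(arcdata).getElementsByTagName('issue'))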

View File

@ -182,6 +182,11 @@ class PUSHOVER:
response = http_handler.getresponse()
request_status = response.status
logger.fdebug(u"PushOver response status: %r" % request_status)
logger.fdebug(u"PushOver response headers: %r" % response.getheaders())
logger.fdebug(u"PushOver response body: %r" % response.read())
if request_status == 200:
logger.info(module + ' Pushover notifications sent.')
return True
@ -249,7 +254,7 @@ class BOXCAR:
logger.fdebug(module + ' Boxcar2 notification successful.')
return True
def notify(self, ComicName=None, Year=None, Issue=None, sent_to=None, snatched_nzb=None, force=False, module=None):
def notify(self, prline=None, prline2=None, sent_to=None, snatched_nzb=None, force=False, module=None):
"""
Sends a boxcar notification based on the provided info or SB config
@ -270,9 +275,8 @@ class BOXCAR:
title = "Mylar. Sucessfully Snatched!"
message = "Mylar has snatched: " + snatched_nzb + " and has sent it to " + sent_to
else:
title = "Mylar. Successfully Downloaded & Post-Processed!"
message = "Mylar has downloaded and postprocessed: " + ComicName + ' (' + Year + ') #' + Issue
title = prline
message = prline2
logger.info(module + ' Sending notification to Boxcar2')
@ -285,13 +289,45 @@ class PUSHBULLET:
self.apikey = mylar.PUSHBULLET_APIKEY
self.deviceid = mylar.PUSHBULLET_DEVICEID
def notify(self, snline=None, prline=None, prline2=None, snatched=None, sent_to=None, prov=None, module=None):
def get_devices(self, api):
return self.notify(method="GET")
def notify(self, snline=None, prline=None, prline2=None, snatched=None, sent_to=None, prov=None, module=None, method=None):
if not mylar.PUSHBULLET_ENABLED:
return
if module is None:
module = ''
module += '[NOTIFIER]'
http_handler = HTTPSConnection("api.pushbullet.com")
#possible needed for update.
#if method == 'GET':
# uri = '/v2/devices'
#else:
# method = 'POST'
# uri = '/v2/pushes'
#authString = base64.encodestring('%s:' % (self.apikey)).replace('\n', '')
#if method == 'GET':
# http_handler.request(method, uri, None, headers={'Authorization': 'Basic %s:' % authString})
#else:
# if snatched:
# if snatched[-1] == '.': snatched = snatched[:-1]
# event = snline
# message = "Mylar has snatched: " + snatched + " from " + prov + " and has sent it to " + sent_to
# else:
# event = prline + ' complete!'
# message = prline2
# data = {'device_iden': self.deviceid,
# 'type': "note",
# 'title': event.encode('utf-8'), #"mylar",
# 'body': message.encode('utf-8') }
# http_handler.request(method, uri, body=urlencode(data), headers={'Authorization': 'Basic %s' % authString})
if snatched:
if snatched[-1] == '.': snatched = snatched[:-1]
event = snline
@ -300,34 +336,36 @@ class PUSHBULLET:
event = prline + ' complete!'
message = prline2
http_handler = HTTPSConnection("api.pushbullet.com")
data = {'device_iden': mylar.PUSHBULLET_DEVICEID,
'type': "note",
'title': event, #"mylar",
'body': message.encode("utf-8") }
'title': event,
'body': message.encode("utf-8")}
http_handler.request("POST",
"/api/pushes",
headers = {'Content-type': "application/x-www-form-urlencoded",
'Authorization' : 'Basic %s' % base64.b64encode(mylar.PUSHBULLET_APIKEY + ":") },
'Authorization': 'Basic %s' % base64.b64encode(mylar.PUSHBULLET_APIKEY + ":") },
body = urlencode(data))
response = http_handler.getresponse()
request_body = response.read()
request_status = response.status
#logger.debug(u"PushBullet response status: %r" % request_status)
#logger.debug(u"PushBullet response headers: %r" % response.getheaders())
#logger.debug(u"PushBullet response body: %r" % response.read())
logger.fdebug(u"PushBullet response status: %r" % request_status)
logger.fdebug(u"PushBullet response headers: %r" % response.getheaders())
logger.fdebug(u"PushBullet response body: %r" % response.read())
if request_status == 200:
if method == 'GET':
return request_body
else:
logger.fdebug(module + ' PushBullet notifications sent.')
return True
elif request_status >= 400 and request_status < 500:
logger.error(module + ' PushBullet request failed: %s' % response.reason)
return False
logger.error(module + ' PushBullet request failed: %s' % response.reason)
return False
else:
logger.error(module + ' PushBullet notification failed serverside.')
return False
logger.error(module + ' PushBullet notification failed serverside.')
return False
def test(self, apikey, deviceid):
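The Pushbullet notifier still posts to /api/pushes with HTTP Basic auth (the API key as username, empty password); the commented-out block above sketches an eventual move to the /v2 endpoints and a device-list lookup. A stripped-down version of the request notify() makes, standard library only (push_note is a hypothetical name; logging and the status branching are dropped):

import base64
from httplib import HTTPSConnection   # Python 2, as in the codebase
from urllib import urlencode

def push_note(apikey, deviceid, title, body):
    conn = HTTPSConnection("api.pushbullet.com")
    data = {'device_iden': deviceid,
            'type': 'note',
            'title': title,
            'body': body.encode('utf-8')}
    conn.request("POST", "/api/pushes",
                 headers={'Content-type': 'application/x-www-form-urlencoded',
                          'Authorization': 'Basic %s' % base64.b64encode(apikey + ':')},
                 body=urlencode(data))
    response = conn.getresponse()
    return response.status == 200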

View File

@ -35,7 +35,7 @@ from wsgiref.handlers import format_date_time
def search_init(ComicName, IssueNumber, ComicYear, SeriesYear, Publisher, IssueDate, StoreDate, IssueID, AlternateSearch=None, UseFuzzy=None, ComicVersion=None, SARC=None, IssueArcID=None, mode=None, rsscheck=None, ComicID=None, manualsearch=None, filesafe=None):
if filesafe:
if filesafe != ComicName:
if filesafe != ComicName and mode != 'want_ann':
logger.info('[SEARCH] altering ComicName to search-safe Name : ' + filesafe)
ComicName = filesafe
if ComicYear == None: ComicYear = '2014'
@ -49,7 +49,8 @@ def search_init(ComicName, IssueNumber, ComicYear, SeriesYear, Publisher, IssueD
if mode == 'want_ann':
logger.info("Annual issue search detected. Appending to issue #")
#anything for mode other than None indicates an annual.
ComicName = ComicName + " annual"
if 'annual' not in ComicName.lower():
ComicName = ComicName + " annual"
if AlternateSearch is not None and AlternateSearch != "None":
AlternateSearch = AlternateSearch + " annual"
@ -678,6 +679,7 @@ def NZB_SEARCH(ComicName, IssueNumber, ComicYear, SeriesYear, Publisher, IssueDa
logger.fdebug("Cleantitle: " + str(cleantitle))
vers4year = "no"
vers4vol = "no"
versionfound = "no"
if 'cover only' in cleantitle.lower():
logger.fdebug("Ignoring title as Cover Only detected.")
@ -702,12 +704,32 @@ def NZB_SEARCH(ComicName, IssueNumber, ComicYear, SeriesYear, Publisher, IssueDa
vers4year = "yes" #re.sub("[^0-9]", " ", str(ct)) #remove the v
#cleantitle = re.sub(ct, "(" + str(vers4year) + ")", cleantitle)
#logger.fdebug("volumized cleantitle : " + cleantitle)
versionfound = "yes"
break
else:
if len(ct) < 4:
logger.fdebug("Version detected as " + str(ct))
vers4vol = str(ct)
versionfound = "yes"
break
logger.fdebug("false version detection..ignoring.")
elif ct.lower()[:3] == 'vol':
#if in format vol.2013/vol2013/vol01/vol.1, etc
ct = re.sub('vol', '', ct.lower())
if '.' in ct: re.sub('.', '', ct).strip()
if ct.lower()[4:].isdigit():
logger.fdebug('volume indicator detected as version #:' + str(ct))
vers4year = "yes"
versionfound = "yes"
break
else:
vers4vol = ct
versionfound = "yes"
logger.fdebug('volume indicator detected as version #:' + str(vers4vol))
break
logger.fdebug("false version detection..ignoring.")
@ -844,6 +866,30 @@ def NZB_SEARCH(ComicName, IssueNumber, ComicYear, SeriesYear, Publisher, IssueDa
splitit = []
watchcomic_split = []
logger.fdebug("original nzb comic and issue: " + str(comic_andiss))
#scan the returned name to see if it contains a '-', which typically denotes the start of an issuetitle
#if the series doesn't have a '-' within it.
hyphensplit = None
hyphenfail = False
for m in re.finditer('-', comic_andiss):
logger.fdebug('I have found a hyphen within the nzbname @ position: ' + str(m.start()))
if '-' in ComicName:
logger.fdebug('There is a hyphen present in the series title. Ignoring position: ' + str(m.start()))
pass
else:
logger.fdebug('There is no hyphen present in the series title.')
logger.fdebug('Assuming position start is : ' + str(m.start()))
hyphensplit = comic_andiss[m.start():].split()
try:
issue_firstword = hyphensplit[1]
logger.fdebug('First word of issue stored as : ' + str(issue_firstword))
except:
logger.fdebug('Unable to parse title due to no space between hyphen. Ignoring this result.')
hyphenfail = True
break
if hyphenfail == True:
continue
#changed this from '' to ' '
comic_iss_b4 = re.sub('[\-\:\,\?]', ' ', str(comic_andiss))
comic_iss = comic_iss_b4.replace('.',' ')
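This hyphen pass is the core of the issue-title handling from #842/#808 and the NOW fix in #845: when the series title itself contains no hyphen, everything after the first '-' in the result name is treated as a candidate issue title, and the word right after the hyphen is remembered so the title can be located again later. A reduced, standalone sketch (find_issue_title_start is a hypothetical name; the real loop also skips the whole result via hyphenfail/continue):

import re

def find_issue_title_start(comic_andiss, comic_name):
    # Returns the first word of the suspected issue title, or None if the
    # hyphen belongs to the series name or can't be parsed.
    for m in re.finditer('-', comic_andiss):
        if '-' in comic_name:
            # the series itself contains a hyphen - not a title marker
            continue
        hyphensplit = comic_andiss[m.start():].split()
        try:
            return hyphensplit[1]   # word immediately after the '-'
        except IndexError:
            return None             # no space after the hyphen - ignore this result
    return None

# hypothetical example:
# find_issue_title_start('Invincible 111 - The Death of Everyone', 'Invincible') -> 'The'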
@ -943,76 +989,88 @@ def NZB_SEARCH(ComicName, IssueNumber, ComicYear, SeriesYear, Publisher, IssueDa
annualize = "true"
#splitst = splitst - 1
for tstsplit in splitit:
if tstsplit.lower().startswith('v') and tstsplit[1:].isdigit():
logger.fdebug("this has a version #...let's adjust")
if len(tstsplit[1:]) == 4: #v2013
logger.fdebug("Version detected as " + str(tstsplit))
vers4year = "yes" #re.sub("[^0-9]", " ", str(ct)) #remove the v
elif len(tstsplit[1:]) == 1: #v2
logger.fdebug("Version detected as " + str(tstsplit))
vers4vol = str(tstsplit)
elif tstsplit[1:].isdigit() and len(tstsplit) < 4:
logger.fdebug('Version detected as ' +str(tstsplit))
vers4vol = str(tstsplit)
else:
logger.fdebug("error - unknown length for : " + str(tstsplit))
logger.fdebug("volume detection commencing - adjusting length.")
logger.fdebug("watch comicversion is " + str(ComicVersion))
fndcomicversion = str(tstsplit)
logger.fdebug("version found: " + str(fndcomicversion))
logger.fdebug("vers4year: " + str(vers4year))
logger.fdebug("vers4vol: " + str(vers4vol))
if vers4year is not "no" or vers4vol is not "no":
#if the volume is None, assume it's a V1 to increase % hits
if ComVersChk == 0:
D_ComicVersion = 1
if versionfound == "yes":
for tstsplit in splitit:
logger.fdebug('comparing ' + str(tstsplit))
if tstsplit.lower().startswith('v'): #tstsplit[1:].isdigit():
logger.fdebug("this has a version #...let's adjust")
tmpsplit = tstsplit
if tmpsplit.lower().startswith('vol'):
logger.fdebug('volume detected - stripping and re-analzying for volume label.')
if '.' in tmpsplit:
tmpsplit = re.sub('.', '', tmpsplit).strip()
tmpsplit = re.sub('vol','', tmpsplit.lower()).strip()
if len(tmpsplit[1:]) == 4: #v2013
logger.fdebug("Version detected as " + str(tmpsplit))
vers4year = "yes" #re.sub("[^0-9]", " ", str(ct)) #remove the v
elif len(tmpsplit[1:]) == 1: #v2
logger.fdebug("Version detected as " + str(tmpsplit))
vers4vol = str(tmpsplit)
elif tmpsplit[1:].isdigit() and len(tmpsplit) < 4:
logger.fdebug('Version detected as ' +str(tmpsplit))
vers4vol = str(tmpsplit)
else:
D_ComicVersion = ComVersChk
logger.fdebug("error - unknown length for : " + str(tmpsplit))
continue
F_ComicVersion = re.sub("[^0-9]", "", fndcomicversion)
logger.fdebug("volume detection commencing - adjusting length.")
#if this is a one-off, SeriesYear will be None and cause errors.
if SeriesYear is None:
S_ComicVersion = 0
else:
S_ComicVersion = str(SeriesYear)
logger.fdebug("watch comicversion is " + str(ComicVersion))
fndcomicversion = str(tstsplit)
logger.fdebug("version found: " + str(fndcomicversion))
logger.fdebug("vers4year: " + str(vers4year))
logger.fdebug("vers4vol: " + str(vers4vol))
logger.fdebug("FCVersion: " + str(F_ComicVersion))
logger.fdebug("DCVersion: " + str(D_ComicVersion))
logger.fdebug("SCVersion: " + str(S_ComicVersion))
if vers4year is not "no" or vers4vol is not "no":
#here's the catch, sometimes annuals get posted as the Pub Year
# instead of the Series they belong to (V2012 vs V2013)
if annualize == "true" and int(ComicYear) == int(F_ComicVersion):
logger.fdebug("We matched on versions for annuals " + str(fndcomicversion))
scount+=1
cvers = "true"
#if the volume is None, assume it's a V1 to increase % hits
if ComVersChk == 0:
D_ComicVersion = 1
else:
D_ComicVersion = ComVersChk
elif int(F_ComicVersion) == int(D_ComicVersion) or int(F_ComicVersion) == int(S_ComicVersion):
logger.fdebug("We matched on versions..." + str(fndcomicversion))
scount+=1
cvers = "true"
F_ComicVersion = re.sub("[^0-9]", "", fndcomicversion)
else:
logger.fdebug("Versions wrong. Ignoring possible match.")
scount = 0
cvers = "false"
#if this is a one-off, SeriesYear will be None and cause errors.
if SeriesYear is None:
S_ComicVersion = 0
else:
S_ComicVersion = str(SeriesYear)
logger.fdebug("FCVersion: " + str(F_ComicVersion))
logger.fdebug("DCVersion: " + str(D_ComicVersion))
logger.fdebug("SCVersion: " + str(S_ComicVersion))
#here's the catch, sometimes annuals get posted as the Pub Year
# instead of the Series they belong to (V2012 vs V2013)
if annualize == "true" and int(ComicYear) == int(F_ComicVersion):
logger.fdebug("We matched on versions for annuals " + str(fndcomicversion))
scount+=1
cvers = "true"
elif int(F_ComicVersion) == int(D_ComicVersion) or int(F_ComicVersion) == int(S_ComicVersion):
logger.fdebug("We matched on versions..." + str(fndcomicversion))
scount+=1
cvers = "true"
else:
logger.fdebug("Versions wrong. Ignoring possible match.")
scount = 0
cvers = "false"
if cvers == "true":
#since we matched on versions, let's remove it entirely to improve matching.
logger.fdebug('Removing versioning from nzb filename to improve matching algorithims.')
cissb4vers = re.sub(tstsplit, "", comic_iss_b4).strip()
logger.fdebug('New b4split : ' + str(cissb4vers))
splitit = cissb4vers.split(None)
splitst -=1
break
if cvers == "true":
#since we matched on versions, let's remove it entirely to improve matching.
logger.fdebug('Removing versioning from nzb filename to improve matching algorithims.')
cissb4vers = re.sub(tstsplit, "", comic_iss_b4).strip()
logger.fdebug('New b4split : ' + str(cissb4vers))
splitit = cissb4vers.split(None)
splitst -=1
break
#do an initial check
initialchk = 'ok'
isstitle_chk = False
if (splitst) != len(watchcomic_split):
logger.fdebug("incorrect comic lengths...not a match")
@ -1022,6 +1080,8 @@ def NZB_SEARCH(ComicName, IssueNumber, ComicYear, SeriesYear, Publisher, IssueDa
logger.fdebug('there are ' + str(len(issuetitle_words)) + ' words in the issue title of : ' + str(issuetitle))
# we minus 1 the splitst since the issue # is included in there.
if (splitst - 1) > len(watchcomic_split):
possibleissue_num = splitit[splitst]
logger.fdebug('possible issue number of : ' + str(possibleissue_num))
extra_words = splitst - len(watchcomic_split)
logger.fdebug('there are ' + str(extra_words) + ' left over after we remove the series title.')
wordcount = 1
@ -1033,40 +1093,75 @@ def NZB_SEARCH(ComicName, IssueNumber, ComicYear, SeriesYear, Publisher, IssueDa
#logger.info('watchcomic_split: ' + str(len(watchcomic_split)))
if wordcount - len(watchcomic_split) == 1:
search_issue_title = word
possibleissue_num = word
else:
search_issue_title += ' ' + word
wordcount +=1
logger.fdebug('search_issue_title is : ' + str(search_issue_title))
logger.fdebug('possible issue number of : ' + str(possibleissue_num))
if hyphensplit is not None:
logger.fdebug('hypen split detected.')
issue_start = search_issue_title.find(issue_firstword)
logger.fdebug('located first word of : ' + str(issue_firstword) + ' at position : ' + str(issue_start))
search_issue_title = search_issue_title[issue_start:]
logger.fdebug('corrected search_issue_title is now : ' + str(search_issue_title))
#now we have the nzb issue title (if it exists), let's break it down further.
sit_split = search_issue_title.split(None)
watch_split_count = len(issuetitle_words)
wsplit = 0
isstitle_removal = []
isstitle_match = 0 #counter to tally % match
misword = 0 # counter to tally words that probably don't need to be an 'exact' match for
for sit in sit_split:
if sit.lower() == issuetitle_words[wsplit].lower():
logger.fdebug('word match: ' + str(sit))
isstitle_match +=1
else:
for wsplit in issuetitle_words:
if wsplit.lower() == 'part' or wsplit.lower() == 'of':
of_chk = False
if wsplit.lower() == 'of':
of_chk = True
logger.fdebug('not worrying about this word : ' + str(wsplit))
misword +=1
continue
if wsplit.isdigit() and of_chk == True:
logger.fdebug('of ' + str(wsplit) + ' detected. Ignoring for matching.')
of_chk = False
continue
for sit in sit_split:
logger.fdebug('looking at : ' + str(sit.lower()) + ' -TO- ' + str(wsplit.lower()))
if sit.lower() == 'part':
#logger.fdebug('not worrying about this word : ' + str(sit))
logger.fdebug('not worrying about this word : ' + str(sit))
misword +=1
if sit.isdigit():
#logger.fdebug('found digit - possible mini-series/arc subset.')
if sit in issuetitle:
logger.fdebug('found matching numeric in issuetitle.')
isstitle_match +=1
isstitle_removal.append(sit)
break
elif sit.lower() == wsplit.lower():
logger.fdebug('word match: ' + str(sit))
isstitle_match +=1
isstitle_removal.append(sit)
break
else:
try:
if int(sit) == int(wsplit):
logger.fdebug('found matching numeric: ' + str(wsplit))
isstitle_match +=1
isstitle_removal.append(sit)
break
except:
pass
logger.fdebug('isstitle_match count : ' + str(isstitle_match))
if isstitle_match > 0:
iss_calc = int( watch_split_count / isstitle_match )
logger.fdebug('iss_calc: ' + str(iss_calc) + ' %')
iss_calc = ( ( isstitle_match + misword ) / watch_split_count ) * 100
logger.fdebug('iss_calc: ' + str(iss_calc) + ' % with ' + str(misword) + ' unaccounted for words')
else:
iss_calc = 0
logger.fdebug('0 words matched on issue title.')
if int(iss_calc) > 80:
if iss_calc >= 80:
logger.fdebug('>80% match on issue name. If this were implemented, this would be considered a match.')
logger.fdebug('we should remove ' + str(len(isstitle_removal)) + ' words : ' + str(isstitle_removal))
logger.fdebug('Removing issue title from nzb filename to improve matching algorithims.')
splitst = splitst - len(isstitle_removal)
isstitle_chk = True
else:
pass
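The scoring above counts how many words of the stored issue title can be found among the left-over words of the result name, lets qualifiers like 'part'/'of' and their numbers slide as misword, and only strips the title when the hit rate reaches 80%. A worked miniature of that calculation with a hypothetical title (written with float division so the percentage is visible):

# stored issue title: "The Death of Wolverine Part 2"  -> 6 words
# left-over words in the result name: ['Death', 'of', 'Wolverine', 'Part', '2']
issuetitle_words = ['The', 'Death', 'of', 'Wolverine', 'Part', '2']
matched = 3    # 'Death', 'Wolverine', '2' found in the result name
misword = 2    # 'Part' and 'of' are not required to match exactly
iss_calc = float(matched + misword) / len(issuetitle_words) * 100
print(iss_calc)   # 83.3... -> >= 80, so the title words are stripped before issue matching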
@ -1156,9 +1251,12 @@ def NZB_SEARCH(ComicName, IssueNumber, ComicYear, SeriesYear, Publisher, IssueDa
logger.fdebug("integer value of issue we are looking for : " + str(intIss))
fnd_iss_except = None
#if the issue title was present and it contained a numeric, it will pull that as the issue incorrectly
if isstitle_chk == True:
comic_iss = possibleissue_num
logger.fdebug("issue we found for is : " + str(comic_iss))
comintIss = helpers.issuedigits(comic_iss)
logger.fdebug("integer value of issue we are found : " + str(comintIss))
logger.fdebug("integer value of issue we have found : " + str(comintIss))
#issue comparison now as well
if int(intIss) == int(comintIss):

View File

@ -70,7 +70,7 @@ def solicit(month, year):
if len(str(mon)) == 1:
mon = '0' + str(mon)
monthlist.append({"month": helpers.fullmonth(mon).lower(),
monthlist.append({"month": helpers.fullmonth(str(mon)).lower(),
"num_month": mon,
"year": str(year)})
mongr+=1

View File

@ -480,6 +480,7 @@ def foundsearch(ComicID, IssueID, mode=None, down=None, provider=None, SARC=None
module += '[UPDATER]'
myDB = db.DBConnection()
modcomicname = False
logger.fdebug(module + ' comicid: ' + str(ComicID))
logger.fdebug(module + ' issueid: ' + str(IssueID))
@ -488,6 +489,9 @@ def foundsearch(ComicID, IssueID, mode=None, down=None, provider=None, SARC=None
ComicName = comic['ComicName']
if mode == 'want_ann':
issue = myDB.selectone('SELECT * FROM annuals WHERE IssueID=?', [IssueID]).fetchone()
if ComicName != issue['ReleaseComicName'] + ' Annual':
ComicName = issue['ReleaseComicName']
modcomicname = True
else:
issue = myDB.selectone('SELECT * FROM issues WHERE IssueID=?', [IssueID]).fetchone()
CYear = issue['IssueDate'][:4]
@ -535,10 +539,13 @@ def foundsearch(ComicID, IssueID, mode=None, down=None, provider=None, SARC=None
"Status": "Snatched"
}
else:
if mode == 'want_ann':
IssueNum = "Annual " + issue['Issue_Number']
else:
if modcomicname:
IssueNum = issue['Issue_Number']
else:
if mode == 'want_ann':
IssueNum = "Annual " + issue['Issue_Number']
else:
IssueNum = issue['Issue_Number']
newsnatchValues = {"ComicName": ComicName,
"ComicID": ComicID,
@ -556,7 +563,10 @@ def foundsearch(ComicID, IssueID, mode=None, down=None, provider=None, SARC=None
logger.info(module + ' Setting status to Downloaded in history.')
downstatus = 'Downloaded'
if mode == 'want_ann':
IssueNum = "Annual " + issue['Issue_Number']
if modcomicname:
IssueNum = issue['Issue_Number']
else:
IssueNum = "Annual " + issue['Issue_Number']
elif mode == 'story_arc':
IssueNum = issue['IssueNumber']
IssueID = IssueArcID
@ -628,7 +638,30 @@ def forceRescan(ComicID,archive=None,module=None):
issuedupechk = []
annualdupechk = []
issueexceptdupechk = []
mc_issue = []
mc_issuenumber = []
reissues = myDB.select('SELECT * FROM issues WHERE ComicID=?', [ComicID])
multiple_check = myDB.select('SELECT * FROM issues WHERE ComicID=? GROUP BY Int_IssueNumber HAVING (COUNT(Int_IssueNumber) > 1)', [ComicID])
if len(multiple_check) == 0:
logger.fdebug('No issues with identical issue numbering were detected for this series')
mc_issuenumber = None
else:
logger.fdebug('Multiple issues with identical numbering were detected. Attempting to accommodate.')
for mc in multiple_check:
mc_issuenumber.append({"Int_IssueNumber": mc['Int_IssueNumber']})
if not mc_issuenumber is None:
for mciss in mc_issuenumber:
mchk = myDB.select('SELECT * FROM issues WHERE ComicID=? AND Int_IssueNumber=?', [ComicID, mciss['Int_IssueNumber']])
for mck in mchk:
mc_issue.append({"Int_IssueNumber": mck['Int_IssueNumber'],
"IssueYear": mck['IssueDate'][:4],
"IssueID": mck['IssueID']})
logger.fdebug('mc_issue:' + str(mc_issue))
issID_to_ignore = []
issID_to_ignore.append(str(ComicID))
issID_to_write = []
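
The new multiple_check query groups issues by Int_IssueNumber and keeps the numbers that occur more than once, then records each duplicate's year and IssueID for later disambiguation. A standalone sketch of the same lookup, against an assumed issues table carrying the columns referenced above:

# Standalone sketch; conn is a sqlite3-style connection to a database with an
# 'issues' table holding ComicID, Int_IssueNumber, IssueDate and IssueID.
def duplicate_numbered_issues(conn, comic_id):
    cur = conn.cursor()
    cur.execute('SELECT Int_IssueNumber FROM issues WHERE ComicID=? '
                'GROUP BY Int_IssueNumber HAVING COUNT(Int_IssueNumber) > 1', [comic_id])
    dupe_numbers = [row[0] for row in cur.fetchall()]
    mc_issue = []
    for int_issue in dupe_numbers:
        cur.execute('SELECT Int_IssueNumber, IssueDate, IssueID FROM issues '
                    'WHERE ComicID=? AND Int_IssueNumber=?', [comic_id, int_issue])
        for num, date, issue_id in cur.fetchall():
            mc_issue.append({'Int_IssueNumber': num,
                             'IssueYear': (date or '')[:4],
                             'IssueID': issue_id})
    return mc_issue
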
@@ -756,32 +789,82 @@ def forceRescan(ComicID,archive=None,module=None):
#logger.fdebug("int_iss: " + str(int_iss))
if int(fcdigit) == int_iss:
logger.fdebug(module + ' Issue match - fcdigit: ' + str(fcdigit) + ' ... int_iss: ' + str(int_iss))
logger.fdebug(module + ' [' + str(reiss['IssueID']) + '] Issue match - fcdigit: ' + str(fcdigit) + ' ... int_iss: ' + str(int_iss))
if '-' in temploc and temploc.find(reiss['Issue_Number']) > temploc.find('-'):
logger.fdebug(module + ' I have detected a possible Title in the filename')
logger.fdebug(module + ' the issue # has occurred after the -, so I assume that it is part of the Title')
break
multiplechk = False
for d in issuedupechk:
if int(d['fcdigit']) == int(fcdigit):
logger.fdebug(module + ' Duplicate issue detected - not counting this: ' + str(tmpfc['ComicFilename']))
logger.fdebug(module + ' is a duplicate of ' + d['filename'])
logger.fdebug('fcdigit:' + str(fcdigit) + ' === dupedigit: ' + str(d['fcdigit']))
issuedupe = "yes"
break
if len(mc_issue) > 1:
logger.fdebug('[Initial Check] multiple check issue detected - more than one issue with identical numbering for series.')
for mi in mc_issue:
if (mi['IssueYear'] in tmpfc['ComicFilename']):# and (int(d['issueyear']) == int(mi['IssueYear'])) and (d['fcdigit'] == mi['Int_IssueNumber']):
logger.fdebug('[Initial Check] detected : ' + str(mi['IssueYear']) + ' within filename.')
multiplechk = False
issuedupe = "no"
break
else:
logger.fdebug('[Initial Check] ' + str(mi['Int_IssueNumber']) + ' - did not detect year within filename - expecting (' + str(mi['IssueYear']) + '). Assuming this is the identically numbered issue.')
multiplechk = True
if multiplechk == False: break
if multiplechk == True:
logger.fdebug(module + ' Duplicate issue detected - not counting this: ' + str(tmpfc['ComicFilename']))
#logger.fdebug(module + ' is a duplicate of ' + d['filename'])
#logger.fdebug('fcdigit:' + str(fcdigit) + ' === dupedigit: ' + str(d['fcdigit']))
issuedupe = "yes"
break
if issuedupe == "no":
logger.fdebug(module + ' Matched...issue: ' + rescan['ComicName'] + '#' + str(reiss['Issue_Number']) + ' --- ' + str(int_iss))
havefiles+=1
haveissue = "yes"
isslocation = str(tmpfc['ComicFilename'])
issSize = str(tmpfc['ComicSize'])
logger.fdebug(module + ' .......filename: ' + str(isslocation))
logger.fdebug(module + ' .......filesize: ' + str(tmpfc['ComicSize']))
# to avoid duplicate issues which screws up the count...let's store the filename issues then
# compare earlier...
issuedupechk.append({'fcdigit': int(fcdigit),
'filename': tmpfc['ComicFilename']})
break
foundchk = False
#make sure we are adding the correct issue.
for d in issuedupechk:
if int(d['fcdigit']) == int(fcdigit):
if len(mc_issue) > 1 and multiplechk == False:
#if len(mc_issue) > 1 and multiplechk != False and any d['Int_IssueNumber'] == int_iss for d in mc_issue):
for mi in mc_issue:
logger.fdebug('[DupeCheck]' + str(mi['IssueID']) + ' comparing to ' + str(d['issueid']))
if mi['IssueID'] != d['issueid'] and mi['IssueID'] == reiss['IssueID']:
logger.fdebug('Most likely, I should not be marking this as a dupe.')
if (mi['IssueYear'] in tmpfc['ComicFilename']) and (d['fcdigit'] == mi['Int_IssueNumber']):
logger.fdebug('[DupeCheck] detected : ' + str(mi['IssueYear']) + ' within filename. This is an issue that happens to have duplicate numbering and is acceptable')
foundchk = True
break
else:
logger.fdebug('[DupeCheck] ' + str(mi['Int_IssueNumber']) + ': did not detect year (' + str(mi['IssueYear']) + ').')
foundchk = False
else:
foundchk = True
if foundchk == True:
logger.fdebug('[DupeCheck] This is not a duplicate. foundchk is : ' + str(foundchk))
letitgo = True
break
if foundchk == False:
logger.fdebug(module + ' Matched...issue: ' + rescan['ComicName'] + '#' + reiss['Issue_Number'] + ' --- ' + str(int_iss))
havefiles+=1
haveissue = "yes"
isslocation = str(tmpfc['ComicFilename'])
issSize = str(tmpfc['ComicSize'])
logger.fdebug(module + ' .......filename: ' + str(isslocation))
logger.fdebug(module + ' .......filesize: ' + str(tmpfc['ComicSize']))
# to avoid duplicate issues which screws up the count...let's store the filename issues then
# compare earlier...
issuedupechk.append({'fcdigit': int(fcdigit),
'filename': tmpfc['ComicFilename'],
'issueyear': issyear,
'issueid': reiss['IssueID']})
break
#else:
# if the issue # matches, but there is no year present - still match.
# determine a way to match on year if present, or no year (currently).
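
The duplicate check above only accepts a second file with an identical issue number when the filename carries the year of one of the identically numbered issues. A simplified standalone sketch of that intent (the real code also walks issuedupechk and tracks the multiplechk/foundchk flags):

# Simplified sketch; mc_issue entries look like
# {'Int_IssueNumber': ..., 'IssueYear': ..., 'IssueID': ...}.
def is_acceptable_duplicate(filename, int_issue, mc_issue, already_counted_ids):
    for candidate in mc_issue:
        if candidate['Int_IssueNumber'] != int_issue:
            continue
        if candidate['IssueID'] in already_counted_ids:
            continue
        if candidate['IssueYear'] in filename:
            # same number, different issue, and the filename year confirms it
            return True
    return False
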
@@ -865,21 +948,26 @@ def forceRescan(ComicID,archive=None,module=None):
#even if we couldn't find the physical issue, check the status.
#-- if annuals aren't enabled, this will bugger out.
writeit = True
if mylar.ANNUALS_ON:
if 'annual' in temploc.lower():
if reann is None:
logger.fdebug(module + ' Annual present in location, but series does not have any annuals attached to it - Ignoring')
try:
if mylar.ANNUALS_ON:
if 'annual' in temploc.lower():
if reann is None:
logger.fdebug(module + ' Annual present in location, but series does not have any annuals attached to it - Ignoring')
writeit = False
else:
iss_id = reann['IssueID']
else:
iss_id = reiss['IssueID']
else:
if 'annual' in temploc.lower():
logger.fdebug(module + ' Annual support not enabled, but annual issue present within directory. Ignoring annual.')
writeit = False
else:
iss_id = reann['IssueID']
else:
iss_id = reiss['IssueID']
else:
if 'annual' in temploc.lower():
logger.fdebug(module + ' Annual support not enabled, but annual issue present within directory. Ignoring annual.')
writeit = False
else:
iss_id = reiss['IssueID']
iss_id = reiss['IssueID']
except:
logger.warn(module + ' An error occurred trying to get the relevant issue data. This is probably due to the series not having proper issue data.')
logger.warn(module + ' You should Refresh the series and/or submit an issue on github regarding the series and the error.')
return
if writeit == True:
logger.fdebug(module + ' issueID to write to db:' + str(iss_id))
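
The try/except added above picks which IssueID gets written back: annual files are skipped when annual support is off or the series has no annual rows, and missing issue data is treated as a recoverable error instead of a crash. A condensed sketch of that selection, with row objects represented as plain dicts:

# Condensed sketch of the issue-ID selection (reiss/reann stand in for DB rows).
def pick_issue_id(temploc, annuals_on, reiss, reann):
    try:
        if 'annual' in temploc.lower():
            if not annuals_on:
                return None              # annual support disabled, ignore the file
            if reann is None:
                return None              # annual file but the series has no annual rows
            return reann['IssueID']
        return reiss['IssueID']
    except (KeyError, TypeError):
        return None                      # malformed issue data; refresh the series

# pick_issue_id('Batman Annual 001.cbr', True, {'IssueID': 10}, {'IssueID': 99}) -> 99
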


@@ -423,7 +423,20 @@ class WebInterface(object):
raise cherrypy.HTTPRedirect("comicDetails?ComicID=%s" % gcomicid)
GCDaddComic.exposed = True
def post_process(self, nzb_name, nzb_folder, failed=False):
def post_process(self, nzb_name, nzb_folder, failed=False, apc_version=None, comicrn_version=None):
if comicrn_version is None and apc_version is None:
logger.warn('ComicRN.py should be v' + str(mylar.STATIC_COMICRN_VERSION) + ' and autoProcessComics.py should be v' + str(mylar.STATIC_APC_VERSION) + ', but neither script reported a version, so both are out of date. Post-Processing may or may not work.')
elif comicrn_version is None or comicrn_version != mylar.STATIC_COMICRN_VERSION:
if comicrn_version == 'None':
comicrn_version = "0"
logger.warn('Your ComicRN.py script should be v' + str(mylar.STATIC_COMICRN_VERSION) + ', but is v' + str(comicrn_version) + ' and is out of date. Things may still work - but you are taking your chances.')
elif apc_version is None or apc_version != mylar.STATIC_APC_VERSION:
if apc_version == 'None':
apc_version = "0"
logger.warn('Your autoProcessComics.py script should be v' + str(mylar.STATIC_APC_VERSION) + ', but is v' + str(apc_version) + ' and is out of date. Odds are something is gonna fail - you should update it.')
else:
logger.info('ComicRN.py version: ' + str(comicrn_version) + ' -- autoProcessComics.py version: ' + str(apc_version))
import Queue
logger.info('Starting postprocessing for : ' + nzb_name)
if failed == '0':
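
The new version handshake compares the versions reported by ComicRN.py and autoProcessComics.py against the values Mylar ships with and warns when either is missing or stale. A standalone sketch of that check, with illustrative version constants in place of Mylar's own:

# Illustrative constants only; Mylar stores the expected versions itself.
STATIC_COMICRN_VERSION = '1.0'
STATIC_APC_VERSION = '1.0'

def check_script_versions(comicrn_version=None, apc_version=None):
    warnings = []
    if comicrn_version is None and apc_version is None:
        warnings.append('ComicRN.py and autoProcessComics.py reported no version; both are out of date')
    elif comicrn_version != STATIC_COMICRN_VERSION:
        warnings.append('ComicRN.py is v%s, expected v%s'
                        % (comicrn_version or '0', STATIC_COMICRN_VERSION))
    elif apc_version != STATIC_APC_VERSION:
        warnings.append('autoProcessComics.py is v%s, expected v%s'
                        % (apc_version or '0', STATIC_APC_VERSION))
    return warnings

# check_script_versions('1.0', '1.0') -> []  (both helper scripts are current)
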
@@ -509,6 +522,8 @@ class WebInterface(object):
logger.info(u"Deleting all traces of Comic: " + ComicName)
myDB.action('DELETE from comics WHERE ComicID=?', [ComicID])
myDB.action('DELETE from issues WHERE ComicID=?', [ComicID])
if mylar.ANNUALS_ON:
myDB.action('DELETE from annuals WHERE ComicID=?', [ComicID])
myDB.action('DELETE from upcoming WHERE ComicID=?', [ComicID])
helpers.ComicSort(sequence='update')
raise cherrypy.HTTPRedirect("home")
@@ -1004,10 +1019,16 @@ class WebInterface(object):
logger.info(u"Marking " + ComicName + " issue: " + ComicIssue + " as wanted...")
myDB.upsert("issues", newStatus, controlValueDict)
else:
if manualsearch:
logger.info('Initiating manual search for ' + ComicName + ' Annual: ' + ComicIssue)
annual_name = myDB.selectone("SELECT * FROM annuals WHERE ComicID=? and IssueID=?", [ComicID,IssueID]).fetchone()
if annual_name is None:
logger.fdebug('Unable to locate.')
else:
logger.info(u"Marking " + ComicName + " Annual: " + ComicIssue + " as wanted...")
ComicName = annual_name['ReleaseComicName']
if manualsearch:
logger.info('Initiating manual search for ' + ComicName + ' : ' + ComicIssue)
else:
logger.info(u"Marking " + ComicName + " : " + ComicIssue + " as wanted...")
myDB.upsert("annuals", newStatus, controlValueDict)
#---
#this should be on its own somewhere
@@ -2542,7 +2563,7 @@ class WebInterface(object):
#determine a best-guess to # of issues in series
#this needs to be reworked / refined a lot more.
#minISSUE = highest issue #, startISSUE = lowest issue #
numissues = int(minISSUE) - int(startISSUE)
numissues = helpers.int_num(minISSUE) - helpers.int_num(startISSUE)
#normally minissue would work if the issue #'s started at #1.
implog = implog + "the years involved are : " + str(yearRANGE) + "\n"
implog = implog + "highest issue # is : " + str(minISSUE) + "\n"
@@ -2567,9 +2588,9 @@ class WebInterface(object):
mode='series'
if yearRANGE is None:
sresults, explicit = mb.findComic(ogcname, mode, issue=numissues, explicit='all') #ComicName, mode, issue=numissues)
sresults, explicit = mb.findComic(displaycomic, mode, issue=numissues, explicit='all') #ogcname, mode, issue=numissues, explicit='all') #ComicName, mode, issue=numissues)
else:
sresults, explicit = mb.findComic(ogcname, mode, issue=numissues, limityear=yearRANGE, explicit='all') #ComicName, mode, issue=numissues, limityear=yearRANGE)
sresults, explicit = mb.findComic(displaycomic, mode, issue=numissues, limityear=yearRANGE, explicit='all') #ogcname, mode, issue=numissues, limityear=yearRANGE, explicit='all') #ComicName, mode, issue=numissues, limityear=yearRANGE)
type='comic'
if len(sresults) == 1:
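
Switching to helpers.int_num guards the issue-count estimate against issue numbers that plain int() cannot parse. The real helper may behave differently, but a tolerant conversion along these lines captures the idea:

# Tolerant conversion sketch; the real helpers.int_num may differ.
def int_num(value, default=0):
    try:
        return int(value)
    except (TypeError, ValueError):
        try:
            return int(float(value))
        except (TypeError, ValueError):
            return default

# int_num('12') - int_num('1') -> 11, even if one value arrived as '12.0'
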


@@ -460,7 +460,7 @@ def pullitcheck(comic1off_name=None,comic1off_id=None,forcecheck=None, futurepul
w = 1
else:
#let's read in the comic.watchlist from the db here
cur.execute("SELECT ComicID, ComicName_Filesafe, ComicYear, ComicPublisher, ComicPublished, LatestDate, ForceContinuing, AlternateSearch, LatestIssue from comics")
cur.execute("SELECT ComicID, ComicName_Filesafe, ComicYear, ComicPublisher, ComicPublished, LatestDate, ForceContinuing, AlternateSearch, LatestIssue from comics WHERE Status = 'Active'")
while True:
watchd = cur.fetchone()
#print ("watchd: " + str(watchd))


@@ -3,6 +3,8 @@ import urllib
import os.path
import ConfigParser
apc_version = "1.0"
class AuthURLOpener(urllib.FancyURLopener):
def __init__(self, user, pw):
self.username = user
@@ -21,8 +23,11 @@ class AuthURLOpener(urllib.FancyURLopener):
self.numTries = 0
return urllib.FancyURLopener.open(self, url)
def processEpisode(dirName, nzbName=None):
print "Your ComicRN.py script is outdated. I'll force this through, but Failed Download Handling and possible enhancements/fixes will not work and could cause errors."
return processIssue(dirName, nzbName)
def processIssue(dirName, nzbName=None, failed=False):
def processIssue(dirName, nzbName=None, failed=False, comicrn_version=None):
config = ConfigParser.ConfigParser()
configFilename = os.path.join(os.path.dirname(sys.argv[0]), "autoProcessComics.cfg")
@@ -61,6 +66,9 @@ def processIssue(dirName, nzbName=None, failed=False):
params['nzb_name'] = nzbName
params['failed'] = failed
params['apc_version'] = apc_version
params['comicrn_version'] = comicrn_version
myOpener = AuthURLOpener(username, password)


@@ -26,10 +26,24 @@ if os.environ.has_key('NZBOP_SCRIPTDIR') and not os.environ['NZBOP_VERSION'][0:5
POSTPROCESS_NONE=95
#Start script
if os.environ['NZBPP_TOTALSTATUS'] == 'FAILURE' or os.environ['NZBPP_TOTALSTATUS'] == 'WARNING':
failit = 1
if os.environ['NZBOP_VERSION'][0:5] > '13.0':
if os.environ['NZBPP_TOTALSTATUS'] == 'FAILURE' or os.environ['NZBPP_TOTALSTATUS'] == 'WARNING':
failit = 1
else:
failit = 0
else:
failit = 0
#NZBPP_TOTALSTATUS only exists in 13.0+ - on older 11.0+ versions we fall back to NZBPP_PARSTATUS / NZBPP_UNPACKSTATUS
#assume failit = 1 (failed) by default
failit = 1
if os.environ['NZBPP_PARSTATUS'] == '1' or os.environ['NZBPP_UNPACKSTATUS'] == '1':
print 'Download of "%s" has failed.' % (os.environ['NZBPP_NZBNAME'])
elif os.environ['NZBPP_UNPACKSTATUS'] in ('3', '4'):
print 'Download of "%s" has failed.' % (os.environ['NZBPP_NZBNAME'])
elif os.environ['NZBPP_PARSTATUS'] == '4':
print 'Download of "%s" requires par-repair.' % (os.environ['NZBPP_NZBNAME'])
else:
print 'Download of "%s" has successfully completed.' % (os.environ['NZBPP_NZBNAME'])
failit = 0
result = autoProcessComics.processIssue(os.environ['NZBPP_DIRECTORY'], os.environ['NZBPP_NZBNAME'], failed=failit)
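
The new NZBGet handling probes NZBPP_TOTALSTATUS on builds that expose it and falls back to the per-stage PAR/UNPACK status variables on older 11.0+ builds. A simplified standalone sketch of that decision (which variables exist depends on the NZBGet version):

import os

# Simplified sketch: prefer NZBPP_TOTALSTATUS when NZBGet exposes it,
# otherwise fall back to the per-stage PAR/UNPACK status variables.
def download_failed(env=os.environ):
    if 'NZBPP_TOTALSTATUS' in env:
        return env['NZBPP_TOTALSTATUS'] in ('FAILURE', 'WARNING')
    par = env.get('NZBPP_PARSTATUS', '0')
    unpack = env.get('NZBPP_UNPACKSTATUS', '0')
    # PAR failure/repair-needed or any unpack failure counts as a failed download
    return par in ('1', '4') or unpack in ('1', '3', '4')
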


@@ -12,11 +12,14 @@
import sys
import autoProcessComics
comicrn_version = "1.0"
#the code.
if len(sys.argv) < 2:
print "No folder supplied - is this being called from SABnzbd or NZBGet?"
sys.exit()
elif len(sys.argv) >= 3:
sys.exit(autoProcessComics.processIssue(sys.argv[1], sys.argv[3], sys.argv[7]))
sys.exit(autoProcessComics.processIssue(sys.argv[1], sys.argv[3], sys.argv[7], comicrn_version=comicrn_version))
else:
sys.exit(autoProcessComics.processIssue(sys.argv[1]))
sys.exit(autoProcessComics.processIssue(sys.argv[1], comicrn_version=comicrn_version))
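
ComicRN.py now forwards its own version string so autoProcessComics.py and Mylar can flag outdated helper scripts. A sketch of the argument handling, not taken from Mylar, which checks for all eight SABnzbd arguments before indexing them (the script above assumes SABnzbd supplies them):

# Sketch of the call handling; SABnzbd supplies the folder, nzb name and
# final status positionally, a manual call supplies only the folder.
COMICRN_VERSION = '1.0'   # illustrative value

def build_call(argv):
    if len(argv) < 2:
        raise SystemExit('No folder supplied - is this being called from SABnzbd or NZBGet?')
    if len(argv) >= 8:
        # SABnzbd-style invocation: guard the index before reading argv[7]
        return {'dirName': argv[1], 'nzbName': argv[3], 'failed': argv[7],
                'comicrn_version': COMICRN_VERSION}
    return {'dirName': argv[1], 'comicrn_version': COMICRN_VERSION}

# build_call(['ComicRN.py', '/downloads/job'])
#   -> {'dirName': '/downloads/job', 'comicrn_version': '1.0'}
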