IMP:(#398) Manual PostProcess inclusion, FIX:(#434) Search-related issues when series title has 'the' and a numeric, FIX:(#432) Age of Ultron (AI), FIX:(#426) -1 Issue support, FIX:(#419) Filecheck support for Title inclusion in filenames, FIX:(#417) Status displayed incorrectly on main, IMP:(#417) Independent publishers can now set time between issues for Continuing/Ended, FIX:(#414) Unicode problem, IMP:(#409) Annual Inclusion (config.ini/Annuals=1), IMP: ReadingLists inclusion (../readlist.html) & more

This commit is contained in:
evilhero 2013-07-01 01:19:15 -04:00
parent 62eb6c438c
commit 36f7863ac6
19 changed files with 1704 additions and 417 deletions
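The commit message and the mylar/__init__.py changes below introduce a handful of new config.ini settings. A rough sketch of the relevant [General] block, using the defaults read by check_setting_int/check_setting_str (the annuals key name is inferred from the commit message, so treat exact key names and casing as assumptions):

[General]
enable_meta = 0      ; 1 enables ComicTagger metadata tagging during post-processing
cmtagger_path =      ; path to ComicTagger; if blank, Mylar assumes its own root folder
indie_pub = 75       ; days between issues before an indie series is marked Ended
biggie_pub = 55      ; same threshold for the big publishers (Marvel/DC/Dark Horse)
annuals = 1          ; per the commit message: pull in annuals for watched series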

comictagger.py (new executable file, 5 lines added)

@ -0,0 +1,5 @@
#!/usr/bin/env python
from lib.comictaggerlib.main import ctmain
if __name__ == '__main__':
ctmain()


@ -282,7 +282,7 @@ td.details {
}
.paging_full_numbers span.paginate_button {
background-color: #ddd;
background-color: #fff;
}
.paging_full_numbers span.paginate_button:hover {


@ -161,21 +161,42 @@ table#artist_table td#have { vertical-align: middle; }
div#paddingheader { padding-top: 48px; font-size: 24px; font-weight: bold; text-align: center; }
div#nopaddingheader { font-size: 24px; font-weight: bold; text-align: center; }
table#issue_table { background-color: white; }
table#issue_table { background-color: grey; width: 100%; padding: 10px; }
table#issue_table th#select { vertical-align: middle; text-align: left; min-width: 10px; }
table#issue_table th#int_issuenumber { text-align: left; min-width: 0px }
table#issue_table th#issuenumber { text-align: left; min-width: 20px; }
table#issue_table th#issuename { text-align: center; min-width: 200px; }
table#issue_table th#reldate { text-align: center; min-width: 20px; }
table#issue_table th#status { text-align: center; min-width: 20px; }
table#issue_table th#type { text-align: center; min-width: 20px; }
table#issue_table th#reldate { text-align: center; min-width: 10px; }
table#issue_table th#status { text-align: center; min-width: 10px; }
table#issue_table th#options { text-align: center; min-width: 20px; }
table#issue_table td#select { vertical-align: middle; text-align: left; }
table#issue_table td#int_issuenumber { vertical-align: middle; text-align: left; }
table#issue_table td#issuenumber { vertical-align: middle; text-align: left; }
table#issue_table td#issuename { vertical-align: middle; text-align: center; font-size: 9px; }
table#issue_table td#reldate { vertical-align: middle; text-align: center; }
table#issue_table td#status { vertical-align: middle; text-align: center; font-size: 13px; }
table#issue_table td#type { vertical-align: middle; text-align: center; }
table#issue_table td#options { vertical-align: middle; text-align: center; }
table#annual_table { background-color: white; width: 100%; padding: 10px; }
table#annual_table th#select { vertical-align: middle; text-align: left; min-width: 10px; }
table#annual_table th#int_issuenumber { text-align: left; min-width: 0px }
table#annual_table th#issuenumber { text-align: left; min-width: 20px; }
table#annual_table th#issuename { text-align: center; min-width: 200px; }
table#annual_table th#reldate { text-align: center; min-width: 10px; }
table#annual_table th#status { text-align: center; min-width: 10px; }
table#annual_table th#options { text-align: center; min-width: 20px; }
table#annual_table td#select { vertical-align: middle; text-align: left; }
table#annual_table td#int_issuenumber { vertical-align: middle; text-align: left; }
table#annual_table td#issuenumber { vertical-align: middle; text-align: left; }
table#annual_table td#issuename { vertical-align: middle; text-align: center; font-size: 9px; }
table#annual_table td#reldate { vertical-align: middle; text-align: center; }
table#annual_table td#status { vertical-align: middle; text-align: center; font-size: 13px; }
table#annual_table td#options { vertical-align: middle; text-align: center; }
img.albumArt { float: left; padding-right: 5px; }
div#albumheader { padding-top: 48px; height: 200px; }


@ -162,7 +162,7 @@
</div>
<legend>Comic Location</legend>
<div>
<small class="heading"><span style="float: left; margin-right: .3em; margin-top: 4px;" class="ui-icon ui-icon-info"></span>Automatic folder creation happens BENEATH this path</small>
</div>
<div class="row">
<label>Comic Location Path</label>
@ -467,6 +467,21 @@
<small>enter in the absolute path to the script</small>
</div>
</fieldset>
<fieldset>
<legend>Metadata Tagging</legend>
<div class="row checkbox left clearfix">
<input id="enable_meta" type="checkbox" onclick="initConfigCheckbox($(this));" name="enable_meta" value="1" ${config['enable_meta']} /><label>Enable Metadata Tagging</label>
<small class="heading"><span style="float: left; margin-right: .3em; margin-top: 4px;" class="ui-icon ui-icon-info"></span>You need to have ComicTagger and configparser installed</small>
</div>
<div class="config">
<div class="row">
<label>ComicTagger Path</label>
<input type="text" name="cmtagger_path" value="${config['cmtagger_path']}" size="30" />
<small>If left blank, it is assumed to be in the root of mylar</small>
</div>
</div>
</fieldset>
</td>
</tr>
</table>
@ -522,7 +537,7 @@
</select>
</div>
<div class="row checkbox">
<input id="zero_level" type="checkbox" onclick="initConfigCheckbox($(this));" name="zero_level" value="1" ${config['zero_level']} /><label>Zero level Suppression</label>
</div>
<div class="row">
<label>Format</label>
@ -802,6 +817,7 @@
initConfigCheckbox("#replace_spaces");
initConfigCheckbox("#use_minsize");
initConfigCheckbox("#use_maxsize");
initConfigCheckbox("#enable_meta");
initConfigCheckbox("#zero_level");
}
$(document).ready(function() {


@ -80,7 +80,7 @@
c_date = datetime.date(int(latestdate[:4]),int(latestdate[5:7]),1)
n_date = datetime.date.today()
recentchk = (n_date - c_date).days
if recentchk < 45:
if recentchk < 55:
recentstatus = 'Continuing'
else:
recentstatus = 'Ended'
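The hunk above only widens the hard-coded recency window from 45 to 55 days. The per-publisher window mentioned in the commit message presumably comes from the new helpers.checkthepub() added further down (it returns BIGGIE_PUB for Marvel/DC/Dark Horse and INDIE_PUB for everything else). A minimal sketch of how the two pieces would fit together, assuming ComicID is in scope at this point:

from mylar import helpers

# days allowed since the latest issue before the series is flagged Ended:
# BIGGIE_PUB (default 55) for the big publishers, INDIE_PUB (default 75) otherwise
threshold = int(helpers.checkthepub(ComicID))
if recentchk < threshold:
    recentstatus = 'Continuing'
else:
    recentstatus = 'Ended'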


@ -0,0 +1,171 @@
<%inherit file="base.html"/>
<%!
import os
import mylar
from mylar import helpers, db
%>
<%def name="headerIncludes()">
<div id="subhead_container">
<div id="subhead_menu">
<a id="menu_link_delete" href="#">Sync</a>
<a id="menu_link_delete" href="#" onclick="doAjaxCall('removefromreadlist?AllRead=1',$(this),'table')" data-success="All Read Records Removed">Remove Read</a>
<a id="menu_link_delete" href="#">Force New Check</a>
<a id="menu_link_refresh" href="#">Clear File Cache</a>
<a id="menu_link_refresh" href="#">Import Story Arc File</a>
</div>
</div>
</%def>
<%def name="body()">
<div id="paddingheader">
<h1 class="clearfix"><img src="interfaces/default/images/ReadingList-icon.png" height="26" width="26" alt="Reading List"/>Reading List Management</h1>
</div>
<div id="tabs">
<ul>
<li><a href="#tabs-1">Issue Reading List</a></li>
<li><a href="#tabs-2">Story Arcs</a></li>
</ul>
<div id="tabs-1">
<table class="configtable" id="read_detail">
<fieldset>
<center><legend>Individual Reading Lists</legend>
<strong>(Watchlist)</strong>
<p>Issues from your watchlisted series that you have marked for the
Reading List appear here.<br/></p></center>
</fieldset>
<thead>
<tr>
<th id="comicname">ComicName</th>
<th id="issue">Issue</th>
<th id="issueyear">Issue Date</th>
<th id="status">Status</th>
<th id="options">Options</th>
</tr>
</thead>
<tbody>
%for issue in issuelist:
<tr>
<td id="comicname"><a href="artistPage?ComicID=${issue['ComicID']}">${issue['ComicName']} (${issue['SeriesYear']})</a></td>
<td id="issue">${issue['Issue_Number']}</td>
<td id="issueyear">${issue['IssueDate']}</td>
<td id="status">${issue['Status']}</td>
<td id="options">
%if issue['inCacheDIR']:
<%
try:
with open(os.path.join(mylar.CACHE_DIR,issue['Location'])) as f:
linky = issue['Location']
except IOError as e:
linky = None
%>
%if linky:
<a href="cache/${linky}"><img src="interfaces/default/images/download_icon.png" height="25" width="25" title="Download the Issue" /></a>
%endif
%else:
<a onclick="doAjaxCall('downloadLocal?IssueID=${issue['IssueID']}', $(this), 'table')" ><img src="interfaces/default/images/copy_icon.png" height="25" width="25" title="Copy issue to local cache (ready for download)" /></a>
%endif
<a onclick="doAjaxCall('removefromreadlist?IssueID=${issue['IssueID']}',$(this),'table')" data-success="Successfully removed ${issue['ComicName']} #${issue['Issue_Number']} from Reading List"><img src="interfaces/default/images/skipped_icon.png" height="25" width="25" title="Remove from Reading List" /></a>
<a onclick="doAjaxCall('markasRead?IssueID=${issue['IssueID']}', $(this),'table')" data-success="Marked ${issue['ComicName']} ${issue['Issue_Number']} as Read."><img src="interfaces/default/images/wanted_icon.png" height="25" width="25" title="Mark as Read" /></a>
</td>
</tr>
%endfor
</tbody>
</table>
</div>
<div id="tabs-2">
<table class="configtable">
<tr>
<form action="searchit" method="get">
<input type="hidden" name="type" value="storyarc">
<input type="text" value="" placeholder="Search" onfocus="if(this.value==this.defaultValue) this.value='';" name="name" />
<span class="mini-icon"></span>
<input type="submit" value="Search"/>
</form>
</tr>
<tr>
<form action="importReadlist" method="get">
<div class="row" style="float:right">
<label for="">File to import</label>
<input type="text" value="Enter a filename to import" onfocus="if (this.value==this.defaultValue) this.value='';" name="filename" size="70" />
<input type="submit" value="Import">
</div>
</form>
</tr>
<tr>
<form action="readOptions" method="get">
<div class="row">
<label>Options</label><br/>
<input type="checkbox" /><label>Arcs in Grabbag Directory?</label><br/>
<input type="checkbox" name="storyarcdir" value="1" ${readConfig['storyarcdir']} /><label>Arcs in StoryArc Directory (off of ComicLocationRoot)?</label><br/>
<input type="checkbox" /><label>Show Downloaded Story Arc Issues on ReadingList tab</label><br/>
<input type="checkbox" name="read2filename" value="1" ${readConfig['read2filename']} /><label>Append Reading # to filename</label>
</div>
</form>
</tr>
</table>
<table class="configtable" id="artist_table">
<thead>
<tr>
<th id="storyarc">Story Arc</th>
<th id="issue">Issues</th>
<th id="have">Status</th>
<th id="action">Options</th>
</tr>
</thead>
<tbody>
%for item in readlist:
<%
myDB = db.DBConnection()
totalcnt = myDB.action("SELECT COUNT(*) as count FROM readinglist WHERE StoryArcID=?", [item['StoryArcID']]).fetchall()
totalarc = totalcnt[0][0]
havecnt = myDB.action("SELECT COUNT(*) as count FROM readinglist WHERE StoryArcID=? AND Status='Downloaded' or Status='Archived'", [item['StoryArcID']]).fetchall()
havearc = havecnt[0][0]
if not havearc:
havearc = 0
try:
percent = (havearc *100.0)/totalarc
if percent > 100:
percent = 100
except (ZeroDivisionError, TypeError):
percent = 0
totalarc = '?'
%>
<tr>
<td id="storyarc"><a href="detailReadlist?StoryArcID=${item['StoryArcID']}&StoryArcName=${item['StoryArc']}">${item['StoryArc']}</a></td>
<td id="issue">${item['TotalIssues']}</td>
<td id="have"><span title="${percent}"></span><div class="progress-container"><div style="background-color:#a3e532; height:14px; width:${percent}%"><div class="havetracks">${havearc}/${totalarc}</div></div></div></td>
<td id="action">
<a title="Remove from Reading List" onclick="doAjaxCall('removefromreadlist?StoryArcID=${item['StoryArcID']}',$(this),'table')" data-success="Successfully removed ${item['StoryArc']} from list."><img src="interfaces/default/images/skipped_icon.png" height="25" width="25" /></a>
</td>
</tr>
%endfor
</tbody>
</table>
</div>
</div>
</%def>
<%def name="headIncludes()">
<link rel="stylesheet" href="interfaces/default/css/data_table.css">
</%def>
<%def name="javascriptIncludes()">
<script src="js/libs/jquery.dataTables.min.js"></script>
<script>
function initThisPage() {
jQuery( "#tabs" ).tabs();
}
$(document).ready(function() {
initThisPage();
initActions();
});
$(window).load(function(){
initFancybox();
});
</script>
</%def>


@ -0,0 +1,124 @@
<%inherit file="base.html"/>
<%!
import os
import mylar
from mylar import helpers
%>
<%def name="headerIncludes()">
<div id="subhead_container">
<div id="subhead_menu">
<a id="menu_link_delete" href="#">Sync</a>
<a id="menu_link_delete" href="#">Remove Read</a>
<a id="menu_link_delete" href="#">Clear File Cache</a>
<a id="menu_link_refresh" onclick="doAjaxCall('ReadGetWanted?StoryArcID=${storyarcid}',$(this),'table')" data-success="Searching for Missing StoryArc Issues">Search for Missing</a>
<a id="menu_link_refresh" onclick="doAjaxCall('ArcWatchlist?StoryArcID=${storyarcid}',$(this),'table')" data-success="Searching for matches on Watchlist">Search for Watchlist matches</a>
</div>
</div>
</%def>
<%def name="body()">
<div id="paddingheader">
<h1 class="clearfix"><a href="readlist"><img src="interfaces/default/images/ReadingList-icon.png" height="26" width="26" alt="Reading List"/>Reading List Management</a></h1>
</div>
<center><h1>${storyarcname}</h1></center>
<table class="configtable">
<tr>
<form action="readOptions" method="get">
<fieldset>
<div class="row">
<label>Options</label><br/>
<input type="checkbox" name="storyarcdir" value="1" ${readConfig['storyarcdir']} /><label>Should I create a Story-Arc Directory?</label><br/>
<small>Arcs in StoryArc Directory: <% sdir = os.path.join(mylar.DESTINATION_DIR, "StoryArcs") %>${sdir}</small><br/>
<input type="checkbox" /><label>Show Downloaded Story Arc Issues on ReadingList tab</label><br/>
<input type="checkbox" name="read2filename" value="1" ${readConfig['read2filename']} /><label>Append Reading# to filename</label><br/>
</div>
</fieldset>
</div>
<input type="submit" value="Update"/>
</div>
</form>
</tr>
</table>
<table class="display" id="read_detail">
<thead>
<tr>
<th id="readingorder"></th>
<th id="storyarc">Story Arc</th>
<th id="comicname">ComicName</th>
<th id="issue">Issue</th>
<th id="issueyear">Pub Year</th>
<th id="status">Status</th>
<th id="action">Options</th>
</tr>
</thead>
<tbody>
%for item in readlist:
<%
if item['Status'] == 'Downloaded':
grade = 'A'
elif item['Status'] == 'Read':
grade = 'C'
elif item['Status'] == 'Not Watched':
grade = 'X'
elif item['Status'] == 'Wanted':
grade = 'Y'
else:
grade = 'U'
%>
<tr id="li_${item['ReadingOrder']}" class="grade${grade}">
<td id="readingorder">${item['ReadingOrder']}</td>
<td id="storyarc">${item['StoryArc']}</td>
<td id="comicname">${item['ComicName']} (${item['SeriesYear']})</td>
<td id="issue">${item['IssueNumber']}</td>
<td id="issueyear">${item['IssueYear']}</td>
<td id="status">${item['Status']}</td>
<td id="action">
%if item['Status'] is None:
<a href="queueissue?ComicName=${item['ComicName'] | u}&ComicIssue=${item['IssueNumber']}&ComicYear=${item['IssueYear']}&mode=readlist&SARC=${item['StoryArc']}&IssueArcID=${item['IssueArcID']}"><span class="ui-icon ui-icon-plus"></span>Grab it</a>
%endif
</td>
</tr>
%endfor
</tbody>
</table>
</%def>
<%def name="headIncludes()">
<link rel="stylesheet" href="interfaces/default/css/data_table.css">
</%def>
<%def name="javascriptIncludes()">
<script src="js/libs/jquery.dataTables.min.js"></script>
<script src="js/libs/jquery.dataTables.rowReordering.js"></script>
<script>
function initThisPage() {
$('#read_detail').dataTable({
"bDestroy": true,
"oLanguage": {
"sLengthMenu":"Show _MENU_ items per page",
"sEmptyTable": "<em>No History to Display</em>",
"sInfo":"Showing _START_ to _END_ of _TOTAL_ items",
"sInfoEmpty":"Showing 0 to 0 of 0 items",
"sInfoFiltered":"(filtered from _MAX_ total items)"},
"iDisplayLength": 25,
"sPaginationType": "full_numbers",
"aaSorting": []
}).rowReordering({
sURL:"/reOrder",
sRequestType: "GET"
});
resetFilters("item");
}
$(document).ready(function() {
initThisPage();
initActions();
});
</script>
</%def>


@ -27,6 +27,7 @@ import urllib2
import sqlite3
from xml.dom.minidom import parseString
from mylar import logger, db, helpers, updater, notifiers, filechecker
class PostProcessor(object):
@ -171,146 +172,288 @@ class PostProcessor(object):
logger.fdebug("NZB name as passed from NZBGet: " + self.nzb_name)
myDB = db.DBConnection()
nzbname = self.nzb_name
#remove extensions from nzb_name if they somehow got through (Experimental most likely)
extensions = ('.cbr', '.cbz')
if nzbname.lower().endswith(extensions):
fd, ext = os.path.splitext(nzbname)
self._log("Removed extension from nzb: " + ext, logger.DEBUG)
nzbname = re.sub(str(ext), '', str(nzbname))
#replace spaces
nzbname = re.sub(' ', '.', str(nzbname))
nzbname = re.sub('[\,\:\?]', '', str(nzbname))
nzbname = re.sub('[\&]', 'and', str(nzbname))
logger.fdebug("After conversions, nzbname is : " + str(nzbname))
# if mylar.USE_NZBGET==1:
# nzbname=self.nzb_name
self._log("nzbname: " + str(nzbname), logger.DEBUG)
nzbiss = myDB.action("SELECT * from nzblog WHERE nzbname=?", [nzbname]).fetchone()
if nzbiss is None:
self._log("Failure - could not initially locate nzbfile in my database to rename.", logger.DEBUG)
logger.fdebug("Failure - could not locate nzbfile initially.")
# if failed on spaces, change it all to decimals and try again.
nzbname = re.sub('_', '.', str(nzbname))
self._log("trying again with this nzbname: " + str(nzbname), logger.DEBUG)
logger.fdebug("trying again with nzbname of : " + str(nzbname))
nzbiss = myDB.action("SELECT * from nzblog WHERE nzbname=?", [nzbname]).fetchone()
if nzbiss is None:
logger.error(u"Unable to locate downloaded file to rename. PostProcessing aborted.")
if self.nzb_name == 'Manual Run':
print ("manual run initiated")
#Manual postprocessing on a folder.
#use the nzb_folder to determine every file
#walk the dir,
#once a series name and issue are matched,
#write the series/issue/filename to a tuple
#when all done, iterate over the tuple until completion...
comicseries = myDB.action("SELECT * FROM comics")
manual_list = []
if comicseries is None:
logger.error(u"No Series in Watchlist - aborting Manual Post Processing. Maybe you should be running Import?")
return
else:
self._log("I corrected and found the nzb as : " + str(nzbname))
logger.fdebug("auto-corrected and found the nzb as : " + str(nzbname))
issueid = nzbiss['IssueID']
else:
issueid = nzbiss['IssueID']
logger.fdebug("issueid:" + str(issueid))
sarc = nzbiss['SARC']
#use issueid to get publisher, series, year, issue number
issuenzb = myDB.action("SELECT * from issues WHERE issueid=?", [issueid]).fetchone()
if issuenzb is not None:
if helpers.is_number(issueid):
sandwich = int(issuenzb['IssueID'])
else:
#if it's non-numeric, it contains a 'G' at the beginning indicating it's a multi-volume
#using GCD data. Set sandwich to 1 so it will bypass and continue post-processing.
if 'S' in issueid:
sandwich = issueid
elif 'G' in issueid:
sandwich = 1
if helpers.is_number(sandwich):
if sandwich < 900000:
# if sandwich is less than 900000 it's a normal watchlist download. Bypass.
pass
else:
if issuenzb is None or 'S' in sandwich or int(sandwich) >= 900000:
# this has no issueID, therefore it's a one-off or a manual post-proc.
# At this point, let's just drop it into the Comic Location folder and forget about it..
if 'S' in sandwich:
self._log("One-off STORYARC mode enabled for Post-Processing for " + str(sarc))
logger.info("One-off STORYARC mode enabled for Post-Processing for " + str(sarc))
if mylar.STORYARCDIR:
storyarcd = os.path.join(mylar.DESTINATION_DIR, "StoryArcs", sarc)
self._log("StoryArc Directory set to : " + storyarcd, logger.DEBUG)
ccnt=0
nm=0
for cs in comicseries:
watchmatch = filechecker.listFiles(self.nzb_folder,cs['ComicName'],cs['AlternateSearch'])
if watchmatch is None:
nm+=1
pass
else:
fn = 0
fccnt = int(watchmatch['comiccount'])
while (fn < fccnt):
try:
tmpfc = watchmatch['comiclist'][fn]
except IndexError:
break
temploc= tmpfc['JusttheDigits'].replace('_', ' ')
temploc = re.sub('[\#\']', '', temploc)
logger.fdebug("temploc: " + str(temploc))
ww = shlex.split(temploc)
lnw = len(ww)
wdc = 0
while (wdc < lnw):
#counts get buggered up when the issue is the last field in the filename - ie. '50.cbr'
if ".cbr" in ww[wdc].lower():
ww[wdc] = ww[wdc].replace(".cbr", "")
elif ".cbz" in ww[wdc].lower():
ww[wdc] = ww[wdc].replace(".cbz", "")
if "(c2c)" in ww[wdc].lower():
ww[wdc] = ww[wdc].replace("(c2c)", " ")
get_issue = shlex.split(str(ww[wdc]))
if ww[wdc] != " ":
ww[wdc] = get_issue[0]
if '.' in ww[wdc]:
#logger.fdebug("decimal detected...adjusting.")
try:
i = float(ww[wdc])
except (ValueError, TypeError):
#not numeric
#logger.fdebug("NOT NUMERIC - new word: " + str(ww[wdc]))
ww[wdc] = ww[wdc].replace(".", "")
else:
#numeric
pass
if ww[wdc].isdigit():
if int(ww[wdc]) > 0:
if wdc+1 < len(ww) and 'au' in ww[wdc+1].lower():
if len(ww[wdc+1]) == 2:
#if the 'AU' is in 005AU vs 005 AU it will yield different results.
ww[wdc] = ww[wdc] + 'AU'
ww[wdc+1] = '93939999919190933'
logger.info("AU Detected separate from issue - combining and continuing")
fcdigit = helpers.issuedigits(ww[wdc])
if 'annual' in self.nzb_name.lower():
logger.info("annual detected.")
annchk = "yes"
issuechk = myDB.action("SELECT * from annuals WHERE ComicID=? AND Int_IssueNumber=?", [cs['ComicID'],fcdigit]).fetchone()
else:
issuechk = myDB.action("SELECT * from issues WHERE ComicID=? AND Int_IssueNumber=?", [cs['ComicID'],fcdigit]).fetchone()
if issuechk is None:
logger.info("No corresponding issue # found for " + str(cs['ComicID']))
else:
logger.info("Found matching issue # " + str(fcdigit) + " for ComicID: " + str(cs['ComicID']) + " / IssueID: " + str(issuechk['IssueID']))
manual_list.append({"ComicLocation": tmpfc['ComicLocation'],
"ComicID": cs['ComicID'],
"IssueID": issuechk['IssueID'],
"IssueNumber": issuechk['Issue_Number'],
"ComicName": cs['ComicName']})
ccnt+=1
print manual_list
wdc+=1
fn+=1
print("There are " + str(len(manual_list)) + " files found that match on your watchlist, " + str(nm) + " do not match anything and will be ignored.")
else:
nzbname = self.nzb_name
#remove extensions from nzb_name if they somehow got through (Experimental most likely)
extensions = ('.cbr', '.cbz')
if nzbname.lower().endswith(extensions):
fd, ext = os.path.splitext(nzbname)
self._log("Removed extension from nzb: " + ext, logger.DEBUG)
nzbname = re.sub(str(ext), '', str(nzbname))
#replace spaces
nzbname = re.sub(' ', '.', str(nzbname))
nzbname = re.sub('[\,\:\?]', '', str(nzbname))
nzbname = re.sub('[\&]', 'and', str(nzbname))
logger.fdebug("After conversions, nzbname is : " + str(nzbname))
# if mylar.USE_NZBGET==1:
# nzbname=self.nzb_name
self._log("nzbname: " + str(nzbname), logger.DEBUG)
nzbiss = myDB.action("SELECT * from nzblog WHERE nzbname=?", [nzbname]).fetchone()
if nzbiss is None:
self._log("Failure - could not initially locate nzbfile in my database to rename.", logger.DEBUG)
logger.fdebug("Failure - could not locate nzbfile initially.")
# if failed on spaces, change it all to decimals and try again.
nzbname = re.sub('_', '.', str(nzbname))
self._log("trying again with this nzbname: " + str(nzbname), logger.DEBUG)
logger.fdebug("trying again with nzbname of : " + str(nzbname))
nzbiss = myDB.action("SELECT * from nzblog WHERE nzbname=?", [nzbname]).fetchone()
if nzbiss is None:
logger.error(u"Unable to locate downloaded file to rename. PostProcessing aborted.")
return
else:
self._log("I corrected and found the nzb as : " + str(nzbname))
logger.fdebug("auto-corrected and found the nzb as : " + str(nzbname))
issueid = nzbiss['IssueID']
else:
issueid = nzbiss['IssueID']
logger.fdebug("issueid:" + str(issueid))
sarc = nzbiss['SARC']
#use issueid to get publisher, series, year, issue number
annchk = "no"
if 'annual' in nzbname.lower():
logger.info("annual detected.")
annchk = "yes"
issuenzb = myDB.action("SELECT * from annuals WHERE IssueID=?", [issueid]).fetchone()
else:
issuenzb = myDB.action("SELECT * from issues WHERE issueid=?", [issueid]).fetchone()
if issuenzb is not None:
if helpers.is_number(issueid):
sandwich = int(issuenzb['IssueID'])
else:
#if it's non-numeric, it contains a 'G' at the beginning indicating it's a multi-volume
#using GCD data. Set sandwich to 1 so it will bypass and continue post-processing.
if 'S' in issueid:
sandwich = issueid
elif 'G' in issueid or '-' in issueid:
sandwich = 1
if helpers.is_number(sandwich):
if sandwich < 900000:
# if sandwich is less than 900000 it's a normal watchlist download. Bypass.
pass
else:
if issuenzb is None or 'S' in sandwich or int(sandwich) >= 900000:
# this has no issueID, therefore it's a one-off or a manual post-proc.
# At this point, let's just drop it into the Comic Location folder and forget about it..
if 'S' in sandwich:
self._log("One-off STORYARC mode enabled for Post-Processing for " + str(sarc))
logger.info("One-off STORYARC mode enabled for Post-Processing for " + str(sarc))
if mylar.STORYARCDIR:
storyarcd = os.path.join(mylar.DESTINATION_DIR, "StoryArcs", sarc)
self._log("StoryArc Directory set to : " + storyarcd, logger.DEBUG)
else:
self._log("Grab-Bag Directory set to : " + mylar.GRABBAG_DIR, logger.DEBUG)
else:
self._log("One-off mode enabled for Post-Processing. All I'm doing is moving the file untouched into the Grab-bag directory.", logger.DEBUG)
logger.info("One-off mode enabled for Post-Processing. Will move into Grab-bag directory.")
self._log("Grab-Bag Directory set to : " + mylar.GRABBAG_DIR, logger.DEBUG)
else:
self._log("One-off mode enabled for Post-Processing. All I'm doing is moving the file untouched into the Grab-bag directory.", logger.DEBUG)
logger.info("One-off mode enabled for Post-Processing. Will move into Grab-bag directory.")
self._log("Grab-Bag Directory set to : " + mylar.GRABBAG_DIR, logger.DEBUG)
for root, dirnames, filenames in os.walk(self.nzb_folder):
for filename in filenames:
if filename.lower().endswith(extensions):
ofilename = filename
path, ext = os.path.splitext(ofilename)
for root, dirnames, filenames in os.walk(self.nzb_folder):
for filename in filenames:
if filename.lower().endswith(extensions):
ofilename = filename
path, ext = os.path.splitext(ofilename)
if 'S' in sandwich:
if mylar.STORYARCDIR:
grdst = storyarcd
if 'S' in sandwich:
if mylar.STORYARCDIR:
grdst = storyarcd
else:
grdst = mylar.DESTINATION_DIR
else:
grdst = mylar.DESTINATION_DIR
else:
if mylar.GRABBAG_DIR:
grdst = mylar.GRABBAG_DIR
else:
grdst = mylar.DESTINATION_DIR
filechecker.validateAndCreateDirectory(grdst, True)
if mylar.GRABBAG_DIR:
grdst = mylar.GRABBAG_DIR
else:
grdst = mylar.DESTINATION_DIR
filechecker.validateAndCreateDirectory(grdst, True)
grab_dst = os.path.join(grdst, ofilename)
self._log("Destination Path : " + grab_dst, logger.DEBUG)
logger.info("Destination Path : " + grab_dst)
grab_src = os.path.join(self.nzb_folder, ofilename)
self._log("Source Path : " + grab_src, logger.DEBUG)
logger.info("Source Path : " + grab_src)
if 'S' in sandwich:
#if from a StoryArc, check to see if we're appending the ReadingOrder to the filename
if mylar.READ2FILENAME:
issuearcid = re.sub('S', '', issueid)
arcdata = myDB.action("SELECT * FROM readinglist WHERE IssueARCID=?",[issuearcid]).fetchone()
if int(arcdata['ReadingOrder']) < 10: readord = "00" + str(arcdata['ReadingOrder'])
elif int(arcdata['ReadingOrder']) >= 10 and int(arcdata['ReadingOrder']) <= 99: readord = "0" + str(arcdata['ReadingOrder'])
else: readord = str(arcdata['ReadingOrder'])
dfilename = str(readord) + "-" + ofilename
else:
dfilename = ofilename
grab_dst = os.path.join(grdst, dfilename)
else:
grab_dst = os.path.join(grdst, ofilename)
logger.info("Moving " + str(ofilename) + " into directory : " + str(grdst))
self._log("Destination Path : " + grab_dst, logger.DEBUG)
logger.info("Destination Path : " + grab_dst)
grab_src = os.path.join(self.nzb_folder, ofilename)
self._log("Source Path : " + grab_src, logger.DEBUG)
logger.info("Source Path : " + grab_src)
try:
shutil.move(grab_src, grab_dst)
except (OSError, IOError):
self._log("Failed to move directory - check directories and manually re-run.", logger.DEBUG)
logger.debug("Failed to move directory - check directories and manually re-run.")
return
#tidyup old path
try:
shutil.rmtree(self.nzb_folder)
except (OSError, IOError):
self._log("Failed to remove temporary directory.", logger.DEBUG)
logger.debug("Failed to remove temporary directory - check directory and manually re-run.")
return
logger.info("Moving " + str(ofilename) + " into directory : " + str(grdst))
logger.debug("Removed temporary directory : " + str(self.nzb_folder))
self._log("Removed temporary directory : " + self.nzb_folder, logger.DEBUG)
#delete entry from nzblog table
myDB.action('DELETE from nzblog WHERE issueid=?', [issueid])
try:
shutil.move(grab_src, grab_dst)
except (OSError, IOError):
self._log("Failed to move directory - check directories and manually re-run.", logger.DEBUG)
logger.debug("Failed to move directory - check directories and manually re-run.")
return
#tidyup old path
try:
shutil.rmtree(self.nzb_folder)
except (OSError, IOError):
self._log("Failed to remove temporary directory.", logger.DEBUG)
logger.debug("Failed to remove temporary directory - check directory and manually re-run.")
return
if 'S' in issueid:
issuearcid = re.sub('S', '', issueid)
logger.info("IssueArcID is : " + str(issuearcid))
ctrlVal = {"IssueArcID": issuearcid}
newVal = {"Status": "Downloaded",
"Location": grab_dst }
myDB.upsert("readinglist",newVal,ctrlVal)
logger.info("updated status to Downloaded")
return self.log
logger.debug("Removed temporary directory : " + str(self.nzb_folder))
self._log("Removed temporary directory : " + self.nzb_folder, logger.DEBUG)
#delete entry from nzblog table
myDB.action('DELETE from nzblog WHERE issueid=?', [issueid])
comicid = issuenzb['ComicID']
issuenumOG = issuenzb['Issue_Number']
if 'S' in issueid:
issuearcid = re.sub('S', '', issueid)
logger.info("IssueArcID is : " + str(issuearcid))
ctrlVal = {"IssueArcID": issuearcid}
newVal = {"Status": "Downloaded",
"Location": grab_dst }
myDB.upsert("readinglist",newVal,ctrlVal)
logger.info("updated status to Downloaded")
return self.log
comicid = issuenzb['ComicID']
issuenumOG = issuenzb['Issue_Number']
if self.nzb_name == 'Manual Run':
#loop through the hits here.
if len(manual_list) == 0:
logger.info("No hits ... breakout.")
return
for ml in manual_list:
comicid = ml['ComicID']
issueid = ml['IssueID']
issuenumOG = ml['IssueNumber']
self.Process_next(comicid,issueid,issuenumOG,ml)
else:
return self.Process_next(comicid,issueid,issuenumOG)
def Process_next(self,comicid,issueid,issuenumOG,ml=None):
annchk = "no"
extensions = ('.cbr', '.cbz')
myDB = db.DBConnection()
comicnzb = myDB.action("SELECT * from comics WHERE comicid=?", [comicid]).fetchone()
issuenzb = myDB.action("SELECT * from issues WHERE issueid=?", [issueid]).fetchone()
if issuenzb is None:
issuenzb = myDB.action("SELECT * from annuals WHERE issueid=?", [issueid]).fetchone()
annchk = "yes"
#issueno = str(issuenum).split('.')[0]
#new CV API - removed all decimals...here we go AGAIN!
issuenum = issuenumOG
issuenum = issuenzb['Issue_Number']
issue_except = 'None'
if 'au' in issuenum.lower():
issuenum = re.sub("[^0-9]", "", issuenum)
issue_except = ' AU'
elif 'ai' in issuenum.lower():
issuenum = re.sub("[^0-9]", "", issuenum)
issue_except = ' AI'
if '.' in issuenum:
iss_find = issuenum.find('.')
iss_b4dec = issuenum[:iss_find]
@ -388,11 +531,14 @@ class PostProcessor(object):
prettycomiss = str(issueno)
self._log("issue length error - cannot determine length. Defaulting to None: " + str(prettycomiss), logger.DEBUG)
if annchk == "yes":
prettycomiss = "Annual " + str(prettycomiss)
logger.fdebug("Pretty Comic Issue is : " + str(prettycomiss))
issueyear = issuenzb['IssueDate'][:4]
self._log("Issue Year: " + str(issueyear), logger.DEBUG)
logger.fdebug("Issue Year : " + str(issueyear))
comicnzb= myDB.action("SELECT * from comics WHERE comicid=?", [comicid]).fetchone()
# comicnzb= myDB.action("SELECT * from comics WHERE comicid=?", [comicid]).fetchone()
publisher = comicnzb['ComicPublisher']
self._log("Publisher: " + publisher, logger.DEBUG)
logger.fdebug("Publisher: " + str(publisher))
@ -421,6 +567,25 @@ class PostProcessor(object):
logger.fdebug("new format is now: " + str(chunk_file_format))
else:
chunk_file_format = mylar.FILE_FORMAT
ofilename = None
#tag the meta.
if mylar.ENABLE_META:
self._log("Metatagging enabled - proceeding...")
logger.fdebug("Metatagging enabled - proceeding...")
import cmtagmylar
if ml is None:
pcheck = cmtagmylar.run(self.nzb_folder, issueid=issueid)
else:
pcheck = cmtagmylar.run(self.nzb_folder, issueid=issueid, manual="yes", filename=ml['ComicLocation'])
if pcheck == "fail":
self._log("Unable to write metadata successfully - check mylar.log file. Attempting to continue without tagging...")
logger.fdebug("Unable to write metadata successfully - check mylar.log file. Attempting to continue without tagging...")
else:
otofilename = pcheck
self._log("Successfully wrote metadata to .cbz - Continuing..")
logger.fdebug("Successfully wrote metadata to .cbz (" + otofilename + ") - Continuing..")
#Run Pre-script
if mylar.ENABLE_PRE_SCRIPTS:
@ -455,13 +620,22 @@ class PostProcessor(object):
'$VolumeN': comversion
}
ofilename = None
for root, dirnames, filenames in os.walk(self.nzb_folder):
for filename in filenames:
if filename.lower().endswith(extensions):
ofilename = filename
path, ext = os.path.splitext(ofilename)
#if it's a Manual Run, use the ml['ComicLocation'] for the exact filename.
if ml is None:
for root, dirnames, filenames in os.walk(self.nzb_folder):
for filename in filenames:
if filename.lower().endswith(extensions):
ofilename = filename
path, ext = os.path.splitext(ofilename)
else:
print "otofilename:" + str(otofilename)
odir, ofilename = os.path.split(otofilename)
print "ofilename: " + str(ofilename)
path, ext = os.path.splitext(ofilename)
print "path: " + str(path)
print "ext:" + str(ext)
if ofilename is None:
logger.error(u"Aborting PostProcessing - the filename doesn't exist in the location given. Make sure that " + str(self.nzb_folder) + " exists and is the correct location.")
@ -501,27 +675,54 @@ class PostProcessor(object):
logger.fdebug("Source: " + str(src))
logger.fdebug("Destination: " + str(dst))
os.rename(os.path.join(self.nzb_folder, str(ofilename)), os.path.join(self.nzb_folder,str(nfilename + ext)))
src = os.path.join(self.nzb_folder, str(nfilename + ext))
try:
shutil.move(src, dst)
except (OSError, IOError):
self._log("Failed to move directory - check directories and manually re-run.", logger.DEBUG)
self._log("Post-Processing ABORTED.", logger.DEBUG)
return
#tidyup old path
try:
shutil.rmtree(self.nzb_folder)
except (OSError, IOError):
self._log("Failed to remove temporary directory - check directory and manually re-run.", logger.DEBUG)
self._log("Post-Processing ABORTED.", logger.DEBUG)
return
if ml is None:
#non-manual run moving/deleting...
os.rename(os.path.join(self.nzb_folder, str(ofilename)), os.path.join(self.nzb_folder,str(nfilename + ext)))
src = os.path.join(self.nzb_folder, str(nfilename + ext))
try:
shutil.move(src, dst)
except (OSError, IOError):
self._log("Failed to move directory - check directories and manually re-run.", logger.DEBUG)
self._log("Post-Processing ABORTED.", logger.DEBUG)
return
#tidyup old path
try:
shutil.rmtree(self.nzb_folder)
except (OSError, IOError):
self._log("Failed to remove temporary directory - check directory and manually re-run.", logger.DEBUG)
self._log("Post-Processing ABORTED.", logger.DEBUG)
return
self._log("Removed temporary directory : " + str(self.nzb_folder), logger.DEBUG)
else:
#Manual Run, this is the portion.
logger.fdebug("Renaming " + os.path.join(self.nzb_folder, str(ofilename)) + " ..to.. " + os.path.join(self.nzb_folder,str(nfilename + ext)))
os.rename(os.path.join(self.nzb_folder, str(ofilename)), os.path.join(self.nzb_folder,str(nfilename + ext)))
src = os.path.join(self.nzb_folder, str(nfilename + ext))
logger.fdebug("Moving " + src + " ... to ... " + dst)
try:
shutil.move(src, dst)
except (OSError, IOError):
self._log("Failed to move directory - check directories and manually re-run.", logger.DEBUG)
self._log("Post-Processing ABORTED.", logger.DEBUG)
return
#tidyup old path
try:
os.remove(os.path.join(self.nzb_folder, str(ofilename)))
logger.fdebug("Deleting : " + os.path.join(self.nzb_folder, str(ofilename)))
except (OSError, IOError):
self._log("Failed to remove temporary directory - check directory and manually re-run.", logger.DEBUG)
self._log("Post-Processing ABORTED.", logger.DEBUG)
return
self._log("Removed temporary directory : " + str(self.nzb_folder), logger.DEBUG)
self._log("Removed temporary directory : " + str(self.nzb_folder), logger.DEBUG)
#delete entry from nzblog table
myDB.action('DELETE from nzblog WHERE issueid=?', [issueid])
#update snatched table to change status to Downloaded
updater.foundsearch(comicid, issueid, down='True')
if annchk == "no":
updater.foundsearch(comicid, issueid, down='True')
else:
updater.foundsearch(comicid, issueid, mode='want_ann', down='True')
#force rescan of files
updater.forceRescan(comicid)
logger.info(u"Post-Processing completed for: " + series + " issue: " + str(issuenumOG) )


@ -216,6 +216,12 @@ NEWCOM_DIR = None
FFTONEWCOM_DIR = 0
OLDCONFIG_VERSION = None
INDIE_PUB = 75
BIGGIE_PUB = 55
ENABLE_META = 0
CMTAGGER_PATH = None
def CheckSection(sec):
""" Check if INI section exists, if not create it """
try:
@ -274,6 +280,7 @@ def initialize():
USE_NZBGET, NZBGET_HOST, NZBGET_PORT, NZBGET_USERNAME, NZBGET_PASSWORD, NZBGET_CATEGORY, NZBGET_PRIORITY, NZBSU, NZBSU_APIKEY, DOGNZB, DOGNZB_APIKEY, NZBX,\
NEWZNAB, NEWZNAB_HOST, NEWZNAB_APIKEY, NEWZNAB_ENABLED, EXTRA_NEWZNABS, NEWZNAB_EXTRA, \
RAW, RAW_PROVIDER, RAW_USERNAME, RAW_PASSWORD, RAW_GROUPS, EXPERIMENTAL, \
ENABLE_META, CMTAGGER_PATH, INDIE_PUB, BIGGIE_PUB, \
PROWL_ENABLED, PROWL_PRIORITY, PROWL_KEYS, PROWL_ONSNATCH, NMA_ENABLED, NMA_APIKEY, NMA_PRIORITY, NMA_ONSNATCH, PUSHOVER_ENABLED, PUSHOVER_PRIORITY, PUSHOVER_APIKEY, PUSHOVER_USERKEY, PUSHOVER_ONSNATCH, LOCMOVE, NEWCOM_DIR, FFTONEWCOM_DIR, \
PREFERRED_QUALITY, MOVE_FILES, RENAME_FILES, LOWERCASE_FILENAMES, USE_MINSIZE, MINSIZE, USE_MAXSIZE, MAXSIZE, CORRECT_METADATA, FOLDER_FORMAT, FILE_FORMAT, REPLACE_CHAR, REPLACE_SPACES, ADD_TO_CSV, CVINFO, LOG_LEVEL, POST_PROCESSING, SEARCH_DELAY, GRABBAG_DIR, READ2FILENAME, STORYARCDIR, CVURL, CVAPIFIX, \
COMIC_LOCATION, QUAL_ALTVERS, QUAL_SCANNER, QUAL_TYPE, QUAL_QUALITY, ENABLE_EXTRA_SCRIPTS, EXTRA_SCRIPTS, ENABLE_PRE_SCRIPTS, PRE_SCRIPTS, PULLNEW, COUNT_ISSUES, COUNT_HAVES, COUNT_COMICS, SYNO_FIX, CHMOD_FILE, CHMOD_DIR, ANNUALS_ON, CV_ONLY, CV_ONETIMER, WEEKFOLDER
@ -409,6 +416,12 @@ def initialize():
PRE_SCRIPTS = check_setting_str(CFG, 'General', 'pre_scripts', '')
POST_PROCESSING = bool(check_setting_int(CFG, 'General', 'post_processing', 1))
ENABLE_META = bool(check_setting_int(CFG, 'General', 'enable_meta', 0))
CMTAGGER_PATH = check_setting_str(CFG, 'General', 'cmtagger_path', '')
INDIE_PUB = check_setting_str(CFG, 'General', 'indie_pub', '75')
BIGGIE_PUB = check_setting_str(CFG, 'General', 'biggie_pub', '55')
USE_SABNZBD = bool(check_setting_int(CFG, 'SABnzbd', 'use_sabnzbd', 0))
SAB_HOST = check_setting_str(CFG, 'SABnzbd', 'sab_host', '')
SAB_USERNAME = check_setting_str(CFG, 'SABnzbd', 'sab_username', '')
@ -742,6 +755,10 @@ def config_write():
new_config['General']['locmove'] = int(LOCMOVE)
new_config['General']['newcom_dir'] = NEWCOM_DIR
new_config['General']['fftonewcom_dir'] = int(FFTONEWCOM_DIR)
new_config['General']['enable_meta'] = int(ENABLE_META)
new_config['General']['cmtagger_path'] = CMTAGGER_PATH
new_config['General']['indie_pub'] = INDIE_PUB
new_config['General']['biggie_pub'] = BIGGIE_PUB
new_config['SABnzbd'] = {}
new_config['SABnzbd']['use_sabnzbd'] = int(USE_SABNZBD)
@ -865,7 +882,7 @@ def dbcheck():
# c.execute('CREATE TABLE IF NOT EXISTS sablog (nzo_id TEXT, ComicName TEXT, ComicYEAR TEXT, ComicIssue TEXT, name TEXT, nzo_complete TEXT)')
c.execute('CREATE TABLE IF NOT EXISTS importresults (impID TEXT, ComicName TEXT, ComicYear TEXT, Status TEXT, ImportDate TEXT, ComicFilename TEXT, ComicLocation TEXT, WatchMatch TEXT)')
c.execute('CREATE TABLE IF NOT EXISTS readlist (IssueID TEXT, ComicName TEXT, Issue_Number TEXT, Status TEXT, DateAdded TEXT, Location TEXT, inCacheDir TEXT, SeriesYear TEXT, ComicID TEXT)')
c.execute('CREATE TABLE IF NOT EXISTS annuals (IssueID TEXT, Issue_Number TEXT, IssueName TEXT, IssueDate TEXT, Status TEXT, ComicID TEXT, GCDComicID TEXT)')
c.execute('CREATE TABLE IF NOT EXISTS annuals (IssueID TEXT, Issue_Number TEXT, IssueName TEXT, IssueDate TEXT, Status TEXT, ComicID TEXT, GCDComicID TEXT, Location TEXT, ComicSize TEXT, Int_IssueNumber INT)')
conn.commit()
c.close()
#new
@ -996,6 +1013,20 @@ def dbcheck():
except:
c.execute('ALTER TABLE nzblog ADD COLUMN SARC TEXT')
try:
c.execute('SELECT Location from annuals')
except:
c.execute('ALTER TABLE annuals ADD COLUMN Location TEXT')
try:
c.execute('SELECT ComicSize from annuals')
except:
c.execute('ALTER TABLE annuals ADD COLUMN ComicSize TEXT')
try:
c.execute('SELECT Int_IssueNumber from annuals')
except:
c.execute('ALTER TABLE annuals ADD COLUMN Int_IssueNumber INT')
#if it's prior to Wednesday, the issue counts will be inflated by one as the online db's everywhere
#prepare for the next 'new' release of a series. It's caught in updater.py, so let's just store the
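The three try/except blocks above follow dbcheck()'s usual migration pattern: probe for a column with a SELECT and add it via ALTER TABLE when the probe raises. A hypothetical helper (not part of mylar, shown only to illustrate that pattern) could express the same annuals additions more compactly:

def ensure_column(c, table, column, coltype):
    # probe the column; sqlite3 raises if it doesn't exist, mirroring the bare excepts above
    try:
        c.execute('SELECT %s from %s' % (column, table))
    except Exception:
        c.execute('ALTER TABLE %s ADD COLUMN %s %s' % (table, column, coltype))

for col, ctype in (('Location', 'TEXT'), ('ComicSize', 'TEXT'), ('Int_IssueNumber', 'INT')):
    ensure_column(c, 'annuals', col, ctype)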

mylar/cmtagmylar.py (new file, 261 lines added)

@ -0,0 +1,261 @@
# This script was taken almost entirely from Manders2600's script, with use of the awesome ComicTagger.
# Modified very slightly so Mylar just passes it the IssueID and lets it do its magic.
import os
import sys
import glob
import platform
import shutil
import zipfile
import subprocess
import mylar
from mylar import logger
def run (dirName, nzbName=None, issueid=None, manual=None, filename=None):
#print "manual:" + manual
#print "filename: " + filename
logger.fdebug("dirName:" + dirName)
#print "issueid:" + issueid
## Set the directory in which comictagger and other external commands are located - IMPORTANT - ##
# ( User may have to modify, depending on their setup, but these are some guesses for now )
#check for dependencies here - configparser
try:
import configparser
except ImportError:
logger.fdebug("configparser not found on system. Please install manually in order to write metadata")
logger.fdebug("continuing with PostProcessing, but I'm not using metadata.")
return "fail"
if platform.system() == "Windows":
(x, y) = platform.architecture()
if x == "64bit":
comictagger_cmd = "C:\Program Files (x86)\ComicTagger\comictagger.exe"
# http://www.win-rar.com/download.html
else:
comictagger_cmd = "C:\Program Files\ComicTagger\comictagger.exe"
unrar_cmd = "C:\Program Files\WinRAR\UnRAR.exe"
elif platform.system() == "Darwin": #Mac OS X
comictagger_cmd = "/Applications/ComicTagger.app/Contents/MacOS/ComicTagger"
unrar_cmd = "/usr/local/bin/unrar"
else:
#set this to the lib path (ie. '<root of mylar>/lib')
comictagger_cmd = os.path.join(mylar.CMTAGGER_PATH, 'comictagger.py')
unrar_cmd = "/usr/bin/unrar"
# if not os.path.exists( comictagger_cmd ):
# print "ERROR: can't find the ComicTagger program: {0}".format( comictagger_cmd )
# print " You probably need to edit this script!"
# sys.exit( 1 )
file_conversion = True
file_extension_fixing = True
if not os.path.exists( unrar_cmd ):
logger.fdebug("WARNING: can't find the unrar command.")
logger.fdebug("File conversion and extension fixing not available")
logger.fdebug("You probably need to edit this script, or install the missing tool, or both!")
file_conversion = False
file_extension_fixing = False
## Sets up other directories ##
scriptname = os.path.basename( sys.argv[0] )
downloadpath = os.path.abspath( dirName )
sabnzbdscriptpath = os.path.dirname( sys.argv[0] )
if manual is None:
comicpath = os.path.join( downloadpath , "temp" )
else:
comicpath = os.path.join( downloadpath, issueid )
unrar_folder = os.path.join( comicpath , "unrard" )
logger.fdebug("---directory settings.")
logger.fdebug("scriptname : " + scriptname)
logger.fdebug("downloadpath : " + downloadpath)
logger.fdebug("sabnzbdscriptpath : " + sabnzbdscriptpath)
logger.fdebug("comicpath : " + comicpath)
logger.fdebug("unrar_folder : " + unrar_folder)
logger.fdebug("Running the Post-SabNZBd/Mylar script")
if os.path.exists( comicpath ):
shutil.rmtree( comicpath )
os.makedirs( comicpath )
# make a list of all CBR and CBZ files in downloadpath
if filename is None:
filename_list = glob.glob( os.path.join( downloadpath, "*.cbz" ) )
filename_list.extend( glob.glob( os.path.join( downloadpath, "*.cbr" ) ) )
## Takes all .cbr and .cbz files and dumps them to processing directory ##
for f in filename_list:
shutil.move( f, comicpath)
## Changes filetype extensions when needed ##
cbr_list = glob.glob( os.path.join( comicpath, "*.cbr" ) )
for f in cbr_list:
if zipfile.is_zipfile( f ):
base = os.path.splitext( f )[0]
shutil.move( f, base + ".cbz" )
logger.fdebug("{0}: renaming {1} to be a cbz".format( scriptname, os.path.basename( f ) ))
if file_extension_fixing:
cbz_list = glob.glob( os.path.join( comicpath, "*.cbz" ) )
for f in cbz_list:
try:
rar_test_cmd_output = "is not RAR archive" #default, in case of error
rar_test_cmd_output = subprocess.check_output( [ unrar_cmd, "t", f ] )
except:
pass
if not "is not RAR archive" in rar_test_cmd_output:
base = os.path.splitext( f )[0]
shutil.move( f, base + ".cbr" )
logger.fdebug("{0}: renaming {1} to be a cbr".format( scriptname, os.path.basename( f ) ))
# Now rename all CBR files to RAR
cbr_list = glob.glob( os.path.join( comicpath, "*.cbr" ) )
for f in cbr_list:
base = os.path.splitext( f )[0]
shutil.move( f, base + ".rar" )
## Changes any cbr files to cbz files for insertion of metadata ##
if file_conversion:
rar_list = glob.glob( os.path.join( comicpath, "*.rar" ) )
for f in rar_list:
logger.fdebug("{0}: converting {1} to be zip format".format( scriptname, os.path.basename( f ) ))
basename = os.path.splitext( f )[0]
zipname = basename + ".cbz"
# Move into the folder where we will be unrar-ing things
os.makedirs( unrar_folder )
os.chdir( unrar_folder )
# Extract and zip up
subprocess.Popen( [ unrar_cmd, "x", f ] ).communicate()
shutil.make_archive( basename, "zip", unrar_folder )
# get out of unrar folder and clean up
os.chdir( comicpath )
shutil.rmtree( unrar_folder )
## Changes zip to cbz
zip_list = glob.glob( os.path.join( comicpath, "*.zip" ) )
for f in zip_list:
base = os.path.splitext( f )[0]
shutil.move( f, base + ".cbz" )
## Tag each CBZ, and move it back to original directory ##
cbz_list = glob.glob( os.path.join( comicpath, "*.cbz" ) )
for f in cbz_list:
if issueid is None:
subprocess.Popen( [ comictagger_cmd, "-s", "-t", "cr", "-f", "-o", "--verbose", "--nooverwrite", f ] ).communicate()
subprocess.Popen( [ comictagger_cmd, "-s", "-t", "cbl", "-f", "-o", "--verbose", "--nooverwrite", f ] ).communicate()
else:
subprocess.Popen( [ comictagger_cmd, "-s", "-t", "cr", "-o", "--id", issueid, "--verbose", "--nooverwrite", f ] ).communicate()
subprocess.Popen( [ comictagger_cmd, "-s", "-t", "cbl", "-o", "--id", issueid, "--verbose", "--nooverwrite", f ] ).communicate()
shutil.move( f, downloadpath )
return
else:
shutil.move( filename, comicpath)
filename = os.path.split(filename)[1] # just the filename itself
print comicpath
print os.path.join( comicpath, filename )
if filename.endswith('.cbr'):
f = os.path.join( comicpath, filename )
if zipfile.is_zipfile( f ):
print "zipfile detected"
base = os.path.splitext( f )[0]
print base
print f
shutil.move( f, base + ".cbz" )
logger.fdebug("{0}: renaming {1} to be a cbz".format( scriptname, os.path.basename( f ) ))
if file_extension_fixing:
if filename.endswith('.cbz'):
f = os.path.join( comicpath, filename )
try:
rar_test_cmd_output = "is not RAR archive" #default, in case of error
rar_test_cmd_output = subprocess.check_output( [ unrar_cmd, "t", f ] )
except:
pass
if not "is not RAR archive" in rar_test_cmd_output:
base = os.path.splitext( f )[0]
shutil.move( f, base + ".cbr" )
logger.fdebug("{0}: renaming {1} to be a cbr".format( scriptname, os.path.basename( f ) ))
# Now rename all CBR files to RAR
if filename.endswith('.cbr'):
f = os.path.join( comicpath, filename)
base = os.path.splitext( f )[0]
shutil.move( f, base + ".rar" )
## Changes any cbr files to cbz files for insertion of metadata ##
if file_conversion:
f = os.path.join( comicpath, filename )
logger.fdebug("{0}: converting {1} to be zip format".format( scriptname, os.path.basename( f ) ))
basename = os.path.splitext( f )[0]
zipname = basename + ".cbz"
# Move into the folder where we will be unrar-ing things
os.makedirs( unrar_folder )
os.chdir( unrar_folder )
# Extract and zip up
logger.fdebug("{0}: Comicpath is {1}".format( scriptname, os.path.join(comicpath,basename) ))
logger.fdebug("{0}: Unrar is {1}".format( scriptname, unrar_folder ))
subprocess.Popen( [ unrar_cmd, "x", os.path.join(comicpath,basename) ] ).communicate()
shutil.make_archive( basename, "zip", unrar_folder )
# get out of unrar folder and clean up
os.chdir( comicpath )
shutil.rmtree( unrar_folder )
## Changes zip to cbz
f = os.path.join( comicpath, os.path.splitext(filename)[0] + ".zip" )
print f
try:
with open(f): pass
except:
logger.fdebug("No zip file present")
return "fail"
base = os.path.splitext( f )[0]
shutil.move( f, base + ".cbz" )
nfilename = base + ".cbz"
else:
nfilename = filename
file_dir, file_n = os.path.split(nfilename)
logger.fdebug("converted directory: " + str(file_dir))
logger.fdebug("converted filename: " + str(file_n))
logger.fdebug("destination path: " + os.path.join(dirName,file_n))
logger.fdebug("dirName: " + dirName)
logger.fdebug("absDirName: " + os.path.abspath(dirName))
## Tag each CBZ, and move it back to original directory ##
if issueid is None:
subprocess.Popen( [ comictagger_cmd, "-s", "-t", "cr", "-f", "-o", "--verbose", "--nooverwrite", nfilename ] ).communicate()
subprocess.Popen( [ comictagger_cmd, "-s", "-t", "cbl", "-f", "-o", "--verbose", "--nooverwrite", nfilename ] ).communicate()
else:
subprocess.Popen( [ comictagger_cmd, "-s", "-t", "cr", "-o", "--id", issueid, "--verbose", "--nooverwrite", nfilename ] ).communicate()
subprocess.Popen( [ comictagger_cmd, "-s", "-t", "cbl", "-o", "--id", issueid, "--verbose", "--nooverwrite", nfilename ] ).communicate()
if os.path.exists(os.path.join(os.path.abspath(dirName),file_n)):
logger.fdebug("Unable to move - file already exists.")
else:
shutil.move( nfilename, os.path.join(os.path.abspath(dirName),file_n))
shutil.rmtree( comicpath )
return os.path.join(os.path.abspath(dirName),file_n)
## Clean up temp directory ##
#os.chdir( sabnzbdscriptpath )
#shutil.rmtree( comicpath )
## Will Run Mylar Post-Processing In Future ##


@ -133,7 +133,8 @@ def GetComicInfo(comicid,dom):
comic['ComicURL'] = dom.getElementsByTagName('site_detail_url')[trackcnt].firstChild.wholeText
#the description field actually holds the Volume# - so let's grab it
try:
comic['ComicDescription'] = dom.getElementsByTagName('description')[0].firstChild.wholeText
descchunk = dom.getElementsByTagName('description')[0].firstChild.wholeText
comic['ComicDescription'] = drophtml(descchunk)
except:
comic['ComicDescription'] = 'None'
#extract the first 60 characters
@ -257,3 +258,12 @@ def GetFirstIssue(issueid,dom):
the_date = the_year + '-' + the_month
return the_year
def drophtml(html):
from bs4 import BeautifulSoup
soup = BeautifulSoup(html)
text_parts = soup.findAll(text=True)
print ''.join(text_parts)
return ''.join(text_parts)
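A quick usage sketch of the new drophtml() helper above, which strips the HTML ComicVine embeds in the description field (the sample markup is made up):

descchunk = '<p>The <b>Age of Ultron</b> tie-in.</p>'
text = drophtml(descchunk)   # -> 'The Age of Ultron tie-in.'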


@ -88,12 +88,29 @@ def listFiles(dir,watchcomic,AlternateSearch=None):
logger.fdebug("subname no brackets: " + str(subname[0]))
subname = re.sub('\_', ' ', subname[0])
nonocount = 0
charpos = 0
detneg = "no"
for nono in not_these:
if nono in subname:
subcnt = subname.count(nono)
charpos = indices(subname,nono) # will return a list of char positions in subname
#print "charpos: " + str(charpos)
if nono == '-':
i=0
while (i < len(charpos)):
for i,j in enumerate(charpos):
#print i,j
if subname[j+1:j+2].isdigit():
logger.fdebug("possible negative issue detected.")
nonocount = nonocount + subcnt - 1
detneg = "yes"
i+=1
if detneg == "no":
subname = re.sub(str(nono), ' ', subname)
nonocount = nonocount + subcnt
#logger.fdebug(str(nono) + " detected " + str(subcnt) + " times.")
# segment '.' having a . by itself will denote the entire string which we don't want
if nono == '.':
elif nono == '.':
subname = re.sub('\.', ' ', subname)
nonocount = nonocount + subcnt - 1 #(remove the extension from the length)
else:
@ -133,34 +150,65 @@ def listFiles(dir,watchcomic,AlternateSearch=None):
# subname = subname.replace('_', ' ')
logger.fdebug("watchcomic:" + str(modwatchcomic) + " ..comparing to found file: " + str(subname))
if modwatchcomic.lower() in subname.lower() or altsearchcomic.lower() in subname.lower():
if 'annual' in subname.lower():
#print ("it's an annual - unsure how to proceed")
continue
comicpath = os.path.join(basedir, item)
logger.fdebug( modwatchcomic + " - watchlist match on : " + comicpath)
comicsize = os.path.getsize(comicpath)
#print ("Comicsize:" + str(comicsize))
comiccnt+=1
if modwatchcomic.lower() in subname.lower():
#logger.fdebug("we should remove " + str(nonocount) + " characters")
#remove versioning here
if volrem != None:
jtd_len = len(modwatchcomic) + len(volrem) + nonocount + 1 #1 is to account for space btwn comic and vol #
else:
jtd_len = len(modwatchcomic) + nonocount
if detectand:
jtd_len = jtd_len - 2 # char substitution diff between & and 'and' = 2 chars
elif altsearchcomic.lower() in subname.lower():
#remove versioning here
if volrem != None:
jtd_len = len(altsearchcomic) + len(volrem) + nonocount + 1
else:
jtd_len = len(altsearchcomic) + nonocount
if detectand:
jtd_len = jtd_len - 2
stann = 0
if 'annual' in subname.lower():
logger.fdebug("Annual detected - proceeding")
jtd_len = subname.lower().find('annual')
else:
if modwatchcomic.lower() in subname.lower():
logger.fdebug("we should remove " + str(nonocount) + " characters")
findtitlepos = subname.find('-')
if charpos != 0:
logger.fdebug("detected " + str(len(charpos)) + " special characters")
i=0
while (i < len(charpos)):
for i,j in enumerate(charpos):
print i,j
print subname
print "digitchk: " + str(subname[j:])
if j >= len(subname):
logger.fdebug("end reached. ignoring remainder.")
break
elif subname[j:] == '-':
if i <= len(subname) and subname[i+1].isdigit():
logger.fdebug("negative issue detected.")
#detneg = "yes"
elif j > findtitlepos:
logger.fdebug("special character appears outside of title - ignoring @ position: " + str(charpos[i]))
nonocount-=1
i+=1
#remove versioning here
if volrem != None:
jtd_len = len(modwatchcomic) + len(volrem) + nonocount + 1 #1 is to account for space btwn comic and vol #
else:
jtd_len = len(modwatchcomic) + nonocount
if detectand:
jtd_len = jtd_len - 2 # char substitution diff between & and 'and' = 2 chars
elif altsearchcomic.lower() in subname.lower():
#remove versioning here
if volrem != None:
jtd_len = len(altsearchcomic) + len(volrem) + nonocount + 1
else:
jtd_len = len(altsearchcomic) + nonocount
if detectand:
jtd_len = jtd_len - 2
justthedigits = item[jtd_len:]
#remove the title if it appears
findtitle = justthedigits.find('-')
if findtitle > 0 and detneg == "no":
justthedigits = justthedigits[:findtitle]
logger.fdebug("removed title from name - is now : " + str(justthedigits))
comiclist.append({
'ComicFilename': item,
'ComicLocation': comicpath,
@ -195,3 +243,8 @@ def validateAndCreateDirectory(dir, create=False):
logger.warn("Provided directory is blank, aborting")
return False
return False
def indices(string, char):
return [ i for i,c in enumerate(string) if c == char ]
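For reference, the new indices() helper simply returns every position of a character in a string; it is what feeds the charpos handling above. A usage sketch:

indices("Spider-Man -1.cbz", "-")   # -> [6, 11]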


@ -539,7 +539,23 @@ def updateComicLocation():
else:
folderformat = mylar.FOLDER_FORMAT
values = {'$Series': dl['ComicName'],
#remove all 'bad' characters from the Series Name in order to create directories.
u_comicnm = dl['ComicName']
u_comicname = u_comicnm.encode('ascii', 'ignore').strip()
if ':' in u_comicname or '/' in u_comicname or ',' in u_comicname or '?' in u_comicname:
comicdir = u_comicname
if ':' in comicdir:
comicdir = comicdir.replace(':','')
if '/' in comicdir:
comicdir = comicdir.replace('/','-')
if ',' in comicdir:
comicdir = comicdir.replace(',','')
if '?' in comicdir:
comicdir = comicdir.replace('?','')
else: comicdir = u_comicname
values = {'$Series': comicdir,
'$Publisher': re.sub('!','',dl['ComicPublisher']),
'$Year': dl['ComicYear'],
'$series': dl['ComicName'].lower(),
@ -547,10 +563,11 @@ def updateComicLocation():
'$VolumeY': 'V' + str(dl['ComicYear']),
'$VolumeN': comversion
}
if mylar.FFTONEWCOM_DIR:
#if this is enabled (1) it will apply the Folder_Format to all the new dirs
if mylar.FOLDER_FORMAT == '':
comlocation = re.sub(mylar.DESTINATION_DIR, mylar.NEWCOM_DIR, dl['ComicLocation'])
comlocation = re.sub(mylar.DESTINATION_DIR, mylar.NEWCOM_DIR, comicdir)
else:
first = replace_all(folderformat, values)
if mylar.REPLACE_SPACES:
@ -559,7 +576,7 @@ def updateComicLocation():
comlocation = os.path.join(mylar.NEWCOM_DIR,first)
else:
comlocation = re.sub(mylar.DESTINATION_DIR, mylar.NEWCOM_DIR, dl['ComicLocation'])
comlocation = re.sub(mylar.DESTINATION_DIR, mylar.NEWCOM_DIR, comicdir)
ctrlVal = {"ComicID": dl['ComicID']}
newVal = {"ComicLocation": comlocation}
@ -589,3 +606,64 @@ def cleanhtml(raw_html):
flipflop = soup.renderContents()
print flipflop
return flipflop
def issuedigits(issnum):
import db, logger
#print "issnum : " + str(issnum)
if issnum.isdigit():
int_issnum = int( issnum ) * 1000
else:
if 'au' in issnum.lower() and issnum[:1].isdigit():
int_issnum = (int(issnum[:-2]) * 1000) + ord('a') + ord('u')
elif 'ai' in issnum.lower() and issnum[:1].isdigit():
int_issnum = (int(issnum[:-2]) * 1000) + ord('a') + ord('i')
elif u'\xbd' in issnum:
issnum = .5
int_issnum = int(issnum) * 1000
elif '.' in issnum or ',' in issnum:
if ',' in issnum: issnum = re.sub(',','.', issnum)
issst = str(issnum).find('.')
issb4dec = str(issnum)[:issst]
decis = str(issnum)[issst+1:]
if len(decis) == 1:
decisval = int(decis) * 10
issaftdec = str(decisval)
if len(decis) == 2:
decisval = int(decis)
issaftdec = str(decisval)
try:
int_issnum = (int(issb4dec) * 1000) + (int(issaftdec) * 100)
except ValueError:
logger.error("This has no issue #'s for me to get - Either a Graphic Novel or one-shot.")
int_issnum = 999999999999999
else:
try:
x = float(issnum)
#validity check
if x < 0:
#logger.info("I've encountered a negative issue #: " + str(issnum) + ". Trying to accomodate.")
int_issnum = (int(x)*1000) - 1
else: raise ValueError
except ValueError, e:
#logger.error(str(issnum) + "this has an alpha-numeric in the issue # which I cannot account for.")
int_issnum = 999999999999999
return int_issnum
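# A minimal sketch of the integer encoding produced by issuedigits() above: whole issue
# numbers scale by 1000, so variant suffixes ('AU', 'AI') and negative issues can be
# folded in around them. The expected values below follow the branches defined above.
samples = {
    '5':   5 * 1000,                            # plain issue           -> 5000
    '5AU': (5 * 1000) + ord('a') + ord('u'),    # Age of Ultron tie-in  -> 5214
    '5AI': (5 * 1000) + ord('a') + ord('i'),    # A.I. issue            -> 5202
    '-1':  (int(float('-1')) * 1000) - 1,       # negative issue        -> -1001
}
for issnum, int_issnum in samples.items():
    print(issnum + ' -> ' + str(int_issnum))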
def checkthepub(ComicID):
import db, logger
myDB = db.DBConnection()
publishers = ['marvel', 'dc', 'darkhorse']
pubchk = myDB.action("SELECT * FROM comics WHERE ComicID=?", [ComicID]).fetchone()
if pubchk is None:
logger.fdebug("No publisher information found to aid in determining series..defaulting to base check of 555 days.")
return mylar.BIGGIE_PUB
else:
for publish in publishers:
if publish in str(pubchk['ComicPublisher']).lower():
logger.fdebug("Biggie publisher detected - " + str(pubchk['ComicPublisher']))
return mylar.BIGGIE_PUB
logger.fdebug("Indie publisher detected - " + str(pubchk['ComicPublisher']))
return mylar.INDIE_PUB
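# Hedged sketch of checkthepub() above, with the two thresholds stubbed out locally;
# the real values come from mylar's config as BIGGIE_PUB / INDIE_PUB, and the numbers
# below are only assumed examples.
BIGGIE_PUB = 55   # assumed example: allowed days between issues for the big publishers
INDIE_PUB = 75    # assumed example: allowed days between issues for indie publishers

def days_between_issues(publisher):
    # mirrors the substring match against the 'biggie' publisher list
    for biggie in ('marvel', 'dc', 'darkhorse'):
        if biggie in publisher.lower():
            return BIGGIE_PUB
    return INDIE_PUB

print(days_between_issues('Marvel Comics'))   # 55
print(days_between_issues('Image Comics'))    # 75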

View File

@ -143,22 +143,109 @@ def addComictoDB(comicid,mismatch=None,pullupd=None,imported=None,ogcname=None):
#let's do the Annual check here.
if mylar.ANNUALS_ON:
annualcomicname = re.sub('[\,\:]', '', comic['ComicName'])
annuals = comicbookdb.cbdb(annualcomicname, SeriesYear)
print ("Number of Annuals returned: " + str(annuals['totalissues']))
nb = 0
while (nb <= int(annuals['totalissues'])):
try:
annualval = annuals['annualslist'][nb]
except IndexError:
break
newCtrl = {"IssueID": str(annualval['AnnualIssue'] + annualval['AnnualDate'])}
newVals = {"Issue_Number": annualval['AnnualIssue'],
"IssueDate": annualval['AnnualDate'],
"IssueName": annualval['AnnualTitle'],
"ComicID": comicid,
"Status": "Skipped"}
myDB.upsert("annuals", newVals, newCtrl)
nb+=1
#----- CBDB (outdated)
# annuals = comicbookdb.cbdb(annualcomicname, SeriesYear)
# print ("Number of Annuals returned: " + str(annuals['totalissues']))
# nb = 0
# while (nb <= int(annuals['totalissues'])):
# try:
# annualval = annuals['annualslist'][nb]
# except IndexError:
# break
#----
#this issueid doesn't exist at this point since we got the data from cbdb...let's try and figure out
#the issueID for CV based on what we know so we can use that ID (and thereby the metadata too)
#other inherent issue - results below will return the ID for the Series of Annuals, not the series itself.
#sr['comicid'] not the same as comicid for series.
annComicName = annualcomicname + ' annual'
mode = 'series'
#if annuals['totalissues'] is None:
# annissues = 0
#else:
# annissues = annuals['totalissues']
#print "annissues :" + str(annissues)
# annuals happen once / year. determine how many.
annualyear = SeriesYear # no matter what, the year won't be less than this.
#if annualval['AnnualYear'] is None:
# sresults = mb.findComic(annComicName, mode, issue=annissues)
#else:
#sresults = mb.findComic(annComicName, mode, issue=annissues, limityear=annualval['AnnualYear'])
#print "annualyear: " + str(annualval['AnnualYear'])
logger.fdebug("annualyear:" + str(annualyear))
sresults = mb.findComic(annComicName, mode, issue=None)
#logger.fdebug("sresults : " + str(sresults))
type='comic'
if len(sresults) == 1:
logger.fdebug("1 result")
if len(sresults) > 0:
logger.fdebug("there are " + str(len(sresults)) + " results.")
num_res = 0
while (num_res < len(sresults)):
sr = sresults[num_res]
if 'paperback' in sr['description'] or 'collecting' in sr['description']:
logger.fdebug("tradeback/collected edition detected - skipping " + str(sr['comicid']))
else:
if comicid in sr['description']:
logger.fdebug(str(comicid) + " found. Assuming it's part of the greater collection.")
issueid = sr['comicid']
logger.fdebug(str(issueid) + " added to series list as an Annual")
issued = cv.getComic(issueid,'issue')
if len(issued) is None or len(issued) == 0:
logger.fdebug("Couldn't find any annual information...")
pass
else:
n = 0
logger.fdebug("there are " + str(sr['issues']) + " annuals in this series.")
while (n < int(sr['issues'])):
try:
firstval = issued['issuechoice'][n]
except IndexError:
break
cleanname = helpers.cleanName(firstval['Issue_Name'])
issid = str(firstval['Issue_ID'])
issnum = str(firstval['Issue_Number'])
issname = cleanname
issdate = str(firstval['Issue_Date'])
newCtrl = {"IssueID": issid}
newVals = {"Issue_Number": issnum,
"Int_IssueNumber": helpers.issuedigits(issnum),
"IssueDate": issdate,
"IssueName": issname,
"ComicID": comicid,
"Status": "Skipped"}
myDB.upsert("annuals", newVals, newCtrl)
n+=1
num_res+=1
elif len(sresults) == 0 or len(sresults) is None:
logger.fdebug("no results, removing the year from the agenda and re-querying.")
sresults = mb.findComic(annComicName, mode, issue=None)
if len(sresults) == 1:
sr = sresults[0]
logger.fdebug(str(comicid) + " found. Assuming it's part of the greater collection.")
else:
resultset = 0
else:
logger.fdebug("returning results to screen - more than one possibility")
for sr in sresults:
if annualyear < sr['comicyear']:
logger.fdebug(str(annualyear) + " is less than " + str(sr['comicyear']))
if int(sr['issues']) > (2013 - int(sr['comicyear'])):
logger.fdebug("issue count is wrong")
#newCtrl = {"IssueID": issueid}
#newVals = {"Issue_Number": annualval['AnnualIssue'],
# "IssueDate": annualval['AnnualDate'],
# "IssueName": annualval['AnnualTitle'],
# "ComicID": comicid,
# "Status": "Skipped"}
#myDB.upsert("annuals", newVals, newCtrl)
#nb+=1
#parseit.annualCheck(gcomicid=gcdinfo['GCDComicID'], comicid=comicid, comicname=comic['ComicName'], comicyear=SeriesYear)
#comic book location on machine
@ -168,17 +255,23 @@ def addComictoDB(comicid,mismatch=None,pullupd=None,imported=None,ogcname=None):
# let's remove the non-standard characters here.
u_comicnm = comic['ComicName']
u_comicname = u_comicnm.encode('ascii', 'ignore').strip()
if ':' in u_comicname or '/' in u_comicname or ',' in u_comicname or '?' in u_comicname:
comicdir = u_comicname
if ':' in comicdir:
comicdir = comicdir.replace(':','')
if '/' in comicdir:
comicdir = comicdir.replace('/','-')
if ',' in comicdir:
comicdir = comicdir.replace(',','')
if '?' in comicdir:
comicdir = comicdir.replace('?','')
else: comicdir = u_comicname
dirbad = [':',',','/','?','!'] #in u_comicname or '/' in u_comicname or ',' in u_comicname or '?' in u_comicname:
comicdir = u_comicname
for dbd in dirbad:
if dbd in u_comicname:
if dbd == '/': repthechar = '-'
else: repthechar = ''
comicdir = comicdir.replace(dbd,repthechar)
# if ':' in comicdir:
# comicdir = comicdir.replace(dbd,'')
# if '/' in comicdir:
# comicdir = comicdir.replace('/','-')
# if ',' in comicdir:
# comicdir = comicdir.replace(',','')
# if '?' in comicdir:
# comicdir = comicdir.replace('?','')
# if '!' in comicdir:
# comicdir = comicdir.replace('!','')
series = comicdir
publisher = re.sub('!','',comic['ComicPublisher']) # thanks Boom!
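# Self-contained sketch of the directory-name scrub above: every character in dirbad is
# stripped, except '/' which becomes '-' so the series name can't create sub-folders.
def scrub_series_dir(name):
    dirbad = [':', ',', '/', '?', '!']
    for dbd in dirbad:
        repthechar = '-' if dbd == '/' else ''
        name = name.replace(dbd, repthechar)
    return name

print(scrub_series_dir('Batman/Superman: Worlds Finest!'))   # Batman-Superman Worlds Finest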
@ -484,22 +577,27 @@ def addComictoDB(comicid,mismatch=None,pullupd=None,imported=None,ogcname=None):
#---NEW.code
try:
firstval = issued['issuechoice'][n]
print firstval
except IndexError:
break
cleanname = helpers.cleanName(firstval['Issue_Name'])
issid = str(firstval['Issue_ID'])
issnum = firstval['Issue_Number']
#print ("issnum: " + str(issnum))
print ("issnum: " + str(issnum))
issname = cleanname
issdate = str(firstval['Issue_Date'])
if issnum.isdigit():
int_issnum = int( issnum )
int_issnum = int( issnum ) * 1000
else:
if 'a.i.' in issnum.lower(): issnum = re.sub('\.', '', issnum)
print str(issnum)
if 'au' in issnum.lower():
int_issnum = str(int(issnum[:-2])) + 'AU'
int_issnum = (int(issnum[:-2]) * 1000) + ord('a') + ord('u')
elif 'ai' in issnum.lower():
int_issnum = (int(issnum[:-2]) * 1000) + ord('a') + ord('i')
elif u'\xbd' in issnum:
issnum = .5
int_issnum = .5
int_issnum = int(issnum) * 1000
elif '.' in issnum or ',' in issnum:
if ',' in issnum: issnum = re.sub(',','.', issnum)
issst = str(issnum).find('.')
@ -516,7 +614,8 @@ def addComictoDB(comicid,mismatch=None,pullupd=None,imported=None,ogcname=None):
decisval = int(decis)
issaftdec = str(decisval)
try:
int_issnum = str(issnum)
# int_issnum = str(issnum)
int_issnum = (int(issb4dec) * 1000) + (int(issaftdec) * 100)
except ValueError:
logger.error("This has no issue #'s for me to get - Either a Graphic Novel or one-shot.")
updater.no_searchresults(comicid)
@ -527,10 +626,11 @@ def addComictoDB(comicid,mismatch=None,pullupd=None,imported=None,ogcname=None):
#validity check
if x < 0:
logger.info("I've encountered a negative issue #: " + str(issnum) + ". Trying to accomodate.")
int_issnum = str(issnum)
logger.fdebug("value of x is : " + str(x))
int_issnum = (int(x)*1000) - 1
else: raise ValueError
except ValueError, e:
logger.error(str(issnum) + "this has an alpha-numeric in the issue # which I cannot account for.")
logger.error(str(issnum) + " this has an alpha-numeric in the issue # which I cannot account for.")
return
#get the latest issue / date using the date.
if firstval['Issue_Date'] > latestdate:
@ -556,9 +656,10 @@ def addComictoDB(comicid,mismatch=None,pullupd=None,imported=None,ogcname=None):
}
if iss_exists:
#print ("Existing status : " + str(iss_exists['Status']))
print ("Existing status : " + str(iss_exists['Status']))
newValueDict['Status'] = iss_exists['Status']
else:
print "issue doesn't exist in db."
if mylar.AUTOWANT_ALL:
newValueDict['Status'] = "Wanted"
elif issdate > helpers.today() and mylar.AUTOWANT_UPCOMING:
@ -590,7 +691,7 @@ def addComictoDB(comicid,mismatch=None,pullupd=None,imported=None,ogcname=None):
ltmonth = helpers.fullmonth(latestdate[5:7])
#try to determine if it's an 'actively' published comic from above dates
#threshold is if it's within a month (<45 days) let's assume it's recent.
#threshold is if it's within a month (<55 days) let's assume it's recent.
c_date = datetime.date(int(latestdate[:4]),int(latestdate[5:7]),1)
n_date = datetime.date.today()
recentchk = (n_date - c_date).days
@ -652,6 +753,8 @@ def addComictoDB(comicid,mismatch=None,pullupd=None,imported=None,ogcname=None):
logger.info(u"Attempting to grab wanted issues for : " + comic['ComicName'])
for result in results:
print "Searching for : " + str(result['Issue_Number'])
print "Status of : " + str(result['Status'])
search.searchforissue(result['IssueID'])
else: logger.info(u"No issues marked as wanted for " + comic['ComicName'])

View File

@ -28,7 +28,8 @@ mb_lock = threading.Lock()
def pullsearch(comicapi,comicquery,offset):
PULLURL = mylar.CVURL + 'search?api_key=' + str(comicapi) + '&resources=volume&query=' + str(comicquery) + '&field_list=id,name,start_year,site_detail_url,count_of_issues,image,publisher&format=xml&page=' + str(offset)
u_comicquery = comicquery.encode('utf-8').strip()
PULLURL = mylar.CVURL + 'search?api_key=' + str(comicapi) + '&resources=volume&query=' + u_comicquery + '&field_list=id,name,start_year,site_detail_url,count_of_issues,image,publisher,description&format=xml&page=' + str(offset)
#all these imports are standard on most modern python implementations
#download the file:
@ -114,6 +115,11 @@ def findComic(name, mode, issue, limityear=None):
xmlimage = result.getElementsByTagName('super_url')[0].firstChild.wholeText
else:
xmlimage = "cache/blankcover.jpg"
try:
xmldesc = result.getElementsByTagName('description')[0].firstChild.wholeText
except:
xmldesc = "None"
comiclist.append({
'name': xmlTag,
'comicyear': xmlYr,
@ -121,7 +127,8 @@ def findComic(name, mode, issue, limityear=None):
'url': xmlurl,
'issues': xmlcnt,
'comicimage': xmlimage,
'publisher': xmlpub
'publisher': xmlpub,
'description': xmldesc
})
else:
print ("year: " + str(xmlYr) + " - contraint not met. Has to be within " + str(limityear))
@ -130,4 +137,3 @@ def findComic(name, mode, issue, limityear=None):
countResults = countResults + 100
return comiclist
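# Minimal sketch of the optional <description> handling added above, run against a tiny
# inline XML document so it stands alone; when the element is missing, the "None"
# fallback string is used just as in the try/except in the hunk.
from xml.dom.minidom import parseString

def get_description(result):
    try:
        return result.getElementsByTagName('description')[0].firstChild.wholeText
    except Exception:
        return "None"

dom = parseString('<volume><name>Test Volume</name></volume>')
print(get_description(dom.documentElement))   # None  (no description element present)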

View File

@ -36,10 +36,17 @@ from xml.dom.minidom import parseString
import urllib2
from datetime import datetime
def search_init(ComicName, IssueNumber, ComicYear, SeriesYear, IssueDate, IssueID, AlternateSearch=None, UseFuzzy=None, ComicVersion=None, SARC=None, IssueArcID=None):
def search_init(ComicName, IssueNumber, ComicYear, SeriesYear, IssueDate, IssueID, AlternateSearch=None, UseFuzzy=None, ComicVersion=None, SARC=None, IssueArcID=None, mode=None):
if ComicYear == None: ComicYear = '2013'
else: ComicYear = str(ComicYear)[:4]
if mode == 'want_ann':
logger.info("Annual issue search detected. Appending to issue #")
#anything for mode other than None indicates an annual.
ComicName = ComicName + " annual"
if AlternateSearch is not None and AlternateSearch != "None":
AlternateSearch = AlternateSearch + " annual"
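# Illustrative sketch of the 'want_ann' handling above: when the mode flags an annual,
# ' annual' is appended to the series name (and to the alternate search name when one is set).
def adjust_for_annual(comic_name, alternate_search, mode):
    if mode == 'want_ann':
        comic_name = comic_name + " annual"
        if alternate_search is not None and alternate_search != "None":
            alternate_search = alternate_search + " annual"
    return comic_name, alternate_search

print(adjust_for_annual('Uncanny Avengers', 'None', 'want_ann'))
# ('Uncanny Avengers annual', 'None')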
if IssueID is None:
#one-off the download.
print ("ComicName: " + ComicName)
@ -251,7 +258,7 @@ def NZB_SEARCH(ComicName, IssueNumber, ComicYear, SeriesYear, nzbprov, nzbpr, Is
findcomic.append(u_ComicName)
# this should be called elsewhere...redundant code.
issue_except = None
if '.' in IssueNumber:
if '.' in IssueNumber and IssueNumber[IssueNumber.find('.'):].isdigit():
isschk_find = IssueNumber.find('.')
isschk_b4dec = IssueNumber[:isschk_find]
isschk_decval = IssueNumber[isschk_find+1:]
@ -282,6 +289,10 @@ def NZB_SEARCH(ComicName, IssueNumber, ComicYear, SeriesYear, nzbprov, nzbpr, Is
iss = re.sub("[^0-9]", "", IssueNumber) # get just the digits
intIss = int(iss) * 1000
issue_except = 'AU' # if it contains AU, mark it as an exception (future dict possibly)
elif 'ai' in IssueNumber.lower():
iss = re.sub("[^0-9]", "", IssueNumber) # get just the digits
intIss = int(iss) * 1000
issue_except = 'AI' # if it contains AI, mark it as an exception (future dict possibly)
else:
iss = IssueNumber
intIss = int(iss) * 1000
@ -385,11 +396,12 @@ def NZB_SEARCH(ComicName, IssueNumber, ComicYear, SeriesYear, nzbprov, nzbpr, Is
pause_the_search = 1 * 60
#bypass for local newznabs
if nzbprov == 'newznab' and 'localhost' in str(host_newznab_fix):
pass
else:
logger.fdebug("pausing for " + str(pause_the_search) + " seconds before continuing to avoid hammering")
time.sleep(pause_the_search)
if nzbprov == 'newznab':
if host_newznab_fix[:3] == '10.' or host_newznab_fix[:4] == '172.' or host_newznab_fix[:4] == '192.' or 'localhost' in str(host_newznab_fix):
pass
else:
logger.fdebug("pausing for " + str(pause_the_search) + " seconds before continuing to avoid hammering")
time.sleep(pause_the_search)
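# Hedged sketch of the local/private newznab bypass above: simple prefix matching on the
# host string decides whether the anti-hammering pause can be skipped.
def is_private_newznab(host):
    return (host[:3] == '10.' or host[:4] == '172.' or
            host[:4] == '192.' or 'localhost' in host)

print(is_private_newznab('192.168.1.50:8080'))    # True  -> no pause needed
print(is_private_newznab('api.somenewznab.com'))  # False -> pause before the request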
try:
data = opener.open(request).read()
@ -559,6 +571,10 @@ def NZB_SEARCH(ComicName, IssueNumber, ComicYear, SeriesYear, nzbprov, nzbpr, Is
i+=1
logger.fdebug("chg_comic:" + str(chg_comic))
findcomic_chksplit = re.sub('[\-\:\,\.\?]', ' ', findcomic[findloop])
chg_comic = re.sub('[\s]', '', chg_comic)
findcomic_chksplit = re.sub('[\s]', '', findcomic_chksplit)
print chg_comic.upper()
print findcomic_chksplit.upper()
if chg_comic.upper() == findcomic_chksplit.upper():
logger.fdebug("series contains numerics...adjusting..")
else:
@ -583,10 +599,15 @@ def NZB_SEARCH(ComicName, IssueNumber, ComicYear, SeriesYear, nzbprov, nzbpr, Is
logger.fdebug(str(splitit) + " nzb series word count: " + str(splitst))
logger.fdebug(str(watchcomic_split) + " watchlist word count: " + str(len(watchcomic_split)))
#account for possible version inclusion here.
#account for possible version inclusion here and annual inclusions.
cvers = "false"
annualize = "false"
if 'annual' in ComicName.lower():
logger.fdebug("IssueID of : " + str(IssueID) + " - This is an annual...let's adjust.")
annualize = "true"
splitst = splitst - 1
for tstsplit in splitit:
if 'v' in tstsplit and tstsplit[1:].isdigit():
if 'v' in tstsplit.lower() and tstsplit[1:].isdigit():
logger.fdebug("this has a version #...let's adjust")
cvers = "true"
splitst = splitst - 1
@ -597,9 +618,12 @@ def NZB_SEARCH(ComicName, IssueNumber, ComicYear, SeriesYear, nzbprov, nzbpr, Is
if str(splitit[0]).lower() == "the" or str(watchcomic_split[0]).lower() == "the":
if str(splitit[0]).lower() == "the":
logger.fdebug("THE word detected...attempting to adjust pattern matching")
comiciss = comiciss[4:]
print comic_iss
print comic_iss[4:]
splitit = comic_iss[4:].split(None)
#splitit = splitit[4:]
splitst = splitst - 1 #remove 'the' from start
logger.fdebug("comic is now : " + str(comiciss))
logger.fdebug("comic is now : " + str(comic_iss[4:]))
if str(watchcomic_split[0]).lower() == "the":
wtstart = watchcomic_nonsplit[4:]
watchcomic_split = wtstart.split(None)
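# Small sketch of the leading-'the' adjustment above: the first four characters (the
# 'the ' prefix) are dropped from the issue-less nzb name before re-splitting on whitespace.
comic_iss = 'the walking dead'          # assumed example value for comic_iss
splitit = comic_iss[4:].split(None)     # ['walking', 'dead']
splitst = len(comic_iss.split(None)) - 1
print(splitit)
print(splitst)                          # word count with 'the' removed -> 2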
@ -713,7 +737,7 @@ def NZB_SEARCH(ComicName, IssueNumber, ComicYear, SeriesYear, nzbprov, nzbpr, Is
#issue comparison now as well
if int(intIss) == int(comintIss):
logger.fdebug('issues match!')
logger.info(u"Found " + str(ComicName) + " (" + str(comyear) + ") issue: " + str(IssueNumber) + " using " + str(nzbprov) )
logger.info(u"Found " + ComicName + " (" + str(comyear) + ") issue: " + str(IssueNumber) + " using " + str(nzbprov) )
## -- inherent issue. Comic year is non-standard. nzb year is the year
## -- comic was printed, not the start year of the comic series and
## -- thus the deciding component if matches are correct or not
@ -882,7 +906,7 @@ def NZB_SEARCH(ComicName, IssueNumber, ComicYear, SeriesYear, nzbprov, nzbpr, Is
if foundc == "yes":
foundcomic.append("yes")
logger.fdebug("Found matching comic...preparing to send to Updater with IssueID: " + str(IssueID) + " and nzbname: " + str(nzbname))
updater.nzblog(IssueID, nzbname, SARC, IssueArcID)
updater.nzblog(IssueID, nzbname, ComicName, SARC, IssueArcID)
nzbpr == 0
#break
return foundc
@ -890,7 +914,7 @@ def NZB_SEARCH(ComicName, IssueNumber, ComicYear, SeriesYear, nzbprov, nzbpr, Is
foundcomic.append("no")
logger.fdebug("couldn't find a matching comic")
if IssDateFix == "no":
logger.info(u"Couldn't find Issue " + str(IssueNumber) + " of " + str(ComicName) + "(" + str(comyear) + "). Status kept as wanted." )
logger.info(u"Couldn't find Issue " + str(IssueNumber) + " of " + ComicName + "(" + str(comyear) + "). Status kept as wanted." )
break
return foundc

View File

@ -24,17 +24,25 @@ import os
import mylar
from mylar import db, logger, helpers, filechecker
def dbUpdate():
def dbUpdate(ComicIDList=None):
myDB = db.DBConnection()
print "comicidlist:" + str(ComicIDList)
if ComicIDList is None:
comiclist = myDB.select('SELECT ComicID, ComicName from comics WHERE Status="Active" or Status="Loading" order by LastUpdated ASC')
else:
comiclist = ComicIDList
activecomics = myDB.select('SELECT ComicID, ComicName from comics WHERE Status="Active" or Status="Loading" order by LastUpdated ASC')
logger.info('Starting update for %i active comics' % len(activecomics))
logger.info('Starting update for %i active comics' % len(comiclist))
for comic in activecomics:
comicid = comic[0]
for comic in comiclist:
print "comic" + comic
if ComicIDList is None:
comicid = comic[0]
else:
comicid = comic
print "comicid" + str(comicid)
mismatch = "no"
if not mylar.CV_ONLY or comicid[:1] == "G":
CV_EXcomicid = myDB.action("SELECT * from exceptions WHERE ComicID=?", [comicid]).fetchone()
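# Sketch of the dual-source loop above: with no ComicIDList the rows come from the DB
# (tuple-like rows, ComicID in position 0); when a list of IDs is passed in, each entry
# is already a bare ComicID string. The rows below are assumed example data.
ComicIDList = None
comiclist = [('1234', 'Fantastic Four'), ('5678', 'Hawkeye')]   # assumed DB-style rows
for comic in comiclist:
    comicid = comic[0] if ComicIDList is None else comic
    print(comicid)   # 1234, 5678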
@ -173,7 +181,7 @@ def upcoming_update(ComicID, ComicName, IssueNumber, IssueDate, forcecheck=None)
#this works for issues existing in DB...
elif issuechk['Status'] == "Skipped":
newValue['Status'] = "Wanted"
values = { "Status": "Wanted"}
values = {"Status": "Wanted"}
logger.fdebug("...New status of Wanted")
elif issuechk['Status'] == "Wanted":
logger.fdebug("...Status already Wanted .. not changing.")
@ -185,6 +193,19 @@ def upcoming_update(ComicID, ComicName, IssueNumber, IssueDate, forcecheck=None)
logger.fdebug("--attempt to find errant adds to Wanted list")
logger.fdebug("UpcomingNewValue: " + str(newValue))
logger.fdebug("UpcomingcontrolValue: " + str(controlValue))
if issuechk['IssueDate'] == '0000-00-00' and newValue['IssueDate'] != '0000-00-00':
logger.fdebug("Found a 0000-00-00 issue - force updating series to try and get it proper.")
dateVal = {"IssueDate": newValue['IssueDate'],
"ComicName": issuechk['ComicName'],
"Status": newValue['Status'],
"IssueNumber": issuechk['Issue_Number']}
logger.fdebug("updating date in upcoming table to : " + str(newValue['IssueDate']))
logger.fdebug("ComicID:" + str(controlValue))
myDB.upsert("upcoming", dateVal, controlValue)
logger.fdebug("Temporarily putting the Issue Date for " + str(issuechk['Issue_Number']) + " to " + str(newValue['IssueDate']))
values = {"IssueDate": newValue['IssueDate']}
#if ComicID[:1] == "G": mylar.importer.GCDimport(ComicID,pullupd='yes')
#else: mylar.importer.addComictoDB(ComicID,mismatch,pullupd='yes')
myDB.upsert("issues", values, control)
if issuechk['Status'] == 'Downloaded':
logger.fdebug("updating Pull-list to reflect status.")
@ -230,7 +251,7 @@ def no_searchresults(ComicID):
"LatestIssue": "Error"}
myDB.upsert("comics", newValue, controlValue)
def nzblog(IssueID, NZBName, SARC=None, IssueArcID=None):
def nzblog(IssueID, NZBName, ComicName, SARC=None, IssueArcID=None):
myDB = db.DBConnection()
newValue = {"NZBName": NZBName}
@ -254,7 +275,7 @@ def nzblog(IssueID, NZBName, SARC=None, IssueArcID=None):
#print newValue
myDB.upsert("nzblog", newValue, controlValue)
def foundsearch(ComicID, IssueID, down=None):
def foundsearch(ComicID, IssueID, mode=None, down=None):
# When doing a Force Search (Wanted tab), the resulting search calls this to update.
# this is all redundant code that forceRescan already does.
@ -262,10 +283,14 @@ def foundsearch(ComicID, IssueID, down=None):
# series directory, it just scans for the issue it just downloaded and
# and change the status to Snatched accordingly. It is not to increment the have count
# at this stage as it's not downloaded - just the .nzb has been snatched and sent to SAB.
logger.info("comicid: " + str(ComicID))
logger.info("issueid: " + str(IssueID))
myDB = db.DBConnection()
comic = myDB.action('SELECT * FROM comics WHERE ComicID=?', [ComicID]).fetchone()
issue = myDB.action('SELECT * FROM issues WHERE IssueID=?', [IssueID]).fetchone()
if mode == 'want_ann':
issue = myDB.action('SELECT * FROM annuals WHERE IssueID=?', [IssueID]).fetchone()
else:
issue = myDB.action('SELECT * FROM issues WHERE IssueID=?', [IssueID]).fetchone()
CYear = issue['IssueDate'][:4]
if down is None:
@ -273,34 +298,51 @@ def foundsearch(ComicID, IssueID, down=None):
logger.fdebug("updating status to snatched")
controlValue = {"IssueID": IssueID}
newValue = {"Status": "Snatched"}
myDB.upsert("issues", newValue, controlValue)
if mode == 'want_ann':
myDB.upsert("annuals", newValue, controlValue)
else:
myDB.upsert("issues", newValue, controlValue)
# update the snatched DB
snatchedupdate = {"IssueID": IssueID,
"Status": "Snatched"
}
if mode == 'want_ann':
IssueNum = "Annual " + issue['Issue_Number']
else:
IssueNum = issue['Issue_Number']
newsnatchValues = {"ComicName": comic['ComicName'],
"ComicID": ComicID,
"Issue_Number": issue['Issue_Number'],
"Issue_Number": IssueNum,
"DateAdded": helpers.now(),
"Status": "Snatched"
}
myDB.upsert("snatched", newsnatchValues, snatchedupdate)
else:
if mode == 'want_ann':
IssueNum = "Annual " + issue['Issue_Number']
else:
IssueNum = issue['Issue_Number']
snatchedupdate = {"IssueID": IssueID,
"Status": "Downloaded"
}
newsnatchValues = {"ComicName": comic['ComicName'],
"ComicID": ComicID,
"Issue_Number": issue['Issue_Number'],
"Issue_Number": IssueNum,
"DateAdded": helpers.now(),
"Status": "Downloaded"
}
myDB.upsert("snatched", newsnatchValues, snatchedupdate)
controlValue = {"IssueID": IssueID}
newValue = {"Status": "Downloaded"}
myDB.upsert("issues", newValue, controlValue)
#print ("finished updating snatched db.")
logger.info(u"Updating now complete for " + comic['ComicName'] + " issue: " + str(issue['Issue_Number']))
logger.info(u"Updating now complete for " + comic['ComicName'] + " issue: " + str(IssueNum))
return
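# Minimal sketch of the mode routing above: 'want_ann' sends the status update to the
# annuals table and prefixes the recorded issue number with 'Annual'.
def snatch_target(mode, issue_number):
    if mode == 'want_ann':
        return 'annuals', 'Annual ' + issue_number
    return 'issues', issue_number

print(snatch_target('want_ann', '1'))   # ('annuals', 'Annual 1')
print(snatch_target('want', '304'))     # ('issues', '304')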
def forceRescan(ComicID,archive=None):
@ -319,6 +361,7 @@ def forceRescan(ComicID,archive=None):
fcnew = []
fn = 0
issuedupechk = []
annualdupechk = []
issueexceptdupechk = []
reissues = myDB.action('SELECT * FROM issues WHERE ComicID=?', [ComicID]).fetchall()
while (fn < fccnt):
@ -333,14 +376,14 @@ def forceRescan(ComicID,archive=None):
# temploc = tmpfc['ComicFilename'].replace('_', ' ')
temploc = re.sub('[\#\']', '', temploc)
logger.fdebug("temploc: " + str(temploc))
if 'annual' not in temploc:
if 'annual' not in temploc.lower():
#remove the extension here
extensions = ('.cbr','.cbz')
if temploc.lower().endswith(extensions):
#logger.fdebug("removed extension for issue:" + str(temploc))
temploc = temploc[:-4]
logger.fdebug("removed extension for issue:" + str(temploc))
#temploc = temploc[:-4]
deccnt = str(temploc).count('.')
if deccnt > 1:
if deccnt > 0:
#logger.fdebug("decimal counts are :" + str(deccnt))
#if the file is formatted with '.' in place of spaces we need to adjust.
#before replacing - check to see if digits on either side of decimal and if yes, DON'T REMOVE
@ -386,9 +429,11 @@ def forceRescan(ComicID,archive=None):
reiss = reissues[n]
except IndexError:
break
int_iss, iss_except = helpers.decimal_issue(reiss['Issue_Number'])
# int_iss, iss_except = helpers.decimal_issue(reiss['Issue_Number'])
int_iss = helpers.issuedigits(reiss['Issue_Number'])
issyear = reiss['IssueDate'][:4]
old_status = reiss['Status']
issname = reiss['IssueName']
#logger.fdebug("integer_issue:" + str(int_iss) + " ... status: " + str(old_status))
#if comic in format of "SomeSeries 5(c2c)(2013).cbr" whatever...it'll die.
@ -398,7 +443,7 @@ def forceRescan(ComicID,archive=None):
while (som < fcn):
#counts get buggered up when the issue is the last field in the filename - ie. '50.cbr'
#logger.fdebug("checking word - " + str(fcnew[som]))
logger.fdebug("checking word - " + str(fcnew[som]))
if ".cbr" in fcnew[som].lower():
fcnew[som] = fcnew[som].replace(".cbr", "")
elif ".cbz" in fcnew[som].lower():
@ -408,6 +453,7 @@ def forceRescan(ComicID,archive=None):
get_issue = shlex.split(str(fcnew[som]))
if fcnew[som] != " ":
fcnew[som] = get_issue[0]
if '.' in fcnew[som]:
#logger.fdebug("decimal detected...adjusting.")
try:
@ -419,96 +465,39 @@ def forceRescan(ComicID,archive=None):
else:
#numeric
pass
if fcnew[som].isdigit():
#this won't match on decimal issues - need to fix.
#logger.fdebug("digit detected")
if int(fcnew[som]) > 0:
# fcdigit = fcnew[som].lstrip('0')
#fcdigit = str(int(fcnew[som]))
fcdigit = int(fcnew[som]) * 1000
if som+1 < len(fcnew) and 'au' in fcnew[som+1].lower():
if len(fcnew[som+1]) == 2:
#if the 'AU' is in 005AU vs 005 AU it will yield different results.
fnd_iss_except = 'AU'
#logger.info("AU Detected - fnd_iss_except set.")
else:
#fcdigit = "0"
fcdigit = 0
elif "." in fcnew[som]:
#this will match on decimal issues
IssueChk = fcnew[som]
#logger.fdebug("decimal detected...analyzing if issue")
isschk_find = IssueChk.find('.')
isschk_b4dec = IssueChk[:isschk_find]
isschk_decval = IssueChk[isschk_find+1:]
if isschk_b4dec.isdigit():
#logger.fdebug("digit detected prior to decimal.")
if isschk_decval.isdigit():
pass
#logger.fdebug("digit detected after decimal.")
else:
#logger.fdebug("not an issue - no digit detected after decimal")
break
else:
#logger.fdebug("not an issue - no digit detected prior to decimal")
break
#logger.fdebug("IssueNumber: " + str(IssueChk))
#logger.fdebug("..before decimal: " + str(isschk_b4dec))
#logger.fdebug("...after decimal: " + str(isschk_decval))
#--let's make sure we don't wipe out decimal issues ;)
if int(isschk_decval) == 0:
iss = isschk_b4dec
intdec = int(isschk_decval)
else:
if len(isschk_decval) == 1:
iss = isschk_b4dec + "." + isschk_decval
intdec = int(isschk_decval) * 10
else:
iss = isschk_b4dec + "." + isschk_decval.rstrip('0')
intdec = int(isschk_decval.rstrip('0')) * 10
fcdigit = (int(isschk_b4dec) * 1000) + intdec
#logger.fdebug("b4dec: " + str(isschk_b4dec))
#logger.fdebug("decval: " + str(isschk_decval))
#logger.fdebug("intdec: " + str(intdec))
#logger.fdebug("let's compare with this issue value: " + str(fcdigit))
elif 'au' in fcnew[som].lower():
#if AU is part of issue (5AU instead of 5 AU)
austart = fcnew[som].lower().find('au')
if fcnew[som][:austart].isdigit():
fcdigit = int(fcnew[som][:austart]) * 1000
fnd_iss_except = 'AU'
#logger.info("iss_except set to AU")
else:
# it's a word, skip it.
fcdigit = 19283838380101193
if som+1 < len(fcnew) and len(fcnew[som+1]) == 2:
print "fcnew[som+1]: " + str(fcnew[som+1])
print "fcnew[som]: " + str(fcnew[som])
if 'au' in fcnew[som+1].lower():
#if the 'AU' is in 005AU vs 005 AU it will yield different results.
fcnew[som] = fcnew[som] + 'AU'
fcnew[som+1] = '93939999919190933'
logger.info("AU Detected seperate from issue - combining and continuing")
elif 'ai' in fcnew[som+1].lower():
#if the 'AI' is in 005AI vs 005 AI it will yield different results.
fcnew[som] = fcnew[som] + 'AI'
fcnew[som+1] = '93939999919190933'
logger.info("AI Detected seperate from issue - combining and continuing")
fcdigit = helpers.issuedigits(fcnew[som])
#logger.fdebug("fcdigit: " + str(fcdigit))
#logger.fdebug("int_iss: " + str(int_iss))
if "." in str(int_iss):
int_iss = helpers.decimal_issue(int_iss)
#logger.fdebug("this is the int issue:" + str(int_iss))
#logger.fdebug("this is the fcdigit:" + str(fcdigit))
if int(fcdigit) == int_iss:
#logger.fdebug("issue match")
#logger.fdebug("fnd_iss_except: " + str(fnd_iss_except))
#logger.fdebug("iss_except: " + str(iss_except))
if str(fnd_iss_except) != 'None' and str(iss_except) == 'AU':
if fnd_iss_except.lower() == iss_except.lower():
logger.fdebug("matched for AU")
else:
logger.fdebug("this is not an AU match..ignoring result.")
break
elif str(fnd_iss_except) == 'None' and str(iss_except) == 'AU':break
elif str(fnd_iss_except) == 'AU' and str(iss_except) == 'None':break
#if issyear in fcnew[som+1]:
# print "matched on year:" + str(issyear)
#issuedupechk here.
#print ("fcdigit:" + str(fcdigit))
#print ("findiss_except:" + str(fnd_iss_except) + " = iss_except:" + str(iss_except))
#if int(fcdigit) in issuedupechk and str(fnd_iss_except) not in issueexceptdupechk: #str(fnd_iss_except) == str(iss_except):
if int(fcdigit) == int_iss:
logger.fdebug("issue match - fcdigit: " + str(fcdigit) + " ... int_iss: " + str(int_iss))
if '-' in temploc and temploc.find(reiss['Issue_Number']) > temploc.find('-'):
logger.fdebug("I've detected a possible Title in the filename")
logger.fdebug("the issue # has occured after the -, so I'm assuming it's part of the Title")
break
for d in issuedupechk:
if int(d['fcdigit']) == int(fcdigit) and d['fnd_iss_except'] == str(fnd_iss_except):
if int(d['fcdigit']) == int(fcdigit):
logger.fdebug("duplicate issue detected - not counting this: " + str(tmpfc['ComicFilename']))
logger.fdebug("is a duplicate of " + d['filename'])
logger.fdebug("fcdigit:" + str(fcdigit) + " === dupedigit: " + str(d['fcdigit']))
issuedupe = "yes"
break
if issuedupe == "no":
@ -522,48 +511,122 @@ def forceRescan(ComicID,archive=None):
# to avoid duplicate issues which screws up the count...let's store the filename issues then
# compare earlier...
issuedupechk.append({'fcdigit': int(fcdigit),
'fnd_iss_except': fnd_iss_except})
'filename': tmpfc['ComicFilename']})
break
#else:
# if the issue # matches, but there is no year present - still match.
# determine a way to match on year if present, or no year (currently).
if issuedupe == "yes":
logger.fdebug("I should break out here because of a dupe.")
break
som+=1
if haveissue == "yes" or issuedupe == "yes": break
n+=1
else:
# annual inclusion here.
#logger.fdebug("checking " + str(temploc))
reannuals = myDB.action('SELECT * FROM annuals WHERE ComicID=?', [ComicID]).fetchall()
an_cnt = myDB.action("SELECT COUNT(*) FROM issues WHERE ComicID=?", [ComicID]).fetchall()
fcnew = shlex.split(str(temploc))
fcn = len(fcnew)
anncnt = an_cnt[0][0]
n = 0
while (n < anncnt):
som = 0
try:
reann = reannuals[n]
except IndexError:
break
int_iss, iss_except = helpers.decimal_issue(reann['Issue_Number'])
issyear = reann['IssueDate'][:4]
old_status = reann['Status']
while (som < fcn):
#counts get buggered up when the issue is the last field in the filename - ie. '50.cbr'
#logger.fdebug("checking word - " + str(fcnew[som]))
if ".cbr" in fcnew[som].lower():
fcnew[som] = fcnew[som].replace(".cbr", "")
elif ".cbz" in fcnew[som].lower():
fcnew[som] = fcnew[som].replace(".cbz", "")
if "(c2c)" in fcnew[som].lower():
fcnew[som] = fcnew[som].replace("(c2c)", " ")
get_issue = shlex.split(str(fcnew[som]))
if fcnew[som] != " ":
fcnew[som] = get_issue[0]
if fcnew[som].lower() == 'annual':
logger.fdebug("Annual detected.")
if fcnew[som+1].isdigit():
ann_iss = fcnew[som+1]
logger.fdebug("Annual # " + str(ann_iss) + " detected.")
fcdigit = int(ann_iss) * 1000
logger.fdebug("fcdigit:" + str(fcdigit))
logger.fdebug("int_iss:" + str(int_iss))
if int(fcdigit) == int_iss:
logger.fdebug("annual match - issue : " + str(int_iss))
for d in annualdupechk:
if int(d['fcdigit']) == int(fcdigit):
logger.fdebug("duplicate annual issue detected - not counting this: " + str(tmpfc['ComicFilename']))
issuedupe = "yes"
break
if issuedupe == "no":
logger.fdebug("matched...annual issue: " + rescan['ComicName'] + "#" + str(reann['Issue_Number']) + " --- " + str(int_iss))
havefiles+=1
haveissue = "yes"
isslocation = str(tmpfc['ComicFilename'])
issSize = str(tmpfc['ComicSize'])
logger.fdebug(".......filename: " + str(isslocation))
logger.fdebug(".......filesize: " + str(tmpfc['ComicSize']))
# to avoid duplicate issues which screws up the count...let's store the filename issues then
# compare earlier...
annualdupechk.append({'fcdigit': int(fcdigit)})
break
som+=1
if haveissue == "yes": break
n+=1
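# Sketch of the annual filename match above: once the literal word 'annual' is seen, the
# next all-digit token is taken as the annual number and scaled the same way as issues.
fcnew = ['Invincible', 'Annual', '2', '(2013)']   # assumed tokenized filename
for som, token in enumerate(fcnew):
    if token.lower() == 'annual' and som + 1 < len(fcnew) and fcnew[som + 1].isdigit():
        fcdigit = int(fcnew[som + 1]) * 1000
        print(fcdigit)   # 2000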
#we have the # of comics, now let's update the db.
#even if we couldn't find the physical issue, check the status.
#if Archived, increase the 'Have' count.
if archive:
issStatus = "Archived"
if haveissue == "no" and issuedupe == "no":
isslocation = "None"
if old_status == "Skipped":
if mylar.AUTOWANT_ALL:
if issuedupe == "yes": pass
else:
logger.fdebug("issueID to write to db:" + str(reiss['IssueID']))
#we have the # of comics, now let's update the db.
#even if we couldn't find the physical issue, check the status.
if 'annual' in temploc.lower():
controlValueDict = {"IssueID": str(reann['IssueID'])}
else:
controlValueDict = {"IssueID": reiss['IssueID']}
#if Archived, increase the 'Have' count.
if archive:
issStatus = "Archived"
if haveissue == "no" and issuedupe == "no":
isslocation = "None"
if old_status == "Skipped":
if mylar.AUTOWANT_ALL:
issStatus = "Wanted"
else:
issStatus = "Skipped"
elif old_status == "Archived":
havefiles+=1
issStatus = "Archived"
elif old_status == "Downloaded":
issStatus = "Archived"
havefiles+=1
elif old_status == "Wanted":
issStatus = "Wanted"
else:
issStatus = "Skipped"
elif old_status == "Archived":
havefiles+=1
issStatus = "Archived"
elif old_status == "Downloaded":
issStatus = "Archived"
havefiles+=1
elif old_status == "Wanted":
issStatus = "Wanted"
else:
issStatus = "Skipped"
controlValueDict = {"IssueID": reiss['IssueID']}
newValueDict = {"Status": issStatus }
elif haveissue == "yes":
issStatus = "Downloaded"
controlValueDict = {"IssueID": reiss['IssueID']}
newValueDict = {"Location": isslocation,
"ComicSize": issSize,
"Status": issStatus
}
myDB.upsert("issues", newValueDict, controlValueDict)
newValueDict = {"Status": issStatus }
elif haveissue == "yes":
issStatus = "Downloaded"
newValueDict = {"Location": isslocation,
"ComicSize": issSize,
"Status": issStatus
}
if 'annual' in temploc.lower():
myDB.upsert("annuals", newValueDict, controlValueDict)
else:
myDB.upsert("issues", newValueDict, controlValueDict)
fn+=1

View File

@ -479,7 +479,9 @@ class WebInterface(object):
raise cherrypy.HTTPRedirect("home")
addArtists.exposed = True
def queueissue(self, mode, ComicName=None, ComicID=None, ComicYear=None, ComicIssue=None, IssueID=None, new=False, redirect=None, SeriesYear=None, SARC=None, IssueArcID=None):
def queueissue(self, mode, ComicName=None, ComicID=None, ComicYear=None, ComicIssue=None, IssueID=None, new=False, redirect=None, SeriesYear=None, SARC=None, IssueArcID=None):
print "ComicID:" + str(ComicID)
print "mode:" + str(mode)
now = datetime.datetime.now()
myDB = db.DBConnection()
#mode dictates type of queue - either 'want' for individual comics, or 'series' for series watchlist.
@ -491,6 +493,7 @@ class WebInterface(object):
# comics that have X many issues
raise cherrypy.HTTPRedirect("searchit?name=%s&issue=%s&mode=%s" % (ComicName, 'None', 'pullseries'))
elif ComicID is None and mode == 'readlist':
print "blahblah"
# this is for marking individual comics from a readlist to be downloaded.
# Because there is no associated ComicID or IssueID, follow same pattern as in 'pullwant'
# except we know the Year
@ -503,10 +506,14 @@ class WebInterface(object):
logger.info(u"IssueArcID : " + str(IssueArcID))
if ComicYear is None: ComicYear = SeriesYear
logger.info(u"Marking " + ComicName + " " + ComicIssue + " as wanted...")
controlValueDict = {"IssueArcID": IssueArcID}
newStatus = {"Status": "Wanted"}
myDB.upsert("readinglist", newStatus, controlValueDict)
foundcom = search.search_init(ComicName=ComicName, IssueNumber=ComicIssue, ComicYear=ComicYear, SeriesYear=None, IssueDate=None, IssueID=None, AlternateSearch=None, UseFuzzy=None, ComicVersion=None, SARC=SARC, IssueArcID=IssueArcID)
if foundcom == "yes":
logger.info(u"Downloaded " + ComicName + " #" + ComicIssue + " (" + str(ComicYear) + ")")
raise cherrypy.HTTPRedirect("readlist")
#raise cherrypy.HTTPRedirect("readlist")
return foundcom
elif ComicID is None and mode == 'pullwant':
#this is for marking individual comics from the pullist to be downloaded.
@ -521,18 +528,28 @@ class WebInterface(object):
logger.info(u"Downloaded " + ComicName + " " + ComicIssue )
raise cherrypy.HTTPRedirect("pullist")
#return
elif mode == 'want':
elif mode == 'want' or mode == 'want_ann':
cdname = myDB.action("SELECT ComicName from comics where ComicID=?", [ComicID]).fetchone()
ComicName = cdname['ComicName']
logger.info(u"Marking " + ComicName + " issue: " + ComicIssue + " as wanted...")
#---
#this should be on its own somewhere
if IssueID is not None:
controlValueDict = {"IssueID": IssueID}
newStatus = {"Status": "Wanted"}
myDB.upsert("issues", newStatus, controlValueDict)
if mode == 'want':
logger.info(u"Marking " + ComicName + " issue: " + ComicIssue + " as wanted...")
myDB.upsert("issues", newStatus, controlValueDict)
else:
logger.info(u"Marking " + ComicName + " Annual: " + ComicIssue + " as wanted...")
myDB.upsert("annuals", newStatus, controlValueDict)
#---
#this should be on its own somewhere
#if IssueID is not None:
# controlValueDict = {"IssueID": IssueID}
# newStatus = {"Status": "Wanted"}
# myDB.upsert("issues", newStatus, controlValueDict)
#for future reference, the year should default to current year (.datetime)
issues = myDB.action("SELECT IssueDate FROM issues WHERE IssueID=?", [IssueID]).fetchone()
if mode == 'want':
issues = myDB.action("SELECT IssueDate FROM issues WHERE IssueID=?", [IssueID]).fetchone()
elif mode == 'want_ann':
issues = myDB.action("SELECT IssueDate FROM annuals WHERE IssueID=?", [IssueID]).fetchone()
if ComicYear == None:
ComicYear = str(issues['IssueDate'])[:4]
miy = myDB.action("SELECT * FROM comics WHERE ComicID=?", [ComicID]).fetchone()
@ -540,11 +557,12 @@ class WebInterface(object):
AlternateSearch = miy['AlternateSearch']
UseAFuzzy = miy['UseFuzzy']
ComicVersion = miy['ComicVersion']
foundcom = search.search_init(ComicName, ComicIssue, ComicYear, SeriesYear, issues['IssueDate'], IssueID, AlternateSearch, UseAFuzzy, ComicVersion)
foundcom = search.search_init(ComicName, ComicIssue, ComicYear, SeriesYear, issues['IssueDate'], IssueID, AlternateSearch, UseAFuzzy, ComicVersion, mode=mode)
if foundcom == "yes":
# file check to see if issue exists and update 'have' count
if IssueID is not None:
return updater.foundsearch(ComicID, IssueID)
logger.info("passing to updater.")
return updater.foundsearch(ComicID, IssueID, mode)
if ComicID:
raise cherrypy.HTTPRedirect("artistPage?ComicID=%s" % ComicID)
else:
@ -579,7 +597,7 @@ class WebInterface(object):
if popit:
w_results = myDB.select("SELECT PUBLISHER, ISSUE, COMIC, STATUS from weekly")
for weekly in w_results:
if weekly['ISSUE'].isdigit() or 'au' in weekly['ISSUE'].lower():
if weekly['ISSUE'].isdigit() or 'au' in weekly['ISSUE'].lower() or 'ai' in weekly['ISSUE'].lower():
weeklyresults.append({
"PUBLISHER" : weekly['PUBLISHER'],
"ISSUE" : weekly['ISSUE'],
@ -786,7 +804,8 @@ class WebInterface(object):
comicsToAdd.append(ComicID)
if len(comicsToAdd) > 0:
logger.debug("Refreshing comics: %s" % comicsToAdd)
threading.Thread(target=importer.addComicIDListToDB, args=[comicsToAdd]).start()
#threading.Thread(target=importer.addComicIDListToDB, args=[comicsToAdd]).start()
threading.Thread(target=updater.dbUpdate, args=[comicsToAdd]).start()
raise cherrypy.HTTPRedirect("home")
markComics.exposed = True
@ -830,7 +849,8 @@ class WebInterface(object):
readlist = myDB.select("SELECT * from readinglist group by StoryArcID COLLATE NOCASE")
issuelist = myDB.select("SELECT * from readlist")
readConfig = {
"read2filename" : helpers.checked(mylar.READ2FILENAME)
"read2filename" : helpers.checked(mylar.READ2FILENAME),
"storyarcdir" : helpers.checked(mylar.STORYARCDIR)
}
return serve_template(templatename="readinglist.html", title="Readlist", readlist=readlist, issuelist=issuelist,readConfig=readConfig)
return page
@ -839,7 +859,11 @@ class WebInterface(object):
def detailReadlist(self,StoryArcID, StoryArcName):
myDB = db.DBConnection()
readlist = myDB.select("SELECT * from readinglist WHERE StoryArcID=? order by ReadingOrder ASC", [StoryArcID])
return serve_template(templatename="readlist.html", title="Detailed Arc list", readlist=readlist, storyarcname=StoryArcName, storyarcid=StoryArcID)
readConfig = {
"read2filename" : helpers.checked(mylar.READ2FILENAME),
"storyarcdir" : helpers.checked(mylar.STORYARCDIR)
}
return serve_template(templatename="readlist.html", title="Detailed Arc list", readlist=readlist, storyarcname=StoryArcName, storyarcid=StoryArcID, readConfig=readConfig)
detailReadlist.exposed = True
def removefromreadlist(self, IssueID=None, StoryArcID=None, IssueArcID=None, AllRead=None):
@ -976,7 +1000,7 @@ class WebInterface(object):
mod_watch = re.sub('\\band\\b', '', mod_watch.lower())
mod_watch = re.sub(r'\s', '', mod_watch)
if mod_watch == mod_arc:# and arc['SeriesYear'] == comic['ComicYear']:
logger.fdebug("intial name match - confirming issue # is present in series")
logger.fdebug("initial name match - confirming issue # is present in series")
if comic['ComicID'][:1] == 'G':
# if it's a multi-volume series, it's decimalized - let's get rid of the decimal.
GCDissue, whocares = helpers.decimal_issue(arc['IssueNumber'])
@ -989,6 +1013,11 @@ class WebInterface(object):
if isschk is None:
logger.fdebug("we matched on name, but issue " + str(arc['IssueNumber']) + " doesn't exist for " + comic['ComicName'])
else:
#this gets ugly - even if both the name and the issue match, it could still be the wrong series
#use series year to break it down further.
if int(comic['ComicYear']) != int(arc['SeriesYear']):
logger.fdebug("Series years are different - discarding match. " + str(comic['ComicYear']) + " != " + str(arc['SeriesYear']))
break
logger.fdebug("issue #: " + str(arc['IssueNumber']) + " is present!")
print isschk
print ("Comicname: " + arc['ComicName'])
@ -1042,20 +1071,81 @@ class WebInterface(object):
newVal = {"Status": "Wanted",
"IssueID": issue['IssueID']}
myDB.upsert("readinglist",newVal,ctrlVal)
logger.info("Marked " + issue['ComicName'] + " :# " + str(issue['Issue_Number']) + " as WANTED.")
logger.info("Marked " + issue['ComicName'] + " :# " + str(issue['Issue_Number']) + " as Wanted.")
ArcWatchlist.exposed = True
def ReadGetWanted(self, StoryArcID):
# this will queue up (ie. make 'Wanted') issues in a given Story Arc that are 'Not Watched'
print StoryArcID
stupdate = []
myDB = db.DBConnection()
wantedlist = myDB.select("SELECT * FROM readlist WHERE StoryArcID=? AND Status='Not Watched'", [StoryArcID])
wantedlist = myDB.select("SELECT * FROM readinglist WHERE StoryArcID=? AND Status is Null", [StoryArcID])
if wantedlist is not None:
for want in wantedlist:
self.queueissue(mode='readinglist', ComicName=want['ComicName'], ComicID=None, ComicYear=want['ComicYear'], ComicIssue=want['Issue_Number'], IssueID=None, SeriesYear=want['SeriesYear'])
print want
issuechk = myDB.action("SELECT * FROM issues WHERE IssueID=?", [want['IssueArcID']]).fetchone()
SARC = want['StoryArc']
IssueArcID = want['IssueArcID']
if issuechk is None:
# none means it's not a 'watched' series
logger.fdebug("-- NOT a watched series queue.")
logger.fdebug(want['ComicName'] + " -- #" + str(want['IssueNumber']))
logger.info(u"Story Arc : " + str(SARC) + " queueing selected issue...")
logger.info(u"IssueArcID : " + str(IssueArcID))
foundcom = search.search_init(ComicName=want['ComicName'], IssueNumber=want['IssueNumber'], ComicYear=want['IssueYear'], SeriesYear=want['SeriesYear'], IssueDate=None, IssueID=None, AlternateSearch=None, UseFuzzy=None, ComicVersion=None, SARC=SARC, IssueArcID=IssueArcID)
else:
# it's a watched series
logger.fdebug("-- watched series queue.")
logger.fdebug(issuechk['ComicName'] + " -- #" + str(issuechk['Issue_Number']))
foundcom = search.search_init(ComicName=issuechk['ComicName'], IssueNumber=issuechk['Issue_Number'], ComicYear=issuechk['IssueYear'], SeriesYear=issuechk['SeriesYear'], IssueDate=None, IssueID=issuechk['IssueID'], AlternateSearch=None, UseFuzzy=None, ComicVersion=None, SARC=SARC, IssueArcID=IssueArcID)
if foundcom == "yes":
print "sucessfully found."
else:
print "not sucessfully found."
stupdate.append({"Status": "Wanted",
"IssueArcID": IssueArcID,
"IssueID": "None"})
watchlistchk = myDB.select("SELECT * FROM readinglist WHERE StoryArcID=? AND Status='Wanted'", [StoryArcID])
if watchlistchk is not None:
for watchchk in watchlistchk:
print "Watchlist hit - " + str(watchchk)
issuechk = myDB.action("SELECT * FROM issues WHERE IssueID=?", [watchchk['IssueArcID']]).fetchone()
SARC = watchchk['StoryArc']
IssueArcID = watchchk['IssueArcID']
if issuechk is None:
# none means it's not a 'watched' series
logger.fdebug("-- NOT a watched series queue.")
logger.fdebug(watchchk['ComicName'] + " -- #" + str(watchchk['IssueNumber']))
logger.info(u"Story Arc : " + str(SARC) + " queueing selected issue...")
logger.info(u"IssueArcID : " + str(IssueArcID))
foundcom = search.search_init(ComicName=watchchk['ComicName'], IssueNumber=watchchk['IssueNumber'], ComicYear=watchchk['IssueYear'], SeriesYear=watchchk['SeriesYear'], IssueDate=None, IssueID=None, AlternateSearch=None, UseFuzzy=None, ComicVersion=None, SARC=SARC, IssueArcID=IssueArcID)
else:
# it's a watched series
logger.fdebug("-- watched series queue.")
logger.fdebug(issuechk['ComicName'] + " -- #" + str(issuechk['Issue_Number']))
foundcom = search.search_init(ComicName=issuechk['ComicName'], IssueNumber=issuechk['Issue_Number'], ComicYear=issuechk['IssueYear'], SeriesYear=issuechk['SeriesYear'], IssueDate=None, IssueID=issuechk['IssueID'], AlternateSearch=None, UseFuzzy=None, ComicVersion=None, SARC=SARC, IssueArcID=IssueArcID)
if foundcom == "yes":
print "sucessfully found."
else:
print "Watchlist issue not sucessfully found."
print "issuearcid: " + str(IssueArcID)
print "issueid: " + str(IssueID)
stupdate.append({"Status": "Wanted",
"IssueArcID": IssueArcID,
"IssueID": issuechk['IssueID']})
if len(stupdate) > 0:
print str(len(stupdate)) + " issues need to get updated to Wanted Status"
for st in stupdate:
ctrlVal = {'IssueArcID': st['IssueArcID']}
newVal = {'Status': st['Status']}
if st['IssueID']:
print "issueid:" + str(st['IssueID'])
newVal['IssueID'] = st['IssueID']
myDB.upsert("readinglist", newVal, ctrlVal)
ReadGetWanted.exposed = True
@ -1280,13 +1370,13 @@ class WebInterface(object):
while (sl < int(noids)):
soma_sl = soma['comic_info'][sl]
print ("soma_sl: " + str(soma_sl))
print ("comicname: " + soma_sl['comicname'])
print ("filename: " + soma_sl['comfilename'])
print ("comicname: " + soma_sl['comicname'].encode('utf-8'))
print ("filename: " + soma_sl['comfilename'].encode('utf-8'))
controlValue = {"impID": soma_sl['impid']}
newValue = {"ComicYear": soma_sl['comicyear'],
"Status": "Not Imported",
"ComicName": soma_sl['comicname'],
"ComicFilename": soma_sl['comfilename'],
"ComicName": soma_sl['comicname'].encode('utf-8'),
"ComicFilename": soma_sl['comfilename'].encode('utf-8'),
"ComicLocation": soma_sl['comlocation'].encode('utf-8'),
"ImportDate": helpers.today(),
"WatchMatch": soma_sl['watchmatch']}
@ -1592,6 +1682,8 @@ class WebInterface(object):
"enable_extra_scripts" : helpers.checked(mylar.ENABLE_EXTRA_SCRIPTS),
"extra_scripts" : mylar.EXTRA_SCRIPTS,
"post_processing" : helpers.checked(mylar.POST_PROCESSING),
"enable_meta" : helpers.checked(mylar.ENABLE_META),
"cmtagger_path" : mylar.CMTAGGER_PATH,
"branch" : version.MYLAR_VERSION,
"br_type" : mylar.INSTALL_TYPE,
"br_version" : mylar.versioncheck.getVersion(),
@ -1728,7 +1820,8 @@ class WebInterface(object):
use_sabnzbd=0, sab_host=None, sab_username=None, sab_apikey=None, sab_password=None, sab_category=None, sab_priority=None, sab_directory=None, log_dir=None, log_level=0, blackhole=0, blackhole_dir=None,
use_nzbget=0, nzbget_host=None, nzbget_port=None, nzbget_username=None, nzbget_password=None, nzbget_category=None, nzbget_priority=None,
usenet_retention=None, nzbsu=0, nzbsu_apikey=None, dognzb=0, dognzb_apikey=None, nzbx=0, newznab=0, newznab_host=None, newznab_apikey=None, newznab_enabled=0,
raw=0, raw_provider=None, raw_username=None, raw_password=None, raw_groups=None, experimental=0,
raw=0, raw_provider=None, raw_username=None, raw_password=None, raw_groups=None, experimental=0,
enable_meta=0, cmtagger_path=None,
prowl_enabled=0, prowl_onsnatch=0, prowl_keys=None, prowl_priority=None, nma_enabled=0, nma_apikey=None, nma_priority=0, nma_onsnatch=0, pushover_enabled=0, pushover_onsnatch=0, pushover_apikey=None, pushover_userkey=None, pushover_priority=None,
preferred_quality=0, move_files=0, rename_files=0, add_to_csv=1, cvinfo=0, lowercase_filenames=0, folder_format=None, file_format=None, enable_extra_scripts=0, extra_scripts=None, enable_pre_scripts=0, pre_scripts=None, post_processing=0, syno_fix=0, search_delay=None, chmod_dir=0777, chmod_file=0660, cvapifix=0,
destination_dir=None, replace_spaces=0, replace_char=None, use_minsize=0, minsize=None, use_maxsize=0, maxsize=None, autowant_all=0, autowant_upcoming=0, comic_cover_local=0, zero_level=0, zero_level_n=None, interface=None, **kwargs):
@ -1817,6 +1910,8 @@ class WebInterface(object):
mylar.ENABLE_PRE_SCRIPTS = enable_pre_scripts
mylar.POST_PROCESSING = post_processing
mylar.PRE_SCRIPTS = pre_scripts
mylar.ENABLE_META = enable_meta
mylar.CMTAGGER_PATH = cmtagger_path
mylar.LOG_DIR = log_dir
mylar.LOG_LEVEL = log_level
mylar.CHMOD_DIR = chmod_dir
@ -1858,6 +1953,11 @@ class WebInterface(object):
logger.info("CHMOD File value is not a valid numeric - please correct. Defaulting to 0660")
mylar.CHMOD_FILE = '0660'
if mylar.ENABLE_META:
if mylar.CMTAGGER_PATH is None or mylar.CMTAGGER_PATH == '':
logger.info("ComicTagger Path not set - defaulting to Mylar Program Directory : " + mylar.PROG_DIR)
mylar.CMTAGGER_PATH = mylar.PROG_DIR
# Write the config
mylar.config_write()

View File

@ -380,7 +380,7 @@ def pullitcheck(comic1off_name=None,comic1off_id=None,forcecheck=None):
cur = con.cursor()
# if it's a one-off check (during an add series), load the comicname here and ignore below.
if comic1off_name:
logger.fdebug("this is a one-off" + str(comic1off_name))
logger.fdebug("this is a one-off" + comic1off_name)
lines.append(comic1off_name.strip())
unlines.append(comic1off_name.strip())
comicid.append(comic1off_id)
@ -398,16 +398,23 @@ def pullitcheck(comic1off_name=None,comic1off_id=None,forcecheck=None):
# year, and the new series starts in the same year - ie. Avengers
# lets' grab the latest issue date and see how far it is from current
# anything > 45 days we'll assume it's a false match ;)
#logger.fdebug("ComicName: " + watchd[1])
logger.fdebug("ComicName: " + watchd[1])
latestdate = watchd[5]
#logger.fdebug("latestdate: " + str(latestdate))
c_date = datetime.date(int(latestdate[:4]),int(latestdate[5:7]),1)
logger.fdebug("latestdate: " + str(latestdate))
if latestdate[8:] == '':
logger.fdebug("invalid date " + str(latestdate) + " appending 01 for day for continuation.")
latest_day = '01'
else:
latest_day = latestdate[8:]
c_date = datetime.date(int(latestdate[:4]),int(latestdate[5:7]),int(latest_day))
n_date = datetime.date.today()
#logger.fdebug("c_date : " + str(c_date) + " ... n_date : " + str(n_date))
logger.fdebug("c_date : " + str(c_date) + " ... n_date : " + str(n_date))
recentchk = (n_date - c_date).days
#logger.fdebug("recentchk: " + str(recentchk) + " days")
#logger.fdebug(" ----- ")
if recentchk < 55:
logger.fdebug("recentchk: " + str(recentchk) + " days")
chklimit = helpers.checkthepub(watchd[0])
logger.fdebug("Check date limit set to : " + str(chklimit))
logger.fdebug(" ----- ")
if recentchk < int(chklimit):
# let's not even bother with comics that are in the Present.
a_list.append(watchd[1])
b_list.append(watchd[2])
@ -423,6 +430,8 @@ def pullitcheck(comic1off_name=None,comic1off_id=None,forcecheck=None):
ltmpwords = len(tmpwords)
ltmp = 1
w+=1
else:
logger.fdebug("Determined to not be a Continuing series at this time.")
cnt = int(w-1)
cntback = int(w-1)
kp = []
@ -436,12 +445,14 @@ def pullitcheck(comic1off_name=None,comic1off_id=None,forcecheck=None):
while (cnt > -1):
lines[cnt] = lines[cnt].upper()
#llen[cnt] = str(llen[cnt])
#logger.fdebug("looking for : " + str(lines[cnt]))
sqlsearch = re.sub('[\_\#\,\/\:\;\.\-\!\$\%\&\'\?\@]', ' ', lines[cnt])
sqlsearch = re.sub(r'\s', '%', sqlsearch)
if 'THE' in sqlsearch: sqlsearch = re.sub('THE', '', sqlsearch)
logger.fdebug("looking for : " + lines[cnt])
sqlsearch = re.sub('[\_\#\,\/\:\;\.\-\!\$\%\'\?\@]', ' ', lines[cnt])
sqlsearch = re.sub("\&", '%', sqlsearch)
sqlsearch = re.sub("\\bAND\\b", '%', sqlsearch)
sqlsearch = re.sub("\\bTHE\\b", '', sqlsearch)
if '+' in sqlsearch: sqlsearch = re.sub('\+', '%PLUS%', sqlsearch)
#logger.fdebug("searchsql: " + str(sqlsearch))
sqlsearch = re.sub(r'\s', '%', sqlsearch)
logger.fdebug("searchsql: " + sqlsearch)
weekly = myDB.select('SELECT PUBLISHER, ISSUE, COMIC, EXTRA, SHIPDATE FROM weekly WHERE COMIC LIKE (?)', [sqlsearch])
#cur.execute('SELECT PUBLISHER, ISSUE, COMIC, EXTRA, SHIPDATE FROM weekly WHERE COMIC LIKE (?)', [lines[cnt]])
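# Sketch of the search-pattern normalization above: special characters become spaces,
# '&'/'AND' become wildcards, a standalone 'THE' is dropped, and remaining whitespace
# turns into SQL LIKE wildcards (the input is already upper-cased at this point).
import re

line = 'THE AMAZING SPIDER-MAN & FRIENDS'
sqlsearch = re.sub('[\_\#\,\/\:\;\.\-\!\$\%\'\?\@]', ' ', line)
sqlsearch = re.sub("\&", '%', sqlsearch)
sqlsearch = re.sub("\\bAND\\b", '%', sqlsearch)
sqlsearch = re.sub("\\bTHE\\b", '', sqlsearch)
sqlsearch = re.sub(r'\s', '%', sqlsearch)
print(sqlsearch)   # %AMAZING%SPIDER%MAN%%%FRIENDS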
for week in weekly:
@ -462,22 +473,24 @@ def pullitcheck(comic1off_name=None,comic1off_id=None,forcecheck=None):
comicnm = week['COMIC']
#here's the tricky part, ie. BATMAN will match on
#every batman comic, not exact
#logger.fdebug("comparing" + str(comicnm) + "..to.." + str(unlines[cnt]).upper())
logger.fdebug("comparing" + comicnm + "..to.." + unlines[cnt].upper())
#-NEW-
# strip out all special characters and compare
watchcomic = re.sub('[\_\#\,\/\:\;\.\-\!\$\%\&\'\?\@]', '', unlines[cnt])
comicnm = re.sub('[\_\#\,\/\:\;\.\-\!\$\%\&\'\?\@]', '', comicnm)
watchcomic = re.sub(r'\s', '', watchcomic)
comicnm = re.sub(r'\s', '', comicnm)
#logger.fdebug("Revised_Watch: " + watchcomic)
#logger.fdebug("ComicNM: " + comicnm)
if 'THE' in watchcomic.upper():
modwatchcomic = re.sub('THE', '', watchcomic.upper())
modcomicnm = re.sub('THE', '', comicnm)
watchcomic = re.sub('[\_\#\,\/\:\;\.\-\!\$\%\'\?\@]', '', unlines[cnt])
comicnm = re.sub('[\_\#\,\/\:\;\.\-\!\$\%\'\?\@]', '', comicnm)
if "THE" in watchcomic.upper() or "THE" in comicnm.upper():
modwatchcomic = re.sub("\\bTHE\\b", "", watchcomic.upper())
modcomicnm = re.sub("\\bTHE\\b", "", comicnm)
else:
modwatchcomic = watchcomic
modcomicnm = comicnm
if '&' in watchcomic.upper():
modwatchcomic = re.sub('\&', 'AND', modwatchcomic.upper())
modcomicnm = re.sub('\&', 'AND', modcomicnm)
if '&' in comicnm:
modwatchcom = re.sub('\&', 'AND', modwatchcomic.upper())
modcomicnm = re.sub('\&', 'AND', modcomicnm)
#thnx to A+X for this...
if '+' in watchcomic:
logger.fdebug("+ detected...adjusting.")
@ -486,8 +499,15 @@ def pullitcheck(comic1off_name=None,comic1off_id=None,forcecheck=None):
modwatchcomic = re.sub('\+', 'PLUS', modwatchcomic)
#logger.fdebug("modcomicnm:" + modcomicnm)
#logger.fdebug("modwatchcomic:" + modwatchcomic)
watchcomic = re.sub(r'\s', '', watchcomic)
comicnm = re.sub(r'\s', '', comicnm)
modwatchcomic = re.sub(r'\s', '', modwatchcomic)
modcomicnm = re.sub(r'\s', '', modcomicnm)
logger.fdebug("watchcomic : " + str(watchcomic) + " / mod :" + str(modwatchcomic))
logger.fdebug("comicnm : " + str(comicnm) + " / mod :" + str(modcomicnm))
if comicnm == watchcomic.upper() or modcomicnm == modwatchcomic.upper():
logger.fdebug("matched on:" + str(comicnm) + "..." + str(watchcomic).upper())
logger.fdebug("matched on:" + comicnm + "..." + watchcomic.upper())
pass
elif ("ANNUAL" in week['EXTRA']):
pass