FIX: One-off Failed Download handling now works, IMP: Better error handling when detecting one-offs during post-processing, FIX:(#1143) When a series title contained a digit preceded by a dash, it was incorrectly treated as a negative issue number, FIX: Improved detection of corresponding annuals on CV when refreshing/importing a series whose annuals are new (i.e. have no data on CV yet), FIX: Alt_Pull method for weekly pull-list retrieval is working again, FIX: Fixed nzbid detection for the experimental search, IMP: Mass Import button now available on the Import Results screen (attempts to import all series that are in a 'Not Imported' status), IMP: When searching for arc issues using the 'Search for Missing' option, the call is now threaded so it runs in the background, IMP: Changed IssueYear to IssueDate for Story Arc Issues on the arc details page, FIX:(#1156) Typo that caused an error when attempting to view cbz comics on the series detail page, FIX:(#1145) Select All option via the top checkbox (on the series detail page), FIX: The Auto-Want feature via the weekly pull list now better matches titles that contain 'the' or have hyphens in differing character positions, FIX:(#1160) Would incorrectly take the length of a decimal issue number when searching, so the issue-number cycling search never completed, FIX:(#1161) When annuals were not enabled, with a series in the watchlist and that series' annual on the pull list, linking the series to the pull list would error out.

This commit is contained in:
evilhero 2015-11-18 01:32:40 -05:00
parent 72065be604
commit 626167813f
16 changed files with 565 additions and 173 deletions
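As context for the #1143 entry in the commit message above, the following sketch (hypothetical Python, not Mylar's actual filechecker code; the series names are invented) shows the distinction the parser has to make between a dash glued to the series title and a standalone negative issue number:

import re

def negative_issue_tokens(filename):
    # A '-N' counts as a negative issue number only when it stands alone:
    # start-of-string or whitespace before it, whitespace or end-of-string after it.
    # A dash glued to the previous word, as in 'District-9', stays part of the title.
    return re.findall(r'(?:(?<=\s)|^)(-\d+)(?=\s|$)', filename)

# negative_issue_tokens('District-9 002 (2015)')  ->  []
# negative_issue_tokens('Zero Hour -1 (1994)')    ->  ['-1']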

View File

@@ -490,7 +490,7 @@
<form action="markannuals" method="get" id="markannuals">
<div id="markannuals">Mark selected annuals as
<select name="ann_action" form="markannuals" onChange="doAjaxCall('markannuals',$(this),'table',true);" data-success="selected issues marked">
<select name="action" form="markannuals" onChange="doAjaxCall('markannuals',$(this),'table',true);" data-success="selected issues marked">
<option disabled="disabled" selected="selected">Choose...</option>
<option value="Wanted">Wanted</option>
<option value="Skipped">Skipped</option>
@@ -516,7 +516,7 @@
-->
<thead>
<tr>
<th id="ann_action" align="left"><input type="checkbox" onClick="toggle(this)" class="checkbox" /></th>
<th id="select" align="left"><input type="checkbox" onClick="toggle(this)" name="annuals" class="checkbox" /></th>
<th id="aint_issuenumber">Int_IssNumber</th>
<th id="aissuenumber">Number</th>
<th id="aissuename">Name</th>
@@ -548,7 +548,7 @@
agrade = 'A'
%>
<tr class="${annual['Status']} grade${agrade}">
<td id="ann_action"><input type="checkbox" name="${annual['IssueID']}" class="checkbox" value="${annual['IssueID']}" /></td>
<td id="select"><input type="checkbox" name="${annual['IssueID']}" class="checkbox" value="${annual['IssueID']}" /></td>
<%
if annual['Int_IssueNumber'] is None:
annual_Number = annual['Int_IssueNumber']

View File

@@ -8,6 +8,7 @@
<div id="subhead_container">
<div id="subhead_menu">
<a id="menu_link_refresh" onclick="doAjaxCall('flushImports', $(this),'table')" data-success="Import Results Flushed">Flush all Imports</a>
<a id="menu_link_refresh" onclick="doAjaxCall('markImports?action=massimport', $(this),'table')" data-success="Successfully started Mass Import of Non-Imported items.">Mass Import</a>
</div>
</div>
</%def>
@@ -55,7 +56,7 @@
<div id="markcomic">
<select name="action" onChange="doAjaxCall('markImports',$(this),'table',true);" data-error="You didn't select any comics">
<option disabled="disabled" selected="selected">Choose...</option>
<option value="massimport">Start Import</option>
<option value="importselected">Start Import</option>
<option value="removeimport">Remove</option>
</select>
<input type="hidden" value="Go">

View File

@@ -13,8 +13,8 @@
%endif
<a id="menu_link_delete" href="#">Remove Read</a>
<a id="menu_link_delete" href="#">Clear File Cache</a>
<a id="menu_link_refresh" onclick="doAjaxCall('ReadGetWanted?StoryArcID=${storyarcid}',$(this),'table')" data-success="Searching for Missing StoryArc Issues">Search for Missing</a>
<a id="menu_link_refresh" onclick="doAjaxCall('ArcWatchlist?StoryArcID=${storyarcid}',$(this),'table')" data-success="Searching for matches on Watchlist">Search for Watchlist matches</a>
<a id="menu_link_refresh" onclick="doAjaxCall('SearchArcIssues?StoryArcID=${storyarcid}',$(this),'table')" data-success="Now searching for Missing StoryArc Issues">Search for Missing</a>
<a id="menu_link_refresh" onclick="doAjaxCall('ArcWatchlist?StoryArcID=${storyarcid}',$(this),'table')" data-success="Now searching for matches on Watchlist">Search for Watchlist matches</a>
%if cvarcid:
<a id="menu_link_refresh" onclick="doAjaxCall('addStoryArc_thread?arcid=${storyarcid}&cvarcid=${cvarcid}&storyarcname=${storyarcname}&arcrefresh=True',$(this),'table')" data-success="Refreshed Story Arc">Refresh Story Arc</a>
%endif
@@ -64,7 +64,7 @@
<th id="readingorder"></th>
<th id="comicname">ComicName</th>
<th id="issue">Issue</th>
<th id="issueyear">Pub Year</th>
<th id="issuedate">Pub Date</th>
<th id="status">Status</th>
<th id="action">Options</th>
</tr>
@@ -101,26 +101,26 @@
<%
try:
if item['IssueDate'] != '0000-00-00' and item['IssueDate'] is not None:
issueyear = item['IssueDate'][:4]
issuedate = item['IssueDate']
else:
if item['StoreDate'] != '0000-00-00' and item['StoreDate'] is not None:
issueyear = item['IssueDate'][:4]
issuedate = item['StoreDate']
else:
# this is needed for imported cbl's
try:
issueyear = item['issueYEAR']
issuedate = item['issueYEAR']
except:
issueyear = '0000'
issuedate = '0000-00-00'
except:
issueyear = '0000'
issuedate = '0000-00-00'
%>
<td id="issueyear">${issueyear}</td>
<td id="issuedate">${issuedate}</td>
<td id="status">${item['Status']}</td>
<td id="action">
%if item['Status'] is None or item['Status'] == None:
<a href="#" onclick="doAjaxCall('queueit?ComicName=${item['ComicName'] | u}&ComicIssue=${item['IssueNumber']}&ComicYear=${issueyear}&mode=readlist&SARC=${item['StoryArc']}&IssueArcID=${item['IssueArcID']}&SeriesYear=${item['SeriesYear']}',$(this),'table')" data-success="Now searching for ${item['ComicName']} #${item['IssueNumber']}"><span class="ui-icon ui-icon-plus"></span>Grab it</a>
<a href="#" onclick="doAjaxCall('queueit?ComicName=${item['ComicName'] | u}&ComicIssue=${item['IssueNumber']}&ComicYear=${issuedate}&mode=readlist&SARC=${item['StoryArc']}&IssueArcID=${item['IssueArcID']}&SeriesYear=${item['SeriesYear']}',$(this),'table')" data-success="Now searching for ${item['ComicName']} #${item['IssueNumber']}"><span class="ui-icon ui-icon-plus"></span>Grab it</a>
%elif item['Status'] == 'Snatched':
<a href="#" onclick="doAjaxCall('queueissue?ComicName=${item['ComicName'] | u}&ComicIssue=${item['IssueNumber']}&ComicYear=${issueyear}&mode=readlist&SARC=${item['StoryArc']}&IssueArcID=${item['IssueArcID']}&SeriesYear=${item['SeriesYear']}',$(this),'table')" data-success="Trying to Retry"><span class="ui-icon ui-icon-plus"></span>Retry</a>
<a href="#" onclick="doAjaxCall('queueissue?ComicName=${item['ComicName'] | u}&ComicIssue=${item['IssueNumber']}&ComicYear=${issuedate}&mode=readlist&SARC=${item['StoryArc']}&IssueArcID=${item['IssueArcID']}&SeriesYear=${item['SeriesYear']}',$(this),'table')" data-success="Trying to Retry"><span class="ui-icon ui-icon-plus"></span>Retry</a>
%endif
</td>
</tr>
@@ -135,7 +135,9 @@
<%def name="javascriptIncludes()">
<script src="js/libs/jquery.dataTables.min.js"></script>
<script src="js/libs/jquery.dataTables.rowReordering.js"></script>
<script type="text/javascript">
$("#menu_link_scan").click(function() {
$('#chkoptions').submit();
@@ -160,6 +162,10 @@
"iDisplayLength": 25,
"sPaginationType": "full_numbers",
"aaSorting": []
})
.rowReordering({
sURL: "orderThis",
sRequestType: "GET"
});
resetFilters("item");
}

View File

@@ -0,0 +1,240 @@
/*
* File: jquery.dataTables.rowReordering.js
* Version: 1.0.0.
* Author: Jovan Popovic
*
* Copyright 2012 Jovan Popovic, all rights reserved.
*
* This source file is free software, under either the GPL v2 license or a
* BSD style license, as supplied with this software.
*
* This source file is distributed in the hope that it will be useful, but
* WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
* or FITNESS FOR A PARTICULAR PURPOSE.
*
* Parameters:
* @iIndexColumn int Position of the indexing column
* @sURL String Server side page that will be notified that order is changed
* @iGroupingLevel int Defines that grouping is used
*/
(function ($) {
$.fn.rowReordering = function (options) {
function _fnStartProcessingMode() {
///<summary>
///Function that starts "Processing" mode i.e. shows "Processing..." dialog while some action is executing (Default function)
///</summary>
if (oTable.fnSettings().oFeatures.bProcessing) {
$(".dataTables_processing").css('visibility', 'visible');
}
}
function _fnEndProcessingMode() {
///<summary>
///Function that ends the "Processing" mode and returns the table to the normal state (Default function)
///</summary>
if (oTable.fnSettings().oFeatures.bProcessing) {
$(".dataTables_processing").css('visibility', 'hidden');
}
}
function fnGetStartPosition(sSelector) {
var iStart = 1000000;
$(sSelector, oTable).each(function () {
var iPosition = parseInt(oTable.fnGetData(this, properties.iIndexColumn));
if (iPosition < iStart)
iStart = iPosition;
});
return iStart;
}
function fnCancelSorting(tbody, properties, iLogLevel, sMessage) {
tbody.sortable('cancel');
if(iLogLevel<=properties.iLogLevel){
if(sMessage!= undefined){
properties.fnAlert(sMessage, "");
}else{
properties.fnAlert("Row cannot be moved", "");
}
}
properties.fnEndProcessingMode();
}
function fnGetState(sSelector, id) {
var tr = $("#" + id);
var iCurrentPosition = oTable.fnGetData(tr[0], properties.iIndexColumn);
var iNewPosition = -1; // fnGetStartPosition(sSelector);
var sDirection;
var trPrevious = tr.prev(sSelector);
if (trPrevious.length > 0) {
iNewPosition = parseInt(oTable.fnGetData(trPrevious[0], properties.iIndexColumn));
if (iNewPosition < iCurrentPosition) {
iNewPosition = iNewPosition + 1;
}
} else {
var trNext = tr.next(sSelector);
if (trNext.length > 0) {
iNewPosition = parseInt(oTable.fnGetData(trNext[0], properties.iIndexColumn));
if (iNewPosition > iCurrentPosition)//moved back
iNewPosition = iNewPosition - 1;
}
}
if (iNewPosition < iCurrentPosition)
sDirection = "back";
else
sDirection = "forward";
return { sDirection: sDirection, iCurrentPosition: iCurrentPosition, iNewPosition: iNewPosition };
}
function fnMoveRows(sSelector, iCurrentPosition, iNewPosition, sDirection, id, sGroup) {
var iStart = iCurrentPosition;
var iEnd = iNewPosition;
if (sDirection == "back") {
iStart = iNewPosition;
iEnd = iCurrentPosition;
}
$(oTable.fnGetNodes()).each(function () {
if (sGroup != "" && $(this).attr("data-group") != sGroup)
return;
var tr = this;
var iRowPosition = parseInt(oTable.fnGetData(tr, properties.iIndexColumn));
if (iStart <= iRowPosition && iRowPosition <= iEnd) {
if (tr.id == id) {
oTable.fnUpdate(iNewPosition,
oTable.fnGetPosition(tr), // get row position in current model
properties.iIndexColumn,
false); // false = defer redraw until all row updates are done
} else {
if (sDirection == "back") {
oTable.fnUpdate(iRowPosition + 1,
oTable.fnGetPosition(tr), // get row position in current model
properties.iIndexColumn,
false); // false = defer redraw until all row updates are done
} else {
oTable.fnUpdate(iRowPosition - 1,
oTable.fnGetPosition(tr), // get row position in current model
properties.iIndexColumn,
false); // false = defer redraw until all row updates are done
}
}
}
});
var oSettings = oTable.fnSettings();
//Standing Redraw Extension
//Author: Jonathan Hoguet
//http://datatables.net/plug-ins/api#fnStandingRedraw
if (oSettings.oFeatures.bServerSide === false) {
var before = oSettings._iDisplayStart;
oSettings.oApi._fnReDraw(oSettings);
//iDisplayStart has been reset to zero - so lets change it back
oSettings._iDisplayStart = before;
oSettings.oApi._fnCalculateEnd(oSettings);
}
//draw the 'current' page
oSettings.oApi._fnDraw(oSettings);
}
function _fnAlert(message, type) { alert(message); }
var oTable = this;
var defaults = {
iIndexColumn: 0,
iStartPosition: 1,
sURL: null,
sRequestType: "POST",
iGroupingLevel: 0,
fnAlert: _fnAlert,
iLogLevel: 1,
sDataGroupAttribute: "data-group",
fnStartProcessingMode: _fnStartProcessingMode,
fnEndProcessingMode: _fnEndProcessingMode
};
var properties = $.extend(defaults, options);
var iFrom, iTo;
return this.each(function () {
var aaSortingFixed = (oTable.fnSettings().aaSortingFixed == null ? new Array() : oTable.fnSettings().aaSortingFixed);
aaSortingFixed.push([properties.iIndexColumn, "asc"]);
oTable.fnSettings().aaSortingFixed = aaSortingFixed;
for (var i = 0; i < oTable.fnSettings().aoColumns.length; i++) {
oTable.fnSettings().aoColumns[i].bSortable = false;
/*for(var j=0; j<aaSortingFixed.length; j++)
{
if( i == aaSortingFixed[j][0] )
oTable.fnSettings().aoColumns[i].bSortable = false;
}*/
}
oTable.fnDraw();
$("tbody", oTable).sortable({
cursor: "move",
update: function (event, ui) {
var tbody = $(this);
var sSelector = "tbody tr";
var sGroup = "";
if (properties.bGroupingUsed) {
sGroup = $(ui.item).attr(properties.sDataGroupAttribute);
if(sGroup==null || sGroup==undefined){
fnCancelSorting(tbody, properties, 3, "Grouping row cannot be moved");
return;
}
sSelector = "tbody tr[" + properties.sDataGroupAttribute + " ='" + sGroup + "']";
}
var oState = fnGetState(sSelector, ui.item.context.id);
if(oState.iNewPosition == -1)
{
fnCancelSorting(tbody, properties,2);
return;
}
if (properties.sURL != null) {
properties.fnStartProcessingMode();
$.ajax({
url: properties.sURL,
type: properties.sRequestType,
data: { id: ui.item.context.id,
fromPosition: oState.iCurrentPosition,
toPosition: oState.iNewPosition,
direction: oState.sDirection,
group: sGroup
},
success: function () {
fnMoveRows(sSelector, oState.iCurrentPosition, oState.iNewPosition, oState.sDirection, ui.item.context.id, sGroup);
properties.fnEndProcessingMode();
},
error: function (jqXHR) {
fnCancelSorting(tbody, properties, 1, jqXHR.statusText);
}
});
} else {
fnMoveRows(sSelector, oState.iCurrentPosition, oState.iNewPosition, oState.sDirection, ui.item.context.id, sGroup);
}
}
});
});
};
})(jQuery);

View File

@@ -35,7 +35,7 @@ import datetime
class FailedProcessor(object):
""" Handles Failed downloads that are passed from SABnzbd thus far """
def __init__(self, nzb_name=None, nzb_folder=None, id=None, issueid=None, comicid=None, prov=None, queue=None):
def __init__(self, nzb_name=None, nzb_folder=None, id=None, issueid=None, comicid=None, prov=None, queue=None, oneoffinfo=None):
"""
nzb_name : Full name of the nzb file that has returned as a fail.
nzb_folder: Full path to the folder of the failed download.
@@ -56,6 +56,11 @@ class FailedProcessor(object):
else:
self.comicid = None
if oneoffinfo:
self.oneoffinfo = oneoffinfo
else:
self.oneoffinfo = None
self.prov = prov
if queue: self.queue = queue
self.valreturn = []
@@ -273,24 +278,31 @@
logger.fdebug(module + 'nzb_id: ' + str(self.id))
logger.fdebug(module + 'prov: ' + self.prov)
if 'annual' in self.nzb_name.lower():
logger.info(module + ' Annual detected.')
annchk = "yes"
issuenzb = myDB.selectone("SELECT * from annuals WHERE IssueID=? AND ComicName NOT NULL", [self.issueid]).fetchone()
logger.fdebug('oneoffinfo: ' + str(self.oneoffinfo))
if self.oneoffinfo:
ComicName = self.oneoffinfo['ComicName']
IssueNumber = self.oneoffinfo['IssueNumber']
else:
issuenzb = myDB.selectone("SELECT * from issues WHERE IssueID=? AND ComicName NOT NULL", [self.issueid]).fetchone()
if 'annual' in self.nzb_name.lower():
logger.info(module + ' Annual detected.')
annchk = "yes"
issuenzb = myDB.selectone("SELECT * from annuals WHERE IssueID=? AND ComicName NOT NULL", [self.issueid]).fetchone()
else:
issuenzb = myDB.selectone("SELECT * from issues WHERE IssueID=? AND ComicName NOT NULL", [self.issueid]).fetchone()
ctrlVal = {"IssueID": self.issueid}
Vals = {"Status": 'Failed'}
myDB.upsert("issues", Vals, ctrlVal)
ctrlVal = {"IssueID": self.issueid}
Vals = {"Status": 'Failed'}
myDB.upsert("issues", Vals, ctrlVal)
ComicName = issuenzb['ComicName']
IssueNumber = issuenzb['Issue_Number']
ctrlVal = {"ID": self.id,
"Provider": self.prov,
"NZBName": self.nzb_name}
Vals = {"Status": 'Failed',
"ComicName": issuenzb['ComicName'],
"Issue_Number": issuenzb['Issue_Number'],
"ComicName": ComicName,
"Issue_Number": IssueNumber,
"IssueID": self.issueid,
"ComicID": self.comicid,
"DateFailed": helpers.now()}

View File

@@ -621,15 +621,17 @@
logger.fdebug(module + ' Removed temporary directory : ' + self.nzb_folder)
#delete entry from nzblog table
IssArcID = 'S' + str(ml['IssueArcID'])
myDB.action('DELETE from nzblog WHERE IssueID=? AND SARC=?', [IssArcID,ml['StoryArc']])
if 'S' in sandwich:
IssArcID = 'S' + str(ml['IssueArcID'])
myDB.action('DELETE from nzblog WHERE IssueID=? AND SARC=?', [IssArcID,ml['StoryArc']])
logger.fdebug(module + ' IssueArcID: ' + str(ml['IssueArcID']))
ctrlVal = {"IssueArcID": ml['IssueArcID']}
newVal = {"Status": "Downloaded",
"Location": grab_dst}
logger.fdebug('writing: ' + str(newVal) + ' -- ' + str(ctrlVal))
myDB.upsert("readinglist", newVal, ctrlVal)
logger.fdebug(module + ' IssueArcID: ' + str(ml['IssueArcID']))
ctrlVal = {"IssueArcID": ml['IssueArcID']}
newVal = {"Status": "Downloaded",
"Location": grab_dst}
logger.fdebug('writing: ' + str(newVal) + ' -- ' + str(ctrlVal))
myDB.upsert("readinglist", newVal, ctrlVal)
logger.fdebug(module + ' [' + ml['StoryArc'] + '] Post-Processing completed for: ' + grab_dst)
else:
@@ -708,6 +710,14 @@
sandwich = issueid
elif 'G' in issueid or '-' in issueid:
sandwich = 1
elif issueid == '1':
logger.info(module + ' [ONE-OFF POST-PROCESSING] One-off download detected. Post-processing as a non-watchlist item.')
sandwich = None #arbitrarily set it to None just to force one-off downloading below.
else:
logger.error(module + ' Download not detected as being initiated via Mylar. Unable to continue post-processing this item. Either download the issue with Mylar, or post-process it manually.')
self.valreturn.append({"self.log": self.log,
"mode": 'stop'})
return self.queue.put(self.valreturn)
else:
logger.info(module + ' Successfully located issue as an annual. Continuing.')
annchk = "yes"
@@ -724,7 +734,7 @@
# sandwich = issueid
# elif 'G' in issueid or '-' in issueid:
# sandwich = 1
if helpers.is_number(sandwich):
if sandwich is not None and helpers.is_number(sandwich):
if sandwich < 900000:
# if sandwich is less than 900000 it's a normal watchlist download. Bypass.
pass
@@ -732,7 +742,7 @@
if issuenzb is None or 'S' in sandwich or int(sandwich) >= 900000:
# this has no issueID, therefore it's a one-off or a manual post-proc.
# At this point, let's just drop it into the Comic Location folder and forget about it..
if 'S' in sandwich:
if sandwich is not None and 'S' in sandwich:
self._log("One-off STORYARC mode enabled for Post-Processing for " + str(sarc))
logger.info(module + ' One-off STORYARC mode enabled for Post-Processing for ' + str(sarc))
arcdir = helpers.filesafe(sarc)
@@ -770,11 +780,12 @@
if odir is None:
odir = self.nzb_folder
issuearcid = re.sub('S', '', issueid)
logger.fdebug(module + ' issuearcid:' + str(issuearcid))
arcdata = myDB.selectone("SELECT * FROM readinglist WHERE IssueArcID=?", [issuearcid]).fetchone()
if sandwich is not None and 'S' in sandwich:
issuearcid = re.sub('S', '', issueid)
logger.fdebug(module + ' issuearcid:' + str(issuearcid))
arcdata = myDB.selectone("SELECT * FROM readinglist WHERE IssueArcID=?", [issuearcid]).fetchone()
issueid = arcdata['IssueID']
issueid = arcdata['IssueID']
#tag the meta.
if mylar.ENABLE_META:
self._log("Metatagging enabled - proceeding...")
@@ -795,7 +806,7 @@
logger.info(module + ' Successfully wrote metadata to .cbz (' + ofilename + ') - Continuing..')
self._log('Successfully wrote metadata to .cbz (' + ofilename + ') - proceeding...')
if 'S' in sandwich:
if sandwich is not None and 'S' in sandwich:
if mylar.STORYARCDIR:
grdst = storyarcd
else:
@@ -808,7 +819,7 @@
filechecker.validateAndCreateDirectory(grdst, True, module=module)
if 'S' in sandwich:
if sandwich is not None and 'S' in sandwich:
#if from a StoryArc, check to see if we're appending the ReadingOrder to the filename
if mylar.READ2FILENAME:
logger.fdebug(module + ' readingorder#: ' + str(arcdata['ReadingOrder']))
@@ -850,7 +861,7 @@
#delete entry from nzblog table
myDB.action('DELETE from nzblog WHERE issueid=?', [issueid])
if 'S' in issueid:
if sandwich is not None and 'S' in sandwich:
#issuearcid = re.sub('S', '', issueid)
logger.info(module + ' IssueArcID is : ' + str(issuearcid))
ctrlVal = {"IssueArcID": issuearcid}
@@ -861,8 +872,8 @@
logger.info('wrote.')
logger.info(module + ' Updated status to Downloaded')
logger.info(module + ' Post-Processing completed for: [' + sarc + '] ' + grab_dst)
self._log(u"Post Processing SUCCESSFUL! ")
logger.info(module + ' Post-Processing completed for: [' + sarc + '] ' + grab_dst)
self._log(u"Post Processing SUCCESSFUL! ")
self.valreturn.append({"self.log": self.log,
"mode": 'stop'})
@@ -972,6 +983,19 @@
elif u'\u221e' in issuenum:
#issnum = utf-8 will encode the infinity symbol without any help
issuenum = 'infinity'
else:
issue_exceptions = ['A',
'B',
'C',
'X',
'O']
exceptionmatch = [x for x in issue_exceptions if x.lower() in issuenum.lower()]
if exceptionmatch:
logger.fdebug('[FILECHECKER] We matched on : ' + str(exceptionmatch))
for x in exceptionmatch:
issuenum = re.sub("[^0-9]", "", issuenum)
issue_except = x
if '.' in issuenum:
iss_find = issuenum.find('.')
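The exception list added above strips a known alpha suffix off the issue number before the numeric handling runs. A self-contained sketch of that behaviour (simplified; the helper name is invented):

import re

ISSUE_EXCEPTIONS = ['A', 'B', 'C', 'X', 'O']

def split_issue(issuenum):
    # mirror the hunk above: find any exception letter, keep only the digits,
    # and remember the (last) matching letter as the alpha suffix
    matches = [x for x in ISSUE_EXCEPTIONS if x.lower() in issuenum.lower()]
    if matches:
        return re.sub('[^0-9]', '', issuenum), matches[-1]
    return issuenum, None

# split_issue('15A')  ->  ('15', 'A')
# split_issue('7')    ->  ('7', None)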
@@ -1007,11 +1031,11 @@
elif mylar.ZERO_LEVEL_N == "00x": zeroadd = "00"
logger.fdebug(module + ' Zero Suppression set to : ' + str(mylar.ZERO_LEVEL_N))
if len(issueno) > 1:
if issueno.isalpha():
self._log('issue detected as an alpha.')
prettycomiss = str(issueno)
elif int(issueno) < 0:
self._log("issue detected as a negative")
prettycomiss = '-' + str(zeroadd) + str(abs(int(issueno)))

View File

@@ -75,7 +75,7 @@ def pulldetails(comicid, type, issueid=None, offset=1, arclist=None, comicidlist
elif type == 'comicyears':
PULLURL = mylar.CVURL + 'volumes/?api_key=' + str(comicapi) + '&format=xml&filter=id:' + str(comicidlist) + '&field_list=name,id,start_year,publisher&offset=' + str(offset)
#logger.info('PULLURL: ' + PULLURL)
#logger.info('CV.PULLURL: ' + PULLURL)
#CV API Check here.
if mylar.CVAPI_COUNT == 0 or mylar.CVAPI_COUNT >= mylar.CVAPI_MAX:
chkit = cvapi_check()
@@ -128,7 +128,7 @@ def getComic(comicid, type, issueid=None, arc=None, arcid=None, arclist=None, co
return False
countResults = 0
while (countResults < int(totalResults)):
logger.fdebug("querying " + str(countResults))
logger.fdebug("querying range from " + str(countResults) + " to " + str(countResults + 100))
if countResults > 0:
#new api - have to change to page # instead of offset count
offsetcount = countResults
@@ -144,7 +144,6 @@ def getComic(comicid, type, issueid=None, arc=None, arcid=None, arclist=None, co
issue['issuechoice'] = ndic
issue['firstdate'] = firstdate
return issue
elif type == 'comic':

View File

@@ -174,7 +174,14 @@ def listFiles(dir, watchcomic, Publisher, AlternateSearch=None, manual=None, sar
if AlternateSearch is not None:
chkthealt = AlternateSearch.split('##')
if chkthealt == 0:
AS_Alternate = AlternateSearch
u_altsearchcomic = AS_Alternate.encode('ascii', 'ignore').strip()
altsearchcomic = re.sub('[\_\#\,\/\:\;\.\!\$\%\+\?\@]', ' ', u_altsearchcomic)
altsearchcomic = re.sub('[\-\']', '', altsearchcomic) #because this is a watchcomic registered, use same algorithm for watchcomic
altsearchcomic = re.sub('\&', ' and ', altsearchcomic)
#if detectthe_sub == True:
altsearchcomic = re.sub("\\bthe\\b", "", altsearchcomic.lower())
altsearchcomic = re.sub('\s+', ' ', str(altsearchcomic)).strip()
AS_Alternate = altsearchcomic
for calt in chkthealt:
AS_tupled = False
AS_Alternate = re.sub('##', '', calt)
@@ -211,7 +218,12 @@ def listFiles(dir, watchcomic, Publisher, AlternateSearch=None, manual=None, sar
for i in watchcomic.split():
if i.isdigit():
numberinseries = 'True'
else:
digitstogether = re.sub("[^0-9]", "", i).strip()
if digitstogether.isdigit():
logger.fdebug('[FILECHECKER] Detected digits attached to series title with no spacing.')
numberinseries = 'True'
if ('20' in i or '19' in i):
if i.isdigit():
numberinseries = 'True'
@@ -242,6 +254,9 @@ def listFiles(dir, watchcomic, Publisher, AlternateSearch=None, manual=None, sar
bracket_word = watchcomic[bracket_length_st:bracket_length_en +1]
logger.fdebug('[FILECHECKER] bracketinseries: ' + str(bracket_word))
if any([numberinseries == 'True', decimalinseries == 'True', bracketsinseries == 'True']):
break
logger.fdebug('[FILECHECKER] numberinseries: ' + str(numberinseries))
logger.fdebug('[FILECHECKER] decimalinseries: ' + str(decimalinseries))
logger.fdebug('[FILECHECKER] bracketinseries: ' + str(bracketsinseries))
@@ -1046,7 +1061,7 @@ def listFiles(dir, watchcomic, Publisher, AlternateSearch=None, manual=None, sar
else:
continue
if 'annual' in subname.lower():
if 'annual' in subname.lower() and mylar.ANNUALS_ON:
subname = re.sub('annual', '', subname.lower())
subname = re.sub('\s+', ' ', subname)
#if the sub has an annual, let's remove it from the modwatch as well
@@ -1100,13 +1115,14 @@ def listFiles(dir, watchcomic, Publisher, AlternateSearch=None, manual=None, sar
logger.fdebug('[FILECHECKER] sub_removed: ' + sub_removed)
split_sub = sub_removed.rsplit(' ', 1)[0].split(' ') #removes last word (assuming it's the issue#)
split_mod = modwatchcomic.replace('_', ' ').split() #batman
i = 0
newc = ''
while (i < len(split_mod)):
newc += split_sub[i] + ' '
i+=1
if newc:
split_sub = newc.strip().split()
#i = 0
#newc = ''
split_sub = ' '.join(split_sub).strip().split()
#while (i < len(split_mod)):
# newc += ' ' + split_sub[i]
# i+=1
#if newc:
# split_sub = newc.strip().split()
logger.fdebug('[FILECHECKER] split_sub: ' + str(split_sub))
logger.fdebug('[FILECHECKER] split_mod: ' + str(split_mod))

View File

@@ -1343,7 +1343,7 @@ def IssueDetails(filelocation, IssueID=None):
local_file.write(inzipfile.read(infile))
local_file.close()
cover = "found"
elif any(['00a' in infile, '00b' in infile, '00c' in infile, '00d' in infile, '00e' in infile]) and infile.endswith(pic_extensiosn):
elif any(['00a' in infile, '00b' in infile, '00c' in infile, '00d' in infile, '00e' in infile]) and infile.endswith(pic_extensions):
logger.fdebug('Found Alternate cover - ' + infile + ' . Extracting.')
altlist = ('00a', '00b', '00c', '00d', '00e')
for alt in altlist:
@@ -1830,10 +1830,15 @@ def parse_32pfeed(rssfeedline):
# return True
# elif mylar.FILE_OPS == 'softlink':
# try:
# os.symlink( path,dst )
# except:
# print 'Unable to create symlink.'
# return False
# os.symlink( path, dst )
# except OSError, e:
# if e.errno == errno.EEXIST:
# os.remove(dst)
# os.symlink( path, dst )
# else:
# raise e
# print 'Unable to create symlink.'
# return False
# return True
# else:
# return False

View File

@@ -252,10 +252,14 @@ def addComictoDB(comicid, mismatch=None, pullupd=None, imported=None, ogcname=No
pass
else:
n = 0
logger.fdebug('[IMPORTER-ANNUAL] - There are ' + str(sr['issues']) + ' annuals in this series.')
while (n < int(sr['issues'])):
if int(sr['issues']) == 0 and len(issued['issuechoice']) == 1:
sr_issues = 1
else:
sr_issues = sr['issues']
logger.fdebug('[IMPORTER-ANNUAL (MAIN)] - There are ' + str(sr_issues) + ' annuals in this series.')
while (n < int(sr_issues)):
try:
firstval = issued['issuechoice'][n]
firstval = issued['issuechoice'][n]
except IndexError:
break
try:
@@ -1081,9 +1085,12 @@ def manualAnnual(manual_comicid, comicname, comicyear, comicid):
return
else:
n = 0
noissues = sr['ComicIssues']
logger.fdebug('there are ' + str(noissues) + ' annuals within this series.')
issued = cv.getComic(re.sub('4050-', '', manual_comicid).strip(), 'issue')
if issued == 'apireached':
return 'apireached'
if int(sr['ComicIssues']) == 0 and len(issued['issuechoice']) == 1:
noissues = 1
else:
noissues = sr['ComicIssues']
logger.fdebug('there are ' + str(noissues) + ' annuals within this series.')
while (n < int(noissues)):
@@ -1502,8 +1509,12 @@ def annual_check(ComicName, SeriesYear, comicid, issuetype, issuechk, weeklyissu
pass
else:
n = 0
logger.fdebug('[IMPORTER-ANNUAL] - There are ' + str(sr['issues']) + ' annuals in this series.')
while (n < int(sr['issues'])):
if int(sr['issues']) == 0 and len(issued['issuechoice']) == 1:
sr_issues = 1
else:
sr_issues = sr['issues']
logger.fdebug('[IMPORTER-ANNUAL] - There are ' + str(sr_issues) + ' annuals in this series.')
while (n < int(sr_issues)):
try:
firstval = issued['issuechoice'][n]
except IndexError:

View File

@@ -61,7 +61,7 @@ def pullsearch(comicapi, comicquery, offset, explicit, type):
PULLURL = mylar.CVURL + str(type) + 's?api_key=' + str(comicapi) + '&filter=name:' + u_comicquery + '&field_list=id,name,start_year,site_detail_url,count_of_issues,image,publisher,deck,description&format=xml&offset=' + str(offset) # 2012/22/02 - CVAPI flipped back to offset instead of page
#all these imports are standard on most modern python implementations
#CV API Check here.
#logger.info('PULLURL:' + PULLURL)
#logger.info('MB.PULLURL:' + PULLURL)
if mylar.CVAPI_COUNT == 0 or mylar.CVAPI_COUNT >= mylar.CVAPI_MAX:
chkit = cvapi_check()
if chkit == False:
@@ -457,3 +457,4 @@ def storyarcinfo(xmlid):
}
return arcinfo

View File

@@ -12,6 +12,7 @@ import unicodedata
from decimal import Decimal
from HTMLParser import HTMLParseError
from time import strptime
import lib.requests as requests
import mylar
from mylar import logger
@@ -19,9 +20,15 @@ from mylar import logger
def newpull():
pagelinks = "http://www.previewsworld.com/Home/1/1/71/952"
pageresponse = urllib2.urlopen (pagelinks)
soup = BeautifulSoup (pageresponse)
try:
r = requests.get(pagelinks, verify=False)
except Exception, e:
logger.warn('Error fetching data: %s' % e)
soup = BeautifulSoup(r.content)
getthedate = soup.findAll("div", {"class": "Headline"})[0]
#the date will be in the FIRST ahref
try:
getdate_link = getthedate('a')[0]
@@ -42,52 +49,52 @@ def newpull():
endthis = False
pull_list = []
publishers = {'914': 'DARK HORSE COMICS', '915': 'DC COMICS', '916': 'IDW PUBLISHING', '917': 'IMAGE COMICS', '918': 'MARVEL COMICS', '952': 'COMICS & GRAPHIC NOVELS'}
publishers = {'PREVIEWS PUBLICATIONS', 'DARK HORSE COMICS', 'DC COMICS', 'IDW PUBLISHING', 'IMAGE COMICS', 'MARVEL COMICS', 'COMICS & GRAPHIC NOVELS'}
isspublisher = None
while (x < lenlinks):
headt = cntlinks[x] #iterate through the hrefs pulling out only results.
if 'STK669382' in str(headt):
x+=1
continue
elif '?stockItemID=' in str(headt):
#914 - Dark Horse Comics
#915 - DC Comics
#916 - IDW Publishing
#917 - Image Comics
#918 - Marvel Comics
#952 - Comics & Graphic Novels
# - Magazines
findurl_link = headt.findAll('a', href=True)[0]
urlID = findurl_link.findNext(text=True)
issue_link = findurl_link['href']
issue_lk = issue_link.find('?stockItemID=')
if issue_lk == -1:
continue
#headName = headt.findNext(text=True)
publisher_id = issue_link[issue_lk -3:issue_lk]
for pub in publishers:
if pub == publisher_id:
isspublisher = publishers[pub]
#logger.fdebug('publisher:' + str(isspublisher))
found_iss = headt.findAll('td')
if "Home/1/1/71/920" in issue_link:
#logger.fdebug('Ignoring - menu option.')
return
if "PREVIEWS" in headt:
#logger.fdebug('Ignoring: ' + found_iss[0])
break
if "MAGAZINES" in headt:
#logger.fdebug('End.')
endthis = True
break
if len(found_iss) > 0:
pull_list.append({"iss_url": found_iss[0],
"name": found_iss[1].findNext(text=True),
"price": found_iss[2],
"publisher": isspublisher,
"ID": urlID})
found_iss = headt.findAll('td')
pubcheck = found_iss[0].text.strip() #.findNext(text=True)
for pub in publishers:
if pub in pubcheck:
chklink = found_iss[0].findAll('a', href=True) #make sure it doesn't have a link in it.
if not chklink:
isspublisher = pub
break
if isspublisher == 'PREVIEWS PUBLICATIONS' or isspublisher is None:
pass
else:
if '/Catalog/' in str(headt):
findurl_link = headt.findAll('a', href=True)[0]
urlID = findurl_link.findNext(text=True)
issue_link = findurl_link['href']
issue_lk = issue_link.find('/Catalog/')
if issue_lk == -1:
x+=1
continue
elif "Home/1/1/71" in issue_link:
#logger.fdebug('Ignoring - menu option.')
x+=1
continue
if len(found_iss) > 0:
pull_list.append({"iss_url": issue_link,
"name": found_iss[1].findNext(text=True),
"price": found_iss[2],
"publisher": isspublisher,
"ID": urlID})
if "PREVIEWS" in headt:
#logger.fdebug('Ignoring: ' + found_iss[0])
break
if "MAGAZINES" in headt:
#logger.fdebug('End.')
endthis = True
break
if endthis == True: break
x+=1
logger.fdebug('Saving new pull-list information into local file for subsequent merge')

View File

@@ -167,7 +167,7 @@ class PUSHOVER:
'title': event,
'priority': mylar.PUSHOVER_PRIORITY}
r = self._session.post(self.PUSHOVER_URL, data=data)
r = self._session.post(self.PUSHOVER_URL, data=data, verify=True)
if r.status_code == 200:
logger.info(module + ' PushOver notifications sent.')

View File

@@ -199,8 +199,13 @@ def search_init(ComicName, IssueNumber, ComicYear, SeriesYear, Publisher, IssueD
#torprtmp = 0 # torprtmp = torpr
prov_count = 0
while (prov_count <= len(prov_order) -1):
#while (torprtmp <= torpr): #(torprtmp >=0 ):
if len(prov_order) == 1:
tmp_prov_count = 1
else:
tmp_prov_count = len(prov_order) - 1
while (prov_count < tmp_prov_count): #len(prov_order) -1):
send_prov_count = tmp_prov_count - prov_count
newznab_host = None
if prov_order[prov_count] == '32p':
searchprov = '32P'
@@ -221,7 +226,7 @@ def search_init(ComicName, IssueNumber, ComicYear, SeriesYear, Publisher, IssueD
searchprov = prov_order[prov_count].lower()
if searchmode == 'rss':
findit = NZB_SEARCH(ComicName, IssueNumber, ComicYear, SeriesYear, Publisher, IssueDate, StoreDate, searchprov, prov_count, IssDateFix, IssueID, UseFuzzy, newznab_host, ComicVersion=ComicVersion, SARC=SARC, IssueArcID=IssueArcID, RSS="yes", ComicID=ComicID, issuetitle=issuetitle, unaltered_ComicName=unaltered_ComicName)
findit = NZB_SEARCH(ComicName, IssueNumber, ComicYear, SeriesYear, Publisher, IssueDate, StoreDate, searchprov, send_prov_count, IssDateFix, IssueID, UseFuzzy, newznab_host, ComicVersion=ComicVersion, SARC=SARC, IssueArcID=IssueArcID, RSS="yes", ComicID=ComicID, issuetitle=issuetitle, unaltered_ComicName=unaltered_ComicName)
if findit == 'yes':
logger.fdebug("findit = found!")
break
@@ -234,7 +239,7 @@ def search_init(ComicName, IssueNumber, ComicYear, SeriesYear, Publisher, IssueD
for calt in chkthealt:
AS_Alternate = re.sub('##', '', calt)
logger.info(u"Alternate Search pattern detected...re-adjusting to : " + str(AS_Alternate) + " " + str(ComicYear))
findit = NZB_SEARCH(AS_Alternate, IssueNumber, ComicYear, SeriesYear, Publisher, IssueDate, StoreDate, searchprov, prov_count, IssDateFix, IssueID, UseFuzzy, newznab_host, ComicVersion=ComicVersion, SARC=SARC, IssueArcID=IssueArcID, RSS="yes", ComicID=ComicID, issuetitle=issuetitle, unaltered_ComicName=AS_Alternate)
findit = NZB_SEARCH(AS_Alternate, IssueNumber, ComicYear, SeriesYear, Publisher, IssueDate, StoreDate, searchprov, send_prov_count, IssDateFix, IssueID, UseFuzzy, newznab_host, ComicVersion=ComicVersion, SARC=SARC, IssueArcID=IssueArcID, RSS="yes", ComicID=ComicID, issuetitle=issuetitle, unaltered_ComicName=AS_Alternate)
if findit == 'yes':
break
if findit == 'yes': break
@@ -242,24 +247,24 @@ def search_init(ComicName, IssueNumber, ComicYear, SeriesYear, Publisher, IssueD
else:
if searchprov == '32P':
logger.fdebug('32P backlog searching is not currently supported.')
break
findit = NZB_SEARCH(ComicName, IssueNumber, ComicYear, SeriesYear, Publisher, IssueDate, StoreDate, searchprov, prov_count, IssDateFix, IssueID, UseFuzzy, newznab_host, ComicVersion=ComicVersion, SARC=SARC, IssueArcID=IssueArcID, ComicID=ComicID, issuetitle=issuetitle, unaltered_ComicName=unaltered_ComicName)
if findit == 'yes':
logger.fdebug("findit = found!")
break
else:
if AlternateSearch is not None and AlternateSearch != "None":
chkthealt = AlternateSearch.split('##')
if chkthealt == 0:
AS_Alternate = AlternateSearch
loopit = len(chkthealt)
for calt in chkthealt:
AS_Alternate = re.sub('##', '', calt)
logger.info(u"Alternate Search pattern detected...re-adjusting to : " + str(AS_Alternate) + " " + str(ComicYear))
findit = NZB_SEARCH(AS_Alternate, IssueNumber, ComicYear, SeriesYear, Publisher, IssueDate, StoreDate, searchprov, prov_count, IssDateFix, IssueID, UseFuzzy, newznab_host, ComicVersion=ComicVersion, SARC=SARC, IssueArcID=IssueArcID, ComicID=ComicID, issuetitle=issuetitle, unaltered_ComicName=unaltered_ComicName)
if findit == 'yes':
break
if findit == 'yes': break
findit = NZB_SEARCH(ComicName, IssueNumber, ComicYear, SeriesYear, Publisher, IssueDate, StoreDate, searchprov, send_prov_count, IssDateFix, IssueID, UseFuzzy, newznab_host, ComicVersion=ComicVersion, SARC=SARC, IssueArcID=IssueArcID, ComicID=ComicID, issuetitle=issuetitle, unaltered_ComicName=unaltered_ComicName)
if findit == 'yes':
logger.fdebug("findit = found!")
break
else:
if AlternateSearch is not None and AlternateSearch != "None":
chkthealt = AlternateSearch.split('##')
if chkthealt == 0:
AS_Alternate = AlternateSearch
loopit = len(chkthealt)
for calt in chkthealt:
AS_Alternate = re.sub('##', '', calt)
logger.info(u"Alternate Search pattern detected...re-adjusting to : " + str(AS_Alternate) + " " + str(ComicYear))
findit = NZB_SEARCH(AS_Alternate, IssueNumber, ComicYear, SeriesYear, Publisher, IssueDate, StoreDate, searchprov, send_prov_count, IssDateFix, IssueID, UseFuzzy, newznab_host, ComicVersion=ComicVersion, SARC=SARC, IssueArcID=IssueArcID, ComicID=ComicID, issuetitle=issuetitle, unaltered_ComicName=unaltered_ComicName)
if findit == 'yes':
break
if findit == 'yes': break
if searchprov == 'newznab':
searchprov = newznab_host[0].rstrip()
@@ -400,7 +405,7 @@ def NZB_SEARCH(ComicName, IssueNumber, ComicYear, SeriesYear, Publisher, IssueDa
break
elif '.' in findcomiciss[i]:
c_number = findcomiciss[:i].rstrip()
c_num_a4 = findcomiciss[i +1:].rstrip()
c_num_a4 = findcomiciss[i+1:].rstrip()
#if decimal separates numeric from alpha (ie - 7.INH)
#don't give calpha a value or else it will separate with a space further down
#assign it to dsp_c_alpha so that it can be displayed for debugging.
@@ -415,6 +420,11 @@ def NZB_SEARCH(ComicName, IssueNumber, ComicYear, SeriesYear, Publisher, IssueDa
if c_number is None:
c_number = findcomiciss # if it's None, means no special alphas or decimals
if '.' in c_number:
decst = c_number.find('.')
c_number = c_number[:decst].rstrip()
#logger.fdebug('setting cmloopit to: ' + str(c_number))
if len(c_number) == 1:
cmloopit = 3
elif len(c_number) == 2:
@@ -439,6 +449,7 @@ def NZB_SEARCH(ComicName, IssueNumber, ComicYear, SeriesYear, Publisher, IssueDa
# results. '011' will return different than '11', as will '009' and '09'.
while (findloop < findcount):
#logger.fdebug('findloop: ' + str(findloop) + ' / findcount: ' + str(findcount))
comsrc = comsearch
while (cmloopit >= 1):
#if issue_except is None: issue_exc = ''
@@ -529,7 +540,10 @@ def NZB_SEARCH(ComicName, IssueNumber, ComicYear, SeriesYear, Publisher, IssueDa
### IF USENET_RETENTION is set, honour it
### For newznab sites, that means appending "&maxage=<whatever>" on the URL
if mylar.USENET_RETENTION != None and nzbprov != 'torznab':
findurl = findurl + "&maxage=" + str(mylar.USENET_RETENTION)
if nzbprov == 'omgwtfnzbs':
findurl = findurl + "&retention=" + str(mylar.USENET_RETENTION)
else:
findurl = findurl + "&maxage=" + str(mylar.USENET_RETENTION)
# Add a user-agent
#print ("user-agent:" + str(mylar.USER_AGENT))
@@ -1428,6 +1442,7 @@ def NZB_SEARCH(ComicName, IssueNumber, ComicYear, SeriesYear, Publisher, IssueDa
logger.info("Alphanumerics detected within IssueNumber. Seperating from Issue # and re-trying.")
cmloopit = origcmloopit
seperatealpha = "yes"
findloop+=1
if foundc == "yes":
foundcomic.append("yes")
@@ -1444,9 +1459,10 @@ def NZB_SEARCH(ComicName, IssueNumber, ComicYear, SeriesYear, Publisher, IssueDa
prov_count == 0
#break
return foundc
elif foundc == "no" and prov_count == 0:
if foundc == "no":# and prov_count == 0:
#logger.fdebug('prov_count: ' + str(prov_count))
foundcomic.append("no")
#logger.fdebug('Could not find a matching comic using ' + str(tmpprov))
if IssDateFix == "no":
#logger.info('Could not find Issue ' + str(IssueNumber) + ' of ' + ComicName + '(' + str(comyear) + ') using ' + str(tmpprov) + '. Status kept as wanted.' )
break
@@ -1916,7 +1932,11 @@ def searcher(nzbprov, nzbname, comicinfo, link, IssueID, ComicID, tmpprov, direc
try:
# only nzb providers will have a filen, try it and pass exception
if filen is None:
return FailedMark(ComicID=ComicID, IssueID=IssueID, id=nzbid, nzbname=nzbname, prov=nzbprov)
if IssueID is None:
logger.fdebug('One-off mode was initiated - Failed Download handling for : ' + ComicName + ' #' + str(IssueNumber))
comicinfo = {"ComicName": ComicName,
"IssueNumber": IssueNumber}
return FailedMark(ComicID=ComicID, IssueID=IssueID, id=nzbid, nzbname=nzbname, prov=nzbprov, oneoffinfo=comicinfo)
except:
pass
call_the_fail = Failed.FailedProcessor(nzb_name=nzbname, id=nzbid, issueid=IssueID, comicid=ComicID, prov=tmpprov)
@@ -1969,7 +1989,17 @@ def searcher(nzbprov, nzbname, comicinfo, link, IssueID, ComicID, tmpprov, direc
if rcheck == "fail":
if mylar.FAILED_DOWNLOAD_HANDLING:
logger.error('Unable to send torrent to client. Assuming incomplete link - sending to Failed Handler and continuing search.')
return FailedMark(ComicID=ComicID, IssueID=IssueID, id=nzbid, nzbname=nzbname, prov=nzbprov)
if IssueID is None:
logger.fdebug('One-off mode was initiated - Failed Download handling for : ' + ComicName + ' #' + str(IssueNumber))
comicinfo = {"ComicName": ComicName,
"IssueNumber": IssueNumber}
else:
comicinfo_temp = {"ComicName": comicinfo[0]['ComicName'],
"modcomicname": comicinfo[0]['modcomicname'],
"IssueNumber": comicinfo[0]['IssueNumber'],
"comyear": comicinfo[0]['comyear']}
comicinfo = comicinfo_temp
return FailedMark(ComicID=ComicID, IssueID=IssueID, id=nzbid, nzbname=nzbname, prov=nzbprov, oneoffinfo=comicinfo)
else:
logger.error('Unable to send torrent - check logs and settings (this would be marked as a BAD torrent if Failed Handling was enabled)')
return "torrent-fail"
@@ -2200,12 +2230,12 @@ def notify_snatch(nzbname, sent_to, modcomicname, comyear, IssueNumber, nzbprov)
return
def FailedMark(IssueID, ComicID, id, nzbname, prov):
def FailedMark(IssueID, ComicID, id, nzbname, prov, oneoffinfo=None):
# Used to pass a failed attempt at sending a download to a client, to the failed handler, and then back again to continue searching.
from mylar import Failed
FailProcess = Failed.FailedProcessor(issueid=IssueID, comicid=ComicID, id=id, nzb_name=nzbname, prov=prov)
FailProcess = Failed.FailedProcessor(issueid=IssueID, comicid=ComicID, id=id, nzb_name=nzbname, prov=prov, oneoffinfo=oneoffinfo)
Markit = FailProcess.markFailed()
if prov == '32P' or prov == 'KAT': return "torrent-fail"
@@ -2346,7 +2376,7 @@ def generate_id(nzbprov, link):
#id is located after the /download/ portion
url_parts = urlparse.urlparse(link)
path_parts = url_parts[2].rpartition('/')
nzbtempid = path_parts[2].rpartition('/')
nzbtempid = path_parts[0].rpartition('/')
nzblen = len(nzbtempid)
nzbid = nzbtempid[nzblen -1]
elif nzbprov == '32P':
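The one-character change in generate_id above (path_parts[0] instead of path_parts[2]) is easiest to see with a worked example; the link below is invented for illustration:

import urlparse  # Python 2, matching the codebase

link = 'http://nzbindex.example/download/123456/Some.Comic.001.nzb'
path_parts = urlparse.urlparse(link)[2].rpartition('/')
# path_parts == ('/download/123456', '/', 'Some.Comic.001.nzb')

old_id = path_parts[2].rpartition('/')[2]   # 'Some.Comic.001.nzb' - the filename, not the id
new_id = path_parts[0].rpartition('/')[2]   # '123456'             - the actual nzbid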

View File

@@ -969,7 +969,8 @@ class WebInterface(object):
else:
newaction = action
for IssueID in args:
if any([IssueID is None, 'issue_table' in IssueID, 'history_table' in IssueID, 'manage_issues' in IssueID, 'issue_table_length' in IssueID]):
logger.info(IssueID)
if any([IssueID is None, 'issue_table' in IssueID, 'history_table' in IssueID, 'manage_issues' in IssueID, 'issue_table_length' in IssueID, 'issues' in IssueID, 'annuals' in IssueID]):
continue
else:
mi = myDB.selectone("SELECT * FROM issues WHERE IssueID=?", [IssueID]).fetchone()
@@ -1956,14 +1957,21 @@ class WebInterface(object):
def markImports(self, action=None, **args):
myDB = db.DBConnection()
comicstoimport = []
for ComicName in args:
if action == 'massimport':
logger.info("initiating mass import mode for " + ComicName)
cid = ComicName.decode('utf-8', 'replace')
comicstoimport.append(cid)
elif action == 'removeimport':
logger.info("removing " + ComicName + " from the Import list")
myDB.action('DELETE from importresults WHERE ComicName=?', [ComicName])
if action == 'massimport':
logger.info('initiating mass import.')
cnames = myDB.select("SELECT ComicName from importresults WHERE Status='Not Imported' GROUP BY ComicName")
for cname in cnames:
comicstoimport.append(cname['ComicName'].decode('utf-8', 'replace'))
logger.info(str(len(comicstoimport)) + ' series queued for import.')
else:
for ComicName in args:
if action == 'importselected':
logger.info("initiating mass import mode for " + ComicName)
cid = ComicName.decode('utf-8', 'replace')
comicstoimport.append(cid)
elif action == 'removeimport':
logger.info("removing " + ComicName + " from the Import list")
myDB.action('DELETE from importresults WHERE ComicName=?', [ComicName])
if len(comicstoimport) > 0:
logger.debug("Mass importing the following series: %s" % comicstoimport)
@@ -2571,6 +2579,10 @@ class WebInterface(object):
ArcWatchlist.exposed = True
def SearchArcIssues(self, **kwargs):
threading.Thread(target=self.ReadGetWanted, kwargs=kwargs).start()
SearchArcIssues.exposed = True
def ReadGetWanted(self, StoryArcID):
# this will queue up (ie. make 'Wanted') issues in a given Story Arc that are 'Not Watched'
#print StoryArcID
@@ -3099,6 +3111,8 @@ class WebInterface(object):
mode='series'
displaycomic = helpers.filesafe(ComicName)
displaycomic = re.sub('[\-]','', displaycomic).strip()
displaycomic = re.sub('\s+', ' ', displaycomic).strip()
logger.fdebug('displaycomic : ' + displaycomic)
logger.fdebug('comicname : ' + ComicName)
if yearRANGE is None:

View File

@@ -130,12 +130,12 @@ def pullit(forcecheck=None):
newrl = mylar.CACHE_DIR + "/newreleases.txt"
if mylar.ALT_PULL:
logger.info('[PULL-LIST] The Alt-Pull method is currently broken. Defaulting back to the normal method of grabbing the pull-list.')
#logger.info('[PULL-LIST] Populating & Loading pull-list data directly from webpage')
#newpull.newpull()
#else:
logger.info('[PULL-LIST] Populating & Loading pull-list data from file')
f = urllib.urlretrieve(PULLURL, newrl)
#logger.info('[PULL-LIST] The Alt-Pull method is currently broken. Defaulting back to the normal method of grabbing the pull-list.')
logger.info('[PULL-LIST] Populating & Loading pull-list data directly from webpage')
newpull.newpull()
else:
logger.info('[PULL-LIST] Populating & Loading pull-list data from file')
f = urllib.urlretrieve(PULLURL, newrl)
#newtxtfile header info ("SHIPDATE\tPUBLISHER\tISSUE\tCOMIC\tEXTRA\tSTATUS\n")
#STATUS denotes default status to be applied to pulllist in Mylar (default = Skipped)
@@ -363,13 +363,13 @@ def pullit(forcecheck=None):
dupefound = "no"
#-- remove html tags when alt_pull is enabled
#if mylar.ALT_PULL:
# if '&amp;' in comicnm:
# comicnm = re.sub('&amp;', '&', comicnm).strip()
# if '&amp;' in pub:
# pub = re.sub('&amp;', '&', pub).strip()
# if '&amp;' in comicrm:
# comicrm = re.sub('&amp;', '&', comicrm).strip()
if mylar.ALT_PULL:
if '&amp;' in comicnm:
comicnm = re.sub('&amp;', '&', comicnm).strip()
if '&amp;' in pub:
pub = re.sub('&amp;', '&', pub).strip()
if '&amp;' in comicrm:
comicrm = re.sub('&amp;', '&', comicrm).strip()
#--start duplicate comic / issue chk
# pullist has shortforms of a series' title sometimes and causes problems
@@ -660,6 +660,21 @@ def pullitcheck(comic1off_name=None, comic1off_id=None, forcecheck=None, futurep
logger.fdebug("comicnm : " + str(comicnm) + " / mod :" + str(modcomicnm))
if comicnm == watchcomic.upper() or modcomicnm == modwatchcomic.upper():
if mylar.ANNUALS_ON:
if 'annual' in watchcomic.lower() and 'annual' not in comicnm.lower():
logger.fdebug('Annual detected in the watchlist title, but not in the pull-list title - not a match.')
break
else:
#(annual in comicnm & in watchcomic) or (annual in comicnm & not in watchcomic) (with annuals on) = match.
pass
else:
#annuals off
if ('annual' in comicnm.lower() and 'annual' not in watchcomic.lower()) or ('annual' in watchcomic.lower() and 'annual' not in comicnm.lower()):
logger.fdebug('Annual mismatch between the pull-list and watchlist titles while annuals are disabled - not a match.')
break
else:
#annual in comicnm & in watchcomic (with annuals off) = match.
pass
logger.fdebug("matched on:" + comicnm + "..." + watchcomic.upper())
watchcomic = unlines[cnt]
pass
@@ -1050,8 +1065,19 @@ def future_check():
logger.fdebug('Publisher of series to be added: ' + str(ser['Publisher']))
for sr in searchresults:
logger.fdebug('Comparing ' + sr['name'] + ' - to - ' + ser['ComicName'])
tmpsername = re.sub('[\'\*\^\%\$\#\@\!\-\/\,\.\:\(\)]', '', ser['ComicName']).strip()
tmpsrname = re.sub('[\'\*\^\%\$\#\@\!\-\/\,\.\:\(\)]', '', sr['name']).strip()
tmpsername = re.sub('[\'\*\^\%\$\#\@\!\/\,\.\:\(\)]', '', ser['ComicName']).strip()
tmpsrname = re.sub('[\'\*\^\%\$\#\@\!\/\,\.\:\(\)]', '', sr['name']).strip()
tmpsername = re.sub('\-', ' ', tmpsername)
if tmpsername.lower().startswith('the '):
tmpsername = re.sub('the ', ' ', tmpsername.lower()).strip()
else:
tmpsername = re.sub(' the ', ' ', tmpsername.lower()).strip()
tmpsrname = re.sub('\-', ' ', tmpsrname)
if tmpsrname.lower().startswith('the '):
tmpsrname = re.sub('the ', ' ', tmpsrname.lower()).strip()
else:
tmpsrname = re.sub(' the ', ' ', tmpsrname.lower()).strip()
logger.fdebug('Comparing ' + tmpsrname + ' - to - ' + tmpsername)
if tmpsername.lower() == tmpsrname.lower():
logger.info('Name matched successful: ' + sr['name'])
if str(sr['comicyear']) == str(theissdate):
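The net effect of the future_check normalization above is that both names are compared with punctuation stripped, hyphens turned into spaces, and a leading (or embedded) 'the' removed. A compact sketch of the same idea (assumed equivalent, for illustration only):

import re

def normalize(name):
    name = re.sub(r"['\*\^%\$#@!/,\.:\(\)]", '', name).strip()
    name = re.sub('-', ' ', name).lower()
    if name.startswith('the '):
        name = name[4:]
    else:
        name = re.sub(' the ', ' ', name)
    return re.sub(r'\s+', ' ', name).strip()

# normalize('The Astonishing Ant-Man') == normalize('Astonishing Ant Man')  ->  True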