mirror of https://github.com/evilhero/mylar
Merge branch 'development'
commit 3c3db4110b

@@ -1,7 +1,7 @@
 <%inherit file="base.html"/>
 <%!
-    import os
-    from mylar import db
+    import os, re
+    from mylar import db, helpers
    import mylar
 %>

@@ -109,13 +109,47 @@
 <label><big>Status: </big><norm>${comic['Status']}</norm></label>
 </div>
 <%
-    if comic['Type'] == 'None' or comic['Type'] is None or comic['Type'] == 'Print':
+    if any([comic['Type'] == 'None', comic['Type'] is None, comic['Type'] == 'Print']) and comic['Corrected_Type'] != 'TPB':
        comictype = 'Print'
    else:
-        comictype = 'Digital'
+        if comic['Corrected_Type'] is not None:
+            comictype = comic['Corrected_Type']
+        else:
+            comictype = comic['Type']
 %>
 <div>
-<label><big>Edition: </big><norm>${comictype}</norm></label>
+<label><big>Edition: </big><norm>${comictype}</norm>
+<%
+    if comicConfig['issue_list'] is not None:
+        cnt = 0
+        line = ''
+        for x in comicConfig['issue_list']:
+            if x['comicid'] is not None:
+                cid = helpers.listLibrary(x['comicid'])
+                try:
+                    if cid[re.sub('4050-', '', x['comicid']).strip()]['comicid']:
+                        watch = "<a href='comicDetails?ComicID=%s' title='In Watchlist'>%s</a>" % (cid[re.sub('4050-', '', x['comicid']).strip()]['comicid'], x['series'])
+                except:
+                    watch = "%s" % (x['series'])
+            else:
+                watch = x['series']
+            if cnt == 0:
+                if x['issues'] is not None:
+                    line += "<span style='display:inline-block;'>%s %s</span>" % (watch, x['issues'])
+                else:
+                    line += "<span style='display:inline-block;'>%s</span>" % watch
+            else:
+                if x['issues'] is not None:
+                    line += " / <span style='display:inline-block;'>%s %s</span>" % (watch, x['issues'])
+                else:
+                    line += " / <span style='display:inline-block;'>%s</span>" % watch
+            cnt+=1
+
+%>
+%if comicConfig['issue_list'] is not None:
+    ( Collects: ${line} )
+%endif
+</label>
 </div>
 <div>
 <label><big>Last Updated: </big>
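The Edition block added above folds the user's Corrected_Type override into what was previously a plain Print/Digital toggle. A minimal sketch of the same decision outside the Mako template, assuming a plain dict stands in for the database row:

    # Sketch only; comicdetails.html does this inline in a <% %> block.
    def edition_label(comic):
        # Missing/None/'Print' types count as print, unless the user forced TPB.
        if comic.get('Type') in (None, 'None', 'Print') and comic.get('Corrected_Type') != 'TPB':
            return 'Print'
        # An explicit user correction always wins over the ComicVine-supplied type.
        if comic.get('Corrected_Type') is not None:
            return comic['Corrected_Type']
        return comic['Type']

    print(edition_label({'Type': 'Print', 'Corrected_Type': None}))    # Print
    print(edition_label({'Type': 'Print', 'Corrected_Type': 'TPB'}))   # TPB
    print(edition_label({'Type': 'Digital', 'Corrected_Type': None}))  # Digital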
@@ -180,9 +214,20 @@
 <br/>
 <div class="row checkbox right clearfix">
 <label>Forcibly Mark as Continuing</label>
-<input type="checkbox" style="vertical-align: bottom; margin: 3px; margin-top: -3px;" name="force_continuing" value="2" ${comicConfig['force_continuing']} />
+<input type="checkbox" style="vertical-align: bottom; margin: 3px; margin-top: -3px;" name="force_continuing" value="1" ${comicConfig['force_continuing']} />
 <a href="#" title="Will forcibly mark this series as 'Continuing' regardless of actual status"><img src="interfaces/default/images/info32.png" height="16" alt="" /></a>
 </div>
+<div class="row checkbox right clearfix">
+%if comic['Type'] != 'TPB':
+    <label>Forcibly Mark series as TPB/GN</label>
+    <input type="checkbox" style="vertical-align: bottom; margin: 3px; margin-top: -3px;" name="force_type" value="1" ${comicConfig['force_type']} />
+    <a href="#" title="Will forcibly mark this series as TPB/GN in those instances where it assumes it's a normal issue-based series"><img src="interfaces/default/images/info32.png" height="16" alt="" /></a>
+%else:
+    <label>Forcibly Mark series as Print</label>
+    <input type="checkbox" style="vertical-align: bottom; margin: 3px; margin-top: -3px;" name="force_type" value="2" ${comicConfig['force_type']} />
+    <a href="#" title="Will forcibly mark this series as an Issue-Based series in those instances where it assumes it's a TPB/GN series"><img src="interfaces/default/images/info32.png" height="16" alt="" /></a>
+%endif
+</div>
 %if any([comic['ComicYear'] == '2099',comic['ComicYear'] == '0000', comic['ComicYear'] == '', comic['Corrected_SeriesYear']]):
 <div class="row">
 <label>Series Year</label>
@@ -326,7 +371,15 @@
    endif
 %>
 <td id="issuename">${issuename}</td>
-<td class="edit" title="Publication Date (click to edit)" id="${issue['ComicID']}.${issue['IssueID']}">${issue['IssueDate']}</td>
+<%
+    issdate = issue['IssueDate']
+    dateline = "Publication Date (click to edit)"
+    if all([issue['DigitalDate'] != '0000-00-00', issue['DigitalDate'] is not None]):
+        dateline += "\nDigital Release: %s" % issue['DigitalDate']
+        issdate += "**"
+    endif
+%>
+<td class="edit" title="${dateline}" id="${issue['ComicID']}.${issue['IssueID']}">${issdate}</td>
 <td id="status">${issue['Status']}
 %if issue['Status'] == 'Downloaded' or issue['Status'] == 'Archived':
 <%Csize = mylar.helpers.human_size(issue['ComicSize'])%>
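Both this hunk and the matching annuals hunk further down build the tooltip and the displayed date the same way: a known digital release appends a line to the hover text and a '**' marker to the visible date. A standalone sketch of that marking, with plain strings in place of the template's row fields:

    # Sketch of the dateline/issdate logic above; '0000-00-00' is the
    # sentinel Mylar stores when no digital release date is known.
    def mark_digital(issue_date, digital_date):
        dateline = "Publication Date (click to edit)"
        issdate = issue_date
        if digital_date is not None and digital_date != '0000-00-00':
            dateline += "\nDigital Release: %s" % digital_date
            issdate += "**"
        return dateline, issdate

    print(mark_digital('2018-06-01', '2018-06-13'))  # date comes back marked '**'
    print(mark_digital('2018-06-01', '0000-00-00'))  # date comes back unchanged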
@@ -336,7 +389,7 @@
 <td id="options">
 <a href="#" title="Manual Search" onclick="doAjaxCall('queueit?ComicID=${issue['ComicID']}&IssueID=${issue['IssueID']}&ComicIssue=${issue['Issue_Number']}&ComicYear=${issue['IssueDate']}&mode=want&manualsearch=True',$(this),'table')" data-success="Manually searching for ${comic['ComicName']} #${issue['Issue_Number']}"><img src="interfaces/default/images/search.png" height="25" width="25" /></a>
 <!--
-<a class="menu_link_edit" id="choose_specific_download" title="Choose Specific Download" href="javascript:void(0)" onclick="getAvailableDownloads('${issue['IssueID']}')"><i class="fa fa-search"></i><img src="interfaces/default/images/magnifier.png" height="25" width="25" class="highqual" /></a>
+<a class="menu_link_edit" id="choose_specific_download" title="Choose Specific Download" href="javascript:void(0)" onclick="getAvailableDownloads('${issue['IssueID']}')" data-success="Successfully started search for ${comic['ComicName']} ${issue['Issue_Number']}"><i class="fa fa-search"></i><img src="interfaces/default/images/magnifier.png" height="25" width="25" class="highqual" /></a>
 <div id="choose_specific_download_dialog" title="Choose a specific download for this issue" style="display:none" class="configtable">
 <table class="display" id="downloads_table">
 <thead>
@@ -480,7 +533,15 @@
 <td id="aint_issuenumber">${annual_Number}</td>
 <td id="aissuenumber">${annual['Issue_Number']}</td>
 <td id="aissuename">${annual['IssueName']}</td>
-<td id="areldate">${annual['IssueDate']}</td>
+<%
+    anndate = annual['IssueDate']
+    dateline = ""
+    if all([annual['DigitalDate'] != '0000-00-00', annual['DigitalDate'] is not None]):
+        dateline += "\nDigital Release: %s" % annual['DigitalDate']
+        anndate += "**"
+    endif
+%>
+<td title="${dateline}" id="areldate">${anndate}</td>
 <td id="astatus">${annual['Status']}
 %if annual['Status'] == 'Downloaded' or annual['Status'] == 'Archived':
 <%Csize = mylar.helpers.human_size(annual['ComicSize'])%>
@@ -662,7 +723,6 @@
 url: "IssueInfo",
 data: { filelocation: filelink, comicname: comicname, issue: issue, date: date, title: title },
 success: function(response) {
-    var names = response
    $('#responsethis').html(response);
 },
 error: function(data)
@@ -760,13 +820,15 @@

 function getAvailableDownloads(issueid) {
    ShowSpinner();
+    $('#ajaxMsg').html("<div class='msg'><span class='ui-icon ui-icon-check'></span>Now searching...</div>");
+    $('#ajaxMsg').addClass('success').fadeIn().delay(3000).fadeOut();
    $.getJSON("choose_specific_download", {issueid: issueid}, function(data) {
        loader.remove();
        feedback.fadeOut();
-        search_results = data
+        search_results = data;
        for( var i = 0, len = data.length; i < len; i++ ) {
-            $('#downloads_table_body').append('<tr><td id="title"><a href="javascript:void(0)" onclick="downloadSpecificRelease('+i+')">'+data[i].nzbtitle+'</a></td><td id="provider">'+data[i].provider+'</td><td id="size">'+data[i].size+'</td><td id="kind">'+data[i].kind+'</td></tr>');
-        }
+            $('#downloads_table_body').append('<tr><td id="title"><a href="javascript:void(0)" onclick="downloadSpecificRelease('+i+');">'+data[i].nzbtitle+'</a></td><td id="provider">'+data[i].provider+'</td><td id="size">'+data[i].size+'</td><td id="kind">'+data[i].kind+'</td></tr>');
+        }
        $('#downloads_table').dataTable({
            "aoColumns": [
                null,
@@ -781,6 +843,7 @@
    "bDestroy": true
 });
 $("#choose_specific_download_dialog").dialog({
+    modal: true,
    width: "60%",
    maxHeight: 500
 });
@@ -789,17 +852,24 @@
 }

 function downloadSpecificRelease(i){
-
-    name = search_results[i].nzbtitle
-    prov = search_results[i].tmpprov
-    nzbid = search_results[i].nzbid
+    name = search_results[i].nzbtitle;
+    prov = search_results[i].tmpprov;
+    nzbid = search_results[i].nzbid;
    ShowSpinner();
-    $.getJSON("download_specific_release", {nzbid: nzbid, provider: prov, name: name}, function(data) {
-        loader.remove();
-        feedback.fadeOut();
-        refreshSubmenu();
-        $("#choose_specific_download_dialog").dialog("close");
-    });
+    $.get("download_specific_release",
+        { nzbid: nzbid, provider: prov, name: name },
+        function(data) {
+            if (data.error != undefined) {
+                alert(data.error);
+                return;
+            }
+            $('#ajaxMsg').html("<div class='msg'><span class='ui-icon ui-icon-check'></span>Successfully downloaded "+name+"</div>");
+            if ( data.indexOf("success") > -1){
+                $("#choose_specific_download_dialog").dialog.close();
+            }
+            $('#ajaxMsg').addClass('success').fadeIn().delay(3000).fadeOut();
+            return false;
+    });
 }

 function ShowSpinner() {

@@ -763,8 +763,13 @@
 <small class="heading"><span style="float: left; margin-right: .3em; margin-top: 4px;" class="ui-icon ui-icon-info"></span>Note: this is an experimental search - results may be better/worse.</small>
 </div>
 </fieldset>

+
+<!--
 <fieldset>
 <div class="row checkbox left clearfix">
 <input type="checkbox" id="enable_ddl" name="enable_ddl" value=1 ${config['enable_ddl']} /><legend>Enable DDL (GetComics)</legend>
 </div>
 </fieldset>
+-->
 <fieldset>
 <div class="row checkbox left clearfix">
 <input id="enable_torrent_search" type="checkbox" onclick="initConfigCheckbox($(this));" name="enable_torrent_search" value=1 ${config['enable_torrent_search']} /><legend>Torrents</legend>
@@ -773,7 +778,7 @@
 <div class="row checkbox left clearfix">
 <input id="enable_public" title="Use Public Torrents" type="checkbox" name="enable_public" value=1 ${config['enable_public']} /><label>Enable Public Torrent Search</label>
 <div align="left">
-<small class="heading"><span style="float: left; margin-left: .3em; margin-top: 4px;" class="ui-icon ui-icon-info"></span>Search: WWT / RSS: DEM & WWT</small>
+<small class="heading"><span style="float: left; margin-left: .3em; margin-top: 4px;" class="ui-icon ui-icon-info"></span>Search: WWT / RSS: WWT</small>
 </div>
 </div>
 <div class="row checkbox left clearfix">
@@ -1160,7 +1165,7 @@
 <label>Folder Format</label>
 <input type="text" name="folder_format" value="${config['folder_format']}" size="43">
 <%
-    folder_options = "$Series = SeriesName\n$Year = SeriesYear\n$Annual = Annual (word)\n$VolumeY = V{SeriesYear}\n$VolumeN = V{Volume#}"
+    folder_options = "$Series = SeriesName\n$Year = SeriesYear\n$Annual = Annual (word)\n$VolumeY = V{SeriesYear}\n$VolumeN = V{Volume#}\n$Type = BookType (TPB/GN)"
 %>
 <a href="#" title="${folder_options}"><img src="interfaces/default/images/info32.png" height="16" alt="" /></a>
 <small>Use: $Publisher, $Series, $Year<br />
@@ -1170,7 +1175,7 @@
 <label> File Format</label>
 <input type="text" name="file_format" value="${config['file_format']}" size="43">
 <%
-    file_options = "$Series = SeriesName\n$Year = IssueYear\n$Annual = Annual (word)\n$Issue = IssueNumber\n$VolumeY = V{SeriesYear}\n$VolumeN = V{Volume#}\n$month = publication month number\n$monthname = publication month name"
+    file_options = "$Series = SeriesName\n$Year = IssueYear\n$Annual = Annual (word)\n$Issue = IssueNumber\n$VolumeY = V{SeriesYear}\n$VolumeN = V{Volume#}\n$month = publication month number\n$monthname = publication month name\n$Type = BookType (TPB)"
 %>
 <a href="#" title="${file_options}"><img src="interfaces/default/images/info32.png" height="16" alt="" /></a>
 <small>Use: $Series, $Year, $Issue<br />

@@ -30,20 +30,25 @@
 <img src="interfaces/default/images/ultron.png" style="float:right" height="125" width="125" />
 <fieldset>
 <div class="row checkbox">
-<input type="checkbox" name="autoadd" style="vertical-align: middle; margin: 3px; margin-top: -1px;" id="autoadd" value="1" ${checked(mylar.CONFIG.ADD_COMICS)}><label>Auto-add new series</label>
+<input type="checkbox" disabled name="autoadd" style="vertical-align: middle; margin: 3px; margin-top: -1px;" id="autoadd" value="1" ${checked(mylar.CONFIG.ADD_COMICS)}><label>Auto-add new series</label>
 </div>
 <div class="row checkbox">
-<input type="checkbox" style="vertical-align: middle; margin: 3px; margin-top: -1px;" name="imp_move" id="imp_move" value="1" ${checked(mylar.CONFIG.IMP_MOVE)}><label>Move files</label>
+<input type="checkbox" disabled style="vertical-align: middle; margin: 3px; margin-top: -1px;" name="imp_move" id="imp_move" value="1" ${checked(mylar.CONFIG.IMP_MOVE)}><label>Move files</label>
 </div>
+%if mylar.CONFIG.IMP_PATHS is True:
+<div class="row checkbox">
+<input type="checkbox" disabled style="vertical-align: middle; margin: 3px; margin-top: -1px;" name="imp_paths" id="imp_paths" value="1" ${checked(mylar.CONFIG.IMP_PATHS)}><label>Series directories will be set to current Imported series paths</label>
+</div>
+%endif
 %if mylar.CONFIG.RENAME_FILES:
 <div class="row checkbox">
-<input type="checkbox" style="vertical-align: middle; margin: 3px; margin-top: -1px;" name="imp_rename" id="imp_rename" value="1" ${checked(mylar.CONFIG.IMP_RENAME)}><label>Rename Files </label>
+<input type="checkbox" disabled style="vertical-align: middle; margin: 3px; margin-top: -1px;" name="imp_rename" id="imp_rename" value="1" ${checked(mylar.CONFIG.IMP_RENAME)}><label>Rename Files </label>
 <small>(After importing, Rename the files to configuration settings)</small>
 <label>${mylar.CONFIG.FOLDER_FORMAT}/${mylar.CONFIG.FILE_FORMAT}</label>
 </div>
 %endif
 <div class="row checkbox">
-<input type="checkbox" style="vertical-align: middle; margin: 3px; margin-top: -1px;" name="imp_metadata" id="imp_metadata" value="1" ${checked(mylar.CONFIG.IMP_METADATA)}><label>Use Existing Metadata</label>
+<input type="checkbox" disabled style="vertical-align: middle; margin: 3px; margin-top: -1px;" name="imp_metadata" id="imp_metadata" value="1" ${checked(mylar.CONFIG.IMP_METADATA)}><label>Use Existing Metadata</label>
 <small>(Use existing Metadata to better locate series for import)</small>
 </div>
 %if mylar.IMPORTLOCK:

@@ -69,10 +69,27 @@
    except:
        pass

+    comicline = comicname
+
+    comictype = comic['Type']
+    try:
+        if (any([comictype == 'None', comictype is None, comictype == 'Print']) and comic['Corrected_Type'] != 'TPB') or all([comic['Corrected_Type'] is not None, comic['Corrected_Type'] == 'Print']):
+            comictype = None
+        else:
+            if comic['Corrected_Type'] is not None:
+                comictype = comic['Corrected_Type']
+            else:
+                comictype = comictype
+    except:
+        comictype = None
+
+    if comictype is not None:
+        comicline += ' [%s]' % (comictype)
+
 %>
 <tr class="grade${grade}">
 <td id="publisher"><span title="${comicpub}"></span>${comicpub}</td>
-<td id="name"><span title="${comicname}"></span><a title="${comic['ComicName']}" href="comicDetails?ComicID=${comic['ComicID']}">${comicname}</a></td>
+<td id="name"><span title="${comicname}"></span><a title="${comic['ComicName']}" href="comicDetails?ComicID=${comic['ComicID']}">${comicline}</a></td>
 <td id="year"><span title="${comic['ComicYear']}"></span>${comic['ComicYear']}</td>
 <td id="issue"><span title="${comic['LatestIssue']}"></span># ${comic['LatestIssue']}</td>
 <td id="published">${comic['LatestDate']}</td>

@@ -62,6 +62,7 @@
 %endif
 </div>
+<form action="comicScan" method="GET" id="comicScan">
 <fieldset>
 <legend>Scan Comic Library</legend>
 <p><strong>Where are the comics you want scanned in located?</strong></p>
 <p>You can put in any directory, and it will scan for comics</br>
@@ -73,35 +74,41 @@
 </p>
 <br/>
 <div class="row">
-<label for="">Path to directory</label>
-%if mylar.CONFIG.COMIC_DIR:
-<input type="text" value="${mylar.CONFIG.COMIC_DIR}" name="path" size="70" />
-%else:
-<input type="text" value="Enter a Comic Directory to scan" onfocus="if
-(this.value==this.defaultValue) this.value='';" name="path" size="70" />
-%endif
+    <label for="">Path to directory</label>
+    %if mylar.CONFIG.COMIC_DIR:
+    <input type="text" value="${mylar.CONFIG.COMIC_DIR}" name="path" size="70" />
+    %else:
+    <input type="text" value="Enter a Comic Directory to scan" onfocus="if
+    (this.value==this.defaultValue) this.value='';" name="path" size="70" />
+    %endif
 </div>
 <div class="row checkbox">
-<input type="checkbox" name="autoadd" id="autoadd" value="1" ${checked(mylar.CONFIG.ADD_COMICS)}><label>Auto-add new series</label>
+<input type="checkbox" name="autoadd" id="autoadd" value="1" ${checked(mylar.CONFIG.ADD_COMICS)} /><label>Auto-add new series</label>
 </div>
 <div class="row checkbox">
-<input type="checkbox" style="vertical-align: middle; margin: 3px; margin-top: -1px;" name="imp_metadata" id="imp_metadata" value="1" ${checked(mylar.CONFIG.IMP_METADATA)}><label>Use existing Metadata</label>
+<input type="checkbox" style="vertical-align: middle; margin: 3px; margin-top: -1px;" name="imp_metadata" id="imp_metadata" value="1" ${checked(mylar.CONFIG.IMP_METADATA)} /><label>Use existing Metadata</label>
 <small>Use existing Metadata to better locate series for import</small>
 </div>
 <div class="row checkbox">
-<input type="checkbox" style="vertical-align: middle; margin: 3px; margin-top: -1px;" name="imp_move" onclick="initConfigCheckbox($this));" id="imp_move" value="1" ${checked(mylar.CONFIG.IMP_MOVE)}><label>Move files into corresponding Series directory</label>
-<small>Leaving this unchecked will not move anything, but will mark the issues as Archived</small>
+<input type="checkbox" style="vertical-align: middle; margin: 3px; margin-top: -1px;" name="imp_move" id="imps" value="1" ${checked(mylar.CONFIG.IMP_MOVE)} /><label>Move files</label>
+<small>Unchecked will not move anything, but will mark the issues as Archived</small>
+</br>
 </div>
-<div class="config">
-<input type="checkbox" style="vertical-align: middle; margin: 3px; margin-top: -1px;" name="imp_rename" id="imp_rename" value="1" ${checked(mylar.CONFIG.IMP_RENAME)}><label>Rename Files </label>
-<small>Rename files to configuration settings</small>
+<div id="move_options" class="row checkbox">
+<input type="checkbox" style="vertical-align: middle; margin: 3px; margin-top: -1px;" name="imp_rename" id="imp_rename" value="1" ${checked(mylar.CONFIG.IMP_RENAME)} /><label>Rename Files</label>
+<small>Rename files to configuration settings</small>
+</div>
+<div id="path_options" class="row checkbox">
+<input type="checkbox" style="vertical-align: middle; margin: 3px; margin-top: -1px;" name="imp_paths" id="imp_paths" value="1" ${checked(mylar.CONFIG.IMP_PATHS)} /><label>Set import paths to series location paths</label>
+<small>Use the folder paths during import as the series location path</small>
+</div>
 <br/>
-<input type="button" value="Save Changes and Scan" onclick="addScanAction();doAjaxCall('comicScan',$(this),'tabs',true);return true;" data-success="Import Scan now submitted." data-error="Unable to start the scan. Check the logs.">
-<input type="button" value="Save Changes without Scanning Library" onclick="doAjaxCall('comicScan',$(this),'tabs',true);return false;" data-success="Changes Saved Successfully" data-error="Unable to save settings. Check the logs.">
+<input type="button" value="Save Changes and Scan" onclick="addScanAction();doAjaxCall('comicScan',$(this),'tabs',true);return true;" data-success="Import Scan now submitted." data-error="Unable to start the scan. Check the logs." />
+<input type="button" value="Save Changes without Scanning Library" onclick="doAjaxCall('comicScan',$(this),'tabs',true);return false;" data-success="Changes Saved Successfully" data-error="Unable to save settings. Check the logs." />
 %if mylar.IMPORTBUTTON:
 <input type="button" value="Import Results Management" style="float: right;" onclick="location.href='importResults';" />
 %endif
 </fieldset>
 </form>
 </div>
 <div id="tabs-2" class="configtable">
@@ -278,9 +285,31 @@
    }
 };
 function initThisPage() {
+    if ($("#imps").is(":checked"))
+    {
+        $("#move_options").show();
+        $("#path_options").hide();
+    }
+    else
+    {
+        $("#path_options").show();
+        $("#move_options").hide();
+    }
+
+    $("#imps").click(function(){
+        if ($("#imps").is(":checked"))
+        {
+            $("#move_options").slideDown();
+            $("#path_options").slideUp();
+        }
+        else
+        {
+            $("#path_options").slideDown();
+            $("#move_options").slideUp();
+        }
+    });
    jQuery( "#tabs" ).tabs();
    initActions();
-    initConfigCheckbox("#imp_move");
    startTime();
 };
 $(document).ready(function() {

@@ -0,0 +1,45 @@
+<%inherit file="base.html"/>
+<%
+    import mylar
+%>
+
+<%def name="headIncludes()">
+</%def>
+
+<%def name="body()">
+    <div id="paddingheader">
+        <h1 class="clearfix">${title}</h1></br>
+    </div>
+    <div style="position:relative; width:960px; height:0px; margin:10px auto;">
+        <%
+            format_len = len(file_format)
+        %>
+        <form action="previewRename" type="GET">
+            <div style="position:absolute; top:-20px; right:0px;">
+                <label>File Format (Applied):</label>
+                <input type="text" name="file_format" value="${file_format}" size="${format_len}"><input type="image" src="interfaces/default/images/submit.png" height="25" width="25" class="highqual" />
+                <input type="hidden" name="comicid" value="${comicid}" />
+            </div>
+        </form>
+    </div>
+    <div>
+        <table cellpadding="5" cellspacing="5">
+            <thead>
+                <tr>
+                    <th id="issueid">IssueID</th>
+                    <th id="originalname" width="500" style="text-align:center">Original Name</th>
+                    <th id="renamed" width="500" style="text-align:center">Renamed</th>
+                </tr>
+            </thead>
+            <tbody>
+                %for ti in resultlist:
+                <tr>
+                    <td id="issueid">${ti['issueid']}</td>
+                    <td id="originalname" width="500" style="text-align:center">${ti['original']}</td>
+                    <td id="renamed" width="500" style="text-align:center">${ti['new']}</td>
+                </tr>
+                %endfor
+            </tbody>
+        </table>
+    </div>
+</%def>

@@ -14,7 +14,7 @@
 <a id="menu_link_delete" href="#">Remove Read</a>
 <a id="menu_link_delete" href="#">Clear File Cache</a>
 <a id="menu_link_refresh" onclick="doAjaxCall('SearchArcIssues?StoryArcID=${storyarcid}',$(this),'table')" data-success="Now searching for Missing StoryArc Issues">Search for Missing</a>
-<a id="menu_link_refresh" onclick="doAjaxCall('ArcWatchlist?StoryArcID=${storyarcid}',$(this),'table')" data-success="Now searching for matches on Watchlist">Search for Watchlist matches</a>
+<a id="menu_link_refresh" onclick="doAjaxCall('ArcWatchlist?StoryArcID=${storyarcid}',$(this),'table')" data-success="Now searching for matches on Watchlist & Rechecking files">Search for Watchlist matches/Recheck Files</a>
 %if cvarcid:
 <a id="menu_link_refresh" onclick="doAjaxCall('addStoryArc_thread?arcid=${storyarcid}&cvarcid=${cvarcid}&storyarcname=${storyarcname}&arcrefresh=True',$(this),'table')" data-success="Refreshed Story Arc">Refresh Story Arc</a>
 %endif
@@ -110,6 +110,9 @@
 </table>
 </div>
 </div>
+
+<button type="button" onclick="">Finalize & Rename</button>
+
 <table class="display" id="arc_detail">
 <thead>
 <tr>

@@ -34,10 +34,8 @@
 <tr/><tr>
 <td id="mainimg">
-<fieldset>
-<div id="artistImg">
+<div id="aristImg">
 <img src="${storyarcbanner}" onload="resizeimage('${bannerwidth}')" height="400" width="${bannerwidth}" id="banner" style="text-decoration: none;position: relative;top:0px;right:0px;left:0px;"/>
 </div>
-</fieldset>
 <%
    if arcdetail['percent'] == 101:
        css = '<div class=\"progress-container warning\">'
@@ -48,9 +46,11 @@

 %>
 <div style="display:table;margin:auto;position:relative;top:0px;"><span title="${arcdetail['percent']}"></span>${css}<div style="width:${arcdetail['percent']}%"><span class="progressbar-front-text">${arcdetail['Have']}/${arcdetail['Total']}</span></div></div></div>
+</div>
+</fieldset>
 </td>
 <td width="100%" padding="5px" style="vertical-align:bottom;">
-<div style="display:inline;position:relative;text-color:black;top:-200px;margin:0 auto;">
+<div style="display:inline;position:relative;text-color:black;top:-150px;margin:0 auto;">
 <center><h1>${storyarcname}</h1>(${spanyears})</center>
 </div>

@@ -81,7 +81,7 @@
 </form>
 </div>
 <%
-    optpos = '<div style=\"display:block;float:right;position:relative;right:20px;top:50px;\">'
+    optpos = '<div style=\"display:block;float:right;position:relative;right:20px;top:-10px;\">'
 %>
 ${optpos}
 <form action="downloadBanner" method="GET">

@@ -29,7 +29,7 @@
 <table width="100%" align="center">
 <tr>
 <td style="vertical-align: middle; text-align: right">
-<a href="pullist?week=${weekinfo['prev_weeknumber']}&year=${weekinfo['prev_year']}" title="Previous Week (${weekinfo['prev_weeknumber']})" onclick="$('#pull_table').page('first').draw('page');">
+<a href="pullist?week=${weekinfo['prev_weeknumber']}&year=${weekinfo['prev_year']}&current=${weekinfo['weeknumber']}-${weekinfo['year']}" title="Previous Week (${weekinfo['prev_weeknumber']})" onclick="$('#pull_table').page('first').draw('page');">
 <img src="interfaces/default/images/prev.gif" width="16" height="18" Alt="Previous"/>
 </a>
 </td>
@@ -41,7 +41,7 @@
 %endif
 </td>
 <td style="vertical-align: middle; text-align: left">
-<a href="pullist?week=${weekinfo['next_weeknumber']}&year=${weekinfo['next_year']}" title="Next Week (${weekinfo['next_weeknumber']})" onclick="$('#pull_table').page('first').draw('page');">
+<a href="pullist?week=${weekinfo['next_weeknumber']}&year=${weekinfo['next_year']}&current=${weekinfo['weeknumber']}-${weekinfo['year']}" title="Next Week (${weekinfo['next_weeknumber']})" onclick="$('#pull_table').page('first').draw('page');">
 <img src="interfaces/default/images/next.gif" width="16" height="18" Alt="Next"/>
 </a>
 </td>
@@ -129,6 +129,10 @@
 %if weekly['SERIESYEAR'] is not None:
 &nbsp;(${weekly['SERIESYEAR']})
 %endif
+
+%if weekly['FORMAT'] == 'Digital':
+&nbsp;[${weekly['FORMAT']}]
+%endif
 </td>
 <td class="comicnumber">${weekly['ISSUE']}</td>
 %if weekly['AUTOWANT']:

@@ -26,7 +26,16 @@ import time
 import StringIO

 from natsort import natsorted
-from rarfile import rarfile
+
+try:
+    site_root = os.path.dirname(os.path.realpath(__file__))
+    parent_root = os.path.abspath(os.path.join(site_root, os.pardir))
+    lib_path = os.path.abspath(os.path.join(parent_root, os.pardir))
+    if lib_path not in sys.path:
+        sys.path.append(lib_path)
+    from rarfile import rarfile
+except:
+    from lib.rarfile import rarfile

 if platform.system() == "Windows":
    import _subprocess

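The import change above replaces a bare `from rarfile import rarfile` with a guarded import that first looks for a copy two directories above the module and then falls back to the copy vendored under lib/. The same pattern in isolation, with a hypothetical module name `somelib` (and the original's bare `except:` narrowed to ImportError):

    import os
    import sys

    try:
        # Prefer a checkout sitting two levels above this file, mirroring the
        # site_root/parent_root/lib_path walk in the hunk above.
        site_root = os.path.dirname(os.path.realpath(__file__))
        lib_path = os.path.abspath(os.path.join(site_root, os.pardir, os.pardir))
        if lib_path not in sys.path:
            sys.path.append(lib_path)
        from somelib import somelib
    except ImportError:
        # Fall back to the copy bundled under lib/.
        from lib.somelib import somelib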
@@ -1,5 +1,5 @@
 # This file should contain only these comments, and the line below.
 # Used by packaging makefiles and app
-version = "1.25.1"
+version = "1.25.2"
 fork="ninjas.walk.alone"
 fork_tag="SHURIKEN"

@@ -17,6 +17,7 @@ from __future__ import with_statement

 import os
 import shutil
+import datetime
 import re
 import shlex
 import time

@@ -392,17 +393,24 @@ class PostProcessor(object):
        if '_' in self.issueid:
            logger.fdebug('Story Arc post-processing request detected.')
            self.issuearcid = self.issueid
-            logger.fdebug('%s Now post-processing directly against ComicID: %s / IssueID: %s' % (module, self.comicid, self.issueid))
-            if self.nzb_name.lower().endswith(self.extensions):
-                flc = filechecker.FileChecker(self.nzb_folder, file=self.nzb_name, pp_mode=True)
-                fl = flc.listFiles()
-                filelist = {}
-                filelist['comiclist'] = [fl]
-                filelist['comiccount'] = len(filelist['comiclist'])
            self.issueid = None
+            logger.fdebug('%s Now post-processing directly against StoryArcs - ComicID: %s / IssueArcID: %s' % (module, self.comicid, self.issuearcid))
+        if self.issueid is not None:
+            logger.fdebug('%s Now post-processing directly against ComicID: %s / IssueID: %s' % (module, self.comicid, self.issueid))
+            if self.issuearcid is None:
+                if self.nzb_name.lower().endswith(self.extensions):
+                    flc = filechecker.FileChecker(self.nzb_folder, file=self.nzb_name, pp_mode=True)
+                    fl = flc.listFiles()
+                    filelist = {}
+                    filelist['comiclist'] = [fl]
+                    filelist['comiccount'] = len(filelist['comiclist'])
+                else:
+                    flc = filechecker.FileChecker(self.nzb_folder, justparse=True, pp_mode=True)
+                    filelist = flc.listFiles()
-        else:
-            flc = filechecker.FileChecker(self.nzb_folder, justparse=True, pp_mode=True)
-            filelist = flc.listFiles()
+
+            filelist = {}
+            filelist['comiclist'] = []
+            filelist['comiccount'] = 0
        #preload the entire ALT list in here.
        alt_list = []
        alt_db = myDB.select("SELECT * FROM Comics WHERE AlternateSearch != 'None'")
@@ -421,10 +429,11 @@ class PostProcessor(object):
            self.matched = False
            as_d = filechecker.FileChecker()
            as_dinfo = as_d.dynamic_replace(helpers.conversion(fl['series_name']))
+            orig_seriesname = as_dinfo['mod_seriesname']
            mod_seriesname = as_dinfo['mod_seriesname']
            loopchk = []
            if fl['alt_series'] is not None:
-                logger.info('%s Alternate series naming detected: %s' % (module, fl['alt_series']))
+                logger.fdebug('%s Alternate series naming detected: %s' % (module, fl['alt_series']))
                as_sinfo = as_d.dynamic_replace(helpers.conversion(fl['alt_series']))
                mod_altseriesname = as_sinfo['mod_seriesname']
                if all([mylar.CONFIG.ANNUALS_ON, 'annual' in mod_altseriesname.lower()]) or all([mylar.CONFIG.ANNUALS_ON, 'special' in mod_altseriesname.lower()]):
@@ -456,31 +465,58 @@ class PostProcessor(object):
            tmpsql = "SELECT * FROM comics WHERE DynamicComicName IN ({seq}) COLLATE NOCASE".format(seq=','.join('?' * len(loopchk)))
            comicseries = myDB.select(tmpsql, tuple(loopchk))

-            if comicseries is None:
-                logger.error(module + ' No Series in Watchlist - checking against Story Arcs (just in case). If I do not find anything, maybe you should be running Import?')
-                break
-            else:
-                watchvals = []
-                for wv in comicseries:
-                    #do some extra checks in here to ignore these types:
-                    #check for Paused status /
-                    #check for Ended status and 100% completion of issues.
-                    if wv['Status'] == 'Paused' or (wv['Have'] == wv['Total'] and not any(['Present' in wv['ComicPublished'], helpers.now()[:4] in wv['ComicPublished']])):
-                        logger.warn(wv['ComicName'] + ' [' + wv['ComicYear'] + '] is either Paused or in an Ended status with 100% completion. Ignoring for match.')
-                        continue
-                    wv_comicname = wv['ComicName']
-                    wv_comicpublisher = wv['ComicPublisher']
-                    wv_alternatesearch = wv['AlternateSearch']
-                    wv_comicid = wv['ComicID']
+            if not comicseries or orig_seriesname != mod_seriesname:
+                if all(['special' in orig_seriesname.lower(), mylar.CONFIG.ANNUALS_ON, orig_seriesname != mod_seriesname]):
+                    if not any(re.sub('[\|\s]', '', orig_seriesname).lower() == x for x in loopchk):
+                        loopchk.append(re.sub('[\|\s]', '', orig_seriesname.lower()))
+                        tmpsql = "SELECT * FROM comics WHERE DynamicComicName IN ({seq}) COLLATE NOCASE".format(seq=','.join('?' * len(loopchk)))
+                        comicseries = myDB.select(tmpsql, tuple(loopchk))
+                if not comicseries:
+                    logger.error(module + ' No Series in Watchlist - checking against Story Arcs (just in case). If I do not find anything, maybe you should be running Import?')
+                    break
+
-                    wv_seriesyear = wv['ComicYear']
-                    wv_comicversion = wv['ComicVersion']
-                    wv_publisher = wv['ComicPublisher']
-                    wv_total = wv['Total']
-                    if mylar.CONFIG.FOLDER_SCAN_LOG_VERBOSE:
-                        logger.fdebug('Queuing to Check: ' + wv['ComicName'] + ' [' + str(wv['ComicYear']) + '] -- ' + str(wv['ComicID']))
+            watchvals = []
+            for wv in comicseries:
+                logger.info('Now checking: %s [%s]' % (wv['ComicName'], wv['ComicID']))
+                #do some extra checks in here to ignore these types:
+                #check for Paused status /
+                #check for Ended status and 100% completion of issues.
+                if wv['Status'] == 'Paused' or (wv['Have'] == wv['Total'] and not any(['Present' in wv['ComicPublished'], helpers.now()[:4] in wv['ComicPublished']])):
+                    logger.warn(wv['ComicName'] + ' [' + wv['ComicYear'] + '] is either Paused or in an Ended status with 100% completion. Ignoring for match.')
+                    continue
+                wv_comicname = wv['ComicName']
+                wv_comicpublisher = wv['ComicPublisher']
+                wv_alternatesearch = wv['AlternateSearch']
+                wv_comicid = wv['ComicID']
+                if (all([wv['Type'] != 'Print', wv['Type'] != 'Digital']) and wv['Corrected_Type'] != 'Print') or wv['Corrected_Type'] == 'TPB':
+                    wv_type = 'TPB'
+                else:
+                    wv_type = None
+                wv_seriesyear = wv['ComicYear']
+                wv_comicversion = wv['ComicVersion']
+                wv_publisher = wv['ComicPublisher']
+                wv_total = wv['Total']
+                if mylar.CONFIG.FOLDER_SCAN_LOG_VERBOSE:
+                    logger.fdebug('Queuing to Check: ' + wv['ComicName'] + ' [' + str(wv['ComicYear']) + '] -- ' + str(wv['ComicID']))
+
-                #force it to use the Publication Date of the latest issue instead of the Latest Date (which could be anything)
+                #force it to use the Publication Date of the latest issue instead of the Latest Date (which could be anything)
                latestdate = myDB.select('SELECT IssueDate from issues WHERE ComicID=? order by ReleaseDate DESC', [wv['ComicID']])
                if latestdate:
                    tmplatestdate = latestdate[0][0]
                    if tmplatestdate[:4] != wv['LatestDate'][:4]:
                        if tmplatestdate[:4] > wv['LatestDate'][:4]:
                            latestdate = tmplatestdate
                        else:
                            latestdate = wv['LatestDate']
                    else:
                        latestdate = tmplatestdate
                else:
                    latestdate = wv['LatestDate']

                if latestdate == '0000-00-00' or latestdate == 'None' or latestdate is None:
                    logger.fdebug('Forcing a refresh of series: ' + wv_comicname + ' as it appears to have incomplete issue dates.')
                    updater.dbUpdate([wv_comicid])
                    logger.fdebug('Refresh complete for ' + wv_comicname + '. Rechecking issue dates for completion.')
                    latestdate = myDB.select('SELECT IssueDate from issues WHERE ComicID=? order by ReleaseDate DESC', [wv['ComicID']])
                    if latestdate:
                        tmplatestdate = latestdate[0][0]
@@ -494,41 +530,26 @@ class PostProcessor(object):
                    else:
                        latestdate = wv['LatestDate']

-                    logger.fdebug('Latest Date (after forced refresh) set to :' + str(latestdate))
-
-                    if latestdate == '0000-00-00' or latestdate == 'None' or latestdate is None:
-                        logger.fdebug('Forcing a refresh of series: ' + wv_comicname + ' as it appears to have incomplete issue dates.')
-                        updater.dbUpdate([wv_comicid])
-                        logger.fdebug('Refresh complete for ' + wv_comicname + '. Rechecking issue dates for completion.')
-                        latestdate = myDB.select('SELECT IssueDate from issues WHERE ComicID=? order by ReleaseDate DESC', [wv['ComicID']])
-                        if latestdate:
-                            tmplatestdate = latestdate[0][0]
-                            if tmplatestdate[:4] != wv['LatestDate'][:4]:
-                                if tmplatestdate[:4] > wv['LatestDate'][:4]:
-                                    latestdate = tmplatestdate
-                                else:
-                                    latestdate = wv['LatestDate']
-                            else:
-                                latestdate = tmplatestdate
-                        else:
-                            latestdate = wv['LatestDate']
-                            logger.fdebug('Unable to properly attain the Latest Date for series: ' + wv_comicname + '. Cannot check against this series for post-processing.')
-                            continue
-
+                logger.fdebug('Latest Date (after forced refresh) set to :' + str(latestdate))
+
+                if latestdate == '0000-00-00' or latestdate == 'None' or latestdate is None:
+                    logger.fdebug('Unable to properly attain the Latest Date for series: ' + wv_comicname + '. Cannot check against this series for post-processing.')
+                    continue

-                watchvals.append({"ComicName": wv_comicname,
-                                  "ComicPublisher": wv_comicpublisher,
-                                  "AlternateSearch": wv_alternatesearch,
-                                  "ComicID": wv_comicid,
-                                  "WatchValues": {"SeriesYear": wv_seriesyear,
-                                                  "LatestDate": latestdate,
-                                                  "ComicVersion": wv_comicversion,
-                                                  "Publisher": wv_publisher,
-                                                  "Total": wv_total,
-                                                  "ComicID": wv_comicid,
-                                                  "IsArc": False}
-                                  })
+                watchvals.append({"ComicName": wv_comicname,
+                                  "ComicPublisher": wv_comicpublisher,
+                                  "AlternateSearch": wv_alternatesearch,
+                                  "ComicID": wv_comicid,
+                                  "LastUpdated": wv['LastUpdated'],
+                                  "WatchValues": {"SeriesYear": wv_seriesyear,
+                                                  "LatestDate": latestdate,
+                                                  "ComicVersion": wv_comicversion,
+                                                  "Type": wv_type,
+                                                  "Publisher": wv_publisher,
+                                                  "Total": wv_total,
+                                                  "ComicID": wv_comicid,
+                                                  "IsArc": False}
+                                  })

        ccnt=0
        nm=0
@@ -539,12 +560,28 @@ class PostProcessor(object):
                    nm+=1
                    continue
                else:
-                    temploc= watchmatch['justthedigits'].replace('_', ' ')
-                    temploc = re.sub('[\#\']', '', temploc)
-                    logger.info('temploc: %s' % temploc)
+                    try:
+                        if cs['WatchValues']['Type'] == 'TPB' and cs['WatchValues']['Total'] > 1:
+                            if watchmatch['series_volume'] is not None:
+                                just_the_digits = re.sub('[^0-9]', '', watchmatch['series_volume']).strip()
+                            else:
+                                just_the_digits = re.sub('[^0-9]', '', watchmatch['justthedigits']).strip()
+                        else:
+                            just_the_digits = watchmatch['justthedigits']
+                    except Exception as e:
+                        logger.warn('[Exception: %s] Unable to properly match up/retrieve issue number (or volume) for this [CS: %s] [WATCHMATCH: %s]' % (e, cs, watchmatch))
+                        nm+=1
+                        continue
+
+                    if just_the_digits is not None:
+                        temploc= just_the_digits.replace('_', ' ')
+                        temploc = re.sub('[\#\']', '', temploc)
+                        logger.fdebug('temploc: %s' % temploc)
+                    else:
+                        temploc = None
                datematch = "False"

-                if any(['annual' in temploc.lower(), 'special' in temploc.lower()]) and mylar.CONFIG.ANNUALS_ON is True:
+                if temploc is not None and (any(['annual' in temploc.lower(), 'special' in temploc.lower()]) and mylar.CONFIG.ANNUALS_ON is True):
                    biannchk = re.sub('-', '', temploc.lower()).strip()
                    if 'biannual' in biannchk:
                        logger.fdebug(module + ' Bi-Annual detected.')
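For multi-volume trade paperbacks the hunk above sources the sortable "issue" digits from the volume label rather than the parsed issue number. A minimal sketch of that extraction, with plain arguments standing in for the cs/watchmatch dictionaries:

    import re

    # Sketch of the TPB handling above: a trade series with more than one
    # volume uses its volume label (e.g. 'v03') as the issue digits.
    def issue_digits(book_type, total, series_volume, justthedigits):
        if book_type == 'TPB' and total > 1:
            source = series_volume if series_volume is not None else justthedigits
            return re.sub('[^0-9]', '', source).strip()
        return justthedigits

    print(issue_digits('TPB', 12, 'v03', '1'))    # '03'
    print(issue_digits('Print', 12, None, '57'))  # '57'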
@@ -559,166 +596,216 @@ class PostProcessor(object):
                    issuechk = myDB.select("SELECT * from annuals WHERE ComicID=? AND Int_IssueNumber=?", [cs['ComicID'], fcdigit])
                else:
                    annchk = "no"
-                    fcdigit = helpers.issuedigits(temploc)
-                    issuechk = myDB.select("SELECT * from issues WHERE ComicID=? AND Int_IssueNumber=?", [cs['ComicID'], fcdigit])
+                    if temploc is not None:
+                        fcdigit = helpers.issuedigits(temploc)
+                        issuechk = myDB.select("SELECT * from issues WHERE ComicID=? AND Int_IssueNumber=?", [cs['ComicID'], fcdigit])
+                    else:
+                        issuechk = myDB.select("SELECT * from issues WHERE ComicID=?", [cs['ComicID']])

-                if issuechk is None:
-                    logger.fdebug(module + ' No corresponding issue # found for ' + str(cs['ComicID']))
-                    continue
-                else:
-                    for isc in issuechk:
-                        datematch = "True"
-                        if isc['ReleaseDate'] is not None and isc['ReleaseDate'] != '0000-00-00':
+                if not issuechk:
+                    logger.fdebug('%s No corresponding issue #%s found for %s' % (module, temploc, cs['ComicID']))
+
+                    #check the last refresh date of the series, and if > than an hr try again:
+                    c_date = cs['LastUpdated']
+                    if c_date is None:
+                        logger.error('%s %s failed during a previous add /refresh as it has no Last Update timestamp. Forcing refresh now.' % (module, cs['ComicName']))
+                    else:
+                        c_obj_date = datetime.datetime.strptime(c_date, "%Y-%m-%d %H:%M:%S")
+                        n_date = datetime.datetime.now()
+                        absdiff = abs(n_date - c_obj_date)
+                        hours = (absdiff.days * 24 * 60 * 60 + absdiff.seconds) / 3600.0
+                        if hours < 1:
+                            logger.fdebug('%s %s [%s] Was refreshed less than 1 hours ago. Skipping Refresh at this time so we don\'t hammer things unnecessarily.' % (module, cs['ComicName'], cs['ComicID']))
+                            continue
+                    updater.dbUpdate([cs['ComicID']])
+                    logger.fdebug('%s Succssfully refreshed series - now re-querying against new data for issue #%s.' % (module, temploc))
+                    if annchk == 'yes':
+                        issuechk = myDB.select("SELECT * from annuals WHERE ComicID=? AND Int_IssueNumber=?", [cs['ComicID'], fcdigit])
+                    else:
+                        issuechk = myDB.select("SELECT * from issues WHERE ComicID=? AND Int_IssueNumber=?", [cs['ComicID'], fcdigit])
+                    if not issuechk:
+                        logger.fdebug('%s No corresponding issue #%s found for %s even after refreshing. It might not have the information available as of yet...' % (module, temploc, cs['ComicID']))
+                        continue
+
+                for isc in issuechk:
+                    datematch = "True"
+                    if isc['ReleaseDate'] is not None and isc['ReleaseDate'] != '0000-00-00':
+                        try:
+                            if isc['DigitalDate'] != '0000-00-00' and int(re.sub('-', '', isc['DigitalDate']).strip()) <= int(re.sub('-', '', isc['ReleaseDate']).strip()):
+                                monthval = isc['DigitalDate']
+                                watch_issueyear = isc['DigitalDate'][:4]
+                            else:
+                                monthval = isc['ReleaseDate']
+                                watch_issueyear = isc['ReleaseDate'][:4]
+                        except:
+                            monthval = isc['ReleaseDate']
+                            watch_issueyear = isc['ReleaseDate'][:4]
-                        else:

+                    else:
+                        try:
+                            if isc['DigitalDate'] != '0000-00-00' and int(re.sub('-', '', isc['DigitalDate']).strip()) <= int(re.sub('-', '', isc['ReleaseDate']).strip()):
+                                monthval = isc['DigitalDate']
+                                watch_issueyear = isc['DigitalDate'][:4]
+                            else:
+                                monthval = isc['IssueDate']
+                                watch_issueyear = isc['IssueDate'][:4]
+                        except:
+                            monthval = isc['IssueDate']
+                            watch_issueyear = isc['IssueDate'][:4]

-                            if len(watchmatch) >= 1 and watchmatch['issue_year'] is not None:
-                                #if the # of matches is more than 1, we need to make sure we get the right series
-                                #compare the ReleaseDate for the issue, to the found issue date in the filename.
-                                #if ReleaseDate doesn't exist, use IssueDate
-                                #if no issue date was found, then ignore.
-                                logger.fdebug(module + '[ISSUE-VERIFY] Now checking against ' + cs['ComicName'] + '-' + cs['ComicID'])
-                                issyr = None
-                                #logger.fdebug(module + ' issuedate:' + str(isc['IssueDate']))
-                                #logger.fdebug(module + ' isc: ' + str(isc['IssueDate'][5:7]))
+                    if len(watchmatch) >= 1 and watchmatch['issue_year'] is not None:
+                        #if the # of matches is more than 1, we need to make sure we get the right series
+                        #compare the ReleaseDate for the issue, to the found issue date in the filename.
+                        #if ReleaseDate doesn't exist, use IssueDate
+                        #if no issue date was found, then ignore.
+                        logger.fdebug(module + '[ISSUE-VERIFY] Now checking against ' + cs['ComicName'] + '-' + cs['ComicID'])
+                        issyr = None
+                        #logger.fdebug(module + ' issuedate:' + str(isc['IssueDate']))
+                        #logger.fdebug(module + ' isc: ' + str(isc['IssueDate'][5:7]))
+
-                                #logger.info(module + ' ReleaseDate: ' + str(isc['ReleaseDate']))
-                                #logger.info(module + ' IssueDate: ' + str(isc['IssueDate']))
-                                if isc['ReleaseDate'] is not None and isc['ReleaseDate'] != '0000-00-00':
-                                    if int(isc['ReleaseDate'][:4]) < int(watchmatch['issue_year']):
-                                        logger.fdebug(module + '[ISSUE-VERIFY] ' + str(isc['ReleaseDate']) + ' is before the issue year of ' + str(watchmatch['issue_year']) + ' that was discovered in the filename')
-                                        datematch = "False"
-                                else:
-                                    if int(isc['IssueDate'][:4]) < int(watchmatch['issue_year']):
-                                        logger.fdebug(module + '[ISSUE-VERIFY] ' + str(isc['IssueDate']) + ' is before the issue year ' + str(watchmatch['issue_year']) + ' that was discovered in the filename')
-                                        datematch = "False"
-
-                                if int(monthval[5:7]) == 11 or int(monthval[5:7]) == 12:
-                                    issyr = int(monthval[:4]) + 1
-                                    logger.fdebug(module + '[ISSUE-VERIFY] IssueYear (issyr) is ' + str(issyr))
-                                elif int(monthval[5:7]) == 1 or int(monthval[5:7]) == 2 or int(monthval[5:7]) == 3:
-                                    issyr = int(monthval[:4]) - 1
-
-                                if datematch == "False" and issyr is not None:
-                                    logger.fdebug(module + '[ISSUE-VERIFY] ' + str(issyr) + ' comparing to ' + str(watchmatch['issue_year']) + ' : rechecking by month-check versus year.')
-                                    datematch = "True"
-                                    if int(issyr) != int(watchmatch['issue_year']):
-                                        logger.fdebug(module + '[ISSUE-VERIFY][.:FAIL:.] Issue is before the modified issue year of ' + str(issyr))
-                                        datematch = "False"
+                        #logger.info(module + ' ReleaseDate: ' + str(isc['ReleaseDate']))
+                        #logger.info(module + ' IssueDate: ' + str(isc['IssueDate']))
+                        if isc['DigitalDate'] is not None and isc['DigitalDate'] != '0000-00-00':
+                            if int(isc['DigitalDate'][:4]) < int(watchmatch['issue_year']):
+                                logger.fdebug(module + '[ISSUE-VERIFY] ' + str(isc['DigitalDate']) + ' is before the issue year of ' + str(watchmatch['issue_year']) + ' that was discovered in the filename')
+                                datematch = "False"
+
+                        elif isc['ReleaseDate'] is not None and isc['ReleaseDate'] != '0000-00-00':
+                            if int(isc['ReleaseDate'][:4]) < int(watchmatch['issue_year']):
+                                logger.fdebug(module + '[ISSUE-VERIFY] ' + str(isc['ReleaseDate']) + ' is before the issue year of ' + str(watchmatch['issue_year']) + ' that was discovered in the filename')
+                                datematch = "False"
+                        else:
-                                logger.info(module + '[ISSUE-VERIFY] Found matching issue # ' + str(fcdigit) + ' for ComicID: ' + str(cs['ComicID']) + ' / IssueID: ' + str(isc['IssueID']))
+                            if int(isc['IssueDate'][:4]) < int(watchmatch['issue_year']):
+                                logger.fdebug(module + '[ISSUE-VERIFY] ' + str(isc['IssueDate']) + ' is before the issue year ' + str(watchmatch['issue_year']) + ' that was discovered in the filename')
+                                datematch = "False"

-                            if datematch == "True":
-                                # if we get to here, we need to do some more comparisons just to make sure we have the right volume
-                                # first we chk volume label if it exists, then we drop down to issue year
-                                # if the above both don't exist, and there's more than one series on the watchlist (or the series is > v1)
-                                # then spit out the error message and don't post-process it.
-                                watch_values = cs['WatchValues']
-                                #logger.fdebug('WATCH_VALUES:' + str(watch_values))
-                                if any([watch_values['ComicVersion'] is None, watch_values['ComicVersion'] == 'None']):
-                                    tmp_watchlist_vol = '1'
-                                else:
-                                    tmp_watchlist_vol = re.sub("[^0-9]", "", watch_values['ComicVersion']).strip()
-                                if all([watchmatch['series_volume'] != 'None', watchmatch['series_volume'] is not None]):
-                                    tmp_watchmatch_vol = re.sub("[^0-9]","", watchmatch['series_volume']).strip()
-                                    if len(tmp_watchmatch_vol) == 4:
-                                        if int(tmp_watchmatch_vol) == int(watch_values['SeriesYear']):
-                                            logger.fdebug(module + '[ISSUE-VERIFY][SeriesYear-Volume MATCH] Series Year of ' + str(watch_values['SeriesYear']) + ' matched to volume/year label of ' + str(tmp_watchmatch_vol))
-                                        else:
-                                            logger.fdebug(module + '[ISSUE-VERIFY][SeriesYear-Volume FAILURE] Series Year of ' + str(watch_values['SeriesYear']) + ' DID NOT match to volume/year label of ' + tmp_watchmatch_vol)
-                                            datematch = "False"
-                                    if len(watchvals) > 1 and int(tmp_watchmatch_vol) > 1:
-                                        if int(tmp_watchmatch_vol) == int(tmp_watchlist_vol):
-                                            logger.fdebug(module + '[ISSUE-VERIFY][SeriesYear-Volume MATCH] Volume label of series Year of ' + str(watch_values['ComicVersion']) + ' matched to volume label of ' + str(watchmatch['series_volume']))
-                                        else:
-                                            logger.fdebug(module + '[ISSUE-VERIFY][SeriesYear-Volume FAILURE] Volume label of Series Year of ' + str(watch_values['ComicVersion']) + ' DID NOT match to volume label of ' + str(watchmatch['series_volume']))
-                                            continue
-                                            #datematch = "False"
-                                else:
-                                    if any([tmp_watchlist_vol is None, tmp_watchlist_vol == 'None', tmp_watchlist_vol == '']):
-                                        logger.fdebug(module + '[ISSUE-VERIFY][NO VOLUME PRESENT] No Volume label present for series. Dropping down to Issue Year matching.')
+                        if int(monthval[5:7]) == 11 or int(monthval[5:7]) == 12:
+                            issyr = int(monthval[:4]) + 1
+                            logger.fdebug(module + '[ISSUE-VERIFY] IssueYear (issyr) is ' + str(issyr))
+                        elif int(monthval[5:7]) == 1 or int(monthval[5:7]) == 2 or int(monthval[5:7]) == 3:
+                            issyr = int(monthval[:4]) - 1
+
+                        if datematch == "False" and issyr is not None:
+                            logger.fdebug(module + '[ISSUE-VERIFY] ' + str(issyr) + ' comparing to ' + str(watchmatch['issue_year']) + ' : rechecking by month-check versus year.')
+                            datematch = "True"
+                            if int(issyr) != int(watchmatch['issue_year']):
+                                logger.fdebug(module + '[ISSUE-VERIFY][.:FAIL:.] Issue is before the modified issue year of ' + str(issyr))
+                                datematch = "False"
+
+                    else:
+                        logger.info(module + '[ISSUE-VERIFY] Found matching issue # ' + str(fcdigit) + ' for ComicID: ' + str(cs['ComicID']) + ' / IssueID: ' + str(isc['IssueID']))
+
+                    if datematch == "True":
+                        # if we get to here, we need to do some more comparisons just to make sure we have the right volume
+                        # first we chk volume label if it exists, then we drop down to issue year
+                        # if the above both don't exist, and there's more than one series on the watchlist (or the series is > v1)
+                        # then spit out the error message and don't post-process it.
+                        watch_values = cs['WatchValues']
+                        #logger.fdebug('WATCH_VALUES:' + str(watch_values))
+                        if any([watch_values['ComicVersion'] is None, watch_values['ComicVersion'] == 'None']):
+                            tmp_watchlist_vol = '1'
+                        else:
+                            tmp_watchlist_vol = re.sub("[^0-9]", "", watch_values['ComicVersion']).strip()
+                        if all([watchmatch['series_volume'] != 'None', watchmatch['series_volume'] is not None]):
+                            tmp_watchmatch_vol = re.sub("[^0-9]","", watchmatch['series_volume']).strip()
+                            if len(tmp_watchmatch_vol) == 4:
+                                if int(tmp_watchmatch_vol) == int(watch_values['SeriesYear']):
+                                    logger.fdebug(module + '[ISSUE-VERIFY][SeriesYear-Volume MATCH] Series Year of ' + str(watch_values['SeriesYear']) + ' matched to volume/year label of ' + str(tmp_watchmatch_vol))
+                                else:
+                                    logger.fdebug(module + '[ISSUE-VERIFY][SeriesYear-Volume FAILURE] Series Year of ' + str(watch_values['SeriesYear']) + ' DID NOT match to volume/year label of ' + tmp_watchmatch_vol)
+                                    datematch = "False"
+                            elif len(watchvals) == 1 and int(tmp_watchlist_vol) == 1:
+                                logger.fdebug(module + '[ISSUE-VERIFY][Lone Volume MATCH] Volume label of ' + str(watch_values['ComicVersion']) + ' indicates only volume for this series on your watchlist.')
+                            elif int(tmp_watchlist_vol) > 1:
+                                logger.fdebug(module + '[ISSUE-VERIFY][Lone Volume FAILURE] Volume label of ' + str(watch_values['ComicVersion']) + ' indicates that there is more than one volume for this series, but the one on your watchlist has no volume label set.')
+                                datematch = "False"
+
-                            if datematch == "False" and all([watchmatch['issue_year'] is not None, watchmatch['issue_year'] != 'None', watch_issueyear is not None]):
-                                #now we see if the issue year matches exactly to what we have within Mylar.
-                                if int(watch_issueyear) == int(watchmatch['issue_year']):
-                                    logger.fdebug(module + '[ISSUE-VERIFY][Issue Year MATCH] Issue Year of ' + str(watch_issueyear) + ' is a match to the year found in the filename of : ' + str(watchmatch['issue_year']))
-                                    datematch = 'True'
+                            if len(watchvals) > 1 and int(tmp_watchmatch_vol) > 1:
+                                if int(tmp_watchmatch_vol) == int(tmp_watchlist_vol):
+                                    logger.fdebug(module + '[ISSUE-VERIFY][SeriesYear-Volume MATCH] Volume label of series Year of ' + str(watch_values['ComicVersion']) + ' matched to volume label of ' + str(watchmatch['series_volume']))
+                                else:
-                                    logger.fdebug(module + '[ISSUE-VERIFY][Issue Year FAILURE] Issue Year of ' + str(watch_issueyear) + ' does NOT match the year found in the filename of : ' + str(watchmatch['issue_year']))
-                                    logger.fdebug(module + '[ISSUE-VERIFY] Checking against complete date to see if month published could allow for different publication year.')
-                                    if issyr is not None:
-                                        if int(issyr) != int(watchmatch['issue_year']):
-                                            logger.fdebug(module + '[ISSUE-VERIFY][Issue Year FAILURE] Modified Issue year of ' + str(issyr) + ' is before the modified issue year of ' + str(issyr))
-                                        else:
-                                            logger.fdebug(module + '[ISSUE-VERIFY][Issue Year MATCH] Modified Issue Year of ' + str(issyr) + ' is a match to the year found in the filename of : ' + str(watchmatch['issue_year']))
-                                            datematch = 'True'
+                                    logger.fdebug(module + '[ISSUE-VERIFY][SeriesYear-Volume FAILURE] Volume label of Series Year of ' + str(watch_values['ComicVersion']) + ' DID NOT match to volume label of ' + str(watchmatch['series_volume']))
+                                    continue
+                                    #datematch = "False"
+                        else:
+                            if any([tmp_watchlist_vol is None, tmp_watchlist_vol == 'None', tmp_watchlist_vol == '']):
+                                logger.fdebug(module + '[ISSUE-VERIFY][NO VOLUME PRESENT] No Volume label present for series. Dropping down to Issue Year matching.')
+                                datematch = "False"
-                                elif len(watchvals) == 1 and int(tmp_watchlist_vol) == 1:
-                                    logger.fdebug(module + '[ISSUE-VERIFY][Lone Volume MATCH] Volume label of ' + str(watch_values['ComicVersion']) + ' indicates only volume for this series on your watchlist.')
-                                elif int(tmp_watchlist_vol) > 1:
-                                    logger.fdebug(module + '[ISSUE-VERIFY][Lone Volume FAILURE] Volume label of ' + str(watch_values['ComicVersion']) + ' indicates that there is more than one volume for this series, but the one on your watchlist has no volume label set.')
-                                    datematch = "False"
+
+                        if datematch == 'True':
+                            if watchmatch['sub']:
+                                logger.fdebug('%s[SUB: %s][CLOCATION: %s]' % (module, watchmatch['sub'], watchmatch['comiclocation']))
+                                clocation = os.path.join(watchmatch['comiclocation'], watchmatch['sub'], helpers.conversion(watchmatch['comicfilename']))
+                            else:
+                                logger.fdebug('%s[CLOCATION] %s' % (module, watchmatch['comiclocation']))
+                                if self.issueid is not None and os.path.isfile(watchmatch['comiclocation']):
+                                    clocation = watchmatch['comiclocation']
+                                else:
+                                    clocation = os.path.join(watchmatch['comiclocation'],helpers.conversion(watchmatch['comicfilename']))
+                            annualtype = None
+                            if annchk == 'yes':
+                                if 'Annual' in isc['ReleaseComicName']:
+                                    annualtype = 'Annual'
+                                elif 'Special' in isc['ReleaseComicName']:
+                                    annualtype = 'Special'
+                            else:
+                                if 'Annual' in isc['ComicName']:
+                                    annualtype = 'Annual'
+                                elif 'Special' in isc['ComicName']:
+                                    annualtype = 'Special'
+
+                            manual_list.append({"ComicLocation": clocation,
+                                                "ComicID": cs['ComicID'],
+                                                "IssueID": isc['IssueID'],
+                                                "IssueNumber": isc['Issue_Number'],
+                                                "AnnualType": annualtype,
+                                                "ComicName": cs['ComicName'],
+                                                "Series": watchmatch['series_name'],
+                                                "AltSeries": watchmatch['alt_series'],
+                                                "One-Off": False})
+                            break
+                        if datematch == "False" and all([watchmatch['issue_year'] is not None, watchmatch['issue_year'] != 'None', watch_issueyear is not None]):
+                            #now we see if the issue year matches exactly to what we have within Mylar.
+                            if int(watch_issueyear) == int(watchmatch['issue_year']):
+                                logger.fdebug(module + '[ISSUE-VERIFY][Issue Year MATCH] Issue Year of ' + str(watch_issueyear) + ' is a match to the year found in the filename of : ' + str(watchmatch['issue_year']))
+                                datematch = 'True'
-                            else:
-                                logger.fdebug(module + '[NON-MATCH: ' + cs['ComicName'] + '-' + cs['ComicID'] + '] Incorrect series - not populating..continuing post-processing')
-                                continue
+                            else:
+                                logger.fdebug(module + '[ISSUE-VERIFY][Issue Year FAILURE] Issue Year of ' + str(watch_issueyear) + ' does NOT match the year found in the filename of : ' + str(watchmatch['issue_year']))
+                                logger.fdebug(module + '[ISSUE-VERIFY] Checking against complete date to see if month published could allow for different publication year.')
+                                if issyr is not None:
+                                    if int(issyr) != int(watchmatch['issue_year']):
+                                        logger.fdebug(module + '[ISSUE-VERIFY][Issue Year FAILURE] Modified Issue year of ' + str(issyr) + ' is before the modified issue year of ' + str(issyr))
+                                    else:
+                                        logger.fdebug(module + '[ISSUE-VERIFY][Issue Year MATCH] Modified Issue Year of ' + str(issyr) + ' is a match to the year found in the filename of : ' + str(watchmatch['issue_year']))
+                                        datematch = 'True'

-                        if datematch == 'True':
-                            if watchmatch['sub']:
-                                logger.fdebug('%s[SUB: %s][CLOCATION: %s]' % (module, watchmatch['sub'], watchmatch['comiclocation']))
-                                clocation = os.path.join(watchmatch['comiclocation'], watchmatch['sub'], helpers.conversion(watchmatch['comicfilename']))
-                            else:
-                                logger.fdebug('%s[CLOCATION] %s' % (module, watchmatch['comiclocation']))
-                                if self.issueid is not None and os.path.isfile(watchmatch['comiclocation']):
-                                    clocation = watchmatch['comiclocation']
-                                else:
-                                    clocation = os.path.join(watchmatch['comiclocation'],helpers.conversion(watchmatch['comicfilename']))
||||
annualtype = None
|
||||
if annchk == 'yes':
|
||||
if 'Annual' in isc['ReleaseComicName']:
|
||||
annualtype = 'Annual'
|
||||
elif 'Special' in isc['ReleaseComicName']:
|
||||
annualtype = 'Special'
|
||||
else:
|
||||
if 'Annual' in isc['ComicName']:
|
||||
annualtype = 'Annual'
|
||||
elif 'Special' in isc['ComicName']:
|
||||
annualtype = 'Special'
|
||||
|
||||
manual_list.append({"ComicLocation": clocation,
|
||||
"ComicID": cs['ComicID'],
|
||||
"IssueID": isc['IssueID'],
|
||||
"IssueNumber": isc['Issue_Number'],
|
||||
"AnnualType": annualtype,
|
||||
"ComicName": cs['ComicName'],
|
||||
"Series": watchmatch['series_name'],
|
||||
"AltSeries": watchmatch['alt_series'],
|
||||
"One-Off": False})
|
||||
break
|
||||
else:
|
||||
logger.fdebug(module + '[NON-MATCH: ' + cs['ComicName'] + '-' + cs['ComicID'] + '] Incorrect series - not populating..continuing post-processing')
|
||||
continue
|
||||
else:
|
||||
logger.fdebug(module + '[NON-MATCH: ' + cs['ComicName'] + '-' + cs['ComicID'] + '] Incorrect series - not populating..continuing post-processing')
|
||||
continue
|
||||
|
||||
xmld = filechecker.FileChecker()
|
||||
xmld1 = xmld.dynamic_replace(helpers.conversion(cs['ComicName']))
|
||||
xseries = xmld1['mod_seriesname'].lower()
|
||||
xmld2 = xmld.dynamic_replace(helpers.conversion(watchmatch['series_name']))
|
||||
xfile = xmld2['mod_seriesname'].lower()
|
||||
if datematch == 'True':
|
||||
xmld = filechecker.FileChecker()
|
||||
xmld1 = xmld.dynamic_replace(helpers.conversion(cs['ComicName']))
|
||||
xseries = xmld1['mod_seriesname'].lower()
|
||||
xmld2 = xmld.dynamic_replace(helpers.conversion(watchmatch['series_name']))
|
||||
xfile = xmld2['mod_seriesname'].lower()
|
||||
|
||||
if re.sub('\|', '', xseries) == re.sub('\|', '', xfile):
|
||||
logger.fdebug('%s[DEFINITIVE-NAME MATCH] Definitive name match exactly to : %s [%s]' % (module, watchmatch['series_name'], cs['ComicID']))
|
||||
if len(manual_list) > 1:
|
||||
manual_list = [item for item in manual_list if all([item['IssueID'] == isc['IssueID'], item['AnnualType'] is not None]) or all([item['IssueID'] == isc['IssueID'], item['ComicLocation'] == clocation]) or all([item['IssueID'] != isc['IssueID'], item['ComicLocation'] != clocation])]
|
||||
self.matched = True
|
||||
else:
|
||||
continue #break
|
||||
if re.sub('\|', '', xseries) == re.sub('\|', '', xfile):
|
||||
logger.fdebug('%s[DEFINITIVE-NAME MATCH] Definitive name match exactly to : %s [%s]' % (module, watchmatch['series_name'], cs['ComicID']))
|
||||
if len(manual_list) > 1:
|
||||
manual_list = [item for item in manual_list if all([item['IssueID'] == isc['IssueID'], item['AnnualType'] is not None]) or all([item['IssueID'] == isc['IssueID'], item['ComicLocation'] == clocation]) or all([item['IssueID'] != isc['IssueID'], item['ComicLocation'] != clocation])]
|
||||
self.matched = True
|
||||
else:
|
||||
continue #break
|
||||
|
||||
if datematch == 'True':
|
||||
logger.fdebug(module + '[SUCCESSFUL MATCH: ' + cs['ComicName'] + '-' + cs['ComicID'] + '] Match verified for ' + helpers.conversion(fl['comicfilename']))
|
||||
break
|
||||
elif self.matched is True:
|
||||
logger.warn(module + '[MATCH: %s - %s] We matched by name for this series, but cannot find a corresponding issue number in the series list.' % (cs['ComicName'], cs['ComicID']))
|
||||
|
||||
#mlp = []
|
||||
|
||||
#xmld = filechecker.FileChecker()
|
||||
|
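The dedupe comprehension above is dense; a minimal standalone sketch of the same rule (with hypothetical sample records, not Mylar's actual rows) shows which entries survive when more than one candidate matched:

# Keep an entry when it is the matched issue (same IssueID) at the matched
# location or flagged as an annual/special, or when it is a different issue
# at a different location; anything else is a duplicate of the match.
def dedupe(manual_list, issueid, clocation):
    return [item for item in manual_list
            if (item['IssueID'] == issueid and item['AnnualType'] is not None)
            or (item['IssueID'] == issueid and item['ComicLocation'] == clocation)
            or (item['IssueID'] != issueid and item['ComicLocation'] != clocation)]

sample = [{'IssueID': '1', 'AnnualType': None, 'ComicLocation': '/comics/a.cbz'},
          {'IssueID': '1', 'AnnualType': None, 'ComicLocation': '/comics/b.cbz'},
          {'IssueID': '2', 'AnnualType': None, 'ComicLocation': '/comics/c.cbz'}]
print(dedupe(sample, '1', '/comics/a.cbz'))  # drops the duplicate b.cbz entry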
@@ -805,9 +892,10 @@ class PostProcessor(object):
from collections import defaultdict
res = defaultdict(list)
for acv in arcvals:
res[acv['ComicName']].append({"ArcValues": acv['ArcValues'],
"WatchValues": acv['WatchValues']})

acv_check = [x for x in manual_list if x['ComicID'] == acv['WatchValues']['ComicID']]
if acv_check:
res[acv['ComicName']].append({"ArcValues": acv['ArcValues'],
"WatchValues": acv['WatchValues']})
if len(res) > 0:
logger.fdebug('%s Now Checking if %s issue(s) may also reside in one of the storyarc\'s that I am watching.' % (module, len(res)))
for k,v in res.items():
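A short sketch of the grouping above: each arc hit is collected under its series name, so one series with several arc matches yields a single key holding a list of hits. The dicts here are placeholders standing in for Mylar's ArcValues/WatchValues rows:

from collections import defaultdict

arcvals = [{'ComicName': 'Secret Wars', 'ArcValues': {'arc': 1}, 'WatchValues': {'ComicID': '100'}},
           {'ComicName': 'Secret Wars', 'ArcValues': {'arc': 2}, 'WatchValues': {'ComicID': '100'}}]
res = defaultdict(list)
for acv in arcvals:
    # append every hit under the series name; duplicates group naturally
    res[acv['ComicName']].append({'ArcValues': acv['ArcValues'],
                                  'WatchValues': acv['WatchValues']})
print(len(res))  # 1 series key, holding 2 grouped hits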
@@ -941,7 +1029,7 @@ class PostProcessor(object):
oneofflist = myDB.select("select s.Issue_Number, s.ComicName, s.IssueID, s.ComicID, s.Provider, w.PUBLISHER, w.weeknumber, w.year from snatched as s inner join nzblog as n on s.IssueID = n.IssueID inner join weekly as w on s.IssueID = w.IssueID WHERE n.OneOff = 1;") #(s.Provider ='32P' or s.Provider='WWT' or s.Provider='DEM') AND n.OneOff = 1;")
#oneofflist = myDB.select("select s.Issue_Number, s.ComicName, s.IssueID, s.ComicID, s.Provider, w.PUBLISHER, w.weeknumber, w.year from snatched as s inner join nzblog as n on s.IssueID = n.IssueID and s.Hash is not NULL inner join weekly as w on s.IssueID = w.IssueID WHERE n.OneOff = 1;") #(s.Provider ='32P' or s.Provider='WWT' or s.Provider='DEM') AND n.OneOff = 1;")
if not oneofflist:
continue
pass #continue
else:
logger.fdebug(module + '[ONEOFF-SELECTION][self.nzb_name: %s]' % self.nzb_name)
oneoffvals = []
@@ -1011,8 +1099,8 @@ class PostProcessor(object):
self.matched = True
break

logger.fdebug('%s There are %s files found that match on your watchlist, %s files are considered one-off\'s, and %s files do not match anything' % (module, len(manual_list), len(oneoff_issuelist), int(filelist['comiccount']) - len(manual_list)))
if filelist['comiccount'] > 0:
logger.fdebug('%s There are %s files found that match on your watchlist, %s files are considered one-off\'s, and %s files do not match anything' % (module, len(manual_list), len(oneoff_issuelist), int(filelist['comiccount']) - len(manual_list)))

delete_arc = []
if len(manual_arclist) > 0:
@@ -1133,7 +1221,7 @@ class PostProcessor(object):

logger.fdebug(module + ' [' + ml['StoryArc'] + '] Post-Processing completed for: ' + grab_dst)

if (all([self.nzb_name != 'Manual Run', self.apicall is False]) or self.oneoffinlist is True) and not self.nzb_name.startswith('0-Day') and self.issuearcid is None: # and all([self.issueid is None, self.comicid is None, self.apicall is False]):
if (all([self.nzb_name != 'Manual Run', self.apicall is False]) or (self.oneoffinlist is True or all([self.issuearcid is not None, self.issueid is None]))) and not self.nzb_name.startswith('0-Day'): # and all([self.issueid is None, self.comicid is None, self.apicall is False]):
ppinfo = []
if self.oneoffinlist is False:
nzbname = self.nzb_name
@@ -1284,10 +1372,15 @@ class PostProcessor(object):
if any([self.nzb_name == 'Manual Run', self.issueid is not None, self.comicid is not None, self.apicall is True]):
#loop through the hits here.
if len(manual_list) == 0 and len(manual_arclist) == 0:
logger.info(module + ' No matches for Manual Run ... exiting.')
if self.nzb_name == 'Manual Run':
logger.info(module + ' No matches for Manual Run ... exiting.')
if mylar.APILOCK is True:
mylar.APILOCK = False
return
elif len(manual_arclist) > 0 and len(manual_list) == 0:
logger.info(module + ' Manual post-processing completed for ' + str(len(manual_arclist)) + ' story-arc issues.')
if mylar.APILOCK is True:
mylar.APILOCK = False
return
elif len(manual_arclist) > 0:
logger.info(module + ' Manual post-processing completed for ' + str(len(manual_arclist)) + ' story-arc issues.')

@@ -129,7 +129,7 @@ PP_QUEUE = Queue.Queue()
SEARCH_QUEUE = Queue.Queue()
SEARCH_TIER_DATE = None
COMICSORT = None
PULLBYFILE = None
PULLBYFILE = False
CFG = None
CURRENT_WEEKNUMBER = None
CURRENT_YEAR = None
@@ -243,7 +243,7 @@ def initialize(config_file):
CVURL = 'https://comicvine.gamespot.com/api/'

#set default URL for Public trackers (just in case it changes more frequently)
WWTURL = 'https://worldwidetorrents.me/'
WWTURL = 'https://worldwidetorrents.to/'
DEMURL = 'https://www.demonoid.pw/'

if CONFIG.LOCMOVE:
@@ -343,27 +343,30 @@ def start():
SCHED_RSS_LAST = monitors['rss']

# Start our scheduled background tasks
SCHED.add_job(func=updater.dbUpdate, id='dbupdater', name='DB Updater', args=[None,None,True], trigger=IntervalTrigger(hours=5, minutes=5, timezone='UTC'))
if UPDATER_STATUS != 'Paused':
SCHED.add_job(func=updater.dbUpdate, id='dbupdater', name='DB Updater', args=[None,None,True], trigger=IntervalTrigger(hours=0, minutes=5, timezone='UTC'))
logger.info('DB Updater scheduled to fire every 5 minutes')

#let's do a run at the Wanted issues here (on startup) if enabled.
ss = searchit.CurrentSearcher()
if CONFIG.NZB_STARTUP_SEARCH:
SCHED.add_job(func=ss.run, id='search', next_run_time=datetime.datetime.utcnow(), name='Auto-Search', trigger=IntervalTrigger(hours=0, minutes=CONFIG.SEARCH_INTERVAL, timezone='UTC'))
else:
if SCHED_SEARCH_LAST is not None:
search_timestamp = float(SCHED_SEARCH_LAST)
logger.fdebug('[AUTO-SEARCH] Search last run @ %s' % datetime.datetime.utcfromtimestamp(search_timestamp))
if SEARCH_STATUS != 'Paused':
ss = searchit.CurrentSearcher()
if CONFIG.NZB_STARTUP_SEARCH:
SCHED.add_job(func=ss.run, id='search', next_run_time=datetime.datetime.utcnow(), name='Auto-Search', trigger=IntervalTrigger(hours=0, minutes=CONFIG.SEARCH_INTERVAL, timezone='UTC'))
else:
search_timestamp = helpers.utctimestamp() + (int(CONFIG.SEARCH_INTERVAL) *60)
if SCHED_SEARCH_LAST is not None:
search_timestamp = float(SCHED_SEARCH_LAST)
logger.fdebug('[AUTO-SEARCH] Search last run @ %s' % datetime.datetime.utcfromtimestamp(search_timestamp))
else:
search_timestamp = helpers.utctimestamp() + (int(CONFIG.SEARCH_INTERVAL) *60)

duration_diff = (helpers.utctimestamp() - search_timestamp)/60
if duration_diff >= int(CONFIG.SEARCH_INTERVAL):
logger.fdebug('[AUTO-SEARCH] Auto-Search set to a delay of one minute before initialization as it has been %s minutes since the last run' % duration_diff)
SCHED.add_job(func=ss.run, id='search', name='Auto-Search', trigger=IntervalTrigger(hours=0, minutes=CONFIG.SEARCH_INTERVAL, timezone='UTC'))
else:
search_diff = datetime.datetime.utcfromtimestamp(helpers.utctimestamp() + ((int(CONFIG.SEARCH_INTERVAL) * 60) - (duration_diff*60)))
logger.fdebug('[AUTO-SEARCH] Scheduling next run @ %s every %s minutes' % (search_diff, CONFIG.SEARCH_INTERVAL))
SCHED.add_job(func=ss.run, id='search', name='Auto-Search', next_run_time=search_diff, trigger=IntervalTrigger(hours=0, minutes=CONFIG.SEARCH_INTERVAL, timezone='UTC'))
duration_diff = (helpers.utctimestamp() - search_timestamp)/60
if duration_diff >= int(CONFIG.SEARCH_INTERVAL):
logger.fdebug('[AUTO-SEARCH] Auto-Search set to a delay of one minute before initialization as it has been %s minutes since the last run' % duration_diff)
SCHED.add_job(func=ss.run, id='search', name='Auto-Search', trigger=IntervalTrigger(hours=0, minutes=CONFIG.SEARCH_INTERVAL, timezone='UTC'))
else:
search_diff = datetime.datetime.utcfromtimestamp(helpers.utctimestamp() + ((int(CONFIG.SEARCH_INTERVAL) * 60) - (duration_diff*60)))
logger.fdebug('[AUTO-SEARCH] Scheduling next run @ %s every %s minutes' % (search_diff, CONFIG.SEARCH_INTERVAL))
SCHED.add_job(func=ss.run, id='search', name='Auto-Search', next_run_time=search_diff, trigger=IntervalTrigger(hours=0, minutes=CONFIG.SEARCH_INTERVAL, timezone='UTC'))

if all([CONFIG.ENABLE_TORRENTS, CONFIG.AUTO_SNATCH, OS_DETECT != 'Windows']) and any([CONFIG.TORRENT_DOWNLOADER == 2, CONFIG.TORRENT_DOWNLOADER == 4]):
logger.info('[AUTO-SNATCHER] Auto-Snatch of completed torrents enabled & attempting to background load....')
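The next-run arithmetic above boils down to one rule; a minimal sketch, assuming a last-run epoch timestamp and an interval in minutes (standalone helpers, not Mylar's actual names): if the interval has already elapsed, fire about a minute out, otherwise schedule for whatever remains of the interval.

import datetime, time

def next_run(last_run_ts, interval_mins):
    now = time.time()
    elapsed_mins = (now - last_run_ts) / 60
    if elapsed_mins >= interval_mins:
        # overdue: fire roughly one minute from now
        return datetime.datetime.utcfromtimestamp(now + 60)
    # otherwise fire when the remainder of the interval runs out
    remaining_secs = (interval_mins * 60) - (elapsed_mins * 60)
    return datetime.datetime.utcfromtimestamp(now + remaining_secs)

print(next_run(time.time() - 3600, 360))  # last ran 1h ago on a 6h interval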
@@ -420,17 +423,18 @@ def start():
ws = weeklypullit.Weekly()
duration_diff = (weektimestamp - weekly_timestamp)/60

if abs(duration_diff) >= weekly_interval/60:
logger.info('[WEEKLY] Weekly Pull-Update initializing immediately as it has been %s hours since the last run' % abs(duration_diff/60))
SCHED.add_job(func=ws.run, id='weekly', name='Weekly Pullist', next_run_time=datetime.datetime.utcnow(), trigger=IntervalTrigger(hours=weektimer, minutes=0, timezone='UTC'))
else:
weekly_diff = datetime.datetime.utcfromtimestamp(weektimestamp + (weekly_interval - (duration_diff * 60)))
logger.fdebug('[WEEKLY] Scheduling next run for @ %s every %s hours' % (weekly_diff, weektimer))
SCHED.add_job(func=ws.run, id='weekly', name='Weekly Pullist', next_run_time=weekly_diff, trigger=IntervalTrigger(hours=weektimer, minutes=0, timezone='UTC'))
if WEEKLY_STATUS != 'Paused':
if abs(duration_diff) >= weekly_interval/60:
logger.info('[WEEKLY] Weekly Pull-Update initializing immediately as it has been %s hours since the last run' % abs(duration_diff/60))
SCHED.add_job(func=ws.run, id='weekly', name='Weekly Pullist', next_run_time=datetime.datetime.utcnow(), trigger=IntervalTrigger(hours=weektimer, minutes=0, timezone='UTC'))
else:
weekly_diff = datetime.datetime.utcfromtimestamp(weektimestamp + (weekly_interval - (duration_diff * 60)))
logger.fdebug('[WEEKLY] Scheduling next run for @ %s every %s hours' % (weekly_diff, weektimer))
SCHED.add_job(func=ws.run, id='weekly', name='Weekly Pullist', next_run_time=weekly_diff, trigger=IntervalTrigger(hours=weektimer, minutes=0, timezone='UTC'))

#initiate startup rss feeds for torrents/nzbs here...
rs = rsscheckit.tehMain()
if CONFIG.ENABLE_RSS:
if CONFIG.ENABLE_RSS is True:
logger.info('[RSS-FEEDS] Initiating startup-RSS feed checks.')
if SCHED_RSS_LAST is not None:
rss_timestamp = float(SCHED_RSS_LAST)
@@ -444,13 +448,16 @@ def start():
rss_diff = datetime.datetime.utcfromtimestamp(helpers.utctimestamp() + (int(CONFIG.RSS_CHECKINTERVAL) * 60) - (duration_diff * 60))
logger.fdebug('[RSS-FEEDS] Scheduling next run for @ %s every %s minutes' % (rss_diff, CONFIG.RSS_CHECKINTERVAL))
SCHED.add_job(func=rs.run, id='rss', name='RSS Feeds', args=[True], next_run_time=rss_diff, trigger=IntervalTrigger(hours=0, minutes=int(CONFIG.RSS_CHECKINTERVAL), timezone='UTC'))
#else:
else:
RSS_STATUS = 'Paused'
# SCHED.add_job(func=rs.run, id='rss', name='RSS Feeds', args=[True], trigger=IntervalTrigger(hours=0, minutes=int(CONFIG.RSS_CHECKINTERVAL), timezone='UTC'))
# SCHED.pause_job('rss')

if CONFIG.CHECK_GITHUB:
vs = versioncheckit.CheckVersion()
SCHED.add_job(func=vs.run, id='version', name='Check Version', trigger=IntervalTrigger(hours=0, minutes=CONFIG.CHECK_GITHUB_INTERVAL, timezone='UTC'))
else:
VERSION_STATUS = 'Paused'

##run checkFolder every X minutes (basically Manual Run Post-Processing)
if CONFIG.ENABLE_CHECK_FOLDER:
@@ -460,6 +467,8 @@ def start():
SCHED.add_job(func=fm.run, id='monitor', name='Folder Monitor', trigger=IntervalTrigger(hours=0, minutes=int(CONFIG.DOWNLOAD_SCAN_INTERVAL), timezone='UTC'))
else:
logger.error('[FOLDER MONITOR] You need to specify a monitoring time for the check folder option to work')
else:
MONITOR_STATUS = 'Paused'

logger.info('Firing up the Background Schedulers now....')
try:
@@ -482,21 +491,21 @@ def dbcheck():
c.execute('SELECT ReleaseDate from storyarcs')
except sqlite3.OperationalError:
try:
c.execute('CREATE TABLE IF NOT EXISTS storyarcs(StoryArcID TEXT, ComicName TEXT, IssueNumber TEXT, SeriesYear TEXT, IssueYEAR TEXT, StoryArc TEXT, TotalIssues TEXT, Status TEXT, inCacheDir TEXT, Location TEXT, IssueArcID TEXT, ReadingOrder INT, IssueID TEXT, ComicID TEXT, ReleaseDate TEXT, IssueDate TEXT, Publisher TEXT, IssuePublisher TEXT, IssueName TEXT, CV_ArcID TEXT, Int_IssueNumber INT, DynamicComicName TEXT, Volume TEXT, Manual TEXT, DateAdded TEXT)')
c.execute('CREATE TABLE IF NOT EXISTS storyarcs(StoryArcID TEXT, ComicName TEXT, IssueNumber TEXT, SeriesYear TEXT, IssueYEAR TEXT, StoryArc TEXT, TotalIssues TEXT, Status TEXT, inCacheDir TEXT, Location TEXT, IssueArcID TEXT, ReadingOrder INT, IssueID TEXT, ComicID TEXT, ReleaseDate TEXT, IssueDate TEXT, Publisher TEXT, IssuePublisher TEXT, IssueName TEXT, CV_ArcID TEXT, Int_IssueNumber INT, DynamicComicName TEXT, Volume TEXT, Manual TEXT, DateAdded TEXT, DigitalDate TEXT)')
c.execute('INSERT INTO storyarcs(StoryArcID, ComicName, IssueNumber, SeriesYear, IssueYEAR, StoryArc, TotalIssues, Status, inCacheDir, Location, IssueArcID, ReadingOrder, IssueID, ComicID, ReleaseDate, IssueDate, Publisher, IssuePublisher, IssueName, CV_ArcID, Int_IssueNumber, DynamicComicName, Volume, Manual) SELECT StoryArcID, ComicName, IssueNumber, SeriesYear, IssueYEAR, StoryArc, TotalIssues, Status, inCacheDir, Location, IssueArcID, ReadingOrder, IssueID, ComicID, StoreDate, IssueDate, Publisher, IssuePublisher, IssueName, CV_ArcID, Int_IssueNumber, DynamicComicName, Volume, Manual FROM readinglist')
c.execute('DROP TABLE readinglist')
except sqlite3.OperationalError:
logger.warn('Unable to update readinglist table to new storyarc table format.')

c.execute('CREATE TABLE IF NOT EXISTS comics (ComicID TEXT UNIQUE, ComicName TEXT, ComicSortName TEXT, ComicYear TEXT, DateAdded TEXT, Status TEXT, IncludeExtras INTEGER, Have INTEGER, Total INTEGER, ComicImage TEXT, ComicPublisher TEXT, ComicLocation TEXT, ComicPublished TEXT, NewPublish TEXT, LatestIssue TEXT, LatestDate TEXT, Description TEXT, QUALalt_vers TEXT, QUALtype TEXT, QUALscanner TEXT, QUALquality TEXT, LastUpdated TEXT, AlternateSearch TEXT, UseFuzzy TEXT, ComicVersion TEXT, SortOrder INTEGER, DetailURL TEXT, ForceContinuing INTEGER, ComicName_Filesafe TEXT, AlternateFileName TEXT, ComicImageURL TEXT, ComicImageALTURL TEXT, DynamicComicName TEXT, AllowPacks TEXT, Type TEXT, Corrected_SeriesYear TEXT, TorrentID_32P TEXT, LatestIssueID TEXT)')
c.execute('CREATE TABLE IF NOT EXISTS issues (IssueID TEXT, ComicName TEXT, IssueName TEXT, Issue_Number TEXT, DateAdded TEXT, Status TEXT, Type TEXT, ComicID TEXT, ArtworkURL Text, ReleaseDate TEXT, Location TEXT, IssueDate TEXT, Int_IssueNumber INT, ComicSize TEXT, AltIssueNumber TEXT, IssueDate_Edit TEXT, ImageURL TEXT, ImageURL_ALT TEXT)')
c.execute('CREATE TABLE IF NOT EXISTS comics (ComicID TEXT UNIQUE, ComicName TEXT, ComicSortName TEXT, ComicYear TEXT, DateAdded TEXT, Status TEXT, IncludeExtras INTEGER, Have INTEGER, Total INTEGER, ComicImage TEXT, ComicPublisher TEXT, ComicLocation TEXT, ComicPublished TEXT, NewPublish TEXT, LatestIssue TEXT, LatestDate TEXT, Description TEXT, QUALalt_vers TEXT, QUALtype TEXT, QUALscanner TEXT, QUALquality TEXT, LastUpdated TEXT, AlternateSearch TEXT, UseFuzzy TEXT, ComicVersion TEXT, SortOrder INTEGER, DetailURL TEXT, ForceContinuing INTEGER, ComicName_Filesafe TEXT, AlternateFileName TEXT, ComicImageURL TEXT, ComicImageALTURL TEXT, DynamicComicName TEXT, AllowPacks TEXT, Type TEXT, Corrected_SeriesYear TEXT, Corrected_Type TEXT, TorrentID_32P TEXT, LatestIssueID TEXT)')
c.execute('CREATE TABLE IF NOT EXISTS issues (IssueID TEXT, ComicName TEXT, IssueName TEXT, Issue_Number TEXT, DateAdded TEXT, Status TEXT, Type TEXT, ComicID TEXT, ArtworkURL Text, ReleaseDate TEXT, Location TEXT, IssueDate TEXT, DigitalDate TEXT, Int_IssueNumber INT, ComicSize TEXT, AltIssueNumber TEXT, IssueDate_Edit TEXT, ImageURL TEXT, ImageURL_ALT TEXT)')
c.execute('CREATE TABLE IF NOT EXISTS snatched (IssueID TEXT, ComicName TEXT, Issue_Number TEXT, Size INTEGER, DateAdded TEXT, Status TEXT, FolderName TEXT, ComicID TEXT, Provider TEXT, Hash TEXT, crc TEXT)')
c.execute('CREATE TABLE IF NOT EXISTS upcoming (ComicName TEXT, IssueNumber TEXT, ComicID TEXT, IssueID TEXT, IssueDate TEXT, Status TEXT, DisplayComicName TEXT)')
c.execute('CREATE TABLE IF NOT EXISTS nzblog (IssueID TEXT, NZBName TEXT, SARC TEXT, PROVIDER TEXT, ID TEXT, AltNZBName TEXT, OneOff TEXT)')
c.execute('CREATE TABLE IF NOT EXISTS weekly (SHIPDATE TEXT, PUBLISHER TEXT, ISSUE TEXT, COMIC VARCHAR(150), EXTRA TEXT, STATUS TEXT, ComicID TEXT, IssueID TEXT, CV_Last_Update TEXT, DynamicName TEXT, weeknumber TEXT, year TEXT, volume TEXT, seriesyear TEXT, annuallink TEXT, rowid INTEGER PRIMARY KEY)')
c.execute('CREATE TABLE IF NOT EXISTS weekly (SHIPDATE TEXT, PUBLISHER TEXT, ISSUE TEXT, COMIC VARCHAR(150), EXTRA TEXT, STATUS TEXT, ComicID TEXT, IssueID TEXT, CV_Last_Update TEXT, DynamicName TEXT, weeknumber TEXT, year TEXT, volume TEXT, seriesyear TEXT, annuallink TEXT, format TEXT, rowid INTEGER PRIMARY KEY)')
c.execute('CREATE TABLE IF NOT EXISTS importresults (impID TEXT, ComicName TEXT, ComicYear TEXT, Status TEXT, ImportDate TEXT, ComicFilename TEXT, ComicLocation TEXT, WatchMatch TEXT, DisplayName TEXT, SRID TEXT, ComicID TEXT, IssueID TEXT, Volume TEXT, IssueNumber TEXT, DynamicName TEXT)')
c.execute('CREATE TABLE IF NOT EXISTS readlist (IssueID TEXT, ComicName TEXT, Issue_Number TEXT, Status TEXT, DateAdded TEXT, Location TEXT, inCacheDir TEXT, SeriesYear TEXT, ComicID TEXT, StatusChange TEXT)')
c.execute('CREATE TABLE IF NOT EXISTS annuals (IssueID TEXT, Issue_Number TEXT, IssueName TEXT, IssueDate TEXT, Status TEXT, ComicID TEXT, GCDComicID TEXT, Location TEXT, ComicSize TEXT, Int_IssueNumber INT, ComicName TEXT, ReleaseDate TEXT, ReleaseComicID TEXT, ReleaseComicName TEXT, IssueDate_Edit TEXT, DateAdded TEXT)')
c.execute('CREATE TABLE IF NOT EXISTS annuals (IssueID TEXT, Issue_Number TEXT, IssueName TEXT, IssueDate TEXT, Status TEXT, ComicID TEXT, GCDComicID TEXT, Location TEXT, ComicSize TEXT, Int_IssueNumber INT, ComicName TEXT, ReleaseDate TEXT, DigitalDate TEXT, ReleaseComicID TEXT, ReleaseComicName TEXT, IssueDate_Edit TEXT, DateAdded TEXT)')
c.execute('CREATE TABLE IF NOT EXISTS rssdb (Title TEXT UNIQUE, Link TEXT, Pubdate TEXT, Site TEXT, Size TEXT)')
c.execute('CREATE TABLE IF NOT EXISTS futureupcoming (ComicName TEXT, IssueNumber TEXT, ComicID TEXT, IssueID TEXT, IssueDate TEXT, Publisher TEXT, Status TEXT, DisplayComicName TEXT, weeknumber TEXT, year TEXT)')
c.execute('CREATE TABLE IF NOT EXISTS failed (ID TEXT, Status TEXT, ComicID TEXT, IssueID TEXT, Provider TEXT, ComicName TEXT, Issue_Number TEXT, NZBName TEXT, DateFailed TEXT)')
@@ -505,6 +514,7 @@ def dbcheck():
c.execute('CREATE TABLE IF NOT EXISTS oneoffhistory (ComicName TEXT, IssueNumber TEXT, ComicID TEXT, IssueID TEXT, Status TEXT, weeknumber TEXT, year TEXT)')
c.execute('CREATE TABLE IF NOT EXISTS jobhistory (JobName TEXT, prev_run_datetime timestamp, prev_run_timestamp REAL, next_run_datetime timestamp, next_run_timestamp REAL, last_run_completed TEXT, successful_completions TEXT, failed_completions TEXT, status TEXT)')
c.execute('CREATE TABLE IF NOT EXISTS manualresults (provider TEXT, id TEXT, kind TEXT, comicname TEXT, volume TEXT, oneoff TEXT, fullprov TEXT, issuenumber TEXT, modcomicname TEXT, name TEXT, link TEXT, size TEXT, pack_numbers TEXT, pack_issuelist TEXT, comicyear TEXT, issuedate TEXT, tmpprov TEXT, pack TEXT, issueid TEXT, comicid TEXT, sarc TEXT, issuearcid TEXT)')
c.execute('CREATE TABLE IF NOT EXISTS storyarcs(StoryArcID TEXT, ComicName TEXT, IssueNumber TEXT, SeriesYear TEXT, IssueYEAR TEXT, StoryArc TEXT, TotalIssues TEXT, Status TEXT, inCacheDir TEXT, Location TEXT, IssueArcID TEXT, ReadingOrder INT, IssueID TEXT, ComicID TEXT, ReleaseDate TEXT, IssueDate TEXT, Publisher TEXT, IssuePublisher TEXT, IssueName TEXT, CV_ArcID TEXT, Int_IssueNumber INT, DynamicComicName TEXT, Volume TEXT, Manual TEXT, DateAdded TEXT, DigitalDate TEXT)')
conn.commit()
c.close()

@@ -609,6 +619,11 @@ def dbcheck():
except sqlite3.OperationalError:
c.execute('ALTER TABLE comics ADD COLUMN Corrected_SeriesYear TEXT')

try:
c.execute('SELECT Corrected_Type from comics')
except sqlite3.OperationalError:
c.execute('ALTER TABLE comics ADD COLUMN Corrected_Type TEXT')

try:
c.execute('SELECT TorrentID_32P from comics')
except sqlite3.OperationalError:
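Every schema check in dbcheck() follows the same idempotent pattern: probe for the column with a SELECT and add it only when sqlite raises OperationalError. A minimal standalone sketch of that pattern, with hypothetical table and column names:

import sqlite3

def ensure_column(conn, table, column, coltype='TEXT'):
    c = conn.cursor()
    try:
        # probing SELECT fails with OperationalError when the column is absent
        c.execute('SELECT %s from %s' % (column, table))
    except sqlite3.OperationalError:
        c.execute('ALTER TABLE %s ADD COLUMN %s %s' % (table, column, coltype))
        conn.commit()

conn = sqlite3.connect(':memory:')
conn.execute('CREATE TABLE comics (ComicID TEXT)')
ensure_column(conn, 'comics', 'Corrected_Type')  # safe to call repeatedly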
@@ -619,6 +634,11 @@ def dbcheck():
except sqlite3.OperationalError:
c.execute('ALTER TABLE comics ADD COLUMN LatestIssueID TEXT')

try:
c.execute('SELECT Collects from comics')
except sqlite3.OperationalError:
c.execute('ALTER TABLE comics ADD COLUMN Collects CLOB')

try:
c.execute('SELECT DynamicComicName from comics')
if CONFIG.DYNAMIC_UPDATE < 3:
@@ -661,6 +681,11 @@ def dbcheck():
except sqlite3.OperationalError:
c.execute('ALTER TABLE issues ADD COLUMN ImageURL_ALT TEXT')

try:
c.execute('SELECT DigitalDate from issues')
except sqlite3.OperationalError:
c.execute('ALTER TABLE issues ADD COLUMN DigitalDate TEXT')

## -- ImportResults Table --

try:
@@ -812,6 +837,11 @@ def dbcheck():
except sqlite3.OperationalError:
c.execute('ALTER TABLE weekly ADD COLUMN annuallink TEXT')

try:
c.execute('SELECT format from weekly')
except sqlite3.OperationalError:
c.execute('ALTER TABLE weekly ADD COLUMN format TEXT')

## -- Nzblog Table --

try:
@@ -892,6 +922,11 @@ def dbcheck():
except sqlite3.OperationalError:
c.execute('ALTER TABLE annuals ADD COLUMN DateAdded TEXT')

try:
c.execute('SELECT DigitalDate from annuals')
except sqlite3.OperationalError:
c.execute('ALTER TABLE annuals ADD COLUMN DigitalDate TEXT')

## -- Snatched Table --

try:
@@ -984,6 +1019,11 @@ def dbcheck():
except sqlite3.OperationalError:
c.execute('ALTER TABLE storyarcs ADD COLUMN DateAdded TEXT')

try:
c.execute('SELECT DigitalDate from storyarcs')
except sqlite3.OperationalError:
c.execute('ALTER TABLE storyarcs ADD COLUMN DigitalDate TEXT')

## -- searchresults Table --
try:
c.execute('SELECT SRID from searchresults')

@@ -216,6 +216,15 @@ def run(dirName, nzbName=None, issueid=None, comversion=None, manual=None, filen
tmpfilename = re.sub('\(Original deleted\)', '', tmpfilename).strip()
tmpf = tmpfilename.decode('utf-8')
filepath = os.path.join(comicpath, tmpf)
if filename.lower() != tmpf.lower() and tmpf.endswith('(1).cbz'):
logger.fdebug('New filename [%s] is named incorrectly due to duplication during metatagging - Making sure it\'s named correctly [%s].' % (tmpf, filename))
tmpfilename = filename
filepath_new = os.path.join(comicpath, tmpfilename)
try:
os.rename(filepath, filepath_new)
filepath = filepath_new
except:
logger.warn('%s unable to rename file to accommodate metatagging cbz to the same filename' % module)
if not os.path.isfile(filepath):
logger.fdebug(module + 'Trying utf-8 conversion.')
tmpf = tmpfilename.encode('utf-8')

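A short sketch of the duplicate-name recovery above: when tagging writes the cbz back out as 'Name (1).cbz', it gets renamed to the expected filename. The paths and function name here are hypothetical examples, not Mylar's own helpers.

import os

def fix_duplicate_name(comicpath, expected, produced):
    # tagging sometimes emits 'Name (1).cbz'; fold it back to the expected name
    if produced.lower() != expected.lower() and produced.endswith('(1).cbz'):
        src = os.path.join(comicpath, produced)
        dst = os.path.join(comicpath, expected)
        try:
            os.rename(src, dst)
            return dst
        except OSError:
            pass  # keep the produced name if the rename fails
    return os.path.join(comicpath, produced)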
@@ -136,6 +136,7 @@ _CONFIG_DEFINITIONS = OrderedDict({
'ADD_COMICS': (bool, 'Import', False),
'COMIC_DIR': (str, 'Import', None),
'IMP_MOVE': (bool, 'Import', False),
'IMP_PATHS': (bool, 'Import', False),
'IMP_RENAME': (bool, 'Import', False),
'IMP_METADATA': (bool, 'Import', False), # should default to False - this is enabled for testing only.

@@ -279,6 +280,10 @@ _CONFIG_DEFINITIONS = OrderedDict({
'ENABLE_PUBLIC': (bool, 'Torrents', False),
'PUBLIC_VERIFY': (bool, 'Torrents', True),

'ENABLE_DDL': (bool, 'DDL', False),
'ALLOW_PACKS': (bool, 'DDL', False),
'DDL_LOCATION': (str, 'DDL', None),

'AUTO_SNATCH': (bool, 'AutoSnatch', False),
'AUTO_SNATCH_SCRIPT': (str, 'AutoSnatch', None),
'PP_SSHHOST': (str, 'AutoSnatch', None),
@@ -786,6 +791,12 @@ class Config(object):
logger.fdebug("Minimum RSS Interval Check delay set for 20 minutes to avoid hammering.")
self.RSS_CHECKINTERVAL = 20

if self.ENABLE_RSS is True and mylar.RSS_STATUS == 'Paused':
mylar.RSS_STATUS = 'Waiting'
elif self.ENABLE_RSS is False and mylar.RSS_STATUS == 'Waiting':
mylar.RSS_STATUS = 'Paused'
logger.info('self.enable_rss is %s [%s]' % (self.ENABLE_RSS, mylar.RSS_STATUS))

if not helpers.is_number(self.CHMOD_DIR):
logger.fdebug("CHMOD Directory value is not a valid numeric - please correct. Defaulting to 0777")
self.CHMOD_DIR = '0777'
@@ -832,6 +843,9 @@ class Config(object):
else:
logger.fdebug('Successfully created ComicTagger Settings location.')

if self.DDL_LOCATION is None:
self.DDL_LOCATION = self.CACHE_DIR

if self.MODE_32P is False and self.RSSFEED_32P is not None:
mylar.KEYS_32P = self.parse_32pfeed(self.RSSFEED_32P)

@@ -942,7 +956,11 @@ class Config(object):
PR.append('Experimental')
PR_NUM +=1

PPR = ['32p', 'public torrents', 'nzb.su', 'dognzb', 'Experimental']
if self.ENABLE_DDL:
PR.append('DDL')
PR_NUM +=1

PPR = ['32p', 'public torrents', 'nzb.su', 'dognzb', 'Experimental', 'DDL']
if self.NEWZNAB:
for ens in self.EXTRA_NEWZNABS:
if str(ens[5]) == '1': # if newznabs are enabled

mylar/cv.py
@@ -190,7 +190,7 @@ def getComic(comicid, type, issueid=None, arc=None, arcid=None, arclist=None, co
else:
tmpidlist += '|' + str(comicidlist[i])
in_cnt +=1
logger.info('tmpidlist: ' + str(tmpidlist))
logger.fdebug('tmpidlist: ' + str(tmpidlist))

searched = pulldetails(None, 'import', offset=0, comicidlist=tmpidlist)

@@ -285,8 +285,11 @@ def GetComicInfo(comicid, dom, safechk=None):

desdeck = 0
#the description field actually holds the Volume# - so let's grab it
desc_soup = None
try:
descchunk = dom.getElementsByTagName('description')[0].firstChild.wholeText
desc_soup = Soup(descchunk, "html.parser")
desclinks = desc_soup.findAll('a')
comic_desc = drophtml(descchunk)
desdeck +=1
except:
@@ -312,26 +315,117 @@ def GetComicInfo(comicid, dom, safechk=None):
comic['Aliases'] = 'None'

comic['ComicVersion'] = 'None' #noversion'
#logger.info('comic_desc:' + comic_desc)
#logger.info('comic_deck:' + comic_deck)
#logger.info('desdeck: ' + str(desdeck))

#figure out if it's a print / digital edition.
comic['Type'] = 'None'
if comic_deck != 'None':
if any(['print' in comic_deck.lower(), 'digital' in comic_deck.lower()]):
if any(['print' in comic_deck.lower(), 'digital' in comic_deck.lower(), 'paperback' in comic_deck.lower(), 'one shot' in re.sub('-', '', comic_deck.lower()).strip(), 'hardcover' in comic_deck.lower()]):
if 'print' in comic_deck.lower():
comic['Type'] = 'Print'
elif 'digital' in comic_deck.lower():
comic['Type'] = 'Digital'
comic['Type'] = 'Digital'
elif 'paperback' in comic_deck.lower():
comic['Type'] = 'TPB'
elif 'hardcover' in comic_deck.lower():
comic['Type'] = 'HC'
elif 'oneshot' in re.sub('-', '', comic_deck.lower()).strip():
comic['Type'] = 'One-Shot'

if comic_desc != 'None' and comic['Type'] == 'None':
if 'print' in comic_desc[:60].lower() and 'print edition can be found' not in comic_desc.lower():
comic['Type'] = 'Print'
elif 'digital' in comic_desc[:60].lower() and 'digital edition can be found' not in comic_desc.lower():
comic['Type'] = 'Digital'
elif all(['paperback' in comic_desc[:60].lower(), 'paperback can be found' not in comic_desc.lower()]) or 'collects' in comic_desc[:60].lower():
comic['Type'] = 'TPB'
elif 'hardcover' in comic_desc[:60].lower() and 'hardcover can be found' not in comic_desc.lower():
comic['Type'] = 'HC'
elif any(['one-shot' in comic_desc[:60].lower(), 'one shot' in comic_desc[:60].lower()]) and 'can be found' not in comic_desc.lower():
i = 0
comic['Type'] = 'One-Shot'
avoidwords = ['preceding', 'after the special']
while i < 2:
if i == 0:
cbd = 'one-shot'
elif i == 1:
cbd = 'one shot'
tmp1 = comic_desc[:60].lower().find(cbd)
if tmp1 != -1:
for x in avoidwords:
tmp2 = comic_desc[:tmp1].lower().find(x)
if tmp2 != -1:
logger.fdebug('FAKE NEWS: caught incorrect reference to one-shot. Forcing to Print')
comic['Type'] = 'Print'
i = 3
break
i+=1
else:
comic['Type'] = 'Print'

if all([comic_desc != 'None', 'trade paperback' in comic_desc[:30].lower(), 'collecting' in comic_desc[:40].lower()]):
#ie. Trade paperback collecting Marvel Team-Up #9-11, 48-51, 72, 110 & 145.
first_collect = comic_desc.lower().find('collecting')
#logger.info('first_collect: %s' % first_collect)
#logger.info('comic_desc: %s' % comic_desc)
#logger.info('desclinks: %s' % desclinks)
issue_list = []
micdrop = []
if desc_soup is not None:
#if it's point form bullets, ignore it cause it's not the current volume stuff.
test_it = desc_soup.find('ul')
if test_it:
for x in test_it.findAll('a'):
micdrop.append(x['data-ref-id'])

for fc in desclinks:
#logger.info('fc: %s' % fc)
fc_id = fc['data-ref-id']
#logger.info('fc_id: %s' % fc_id)
if fc_id in micdrop:
continue
fc_name = fc.findNext(text=True)
if fc_id.startswith('4000'):
fc_cid = None
fc_isid = fc_id
iss_start = fc_name.find('#')
issuerun = fc_name[iss_start:].strip()
fc_name = fc_name[:iss_start].strip()
elif fc_id.startswith('4050'):
fc_cid = fc_id
fc_isid = None
issuerun = fc.next_sibling
lines = re.sub("[^0-9]", ' ', issuerun).strip().split(' ')
if len(lines) > 0:
for x in sorted(lines, reverse=True):
srchline = issuerun.rfind(x)
if srchline != -1:
try:
if issuerun[srchline+len(x)] == ',' or issuerun[srchline+len(x)] == '.' or issuerun[srchline+len(x)] == ' ':
issuerun = issuerun[:srchline+len(x)]
break
except:
continue
if issuerun.endswith('.') or issuerun.endswith(','):
#logger.fdebug('Changed issuerun from %s to %s' % (issuerun, issuerun[:-1]))
issuerun = issuerun[:-1]
if issuerun.endswith(' and '):
issuerun = issuerun[:-4].strip()
elif issuerun.endswith(' and'):
issuerun = issuerun[:-3].strip()

# except:
# pass
issue_list.append({'series': fc_name,
'comicid': fc_cid,
'issueid': fc_isid,
'issues': issuerun})
#first_collect = cis

logger.info('Collected issues in volume: %s' % issue_list)
comic['Issue_List'] = issue_list
else:
comic['Issue_List'] = 'None'

while (desdeck > 0):
if desdeck == 1:
if comic_desc == 'None':
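A standalone sketch of the issue-run cleanup above: trim a trailing period or comma and any dangling ' and' left over when the description's sentence was cut mid-list. This mirrors the endswith chain in the diff, lifted into a small function for illustration:

def clean_issuerun(issuerun):
    # drop trailing punctuation left behind by the sentence slice
    if issuerun.endswith('.') or issuerun.endswith(','):
        issuerun = issuerun[:-1]
    # drop a dangling conjunction ('... 72, 110 and')
    if issuerun.endswith(' and '):
        issuerun = issuerun[:-4].strip()
    elif issuerun.endswith(' and'):
        issuerun = issuerun[:-3].strip()
    return issuerun

print(clean_issuerun('#9-11, 48-51, 72, 110 and'))  # '#9-11, 48-51, 72, 110'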
@@ -412,19 +506,7 @@ def GetComicInfo(comicid, dom, safechk=None):

comic['FirstIssueID'] = dom.getElementsByTagName('id')[0].firstChild.wholeText

# print ("firstIss:" + str(comic['FirstIssueID']))
# comicchoice.append({
# 'ComicName': comic['ComicName'],
# 'ComicYear': comic['ComicYear'],
# 'Comicid': comicid,
# 'ComicURL': comic['ComicURL'],
# 'ComicIssues': comic['ComicIssues'],
# 'ComicImage': comic['ComicImage'],
# 'ComicVolume': ParseVol,
# 'ComicPublisher': comic['ComicPublisher']
# })

# comic['comicchoice'] = comicchoice
#logger.info('comic: %s' % comic)
return comic

def GetIssuesInfo(comicid, dom, arcid=None):
@@ -495,6 +577,19 @@ def GetIssuesInfo(comicid, dom, arcid=None):
tempissue['StoreDate'] = subtrack.getElementsByTagName('store_date')[0].firstChild.wholeText
except:
tempissue['StoreDate'] = '0000-00-00'
try:
digital_desc = subtrack.getElementsByTagName('description')[0].firstChild.wholeText
except:
tempissue['DigitalDate'] = '0000-00-00'
else:
tempissue['DigitalDate'] = '0000-00-00'
if all(['digital' in digital_desc.lower()[-90:], 'print' in digital_desc.lower()[-90:]]):
#get the digital date of issue here...
mff = mylar.filechecker.FileChecker()
vlddate = mff.checkthedate(digital_desc[-90:], fulldate=True)
#logger.fdebug('vlddate: %s' % vlddate)
if vlddate:
tempissue['DigitalDate'] = vlddate
try:
tempissue['Issue_Number'] = subtrack.getElementsByTagName('issue_number')[0].firstChild.wholeText
except:
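A rough sketch of the digital-date extraction above, substituting a plain regex for Mylar's FileChecker.checkthedate() (which is internal and not shown here): only the tail of the description is scanned, and only when it mentions both digital and print editions.

import re

def digital_date(desc):
    tail = desc[-90:]
    # the digital-release note sits at the end of descriptions that cover both editions
    if 'digital' in tail.lower() and 'print' in tail.lower():
        m = re.search(r'([A-Z][a-z]+ \d{1,2},? \d{4})', tail)
        if m:
            return m.group(1)
    return '0000-00-00'

print(digital_date('... digital edition released December 5, 2018, print to follow'))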
@@ -517,6 +612,7 @@ def GetIssuesInfo(comicid, dom, arcid=None):
'Issue_Number': tempissue['Issue_Number'],
'Issue_Date': tempissue['CoverDate'],
'Store_Date': tempissue['StoreDate'],
'Digital_Date': tempissue['DigitalDate'],
'Issue_Name': tempissue['Issue_Name'],
'Image': tempissue['ComicImage'],
'ImageALT': tempissue['ComicImageALT']
@@ -531,6 +627,7 @@ def GetIssuesInfo(comicid, dom, arcid=None):
'Issue_Number': tempissue['Issue_Number'],
'Issue_Date': tempissue['CoverDate'],
'Store_Date': tempissue['StoreDate'],
'Digital_Date': tempissue['DigitalDate'],
'Issue_Name': tempissue['Issue_Name']
})

@@ -538,6 +635,7 @@ def GetIssuesInfo(comicid, dom, arcid=None):
firstdate = tempissue['CoverDate']
n -= 1

#logger.fdebug('issue_info: %s' % issuech)
#issue['firstdate'] = firstdate
return issuech, firstdate

@@ -817,8 +915,7 @@ def GetImportList(results):
return serieslist

def drophtml(html):
from bs4 import BeautifulSoup
soup = BeautifulSoup(html, "html.parser")
soup = Soup(html, "html.parser")

text_parts = soup.findAll(text=True)
#print ''.join(text_parts)
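A minimal sketch of what drophtml() does: BeautifulSoup parses the fragment and findAll(text=True) yields only the text nodes, which are joined back into plain prose. The function name here is a stand-in for illustration:

from bs4 import BeautifulSoup

def drop_html(html):
    soup = BeautifulSoup(html, "html.parser")
    # keep only non-empty text nodes, discarding all markup
    return ' '.join(t.strip() for t in soup.findAll(text=True) if t.strip())

print(drop_html('<p>Collects <a href="#">Marvel Team-Up</a> #9-11.</p>'))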
@@ -1,4 +1,5 @@
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# This file is part of Mylar.
#
# Mylar is free software: you can redistribute it and/or modify
@@ -48,6 +49,7 @@ class FileChecker(object):
self.og_watchcomic = watchcomic
self.watchcomic = re.sub('\?', '', watchcomic).strip() #strip the ? separately since it affects the regex.
self.watchcomic = re.sub(u'\u2014', ' - ', watchcomic).strip() #replace the \u2014 with a normal - because this world is f'd up enough to have something like that.
self.watchcomic = re.sub(u'\u2013', ' - ', watchcomic).strip() #replace the \u2013 with a normal - because again, people are dumb.
self.watchcomic = unicodedata.normalize('NFKD', self.watchcomic).encode('ASCII', 'ignore')
else:
self.watchcomic = None
@@ -96,7 +98,7 @@ class FileChecker(object):
self.pp_mode = False

self.failed_files = []
self.dynamic_handlers = ['/','-',':','\'',',','&','?','!','+','(',')','\u2014']
self.dynamic_handlers = ['/','-',':','\'',',','&','?','!','+','(',')','\u2014','\u2013']
self.dynamic_replacements = ['and','the']
self.rippers = ['-empire','-empire-hd','minutemen-','-dcp']

@@ -325,6 +327,53 @@ class FileChecker(object):
#split_file = re.findall('\([\w\s-]+\)|[-+]?\d*\.\d+|\d+|[\w-]+|#?\d\.\d+|#(?<![\w\d])XCV(?![\w\d])+|\)', ret_sf1, re.UNICODE)
split_file = re.findall('(?imu)\([\w\s-]+\)|[-+]?\d*\.\d+|\d+|[\w-]+|#?\d\.\d+|#(?<![\w\d])XCV(?![\w\d])+|\)', ret_sf1, re.UNICODE)

#10-20-2018 ---START -- attempt to detect '01 (of 7.3)'
#10-20-2018 -- attempt to detect '36p ctc' as one element
spf = []
mini = False
wrdcnt = 0
for x in split_file:
if x == 'of':
mini = True
spf.append(x)
elif mini is True:
mini = False
try:
logger.fdebug('checking now: %s' % x)
if x.isdigit():
logger.fdebug('[MINI-SERIES] MAX ISSUES IN SERIES: %s' % x)
spf.append('(of %s)' % x)
elif float(x) > 0:
logger.fdebug('[MINI-DECIMAL SERIES] MAX ISSUES IN SERIES: %s' % x)
spf.append('(of %s)' % x)
except Exception as e:
spf.append(x)

elif x == ')':
pass
elif x == 'p' or x == 'ctc':
try:
if spf[wrdcnt-1].isdigit():
logger.debug('THIS SHOULD BE : %s%s' % (spf[wrdcnt-1], x))
newline = '%s%s' % (spf[wrdcnt-1], x)
spf[wrdcnt -1] = newline
#wrdcnt =-1
elif spf[wrdcnt-1][-1] == 'p' and spf[wrdcnt-1][:-1].isdigit() and x == 'ctc':
logger.fdebug('THIS SHOULD BE : %s%s' % (spf[wrdcnt-1], x))
newline = '%s%s' % (spf[wrdcnt-1], x)
spf[wrdcnt -1] = newline
#wrdcnt =-1
except Exception as e:
spf.append(x)
else:
spf.append(x)
wrdcnt +=1

if len(spf) > 0:
split_file = spf
logger.fdebug('NEWLY SPLIT REORGD: %s' % split_file)
#10-20-2018 ---END

if len(split_file) == 1:
logger.fdebug('Improperly formatted filename - there is no separation using appropriate characters between wording.')
ret_sf1 = re.sub('\-',' ', ret_sf1).strip()
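A compact sketch of the token merge above: after the filename is split, an 'of' followed by a number is folded into a single '(of N)' element so a count like '01 (of 7)' survives as one token. This is a simplified standalone version of the loop in the diff:

def merge_of_tokens(tokens):
    out, mini = [], False
    for x in tokens:
        if x == 'of':
            mini = True
            out.append(x)
        elif mini:
            mini = False
            # the token right after 'of' is the series count when numeric
            out.append('(of %s)' % x if x.isdigit() else x)
        else:
            out.append(x)
    return out

print(merge_of_tokens(['01', 'of', '7']))  # ['01', 'of', '(of 7)']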
@@ -339,7 +388,7 @@ class FileChecker(object):
lastissue_label = None
lastissue_position = 0
lastmod_position = 0

booktype = 'issue'
#exceptions that are considered alpha-numeric issue numbers
exceptions = ('NOW', 'AI', 'AU', 'X', 'A', 'B', 'C', 'INH', 'MU')
@@ -416,7 +465,7 @@ class FileChecker(object):
count = match.group()
found = True

if not found:
if found is False:
match = re.search('(?<=\(of\s)\d+(?=\))', sf, re.IGNORECASE)
if match:
count = match.group()
@@ -431,7 +480,7 @@ class FileChecker(object):
logger.fdebug('Issue Number SHOULD BE: ' + str(lastissue_label))
validcountchk = True

if all([lastissue_position == (split_file.index(sf) -1), lastissue_label is not None and '#' not in sf]):
if all([lastissue_position == (split_file.index(sf) -1), lastissue_label is not None, '#' not in sf, sf != 'p']):
#find it in the original file to see if there's a decimal between.
findst = lastissue_mod_position+1
if findst > len(modfilename):
@@ -439,11 +488,17 @@ class FileChecker(object):

if modfilename[findst] != '.' or modfilename[findst] != '#': #findst != '.' and findst != '#':
if sf.isdigit():
logger.fdebug('2 separate numbers detected. Assuming 2nd number is the actual issue')
possible_issuenumbers.append({'number': sf,
'position': split_file.index(sf, lastissue_position), #modfilename.find(sf)})
'mod_position': self.char_file_position(modfilename, sf, lastmod_position),
'validcountchk': validcountchk})
seper_num = False
for x in datecheck:
if x['position'] == split_file.index(sf, lastissue_position):
seper_num = True
if seper_num is False:
logger.fdebug('2 separate numbers detected. Assuming 2nd number is the actual issue')

#possible_issuenumbers.append({'number': sf,
# 'position': split_file.index(sf, lastissue_position), #modfilename.find(sf)})
# 'mod_position': self.char_file_position(modfilename, sf, lastmod_position),
# 'validcountchk': validcountchk})

#used to see if the issue is an alpha-numeric (ie. 18.NOW, 50-X, etc)
lastissue_position = split_file.index(sf, lastissue_position)
@@ -481,7 +536,6 @@ class FileChecker(object):

else:
if ('#' in sf or sf.isdigit()) or validcountchk:
logger.fdebug('validated: ' + sf)
if validcountchk:
#if it's not a decimal but the digits are back-to-back, then it's something else.
possible_issuenumbers.append({'number': lastissue_label,
@@ -508,7 +562,7 @@ class FileChecker(object):
'validcountchk': validcountchk})

#now we try to find the series title &/or volume label.
if any( [sf.lower().startswith('v'), sf.lower().startswith('vol'), volumeprior == True, 'volume' in sf.lower(), 'vol' in sf.lower()] ) and sf.lower() not in {'one','two','three','four','five','six'}:
if any( [sf.lower().startswith('v'), sf.lower().startswith('vol'), volumeprior == True, 'volume' in sf.lower(), 'vol' in sf.lower(), 'part' in sf.lower()] ) and sf.lower() not in {'one','two','three','four','five','six'}:
if any([ split_file[split_file.index(sf)].isdigit(), split_file[split_file.index(sf)][3:].isdigit(), split_file[split_file.index(sf)][1:].isdigit() ]):
volume = re.sub("[^0-9]", "", sf)
if volumeprior:
@@ -533,7 +587,7 @@ class FileChecker(object):
volumeprior_label = sf
sep_volume = True
logger.fdebug('volume label detected, but vol. number is not adjacent, adjusting scope to include number.')
elif 'volume' in sf.lower():
elif 'volume' in sf.lower() or all(['part' in sf.lower(), len(sf) == 4]):
volume = re.sub("[^0-9]", "", sf)
if volume.isdigit():
volume_found['volume'] = volume
@@ -593,11 +647,58 @@ class FileChecker(object):
else:
raise ValueError
except ValueError, e:
#10-20-2018 - to detect issue numbers such as #000.0000½
if lastissue_label is not None and lastissue_position == int(split_file.index(sf))-1 and sf == 'XCV':
logger.info('this should be: %s%s' % (lastissue_label, sf))
pi = []
for x in possible_issuenumbers:
if (x['number'] == lastissue_label and x['position'] == lastissue_position) or (x['number'] == sf and x['position'] == split_file.index(sf, lastissue_position)):
pass
else:
pi.append({'number': x['number'],
'position': x['position'],
'mod_position': x['mod_position'],
'validcountchk': x['validcountchk']})

lastissue_label = '%s%s' % (lastissue_label, sf)
pi.append({'number': lastissue_label,
'position': lastissue_position,
'mod_position': lastmod_position,
'validcountchk': validcountchk})

if len(pi) > 0:
possible_issuenumbers = pi

elif sf.lower() == 'of' and lastissue_label is not None and lastissue_position == int(split_file.index(sf))-1:
logger.info('MINI-SERIES DETECTED')

else:
if any([re.sub('[\(\)]', '', sf.lower()).strip() == 'tpb', re.sub('[\(\)]', '', sf.lower()).strip() == 'digital tpb']):
logger.info('TRADE PAPERBACK DETECTED. NOT DETECTING ISSUE NUMBER - ASSUMING VOLUME')
booktype = 'TPB'
try:
if volume_found['volume'] is not None:
possible_issuenumbers.append({'number': volume_found['volume'],
'position': volume_found['position'],
'mod_position': self.char_file_position(modfilename, volume_found['volume'], lastmod_position),
'validcountchk': validcountchk})
except:
possible_issuenumbers.append({'number': '1',
'position': split_file.index(sf, lastissue_position), #modfilename.find(sf)})
'mod_position': self.char_file_position(modfilename, sf, lastmod_position),
'validcountchk': validcountchk})

elif any([sf.lower() == 'gn', sf.lower() == 'graphic novel']):
logger.info('GRAPHIC NOVEL DETECTED. NOT DETECTING ISSUE NUMBER - ASSUMING VOLUME')
booktype = 'GN'
else:
if 'could not convert string to float' not in str(e):
logger.fdebug('[%s] Error detecting issue # - ignoring this result : %s' % (e, sf))

volumeprior = False
volumeprior_label = None
sep_volume = False
pass
#logger.fdebug('Error detecting issue # - ignoring this result : ' + str(sf))

#keep track of where in the original modfilename the positions are in order to check against it for decimal places, etc.
file_length += len(sf) + 1 #1 for space
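A small sketch of the booktype fallback introduced above: a bare 'TPB' or 'GN' token means the parsed number is a volume, so the parser should fall back to issue 1 of that volume instead of failing. The helper below is a hypothetical simplification of the branch in the diff:

def classify_token(sf, volume_found):
    token = sf.lower().strip('()')
    if token in ('tpb', 'digital tpb'):
        # trade paperback: the detected number is the volume, default to 1
        return 'TPB', volume_found.get('volume', '1')
    if token in ('gn', 'graphic novel'):
        return 'GN', volume_found.get('volume', '1')
    return 'issue', None

print(classify_token('(TPB)', {'volume': '2'}))  # ('TPB', '2')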
@@ -742,13 +843,19 @@ class FileChecker(object):
issue_number_position -=1

if issue_number is None:
logger.fdebug('No issue number present in filename.')
if any([booktype == 'TPB', booktype == 'GN']):
logger.info('%s detected. Volume assumption is number: %s' % (booktype, volume_found))
else:
if len(volume_found) > 0:
logger.info('UNKNOWN TPB/GN detected. Volume assumption is number: %s' % (volume_found))
else:
logger.info('No issue number present in filename.')
else:
logger.fdebug('issue verified as : ' + issue_number)
issue_volume = None
if len(volume_found) > 0:
issue_volume = 'v' + str(volume_found['volume'])
if all([highest_series_pos + 1 != volume_found['position'], highest_series_pos != volume_found['position'] + 1, sep_volume == False]):
if all([highest_series_pos + 1 != volume_found['position'], highest_series_pos != volume_found['position'] + 1, sep_volume == False, booktype == 'issue', len(possible_issuenumbers) > 0]):
logger.fdebug('Extra item(s) are present between the volume label and the issue number. Checking..')
split_file.insert(int(issue_number_position), split_file.pop(volume_found['position'])) #highest_series_pos-1, split_file.pop(volume_found['position']))
logger.fdebug('new split: ' + str(split_file))
@@ -761,10 +868,13 @@ class FileChecker(object):
else:
highest_series_pos = volume_found['position']
logger.fdebug('Volume detected as : ' + issue_volume)

if all([len(volume_found) == 0, booktype != 'issue']) or all([len(volume_found) == 0, issue_number_position == len(split_file)]):
issue_volume = 'v1'

#at this point it should be in a SERIES ISSUE VOLUME YEAR kind of format
#if the position of the issue number is greater than the highest series position, make it the highest series position.
if issue_number_position > highest_series_pos:
if issue_number_position != len(split_file) and issue_number_position > highest_series_pos:
if not volume_found:
highest_series_pos = issue_number_position
else:
@@ -807,16 +917,13 @@ class FileChecker(object):
tmpval = yearposition - issue_number_position
else:
tmpval = 1
#logger.fdebug('TMPVAL: %s' % tmpval)
except:
pass
else:
if tmpval > 2:
logger.fdebug('There are %s extra words between the issue # and the year position. Deciphering if issue title or part of series title.' % tmpval)
tmpval1 = ' '.join(split_file[issue_number_position+1:yearposition])
#logger.info('%s' % tmpval)
if split_file[issue_number_position+1] == '-':
#logger.info('dash detected')
usevalue = ' '.join(split_file[issue_number_position+2:yearposition])
splitv = split_file[issue_number_position+2:yearposition]
else:
@@ -904,8 +1011,15 @@ class FileChecker(object):
if splitvalue is not None:
logger.fdebug('[SPLITVALUE] possible issue title: %s' % splitvalue)
alt_series = '%s %s' % (series_name, splitvalue)
alt_issue = splitvalue
if booktype != 'issue':
if alt_issue is not None:
alt_issue = re.sub('tpb', '', splitvalue, flags=re.I).strip()
if alt_series is not None:
alt_series = re.sub('tpb', '', alt_series, flags=re.I).strip()
if alt_series is not None:
if booktype != 'issue':
if alt_series is not None:
alt_series = re.sub('tpb', '', alt_series, flags=re.I).strip()
logger.fdebug('Alternate series / issue title: %s [%s]' % (alt_series, alt_issue))

#if the filename is unicoded, it won't match due to the unicode translation. Keep the unicode as well as the decoded.
@@ -921,28 +1035,33 @@ class FileChecker(object):
series_name = re.sub('special', '', series_name, flags=re.I).strip()
series_name_decoded = re.sub('special', '', series_name_decoded, flags=re.I).strip()

if issue_number is None or series_name is None:
logger.fdebug('Cannot parse the filename properly. I\'m going to make note of this filename so that my evil ruler can make it work.')
if series_name is not None:
dreplace = self.dynamic_replace(series_name)['mod_seriesname']
if (any([issue_number is None, series_name is None]) and booktype == 'issue'):

if all([issue_number is None, booktype == 'issue', issue_volume is not None]):
logger.info('Possible UNKNOWN TPB/GN detected - no issue number present, no clarification in filename, but volume present with series title')
else:
dreplace = None
return {'parse_status': 'failure',
'sub': path_list,
'comicfilename': filename,
'comiclocation': self.dir,
'series_name': series_name,
'series_name_decoded': series_name_decoded,
'alt_series': alt_series,
'alt_issue': alt_issue,
'dynamic_name': dreplace,
'issue_number': issue_number,
'justthedigits': issue_number, #redundant but it's needed atm
'series_volume': issue_volume,
'issue_year': issue_year,
'annual_comicid': None,
'scangroup': scangroup,
'reading_order': None}
logger.fdebug('Cannot parse the filename properly. I\'m going to make note of this filename so that my evil ruler can make it work.')

if series_name is not None:
dreplace = self.dynamic_replace(series_name)['mod_seriesname']
else:
dreplace = None
return {'parse_status': 'failure',
'sub': path_list,
'comicfilename': filename,
'comiclocation': self.dir,
'series_name': series_name,
'series_name_decoded': series_name_decoded,
'alt_series': alt_series,
'alt_issue': alt_issue,
'dynamic_name': dreplace,
'issue_number': issue_number,
'justthedigits': issue_number, #redundant but it's needed atm
'series_volume': issue_volume,
'issue_year': issue_year,
'annual_comicid': None,
'scangroup': scangroup,
'reading_order': None}

if self.justparse:
return {'parse_status': 'success',
@@ -962,17 +1081,18 @@ class FileChecker(object):
'reading_order': reading_order}

series_info = {}
series_info = {'sub': path_list,
'comicfilename': filename,
'comiclocation': self.dir,
'series_name': series_name,
'series_name_decoded': series_name_decoded,
'alt_series': alt_series,
'alt_issue': alt_issue,
'series_volume': issue_volume,
'issue_year': issue_year,
'issue_number': issue_number,
'scangroup': scangroup}
series_info = {'sub': path_list,
'type': re.sub('\.','', filetype).strip(),
'comicfilename': filename,
'comiclocation': self.dir,
'series_name': series_name,
'series_name_decoded': series_name_decoded,
'alt_series': alt_series,
'alt_issue': alt_issue,
'series_volume': issue_volume,
'issue_year': issue_year,
'issue_number': issue_number,
'scangroup': scangroup}

return self.matchIT(series_info)

@@ -1029,7 +1149,9 @@ class FileChecker(object):

if mylar.CONFIG.ANNUALS_ON and 'special' not in nspace_watchcomic.lower():
if 'special' in series_name.lower():
justthedigits = 'Special ' + series_info['issue_number']
justthedigits = 'Special'
if series_info['issue_number'] is not None:
justthedigits += ' %s' % series_info['issue_number']
nspace_seriesname = re.sub('special', '', nspace_seriesname.lower()).strip()
nspace_seriesname_decoded = re.sub('special', '', nspace_seriesname_decoded.lower()).strip()
if alt_series is not None and 'special' in alt_series.lower():
@@ -1182,7 +1304,7 @@ class FileChecker(object):
tcrc = helpers.crc(os.path.join(dirname, fname).decode(mylar.SYS_ENCODING))
crcchk = [x for x in pp_crclist if tcrc == x['crc']]
if crcchk:
#logger.fdebug('%s Already post-processed this item %s - Ignoring' % fname)
#logger.fdebug('[FILECHECKER] Already post-processed this item %s - Ignoring' % fname)
continue

if os.path.splitext(fname)[1].lower().endswith(comic_ext):
@@ -1233,6 +1355,7 @@ class FileChecker(object):
mod_watchcomic = mod_watchcomic[:wd] + spacer + mod_watchcomic[wd+len(wdrm):]

series_name = re.sub(u'\u2014', ' - ', series_name)
series_name = re.sub(u'\u2013', ' - ', series_name)
seriesdynamic_handlers_match = [x for x in self.dynamic_handlers if x.lower() in series_name.lower()]
#logger.fdebug('series dynamic handlers recognized : ' + str(seriesdynamic_handlers_match))
seriesdynamic_replacements_match = [x for x in self.dynamic_replacements if x.lower() in series_name.lower()]
@@ -1323,7 +1446,7 @@ class FileChecker(object):
return {'AS_Alt': AS_Alt,
'AS_Tuple': AS_Tuple}

def checkthedate(self, txt):
def checkthedate(self, txt, fulldate=False, cnt=0):
# txt='''\
# Jan 19, 1990
# January 19, 1990
@@ -1334,17 +1457,51 @@ class FileChecker(object):
# Jan 1990
# January1990'''

fmts = ('%Y','%b %d, %Y','%B %d, %Y','%B %d %Y','%m/%d/%Y','%m/%d/%y','%b %Y','%B%Y','%b %d,%Y','%m-%Y','%B %Y','%Y-%m-%d','%Y-%m','%Y%m')

fmts = ('%Y','%b %d, %Y','%B %d, %Y','%B %d %Y','%m/%d/%Y','%m/%d/%y','(%m/%d/%Y)','%b %Y','%B%Y','%b %d,%Y','%m-%Y','%B %Y','%Y-%m-%d','%Y-%m','%Y%m')
mnths = ('Jan', 'Feb', 'Mar', 'Apr', 'May', 'Jun', 'Jul', 'Aug', 'Sep', 'Oct', 'Nov', 'Dec')
parsed=[]
for e in txt.splitlines():
for fmt in fmts:
try:
t = dt.datetime.strptime(e, fmt)
parsed.append((e, fmt, t))
break
except ValueError as err:
pass

if fulldate is False:
for e in txt.splitlines():
for fmt in fmts:
try:
t = dt.datetime.strptime(e, fmt)
parsed.append((e, fmt, t))
break
except ValueError as err:
pass
else:
for e in txt.split():
if cnt == 0:
for x in mnths:
mnth = re.sub('\.', '', e.lower())
if x.lower() in mnth and len(mnth) <= 4:
add_date = x + ' '
cnt+=1
break

elif cnt == 1:
issnumb = re.sub(',', '', e).strip()
if issnumb.isdigit() and int(issnumb) < 31:
add_date += issnumb + ', '
cnt+=1
elif cnt == 2:
possyear = helpers.cleanhtml(re.sub('\.', '', e).strip())
if possyear.isdigit() and int(possyear) > 1970 and int(possyear) < 2020:
add_date += possyear
cnt +=1
if cnt == 3:
return self.checkthedate(add_date, fulldate=False, cnt=-1)


if cnt <= 0:
for fmt in fmts:
try:
t = dt.datetime.strptime(e, fmt)
parsed.append((e, fmt, t))
break
except ValueError as err:
pass

# check that all the cases are handled
success={t[0] for t in parsed}
@@ -1352,14 +1509,19 @@ class FileChecker(object):
if e not in success:
pass #print e

dateyear = None
dateline = None

#logger.info('parsed: %s' % parsed)

for t in parsed:
# logger.fdebug('"{:20}" => "{:20}" => {}'.format(*t)
dateyear = t[2].year
#logger.fdebug('"{:20}" => "{:20}" => {}'.format(*t))
if fulldate is False and cnt != -1:
dateline = t[2].year
else:
dateline = t[2].strftime('%Y-%m-%d')
break

return dateyear
return dateline

def validateAndCreateDirectory(dir, create=False, module=None):
if module is None:
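For reference, the format cascade that checkthedate() relies on can be exercised standalone: try each strptime format in turn and keep the first that parses. A minimal sketch (format list trimmed, input values hypothetical):

import datetime as dt

fmts = ('%Y', '%b %d, %Y', '%B %d %Y', '%m/%d/%Y', '%Y-%m-%d')

def first_parse(text, fulldate=False):
    # try each strptime format in turn; the first one that parses wins
    for fmt in fmts:
        try:
            t = dt.datetime.strptime(text, fmt)
        except ValueError:
            continue
        # fulldate=False mirrors the old year-only return value
        return t.strftime('%Y-%m-%d') if fulldate else t.year
    return None

print(first_parse('Jan 19, 1990'))                 # 1990
print(first_parse('Jan 19, 1990', fulldate=True))  # 1990-01-19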
@@ -0,0 +1,560 @@
#!/usr/bin/env python
# -*- coding: utf-8 -*-

# This file is part of Mylar.
#
# Mylar is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Mylar is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Mylar. If not, see <http://www.gnu.org/licenses/>.

import re
import os
import mylar
from mylar import helpers, db, logger

class FileHandlers(object):

def __init__(self, comic=None, issue=None, ComicID=None, IssueID=None):

self.myDB = db.DBConnection()
if ComicID is not None:
self.comicid = ComicID
self.comic = self.myDB.selectone('SELECT * FROM comics WHERE ComicID=?', [ComicID]).fetchone()
elif comic is not None:
self.comic = comic
self.comicid = None
else:
self.comic = None
self.comicid = None

if IssueID is not None:
self.issueid = IssueID
self.issue = self.myDB.select('SELECT * FROM issues WHERE IssueID=?', [IssueID])
elif issue is not None:
self.issue = issue
self.issueid = None
else:
self.issue = None
self.issueid = None
def folder_create(self, booktype=None):
# a dictionary named comic needs to be passed in with {'ComicPublisher', 'Corrected_Type', 'Type', 'ComicYear', 'ComicName', 'ComicVersion'}
# or pass in comicid value from __init__

# setup default location here
u_comicnm = self.comic['ComicName']
# let's remove the non-standard characters here that will break filenaming / searching.
comicname_filesafe = helpers.filesafe(u_comicnm)
comicdir = comicname_filesafe

series = comicdir
if series[-1:] == '.':
series = series[:-1]

publisher = re.sub('!', '', self.comic['ComicPublisher']) # thanks Boom!
publisher = helpers.filesafe(publisher)

if booktype is not None:
if self.comic['Corrected_Type'] is not None:
booktype = self.comic['Corrected_Type']
else:
booktype = booktype
else:
booktype = self.comic['Type']

if any([booktype is None, booktype == 'None', booktype == 'Print']) or all([booktype != 'Print', mylar.CONFIG.FORMAT_BOOKTYPE is False]):
chunk_fb = re.sub('\$Type', '', mylar.CONFIG.FOLDER_FORMAT)
chunk_b = re.compile(r'\s+')
chunk_folder_format = chunk_b.sub(' ', chunk_fb)
else:
chunk_folder_format = mylar.CONFIG.FOLDER_FORMAT

if any([self.comic['ComicVersion'] is None, booktype != 'Print']):
comicVol = 'None'
else:
comicVol = self.comic['ComicVersion']

#if comversion is None, remove it so it doesn't populate with 'None'
if comicVol == 'None':
chunk_f_f = re.sub('\$VolumeN', '', chunk_folder_format)
chunk_f = re.compile(r'\s+')
chunk_folder_format = chunk_f.sub(' ', chunk_f_f)
logger.fdebug('No version # found for series, removing from folder format')
logger.fdebug("new folder format: " + str(chunk_folder_format))

#do work to generate folder path
values = {'$Series': series,
'$Publisher': publisher,
'$Year': self.comic['ComicYear'],
'$series': series.lower(),
'$publisher': publisher.lower(),
'$VolumeY': 'V' + self.comic['ComicYear'],
'$VolumeN': comicVol.upper(),
'$Annual': 'Annual',
'$Type': booktype
}
try:
if mylar.CONFIG.FOLDER_FORMAT == '':
comlocation = os.path.join(mylar.CONFIG.DESTINATION_DIR, comicdir, " (" + self.comic['ComicYear'] + ")")
else:
chunk_folder_format = re.sub('[()|[]]', '', chunk_folder_format).strip()
comlocation = os.path.join(mylar.CONFIG.DESTINATION_DIR, helpers.replace_all(chunk_folder_format, values))

except Exception as e:
if 'TypeError' in e:
if mylar.CONFIG.DESTINATION_DIR is None:
logger.error('[ERROR] %s' % e)
logger.error('No Comic Location specified. This NEEDS to be set before anything can be added successfully.')
return
logger.error('[ERROR] %s' % e)
logger.error('Cannot determine Comic Location path properly. Check your Comic Location and Folder Format for any errors.')
return

if mylar.CONFIG.DESTINATION_DIR == "":
logger.error('There is no Comic Location Path specified - please specify one in Config/Web Interface.')
return

#enforce proper slashes here..
cnt1 = comlocation.count('\\')
cnt2 = comlocation.count('/')
if cnt1 > cnt2 and '/' in chunk_folder_format:
comlocation = re.sub('/', '\\', comlocation)

if mylar.CONFIG.REPLACE_SPACES:
#mylar.CONFIG.REPLACE_CHAR ...determines what to replace spaces with underscore or dot
comlocation = comlocation.replace(' ', mylar.CONFIG.REPLACE_CHAR)

return comlocation
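The folder expansion in folder_create() is a straight token substitution over the folder format string. A minimal sketch, assuming helpers.replace_all() simply applies str.replace for each key (an assumption that matches its use here; values are hypothetical):

import os

def replace_all(text, values):
    # assumed behaviour of helpers.replace_all(): sequential str.replace
    for key, val in values.items():
        text = text.replace(key, str(val))
    return text

values = {'$Series': 'Invincible', '$Year': '2003', '$Type': 'TPB'}  # hypothetical
print(os.path.join('/comics', replace_all('$Type/$Series ($Year)', values)))
# /comics/TPB/Invincible (2003)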
def rename_file(self, ofilename, issue=None, annualize=None, arc=False, file_format=None): #comicname, issue, comicyear=None, issueid=None)
comicid = self.comicid # it's coming in unicoded...
issueid = self.issueid

if file_format is None:
file_format = mylar.CONFIG.FILE_FORMAT

logger.fdebug(type(comicid))
logger.fdebug(type(issueid))
logger.fdebug('comicid: %s' % comicid)
logger.fdebug('issue# as per cv: %s' % issue)
logger.fdebug('issueid:' + str(issueid))

if issueid is None:
logger.fdebug('annualize is ' + str(annualize))
if arc:
#this has to be adjusted to be able to include story arc issues that span multiple arcs
chkissue = self.myDB.selectone("SELECT * from storyarcs WHERE ComicID=? AND Issue_Number=?", [comicid, issue]).fetchone()
else:
chkissue = self.myDB.selectone("SELECT * from issues WHERE ComicID=? AND Issue_Number=?", [comicid, issue]).fetchone()
if all([chkissue is None, annualize is None, not mylar.CONFIG.ANNUALS_ON]):
chkissue = self.myDB.selectone("SELECT * from annuals WHERE ComicID=? AND Issue_Number=?", [comicid, issue]).fetchone()

if chkissue is None:
#rechk chkissue against int value of issue #
if arc:
chkissue = self.myDB.selectone("SELECT * from storyarcs WHERE ComicID=? AND Int_IssueNumber=?", [comicid, helpers.issuedigits(issue)]).fetchone()
else:
chkissue = self.myDB.selectone("SELECT * from issues WHERE ComicID=? AND Int_IssueNumber=?", [comicid, helpers.issuedigits(issue)]).fetchone()
if all([chkissue is None, annualize == 'yes', mylar.CONFIG.ANNUALS_ON]):
chkissue = self.myDB.selectone("SELECT * from annuals WHERE ComicID=? AND Int_IssueNumber=?", [comicid, helpers.issuedigits(issue)]).fetchone()

if chkissue is None:
logger.error('Invalid Issue_Number - please validate.')
return
else:
logger.info('Int Issue_number compare found. continuing...')
issueid = chkissue['IssueID']
else:
issueid = chkissue['IssueID']

#use issueid to get publisher, series, year, issue number
logger.fdebug('issueid is now : ' + str(issueid))
if arc:
issueinfo = self.myDB.selectone("SELECT * from storyarcs WHERE ComicID=? AND IssueID=? AND StoryArc=?", [comicid, issueid, arc]).fetchone()
else:
issueinfo = self.myDB.selectone("SELECT * from issues WHERE ComicID=? AND IssueID=?", [comicid, issueid]).fetchone()
if issueinfo is None:
logger.fdebug('not an issue, checking against annuals')
issueinfo = self.myDB.selectone("SELECT * from annuals WHERE ComicID=? AND IssueID=?", [comicid, issueid]).fetchone()
if issueinfo is None:
logger.fdebug('Unable to rename - cannot locate issue id within db')
return
else:
annualize = True

if issueinfo is None:
logger.fdebug('Unable to rename - cannot locate issue id within db')
return
#remap the variables to a common factor.
if arc:
issuenum = issueinfo['IssueNumber']
issuedate = issueinfo['IssueDate']
publisher = issueinfo['IssuePublisher']
series = issueinfo['ComicName']
seriesfilename = series #Alternate FileNaming is not available with story arcs.
seriesyear = issueinfo['SeriesYear']
arcdir = helpers.filesafe(issueinfo['StoryArc'])
if mylar.CONFIG.REPLACE_SPACES:
arcdir = arcdir.replace(' ', mylar.CONFIG.REPLACE_CHAR)
if mylar.CONFIG.STORYARCDIR:
storyarcd = os.path.join(mylar.CONFIG.DESTINATION_DIR, "StoryArcs", arcdir)
logger.fdebug('Story Arc Directory set to : ' + storyarcd)
else:
logger.fdebug('Story Arc Directory set to : ' + mylar.CONFIG.GRABBAG_DIR)
storyarcd = os.path.join(mylar.CONFIG.DESTINATION_DIR, mylar.CONFIG.GRABBAG_DIR)

comlocation = storyarcd
comversion = None #need to populate this.

else:
issuenum = issueinfo['Issue_Number']
issuedate = issueinfo['IssueDate']
publisher = self.comic['ComicPublisher']
series = self.comic['ComicName']
if self.comic['AlternateFileName'] is None or self.comic['AlternateFileName'] == 'None':
seriesfilename = series
else:
seriesfilename = self.comic['AlternateFileName']
logger.fdebug('Alternate File Naming has been enabled for this series. Will rename series title to : ' + seriesfilename)
seriesyear = self.comic['ComicYear']
comlocation = self.comic['ComicLocation']
comversion = self.comic['ComicVersion']

unicodeissue = issuenum

if type(issuenum) == unicode:
vals = {u'\xbd':'.5',u'\xbc':'.25',u'\xbe':'.75',u'\u221e':'9999999999',u'\xe2':'9999999999'}
else:
vals = {'\xbd':'.5','\xbc':'.25','\xbe':'.75','\u221e':'9999999999','\xe2':'9999999999'}
x = [vals[key] for key in vals if key in issuenum]
if x:
issuenum = x[0]
logger.fdebug('issue number formatted: %s' % issuenum)
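The vals mapping above normalizes the odd issue "numbers" that arrive as single unicode characters (half-issues, infinity issues). A standalone sketch of the same lookup:

# -*- coding: utf-8 -*-
vals = {u'\xbd': '.5', u'\xbc': '.25', u'\xbe': '.75', u'\u221e': '9999999999'}

issuenum = u'\xbd'                       # i.e. a half-issue, "issue ½"
hits = [vals[k] for k in vals if k in issuenum]
if hits:
    issuenum = hits[0]
print(issuenum)                          # .5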
#comicid = issueinfo['ComicID']
#issueno = str(issuenum).split('.')[0]
issue_except = 'None'
issue_exceptions = ['AU',
'INH',
'NOW',
'AI',
'MU',
'A',
'B',
'C',
'X',
'O']
valid_spaces = ('.', '-')
for issexcept in issue_exceptions:
if issexcept.lower() in issuenum.lower():
logger.fdebug('ALPHANUMERIC EXCEPTION : [' + issexcept + ']')
v_chk = [v for v in valid_spaces if v in issuenum]
if v_chk:
iss_space = v_chk[0]
logger.fdebug('character space denoted as : ' + iss_space)
else:
logger.fdebug('character space not denoted.')
iss_space = ''
# if issexcept == 'INH':
# issue_except = '.INH'
if issexcept == 'NOW':
if '!' in issuenum: issuenum = re.sub('\!', '', issuenum)
# issue_except = '.NOW'

issue_except = iss_space + issexcept
logger.fdebug('issue_except denoted as : ' + issue_except)
issuenum = re.sub("[^0-9]", "", issuenum)
break

# if 'au' in issuenum.lower() and issuenum[:1].isdigit():
# issue_except = ' AU'
# elif 'ai' in issuenum.lower() and issuenum[:1].isdigit():
# issuenum = re.sub("[^0-9]", "", issuenum)
# issue_except = ' AI'
# elif 'inh' in issuenum.lower() and issuenum[:1].isdigit():
# issuenum = re.sub("[^0-9]", "", issuenum)
# issue_except = '.INH'
# elif 'now' in issuenum.lower() and issuenum[:1].isdigit():
# if '!' in issuenum: issuenum = re.sub('\!', '', issuenum)
# issuenum = re.sub("[^0-9]", "", issuenum)
# issue_except = '.NOW'
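That exception loop splits an alphanumeric issue into its digits plus a remembered suffix (and its separator, if any). A compact sketch of the same idea, with hypothetical inputs:

import re

issue_exceptions = ['AU', 'INH', 'NOW', 'AI', 'MU']
valid_spaces = ('.', '-')

def split_exception(issuenum):
    for exc in issue_exceptions:
        if exc.lower() in issuenum.lower():
            # remember the separator so it can be re-attached later
            sep = next((v for v in valid_spaces if v in issuenum), '')
            return re.sub('[^0-9]', '', issuenum), sep + exc
    return issuenum, None

print(split_exception('27.AU'))   # ('27', '.AU')
print(split_exception('14'))      # ('14', None)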
if '.' in issuenum:
iss_find = issuenum.find('.')
iss_b4dec = issuenum[:iss_find]
if iss_find == 0:
iss_b4dec = '0'
iss_decval = issuenum[iss_find +1:]
if iss_decval.endswith('.'):
iss_decval = iss_decval[:-1]
if int(iss_decval) == 0:
iss = iss_b4dec
issdec = int(iss_decval)
issueno = iss
else:
if len(iss_decval) == 1:
iss = iss_b4dec + "." + iss_decval
issdec = int(iss_decval) * 10
else:
iss = iss_b4dec + "." + iss_decval.rstrip('0')
issdec = int(iss_decval.rstrip('0')) * 10
issueno = iss_b4dec
else:
iss = issuenum
issueno = iss
# issue zero-suppression here
if mylar.CONFIG.ZERO_LEVEL == "0":
zeroadd = ""
else:
if mylar.CONFIG.ZERO_LEVEL_N == "none": zeroadd = ""
elif mylar.CONFIG.ZERO_LEVEL_N == "0x": zeroadd = "0"
elif mylar.CONFIG.ZERO_LEVEL_N == "00x": zeroadd = "00"

logger.fdebug('Zero Suppression set to : ' + str(mylar.CONFIG.ZERO_LEVEL_N))
prettycomiss = None
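The zero-suppression setting just picks a pad prefix for issues under 10 ('none' leaves them alone, '0x' adds one zero, '00x' two). A minimal sketch of the mapping:

def pad_issue(issueno, zero_level_n='00x'):
    # 'none' -> 2, '0x' -> 02, '00x' -> 002 (only for issues < 10)
    zeroadd = {'none': '', '0x': '0', '00x': '00'}[zero_level_n]
    if int(issueno) < 10:
        return zeroadd + str(int(issueno))
    return str(int(issueno))

print(pad_issue('2', 'none'))  # 2
print(pad_issue('2', '0x'))    # 02
print(pad_issue('2', '00x'))   # 002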
if issueno.isalpha():
logger.fdebug('issue detected as an alpha.')
prettycomiss = str(issueno)
else:
try:
x = float(issuenum)
#validity check
if x < 0:
logger.info('I\'ve encountered a negative issue #: %s. Trying to accommodate.' % issueno)
prettycomiss = '-' + str(zeroadd) + str(issueno[1:])
elif x == 9999999999:
logger.fdebug('Infinity issue found.')
issuenum = 'infinity'
elif x >= 0:
pass
else:
raise ValueError
except ValueError, e:
logger.warn('Unable to properly determine issue number [%s] - you should probably log this on github for help.' % issueno)
return
if prettycomiss is None and len(str(issueno)) > 0:
#if int(issueno) < 0:
# self._log("issue detected is a negative")
# prettycomiss = '-' + str(zeroadd) + str(abs(issueno))
if int(issueno) < 10:
logger.fdebug('issue detected less than 10')
if '.' in iss:
if int(iss_decval) > 0:
issueno = str(iss)
prettycomiss = str(zeroadd) + str(iss)
else:
prettycomiss = str(zeroadd) + str(int(issueno))
else:
prettycomiss = str(zeroadd) + str(iss)
if issue_except != 'None':
prettycomiss = str(prettycomiss) + issue_except
logger.fdebug('Zero level supplement set to ' + str(mylar.CONFIG.ZERO_LEVEL_N) + '. Issue will be set as : ' + str(prettycomiss))
elif int(issueno) >= 10 and int(issueno) < 100:
logger.fdebug('issue detected greater than 10, but less than 100')
if mylar.CONFIG.ZERO_LEVEL_N == "none":
zeroadd = ""
else:
zeroadd = "0"
if '.' in iss:
if int(iss_decval) > 0:
issueno = str(iss)
prettycomiss = str(zeroadd) + str(iss)
else:
prettycomiss = str(zeroadd) + str(int(issueno))
else:
prettycomiss = str(zeroadd) + str(iss)
if issue_except != 'None':
prettycomiss = str(prettycomiss) + issue_except
logger.fdebug('Zero level supplement set to ' + str(mylar.CONFIG.ZERO_LEVEL_N) + '. Issue will be set as : ' + str(prettycomiss))
else:
logger.fdebug('issue detected greater than 100')
if issuenum == 'infinity':
prettycomiss = 'infinity'
else:
if '.' in iss:
if int(iss_decval) > 0:
issueno = str(iss)
prettycomiss = str(issueno)
if issue_except != 'None':
prettycomiss = str(prettycomiss) + issue_except
logger.fdebug('Zero level supplement set to ' + str(mylar.CONFIG.ZERO_LEVEL_N) + '. Issue will be set as : ' + str(prettycomiss))
elif len(str(issueno)) == 0:
prettycomiss = str(issueno)
logger.fdebug('issue length error - cannot determine length. Defaulting to None: ' + str(prettycomiss))

logger.fdebug('Pretty Comic Issue is : ' + str(prettycomiss))
if mylar.CONFIG.UNICODE_ISSUENUMBER:
logger.fdebug('Setting this to Unicode format as requested: %s' % prettycomiss)
prettycomiss = unicodeissue
issueyear = issuedate[:4]
month = issuedate[5:7].replace('-', '').strip()
month_name = helpers.fullmonth(month)
if month_name is None:
month_name = 'None'
logger.fdebug('Issue Year : ' + str(issueyear))
logger.fdebug('Publisher: ' + publisher)
logger.fdebug('Series: ' + series)
logger.fdebug('Year: ' + str(seriesyear))
logger.fdebug('Comic Location: ' + comlocation)

if self.comic['Corrected_Type'] is not None:
if self.comic['Type'] != self.comic['Corrected_Type']:
booktype = self.comic['Corrected_Type']
else:
booktype = self.comic['Type']
else:
booktype = self.comic['Type']

if booktype == 'Print' or all([booktype != 'Print', mylar.CONFIG.FORMAT_BOOKTYPE is False]):
chunk_fb = re.sub('\$Type', '', file_format)
chunk_b = re.compile(r'\s+')
chunk_file_format = chunk_b.sub(' ', chunk_fb)
else:
chunk_file_format = file_format

if any([comversion is None, booktype != 'Print']):
comversion = 'None'
#if comversion is None, remove it so it doesn't populate with 'None'
if comversion == 'None':
chunk_f_f = re.sub('\$VolumeN', '', chunk_file_format)
chunk_f = re.compile(r'\s+')
chunk_file_format = chunk_f.sub(' ', chunk_f_f)
logger.fdebug('No version # found for series, removing from filename')
logger.fdebug("new format: " + str(chunk_file_format))

if annualize is None:
chunk_f_f = re.sub('\$Annual', '', chunk_file_format)
chunk_f = re.compile(r'\s+')
chunk_file_format = chunk_f.sub(' ', chunk_f_f)
logger.fdebug('not an annual - removing from filename parameters')
logger.fdebug('new format: ' + str(chunk_file_format))

else:
logger.fdebug('chunk_file_format is: ' + str(chunk_file_format))
if mylar.CONFIG.ANNUALS_ON:
if 'annual' in series.lower():
if '$Annual' not in chunk_file_format: # and 'annual' not in ofilename.lower():
#if it's an annual, but $annual isn't specified in file_format, we need to
#force it in there, by default in the format of $Annual $Issue
#prettycomiss = "Annual " + str(prettycomiss)
logger.fdebug('[%s][ANNUALS-ON][ANNUAL IN SERIES][NO ANNUAL FORMAT] prettycomiss: %s' % (series, prettycomiss))
else:
#because it exists within title, strip it then use formatting tag for placement of wording.
chunk_f_f = re.sub('\$Annual', '', chunk_file_format)
chunk_f = re.compile(r'\s+')
chunk_file_format = chunk_f.sub(' ', chunk_f_f)
logger.fdebug('[%s][ANNUALS-ON][ANNUAL IN SERIES][ANNUAL FORMAT] prettycomiss: %s' % (series, prettycomiss))
else:
if '$Annual' not in chunk_file_format: # and 'annual' not in ofilename.lower():
#if it's an annual, but $annual isn't specified in file_format, we need to
#force it in there, by default in the format of $Annual $Issue
prettycomiss = "Annual %s" % prettycomiss
logger.fdebug('[%s][ANNUALS-ON][ANNUAL NOT IN SERIES][NO ANNUAL FORMAT] prettycomiss: %s' % (series, prettycomiss))
else:
logger.fdebug('[%s][ANNUALS-ON][ANNUAL NOT IN SERIES][ANNUAL FORMAT] prettycomiss: %s' % (series, prettycomiss))

else:
#if annuals aren't enabled, then annuals are being tracked as independent series.
#annualize will be true since it's an annual in the seriesname.
if 'annual' in series.lower():
if '$Annual' not in chunk_file_format: # and 'annual' not in ofilename.lower():
#if it's an annual, but $annual isn't specified in file_format, we need to
#force it in there, by default in the format of $Annual $Issue
#prettycomiss = "Annual " + str(prettycomiss)
logger.fdebug('[%s][ANNUALS-OFF][ANNUAL IN SERIES][NO ANNUAL FORMAT] prettycomiss: %s' % (series, prettycomiss))
else:
#because it exists within title, strip it then use formatting tag for placement of wording.
chunk_f_f = re.sub('\$Annual', '', chunk_file_format)
chunk_f = re.compile(r'\s+')
chunk_file_format = chunk_f.sub(' ', chunk_f_f)
logger.fdebug('[%s][ANNUALS-OFF][ANNUAL IN SERIES][ANNUAL FORMAT] prettycomiss: %s' % (series, prettycomiss))
else:
if '$Annual' not in chunk_file_format: # and 'annual' not in ofilename.lower():
#if it's an annual, but $annual isn't specified in file_format, we need to
#force it in there, by default in the format of $Annual $Issue
prettycomiss = "Annual %s" % prettycomiss
logger.fdebug('[%s][ANNUALS-OFF][ANNUAL NOT IN SERIES][NO ANNUAL FORMAT] prettycomiss: %s' % (series, prettycomiss))
else:
logger.fdebug('[%s][ANNUALS-OFF][ANNUAL NOT IN SERIES][ANNUAL FORMAT] prettycomiss: %s' % (series, prettycomiss))


logger.fdebug('Annual detected within series title of ' + series + '. Not auto-correcting issue #')
seriesfilename = seriesfilename.encode('ascii', 'ignore').strip()
filebad = [':', ',', '/', '?', '!', '\'', '\"', '\*'] #in u_comicname or '/' in u_comicname or ',' in u_comicname or '?' in u_comicname:
for dbd in filebad:
if dbd in seriesfilename:
if any([dbd == '/', dbd == '*']):
repthechar = '-'
else:
repthechar = ''
seriesfilename = seriesfilename.replace(dbd, repthechar)
logger.fdebug('Altering series name due to filenaming restrictions: ' + seriesfilename)

publisher = re.sub('!', '', publisher)

file_values = {'$Series': seriesfilename,
'$Issue': prettycomiss,
'$Year': issueyear,
'$series': series.lower(),
'$Publisher': publisher,
'$publisher': publisher.lower(),
'$VolumeY': 'V' + str(seriesyear),
'$VolumeN': comversion,
'$monthname': month_name,
'$month': month,
'$Annual': 'Annual',
'$Type': booktype
}

extensions = ('.cbr', '.cbz', '.cb7')

if ofilename.lower().endswith(extensions):
path, ext = os.path.splitext(ofilename)

if file_format == '':
logger.fdebug('Rename Files is not enabled - keeping original filename.')
#check if extension is in nzb_name - will screw up otherwise
if ofilename.lower().endswith(extensions):
nfilename = ofilename[:-4]
else:
nfilename = ofilename
else:
chunk_file_format = re.sub('[()|[]]', '', chunk_file_format).strip()
nfilename = helpers.replace_all(chunk_file_format, file_values)
if mylar.CONFIG.REPLACE_SPACES:
#mylar.CONFIG.REPLACE_CHAR ...determines what to replace spaces with underscore or dot
nfilename = nfilename.replace(' ', mylar.CONFIG.REPLACE_CHAR)

nfilename = re.sub('[\,\:]', '', nfilename) + ext.lower()
logger.fdebug('New Filename: ' + nfilename)

if mylar.CONFIG.LOWERCASE_FILENAMES:
nfilename = nfilename.lower()
dst = os.path.join(comlocation, nfilename)
else:
dst = os.path.join(comlocation, nfilename)

logger.fdebug('Source: ' + ofilename)
logger.fdebug('Destination: ' + dst)

rename_this = {"destination_dir": dst,
"nfilename": nfilename,
"issueid": issueid,
"comicid": comicid}

return rename_this
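Note that rename_file() only computes and returns the target; it does not move anything itself. A hypothetical caller sketch (the ComicID and filename are invented for illustration):

import shutil
from mylar import filers

handler = filers.FileHandlers(ComicID='1234')               # hypothetical id
renamed = handler.rename_file('oldname 02 (2018).cbz', issue='2')
if renamed is not None:
    # the caller performs the actual move to the computed destination
    shutil.move('oldname 02 (2018).cbz', renamed['destination_dir'])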
@@ -32,10 +32,10 @@ def Startit(searchName, searchIssue, searchYear, ComicVersion, IssDateFix):
searchName = searchName.replace(x, ' ', cnt)

searchName = re.sub('\s+', ' ', searchName)
searchName = re.sub("[\,\:|'%20']", "", searchName).strip()
logger.fdebug("searchname: %s" % searchName)
logger.fdebug("issue: %s" % searchIssue)
logger.fdebug("year: %s" % searchYear)
searchName = re.sub("[\,\:]", "", searchName).strip()
#logger.fdebug("searchname: %s" % searchName)
#logger.fdebug("issue: %s" % searchIssue)
#logger.fdebug("year: %s" % searchYear)
encodeSearch = urllib.quote_plus(searchName)
splitSearch = encodeSearch.split(" ")

@@ -821,6 +821,18 @@ def updateComicLocation():

publisher = re.sub('!', '', dl['ComicPublisher']) # thanks Boom!
year = dl['ComicYear']

if dl['Corrected_Type'] is not None:
booktype = dl['Corrected_Type']
else:
booktype = dl['Type']
if booktype == 'Print' or all([booktype != 'Print', mylar.CONFIG.FORMAT_BOOKTYPE is False]):
chunk_fb = re.sub('\$Type', '', mylar.CONFIG.FOLDER_FORMAT)
chunk_b = re.compile(r'\s+')
chunk_folder_format = chunk_b.sub(' ', chunk_fb)
else:
chunk_folder_format = mylar.CONFIG.FOLDER_FORMAT

comversion = dl['ComicVersion']
if comversion is None:
comversion = 'None'
@@ -841,7 +853,8 @@ def updateComicLocation():
'$publisher': publisher.lower(),
'$VolumeY': 'V' + str(year),
'$VolumeN': comversion,
'$Annual': 'Annual'
'$Annual': 'Annual',
'$Type': booktype
}

#set the paths here with the separator removed allowing for cross-platform altering.
@@ -1424,7 +1437,9 @@ def havetotals(refreshit=None):
"percent": percent,
"totalissues": totalissues,
"haveissues": haveissues,
"DateAdded": comic['LastUpdated']})
"DateAdded": comic['LastUpdated'],
"Type": comic['Type'],
"Corrected_Type": comic['Corrected_Type']})

return comics

@@ -1842,17 +1857,31 @@ def listPull(weeknumber, year):
library[row['ComicID']] = row['ComicID']
return library

def listLibrary():
def listLibrary(comicid=None):
import db
library = {}
myDB = db.DBConnection()
list = myDB.select("SELECT a.comicid, b.releasecomicid, a.status FROM Comics AS a LEFT JOIN annuals AS b on a.comicid=b.comicid group by a.comicid")
if comicid is None:
if mylar.CONFIG.ANNUALS_ON is True:
list = myDB.select("SELECT a.comicid, b.releasecomicid, a.status FROM Comics AS a LEFT JOIN annuals AS b on a.comicid=b.comicid group by a.comicid")
else:
list = myDB.select("SELECT comicid, status FROM Comics group by comicid")
else:
if mylar.CONFIG.ANNUALS_ON is True:
list = myDB.select("SELECT a.comicid, b.releasecomicid, a.status FROM Comics AS a LEFT JOIN annuals AS b on a.comicid=b.comicid WHERE a.comicid=? group by a.comicid", [re.sub('4050-', '', comicid).strip()])
else:
list = myDB.select("SELECT comicid, status FROM Comics WHERE comicid=? group by comicid", [re.sub('4050-', '', comicid).strip()])

for row in list:
library[row['ComicID']] = {'comicid': row['ComicID'],
'status': row['Status']}
if row['ReleaseComicID'] is not None:
library[row['ReleaseComicID']] = {'comicid': row['ComicID'],
'status': row['Status']}
try:
if row['ReleaseComicID'] is not None:
library[row['ReleaseComicID']] = {'comicid': row['ComicID'],
'status': row['Status']}
except:
pass

return library

def listStoryArcs():
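The new per-comic lookup strips ComicVine's '4050-' series prefix before querying, so callers can pass the prefixed id directly (as the comicDetails template does). A usage sketch with a hypothetical id:

from mylar import helpers

cid = helpers.listLibrary('4050-12345')    # hypothetical ComicVine id
entry = cid.get('12345')                   # result is keyed by the stripped ComicID
if entry is not None:
    print(entry['comicid'], entry['status'])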
@@ -2865,15 +2894,29 @@ def torrentinfo(issueid=None, torrent_hash=None, download=False, monitor=False):
torrent_info['snatch_status'] = snatch_status
return torrent_info

def weekly_info(week=None, year=None):
def weekly_info(week=None, year=None, current=None):
#find the current week and save it as a reference point.
todaydate = datetime.datetime.today()
current_weeknumber = todaydate.strftime("%U")

if current is not None:
c_weeknumber = int(current[:current.find('-')])
c_weekyear = int(current[current.find('-')+1:])
else:
c_weeknumber = week
c_weekyear = year

if week:
weeknumber = int(week)
year = int(year)

#monkey patch for 2018/2019 - week 52/week 0
if all([weeknumber == 52, c_weeknumber == 51, c_weekyear == 2018]):
weeknumber = 0
year = 2019
elif all([weeknumber == 52, c_weeknumber == 0, c_weekyear == 2019]):
weeknumber = 51
year = 2018

#view specific week (prev_week, next_week)
startofyear = date(year,1,1)
week0 = startofyear - timedelta(days=startofyear.isoweekday())
@@ -2884,11 +2927,20 @@ def weekly_info(week=None, year=None):
else:
#find the given week number for the current day
weeknumber = current_weeknumber
year = todaydate.strftime("%Y")

#monkey patch for 2018/2019 - week 52/week 0
if all([weeknumber == 52, c_weeknumber == 51, c_weekyear == 2018]):
weeknumber = 0
year = 2019
elif all([weeknumber == 52, c_weeknumber == 0, c_weekyear == 2019]):
weeknumber = 51
year = 2018

stweek = datetime.datetime.strptime(todaydate.strftime('%Y-%m-%d'), '%Y-%m-%d')
startweek = stweek - timedelta(days = (stweek.weekday() + 1) % 7)
midweek = startweek + timedelta(days = 3)
endweek = startweek + timedelta(days = 6)
year = todaydate.strftime("%Y")

prev_week = int(weeknumber) - 1
prev_year = year
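Why the monkey patch exists: strftime's %U numbers weeks from the first Sunday of the year, so the same shipping week straddling New Year is week 52 of 2018 and week 0 of 2019, and the two aliases have to be mapped onto each other. A quick demonstration:

import datetime

print(datetime.date(2018, 12, 30).strftime('%U'))  # '52' (last Sunday of 2018)
print(datetime.date(2019, 1, 1).strftime('%U'))    # '00' (days before the first Sunday of 2019)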
@@ -3211,7 +3263,8 @@ def disable_provider(site, newznab=False):
mylar.CONFIG.DOGNZB = False
elif site == 'experimental':
mylar.CONFIG.EXPERIMENTAL = False

elif site == '32P':
mylar.CONFIG.ENABLE_32P = False

def date_conversion(originaldate):
c_obj_date = datetime.datetime.strptime(originaldate, "%Y-%m-%d %H:%M:%S")
@@ -3229,7 +3282,10 @@ def job_management(write=False, job=None, last_run_completed=None, current_run=N
if job is None:
dbupdate_newstatus = 'Waiting'
dbupdate_nextrun = None
rss_newstatus = 'Waiting'
if mylar.CONFIG.ENABLE_RSS is True:
rss_newstatus = 'Waiting'
else:
rss_newstatus = 'Paused'
rss_nextrun = None
weekly_newstatus = 'Waiting'
weekly_nextrun = None
@@ -3237,7 +3293,10 @@ def job_management(write=False, job=None, last_run_completed=None, current_run=N
search_nextrun = None
version_newstatus = 'Waiting'
version_nextrun = None
monitor_newstatus = 'Waiting'
if mylar.CONFIG.ENABLE_CHECK_FOLDER is True:
monitor_newstatus = 'Waiting'
else:
monitor_newstatus = 'Paused'
monitor_nextrun = None

job_info = myDB.select('SELECT DISTINCT * FROM jobhistory')
@@ -3247,31 +3306,37 @@ def job_management(write=False, job=None, last_run_completed=None, current_run=N
if mylar.SCHED_DBUPDATE_LAST is None:
mylar.SCHED_DBUPDATE_LAST = ji['prev_run_timestamp']
dbupdate_newstatus = ji['status']
mylar.UPDATER_STATUS = dbupdate_newstatus
dbupdate_nextrun = ji['next_run_timestamp']
elif 'search' in ji['JobName'].lower():
if mylar.SCHED_SEARCH_LAST is None:
mylar.SCHED_SEARCH_LAST = ji['prev_run_timestamp']
search_newstatus = ji['status']
mylar.SEARCH_STATUS = search_newstatus
search_nextrun = ji['next_run_timestamp']
elif 'rss' in ji['JobName'].lower():
if mylar.SCHED_RSS_LAST is None:
mylar.SCHED_RSS_LAST = ji['prev_run_timestamp']
rss_newstatus = ji['status']
mylar.RSS_STATUS = rss_newstatus
rss_nextrun = ji['next_run_timestamp']
elif 'weekly' in ji['JobName'].lower():
if mylar.SCHED_WEEKLY_LAST is None:
mylar.SCHED_WEEKLY_LAST = ji['prev_run_timestamp']
weekly_newstatus = ji['status']
mylar.WEEKLY_STATUS = weekly_newstatus
weekly_nextrun = ji['next_run_timestamp']
elif 'version' in ji['JobName'].lower():
if mylar.SCHED_VERSION_LAST is None:
mylar.SCHED_VERSION_LAST = ji['prev_run_timestamp']
version_newstatus = ji['status']
mylar.VERSION_STATUS = version_newstatus
version_nextrun = ji['next_run_timestamp']
elif 'monitor' in ji['JobName'].lower():
if mylar.SCHED_MONITOR_LAST is None:
mylar.SCHED_MONITOR_LAST = ji['prev_run_timestamp']
monitor_newstatus = ji['status']
mylar.MONITOR_STATUS = monitor_newstatus
monitor_nextrun = ji['next_run_timestamp']

monitors = {'weekly': mylar.SCHED_WEEKLY_LAST,
@@ -3290,21 +3355,27 @@ def job_management(write=False, job=None, last_run_completed=None, current_run=N
elif 'update' in jobinfo.lower():
prev_run_timestamp = mylar.SCHED_DBUPDATE_LAST
newstatus = dbupdate_newstatus
mylar.UPDATER_STATUS = newstatus
elif 'search' in jobinfo.lower():
prev_run_timestamp = mylar.SCHED_SEARCH_LAST
newstatus = search_newstatus
mylar.SEARCH_STATUS = newstatus
elif 'rss' in jobinfo.lower():
prev_run_timestamp = mylar.SCHED_RSS_LAST
newstatus = rss_newstatus
mylar.RSS_STATUS = newstatus
elif 'weekly' in jobinfo.lower():
prev_run_timestamp = mylar.SCHED_WEEKLY_LAST
newstatus = weekly_newstatus
mylar.WEEKLY_STATUS = newstatus
elif 'version' in jobinfo.lower():
prev_run_timestamp = mylar.SCHED_VERSION_LAST
newstatus = version_newstatus
mylar.VERSION_STATUS = newstatus
elif 'monitor' in jobinfo.lower():
prev_run_timestamp = mylar.SCHED_MONITOR_LAST
newstatus = monitor_newstatus
mylar.MONITOR_STATUS = newstatus

jobname = jobinfo[:jobinfo.find('(')-1].strip()
#logger.fdebug('jobinfo: %s' % jobinfo)
@@ -22,6 +22,7 @@ import sys
import shlex
import datetime
import re
import json
import urllib
import urllib2
import shutil

@@ -31,7 +32,7 @@ import cherrypy
import requests

import mylar
from mylar import logger, helpers, db, mb, cv, parseit, filechecker, search, updater, moveit, comicbookdb
from mylar import logger, filers, helpers, db, mb, cv, parseit, filechecker, search, updater, moveit, comicbookdb


def is_exists(comicid):

@@ -48,7 +49,7 @@ def is_exists(comicid):
return False


def addComictoDB(comicid, mismatch=None, pullupd=None, imported=None, ogcname=None, calledfrom=None, annload=None, chkwant=None, issuechk=None, issuetype=None, latestissueinfo=None, csyear=None):
def addComictoDB(comicid, mismatch=None, pullupd=None, imported=None, ogcname=None, calledfrom=None, annload=None, chkwant=None, issuechk=None, issuetype=None, latestissueinfo=None, csyear=None, fixed_type=None):
myDB = db.DBConnection()

controlValueDict = {"ComicID": comicid}
@@ -57,10 +58,14 @@ def addComictoDB(comicid, mismatch=None, pullupd=None, imported=None, ogcname=No
if dbcomic is None:
newValueDict = {"ComicName": "Comic ID: %s" % (comicid),
"Status": "Loading"}
comlocation = None
if all([imported, mylar.CONFIG.IMP_PATHS is True]):
comlocation = os.path.dirname(imported['filelisting'][0]['comiclocation'])
else:
comlocation = None
oldcomversion = None
series_status = 'Loading'
lastissueid = None
aliases = None
else:
if chkwant is not None:
logger.fdebug('ComicID: ' + str(comicid) + ' already exists. Not adding from the future pull list at this time.')
@@ -75,6 +80,8 @@ def addComictoDB(comicid, mismatch=None, pullupd=None, imported=None, ogcname=No
newValueDict = {"Status": "Loading"}
comlocation = dbcomic['ComicLocation']
lastissueid = dbcomic['LatestIssueID']
aliases = dbcomic['AlternateSearch']
logger.info('aliases currently: %s' % aliases)

if not latestissueinfo:
latestissueinfo = []
@@ -112,6 +119,9 @@ def addComictoDB(comicid, mismatch=None, pullupd=None, imported=None, ogcname=No
else:
sortname = comic['ComicName']

comic['Corrected_Type'] = fixed_type
if fixed_type is not None and fixed_type != comic['Type']:
logger.info('Forced Comic Type to : %s' % comic['Corrected_Type'])

logger.info('Now adding/updating: ' + comic['ComicName'])
#--Now that we know ComicName, let's try some scraping
@@ -196,59 +206,16 @@ def addComictoDB(comicid, mismatch=None, pullupd=None, imported=None, ogcname=No
comicname_filesafe = helpers.filesafe(u_comicnm)

if comlocation is None:
comicdir = comicname_filesafe
series = comicdir
if series[-1:] == '.':
series[:-1]

publisher = re.sub('!', '', comic['ComicPublisher']) # thanks Boom!
publisher = helpers.filesafe(publisher)
year = SeriesYear
if comicVol is None:
comicVol = 'None'
#if comversion is None, remove it so it doesn't populate with 'None'
if comicVol == 'None':
chunk_f_f = re.sub('\$VolumeN', '', mylar.CONFIG.FOLDER_FORMAT)
chunk_f = re.compile(r'\s+')
chunk_folder_format = chunk_f.sub(' ', chunk_f_f)
logger.fdebug('No version # found for series, removing from folder format')
logger.fdebug("new folder format: " + str(chunk_folder_format))
else:
chunk_folder_format = mylar.CONFIG.FOLDER_FORMAT
comic_values = {'ComicName': comic['ComicName'],
'ComicPublisher': comic['ComicPublisher'],
'ComicYear': SeriesYear,
'ComicVersion': comicVol,
'Type': comic['Type'],
'Corrected_Type': comic['Corrected_Type']}

#do work to generate folder path

values = {'$Series': series,
'$Publisher': publisher,
'$Year': year,
'$series': series.lower(),
'$publisher': publisher.lower(),
'$VolumeY': 'V' + str(year),
'$VolumeN': comicVol.upper(),
'$Annual': 'Annual'
}
try:
if mylar.CONFIG.FOLDER_FORMAT == '':
comlocation = os.path.join(mylar.CONFIG.DESTINATION_DIR, comicdir, " (" + SeriesYear + ")")
else:
comlocation = os.path.join(mylar.CONFIG.DESTINATION_DIR, helpers.replace_all(chunk_folder_format, values))
except Exception as e:
if 'TypeError' in e:
if mylar.CONFIG.DESTINATION_DIR is None:
logger.error('[ERROR] %s' % e)
logger.error('No Comic Location specified. This NEEDS to be set before anything can be added successfully.')
return
logger.error('[ERROR] %s' % e)
logger.error('Cannot determine Comic Location path properly. Check your Comic Location and Folder Format for any errors.')
return

#comlocation = mylar.CONFIG.DESTINATION_DIR + "/" + comicdir + " (" + comic['ComicYear'] + ")"
if mylar.CONFIG.DESTINATION_DIR == "":
logger.error('There is no Comic Location Path specified - please specify one in Config/Web Interface.')
return
if mylar.CONFIG.REPLACE_SPACES:
#mylar.CONFIG.REPLACE_CHAR ...determines what to replace spaces with underscore or dot
comlocation = comlocation.replace(' ', mylar.CONFIG.REPLACE_CHAR)
dothedew = filers.FileHandlers(comic=comic_values)
comlocation = dothedew.folder_create()

#moved this out of the above loop so it will chk for existance of comlocation in case moved
#if it doesn't exist - create it (otherwise will bugger up later on)
@@ -308,6 +275,25 @@ def addComictoDB(comicid, mismatch=None, pullupd=None, imported=None, ogcname=No
tmpseriesname = as_dinfo['mod_seriesname']
dynamic_seriesname = re.sub('[\|\s]','', tmpseriesname.lower()).strip()

if comic['Issue_List'] != 'None':
issue_list = json.dumps(comic['Issue_List'])
else:
issue_list = None

if comic['Aliases'] != 'None':
if all([aliases is not None, aliases != 'None']):
for x in aliases.split('##'):
aliaschk = [x for y in comic['Aliases'].split('##') if y == x]
if aliaschk and x not in aliases.split('##'):
aliases += '##' + ''.join(x)
else:
if x not in aliases.split('##'):
aliases += '##' + x
else:
aliases = comic['Aliases']
else:
aliases = aliases

controlValueDict = {"ComicID": comicid}
newValueDict = {"ComicName": comic['ComicName'],
"ComicSortName": sortname,
@@ -323,10 +309,12 @@ def addComictoDB(comicid, mismatch=None, pullupd=None, imported=None, ogcname=No
"ComicPublisher": comic['ComicPublisher'],
# "Description": Cdesc, #.dencode('utf-8', 'replace'),
"DetailURL": comic['ComicURL'],
# "AlternateSearch": comic['Aliases'],
"AlternateSearch": aliases,
# "ComicPublished": gcdinfo['resultPublished'],
"ComicPublished": "Unknown",
"Type": comic['Type'],
"Corrected_Type": comic['Corrected_Type'],
"Collects": issue_list,
"DateAdded": helpers.today(),
"Status": "Loading"}

@@ -887,6 +875,7 @@ def issue_collection(issuedata, nostatus):
"Issue_Number": issue['Issue_Number'],
"IssueDate": issue['IssueDate'],
"ReleaseDate": issue['ReleaseDate'],
"DigitalDate": issue['DigitalDate'],
"Int_IssueNumber": issue['Int_IssueNumber'],
"ImageURL": issue['ImageURL'],
"ImageURL_ALT": issue['ImageURL_ALT']
@@ -1005,6 +994,7 @@ def manualAnnual(manual_comicid=None, comicname=None, comicyear=None, comicid=No
'IssueName': cleanname,
'IssueDate': str(firstval['Issue_Date']),
'ReleaseDate': str(firstval['Store_Date']),
'DigitalDate': str(firstval['Digital_Date']),
'Status': astatus,
'ReleaseComicName': sr['ComicName']})
n+=1

@@ -1018,6 +1008,7 @@ def manualAnnual(manual_comicid=None, comicname=None, comicyear=None, comicid=No
"Int_IssueNumber": helpers.issuedigits(ann['Issue_Number']),
"IssueDate": ann['IssueDate'],
"ReleaseDate": ann['ReleaseDate'],
"DigitalDate": ann['DigitalDate'],
"IssueName": ann['IssueName'],
"ComicID": ann['ComicID'], #this is the series ID
"ReleaseComicID": ann['ReleaseComicID'], #this is the series ID for the annual(s)
@@ -1100,6 +1091,7 @@ def updateissuedata(comicid, comicname=None, issued=None, comicIssues=None, call
issname = cleanname
issdate = str(firstval['Issue_Date'])
storedate = str(firstval['Store_Date'])
digitaldate = str(firstval['Digital_Date'])
int_issnum = None
if issnum.isdigit():
int_issnum = int(issnum) * 1000

@@ -1264,6 +1256,7 @@ def updateissuedata(comicid, comicname=None, issued=None, comicIssues=None, call
"Issue_Number": issnum,
"IssueDate": issdate,
"ReleaseDate": storedate,
"DigitalDate": digitaldate,
"Int_IssueNumber": int_issnum,
"ImageURL": firstval['Image'],
"ImageURL_ALT": firstval['ImageALT']})
@@ -1286,6 +1279,10 @@ def updateissuedata(comicid, comicname=None, issued=None, comicIssues=None, call
issue_collection(issuedata, nostatus='True')

styear = str(SeriesYear)
if firstdate is not None:
if SeriesYear != firstdate[:4]:
logger.fdebug('Series start date (%s) crosses over into different year (%s) - assuming store date of first issue (%s) as Start Year (even though CV will say previous year - it\'s all gravy).' % (SeriesYear, firstdate[:4], firstdate))
styear = str(firstdate[:4])

if firstdate[5:7] == '00':
stmonth = "?"

@@ -1467,6 +1464,7 @@ def annual_check(ComicName, SeriesYear, comicid, issuetype, issuechk, annualslis
issname = cleanname
issdate = str(firstval['Issue_Date'])
stdate = str(firstval['Store_Date'])
digdate = str(firstval['Digital_Date'])
int_issnum = helpers.issuedigits(issnum)

iss_exists = myDB.selectone('SELECT * from annuals WHERE IssueID=?', [issid]).fetchone()
@@ -1494,6 +1492,7 @@ def annual_check(ComicName, SeriesYear, comicid, issuetype, issuechk, annualslis
"Int_IssueNumber": int_issnum,
"IssueDate": issdate,
"ReleaseDate": stdate,
"DigitalDate": digdate,
"IssueName": issname,
"ComicID": comicid,
"IssueID": issid,
@@ -57,17 +57,19 @@ def libraryScan(dir=None, append=False, ComicID=None, ComicName=None, cron=None,
cbz_retry = 0

mylar.IMPORT_STATUS = 'Now attempting to parse files for additional information'

myDB = db.DBConnection()
#mylar.IMPORT_PARSED_COUNT #used to count what #/totalfiles the filename parser is currently on
for r, d, f in os.walk(dir):
for files in f:
mylar.IMPORT_FILES +=1
if 'cvinfo' in files:
cv_location.append(r)
logger.fdebug('CVINFO found: ' + os.path.join(r))
if any(files.lower().endswith('.' + x.lower()) for x in extensions):
comic = files
comicpath = os.path.join(r, files)
if mylar.CONFIG.IMP_PATHS is True:
if myDB.select('SELECT * FROM comics JOIN issues WHERE issues.Status="Downloaded" AND ComicLocation=? AND issues.Location=?', [r.decode(mylar.SYS_ENCODING, 'replace'), files.decode(mylar.SYS_ENCODING, 'replace')]):
logger.info('Skipped known issue path: %s' % comicpath)
continue

comic = files
comicsize = os.path.getsize(comicpath)
logger.fdebug('Comic: ' + comic + ' [' + comicpath + '] - ' + str(comicsize) + ' bytes')

@@ -148,6 +150,10 @@ def libraryScan(dir=None, append=False, ComicID=None, ComicName=None, cron=None,
cbz_retry +=1
continue

if 'cvinfo' in files:
cv_location.append(r)
logger.fdebug('CVINFO found: ' + os.path.join(r))

mylar.IMPORT_TOTALFILES = comiccnt
logger.info('I have successfully discovered & parsed a total of ' + str(comiccnt) + ' files....analyzing now')
logger.info('I have not been able to determine what ' + str(len(failure_list)) + ' files are')
@@ -156,8 +162,8 @@ def libraryScan(dir=None, append=False, ComicID=None, ComicName=None, cron=None,
mylar.IMPORT_STATUS = 'Successfully parsed ' + str(comiccnt) + ' files'
#return queue.put(valreturn)

logger.fdebug(utter_failure_list)
myDB = db.DBConnection()
if len(utter_failure_list) > 0:
logger.fdebug('Failure list: %s' % utter_failure_list)

#let's load in the watchlist to see if we have any matches.
logger.info("loading in the watchlist to see if a series is being watched already...")
@@ -504,7 +510,7 @@ def libraryScan(dir=None, append=False, ComicID=None, ComicName=None, cron=None,
for x in issueid_list:
reverse_issueids.append(x['issueid'])

vals = None
vals = []
if len(reverse_issueids) > 0:
mylar.IMPORT_STATUS = 'Now Reverse looking up ' + str(len(reverse_issueids)) + ' IssueIDs to get the ComicIDs'
vals = mylar.cv.getComic(None, 'import', comicidlist=reverse_issueids)
@@ -601,6 +607,7 @@ def libraryScan(dir=None, append=False, ComicID=None, ComicName=None, cron=None,
import_cv_ids = 0
else:
import_cv_ids = 0
cvimport_comicids = None

return {'import_by_comicids': import_by_comicids,
'import_count': len(import_by_comicids),
@@ -83,12 +83,13 @@ def locg(pulldate=None,weeknumber=None,year=None):
'annuallink': x['link'],
'year': x['year'],
'volume': x['volume'],
'seriesyear': x['seriesyear']})
'seriesyear': x['seriesyear'],
'format': x['type']})
shipdate = x['shipdate']

myDB = db.DBConnection()

myDB.action("CREATE TABLE IF NOT EXISTS weekly (SHIPDATE, PUBLISHER text, ISSUE text, COMIC VARCHAR(150), EXTRA text, STATUS text, ComicID text, IssueID text, CV_Last_Update text, DynamicName text, weeknumber text, year text, volume text, seriesyear text, annuallink text, rowid INTEGER PRIMARY KEY)")
myDB.action("CREATE TABLE IF NOT EXISTS weekly (SHIPDATE, PUBLISHER text, ISSUE text, COMIC VARCHAR(150), EXTRA text, STATUS text, ComicID text, IssueID text, CV_Last_Update text, DynamicName text, weeknumber text, year text, volume text, seriesyear text, annuallink text, format text, rowid INTEGER PRIMARY KEY)")

#clear out the upcoming table here so they show the new values properly.
if pulldate == '00000000':

@@ -123,7 +124,8 @@ def locg(pulldate=None,weeknumber=None,year=None):
'ANNUALLINK': x['annuallink'],
'YEAR': x['year'],
'VOLUME': x['volume'],
'SERIESYEAR': x['seriesyear']}
'SERIESYEAR': x['seriesyear'],
'FORMAT': x['format']}
myDB.upsert("weekly", newValueDict, controlValueDict)

logger.info('[PULL-LIST] Successfully populated pull-list into Mylar for the week of: ' + str(weeknumber))
@@ -373,7 +373,7 @@ def findComic(name, mode, issue, limityear=None, type=None):
                 xmltype = 'Print'
             elif 'digital' in xmldesc[:60].lower() and 'digital edition can be found' not in xmldesc.lower():
                 xmltype = 'Digital'
-            elif 'paperback' in xmldesc[:60].lower() and 'paperback can be found' not in xmldesc.lower():
+            elif all(['paperback' in xmldesc[:60].lower(), 'paperback can be found' not in xmldesc.lower()]) or 'collects' in xmldesc.lower():
                 xmltype = 'TPB'
             elif 'hardcover' in xmldesc[:60].lower() and 'hardcover can be found' not in xmldesc.lower():
                 xmltype = 'HC'

@@ -458,8 +458,8 @@ def storyarcinfo(xmlid):

         try:
             logger.fdebug('story_arc ascension')
-            issuecount = len( arcdom.getElementsByTagName('issue') )
+            issuedom = arcdom.getElementsByTagName('issue')
+            issuecount = len( issuedom ) #arcdom.getElementsByTagName('issue') )
             isc = 0
             arclist = ''
             ordernum = 1
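The reworked elif above widens TPB detection: a result is typed TPB either when 'paperback' leads the description (and is not just a cross-reference) or when the description mentions 'collects' anywhere. A pure-Python restatement of the decision order, for illustration only; the leading Print branch and the final fallback are assumed from the surrounding context rather than shown in this hunk:

    def classify_booktype(xmldesc):
        # Mirrors the Print / Digital / TPB / HC ordering used above.
        head = xmldesc[:60].lower()
        full = xmldesc.lower()
        if 'print' in head:
            return 'Print'
        if 'digital' in head and 'digital edition can be found' not in full:
            return 'Digital'
        if ('paperback' in head and 'paperback can be found' not in full) or 'collects' in full:
            return 'TPB'
        if 'hardcover' in head and 'hardcover can be found' not in full:
            return 'HC'
        return 'Print'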
@@ -147,7 +147,7 @@ class NMA:
         if snatched_nzb:
             if snatched_nzb[-1] == '\.': snatched_nzb = snatched_nzb[:-1]
             event = snline
-            description = "Mylar has snatched: " + snatched_nzb + " from " + prov + " and has sent it to " + sent_to
+            description = "Mylar has snatched: " + snatched_nzb + " from " + prov + " and " + sent_to
         else:
             event = prline
             description = prline2

@@ -246,7 +246,7 @@ class PUSHOVER:
         if snatched_nzb:
             if snatched_nzb[-1] == '\.':
                 snatched_nzb = snatched_nzb[:-1]
-            message = "Mylar has snatched: " + snatched_nzb + " from " + prov + " and has sent it to " + sent_to
+            message = "Mylar has snatched: " + snatched_nzb + " from " + prov + " and " + sent_to

         data = {'token': mylar.CONFIG.PUSHOVER_APIKEY,
                 'user': mylar.CONFIG.PUSHOVER_USERKEY,

@@ -359,7 +359,7 @@ class BOXCAR:
         # if no username was given then use the one from the config
         if snatched_nzb:
             title = snline
-            message = "Mylar has snatched: " + snatched_nzb + " and has sent it to " + sent_to
+            message = "Mylar has snatched: " + snatched_nzb + " and " + sent_to
         else:
             title = prline
             message = prline2

@@ -412,7 +412,7 @@ class PUSHBULLET:
         if snatched:
             if snatched[-1] == '.': snatched = snatched[:-1]
             event = snline
-            message = "Mylar has snatched: " + snatched + " from " + prov + " and has sent it to " + sent_to
+            message = "Mylar has snatched: " + snatched + " from " + prov + " and " + sent_to
         else:
             event = prline + ' complete!'
             message = prline2

@@ -489,7 +489,7 @@ class SLACK:
         module += '[NOTIFIER]'

         if all([sent_to is not None, prov is not None]):
-            attachment_text += ' from %s and sent to %s' % (prov, sent_to)
+            attachment_text += ' from %s and %s' % (prov, sent_to)
         elif sent_to is None:
             attachment_text += ' from %s' % prov
         else:
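All five notifiers receive the same wording change: 'and has sent it to X' becomes 'and X', because sent_to now arrives as a complete phrase. A single shared helper along these lines (hypothetical, not part of the commit) would express the SLACK branching once instead of per notifier:

    def snatch_text(item, prov=None, sent_to=None):
        # item is the snatched release name; sent_to is already a full
        # phrase describing where it went. Trailing periods are stripped
        # as the notifiers above do.
        text = 'Mylar has snatched: %s' % item.rstrip('.')
        if prov is not None and sent_to is not None:
            text += ' from %s and %s' % (prov, sent_to)
        elif prov is not None:
            text += ' from %s' % prov
        elif sent_to is not None:
            text += ' and %s' % sent_to
        return text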
@@ -1,4 +1,17 @@
 #!/usr/bin/python
+# This file is part of Mylar.
+#
+# Mylar is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Mylar is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Mylar. If not, see <http://www.gnu.org/licenses/>.

 import os, sys
 import re

@@ -14,7 +27,7 @@ import random
 from StringIO import StringIO

 import mylar
-from mylar import db, logger, ftpsshup, helpers, auth32p, utorrent
+from mylar import db, logger, ftpsshup, helpers, auth32p, utorrent, helpers
 import torrent.clients.transmission as transmission
 import torrent.clients.deluge as deluge
 import torrent.clients.qbittorrent as qbittorrent

@@ -53,9 +66,12 @@ def torrents(pickfeed=None, seriesname=None, issue=None, feedinfo=None):
     # pickfeed = '2'
     # loopit = 1
     loopit = 1

     if pickfeed == 'Public':
-        #we need to cycle through both DEM + WWT feeds
-        loopit = 2
         pickfeed = '999'
+        # since DEM is dead, just remove the loop entirely
+        # #we need to cycle through both DEM + WWT feeds
+        # loopit = 2

     lp = 0
     totalcount = 0

@@ -79,7 +95,7 @@ def torrents(pickfeed=None, seriesname=None, issue=None, feedinfo=None):

     feedtype = None

-    if pickfeed == "1" and mylar.CONFIG.ENABLE_32P: # 32pages new releases feed.
+    if pickfeed == "1" and mylar.CONFIG.ENABLE_32P is True: # 32pages new releases feed.
         feed = 'https://32pag.es/feeds.php?feed=torrents_all&user=' + feedinfo['user'] + '&auth=' + feedinfo['auth'] + '&passkey=' + feedinfo['passkey'] + '&authkey=' + feedinfo['authkey']
         feedtype = ' from the New Releases RSS Feed for comics'
         verify = bool(mylar.CONFIG.VERIFY_32P)

@@ -115,7 +131,7 @@ def torrents(pickfeed=None, seriesname=None, issue=None, feedinfo=None):
         feed = mylar.WWTURL + 'rss.php?cat=132,50'
         feedtype = ' from the New Releases RSS Feed from WorldWideTorrents'
         verify = bool(mylar.CONFIG.PUBLIC_VERIFY)
-    elif int(pickfeed) >= 7 and feedinfo is not None:
+    elif int(pickfeed) >= 7 and feedinfo is not None and mylar.CONFIG.ENABLE_32P is True:
         #personal 32P notification feeds.
         #get the info here
         feed = 'https://32pag.es/feeds.php?feed=' + feedinfo['feed'] + '&user=' + feedinfo['user'] + '&auth=' + feedinfo['auth'] + '&passkey=' + feedinfo['passkey'] + '&authkey=' + feedinfo['authkey'] + '&name=' + feedinfo['feedname']

@@ -861,6 +877,9 @@ def torsend2client(seriesname, issue, seriesyear, linkit, site, pubhash=None):
     if site == '32P':
         url = 'https://32pag.es/torrents.php'

+        if mylar.CONFIG.ENABLE_32P is False:
+            return "fail"
+
         if mylar.CONFIG.VERIFY_32P == 1 or mylar.CONFIG.VERIFY_32P == True:
             verify = True
         else:

@@ -888,8 +907,7 @@ def torsend2client(seriesname, issue, seriesyear, linkit, site, pubhash=None):
                 feed32p = auth32p.info32p(reauthenticate=True)
                 feedinfo = feed32p.authenticate()
                 if feedinfo == "disable":
-                    mylar.CONFIG.ENABLE_32P = 0
-                    #mylar.config_write()
+                    helpers.disable_provider('32P')
                     return "fail"
         if mylar.CONFIG.PASSKEY_32P is None or mylar.AUTHKEY_32P is None or mylar.KEYS_32P is None:
             logger.error('[RSS] Unable to sign-on to 32P to validate settings and initiate download sequence. Please enter/check your username password in the configuration.')

@@ -1011,8 +1029,7 @@ def torsend2client(seriesname, issue, seriesyear, linkit, site, pubhash=None):
             feedinfo = feed32p.authenticate()

             if feedinfo == "disable":
-                mylar.CONFIG.ENABLE_32P = 0
-                #mylar.config_write()
+                helpers.disable_provider('32P')
                 return "fail"

         logger.debug('[TOR2CLIENT-32P] Creating CF Scraper')
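The 32P feed URLs above are built by string concatenation from the authenticated feedinfo dict. An equivalent Python 2 sketch using urllib.urlencode; the key names are taken from the concatenation above, while the helper itself is illustrative and additionally percent-encodes the values, which the concatenation does not:

    import urllib

    def feed_url_32p(feedinfo, personal=False):
        # Rebuilds the torrents_all / personal notification feed URLs above.
        params = [('feed', feedinfo['feed'] if personal else 'torrents_all'),
                  ('user', feedinfo['user']),
                  ('auth', feedinfo['auth']),
                  ('passkey', feedinfo['passkey']),
                  ('authkey', feedinfo['authkey'])]
        if personal:
            params.append(('name', feedinfo['feedname']))
        return 'https://32pag.es/feeds.php?' + urllib.urlencode(params)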
@@ -54,7 +54,6 @@ class tehMain():
             logger.info('[RSS-FEEDS] Initiating Torrent RSS Check.')
             if mylar.CONFIG.ENABLE_PUBLIC:
                 logger.info('[RSS-FEEDS] Initiating Torrent RSS Feed Check on Demonoid / WorldWideTorrents.')
-                #rsscheck.torrents(pickfeed='3') #TP.SE RSS Check (has to be page-parsed)
                 rsscheck.torrents(pickfeed='Public') #TPSE = DEM RSS Check + WWT RSS Check
             if mylar.CONFIG.ENABLE_32P is True:
                 logger.info('[RSS-FEEDS] Initiating Torrent RSS Feed Check on 32P.')

@@ -75,8 +74,7 @@ class tehMain():
                     if feedinfo != "disable":
                         pass
                     else:
-                        mylar.CONFIG.ENABLE_32P = False
-                        #mylar.config_write()
+                        helpers.disable_provider('32P')
                 else:
                     feedinfo = mylar.FEEDINFO_32P
mylar/search.py (920 changed lines)
File diff suppressed because it is too large
@@ -10,11 +10,11 @@ from lib.qbittorrent import client
 class TorrentClient(object):
     def __init__(self):
         self.conn = None

     def connect(self, host, username, password):
         if self.conn is not None:
             return self.connect

         if not host:
             return {'status': False}

@@ -31,7 +31,7 @@ class TorrentClient(object):
             logger.error('Could not connect to qBittorrent ' + host)
         else:
             return self.client

     def find_torrent(self, hash):
         logger.debug('Finding Torrent hash: ' + hash)
         torrent_info = self.get_torrent(hash)

@@ -53,10 +53,10 @@ class TorrentClient(object):
     def load_torrent(self, filepath):

         if not filepath.startswith('magnet'):
             logger.info('filepath to torrent file set to : ' + filepath)

         if self.client._is_authenticated is True:
             logger.info('Checking if Torrent Exists!')

@@ -81,18 +81,30 @@ class TorrentClient(object):
                 #multiple copies of the same issues that's already downloaded
             else:
                 logger.info('Torrent not added yet, trying to add it now!')
+                if any([mylar.CONFIG.QBITTORRENT_FOLDER is None, mylar.CONFIG.QBITTORRENT_FOLDER == '', mylar.CONFIG.QBITTORRENT_FOLDER == 'None']):
+                    down_dir = None
+                else:
+                    down_dir = mylar.CONFIG.QBITTORRENT_FOLDER
+                    logger.info('Forcing Download location to: %s' % down_dir)
+
                 if filepath.startswith('magnet'):
                     try:
-                        tid = self.client.download_from_link(filepath, category=str(mylar.CONFIG.QBITTORRENT_LABEL))
+                        if down_dir is not None:
+                            tid = self.client.download_from_link(filepath, savepath=str(down_dir), category=str(mylar.CONFIG.QBITTORRENT_LABEL))
+                        else:
+                            tid = self.client.download_from_link(filepath, category=str(mylar.CONFIG.QBITTORRENT_LABEL))
                     except Exception as e:
                         logger.debug('Torrent not added')
                         return {'status': False}
                     else:
-                        logger.debug('Successfully submitted for add as a magnet. Verifying item is now on client.')
+                        logger.debug('Successfully submitted for add as a magnet. Verifying item is now on client.')
                 else:
                     try:
                         torrent_content = open(filepath, 'rb')
-                        tid = self.client.download_from_file(torrent_content, category=str(mylar.CONFIG.QBITTORRENT_LABEL))
+                        if down_dir is not None:
+                            tid = self.client.download_from_file(torrent_content, savepath=str(down_dir), category=str(mylar.CONFIG.QBITTORRENT_LABEL))
+                        else:
+                            tid = self.client.download_from_file(torrent_content, category=str(mylar.CONFIG.QBITTORRENT_LABEL))
                     except Exception as e:
                         logger.debug('Torrent not added')
                         return {'status': False}
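The qBittorrent change adds an optional forced download directory: a QBITTORRENT_FOLDER of None, '' or 'None' means 'let the client decide', anything else is passed through as savepath. A condensed sketch of that decision, assuming the download_from_link/download_from_file keyword arguments shown in the hunk above; everything else here is illustrative:

    def add_torrent(client, filepath, folder, label):
        # folder values of None / '' / 'None' all mean 'use the client default'.
        kwargs = {'category': str(label)}
        if folder not in (None, '', 'None'):
            kwargs['savepath'] = str(folder)
        if filepath.startswith('magnet'):
            return client.download_from_link(filepath, **kwargs)
        with open(filepath, 'rb') as torrent_content:
            return client.download_from_file(torrent_content, **kwargs)

Building the keyword dict once collapses the four explicit call sites above into two, without changing which arguments reach the client.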
@@ -36,7 +36,7 @@ def dbUpdate(ComicIDList=None, calledfrom=None, sched=False):
     if mylar.CONFIG.UPDATE_ENDED:
         logger.info('Updating only Continuing Series (option enabled) - this might cause problems with the pull-list matching for rebooted series')
         comiclist = []
-        completelist = myDB.select('SELECT LatestDate, ComicPublished, ForceContinuing, NewPublish, LastUpdated, ComicID, ComicName, Corrected_SeriesYear, ComicYear from comics WHERE Status="Active" or Status="Loading" order by LastUpdated DESC, LatestDate ASC')
+        completelist = myDB.select('SELECT LatestDate, ComicPublished, ForceContinuing, NewPublish, LastUpdated, ComicID, ComicName, Corrected_SeriesYear, Corrected_Type, ComicYear from comics WHERE Status="Active" or Status="Loading" order by LastUpdated DESC, LatestDate ASC')
         for comlist in completelist:
             if comlist['LatestDate'] is None:
                 recentstatus = 'Loading'

@@ -65,15 +65,16 @@ def dbUpdate(ComicIDList=None, calledfrom=None, sched=False):
                                   "ComicID": comlist['ComicID'],
                                   "ComicName": comlist['ComicName'],
                                   "ComicYear": comlist['ComicYear'],
-                                  "Corrected_SeriesYear": comlist['Corrected_SeriesYear']})
+                                  "Corrected_SeriesYear": comlist['Corrected_SeriesYear'],
+                                  "Corrected_Type": comlist['Corrected_Type']})

         else:
-            comiclist = myDB.select('SELECT LatestDate, LastUpdated, ComicID, ComicName, ComicYear, Corrected_SeriesYear from comics WHERE Status="Active" or Status="Loading" order by LastUpdated DESC, latestDate ASC')
+            comiclist = myDB.select('SELECT LatestDate, LastUpdated, ComicID, ComicName, ComicYear, Corrected_SeriesYear, Corrected_Type from comics WHERE Status="Active" or Status="Loading" order by LastUpdated DESC, latestDate ASC')
     else:
         comiclist = []
         comiclisting = ComicIDList
         for cl in comiclisting:
-            comiclist += myDB.select('SELECT ComicID, ComicName, ComicYear, Corrected_SeriesYear, LastUpdated from comics WHERE ComicID=? order by LastUpdated DESC, LatestDate ASC', [cl])
+            comiclist += myDB.select('SELECT ComicID, ComicName, ComicYear, Corrected_SeriesYear, Corrected_Type, LastUpdated from comics WHERE ComicID=? order by LastUpdated DESC, LatestDate ASC', [cl])

     if all([sched is False, calledfrom is None]):
         logger.info('Starting update for %i active comics' % len(comiclist))

@@ -86,6 +87,10 @@ def dbUpdate(ComicIDList=None, calledfrom=None, sched=False):
     for comic in sorted(comiclist, key=operator.itemgetter('LastUpdated'), reverse=True):
         dspyear = comic['ComicYear']
         csyear = None
+        fixed_type = None
+
+        if comic['Corrected_Type'] is not None:
+            fixed_type = comic['Corrected_Type']

         if comic['Corrected_SeriesYear'] is not None:
             csyear = comic['Corrected_SeriesYear']

@@ -180,7 +185,7 @@ def dbUpdate(ComicIDList=None, calledfrom=None, sched=False):
             logger.fdebug("Refreshing the series and pulling in new data using only CV.")

             if whack == False:
-                chkstatus = mylar.importer.addComictoDB(ComicID, mismatch, calledfrom='dbupdate', annload=annload, csyear=csyear)
+                chkstatus = mylar.importer.addComictoDB(ComicID, mismatch, calledfrom='dbupdate', annload=annload, csyear=csyear, fixed_type=fixed_type)
                 if chkstatus['status'] == 'complete':
                     #delete the data here if it's all valid.
                     logger.fdebug("Deleting all old issue data to make sure new data is clean...")

@@ -692,7 +697,8 @@ def nzblog(IssueID, NZBName, ComicName, SARC=None, IssueArcID=None, id=None, pro
     if chkd is None:
         pass
     else:
-        if chkd['AltNZBName'] is None or chkd['AltNZBName'] == '':
+        altnames = chkd['AltNZBName']
+        if any([altnames is None, altnames == '']):
             #we need to wipe the entry so we can re-update with the alt-nzbname if required
             myDB.action('DELETE FROM nzblog WHERE IssueID=? and Provider=?', [IssueID, prov])
             logger.fdebug('Deleted stale entry from nzblog for IssueID: ' + str(IssueID) + ' [' + prov + ']')

@@ -925,6 +931,15 @@ def forceRescan(ComicID, archive=None, module=None, recheck=False):
         altnames = rescan['AlternateSearch'] + '##'
     else:
         altnames = ''

+    if (all([rescan['Type'] != 'Print', rescan['Type'] != 'Digital', rescan['Type'] != 'None', rescan['Type'] is not None]) and rescan['Corrected_Type'] != 'Print') or rescan['Corrected_Type'] == 'TPB':
+        if rescan['Type'] == 'One-Shot' and rescan['Corrected_Type'] is None:
+            booktype = 'One-Shot'
+        else:
+            booktype = 'TPB'
+    else:
+        booktype = None
+
     annscan = myDB.select('SELECT * FROM annuals WHERE ComicID=?', [ComicID])
     if annscan is None:
         pass

@@ -964,9 +979,13 @@ def forceRescan(ComicID, archive=None, module=None, recheck=False):
             files_arc = arcval.listFiles()
             fca.append(files_arc)
             comiccnt = int(files_arc['comiccount'])

     fcb = []
     fc = {}
-    #if len(fca) > 0:
+
+    is_cnt = myDB.select("SELECT COUNT(*) FROM issues WHERE ComicID=?", [ComicID])
+    iscnt = is_cnt[0][0]
+
     for ca in fca:
         i = 0
         while True:

@@ -974,16 +993,26 @@ def forceRescan(ComicID, archive=None, module=None, recheck=False):
                 cla = ca['comiclist'][i]
             except (IndexError, KeyError) as e:
                 break
-            fcb.append({"ComicFilename": cla['ComicFilename'],
-                        "ComicLocation": cla['ComicLocation'],
-                        "ComicSize": cla['ComicSize'],
-                        "JusttheDigits": cla['JusttheDigits'],
-                        "AnnualComicID": cla['AnnualComicID']})
+
+            try:
+                if all([booktype == 'TPB', iscnt > 1]) or all([booktype == 'One-Shot', iscnt == 1]):
+                    if cla['SeriesVolume'] is not None:
+                        just_the_digits = re.sub('[^0-9]', '', cla['SeriesVolume']).strip()
+                    else:
+                        just_the_digits = re.sub('[^0-9]', '', cla['JusttheDigits']).strip()
+                else:
+                    just_the_digits = cla['JusttheDigits']
+            except Exception as e:
+                logger.warn('[Exception: %s] Unable to properly match up/retrieve issue number (or volume) for this [CS: %s]' % (e,cla))
+            else:
+                fcb.append({"ComicFilename": cla['ComicFilename'],
+                            "ComicLocation": cla['ComicLocation'],
+                            "ComicSize": cla['ComicSize'],
+                            "JusttheDigits": just_the_digits,
+                            "AnnualComicID": cla['AnnualComicID']})
             i+=1

     fc['comiclist'] = fcb
-    is_cnt = myDB.select("SELECT COUNT(*) FROM issues WHERE ComicID=?", [ComicID])
-    iscnt = is_cnt[0][0]
+    #iscnt = rescan['Total']

     havefiles = 0
     if mylar.CONFIG.ANNUALS_ON:

@@ -1064,10 +1093,13 @@ def forceRescan(ComicID, archive=None, module=None, recheck=False):
                     return
                 else:
                     break
-            temploc = tmpfc['JusttheDigits'].replace('_', ' ')
-            temploc = re.sub('[\#\']', '', temploc)
-            logger.fdebug(module + ' temploc: ' + temploc)
+            if tmpfc['JusttheDigits'] is not None:
+                temploc= tmpfc['JusttheDigits'].replace('_', ' ')
+                temploc = re.sub('[\#\']', '', temploc)
+                logger.fdebug('temploc: %s' % temploc)
+            else:
+                temploc = None

             if all(['annual' not in temploc.lower(), 'special' not in temploc.lower()]):
                 #remove the extension here
                 extensions = ('.cbr', '.cbz', '.cb7')
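This commit leans on the any([...]) / all([...]) list idiom throughout (see the booktype test in forceRescan above). It reads like a truth table, but unlike a chained and/or, every element of the list is evaluated before the check runs, so it must not be used when a later element can raise. A quick illustration:

    rescan_type = 'TPB'

    # Equivalent to chained 'and' tests, but evaluated eagerly:
    if all([rescan_type != 'Print', rescan_type != 'Digital',
            rescan_type != 'None', rescan_type is not None]):
        booktype = 'TPB'

    value = None
    # any([value is not None, value.lower() == 'tpb'])  # AttributeError:
    # both list elements are built before any() ever runs.
    safe = value is not None and value.lower() == 'tpb'  # short-circuits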
@@ -26,6 +26,7 @@ from datetime import timedelta, date
 import re
 import json
+import copy
 import ntpath

 from mako.template import Template
 from mako.lookup import TemplateLookup

@@ -179,12 +180,27 @@ class WebInterface(object):
         else:
             comicImage = comic['ComicImage']
         comicpublisher = helpers.publisherImages(comic['ComicPublisher'])

+        if comic['Collects'] is not None:
+            issues_list = json.loads(comic['Collects'])
+        else:
+            issues_list = None
+        #logger.info('issues_list: %s' % issues_list)
+
+        if comic['Corrected_Type'] == 'TPB':
+            force_type = 1
+        elif comic['Corrected_Type'] == 'Print':
+            force_type = 2
+        else:
+            force_type = 0
+
         comicConfig = {
             "fuzzy_year0": helpers.radio(int(usethefuzzy), 0),
             "fuzzy_year1": helpers.radio(int(usethefuzzy), 1),
             "fuzzy_year2": helpers.radio(int(usethefuzzy), 2),
             "skipped2wanted": helpers.checked(skipped2wanted),
             "force_continuing": helpers.checked(force_continuing),
+            "force_type": helpers.checked(force_type),
             "delete_dir": helpers.checked(mylar.CONFIG.DELETE_REMOVE_DIR),
             "allow_packs": helpers.checked(int(allowpacks)),
             "corrected_seriesyear": comic['ComicYear'],

@@ -196,6 +212,7 @@ class WebInterface(object):
             "publisher_image_alt": comicpublisher['publisher_image_alt'],
             "publisher_imageH": comicpublisher['publisher_imageH'],
             "publisher_imageW": comicpublisher['publisher_imageW'],
+            "issue_list": issues_list,
             "ComicImage": comicImage + '?' + datetime.datetime.now().strftime('%y-%m-%d %H:%M:%S')
         }

@@ -223,6 +240,7 @@ class WebInterface(object):
             "Int_IssueNumber": ann['Int_IssueNumber'],
             "IssueName": issuename,
             "IssueDate": ann['IssueDate'],
+            "DigitalDate": ann['DigitalDate'],
             "Status": ann['Status'],
             "Location": ann['Location'],
             "ComicID": ann['ComicID'],

@@ -574,6 +592,7 @@ class WebInterface(object):
                 st_issueid = str(storyarcid) + "_" + str(random.randint(1000,9999))
             issnum = arcval['Issue_Number']
             issdate = str(arcval['Issue_Date'])
+            digitaldate = str(arcval['Digital_Date'])
             storedate = str(arcval['Store_Date'])

             int_issnum = helpers.issuedigits(issnum)

@@ -603,6 +622,7 @@ class WebInterface(object):
                         "Issue_Number": issnum,
                         "IssueDate": issdate,
                         "ReleaseDate": storedate,
+                        "DigitalDate": digitaldate,
                         "ReadingOrder": readingorder, #n +1,
                         "Int_IssueNumber": int_issnum,
                         "Manual": manual_mod})

@@ -644,7 +664,8 @@ class WebInterface(object):
                     "TotalIssues": storyarcissues,
                     "ReadingOrder": AD['ReadingOrder'],
                     "IssueDate": AD['IssueDate'],
                     "ReleaseDate": AD['ReleaseDate'],
+                    "DigitalDate": AD['DigitalDate'],
                     "SeriesYear": seriesYear,
                     "IssuePublisher": issuePublisher,
                     "CV_ArcID": arcid,

@@ -657,8 +678,10 @@ class WebInterface(object):
         logger.fdebug(module + ' Now searching your watchlist for matches belonging to this story arc.')
         self.ArcWatchlist(storyarcid)
+        if arcrefresh:
+            logger.info('%s Successfully Refreshed %s' % (module, storyarcname))
+            return
+        else:
             logger.info('%s Successfully Added %s' % (module, storyarcname))
         raise cherrypy.HTTPRedirect("detailStoryArc?StoryArcID=%s&StoryArcName=%s" % (storyarcid, storyarcname))
     addStoryArc.exposed = True

@@ -898,6 +921,8 @@ class WebInterface(object):
         if comic['ComicName'] is None: ComicName = "None"
         else: ComicName = comic['ComicName']
         seriesdir = comic['ComicLocation']
+        seriesyear = comic['ComicYear']
+        seriesvol = comic['ComicVersion']
         logger.info(u"Deleting all traces of Comic: " + ComicName)
         myDB.action('DELETE from comics WHERE ComicID=?', [ComicID])
         myDB.action('DELETE from issues WHERE ComicID=?', [ComicID])

@@ -912,10 +937,12 @@ class WebInterface(object):
                     shutil.rmtree(seriesdir)
                 except:
                     logger.warn('Unable to remove directory after removing series from Mylar.')
+                else:
+                    logger.info('Successfully removed directory: %s' % (seriesdir))
             else:
                 logger.warn('Unable to remove directory as it does not exist in : ' + seriesdir)
         myDB.action('DELETE from readlist WHERE ComicID=?', [ComicID])

+        logger.info('Successful deletion of %s %s (%s) from your watchlist' % (ComicName, seriesvol, seriesyear))
         helpers.ComicSort(sequence='update')
         raise cherrypy.HTTPRedirect("home")
     deleteSeries.exposed = True

@@ -1352,6 +1379,7 @@ class WebInterface(object):
         controlValueDict = {"IssueArcID": IssueArcID}
         newStatus = {"Status": "Wanted"}
         myDB.upsert("storyarcs", newStatus, controlValueDict)
+        logger.info('[STORY-ARCS] Now Queuing %s (%s) #%s for search' % (ComicName, ComicYear, ComicIssue))
         s = mylar.SEARCH_QUEUE.put({'issueid': IssueArcID, 'comicname': ComicName, 'seriesyear': ComicYear, 'comicid': ComicID, 'issuenumber': ComicIssue})
         #foundcom, prov = search.search_init(ComicName=ComicName, IssueNumber=ComicIssue, ComicYear=ComicYear, SeriesYear=None, Publisher=Publisher, IssueDate=IssueDate, StoreDate=ReleaseDate, IssueID=None, AlternateSearch=None, UseFuzzy=None, ComicVersion=dateload['Volume'], SARC=SARC, IssueArcID=IssueArcID)
         #if foundcom['status'] is True:

@@ -1372,7 +1400,7 @@ class WebInterface(object):
         except:
             ComicYear == now.year
         if Publisher == 'COMICS': Publisher = None
-        logger.info(u"Marking " + ComicName + " " + ComicIssue + " as wanted...")
+        logger.info('Now Queuing %s %s for search' % (ComicName, ComicIssue))
         s = mylar.SEARCH_QUEUE.put({'issueid': IssueID, 'comicname': ComicName, 'seriesyear': ComicYear, 'comicid': ComicID, 'issuenumber': ComicIssue})
         #foundcom, prov = search.search_init(ComicName=ComicName, IssueNumber=ComicIssue, ComicYear=ComicYear, SeriesYear=None, Publisher=Publisher, IssueDate=IssueDate, StoreDate=IssueDate, IssueID=IssueID, ComicID=ComicID, AlternateSearch=None, mode=mode, UseFuzzy=None, ComicVersion=ComicVersion, allow_packs=False, manual=manual)
         if manual is True:

@@ -1393,6 +1421,7 @@ class WebInterface(object):
         ComicVersion = cdname['ComicVersion']
         ComicName = cdname['ComicName']
         TorrentID_32p = cdname['TorrentID_32P']
+        BookType = cdname['Type']
         controlValueDict = {"IssueID": IssueID}
         newStatus = {"Status": "Wanted"}
         if mode == 'want':

@@ -1438,7 +1467,13 @@ class WebInterface(object):
         #Publisher = miy['ComicPublisher']
         #UseAFuzzy = miy['UseFuzzy']
         #ComicVersion = miy['ComicVersion']
-        s = mylar.SEARCH_QUEUE.put({'issueid': IssueID, 'comicname': ComicName, 'seriesyear': SeriesYear, 'comicid': ComicID, 'issuenumber': ComicIssue})
+        if BookType == 'TPB':
+            logger.info('[%s] Now Queueing %s (%s) for search' % (BookType, ComicName, SeriesYear))
+        elif ComicIssue is None:
+            logger.info('Now Queueing %s (%s) for search' % (ComicName, SeriesYear))
+        else:
+            logger.info('Now Queueing %s (%s) #%s for search' % (ComicName, SeriesYear, ComicIssue))
+        s = mylar.SEARCH_QUEUE.put({'issueid': IssueID, 'comicname': ComicName, 'seriesyear': SeriesYear, 'comicid': ComicID, 'issuenumber': ComicIssue, 'booktype': BookType})
         # foundcom, prov = search.search_init(ComicName, ComicIssue, ComicYear, SeriesYear, Publisher, issues['IssueDate'], storedate, IssueID, AlternateSearch, UseAFuzzy, ComicVersion, mode=mode, ComicID=ComicID, manualsearch=manualsearch, filesafe=ComicName_Filesafe, allow_packs=AllowPacks, torrentid_32p=TorrentID_32p)
         # if foundcom['status'] is True:
         #     # file check to see if issue exists and update 'have' count

@@ -1585,7 +1620,7 @@ class WebInterface(object):
         return
     pullSearch.exposed = True

-    def pullist(self, week=None, year=None, generateonly=False):
+    def pullist(self, week=None, year=None, generateonly=False, current=None):
         myDB = db.DBConnection()
         autowant = []
         if generateonly is False:

@@ -1600,7 +1635,7 @@ class WebInterface(object):
         weeklyresults = []
         wantedcount = 0

-        weekinfo = helpers.weekly_info(week, year)
+        weekinfo = helpers.weekly_info(week, year, current)

         popit = myDB.select("SELECT * FROM sqlite_master WHERE name='weekly' and type='table'")
         if popit:

@@ -1682,7 +1717,8 @@ class WebInterface(object):
                     "HAVEIT": haveit,
                     "LINK": linkit,
                     "HASH": None,
-                    "AUTOWANT": False
+                    "AUTOWANT": False,
+                    "FORMAT": weekly['format']
                     })
             else:
                 if any(x['ComicName'].lower() == weekly['COMIC'].lower() for x in autowant):

@@ -1698,7 +1734,8 @@ class WebInterface(object):
                         "HAVEIT": haveit,
                         "LINK": linkit,
                         "HASH": None,
-                        "AUTOWANT": True
+                        "AUTOWANT": True,
+                        "FORMAT": weekly['format']
                         })
                 else:
                     weeklyresults.append({

@@ -1713,7 +1750,8 @@ class WebInterface(object):
                         "HAVEIT": haveit,
                         "LINK": linkit,
                         "HASH": None,
-                        "AUTOWANT": False
+                        "AUTOWANT": False,
+                        "FORMAT": weekly['format']
                         })

         if tmp_status == 'Wanted':

@@ -2156,23 +2194,48 @@ class WebInterface(object):

     annualDelete.exposed = True

-    def previewRename(self, comicidlist):
+    def previewRename(self, **args): #comicid=None, comicidlist=None):
         file_format = mylar.CONFIG.FILE_FORMAT
         myDB = db.DBConnection()
         resultlist = []
-        for comicid in comicidlist:
-            comic = myDB.selectone("SELECT * FROM comics WHERE ComicID=?", [comicid]).fetchone()
+        for k,v in args.items():
+            if any([k == 'x', k == 'y']):
+                continue
+            elif 'file_format' in k:
+                file_format = str(v)
+            elif 'comicid' in k:
+                if type(v) is list:
+                    comicid = str(' '.join(v))
+                elif type(v) is unicode:
+                    comicid = re.sub('[\]\[\']', '', v.decode('utf-8').encode('ascii')).strip()
+                else:
+                    comicid = v
+
+        if comicid is not None and type(comicid) is not list:
+            comicidlist = []
+            comicidlist.append(comicid)
+        for cid in comicidlist:
+            comic = myDB.selectone("SELECT * FROM comics WHERE ComicID=?", [cid]).fetchone()
             comicdir = comic['ComicLocation']
             comicname = comic['ComicName']
-            issue = myDB.selectone("SELECT * FROM issues WHERE ComicID=? AND Location is not None ORDER BY ReleaseDate", [comicid]).fetchone()
-            if 'annual' in issue['Location'].lower():
-                annualize = 'yes'
-            else:
-                annualize = None
-            renameiss = helpers.rename_param(comicid, comicname, issue['Issue_Number'], issue['Location'], comicyear=None, issueid=issue['IssueID'], annualize=annualize)
-            resultlist.append({'original': issue['Location'],
-                               'new': renameiss['nfilename']})
+            issuelist = myDB.select("SELECT * FROM issues WHERE ComicID=? AND Location is not NULL ORDER BY ReleaseDate", [str(cid)])
+            if issuelist:
+                for issue in issuelist:
+                    if 'annual' in issue['Location'].lower():
+                        annualize = 'yes'
+                    else:
+                        annualize = None
+                    import filers
+                    rniss = filers.FileHandlers(ComicID=str(cid), IssueID=issue['IssueID'])
+                    renameiss = rniss.rename_file(issue['Location'], annualize=annualize, file_format=file_format)
+                    #renameiss = helpers.rename_param(comicid, comicname, issue['Issue_Number'], issue['Location'], comicyear=None, issueid=issue['IssueID'], annualize=annualize)
+                    resultlist.append({'issueid': renameiss['issueid'],
+                                       'comicid': renameiss['comicid'],
+                                       'original': issue['Location'],
+                                       'new': renameiss['nfilename']})
+
+        logger.info('resultlist: %s' % resultlist)
         return serve_template(templatename="previewrename.html", title="Preview Renamer", resultlist=resultlist, file_format=file_format, comicid=comicidlist)
     previewRename.exposed = True

     def manualRename(self, comicid):
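previewRename now takes **args because the form can post comicid once or many times, and cherrypy then hands over a plain string, a unicode string that renders a list (u"['1234', '5678']"), or an actual list. The loop above funnels all three shapes into a list of IDs; the same normalization in isolation (Python 2, a slightly widened version of the regex in the unicode branch above, since it also strips commas):

    import re

    def normalize_comicids(value):
        # Accepts '1234', u"['1234', '5678']" or ['1234', '5678'] and
        # always returns a list of comicid strings.
        if isinstance(value, list):
            return [str(v).strip() for v in value]
        if isinstance(value, unicode):
            value = re.sub(r"[\]\[',]", ' ', value).strip()
        return [v for v in str(value).split() if v]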
@@ -2266,6 +2329,10 @@ class WebInterface(object):
             else:
                 next_run = None
             if 'rss' in jb['JobName'].lower():
+                if jb['Status'] == 'Waiting' and mylar.CONFIG.ENABLE_RSS is False:
+                    mylar.RSS_STATUS = 'Paused'
+                elif jb['Status'] == 'Paused' and mylar.CONFIG.ENABLE_RSS is True:
+                    mylar.RSS_STATUS = 'Waiting'
                 status = mylar.RSS_STATUS
                 interval = str(mylar.CONFIG.RSS_CHECKINTERVAL) + ' mins'
             if 'weekly' in jb['JobName'].lower():

@@ -2285,6 +2352,9 @@ class WebInterface(object):
                 status = mylar.VERSION_STATUS
                 interval = str(mylar.CONFIG.CHECK_GITHUB_INTERVAL) + 'mins'

+            if status != jb['Status'] and not('rss' in jb['JobName'].lower()):
+                status = jb['Status']
+
             tmp.append({'prev_run_datetime': prev_run,
                         'next_run_datetime': next_run,
                         'interval': interval,

@@ -2306,17 +2376,32 @@ class WebInterface(object):
         if jobid is not None:
             myDB = db.DBConnection()
             if mode == 'pause':
-                mylar.SCHED.pause_job(jobid)
+                try:
+                    mylar.SCHED.pause_job(jobid)
+                except:
+                    pass
                 logger.info('[%s] Paused scheduled runtime.' % job)
                 ctrl = {'JobName': job}
                 val = {'Status': 'Paused'}
+                if jobid == 'rss':
+                    mylar.CONFIG.ENABLE_RSS = False
+                elif jobid == 'monitor':
+                    mylar.CONFIG.ENABLE_CHECK_FOLDER = False
                 myDB.upsert('jobhistory', val, ctrl)
             elif mode == 'resume':
-                mylar.SCHED.resume_job(jobid)
+                try:
+                    mylar.SCHED.resume_job(jobid)
+                except:
+                    pass
                 logger.info('[%s] Resumed scheduled runtime.' % job)
                 ctrl = {'JobName': job}
                 val = {'Status': 'Waiting'}
                 myDB.upsert('jobhistory', val, ctrl)
+                if jobid == 'rss':
+                    mylar.CONFIG.ENABLE_RSS = True
+                elif jobid == 'monitor':
+                    mylar.CONFIG.ENABLE_CHECK_FOLDER = True

             helpers.job_management()
         else:
             logger.warn('%s cannot be matched against any scheduled jobs - maybe you should restart?' % job)

@@ -2329,7 +2414,7 @@ class WebInterface(object):
             logger.info('[%s] Now force submitting job for jobid %s' % (jb, jobid))
             if any([jobid == 'rss', jobid == 'weekly', jobid =='search', jobid == 'version', jobid == 'updater', jobid == 'monitor']):
                 jb.modify(next_run_time=datetime.datetime.utcnow())
-                break
+            break
     schedulerForceCheck.exposed = True

     def manageComics(self):

@@ -2817,6 +2902,9 @@ class WebInterface(object):
         logger.fdebug('[%s] Issue to renumber sequence from : %s' % (issuearcid, valid_readingorder))
         reading_seq = 1
         for rc in sorted(readchk, key=itemgetter('ReadingOrder'), reverse=False):
+            filename = None
+            if rc['Location'] is not None:
+                filename = ntpath.basename(rc['Location'])
             if str(issuearcid) == str(rc['IssueArcID']):
                 logger.fdebug('new order sequence detected at #: %s' % valid_readingorder)
                 if valid_readingorder > int(rc['ReadingOrder']):

@@ -2839,10 +2927,8 @@ class WebInterface(object):
                 else:
                     #valid_readingorder
                     if valid_readingorder < old_reading_seq:
-                        logger.info('2')
                         reading_seq = int(rc['ReadingOrder'])
                     else:
-                        logger.info('3')
                         reading_seq = oldreading_seq +1
                     logger.fdebug('old sequence discovered at %s to %s' % (oldreading_seq, reading_seq))
                     oldreading_seq = None

@@ -2855,7 +2941,8 @@ class WebInterface(object):
                 logger.fdebug('reordering existing sequence as lower sequence has changed. Altering from %s to %s' % (rc['ReadingOrder'], reading_seq))
             new_readorder.append({'IssueArcID': IssueArcID,
                                   'IssueID': issueid,
-                                  'ReadingOrder': reading_seq})
+                                  'ReadingOrder': reading_seq,
+                                  'filename': filename})

         #we resequence in the following way:
         # everything before the new reading number stays the same

@@ -2866,6 +2953,14 @@ class WebInterface(object):
         #newrl = 0
         for rl in sorted(new_readorder, key=itemgetter('ReadingOrder'), reverse=False):

+            if rl['filename'] is not None:
+                try:
+                    if int(rl['ReadingOrder']) != int(rl['filename'][:rl['filename'].find('-')]) and mylar.CONFIG.READ2FILENAME is True:
+                        logger.fdebug('Order-Change: %s TO %s' % (int(rl['filename'][:rl['filename'].find('-')]), int(rl['ReadingOrder'])))
+                        logger.fdebug('%s to %s' % (rl['filename'], helpers.renamefile_readingorder(rl['ReadingOrder']) + '-' + rl['filename'][rl['filename'].find('-')+1:]))
+                except:
+                    pass
+
             rl_ctrl = {"IssueID": rl['IssueID'],
                        "IssueArcID": rl['IssueArcID'],
                        "StoryArcID": storyarcid}

@@ -3156,7 +3251,8 @@ class WebInterface(object):
             logger.info("No Story Arcs to search")
         else:
             #cycle through the story arcs here for matches on the watchlist
-            arcdir = helpers.filesafe(ArcWatch[0]['StoryArc'])
+            arcname = ArcWatch[0]['StoryArc']
+            arcdir = helpers.filesafe(arcname)
             arcpub = ArcWatch[0]['Publisher']
             if arcpub is None:
                 arcpub = ArcWatch[0]['IssuePublisher']

@@ -3441,6 +3537,8 @@ class WebInterface(object):
                 myDB.upsert("storyarcs", newVal, ctrlVal)
                 logger.info("Marked " + issue['ComicName'] + " :# " + issue['Issue_Number'] + " as " + issue['Status'])

+        arcstats = self.storyarc_main(StoryArcID)
+        logger.info('[STORY-ARCS] Completed Missing/Recheck Files for %s [%s / %s]' % (arcname, arcstats['Have'], arcstats['TotalIssues']))
         return

     ArcWatchlist.exposed = True

@@ -3837,7 +3935,7 @@ class WebInterface(object):
         return mylar.IMPORT_STATUS
     Check_ImportStatus.exposed = True

-    def comicScan(self, path, scan=0, libraryscan=0, redirect=None, autoadd=0, imp_move=0, imp_rename=0, imp_metadata=0, forcescan=0):
+    def comicScan(self, path, scan=0, libraryscan=0, redirect=None, autoadd=0, imp_move=0, imp_paths=0, imp_rename=0, imp_metadata=0, forcescan=0):
         import Queue
         queue = Queue.Queue()

@@ -3848,10 +3946,15 @@ class WebInterface(object):
         # #to handle long paths, let's append the '\\?\' to the path to allow for unicode windows api access
         # path = "\\\\?\\" + path
         mylar.CONFIG.COMIC_DIR = path
-        mylar.CONFIG.IMP_MOVE = imp_move
-        mylar.CONFIG.IMP_RENAME = imp_rename
-        mylar.CONFIG.IMP_METADATA = imp_metadata
-        #mylar.config_write()
+        mylar.CONFIG.IMP_MOVE = bool(imp_move)
+        mylar.CONFIG.IMP_RENAME = bool(imp_rename)
+        mylar.CONFIG.IMP_METADATA = bool(imp_metadata)
+        mylar.CONFIG.IMP_PATHS = bool(imp_paths)
+
+        mylar.CONFIG.configure(update=True)
+        # Write the config
+        logger.info('Now updating config...')
+        mylar.CONFIG.writeconfig()

         logger.info('forcescan is: ' + str(forcescan))
         if mylar.IMPORTLOCK and forcescan == 1:

@@ -4603,6 +4706,7 @@ class WebInterface(object):
             "extra_torznabs": sorted(mylar.CONFIG.EXTRA_TORZNABS, key=itemgetter(4), reverse=True),
             "newznab": helpers.checked(mylar.CONFIG.NEWZNAB),
             "extra_newznabs": sorted(mylar.CONFIG.EXTRA_NEWZNABS, key=itemgetter(5), reverse=True),
+            "enable_ddl": helpers.checked(mylar.CONFIG.ENABLE_DDL),
             "enable_rss": helpers.checked(mylar.CONFIG.ENABLE_RSS),
             "rss_checkinterval": mylar.CONFIG.RSS_CHECKINTERVAL,
             "rss_last": rss_sclast,

@@ -4772,13 +4876,18 @@ class WebInterface(object):
         raise cherrypy.HTTPRedirect("comicDetails?ComicID=%s" % comicid)
     manual_annual_add.exposed = True

-    def comic_config(self, com_location, ComicID, alt_search=None, fuzzy_year=None, comic_version=None, force_continuing=None, alt_filename=None, allow_packs=None, corrected_seriesyear=None, torrentid_32p=None):
+    def comic_config(self, com_location, ComicID, alt_search=None, fuzzy_year=None, comic_version=None, force_continuing=None, force_type=None, alt_filename=None, allow_packs=None, corrected_seriesyear=None, torrentid_32p=None):
         myDB = db.DBConnection()
-        chk1 = myDB.selectone('SELECT ComicLocation FROM comics WHERE ComicID=?', [ComicID]).fetchone()
-        if chk1 is None:
+        chk1 = myDB.selectone('SELECT ComicLocation, Corrected_Type FROM comics WHERE ComicID=?', [ComicID]).fetchone()
+        if chk1[0] is None:
             orig_location = com_location
         else:
-            orig_location = chk1['ComicLocation']
+            orig_location = chk1[0]
+        if chk1[1] is None:
+            orig_type = None
+        else:
+            orig_type = chk1[1]

         #--- this is for multiple search terms............
         #--- works, just need to redo search.py to accomodate multiple search terms
         ffs_alt = []

@@ -4802,7 +4911,7 @@ class WebInterface(object):
             asearch = str(alt_search)

         controlValueDict = {'ComicID': ComicID}
-        newValues = {"ComicLocation": com_location}
+        newValues = {}
         if asearch is not None:
             if re.sub(r'\s', '', asearch) == '':
                 newValues['AlternateSearch'] = "None"

@@ -4834,6 +4943,23 @@ class WebInterface(object):
         else:
             newValues['ForceContinuing'] = 1

+        if force_type == '1':
+            newValues['Corrected_Type'] = 'TPB'
+        elif force_type == '2':
+            newValues['Corrected_Type'] = 'Print'
+        else:
+            newValues['Corrected_Type'] = None
+
+        if orig_type != force_type:
+            if '$Type' in mylar.CONFIG.FOLDER_FORMAT and com_location == orig_location:
+                #rename folder to accomodate new forced TPB format.
+                import filers
+                x = filers.FileHandlers(ComicID=ComicID)
+                newcom_location = x.folder_create(booktype=newValues['Corrected_Type'])
+                if newcom_location is not None:
+                    com_location = newcom_location
+
         if allow_packs is None:
             newValues['AllowPacks'] = 0
         else:

@@ -4864,6 +4990,9 @@ class WebInterface(object):
             if not checkdirectory:
                 logger.warn('Error trying to validate/create directory. Aborting this process at this time.')
                 return

+        newValues['ComicLocation'] = com_location
+
         myDB.upsert("comics", newValues, controlValueDict)
         logger.fdebug('Updated Series options!')
         raise cherrypy.HTTPRedirect("comicDetails?ComicID=%s" % ComicID)

@@ -4935,7 +5064,7 @@ class WebInterface(object):
             'lowercase_filenames', 'autowant_upcoming', 'autowant_all', 'comic_cover_local', 'alternate_latest_series_covers', 'cvinfo', 'snatchedtorrent_notify',
             'prowl_enabled', 'prowl_onsnatch', 'nma_enabled', 'nma_onsnatch', 'pushover_enabled', 'pushover_onsnatch', 'boxcar_enabled',
             'boxcar_onsnatch', 'pushbullet_enabled', 'pushbullet_onsnatch', 'telegram_enabled', 'telegram_onsnatch', 'slack_enabled', 'slack_onsnatch',
-            'opds_enable', 'opds_authentication', 'opds_metainfo']
+            'opds_enable', 'opds_authentication', 'opds_metainfo'] #, 'enable_ddl']

         for checked_config in checked_configs:
             if checked_config not in kwargs:

@@ -5891,9 +6020,9 @@ class WebInterface(object):
             if sresults is not None:
                 updater.foundsearch(dsr['ComicID'], dsr['IssueID'], mode='series', provider=dsr['tmpprov'], hash=sresults['t_hash'])
         except:
-            return json.dumps({'result': 'failure'})
+            return False #json.dumps({'result': 'failure'})
         else:
-            return json.dumps({'result': 'success'})
+            return True #json.dumps({'result': 'success'})

     download_specific_release.exposed = True
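comic_config stores the override as Corrected_Type: the form posts force_type='1' for TPB/GN and '2' for Print, and anything else clears the override; a folder rename is then attempted only when the override changed, $Type appears in the folder format, and the location was not also edited by hand in the same submit. The two decisions on their own, as a sketch:

    def corrected_type(force_type):
        # '1' -> force TPB, '2' -> force Print, anything else -> no override.
        return {'1': 'TPB', '2': 'Print'}.get(force_type)

    def needs_folder_rename(orig_type, force_type, folder_format,
                            com_location, orig_location):
        # Rename only when the override changed, the folder format embeds
        # $Type, and the location itself was not edited in the same submit.
        return (orig_type != force_type
                and '$Type' in folder_format
                and com_location == orig_location)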
@@ -46,7 +46,7 @@ def pullit(forcecheck=None, weeknumber=None, year=None):
     except (sqlite3.OperationalError, TypeError), msg:
         logger.info(u"Error Retrieving weekly pull list - attempting to adjust")
         myDB.action("DROP TABLE weekly")
-        myDB.action("CREATE TABLE IF NOT EXISTS weekly (SHIPDATE TEXT, PUBLISHER TEXT, ISSUE TEXT, COMIC VARCHAR(150), EXTRA TEXT, STATUS TEXT, ComicID TEXT, IssueID TEXT, CV_Last_Update TEXT, DynamicName TEXT, weeknumber TEXT, year TEXT, volume TEXT, seriesyear TEXT, annuallink TEXT, rowid INTEGER PRIMARY KEY)")
+        myDB.action("CREATE TABLE IF NOT EXISTS weekly (SHIPDATE TEXT, PUBLISHER TEXT, ISSUE TEXT, COMIC VARCHAR(150), EXTRA TEXT, STATUS TEXT, ComicID TEXT, IssueID TEXT, CV_Last_Update TEXT, DynamicName TEXT, weeknumber TEXT, year TEXT, volume TEXT, seriesyear TEXT, annuallink TEXT, format TEXT, rowid INTEGER PRIMARY KEY)")
         pulldate = '00000000'
         logger.fdebug(u"Table re-created, trying to populate")
     else:

@@ -440,7 +440,7 @@ def pullit(forcecheck=None, weeknumber=None, year=None):
     logger.info(u"Populating the NEW Weekly Pull list into Mylar for week " + str(weeknumber))

     myDB.action("drop table if exists weekly")
-    myDB.action("CREATE TABLE IF NOT EXISTS weekly (SHIPDATE, PUBLISHER TEXT, ISSUE TEXT, COMIC VARCHAR(150), EXTRA TEXT, STATUS TEXT, ComicID TEXT, IssueID TEXT, CV_Last_Update TEXT, DynamicName TEXT, weeknumber TEXT, year TEXT, volume TEXT, seriesyear TEXT, annuallink TEXT, rowid INTEGER PRIMARY KEY)")
+    myDB.action("CREATE TABLE IF NOT EXISTS weekly (SHIPDATE, PUBLISHER TEXT, ISSUE TEXT, COMIC VARCHAR(150), EXTRA TEXT, STATUS TEXT, ComicID TEXT, IssueID TEXT, CV_Last_Update TEXT, DynamicName TEXT, weeknumber TEXT, year TEXT, volume TEXT, seriesyear TEXT, annuallink TEXT, format TEXT, rowid INTEGER PRIMARY KEY)")

     csvfile = open(newfl, "rb")
     creader = csv.reader(csvfile, delimiter='\t')
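Both pull-list code paths pick up the new format column by recreating the weekly table; existing installs reach the new schema through the sqlite3.OperationalError handler above, which drops and rebuilds it (losing the current week's rows until the next populate). A non-destructive alternative, as an illustrative sketch only, since the commit itself uses DROP plus CREATE:

    import sqlite3

    def ensure_format_column(dbpath):
        # Adds weekly.format only if the column is missing, so existing
        # pull-list rows survive the upgrade.
        conn = sqlite3.connect(dbpath)
        try:
            cols = [row[1] for row in conn.execute('PRAGMA table_info(weekly)')]
            if 'format' not in cols:
                conn.execute('ALTER TABLE weekly ADD COLUMN format TEXT')
                conn.commit()
        finally:
            conn.close()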
@@ -14,7 +14,7 @@
 # You should have received a copy of the GNU General Public License
 # along with Mylar. If not, see <http://www.gnu.org/licenses/>.

-import lib.requests as requests
+import requests
 from bs4 import BeautifulSoup, UnicodeDammit
 import urlparse
 import re

@@ -30,7 +30,7 @@ from mylar import logger, helpers
 class wwt(object):

     def __init__(self, name, issue):
-        self.url = 'https://worldwidetorrents.me/'
+        self.url = mylar.WWTURL
         self.query = name + ' ' + str(int(issue)) #'Batman White Knight'
         logger.info('query set to : %s' % self.query)
         pass