Merge branch 'development'

This commit is contained in:
evilhero 2018-10-20 14:05:34 -04:00
commit 3e12599502
32 changed files with 777 additions and 365 deletions

39
data/css/alphabetSearch.css Executable file
View File

@ -0,0 +1,39 @@
div.alphabet {
position: relative;
display: table;
width: 100%;
margin-bottom: 1em;
}
div.alphabet span {
display: table-cell;
color: #3174c7;
cursor: pointer;
text-align: center;
width: 3.5%
}
div.alphabet span:hover {
text-decoration: underline;
}
div.alphabet span.active {
color: black;
}
div.alphabet span.empty {
color: red;
}
div.alphabetInfo {
display: block;
position: absolute;
background-color: #111;
border-radius: 3px;
color: white;
top: 2em;
height: 1.8em;
padding-top: 0.4em;
text-align: center;
z-index: 1;
}

View File

@ -1,83 +0,0 @@
div.alphabet {
clear:both;
position:relative;
margin:0.5em 0;
}
@media screen and (max-width:963px){
div.alphabet {
text-align:center;
}
}
div.alphabet ul {
display:inline-block;
margin:0;
padding:0;
list-style:none;
}
div.alphabet li {
display:inline-block;
}
div.alphabet a {
display:inline-block;
cursor:pointer;
text-align:center;
text-decoration:none;
box-sizing:content-box;
padding:0.2em 0.1em;
min-width:1.3em;
color:#333 !important;
border:1px solid transparent;
border-radius:2px;
}
div.alphabet a:hover {
color:#FFF !important;
border:1px solid #111;
background-color:#585858;
background:linear-gradient(to bottom, #585858 0%, #111 100%);
}
div.alphabet a:active {
outline:none;
background-color:#2b2b2b;
background:linear-gradient(to bottom, #2b2b2b 0%, #0c0c0c 100%);
box-shadow:inset 0 0 3px #111;
}
div.alphabet a.empty {
color:#888 !important;
}
div.alphabet a.active,
div.alphabet a.active.empty {
color:#333 !important;
border:1px solid #979797;
background-color:#FFF;
background:linear-gradient(to bottom, #fff 0%, #dcdcdc 100%)
}
div.alphabet .alphabet-info-display {
margin-right:0.5em;
}
div.alphabet div.alphabet-info {
position:absolute;
border:1px solid #111;
background-color:#585858;
background:linear-gradient(to bottom, #585858 0%, #111 100%);
border-radius:2px;
color:#FFF;
margin-top:0.2em;
padding:0.2em 0.4em;
text-align:center;
opacity:0;
z-index:9999;
}
tr.alphabet-group, tr.alphabet-group:hover {
background-color:rgba(0,0,0,0.15) !important;
}

View File

@ -100,8 +100,6 @@
%if mylar.DONATEBUTTON:
<a href="config#donate"><img src="interfaces/default/images/donate.png" height="25" width="120"></a>
%endif
<a href="https://twitter.com/mylarcomics" class="twitter-follow-button" data-show-count="false">@mylarcomics</a>
<script>!function(d,s,id){var js,fjs=d.getElementsByTagName(s)[0];if(!d.getElementById(id)){js=d.createElement(s);js.id=id;js.src="//platform.twitter.com/widgets.js";fjs.parentNode.insertBefore(js,fjs);}}(document,"script","twitter-wjs");</script>
</div>
<div id="version">
Version: <em>${mylar.CURRENT_VERSION}</em>

View File

@ -21,7 +21,7 @@
<div id="paddingheader">
<h1 class="clearfix"><img src="interfaces/default/images/icon_gear.png" alt="settings"/>Settings</h1>
</div>
<form action="configUpdate" method="post" class="form" id="configUpdate">
<form action="configUpdate" method="post" id="configUpdate">
<div id="tabs">
<ul>
<li><a href="#tabs-1">Information</a></li>
@ -72,6 +72,7 @@
<legend>Bragging Rights</legend>
<div>
<label><strong># of Series you're watching: </strong> ${comicinfo['COUNT_COMICS']}</br></label>
<label><strong># of Series you're watching that are continuing: </strong> ${comicinfo['CCONTCOUNT']}</br></label>
<label><strong># of Issues you're watching: </strong> ${comicinfo['COUNT_ISSUES']}</br></label>
<label><strong># of Issues you actually have: </strong> ${comicinfo['COUNT_HAVES']}</br></label>
<label><strong> ... total HD-space being used: </strong> ${comicinfo['COUNT_SIZE']}</br></label>
@ -100,6 +101,23 @@
<legend>Branch history</legend>
<div>${config['branch_history']}</div>
</fieldset>
<fieldset>
<div class="row">
<label style="font-size:16px;font-weight:bold;">Provider Usage (${config['dltotals']})</label>
<a href="#" onclick="show_stats();"><span style="vertical-align:bottom;text-align:center;float:right;">Show Stats</span></a>
</div>
<div id="stats" style="display:none">
</br>
%for dl in config['dlstats']:
<%
dlline = '%s: %s snatches' % (dl[0], dl[1])
if dl[0] == 'newznab':
dlline += '<small> (*erroneous collection data)</small>'
%>
${dlline}</br>
%endfor
</div>
</fieldset>
</td>
</tr>
</table>
@ -337,7 +355,7 @@
<label>SABnzbd Password:</label>
<input type="password" name="sab_password" id="sab_password" value="${config['sab_pass']| h}" size="20">
</div>
<div Class="row">
<div class="row">
<div class="populatesab">
<label>SABnzbd API:</label>
<input type="text" name="sab_apikey" id="sab_apikey" value="${config['sab_api']}" size="28">
@ -629,7 +647,7 @@
<div class="row">
<label>Deluge Host:Port </label>
<input type="text" name="deluge_host" value="${config['deluge_host']}" size="30">
<small>port uses the deluge daemon port (remote connection to daemon has to be enabled)</small>
<small>(ie. 192.168.1.2:58846) port uses the deluge daemon port (remote connection to daemon has to be enabled)</small>
</div>
<div class="row">
<label>Deluge Username</label>
@ -1447,10 +1465,10 @@
<input type="button" value="Save Changes" onclick="doAjaxCall('configUpdate',$(this),'tabs',true);return false;" data-success="Changes saved successfully">
<div class="message">
<p><span class="ui-icon ui-icon-info" style="float: left; margin-right: .3em;"></span>Web Interface changes require a restart to take effect</p>
</div>
</div>
</form>
</div>
</div>
</form>
</%def>
<%def name="javascriptIncludes()">
@ -1481,6 +1499,15 @@
document.getElementById("auth_options").style.display = "none";
}
}
function show_stats()
{
var x = document.getElementById("stats");
if (x.style.display === "none") {
x.style.display = "block";
} else {
x.style.display = "none";
}
}
</script>
<script>
function initThisPage()

View File

@ -357,6 +357,19 @@ table.display tr.even.gradeF {
background-color: #FF5858;
}
table.display tr.odd.gradeT1 {
background-color: #FFDDDD;
}
table.display tr.even.gradeT1 {
background-color: #FFDDDD;
}
table.display tr.odd.gradeT2 {
background-color: #FFDDAA;
}
table.display tr.even.gradeT2 {
background-color: #FFDDAA;
}
table.display tr.gradeL #status {
background: url("../images/loader_black.gif") no-repeat scroll 15px center transparent;
font-size: 11px;
@ -373,6 +386,8 @@ table.display tr.gradeP td,
table.display tr.gradeD td,
table.display tr.gradeT td,
table.display tr.gradeF td,
table.display tr.gradeT1 td,
table.display tr.gradeT2 td,
table.display tr.gradeZ td {border-bottom: 1px solid #FFF;}
table.display tr:last-child td {
border-bottom: 1px solid #eee;
@ -478,6 +493,23 @@ table.display_no_select tr.odd.gradeZ {
table.display_no_select tr.even.gradeZ {
background-color: white;
}
table.display_no_select tr.odd.gradeT1 {
background-color: #FFDDDD;
}
table.display_no_select tr.even.gradeT1 {
background-color: white;
}
table.display_no_select tr.odd.gradeT2 {
background-color: #FFDDAA;
}
table.display_no_select tr.even.gradeT2 {
background-color: white;
}
table.display_no_select tr.gradeL #status {
background: url("../images/loader_black.gif") no-repeat scroll 15px center transparent;
font-size: 11px;
@ -494,6 +526,8 @@ table.display_no_select tr.gradeP td,
table.display_no_select tr.gradeD td,
table.display_no_select tr.gradeT td,
table.display_no_select tr.gradeF td,
table.display_no_select tr.gradeT1 td,
table.display_no_select tr.gradeT2 td,
table.display_no_select tr.gradeZ td {border-bottom: 1px solid #FFF;}
table.display_no_select tr:last-child td {
border-bottom: 1px solid #eee;

View File

@ -1960,6 +1960,7 @@ DIV.progress-container > DIV
}
#upcoming_table th#type,
#wanted_table th#type,
#wanted_table th#tier,
#searchresults_table th#score {
min-width: 75px;
text-align: center;
@ -2010,6 +2011,7 @@ DIV.progress-container > DIV
}
#upcoming_table td#type,
#wanted_table td#type,
#wanted_table td#tier,
#searchresults_table td#score {
min-width: 75px;
text-align: center;

View File

@ -101,22 +101,22 @@
<%def name="headIncludes()">
<link rel="stylesheet" href="interfaces/default/css/data_table.css">
// <link type="text/css" href="css/dataTables.alphabetSearch.css" rel="stylesheet">
<link rel="stylesheet" href="css/alphabetSearch.css">
</%def>
<%def name="javascriptIncludes()">
<script src="js/libs/jquery.dataTables.min.js"></script>
<script src="js/libs/full_numbers_no_ellipses.js"></script>
// <script src="js/dataTables.alphabetSearch.min.js"></script>
<script src="js/alphabetSearch.js"></script>
<script>
function initThisPage() {
$.fn.DataTable.ext.pager.numbers_length = 3;
// var table = $('#series_table').dataTable( {
$('#series_table').dataTable( {
"destroy": true,
"sDom": '<"clear"f><"clear"lp><"clear">rt<"clear"ip>',
// "sDom": '<"clear"Af><"clear"lp><"clear">rt<"clear"ip>',
var alphaindex = "${alphaindex}";
if (alphaindex == "True") {
var table = $('#series_table').dataTable( {
"destroy": true,
"sDom": '<"clear"Af><"clear"lp><"clear">rt<"clear"ip>',
"columnDefs": [
{ "orderable": false, "targets": [5, 7, 10] },
{ "visible": false, "targets": [5, 7, 10] },
@ -133,15 +133,44 @@
"info":"Showing _START_ to _END_ of _TOTAL_ results",
"infoEmpty":"Showing 0 to 0 of 0 results",
"infoFiltered":"(filtered from _MAX_ total results)",
"search" : ""},
"search" : ""
},
"stateSave": true,
"stateDuration": 0,
"pageLength": 25,
"pagingType": "simple_numbers"
// alphabetSearch: {
// column:1
// }
});
resetFilters("comic");
"pagingType": "simple_numbers",
alphabetSearch: { column:1 }
});
} else {
var table = $('#series_table').dataTable( {
"destroy": true,
"sDom": '<"clear"f><"clear"lp><"clear">rt<"clear"ip>',
"columnDefs": [
{ "orderable": false, "targets": [5, 7, 10] },
{ "visible": false, "targets": [5, 7, 10] },
{ "type": 'num', "targets": 5 },
{ "type": 'num', "targets": 7 },
{ "orderData": [ 5, 7 ], "targets": 6 },
{ "orderData": 10, "targets": 9 },
{ "order": [[7, 'asc'],[1, 'asc']] }
],
"lengthMenu": [[10, 15, 25, 50, 100, 200, -1], [10, 15, 25, 50, 100, 200, 'All' ]],
"language": {
"lengthMenu":"Show _MENU_ results per page",
"emptyTable": "No results",
"info":"Showing _START_ to _END_ of _TOTAL_ results",
"infoEmpty":"Showing 0 to 0 of 0 results",
"infoFiltered":"(filtered from _MAX_ total results)",
"search" : ""
},
"stateSave": true,
"stateDuration": 0,
"pageLength": 25,
"pagingType": "simple_numbers",
alphabetSearch: { column: 1 }
});
}
resetFilters("comic");
}
$(document).ready(function(){

View File

@ -21,6 +21,7 @@
<div id="checkboxControls" style="float: right; vertical-align: middle; margin: 5px 3px 3px 3px;">
<div style="padding-bottom: 5px;">
<label for="Wanted" class="checkbox inline Wanted"><input type="checkbox" id="Wanted" checked="checked" /> Wanted: <b>${isCounts['Wanted']}</b></label>
<label for="WantedTier" class="checkbox inline WantedTier">Tiered: <b>${isCounts['WantedTier']}</b></label>
%if mylar.CONFIG.UPCOMING_SNATCHED is True:
%if int(isCounts['Snatched']) > 0:
<label for="Snatched" class="checkbox inline Snatched"><input type="checkbox" id="Snatched" checked="checked" /> Snatched: <b>${isCounts['Snatched']}</b></label>
@ -46,7 +47,7 @@
</select>
<input type="hidden" value="Go">
</div>
<small style="float: right; vertical-align: middle;">Date shown in SearchTier is when the issue was added to the Wanted list</small>
<table class="display" id="wanted_table">
<thead>
<tr>
@ -54,6 +55,7 @@
<th id="comicname">Comic</th>
<th id="issuenumber">Issue</th>
<th id="reldate">Release Date</th>
<th id="tier">SearchTier</th>
<th id="options">Options</th>
</tr>
</thead>
@ -62,12 +64,25 @@
<%
if issue['Status'] == 'Wanted':
grade = 'X'
try:
if issue['DateAdded'] <= mylar.SEARCH_TIER_DATE:
tier = "2nd"
grade = 'T2'
else:
tier = "1st [%s]" % issue['DateAdded']
grade = 'X'
except:
tier = "1st [%s]" % issue['DateAdded']
grade = 'T2'
elif issue['Status'] == 'Snatched':
grade = 'C'
elif issue['Status'] == 'Failed':
grade = 'F'
else:
grade = 'Z'
%>
<tr class="${issue['Status']} grade${grade}">
@ -96,6 +111,15 @@
else:
adjcomicname = issue['ComicName']
endif
try:
if issue['DateAdded'] <= mylar.SEARCH_TIER_DATE:
tier = "2nd"
else:
tier = "1st [%s]" % issue['DateAdded']
except:
tier = "1st [%s]" % issue['DateAdded']
%>
<td id="select"><input type="checkbox" name="${issueid}" class="checkbox" value="${issueid}"/></td>
<td id="comicname">
@ -107,6 +131,11 @@
</td>
<td id="issuenumber">${issuenumber}</td>
<td id="reldate">${issue['IssueDate']}</td>
%if issue['Status'] == 'Wanted':
<td id="tier" style="text-align:center;">${tier}</td>
%else:
<td id="tier"></td>
%endif
<td id="options">
<!--
<a class="menu_link_edit" id="choose_specific_download" title="Choose Specific Download" href="javascript:void(0)" onclick="getAvailableDownloads('${issueid}')"><i class="fa fa-search"></i><img src="interfaces/default/images/magnifier.png" height="25" width="25" class="highqual" /></a>
@ -131,7 +160,6 @@
%endfor
</tbody>
</table>
</form>
</div>
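
A minimal sketch (not the template code itself) of the search-tier rule the wanted.html changes above apply to each Wanted row, assuming ISO-formatted DateAdded strings and the 14-day SEARCH_TIER_DATE cutoff set in mylar/__init__.py later in this diff:

```python
from datetime import date, timedelta

# Cutoff mirrors mylar/__init__.py: anything added more than 14 days ago
# drops into the 2nd search tier so it won't trigger the API on every search run.
SEARCH_TIER_DATE = (date.today() - timedelta(days=14)).strftime('%Y-%m-%d')

def classify_tier(date_added):
    # Returns (tier label, row grade) the way the template does: older issues
    # become '2nd' tier with grade T2, newer ones stay '1st' and show their
    # DateAdded; a missing date keeps the 1st-tier label but the T2 grade.
    try:
        if date_added <= SEARCH_TIER_DATE:
            return '2nd', 'T2'
        return '1st [%s]' % date_added, 'X'
    except TypeError:
        return '1st [%s]' % date_added, 'T2'

print(classify_tier('2018-09-01'))  # ('2nd', 'T2')
```

The T2 grade hooks into the new gradeT1/gradeT2 row colours added to data_table.css earlier in this diff.
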

176
data/js/alphabetSearch.js Normal file
View File

@ -0,0 +1,176 @@
/*! AlphabetSearch for DataTables v1.0.0
* 2014 SpryMedia Ltd - datatables.net/license
*/
/**
* @summary AlphabetSearch
* @description Show an alphabet alongside a table providing search input options
* See http://datatables.net/blog/2014-09-22 for details
* @version 1.0.0
* @file dataTables.alphabetSearch.js
* @author SpryMedia Ltd (www.sprymedia.co.uk)
* @contact www.sprymedia.co.uk/contact
* @copyright Copyright 2014 SpryMedia Ltd.
*
* This source file is free software, available under the following license:
* MIT license - http://datatables.net/license/mit
*
* This source file is distributed in the hope that it will be useful, but
* WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
* or FITNESS FOR A PARTICULAR PURPOSE. See the license files for details.
*
* For details please refer to: http://www.datatables.net
*/
(function(){
// Search function
$.fn.dataTable.Api.register( 'alphabetSearch()', function ( searchTerm ) {
this.iterator( 'table', function ( context ) {
context.alphabetSearch = searchTerm;
} );
return this;
} );
// Recalculate the alphabet display for updated data
$.fn.dataTable.Api.register( 'alphabetSearch.recalc()', function ( searchTerm ) {
this.iterator( 'table', function ( context ) {
draw(
new $.fn.dataTable.Api( context ),
$('div.alphabet', this.table().container())
);
} );
return this;
} );
// Search plug-in
$.fn.dataTable.ext.search.push( function ( context, searchData ) {
// Ensure that there is a search applied to this table before running it
if ( ! context.alphabetSearch ) {
return true;
}
if ( context.alphabetSearch.match('nonalpha') && !(searchData[1].charAt(0).match(/^[a-zA-Z]/)) ) {
return true;
}
if ( searchData[1].charAt(0) === context.alphabetSearch ) {
return true;
}
return false;
} );
// Private support methods
function bin ( data ) {
var letter, bins = {};
var nonalpha = 0;
bins['nonalpha'] = 0;
for ( var i=0, ien=data.length ; i<ien ; i++ ) {
letter = data[i].charAt(13).toUpperCase();
if ( !letter.match(/^[A-Z]/) ) {
bins['nonalpha']++;
}
else if ( bins[letter] ) {
bins[letter]++;
}
else {
bins[letter] = 1;
}
}
return bins;
}
function draw ( table, alphabet )
{
alphabet.empty();
alphabet.append( 'Search: ' );
var columnData = table.column(1).data();
var bins = bin( columnData );
$('<span class="clear active"/>')
.data( 'letter', '' )
.data( 'match-count', columnData.length )
.html( 'None' )
.appendTo( alphabet );
$('<span class="clear active"/>')
.data( 'letter', 'nonalpha' )
.data( 'match-count', bins['nonalpha'] || 0 )
.html( '0-9' )
.appendTo( alphabet );
for ( var i=0 ; i<26 ; i++ ) {
var letter = String.fromCharCode( 65 + i );
$('<span/>')
.data( 'letter', letter )
.data( 'match-count', bins[letter] || 0 )
.addClass( ! bins[letter] ? 'empty' : '' )
.html( letter )
.appendTo( alphabet );
}
$('<div class="alphabetInfo"></div>')
.appendTo( alphabet );
}
$.fn.dataTable.AlphabetSearch = function ( context ) {
var table = new $.fn.dataTable.Api( context );
var alphabet = $('<div class="alphabet"/>');
draw( table, alphabet );
// Trigger a search
alphabet.on( 'click', 'span', function () {
alphabet.find( '.active' ).removeClass( 'active' );
$(this).addClass( 'active' );
table
.alphabetSearch( $(this).data('letter') )
.draw();
} );
// Mouse events to show helper information
alphabet
.on( 'mouseenter', 'span', function () {
alphabet
.find('div.alphabetInfo')
.css( {
opacity: 1,
left: $(this).position().left,
width: $(this).width()
} )
.html( $(this).data('match-count') );
} )
.on( 'mouseleave', 'span', function () {
alphabet
.find('div.alphabetInfo')
.css('opacity', 0);
} );
// API method to get the alphabet container node
this.node = function () {
return alphabet;
};
};
$.fn.DataTable.AlphabetSearch = $.fn.dataTable.AlphabetSearch;
// Register a search plug-in
$.fn.dataTable.ext.feature.push( {
fnInit: function ( settings ) {
var search = new $.fn.dataTable.AlphabetSearch( settings );
return search.node();
},
cFeature: 'A'
} );
}());

View File

@ -1,94 +0,0 @@
/*! AlphabetSearch for DataTables v1.2.4
* 2014 SpryMedia Ltd - datatables.net/license
* Gyrocode - MIT License
*/
(function() { $.fn.dataTable.Api.register("alphabetSearch()", function(searchTerm) { this.iterator("table", function(context) { context.alphabetSearch.letter = searchTerm; }); return this; });
$.fn.dataTable.Api.register("alphabetSearch.recalc()", function() { this.iterator("table", function(context) { draw(new $.fn.dataTable.Api(context), $("div.alphabet", this.table()
.container()), context); }); return this; });
$.fn.dataTable.ext.search.push(function(context, searchData) { if (!context.hasOwnProperty("alphabetSearch")) { return true; } if (!context.alphabetSearch.letterSearch) { return true; } var letter = searchData[context.alphabetSearch.column].toString()
.replace(/<.*?>/g, "")
.charAt(0)
.toUpperCase(); if (context.alphabetSearch.letterSearch !== "#") { if (letter === context.alphabetSearch.letterSearch) { return true; } } else { if (/[^a-zA-Z]/.test(letter)) { return true; } } return false; });
$.fn.dataTable.ext.order["alphabetSearch"] = function(context, col) { var order_col = this.api()
.order()[0][0]; var order_method = this.api()
.order()[0][1]; if (order_col !== context.alphabetSearch.column) { context.alphabetSearch.pass = 0; } var data = this.api()
.column(col, { order: "index" })
.data()
.map(function(value, index) { var letter = value.replace(/<.*?>/g, "")
.charAt(0)
.toUpperCase(); return (order_col === context.alphabetSearch.column) ? ((!context.alphabetSearch.pass) ? "" : ((order_method === "asc") ? letter : String.fromCharCode(65535 - letter.charCodeAt(0)))) : letter; }); if (order_col === context.alphabetSearch.column) { if (!context.alphabetSearchPass) { context.alphabetSearch.pass = 0; } context.alphabetSearch.pass = (context.alphabetSearch.pass + 1) % 2; } return data; };
function bin(data) { var letter, bins = {}; for (var i = 0, ien = data.length; i < ien; i++) { letter = data[i].toString()
.replace(/<.*?>/g, "")
.charAt(0)
.toUpperCase(); if (/[^a-zA-Z]/.test(letter)) { letter = "#"; } if (bins[letter]) { bins[letter]++; } else { bins[letter] = 1; } } return bins; }
function draw(table, alphabet, context) { alphabet.empty(); if (context.oLanguage.alphabetSearch.infoDisplay !== "") { $('<span class="alphabet-info-display"></span>')
.html(context.oLanguage.alphabetSearch.infoDisplay)
.appendTo(alphabet); } var columnData = table.column(context.alphabetSearch.column, { search: "applied" })
.data(); var bins = bin(columnData); var alphabetList = $("<ul/>");
$("<a/>")
.attr("href", "javascript:;")
.data("letter", "")
.data("match-count", columnData.length)
.addClass(((!context.alphabetSearch.letter) ? "active" : ""))
.html("<span>" + context.oLanguage.alphabetSearch.infoAll + "</span>")
.wrap("<li/>")
.parent()
.appendTo(alphabetList); for (var i = 0; i < context.oLanguage.alphabetSearch.alphabet.length; i++) { var letter = context.oLanguage.alphabetSearch.alphabet[i];
$("<a/>")
.attr("href", "javascript:;")
.data("letter", letter)
.data("match-count", bins[letter] || 0)
.addClass((!bins[letter] ? "empty" : "") + ((context.alphabetSearch.letter === letter) ? " active" : ""))
.html("<span>" + letter + "</span>")
.wrap("<li/>")
.parent()
.appendTo(alphabetList); } alphabetList.appendTo(alphabet);
$('<div class="alphabet-info"></div>')
.appendTo(alphabet); if (context.alphabetSearch.letter) { context.alphabetSearch.letterSearch = context.alphabetSearch.letter;
table.draw();
context.alphabetSearch.letterSearch = ""; } table.one("search", function(e, context) { var api = new $.fn.dataTable.Api(context);
api.alphabetSearch.recalc(); }); } $.fn.dataTable.AlphabetSearch = function(context) { var table = new $.fn.dataTable.Api(context); var alphabet = $('<div class="alphabet"/>');
context.oLanguage.alphabetSearch = $.extend({ "alphabet": "#ABCDEFGHIJKLMNOPQRSTUVWXYZ", "infoDisplay": "Display:", "infoAll": "All" }, ((context.oLanguage.alphabetSearch) ? context.oLanguage.alphabetSearch : {}));
context.oLanguage.alphabetSearch.alphabet.toUpperCase();
context.alphabetSearch = $.extend({ column: 0 }, $.isPlainObject(context.oInit.alphabetSearch) ? context.oInit.alphabetSearch : {}, { letter: "", letterSearch: "", pass: 0 }); if (context.alphabetSearch.column >= 0 && context.alphabetSearch.column < context.aoColumns.length) { context.aoColumns[context.alphabetSearch.column].sSortDataType = "alphabetSearch"; } if (context.hasOwnProperty("aaSortingFixed") && typeof context.aaSortingFixed === "object") { if ($.isArray(context.aaSortingFixed)) { if (context.aaSortingFixed.length && !$.isArray(context.aaSortingFixed[0])) { context.aaSortingFixed = [
[context.alphabetSearch.column, "asc"], context.aaSortingFixed
]; } else { context.aaSortingFixed.unshift([context.alphabetSearch.column, "asc"]); } } else { if (!context.aaSortingFixed.hasOwnProperty("pre")) { context.aaSortingFixed.pre = []; } if (context.aaSortingFixed.pre.length && !$.isArray(context.aaSortingFixed.pre[0])) { context.aaSortingFixed.pre = [
[context.alphabetSearch.column, "asc"], context.aaSortingFixed.pre
]; } else { context.aaSortingFixed.pre.unshift([context.alphabetSearch.column, "asc"]); } } } else { context.aaSortingFixed = [context.alphabetSearch.column, "asc"]; } draw(table, alphabet, context);
alphabet.on("click", "a", function(e) { e.preventDefault();
alphabet.find(".active")
.removeClass("active");
$(this)
.addClass("active");
table.alphabetSearch($(this)
.data("letter"))
.draw(); });
alphabet.on("mouseenter", "a", function() { var $el = $(this); var el_pos = $el.position(); var $alphabet_info = $(".alphabet-info", alphabet);
$alphabet_info.html($el.data("match-count"));
$alphabet_info.css({ opacity: 1, left: el_pos.left + Math.round(($el.outerWidth() - $alphabet_info.outerWidth()) / 2), top: $(this)
.position()
.top + $el.outerHeight() }); })
.on("mouseleave", "a", function() { alphabet.find("div.alphabet-info")
.css("opacity", 0); });
table.on("draw", function(e, context) { var api = new $.fn.dataTable.Api(context); var col_total = api.columns()
.nodes()
.length; var rows = api.rows({ page: "current" })
.nodes(); var group_last = null;
api.column(context.alphabetSearch.column, { page: "current" })
.data()
.each(function(name, index) { var group = name.replace(/<.*?>/g, "")
.charAt(0)
.toUpperCase(); if (group_last !== group) { $(rows)
.eq(index)
.before('<tr class="alphabet-group" style="display:none;"><td colspan="' + col_total + '">' + group + "</td></tr>");
group_last = group; } }); if (!rows.length && context.alphabetSearch) { var letter = context.alphabetSearch.letter;
$(api.table()
.body())
.prepend('<tr class="alphabet-group" style="display:none;"><td colspan="' + col_total + '">' + letter + "</td></tr>"); } });
this.node = function() { return alphabet; }; };
$.fn.DataTable.AlphabetSearch = $.fn.dataTable.AlphabetSearch;
$.fn.dataTable.ext.feature.push({ fnInit: function(settings) { var search = new $.fn.dataTable.AlphabetSearch(settings); return search.node(); }, cFeature: "A" }); }());

View File

@ -1,3 +1,4 @@
#!/usr/bin/env
# Script name
NAME=mylar

View File

@ -3,7 +3,7 @@ Instructions on setting up mylar as a systemd service that will run on startup/v
1 - copy the mylar.service to /lib/systemd/system/mylar.service
2 - create a symbolic link to it: ln -s /lib/systemd/system/mylar.service /etc/systemd/system/mylar.service
3 - copy mylar.default to /etc/default/mylar (make sure it's renamed from mylar.default to just mylar)
4 - copy mylar.nitd to /etc/init.d/mylar (rename it to just mylar) and then 'sudo chmod +x /etc/init.d/mylar'
4 - copy mylar.initd to /etc/init.d/mylar (rename it to just mylar) and then 'sudo chmod +x /etc/init.d/mylar'
5 - edit the /etc/default/mylar file to your defaults (make sure to set MYLAR_USER & MYLAR_HOME as they're required)
6 - make systemd aware of new services: sudo systemctl daemon-reload
7 - sudo systemctl enable mylar

21
lib/cfscrape/LICENSE Normal file
View File

@ -0,0 +1,21 @@
The MIT License (MIT)
Copyright (c) 2015 Anorov
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.

View File

@ -1,55 +1,80 @@
from time import sleep
import logging
import random
import re
from requests.sessions import Session
import js2py
import subprocess
from copy import deepcopy
from time import sleep
from requests.sessions import Session
try:
from urlparse import urlparse
except ImportError:
from urllib.parse import urlparse
__version__ = "1.9.5"
DEFAULT_USER_AGENTS = [
"Mozilla/5.0 (Windows NT 6.1) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/41.0.2228.0 Safari/537.36",
"Mozilla/5.0 (Macintosh; Intel Mac OS X 10_10_5) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/50.0.2661.102 Safari/537.36",
"Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/52.0.2743.116 Safari/537.36",
"Mozilla/5.0 (Windows NT 6.1; WOW64; rv:46.0) Gecko/20100101 Firefox/46.0",
"Mozilla/5.0 (X11; Ubuntu; Linux x86_64; rv:41.0) Gecko/20100101 Firefox/41.0"
"Mozilla/5.0 (Macintosh; Intel Mac OS X 10_13_2) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/65.0.3325.181 Safari/537.36",
"Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Ubuntu Chromium/65.0.3325.181 Chrome/65.0.3325.181 Safari/537.36",
"Mozilla/5.0 (Linux; Android 7.0; Moto G (5) Build/NPPS25.137-93-8) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/64.0.3282.137 Mobile Safari/537.36",
"Mozilla/5.0 (iPhone; CPU iPhone OS 7_0_4 like Mac OS X) AppleWebKit/537.51.1 (KHTML, like Gecko) Version/7.0 Mobile/11B554a Safari/9537.53",
"Mozilla/5.0 (Windows NT 6.1; Win64; x64; rv:60.0) Gecko/20100101 Firefox/60.0",
"Mozilla/5.0 (Macintosh; Intel Mac OS X 10.13; rv:59.0) Gecko/20100101 Firefox/59.0",
"Mozilla/5.0 (Windows NT 6.3; Win64; x64; rv:57.0) Gecko/20100101 Firefox/57.0"
]
DEFAULT_USER_AGENT = random.choice(DEFAULT_USER_AGENTS)
BUG_REPORT = """\
Cloudflare may have changed their technique, or there may be a bug in the script.
Please read https://github.com/Anorov/cloudflare-scrape#updates, then file a \
bug report at https://github.com/Anorov/cloudflare-scrape/issues."\
"""
ANSWER_ACCEPT_ERROR = """\
The challenge answer was not properly accepted by Cloudflare. This can occur if \
the target website is under heavy load, or if Cloudflare is experiencing issues. You can
potentially resolve this by increasing the challenge answer delay (default: 8 seconds). \
For example: cfscrape.create_scraper(delay=15)
If increasing the delay does not help, please open a GitHub issue at \
https://github.com/Anorov/cloudflare-scrape/issues\
"""
class CloudflareScraper(Session):
def __init__(self, *args, **kwargs):
self.delay = kwargs.pop("delay", 8)
super(CloudflareScraper, self).__init__(*args, **kwargs)
if "requests" in self.headers["User-Agent"]:
# Spoof Firefox on Linux if no custom User-Agent has been set
# Set a random User-Agent if no custom User-Agent has been set
self.headers["User-Agent"] = DEFAULT_USER_AGENT
def is_cloudflare_challenge(self, resp):
return (
resp.status_code == 503
and resp.headers.get("Server", "").startswith("cloudflare")
and b"jschl_vc" in resp.content
and b"jschl_answer" in resp.content
)
def request(self, method, url, *args, **kwargs):
resp = super(CloudflareScraper, self).request(method, url, *args, **kwargs)
# Check if Cloudflare anti-bot is on
if ( resp.status_code == 503
and resp.headers.get("Server") == "cloudflare-nginx"
and b"jschl_vc" in resp.content
and b"jschl_answer" in resp.content
):
return self.solve_cf_challenge(resp, **kwargs)
if self.is_cloudflare_challenge(resp):
resp = self.solve_cf_challenge(resp, **kwargs)
# Otherwise, no Cloudflare anti-bot detected
return resp
def solve_cf_challenge(self, resp, **original_kwargs):
sleep(5) # Cloudflare requires a delay before solving the challenge
sleep(self.delay) # Cloudflare requires a delay before solving the challenge
body = resp.text
parsed_url = urlparse(resp.url)
domain = urlparse(resp.url).netloc
domain = parsed_url.netloc
submit_url = "%s://%s/cdn-cgi/l/chk_jschl" % (parsed_url.scheme, domain)
cloudflare_kwargs = deepcopy(original_kwargs)
@ -61,23 +86,15 @@ class CloudflareScraper(Session):
params["jschl_vc"] = re.search(r'name="jschl_vc" value="(\w+)"', body).group(1)
params["pass"] = re.search(r'name="pass" value="(.+?)"', body).group(1)
# Extract the arithmetic operation
js = self.extract_js(body)
except Exception:
except Exception as e:
# Something is wrong with the page.
# This may indicate Cloudflare has changed their anti-bot
# technique. If you see this and are running the latest version,
# please open a GitHub issue so I can update the code accordingly.
logging.error("[!] Unable to parse Cloudflare anti-bots page. "
"Try upgrading cloudflare-scrape, or submit a bug report "
"if you are running the latest version. Please read "
"https://github.com/Anorov/cloudflare-scrape#updates "
"before submitting a bug report.")
raise
raise ValueError("Unable to parse Cloudflare anti-bots page: %s %s" % (e.message, BUG_REPORT))
# Safely evaluate the Javascript expression
params["jschl_answer"] = str(int(js2py.eval_js(js)) + len(domain))
# Solve the Javascript challenge
params["jschl_answer"] = self.solve_challenge(body, domain)
# Requests transforms any request into a GET after a redirect,
# so the redirect has to be handled manually here to allow for
@ -85,26 +102,58 @@ class CloudflareScraper(Session):
method = resp.request.method
cloudflare_kwargs["allow_redirects"] = False
redirect = self.request(method, submit_url, **cloudflare_kwargs)
redirect_location = urlparse(redirect.headers["Location"])
if not redirect_location.netloc:
redirect_url = "%s://%s%s" % (parsed_url.scheme, domain, redirect_location.path)
return self.request(method, redirect_url, **original_kwargs)
return self.request(method, redirect.headers["Location"], **original_kwargs)
def extract_js(self, body):
js = re.search(r"setTimeout\(function\(\){\s+(var "
def solve_challenge(self, body, domain):
try:
js = re.search(r"setTimeout\(function\(\){\s+(var "
"s,t,o,p,b,r,e,a,k,i,n,g,f.+?\r?\n[\s\S]+?a\.value =.+?)\r?\n", body).group(1)
js = re.sub(r"a\.value = (parseInt\(.+?\)).+", r"\1", js)
js = re.sub(r"\s{3,}[a-z](?: = |\.).+", "", js)
except Exception:
raise ValueError("Unable to identify Cloudflare IUAM Javascript on website. %s" % BUG_REPORT)
js = re.sub(r"a\.value = (.+ \+ t\.length).+", r"\1", js)
js = re.sub(r"\s{3,}[a-z](?: = |\.).+", "", js).replace("t.length", str(len(domain)))
# Strip characters that could be used to exit the string context
# These characters are not currently used in Cloudflare's arithmetic snippet
js = re.sub(r"[\n\\']", "", js)
return js
if "toFixed" not in js:
raise ValueError("Error parsing Cloudflare IUAM Javascript challenge. %s" % BUG_REPORT)
# Use vm.runInNewContext to safely evaluate code
# The sandboxed code cannot use the Node.js standard library
js = "console.log(require('vm').runInNewContext('%s', Object.create(null), {timeout: 5000}));" % js
try:
result = subprocess.check_output(["node", "-e", js]).strip()
except OSError as e:
if e.errno == 2:
raise EnvironmentError("Missing Node.js runtime. Node is required and must be in the PATH (check with `node -v`). Your Node binary may be called `nodejs` rather than `node`, in which case you may need to run `apt-get install nodejs-legacy` on some Debian-based systems. (Please read the cfscrape"
" README's Dependencies section: https://github.com/Anorov/cloudflare-scrape#dependencies.")
raise
except Exception:
logging.error("Error executing Cloudflare IUAM Javascript. %s" % BUG_REPORT)
raise
try:
float(result)
except Exception:
raise ValueError("Cloudflare IUAM challenge returned unexpected answer. %s" % BUG_REPORT)
return result
@classmethod
def create_scraper(cls, sess=None, **kwargs):
"""
Convenience function for creating a ready-to-go requests.Session (subclass) object.
Convenience function for creating a ready-to-go CloudflareScraper object.
"""
scraper = cls()
scraper = cls(**kwargs)
if sess:
attrs = ["auth", "cert", "cookies", "headers", "hooks", "params", "proxies", "data"]
@ -125,7 +174,7 @@ class CloudflareScraper(Session):
scraper.headers["User-Agent"] = user_agent
try:
resp = scraper.get(url)
resp = scraper.get(url, **kwargs)
resp.raise_for_status()
except Exception as e:
logging.error("'%s' returned an error. Could not collect tokens." % url)
@ -153,9 +202,9 @@ class CloudflareScraper(Session):
"""
Convenience function for building a Cookie HTTP header value.
"""
tokens, user_agent = cls.get_tokens(url, user_agent=user_agent)
tokens, user_agent = cls.get_tokens(url, user_agent=user_agent, **kwargs)
return "; ".join("=".join(pair) for pair in tokens.items()), user_agent
create_scraper = CloudflareScraper.create_scraper
get_tokens = CloudflareScraper.get_tokens
get_cookie_string = CloudflareScraper.get_cookie_string
get_cookie_string = CloudflareScraper.get_cookie_string
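
The cfscrape bump above exposes a configurable challenge delay and forwards keyword arguments through the convenience helpers. A hedged usage sketch (example.com is a placeholder, the import path assumes the bundled package is importable as cfscrape, and a Node.js runtime must be on the PATH per the error text above):

```python
import cfscrape

# create_scraper() now passes kwargs to CloudflareScraper, so the default
# 8-second challenge-answer delay can be raised when Cloudflare is slow.
scraper = cfscrape.create_scraper(delay=15)
resp = scraper.get("https://example.com/")  # transparently solves the IUAM challenge

# get_tokens()/get_cookie_string() likewise forward extra kwargs to the request.
tokens, user_agent = cfscrape.get_tokens("https://example.com/")
cookie_header, user_agent = cfscrape.get_cookie_string("https://example.com/")
```
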

View File

@ -45,7 +45,7 @@ def ctmain():
if opts.cv_api_key:
if opts.cv_api_key != settings.cv_api_key:
settings.cv_api_key = opts.cv_api_key
settings.save()
#settings.save()
if opts.only_set_key:
print("Key set")
return

View File

@ -168,16 +168,16 @@ class ComicTaggerSettings:
# see if it's in the path of unix user
if utils.which("rar") is not None:
self.rar_exe_path = utils.which("rar")
if self.rar_exe_path != "":
self.save()
#if self.rar_exe_path != "":
# self.save()
if self.unrar_exe_path == "":
if platform.system() != "Windows":
# see if it's in the path of unix user
if utils.which("unrar") is not None:
self.unrar_exe_path = utils.which("unrar")
if self.unrar_exe_path != "":
self.save()
#if self.unrar_exe_path != "":
# self.save()
# make sure unrar/rar programs are now in the path for the UnRAR class to
# use

View File

@ -542,6 +542,7 @@ class PostProcessor(object):
temploc= watchmatch['justthedigits'].replace('_', ' ')
temploc = re.sub('[\#\']', '', temploc)
logger.info('temploc: %s' % temploc)
datematch = "False"
if any(['annual' in temploc.lower(), 'special' in temploc.lower()]) and mylar.CONFIG.ANNUALS_ON is True:
biannchk = re.sub('-', '', temploc.lower()).strip()
@ -557,6 +558,7 @@ class PostProcessor(object):
annchk = "yes"
issuechk = myDB.select("SELECT * from annuals WHERE ComicID=? AND Int_IssueNumber=?", [cs['ComicID'], fcdigit])
else:
annchk = "no"
fcdigit = helpers.issuedigits(temploc)
issuechk = myDB.select("SELECT * from issues WHERE ComicID=? AND Int_IssueNumber=?", [cs['ComicID'], fcdigit])
@ -671,12 +673,18 @@ class PostProcessor(object):
clocation = watchmatch['comiclocation']
else:
clocation = os.path.join(watchmatch['comiclocation'],helpers.conversion(watchmatch['comicfilename']))
if 'Annual' in isc['ComicName']:
annualtype = 'Annual'
elif 'Special' in isc['ComicName']:
annualtype = 'Special'
annualtype = None
if annchk == 'yes':
if 'Annual' in isc['ReleaseComicName']:
annualtype = 'Annual'
elif 'Special' in isc['ReleaseComicName']:
annualtype = 'Special'
else:
annualtype = None
if 'Annual' in isc['ComicName']:
annualtype = 'Annual'
elif 'Special' in isc['ComicName']:
annualtype = 'Special'
manual_list.append({"ComicLocation": clocation,
"ComicID": cs['ComicID'],
"IssueID": isc['IssueID'],
@ -694,28 +702,46 @@ class PostProcessor(object):
logger.fdebug(module + '[NON-MATCH: ' + cs['ComicName'] + '-' + cs['ComicID'] + '] Incorrect series - not populating..continuing post-processing')
continue
logger.fdebug(module + '[SUCCESSFUL MATCH: ' + cs['ComicName'] + '-' + cs['ComicID'] + '] Match verified for ' + helpers.conversion(fl['comicfilename']))
self.matched = True
continue #break
mlp = []
xmld = filechecker.FileChecker()
#mod_seriesname = as_dinfo['mod_seriesname']
for x in manual_list:
xmld1 = xmld.dynamic_replace(helpers.conversion(x['ComicName']))
xmld = filechecker.FileChecker()
xmld1 = xmld.dynamic_replace(helpers.conversion(cs['ComicName']))
xseries = xmld1['mod_seriesname'].lower()
xmld2 = xmld.dynamic_replace(helpers.conversion(x['Series']))
xmld2 = xmld.dynamic_replace(helpers.conversion(watchmatch['series_name']))
xfile = xmld2['mod_seriesname'].lower()
if re.sub('\|', '', xseries).strip() == re.sub('\|', '', xfile).strip():
#logger.fdebug(module + '[DEFINITIVE-NAME MATCH] Definitive name match exactly to : %s [%s]' % (x['ComicName'], x['ComicID']))
mlp.append(x)
if re.sub('\|', '', xseries) == re.sub('\|', '', xfile):
logger.fdebug('%s[DEFINITIVE-NAME MATCH] Definitive name match exactly to : %s [%s]' % (module, watchmatch['series_name'], cs['ComicID']))
self.matched = True
else:
pass
if len(manual_list) == 1 and len(mlp) == 1:
manual_list = mlp
#logger.fdebug(module + '[CONFIRMED-FORCE-OVERRIDE] Over-ride of matching taken due to exact name matching of series')
continue #break
if datematch == 'True':
logger.fdebug(module + '[SUCCESSFUL MATCH: ' + cs['ComicName'] + '-' + cs['ComicID'] + '] Match verified for ' + helpers.conversion(fl['comicfilename']))
break
#mlp = []
#xmld = filechecker.FileChecker()
#if len(manual_list) > 1:
# #in case the manual pp matches on more than one series in the watchlist, drop back down to exact name matching to see if we can narrow
# #the matches down further to the point where there's only one exact match. Not being able to match specifically when there is more than
# #one item in the manual list that's matched to the same file will result in a dupe_src error and/or mistakingly PP'ing against the
# #wrong series.
# for x in manual_list:
# xmld1 = xmld.dynamic_replace(helpers.conversion(x['ComicName']))
# xseries = xmld1['mod_seriesname'].lower()
# xmld2 = xmld.dynamic_replace(helpers.conversion(x['Series']))
# xfile = xmld2['mod_seriesname'].lower()
# #logger.info('[xseries:%s][xfile:%s]' % (xseries,xfile))
# if re.sub('\|', '', xseries).strip() == re.sub('\|', '', xfile).strip():
# logger.fdebug('%s[DEFINITIVE-NAME MATCH] Definitive name match exactly to : %s [%s]' % (module, x['ComicName'], x['ComicID']))
# mlp.append(x)
# else:
# pass
# if len(mlp) == 1:
# manual_list = mlp
# logger.fdebug('%s[CONFIRMED-FORCE-OVERRIDE] Over-ride of matching taken due to exact name matching of series' % module)
# else:
# logger.warn('%s[CONFIRMATION-PROBLEM] Unable to determine proper match for series as more than one successful match came up.' % module)
#we should setup for manual post-processing of story-arc issues here
#we can also search by ComicID to just grab those particular arcs as an alternative as well (not done)
@ -918,7 +944,7 @@ class PostProcessor(object):
logger.fdebug(module + '[ONEOFF-SELECTION][self.nzb_name: %s]' % self.nzb_name)
oneoffvals = []
for ofl in oneofflist:
logger.info('[ONEOFF-SELECTION] ofl: %s' % ofl)
#logger.info('[ONEOFF-SELECTION] ofl: %s' % ofl)
oneoffvals.append({"ComicName": ofl['ComicName'],
"ComicPublisher": ofl['PUBLISHER'],
"Issue_Number": ofl['Issue_Number'],
@ -936,7 +962,7 @@ class PostProcessor(object):
#this seems redundant to scan in all over again...
#for fl in filelist['comiclist']:
for ofv in oneoffvals:
logger.info('[ONEOFF-SELECTION] ofv: %s' % ofv)
#logger.info('[ONEOFF-SELECTION] ofv: %s' % ofv)
wm = filechecker.FileChecker(watchcomic=ofv['ComicName'], Publisher=ofv['ComicPublisher'], AlternateSearch=None, manual=ofv['WatchValues'])
#if fl['sub'] is not None:
# pathtofile = os.path.join(fl['comiclocation'], fl['sub'], fl['comicfilename'])
@ -1388,12 +1414,14 @@ class PostProcessor(object):
# this has no issueID, therefore it's a one-off or a manual post-proc.
# At this point, let's just drop it into the Comic Location folder and forget about it..
if sandwich is not None and 'S' in sandwich:
self._log("One-off STORYARC mode enabled for Post-Processing for " + sarc)
logger.info(module + ' One-off STORYARC mode enabled for Post-Processing for ' + sarc)
self._log("One-off STORYARC mode enabled for Post-Processing for %s" % sarc)
logger.info('%s One-off STORYARC mode enabled for Post-Processing for %s' % (module, sarc))
else:
self._log("One-off mode enabled for Post-Processing. All I'm doing is moving the file untouched into the Grab-bag directory.")
logger.info(module + ' One-off mode enabled for Post-Processing. Will move into Grab-bag directory.')
self._log("Grab-Bag Directory set to : " + mylar.CONFIG.GRABBAG_DIR)
if mylar.CONFIG.GRABBAG_DIR is None:
mylar.CONFIG.GRABBAG_DIR = os.path.join(mylar.CONFIG.DESTINATION_DIR, 'Grabbag')
logger.info('%s One-off mode enabled for Post-Processing. Will move into Grab-bag directory: %s' % (module, mylar.CONFIG.GRABBAG_DIR))
self._log("Grab-Bag Directory set to : %s" % mylar.CONFIG.GRABBAG_DIR)
grdst = mylar.CONFIG.GRABBAG_DIR
odir = location
@ -2250,13 +2278,14 @@ class PostProcessor(object):
updatetable = 'issues'
else:
updater.foundsearch(comicid, issueid, mode='want_ann', down=downtype, module=module, crc=crcvalue)
if 'annual' in issuenzb['ComicName'].lower(): #series.lower():
if 'annual' in issuenzb['ReleaseComicName'].lower(): #series.lower():
dispiss = 'Annual #%s' % issuenumOG
elif 'special' in issuenzb['ComicName'].lower():
elif 'special' in issuenzb['ReleaseComicName'].lower():
dispiss = 'Special #%s' % issuenumOG
else:
dispiss = '#%s' % issuenumOG
updatetable = 'annuals'
logger.fdebug('[annchk:%s] issue to update: %s' % (annchk, dispiss))
#new method for updating status after pp
if os.path.isfile(dst):
@ -2358,22 +2387,22 @@ class PostProcessor(object):
seriesmetadata['seriesmeta'] = seriesmeta
self._run_extra_scripts(nzbn, self.nzb_folder, filen, folderp, seriesmetadata)
if ml is not None:
#we only need to return self.log if it's a manual run and it's not a snatched torrent
#manual run + not snatched torrent (or normal manual-run)
logger.info(module + ' Post-Processing completed for: ' + series + ' ' + dispiss)
self._log(u"Post Processing SUCCESSFUL! ")
self.valreturn.append({"self.log": self.log,
"mode": 'stop',
"issueid": issueid,
"comicid": comicid})
if self.apicall is True:
self.sendnotify(series, issueyear, dispiss, annchk, module)
return self.queue.put(self.valreturn)
#if ml is not None:
# #we only need to return self.log if it's a manual run and it's not a snatched torrent
# #manual run + not snatched torrent (or normal manual-run)
# logger.info(module + ' Post-Processing completed for: ' + series + ' ' + dispiss)
# self._log(u"Post Processing SUCCESSFUL! ")
# self.valreturn.append({"self.log": self.log,
# "mode": 'stop',
# "issueid": issueid,
# "comicid": comicid})
# #if self.apicall is True:
# self.sendnotify(series, issueyear, dispiss, annchk, module)
# return self.queue.put(self.valreturn)
self.sendnotify(series, issueyear, dispiss, annchk, module)
logger.info(module + ' Post-Processing completed for: ' + series + ' ' + dispiss)
logger.info('%s Post-Processing completed for: %s %s' % (module, series, dispiss))
self._log(u"Post Processing SUCCESSFUL! ")
self.valreturn.append({"self.log": self.log,

View File

@ -22,6 +22,7 @@ import os, sys, subprocess
import threading
import datetime
from datetime import timedelta
import webbrowser
import sqlite3
import itertools
@ -104,6 +105,7 @@ CV_HEADERS = None
CVURL = None
DEMURL = None
WWTURL = None
WWT_CF_COOKIEVALUE = None
KEYS_32P = None
AUTHKEY_32P = None
FEED_32P = None
@ -125,6 +127,7 @@ SNATCHED_QUEUE = Queue.Queue()
NZB_QUEUE = Queue.Queue()
PP_QUEUE = Queue.Queue()
SEARCH_QUEUE = Queue.Queue()
SEARCH_TIER_DATE = None
COMICSORT = None
PULLBYFILE = None
CFG = None
@ -160,11 +163,11 @@ def initialize(config_file):
global CONFIG, _INITIALIZED, QUIET, CONFIG_FILE, OS_DETECT, MAINTENANCE, CURRENT_VERSION, LATEST_VERSION, COMMITS_BEHIND, INSTALL_TYPE, IMPORTLOCK, PULLBYFILE, INKDROPS_32P, \
DONATEBUTTON, CURRENT_WEEKNUMBER, CURRENT_YEAR, UMASK, USER_AGENT, SNATCHED_QUEUE, NZB_QUEUE, PP_QUEUE, SEARCH_QUEUE, PULLNEW, COMICSORT, WANTED_TAB_OFF, CV_HEADERS, \
IMPORTBUTTON, IMPORT_FILES, IMPORT_TOTALFILES, IMPORT_CID_COUNT, IMPORT_PARSED_COUNT, IMPORT_FAILURE_COUNT, CHECKENABLED, CVURL, DEMURL, WWTURL, \
IMPORTBUTTON, IMPORT_FILES, IMPORT_TOTALFILES, IMPORT_CID_COUNT, IMPORT_PARSED_COUNT, IMPORT_FAILURE_COUNT, CHECKENABLED, CVURL, DEMURL, WWTURL, WWT_CF_COOKIEVALUE, \
USE_SABNZBD, USE_NZBGET, USE_BLACKHOLE, USE_RTORRENT, USE_UTORRENT, USE_QBITTORRENT, USE_DELUGE, USE_TRANSMISSION, USE_WATCHDIR, SAB_PARAMS, \
PROG_DIR, DATA_DIR, CMTAGGER_PATH, DOWNLOAD_APIKEY, LOCAL_IP, STATIC_COMICRN_VERSION, STATIC_APC_VERSION, KEYS_32P, AUTHKEY_32P, FEED_32P, FEEDINFO_32P, \
MONITOR_STATUS, SEARCH_STATUS, RSS_STATUS, WEEKLY_STATUS, VERSION_STATUS, UPDATER_STATUS, DBUPDATE_INTERVAL, LOG_LANG, LOG_CHARSET, APILOCK, SEARCHLOCK, LOG_LEVEL, \
SCHED_RSS_LAST, SCHED_WEEKLY_LAST, SCHED_MONITOR_LAST, SCHED_SEARCH_LAST, SCHED_VERSION_LAST, SCHED_DBUPDATE_LAST, COMICINFO
SCHED_RSS_LAST, SCHED_WEEKLY_LAST, SCHED_MONITOR_LAST, SCHED_SEARCH_LAST, SCHED_VERSION_LAST, SCHED_DBUPDATE_LAST, COMICINFO, SEARCH_TIER_DATE
cc = mylar.config.Config(config_file)
CONFIG = cc.read(startup=True)
@ -229,6 +232,13 @@ def initialize(config_file):
CURRENT_WEEKNUMBER = todaydate.strftime("%U")
CURRENT_YEAR = todaydate.strftime("%Y")
if SEARCH_TIER_DATE is None:
#tier the wanted list so anything older than 14 days won't trigger the API during searches.
#utc_date = datetime.datetime.utcnow()
STD = todaydate - timedelta(days = 14)
SEARCH_TIER_DATE = STD.strftime('%Y-%m-%d')
logger.fdebug('SEARCH_TIER_DATE set to : %s' % SEARCH_TIER_DATE)
#set the default URL for ComicVine API here.
CVURL = 'https://comicvine.gamespot.com/api/'
@ -472,7 +482,7 @@ def dbcheck():
c.execute('SELECT ReleaseDate from storyarcs')
except sqlite3.OperationalError:
try:
c.execute('CREATE TABLE IF NOT EXISTS storyarcs(StoryArcID TEXT, ComicName TEXT, IssueNumber TEXT, SeriesYear TEXT, IssueYEAR TEXT, StoryArc TEXT, TotalIssues TEXT, Status TEXT, inCacheDir TEXT, Location TEXT, IssueArcID TEXT, ReadingOrder INT, IssueID TEXT, ComicID TEXT, ReleaseDate TEXT, IssueDate TEXT, Publisher TEXT, IssuePublisher TEXT, IssueName TEXT, CV_ArcID TEXT, Int_IssueNumber INT, DynamicComicName TEXT, Volume TEXT, Manual TEXT)')
c.execute('CREATE TABLE IF NOT EXISTS storyarcs(StoryArcID TEXT, ComicName TEXT, IssueNumber TEXT, SeriesYear TEXT, IssueYEAR TEXT, StoryArc TEXT, TotalIssues TEXT, Status TEXT, inCacheDir TEXT, Location TEXT, IssueArcID TEXT, ReadingOrder INT, IssueID TEXT, ComicID TEXT, ReleaseDate TEXT, IssueDate TEXT, Publisher TEXT, IssuePublisher TEXT, IssueName TEXT, CV_ArcID TEXT, Int_IssueNumber INT, DynamicComicName TEXT, Volume TEXT, Manual TEXT, DateAdded TEXT)')
c.execute('INSERT INTO storyarcs(StoryArcID, ComicName, IssueNumber, SeriesYear, IssueYEAR, StoryArc, TotalIssues, Status, inCacheDir, Location, IssueArcID, ReadingOrder, IssueID, ComicID, ReleaseDate, IssueDate, Publisher, IssuePublisher, IssueName, CV_ArcID, Int_IssueNumber, DynamicComicName, Volume, Manual) SELECT StoryArcID, ComicName, IssueNumber, SeriesYear, IssueYEAR, StoryArc, TotalIssues, Status, inCacheDir, Location, IssueArcID, ReadingOrder, IssueID, ComicID, StoreDate, IssueDate, Publisher, IssuePublisher, IssueName, CV_ArcID, Int_IssueNumber, DynamicComicName, Volume, Manual FROM readinglist')
c.execute('DROP TABLE readinglist')
except sqlite3.OperationalError:
@ -486,7 +496,7 @@ def dbcheck():
c.execute('CREATE TABLE IF NOT EXISTS weekly (SHIPDATE TEXT, PUBLISHER TEXT, ISSUE TEXT, COMIC VARCHAR(150), EXTRA TEXT, STATUS TEXT, ComicID TEXT, IssueID TEXT, CV_Last_Update TEXT, DynamicName TEXT, weeknumber TEXT, year TEXT, volume TEXT, seriesyear TEXT, annuallink TEXT, rowid INTEGER PRIMARY KEY)')
c.execute('CREATE TABLE IF NOT EXISTS importresults (impID TEXT, ComicName TEXT, ComicYear TEXT, Status TEXT, ImportDate TEXT, ComicFilename TEXT, ComicLocation TEXT, WatchMatch TEXT, DisplayName TEXT, SRID TEXT, ComicID TEXT, IssueID TEXT, Volume TEXT, IssueNumber TEXT, DynamicName TEXT)')
c.execute('CREATE TABLE IF NOT EXISTS readlist (IssueID TEXT, ComicName TEXT, Issue_Number TEXT, Status TEXT, DateAdded TEXT, Location TEXT, inCacheDir TEXT, SeriesYear TEXT, ComicID TEXT, StatusChange TEXT)')
c.execute('CREATE TABLE IF NOT EXISTS annuals (IssueID TEXT, Issue_Number TEXT, IssueName TEXT, IssueDate TEXT, Status TEXT, ComicID TEXT, GCDComicID TEXT, Location TEXT, ComicSize TEXT, Int_IssueNumber INT, ComicName TEXT, ReleaseDate TEXT, ReleaseComicID TEXT, ReleaseComicName TEXT, IssueDate_Edit TEXT)')
c.execute('CREATE TABLE IF NOT EXISTS annuals (IssueID TEXT, Issue_Number TEXT, IssueName TEXT, IssueDate TEXT, Status TEXT, ComicID TEXT, GCDComicID TEXT, Location TEXT, ComicSize TEXT, Int_IssueNumber INT, ComicName TEXT, ReleaseDate TEXT, ReleaseComicID TEXT, ReleaseComicName TEXT, IssueDate_Edit TEXT, DateAdded TEXT)')
c.execute('CREATE TABLE IF NOT EXISTS rssdb (Title TEXT UNIQUE, Link TEXT, Pubdate TEXT, Site TEXT, Size TEXT)')
c.execute('CREATE TABLE IF NOT EXISTS futureupcoming (ComicName TEXT, IssueNumber TEXT, ComicID TEXT, IssueID TEXT, IssueDate TEXT, Publisher TEXT, Status TEXT, DisplayComicName TEXT, weeknumber TEXT, year TEXT)')
c.execute('CREATE TABLE IF NOT EXISTS failed (ID TEXT, Status TEXT, ComicID TEXT, IssueID TEXT, Provider TEXT, ComicName TEXT, Issue_Number TEXT, NZBName TEXT, DateFailed TEXT)')
@ -651,7 +661,6 @@ def dbcheck():
except sqlite3.OperationalError:
c.execute('ALTER TABLE issues ADD COLUMN ImageURL_ALT TEXT')
## -- ImportResults Table --
try:
@ -829,6 +838,7 @@ def dbcheck():
c.execute('SELECT OneOff from nzblog')
except sqlite3.OperationalError:
c.execute('ALTER TABLE nzblog ADD COLUMN OneOff TEXT')
## -- Annuals Table --
try:
@ -877,6 +887,10 @@ def dbcheck():
except sqlite3.OperationalError:
c.execute('ALTER TABLE annuals ADD COLUMN IssueDate_Edit TEXT')
try:
c.execute('SELECT DateAdded from annuals')
except sqlite3.OperationalError:
c.execute('ALTER TABLE annuals ADD COLUMN DateAdded TEXT')
## -- Snatched Table --
@ -965,6 +979,11 @@ def dbcheck():
except sqlite3.OperationalError:
c.execute('ALTER TABLE storyarcs ADD COLUMN Manual TEXT')
try:
c.execute('SELECT DateAdded from storyarcs')
except sqlite3.OperationalError:
c.execute('ALTER TABLE storyarcs ADD COLUMN DateAdded TEXT')
## -- searchresults Table --
try:
c.execute('SELECT SRID from searchresults')
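
The dbcheck() hunks above add the new DateAdded column to the annuals and storyarcs tables using Mylar's usual probe-then-alter idiom. A compact sketch of that idiom (the database path is illustrative, and error handling is reduced to the single case the real code checks for):

```python
import sqlite3

def ensure_column(conn, table, column, coltype='TEXT'):
    c = conn.cursor()
    try:
        # Probe for the column; on an up-to-date schema this just succeeds.
        c.execute('SELECT %s from %s' % (column, table))
    except sqlite3.OperationalError:
        # Older database without the column -- add it in place.
        c.execute('ALTER TABLE %s ADD COLUMN %s %s' % (table, column, coltype))
    conn.commit()

conn = sqlite3.connect('mylar.db')  # illustrative path
ensure_column(conn, 'annuals', 'DateAdded')
ensure_column(conn, 'storyarcs', 'DateAdded')
```
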

View File

@ -104,7 +104,7 @@ class info32p(object):
all_script2 = soup.find_all("link", {"rel": "alternate"})
authfound = False
logger.info('%s Atttempting to integrate with all of your 32P Notification feeds.' % self.module)
logger.info('%s Attempting to integrate with all of your 32P Notification feeds.' % self.module)
#get inkdrop count ...
#user_info = soup.find_all(attrs={"class": "stat"})

View File

@ -101,6 +101,7 @@ _CONFIG_DEFINITIONS = OrderedDict({
'HOST_RETURN' : (str, 'Interface', None),
'AUTHENTICATION' : (int, 'Interface', 0),
'LOGIN_TIMEOUT': (int, 'Interface', 43800),
'ALPHAINDEX': (bool, 'Interface', True),
'API_ENABLED' : (bool, 'API', False),
'API_KEY' : (str, 'API', None),
@ -274,6 +275,7 @@ _CONFIG_DEFINITIONS = OrderedDict({
'MINSEEDS': (int, 'Torrents', 0),
'ALLOW_PACKS': (bool, 'Torrents', False),
'ENABLE_PUBLIC': (bool, 'Torrents', False),
'PUBLIC_VERIFY': (bool, 'Torrents', True),
'AUTO_SNATCH': (bool, 'AutoSnatch', False),
'AUTO_SNATCH_SCRIPT': (str, 'AutoSnatch', None),
@ -761,6 +763,10 @@ class Config(object):
except OSError:
logger.error('[Cache Check] Could not create cache dir. Check permissions of datadir: ' + mylar.DATA_DIR)
if all([self.GRABBAG_DIR is None, self.DESTINATION_DIR is not None]):
self.GRABBAG_DIR = os.path.join(self.DESTINATION_DIR, 'Grabbag')
logger.fdebug('[Grabbag Directory] Setting One-Off directory to default location: %s' % self.GRABBAG_DIR)
## Sanity checking
if any([self.COMICVINE_API is None, self.COMICVINE_API == 'None', self.COMICVINE_API == '']):
logger.error('No User Comicvine API key specified. I will not work very well due to api limits - http://api.comicvine.com/ and get your own free key.')

View File

@ -789,7 +789,7 @@ def fullmonth(monthno):
monthconv = None
for numbs in basmonths:
if numbs in str(int(monthno)):
if int(numbs) == int(monthno):
monthconv = basmonths[numbs]
return monthconv
@ -3539,7 +3539,7 @@ def getImage(comicid, url, issueid=None):
statinfo = os.stat(coverfile)
coversize = statinfo.st_size
if int(coversize) < 30000 or str(r.status_code) != '200':
if int(coversize) < 10000 or str(r.status_code) != '200':
if str(r.status_code) != '200':
logger.info('Trying to grab an alternate cover due to problems trying to retrieve the main cover image.')
else:
@ -3775,6 +3775,20 @@ def lookupthebitches(filelist, folder, nzbname, nzbid, prov, hash, pulldate):
mylar.updater.foundsearch(x['comicid'], x['issueid'], mode=mode, provider=prov, hash=hash)
def DateAddedFix():
import db
myDB = db.DBConnection()
DA_A = datetime.datetime.today()
DateAdded = DA_A.strftime('%Y-%m-%d')
issues = myDB.select("SELECT IssueID FROM issues WHERE Status='Wanted' and DateAdded is NULL")
for da in issues:
myDB.upsert("issues", {'DateAdded': DateAdded}, {'IssueID': da[0]})
annuals = myDB.select("SELECT IssueID FROM annuals WHERE Status='Wanted' and DateAdded is NULL")
for an in annuals:
myDB.upsert("annuals", {'DateAdded': DateAdded}, {'IssueID': an[0]})
def file_ops(path,dst,arc=False,one_off=False):
# # path = source path + filename
# # dst = destination path + filename

View File

@ -74,6 +74,7 @@ if not LOG_LANG.startswith('en'):
logging.getLogger('apscheduler.threadpool').setLevel(logging.WARN)
logging.getLogger('apscheduler.scheduler').propagate = False
logging.getLogger('apscheduler.threadpool').propagate = False
logging.getLogger('cherrypy').propagate = False
lg = logging.getLogger('mylar')
lg.setLevel(logging.DEBUG)
@ -238,7 +239,7 @@ else:
logging.getLogger('apscheduler.threadpool').setLevel(logging.WARN)
logging.getLogger('apscheduler.scheduler').propagate = False
logging.getLogger('apscheduler.threadpool').propagate = False
logging.getLogger('cherrypy').propagate = False
# Close and remove old handlers. This is required to reinit the loggers
# at runtime

View File

@ -54,7 +54,7 @@ def pullsearch(comicapi, comicquery, offset, type):
filterline+= ',name:%s' % x
cnt+=1
PULLURL = mylar.CVURL + str(type) + 's?api_key=' + str(comicapi) + '&filter=name:' + filterline + '&field_list=id,name,start_year,site_detail_url,count_of_issues,image,publisher,deck,description,first_issue,last_issue&format=xml&offset=' + str(offset) # 2012/22/02 - CVAPI flipped back to offset instead of page
PULLURL = mylar.CVURL + str(type) + 's?api_key=' + str(comicapi) + '&filter=name:' + filterline + '&field_list=id,name,start_year,site_detail_url,count_of_issues,image,publisher,deck,description,first_issue,last_issue&format=xml&sort=date_last_updated:desc&offset=' + str(offset) # 2012/22/02 - CVAPI flipped back to offset instead of page
#all these imports are standard on most modern python implementations
#logger.info('MB.PULLURL:' + PULLURL)

View File

@ -107,6 +107,7 @@ class NZBGet(object):
stat = False
double_pp = False
double_type = None
while stat is False:
time.sleep(10)
queueinfo = self.server.listgroups()
@ -117,22 +118,37 @@ class NZBGet(object):
else:
if 'comicrn' in queuedl[0]['PostInfoText'].lower():
double_pp = True
double_type = 'ComicRN'
elif 'nzbtomylar' in queuedl[0]['PostInfoText'].lower():
double_pp = True
double_type = 'nzbToMylar'
if all([len(queuedl[0]['ScriptStatuses']) > 0, double_pp is False]):
for x in queuedl[0]['ScriptStatuses']:
if 'comicrn' in x['Name'].lower():
double_pp = True
double_type = 'ComicRN'
break
elif 'nzbtomylar' in x['Name'].lower():
double_pp = True
double_type = 'nzbToMylar'
break
if all([len(queuedl[0]['Parameters']) > 0, double_pp is False]):
for x in queuedl[0]['Parameters']:
if all(['comicrn' in x['Name'].lower(), x['Value'] == 'yes']):
double_pp = True
double_type = 'ComicRN'
break
elif all(['nzbtomylar' in x['Name'].lower(), x['Value'] == 'yes']):
double_pp = True
double_type = 'nzbToMylar'
break
if double_pp is True:
logger.warn('ComicRN has been detected as being active for this category & download. Completed Download Handling will NOT be performed due to this.')
logger.warn('Either disable Completed Download Handling for NZBGet within Mylar, or remove ComicRN from your category script in NZBGet.')
logger.warn('%s has been detected as being active for this category & download. Completed Download Handling will NOT be performed due to this.' % double_type)
logger.warn('Either disable Completed Download Handling for NZBGet within Mylar, or remove %s from your category script in NZBGet.' % double_type)
return {'status': 'double-pp', 'failed': False}
logger.fdebug('status: %s' % queuedl[0]['Status'])
@ -152,7 +168,7 @@ class NZBGet(object):
found = False
destdir = None
double_pp = False
hq = [hs for hs in history if hs['NZBID'] == nzbid and ('SUCCESS' in hs['Status'] or 'COPY' in hs['Status'])]
hq = [hs for hs in history if hs['NZBID'] == nzbid and ('SUCCESS' in hs['Status'] or ('COPY' in hs['Status'] and 'DELETED' not in hq[0]['Status']))]
if len(hq) > 0:
logger.fdebug('found matching completed item in history. Job has a status of %s' % hq[0]['Status'])
if len(hq[0]['ScriptStatuses']) > 0:
@ -172,7 +188,7 @@ class NZBGet(object):
logger.warn('Either disable Completed Download Handling for NZBGet within Mylar, or remove ComicRN from your category script in NZBGet.')
return {'status': 'double-pp', 'failed': False}
if all(['SUCCESS' in hq[0]['Status'], hq[0]['DownloadedSizeMB'] == hq[0]['FileSizeMB']]):
if all(['SUCCESS' in hq[0]['Status'], (hq[0]['FileSizeMB']*.95) <= hq[0]['DownloadedSizeMB'] <= (hq[0]['FileSizeMB']*1.05)]):
logger.fdebug('%s has final file size of %sMB' % (hq[0]['Name'], hq[0]['DownloadedSizeMB']))
if os.path.isdir(hq[0]['DestDir']):
destdir = hq[0]['DestDir']
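Editor's note: the history check now treats a download as complete when its size lands within 5% of the reported file size instead of demanding an exact match, which NZBGet's rounded MB figures rarely give. A minimal standalone sketch of that tolerance test (illustrative only):

# Illustrative sketch only -- the 5% tolerance mirrors the check in the diff.
def size_matches(downloaded_mb, filesize_mb, tolerance=0.05):
    """Return True when the downloaded size is within +/- tolerance of the expected size."""
    low = filesize_mb * (1 - tolerance)
    high = filesize_mb * (1 + tolerance)
    return low <= downloaded_mb <= high

print(size_matches(97, 100))    # True  (within 5%)
print(size_matches(100, 100))   # True  (exact match still passes)
print(size_matches(90, 100))    # False (off by 10%)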

View File

@ -380,7 +380,7 @@ class OPDS(object):
updated = issue['ReleaseDate']
image = None
thumbnail = None
if 'DateAdded' in issue:
if not 'ReleaseComicID' in issue:
title = escape('%s - %s' % (issue['Issue_Number'], issue['IssueName']))
image = issue['ImageURL_ALT']
thumbnail = issue['ImageURL']
@ -451,7 +451,7 @@ class OPDS(object):
image = None
thumbnail = None
if issuebook:
if 'DateAdded' in issuebook.keys():
if not 'ReleaseComicID' in issuebook.keys():
if issuebook['DateAdded'] is None:
title = escape('%03d: %s #%s - %s (In stores %s)' % (index + number, issuebook['ComicName'], issuebook['Issue_Number'], issuebook['IssueName'], issuebook['ReleaseDate']))
image = issuebook['ImageURL_ALT']
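Editor's note: both OPDS hunks stop keying off DateAdded (which the tier changes now populate on more rows) and instead treat the absence of a ReleaseComicID key as the mark of a regular issue row; annual rows are assumed to carry that column. A minimal sketch of that discrimination (illustrative only; the row contents are made up):

# Illustrative sketch only -- assumed row shapes; annual rows carry ReleaseComicID, issue rows do not.
issue_row = {'Issue_Number': '12', 'IssueName': 'Endgame', 'DateAdded': '2018-10-20'}
annual_row = {'Issue_Number': '1', 'IssueName': 'Annual', 'ReleaseComicID': '4050-12345'}

def is_regular_issue(row):
    return 'ReleaseComicID' not in row

print(is_regular_issue(issue_row))   # True
print(is_regular_issue(annual_row))  # False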

View File

@ -51,6 +51,7 @@ class Readinglist(object):
logger.error(self.module + ' Cannot locate IssueID - aborting..')
return
else:
logger.fdebug('%s Successfully found annual for %s' % (self.module, readlist['ComicID']))
annualize = True
comicinfo = myDB.selectone("SELECT * from comics where ComicID=?", [readlist['ComicID']]).fetchone()
logger.info(self.module + ' Attempting to add issueid ' + readlist['IssueID'])
@ -71,19 +72,19 @@ class Readinglist(object):
if not locpath is None:
comicissue = readlist['Issue_Number']
comicname = comicinfo['ComicName']
if annualize is True:
comicname = readlist['ReleaseComicName']
else:
comicname = comicinfo['ComicName']
dspinfo = comicname + ' #' + comicissue
if annualize:
if mylar.CONFIG.ANNUALS_ON:
if 'annual' in readlist['ComicName'].lower():
if annualize is True:
if mylar.CONFIG.ANNUALS_ON is True:
dspinfo = comicname + ' #' + readlist['Issue_Number']
if 'annual' in comicname.lower():
comicissue = 'Annual ' + readlist['Issue_Number']
dspinfo = comicname + ' Annual #' + readlist['Issue_Number']
elif 'special' in readlist['ComicName'].lower():
elif 'special' in comicname.lower():
comicissue = 'Special ' + readlist['Issue_Number']
dspinfo = comicname + ' Special #' + readlist['Issue_Number']
else:
comicname = comicinfo['ComicName'] + ' Annual'
dspinfo = comicname + ' #' + comicissue
ctrlval = {"IssueID": self.IssueID}
newval = {"DateAdded": helpers.today(),
"Status": "Added",

View File

@ -114,6 +114,7 @@ def torrents(pickfeed=None, seriesname=None, issue=None, feedinfo=None):
elif pickfeed == "999": #WWT rss feed
feed = mylar.WWTURL + 'rss.php?cat=132,50'
feedtype = ' from the New Releases RSS Feed from WorldWideTorrents'
verify = bool(mylar.CONFIG.PUBLIC_VERIFY)
elif int(pickfeed) >= 7 and feedinfo is not None:
#personal 32P notification feeds.
#get the info here
@ -135,24 +136,35 @@ def torrents(pickfeed=None, seriesname=None, issue=None, feedinfo=None):
elif pickfeed == '1' or pickfeed == '4' or int(pickfeed) > 7:
picksite = '32P'
if all([pickfeed != '4', pickfeed != '3', pickfeed != '5', pickfeed != '999']):
if all([pickfeed != '4', pickfeed != '3', pickfeed != '5']):
payload = None
ddos_protection = round(random.uniform(0,15),2)
time.sleep(ddos_protection)
logger.info('Now retrieving feed from %s' % picksite)
try:
headers = {'Accept-encoding': 'gzip',
'User-Agent': mylar.CV_HEADERS['User-Agent']}
cf_cookievalue = None
scraper = cfscrape.create_scraper()
if pickfeed == '2':
cf_cookievalue, cf_user_agent = scraper.get_tokens(feed)
headers = {'Accept-encoding': 'gzip',
'User-Agent': cf_user_agent}
if pickfeed == '999':
if all([pickfeed == '999', mylar.WWT_CF_COOKIEVALUE is None]):
try:
cf_cookievalue, cf_user_agent = scraper.get_tokens(feed, user_agent=mylar.CV_HEADERS['User-Agent'])
except Exception as e:
logger.warn('[WWT-RSSFEED] Unable to retrieve RSS properly: %s' % e)
lp+=1
continue
else:
mylar.WWT_CF_COOKIEVALUE = cf_cookievalue
cookievalue = cf_cookievalue
elif pickfeed == '999':
cookievalue = mylar.WWT_CF_COOKIEVALUE
if cf_cookievalue:
r = scraper.get(feed, verify=verify, cookies=cf_cookievalue, headers=headers)
r = scraper.get(feed, verify=verify, cookies=cookievalue, headers=headers)
else:
r = scraper.get(feed, verify=verify)
r = scraper.get(feed, verify=verify, headers=headers)
except Exception, e:
logger.warn('Error fetching RSS Feed Data from %s: %s' % (picksite, e))
lp+=1
@ -188,12 +200,12 @@ def torrents(pickfeed=None, seriesname=None, issue=None, feedinfo=None):
#DEMONOID SEARCH RESULT (parse)
pass
elif pickfeed == "999":
try:
feedme = feedparser.parse(feed)
except Exception, e:
logger.warn('Error fetching RSS Feed Data from %s: %s' % (picksite, e))
lp+=1
continue
#try:
# feedme = feedparser.parse(feed)
#except Exception, e:
# logger.warn('Error fetching RSS Feed Data from %s: %s' % (picksite, e))
# lp+=1
# continue
#WWT / FEED
for entry in feedme.entries:
@ -233,9 +245,11 @@ def torrents(pickfeed=None, seriesname=None, issue=None, feedinfo=None):
tmpsz_end = tmp1 + 2
tmpsz_st += 7
else:
tmpsz = tmpsz[:80] #limit it to the first 80 characters so it doesn't pick up alt covers mistakenly
tmpsz_st = tmpsz.rfind('|')
if tmpsz_st != -1:
tmpsize = tmpsz[tmpsz_st:tmpsz_st+14]
tmpsz_end = tmpsz.find('<br />', tmpsz_st)
tmpsize = tmpsz[tmpsz_st:tmpsz_end] #st+14]
if any(['GB' in tmpsize, 'MB' in tmpsize, 'KB' in tmpsize, 'TB' in tmpsize]):
tmp1 = tmpsz.find('MB', tmpsz_st)
if tmp1 == -1:
@ -260,7 +274,6 @@ def torrents(pickfeed=None, seriesname=None, issue=None, feedinfo=None):
elif 'TB' in tmpsz[tmpsz_st:tmpsz_end]:
szform = 'TB'
sz = 'T'
tsize = helpers.human2bytes(str(tmpsz[tmpsz_st:tmpsz.find(szform, tmpsz_st) -1]) + str(sz))
#timestamp is in YYYY-MM-DDTHH:MM:SS+TZ :/
@ -278,9 +291,10 @@ def torrents(pickfeed=None, seriesname=None, issue=None, feedinfo=None):
feeddata.append({
'site': picksite,
'title': feedme.entries[i].title,
'link': str(urlparse.urlparse(feedme.entries[i].link)[2].rpartition('/')[0].rsplit('/',2)[2]),
'link': str(re.sub('genid=', '', urlparse.urlparse(feedme.entries[i].link)[4]).strip()),
#'link': str(urlparse.urlparse(feedme.entries[i].link)[2].rpartition('/')[0].rsplit('/',2)[2]),
'pubdate': pdate,
'size': tsize,
'size': tsize
})
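Editor's note: the WWT feed entries now store the torrent's genid query parameter as the link, rather than a fragment of the URL path. A minimal sketch of that extraction (illustrative only; the URL is a made-up example of the assumed feed-link shape):

# Illustrative sketch only -- example URL; real links come from the WWT RSS entries.
import re
try:
    from urllib.parse import urlparse   # Python 3
except ImportError:
    from urlparse import urlparse        # Python 2

link = 'https://worldwidetorrents.me/download.php?genid=123456'
parts = urlparse(link)
# index [4] of the parse result is the query string, as used in the diff
torrent_id = re.sub('genid=', '', parts[4]).strip()
print(torrent_id)  # 123456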
#32p / FEEDS
@ -942,7 +956,7 @@ def torsend2client(seriesname, issue, seriesyear, linkit, site, pubhash=None):
wwt_referrer = 'http' + mylar.WWTURL[5:]
headers = {'Accept-encoding': 'gzip',
'User-Agent': str(mylar.USER_AGENT),
'User-Agent': mylar.CV_HEADERS['User-Agent'],
'Referer': wwt_referrer}
logger.fdebug('Grabbing torrent [id:' + str(linkit) + '] from url:' + str(url))
@ -978,8 +992,11 @@ def torsend2client(seriesname, issue, seriesyear, linkit, site, pubhash=None):
return "fail"
try:
scraper = cfscrape.create_scraper()
if cf_cookievalue:
r = scraper.get(url, params=payload, cookies=cf_cookievalue, verify=verify, stream=True, headers=headers)
if site == 'WWT':
if mylar.WWT_CF_COOKIEVALUE is None:
cf_cookievalue, cf_user_agent = s.get_tokens(newurl, user_agent=mylar.CV_HEADERS['User-Agent'])
mylar.WWT_CF_COOKIEVALUE = cf_cookievalue
r = scraper.get(url, params=payload, cookies=mylar.WWT_CF_COOKIEVALUE, verify=verify, stream=True, headers=headers)
else:
r = scraper.get(url, params=payload, verify=verify, stream=True, headers=headers)
#r = requests.get(url, params=payload, verify=verify, stream=True, headers=headers)
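Editor's note: several of the WWT changes above share one idea - solve the Cloudflare challenge once, stash the resulting cookies in mylar.WWT_CF_COOKIEVALUE, and reuse them for every later RSS pull and torrent grab instead of re-solving on each request. A minimal standalone sketch of that caching pattern with cfscrape (illustrative only; create_scraper and get_tokens are the cfscrape calls used in the diff, while the URL, user agent, and module-level cache are placeholders):

# Illustrative sketch only -- a module-level cache standing in for mylar.WWT_CF_COOKIEVALUE.
import cfscrape

WWT_CF_COOKIEVALUE = None   # solved-challenge cookies, cached for the life of the process
USER_AGENT = 'Mozilla/5.0'

def fetch(url):
    global WWT_CF_COOKIEVALUE
    scraper = cfscrape.create_scraper()
    if WWT_CF_COOKIEVALUE is None:
        # solve the Cloudflare challenge once and keep the cookies around
        cookievalue, _user_agent = scraper.get_tokens(url, user_agent=USER_AGENT)
        WWT_CF_COOKIEVALUE = cookievalue
    return scraper.get(url, cookies=WWT_CF_COOKIEVALUE,
                       headers={'User-Agent': USER_AGENT}, verify=True)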

View File

@ -56,7 +56,7 @@ class tehMain():
logger.info('[RSS-FEEDS] Initiating Torrent RSS Feed Check on Demonoid / WorldWideTorrents.')
#rsscheck.torrents(pickfeed='3') #TP.SE RSS Check (has to be page-parsed)
rsscheck.torrents(pickfeed='Public') #TPSE = DEM RSS Check + WWT RSS Check
if mylar.CONFIG.ENABLE_32P:
if mylar.CONFIG.ENABLE_32P is True:
logger.info('[RSS-FEEDS] Initiating Torrent RSS Feed Check on 32P.')
if mylar.CONFIG.MODE_32P == 0:
logger.fdebug('[RSS-FEEDS] 32P mode set to Legacy mode. Monitoring New Releases feed only.')
@ -75,7 +75,7 @@ class tehMain():
if feedinfo != "disable":
pass
else:
mylar.CONFIG.ENABLE_32P = 0
mylar.CONFIG.ENABLE_32P = False
#mylar.config_write()
else:
feedinfo = mylar.FEEDINFO_32P

View File

@ -76,10 +76,10 @@ def search_init(ComicName, IssueNumber, ComicYear, SeriesYear, Publisher, IssueD
logger.info("Annual/Special issue search detected. Appending to issue #")
#anything for mode other than None indicates an annual.
if all(['annual' not in ComicName.lower(), 'special' not in ComicName.lower()]):
ComicName = ComicName + " annual"
ComicName = ComicName + " Annual"
if all([AlternateSearch is not None, AlternateSearch != "None", 'special' not in ComicName.lower()]):
AlternateSearch = AlternateSearch + " annual"
AlternateSearch = AlternateSearch + " Annual"
if mode == 'pullwant' or IssueID is None:
#one-off the download.
@ -367,7 +367,11 @@ def search_init(ComicName, IssueNumber, ComicYear, SeriesYear, Publisher, IssueD
prov_count+=1
if findit['status'] is True:
srchloop = 4
if searchprov == 'newznab':
searchprov = newznab_host[0].rstrip() + ' (newznab)'
elif searchprov == 'torznab':
searchprov = torznab_host[0].rstrip() + ' (torznab)'
srchloop = 4
break
elif srchloop == 2 and (cmloopit -1 >= 1):
time.sleep(30) #pause for 30s to not hammmer api's
@ -1064,7 +1068,7 @@ def NZB_SEARCH(ComicName, IssueNumber, ComicYear, SeriesYear, Publisher, IssueDa
fndcomicversion = None
for ct in ctchk:
if any([ct.lower().startswith('v') and ct[1:].isdigit(), ct.lower()[:3] == 'vol', volfound == True]):
if any([ct.lower().startswith('v') and ct[1:].isdigit(), ct.lower()[:3] == 'vol' and (len(ct) == 3 or ct[3:].isdigit()), volfound == True]):
if volfound == True:
logger.fdebug('Split Volume label detected [' + ct + '] - ie. Vol 4. Attempting to adjust.')
if ct.isdigit():
@ -1079,6 +1083,7 @@ def NZB_SEARCH(ComicName, IssueNumber, ComicYear, SeriesYear, Publisher, IssueDa
logger.fdebug('Newly finished reformed cleantitle (with NO volume label): ' + cleantitle)
volfound == False
tmpsplit = ct
#re.sub(r'\W+','', tmpsplit[tmpsplit.find('vol')+3]) == '']
if tmpsplit.lower().startswith('vol'):
logger.fdebug('volume detected - stripping and re-analyzing for volume label.')
origvol = tmpsplit
@ -1912,7 +1917,8 @@ def searchforissue(issueid=None, new=False, rsscheck=None, manual=False):
'SARC': None,
'StoryArcID': None,
'IssueArcID': None,
'mode': 'want'
'mode': 'want',
'DateAdded': iss['DateAdded']
})
elif stloop == 2:
if mylar.CONFIG.SEARCH_STORYARCS is True or rsscheck:
@ -1930,7 +1936,8 @@ def searchforissue(issueid=None, new=False, rsscheck=None, manual=False):
'SARC': iss['StoryArc'],
'StoryArcID': iss['StoryArcID'],
'IssueArcID': iss['IssueArcID'],
'mode': 'story_arc'
'mode': 'story_arc',
'DateAdded': iss['DateAdded']
})
cnt+=1
logger.info('Storyarcs to be searched for : %s' % cnt)
@ -1948,7 +1955,8 @@ def searchforissue(issueid=None, new=False, rsscheck=None, manual=False):
'SARC': None,
'StoryArcID': None,
'IssueArcID': None,
'mode': 'want_ann'
'mode': 'want_ann',
'DateAdded': iss['DateAdded']
})
stloop-=1
@ -2016,7 +2024,22 @@ def searchforissue(issueid=None, new=False, rsscheck=None, manual=False):
else:
ComicYear = str(result['IssueDate'])[:4]
if rsscheck is None:
if result['DateAdded'] is None:
DA = datetime.datetime.today()
DateAdded = DA.strftime('%Y-%m-%d')
if result['mode'] == 'want':
table = 'issues'
elif result['mode'] == 'want_ann':
table = 'annuals'
elif result['mode'] == 'story_arc':
table = 'storyarcs'
logger.fdebug('%s #%s did not have a DateAdded recorded, setting it : %s' % (comic['ComicName'], result['Issue_Number'], DateAdded))
myDB.upsert(table, {'DateAdded': DateAdded}, {'IssueID': result['IssueID']})
else:
DateAdded = result['DateAdded']
if rsscheck is None and DateAdded >= mylar.SEARCH_TIER_DATE:
logger.info('adding: ComicID:%s IssueiD: %s' % (result['ComicID'], result['IssueID']))
mylar.SEARCH_QUEUE.put({'comicname': comic['ComicName'], 'seriesyear': SeriesYear, 'issuenumber': result['Issue_Number'], 'issueid': result['IssueID'], 'comicid': result['ComicID']})
continue
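Editor's note: the searchforissue loop now backfills a missing DateAdded and only pushes an item onto the search queue when that date falls on or after mylar.SEARCH_TIER_DATE; older wanted items are left to the lower search tier. Because both values are YYYY-MM-DD strings, a plain string comparison is enough. A minimal sketch of that gate (illustrative only; the tier date and rows are made up):

# Illustrative sketch only -- made-up rows; SEARCH_TIER_DATE stands in for mylar.SEARCH_TIER_DATE.
import datetime

SEARCH_TIER_DATE = '2018-10-06'   # e.g. a cutoff a couple of weeks back from today

results = [
    {'IssueID': '1001', 'DateAdded': '2018-10-19'},
    {'IssueID': '1002', 'DateAdded': None},          # never stamped -- gets today's date
    {'IssueID': '1003', 'DateAdded': '2018-08-01'},  # older than the tier cutoff
]

queue = []
for result in results:
    date_added = result['DateAdded']
    if date_added is None:
        date_added = datetime.datetime.today().strftime('%Y-%m-%d')
    # ISO dates compare correctly as strings, which is what the diff relies on
    if date_added >= SEARCH_TIER_DATE:
        queue.append(result['IssueID'])

print(queue)  # ['1001', '1002']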

View File

@ -70,7 +70,7 @@ class WebInterface(object):
def home(self):
comics = helpers.havetotals()
return serve_template(templatename="index.html", title="Home", comics=comics)
return serve_template(templatename="index.html", title="Home", comics=comics, alphaindex=mylar.CONFIG.ALPHAINDEX)
home.exposed = True
def comicDetails(self, ComicID):
@ -2022,6 +2022,10 @@ class WebInterface(object):
futureupcoming = sorted(futureupcoming, key=itemgetter('IssueDate', 'ComicName', 'IssueNumber'), reverse=True)
#fix None DateAdded points here
helpers.DateAddedFix()
issues = myDB.select("SELECT * from issues WHERE Status='Wanted'")
if mylar.CONFIG.UPCOMING_STORYARCS is True:
arcs = myDB.select("SELECT * from storyarcs WHERE Status='Wanted'")
@ -2042,6 +2046,7 @@ class WebInterface(object):
isCounts[1] = 0 #1 wanted
isCounts[2] = 0 #2 snatched
isCounts[3] = 0 #3 failed
isCounts[4] = 0 #4 wantedTier
ann_list = []
@ -2061,7 +2066,8 @@ class WebInterface(object):
issues += annuals_list
issues_tmp = sorted(issues, key=itemgetter('ReleaseDate'), reverse=True)
issues = sorted(issues_tmp, key=itemgetter('Status'), reverse=True)
issues_tmp1 = sorted(issues_tmp, key=itemgetter('DateAdded'), reverse=True)
issues = sorted(issues_tmp1, key=itemgetter('Status'), reverse=True)
for curResult in issues:
baseissues = {'wanted': 1, 'snatched': 2, 'failed': 3}
@ -2070,17 +2076,21 @@ class WebInterface(object):
continue
else:
if seas in curResult['Status'].lower():
sconv = baseissues[seas]
isCounts[sconv]+=1
if all([curResult['DateAdded'] <= mylar.SEARCH_TIER_DATE, curResult['Status'] == 'Wanted']):
isCounts[4]+=1
else:
sconv = baseissues[seas]
isCounts[sconv]+=1
continue
isCounts = {"Wanted": str(isCounts[1]),
"Snatched": str(isCounts[2]),
"Failed": str(isCounts[3]),
"StoryArcs": str(len(arcs))}
"StoryArcs": str(len(arcs)),
"WantedTier": str(isCounts[4])}
iss_cnt = int(isCounts['Wanted'])
wantedcount = iss_cnt # + ann_cnt
wantedcount = iss_cnt + int(isCounts['WantedTier']) # + ann_cnt
#let's straightload the series that have no issue data associated as of yet (ie. new series) from the futurepulllist
future_nodata_upcoming = myDB.select("SELECT * FROM futureupcoming WHERE IssueNumber='1' OR IssueNumber='0'")
@ -4451,10 +4461,52 @@ class WebInterface(object):
COUNT_HAVES = CHAVES[0][0]
COUNT_ISSUES = CISSUES[0][0]
COUNT_SIZE = helpers.human_size(CSIZE[0][0])
CCONTCOUNT = 0
cti = helpers.havetotals()
for cchk in cti:
if cchk['recentstatus'] == 'Continuing':
CCONTCOUNT += 1
comicinfo = {"COUNT_COMICS": COUNT_COMICS,
"COUNT_HAVES": COUNT_HAVES,
"COUNT_ISSUES": COUNT_ISSUES,
"COUNT_SIZE": COUNT_SIZE}
"COUNT_SIZE": COUNT_SIZE,
"CCONTCOUNT": CCONTCOUNT}
DLPROVSTATS = myDB.select("SELECT Provider, COUNT(Provider) AS Frequency FROM Snatched WHERE Status = 'Snatched' AND Provider is NOT NULL GROUP BY Provider ORDER BY Frequency DESC")
freq = dict()
freq_tot = 0
for row in DLPROVSTATS:
if any(['CBT' in row['Provider'], '32P' in row['Provider'], 'ComicBT' in row['Provider']]):
try:
tmpval = freq['32P']
freq.update({'32P': tmpval + row['Frequency']})
except:
freq.update({'32P': row['Frequency']})
elif 'KAT' in row['Provider']:
try:
tmpval = freq['KAT']
freq.update({'KAT': tmpval + row['Frequency']})
except:
freq.update({'KAT': row['Frequency']})
elif 'experimental' in row['Provider']:
try:
tmpval = freq['Experimental']
freq.update({'Experimental': tmpval + row['Frequency']})
except:
freq.update({'Experimental': row['Frequency']})
elif [True for x in freq if re.sub("\(newznab\)", "", str(row['Provider'])).strip() in x]:
try:
tmpval = freq[re.sub("\(newznab\)", "", row['Provider']).strip()]
freq.update({re.sub("\(newznab\)", "", row['Provider']).strip(): tmpval + row['Frequency']})
except:
freq.update({re.sub("\(newznab\)", "", row['Provider']).strip(): row['Frequency']})
else:
freq.update({re.sub("\(newznab\)", "", row['Provider']).strip(): row['Frequency']})
freq_tot += row['Frequency']
dlprovstats = sorted(freq.iteritems(), key=itemgetter(1), reverse=True)
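Editor's note: the new download-provider statistics walk the Snatched rows, fold the 32P/CBT/ComicBT aliases together, strip the "(newznab)" suffix, and total everything per provider. A minimal sketch of the same aggregation with collections.Counter (illustrative only; the rows are made up and the alias handling is simplified):

# Illustrative sketch only -- made-up snatch rows; simplified alias folding.
import re
from collections import Counter

rows = [
    {'Provider': '32P', 'Frequency': 12},
    {'Provider': 'ComicBT', 'Frequency': 3},
    {'Provider': 'dognzb (newznab)', 'Frequency': 7},
    {'Provider': 'experimental', 'Frequency': 2},
]

freq = Counter()
for row in rows:
    name = row['Provider']
    if any(alias in name for alias in ('CBT', '32P', 'ComicBT')):
        name = '32P'
    elif 'experimental' in name:
        name = 'Experimental'
    else:
        name = re.sub(r'\(newznab\)', '', name).strip()
    freq[name] += row['Frequency']

freq_tot = sum(freq.values())
dlprovstats = freq.most_common()   # sorted by frequency, descending
print(dlprovstats, freq_tot)       # [('32P', 15), ('dognzb', 7), ('Experimental', 2)] 24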
if mylar.SCHED_RSS_LAST is None:
rss_sclast = 'Unknown'
@ -4670,7 +4722,10 @@ class WebInterface(object):
"opds_authentication": helpers.checked(mylar.CONFIG.OPDS_AUTHENTICATION),
"opds_username": mylar.CONFIG.OPDS_USERNAME,
"opds_password": mylar.CONFIG.OPDS_PASSWORD,
"opds_metainfo": helpers.checked(mylar.CONFIG.OPDS_METAINFO)
"opds_metainfo": helpers.checked(mylar.CONFIG.OPDS_METAINFO),
"dlstats": dlprovstats,
"dltotals": freq_tot,
"alphaindex": mylar.CONFIG.ALPHAINDEX
}
return serve_template(templatename="config.html", title="Settings", config=config, comicinfo=comicinfo)
config.exposed = True
@ -4880,7 +4935,7 @@ class WebInterface(object):
'lowercase_filenames', 'autowant_upcoming', 'autowant_all', 'comic_cover_local', 'alternate_latest_series_covers', 'cvinfo', 'snatchedtorrent_notify',
'prowl_enabled', 'prowl_onsnatch', 'nma_enabled', 'nma_onsnatch', 'pushover_enabled', 'pushover_onsnatch', 'boxcar_enabled',
'boxcar_onsnatch', 'pushbullet_enabled', 'pushbullet_onsnatch', 'telegram_enabled', 'telegram_onsnatch', 'slack_enabled', 'slack_onsnatch',
'opds_enable', 'opds_authentication', 'opds_metainfo']
'opds_enable', 'opds_authentication', 'opds_metainfo', 'alphaindex']
for checked_config in checked_configs:
if checked_config not in kwargs:

View File

@ -962,10 +962,10 @@ def new_pullcheck(weeknumber, pullyear, comic1off_name=None, comic1off_id=None,
annualidmatch = [x for x in weeklylist if week['annuallink'] is not None and (int(x['ComicID']) == int(week['annuallink']))]
#The above will auto-match against ComicID if it's populated on the pullsite, otherwise do name-matching.
namematch = [ab for ab in weeklylist if ab['DynamicName'] == week['dynamicname']]
logger.fdebug('rowid: ' + str(week['rowid']))
logger.fdebug('idmatch: ' + str(idmatch))
logger.fdebug('annualidmatch: ' + str(annualidmatch))
logger.fdebug('namematch: ' + str(namematch))
#logger.fdebug('rowid: ' + str(week['rowid']))
#logger.fdebug('idmatch: ' + str(idmatch))
#logger.fdebug('annualidmatch: ' + str(annualidmatch))
#logger.fdebug('namematch: ' + str(namematch))
if any([idmatch,namematch,annualidmatch]):
if idmatch and not annualidmatch:
comicname = idmatch[0]['ComicName'].strip()

View File

@ -22,7 +22,7 @@ import time
import sys
import datetime
from datetime import timedelta
import lib.cfscrape as cfscrape
import mylar
from mylar import logger, helpers
@ -43,9 +43,13 @@ class wwt(object):
'incldead': 0,
'lang': 0}
with requests.Session() as s:
with cfscrape.create_scraper() as s:
newurl = self.url + 'torrents-search.php'
r = s.get(newurl, params=params, verify=True)
if mylar.WWT_CF_COOKIEVALUE is None:
cf_cookievalue, cf_user_agent = s.get_tokens(newurl, user_agent=mylar.CV_HEADERS['User-Agent'])
mylar.WWT_CF_COOKIEVALUE = cf_cookievalue
r = s.get(newurl, params=params, verify=True, cookies=mylar.WWT_CF_COOKIEVALUE, headers=mylar.CV_HEADERS)
if not r.status_code == 200:
return