FIX: Added user-agent to all CV queries, and switched all queries to utilize the requests module
FIX: Added User-Agent to ComicTagger and switched to requests module for all CV queries
FIX: Removed libraryscan option from GUI as no longer used
FIX: Ability to sort 'Have' column on main index page properly now (by % complete)
FIX: Fixed some display issues with the series detail page
FIX: Metatagging should now be working again for story-arc and one-off post-processing
FIX: Fixed post-processing problem with decimal places if number was padded
FIX: Updated CV-URL to point to new CV api location
FIX: Fixed problem when file-checking where, if a series contained numbers and/or decimals, the modified series name would be used instead of the actual series name, resulting in some missed matches
IMP: Added another keyword to ignore when checking for annuals against a particular series
FIX:(#1210) When importing files, if issues were meta-tagged with CVS, the metadata would not scan properly
FIX: Fixed checkboxes on annuals table so the drop-down will work again (just doesn't refresh the screen afterwards atm)
FIX:(#1182) Notifications not respecting http base
IMP: Added option to specify SSL verification on/off per provider when searching/downloading
FIX: Possible fix for cache_dir not being used on restarts of Mylar
FIX: configparser check would only run on Linux systems; when not present, this caused errors during post-processing on non-nix-based systems
FIX:(#1181) Manual renaming of an entire series would fail if lowercase_filenames was enabled

This commit is contained in:
evilhero 2016-02-25 11:40:09 -05:00
parent bbcd4ea82a
commit b4f6d9a12a
23 changed files with 536 additions and 318 deletions
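The common thread in these changes is moving every ComicVine query onto the requests module with an explicit User-Agent, plus optional per-provider SSL verification. A minimal sketch of the query pattern the diffs below implement (the header string and API URL are taken from the hunks; the endpoint and field_list are illustrative):

    import time
    import requests

    CVURL = 'http://comicvine.gamespot.com/api/'
    CV_HEADERS = {'User-Agent': 'Mozilla/5.0 (Windows NT 6.1; WOW64; rv:40.0) Gecko/20100101 Firefox/40.1'}

    def cv_query(endpoint, api_key, **params):
        # CV rate-limits to roughly 1 api request/second; the diffs below sleep 2s to be safe.
        time.sleep(2)
        params.update({'api_key': api_key, 'format': 'json'})
        r = requests.get(CVURL + endpoint, params=params, headers=CV_HEADERS)
        return r.json()

    # hypothetical usage: cv_query('issue/4000-1/', 'yourapikey', field_list='name')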

View File

@ -102,7 +102,6 @@ def main():
print "Daemonize not supported under Windows, starting normally" print "Daemonize not supported under Windows, starting normally"
else: else:
mylar.DAEMON = True mylar.DAEMON = True
mylar.VERBOSE = 0
if args.pidfile: if args.pidfile:
mylar.PIDFILE = str(args.pidfile) mylar.PIDFILE = str(args.pidfile)

View File

@@ -171,41 +171,41 @@ table#series_table td#active { vertical-align: middle; text-align: left; max-wid
div#paddingheader { padding-top: 48px; font-size: 24px; font-weight: bold; text-align: center; }
div#paddingheadertitle { padding-top: 24px; font-size: 24px; font-weight: bold; text-align: center; }
div#nopaddingheader { font-size: 24px; font-weight: bold; text-align: center; }
-table#issue_table { background-color: grey; width: 100%; padding: 10px; }
+table#issue_table { background-color: white; width: 100%; padding: 10px; }
-table#issue_table th#select { vertical-align: middle; text-align: left; min-width: 5px; }
+table#issue_table th#select { vertical-align: middle; text-align: left; max-width: 10px; }
-table#issue_table th#int_issuenumber { text-align: left; min-width: 1px }
+table#issue_table th#int_issuenumber { text-align: left; max-width: 50px }
-table#issue_table th#issuenumber { text-align: left; min-width: 10px; }
+table#issue_table th#issuenumber { text-align: left; max-width: 50px; }
table#issue_table th#issuename { text-align: center; min-width: 200px; }
-table#issue_table th#reldate { text-align: center; min-width: 10px; }
+table#issue_table th#reldate { text-align: center; max-width: 75px; }
-table#issue_table th#status { text-align: center; min-width: 10px; }
+table#issue_table th#status { text-align: center; max-width: 75px; }
-table#issue_table th#options { text-align: center; min-width: 30px; }
+table#issue_table th#options { text-align: center; min-width: 50px; }
-table#issue_table td#select { vertical-align: middle; text-align: left; min-width: 5px; }
+table#issue_table td#select { vertical-align: middle; text-align: left; max-width: 10px; }
-table#issue_table td#int_issuenumber { vertical-align: middle; text-align: left; }
+table#issue_table td#int_issuenumber { vertical-align: middle; text-align: left; max-width: 50px; }
-table#issue_table td#issuenumber { vertical-align: middle; text-align: left; max-width: 10px; }
+table#issue_table td#issuenumber { vertical-align: middle; text-align: left; max-width: 50px; }
-table#issue_table td#issuename { vertical-align: middle; text-align: center; font-size: 9px; }
+table#issue_table td#issuename { vertical-align: middle; text-align: center; min-width: 200px; font-size: 9px; }
-table#issue_table td#reldate { vertical-align: middle; text-align: center; }
+table#issue_table td#reldate { vertical-align: middle; text-align: center; max-width: 75px; }
-table#issue_table td#status { vertical-align: middle; text-align: center; font-size: 13px; }
+table#issue_table td#status { vertical-align: middle; text-align: center; max-width: 75px; font-size: 13px; }
-table#issue_table td#options { vertical-align: middle; text-align: center; }
+table#issue_table td#options { vertical-align: middle; text-align: center; min-width: 50px; }
table#annual_table { background-color: white; width: 100%; padding: 10px; }
-table#annual_table th#aselect { vertical-align: middle; text-align: left; min-width: 10px; }
+table#annual_table th#select { vertical-align: middle; text-align: left; max-width: 10px; }
-table#annual_table th#aint_issuenumber { text-align: left; min-width: 0px }
+table#annual_table th#aint_issuenumber { text-align: left; max-width: 1px }
-table#annual_table th#aissuenumber { text-align: left; min-width: 20px; }
+table#annual_table th#aissuenumber { text-align: left; max-width: 50px; }
table#annual_table th#aissuename { text-align: center; min-width: 200px; }
-table#annual_table th#areldate { text-align: center; min-width: 10px; }
+table#annual_table th#areldate { text-align: center; max-width: 75px; }
-table#annual_table th#astatus { text-align: center; min-width: 10px; }
+table#annual_table th#astatus { text-align: center; max-width: 75px; }
-table#annual_table th#aoptions { text-align: center; min-width: 20px; }
+table#annual_table th#aoptions { text-align: center; min-width: 50px; }
-table#annual_table td#aselect { vertical-align: middle; text-align: left; }
+table#annual_table td#select { vertical-align: middle; text-align: left; max-width: 10px; }
-table#annual_table td#aint_issuenumber { vertical-align: middle; text-align: left; }
+table#annual_table td#aint_issuenumber { vertical-align: middle; text-align: left; max-width: 1 px; }
-table#annual_table td#aissuenumber { vertical-align: middle; text-align: left; }
+table#annual_table td#aissuenumber { vertical-align: middle; text-align: left; max-width: 50px; }
-table#annual_table td#aissuename { vertical-align: middle; text-align: center; font-size: 9px; }
+table#annual_table td#aissuename { vertical-align: middle; text-align: center; font-size: 9px; min-width: 200px; }
-table#annual_table td#areldate { vertical-align: middle; text-align: center; }
+table#annual_table td#areldate { vertical-align: middle; text-align: center; max-width: 75px; }
-table#annual_table td#astatus { vertical-align: middle; text-align: center; font-size: 13px; }
+table#annual_table td#astatus { vertical-align: middle; text-align: center; max-width: 75px; font-size: 13px; }
-table#annual_table td#aoptions { vertical-align: middle; text-align: center; }
+table#annual_table td#aoptions { vertical-align: middle; text-align: center; min-width: 50px; }
table#history_table { background-color: white; width: 100%; font-size: 13px; }

View File

@@ -490,7 +490,7 @@
<form action="markannuals" method="get" id="markannuals">
<div id="markannuals">Mark selected annuals as
-<select name="action" form="markannuals" onChange="doAjaxCall('markannuals',$(this),'table',true);" data-success="selected issues marked">
+<select name="ann_action" onChange="doAjaxCall('markannuals',$(this),'table',true);" data-success="selected annuals marked">
<option disabled="disabled" selected="selected">Choose...</option>
<option value="Wanted">Wanted</option>
<option value="Skipped">Skipped</option>
@@ -498,22 +498,13 @@
<option value="Archived">Archived</option>
<option value="Ignored">Ignored</option>
</select>
-selected annuals
<input type="hidden" value="Go">
</div>
<table class="display_no_select" id="annual_table">
-<!-- <div class="ui-widget">
-<label for="annseries">Series: </label>
-<input id="annseries" />
-</div>
-<div class="ui-widget" style="margin-top: 2em; font-family: Arial;">
-Result:
-<div id="log" style="height: 200px; width: 300px; overflow: auto;" class="ui-widget-content"></div>
-</div>
--->
<thead>
<tr>
<th id="select" align="left"><input type="checkbox" onClick="toggle(this)" name="annuals" class="checkbox" /></th>

View File

@@ -214,10 +214,6 @@
<input type="text" name="search_delay" value="${config['search_delay']}" size="4" />mins
</div>
</div>
-<div class="row">
-<label>Library Scan Interval</label>
-<input type="text" name="libraryscan_interval" value="${config['libraryscan_interval']}" size="4">mins
-</div>
<legend>Comic Location</legend>
<div>
@@ -461,6 +457,9 @@
<input id="usenzbsu" type="checkbox" onclick="initConfigCheckbox($(this));" name="nzbsu" value="1" ${config['use_nzbsu']} /><legend>NZB.SU</legend>
</div>
<div class="config">
+<div class="row checkbox">
+<input type="checkbox" name="nzbsu_verify" value="1" ${config['nzbsu_verify']} /><label>Verify SSL</label>
+</div>
<div class="row">
<label>NZB.SU UID</label>
<input type="text" name="nzbsu_uid" value="${config['nzbsu_uid']}" size="15" >
@@ -478,6 +477,9 @@
<input id="usedognzb" type="checkbox" onclick="initConfigCheckbox($(this));" name="dognzb" value="1" ${config['use_dognzb']} /><legend>DOGNZB</legend>
</div>
<div class="config">
+<div class="row checkbox">
+<input id="dognzb_verify" type="checkbox" name="dognzb_verify" value="1" ${config['dognzb_verify']} /><label>Verify SSL</label>
+</div>
<div class="row">
<label>DOGNZB API</label>
<input type="text" name="dognzb_apikey" value="${config['dognzb_api']}" size="36">
@@ -485,22 +487,6 @@
</div>
</fieldset>
-<fieldset>
-<div class="row checkbox left clearfix">
-<input id="useomgwtfnzbs" type="checkbox" onclick="initConfigCheckbox($(this));" name="omgwtfnzbs" value="1" ${config['use_omgwtfnzbs']} /><legend>OMGWTFNZBS</legend>
-</div>
-<div class="config">
-<div class="row">
-<label>OMGWTFNZBS UserName</label>
-<input type="text" name="omgwtfnzbs_username" value="${config['omgwtfnzbs_username']}" size="15" >
-</div>
-<div class="row">
-<label>OMGWTFNZBS API</label>
-<input type="text" name="omgwtfnzbs_apikey" value="${config['omgwtfnzbs_api']}" size="36">
-</div>
-</div>
-</fieldset>
<fieldset>
<div class="row checkbox left clearfix">
<input id="useexperimental" type="checkbox" onclick="initConfigCheckbox($(this));" name="experimental" value="1" ${config['use_experimental']} /><legend>Use Experimental Search</legend>
@@ -596,10 +582,15 @@
%>
%for newznab in config['extra_newznabs']:
<%
-if newznab[4] == '1' or newznab[4] == 1:
+if newznab[5] == '1' or newznab[5] == 1:
newznab_enabled = "checked"
else:
newznab_enabled = ""
+if newznab[2] == '1' or newznab[2] == 1:
+newznab_verify = "checked"
+else:
+newznab_verify = ""
%>
<div class="config" id="newznab${newznab_number}">
<div class="row">
@@ -610,13 +601,16 @@
<label>Newznab Host</label>
<input type="text" name="newznab_host${newznab_number}" value="${newznab[1]}" size="30">
</div>
+<div class="row checkbox">
+<input id="newznab_verify" type="checkbox" name="newznab_verify${newznab_number}" value="1" ${newznab_verify} /><label>Verify SSL</label>
+</div>
<div class="row">
<label>Newznab API</label>
-<input type="text" name="newznab_api${newznab_number}" value="${newznab[2]}" size="36">
+<input type="text" name="newznab_api${newznab_number}" value="${newznab[3]}" size="36">
</div>
<div class="row">
<label>Newznab UID</label>
-<input type="text" name="newznab_uid${newznab_number}" value="${newznab[3]}" size="15">
+<input type="text" name="newznab_uid${newznab_number}" value="${newznab[4]}" size="15">
<small>( only needed for RSS feed )</small>
</div>
<div class="row checkbox">
@@ -1339,7 +1333,7 @@
$("#add_newznab").click(function() {
var intId = $("#newznab_providers > div").size() + deletedNewznabs + 1;
-var formfields = $("<div class=\"config\" id=\"newznab" + intId + "\"><div class=\"row\"><label>Newznab Name</label><input type=\"text\" name=\"newznab_name" + intId + "\" size=\"36\"></div><div class=\"row\"><label>Newznab Host</label><input type=\"text\" name=\"newznab_host" + intId + "\" + value=\"http://\" + size=\"30\"></div><div class=\"row\"><label>Newznab API</label><input type=\"text\" name=\"newznab_api" + intId + "\" size=\"36\"></div><div class=\"row\"><label>Newznab UID</label><input type=\"text\" name=\"newznab_uid" + intId + "\" size=\"15\"></div><div class=\"row checkbox\"><input type=\"checkbox\" name=\"newznab_enabled" + intId + "\" value=\"1\" checked /><label>Enabled</label></div>");
+var formfields = $("<div class=\"config\" id=\"newznab" + intId + "\"><div class=\"row\"><label>Newznab Name</label><input type=\"text\" name=\"newznab_name" + intId + "\" size=\"36\"></div><div class=\"row\"><label>Newznab Host</label><input type=\"text\" name=\"newznab_host" + intId + "\" + value=\"http://\" + size=\"30\"></div><div class=\"row checkbox\"><input type=\"checkbox\" name=\"newznab_verify" + intId + "\" value=\"0\" checked /><label>Verify SSL</label></div><div class=\"row\"><label>Newznab API</label><input type=\"text\" name=\"newznab_api" + intId + "\" size=\"36\"></div><div class=\"row\"><label>Newznab UID</label><input type=\"text\" name=\"newznab_uid" + intId + "\" size=\"15\"></div><div class=\"row checkbox\"><input type=\"checkbox\" name=\"newznab_enabled" + intId + "\" value=\"1\" checked /><label>Enabled</label></div>");
var removeButton = $("<div class=\"row\"><input type=\"button\" class=\"remove\" value=\"Remove\" /></div>");
removeButton.click(function() {
$(this).parent().remove();
@@ -1355,31 +1349,31 @@
};
$('#nma_test').click(function () {
-$.get("/testNMA",
+$.get("testNMA",
function (data) { $('#ajaxMsg').html("<div class='msg'><span class='ui-icon ui-icon-check'></span>"+data+"</div>"); });
$('#ajaxMsg').addClass('success').fadeIn().delay(3000).fadeOut();
});
$('#prowl_test').click(function () {
-$.get("/testprowl",
+$.get("testprowl",
function (data) { $('#ajaxMsg').html("<div class='msg'><span class='ui-icon ui-icon-check'></span>"+data+"</div>"); });
$('#ajaxMsg').addClass('success').fadeIn().delay(3000).fadeOut();
});
$('#pushover_test').click(function () {
-$.get("/testpushover",
+$.get("testpushover",
function (data) { $('#ajaxMsg').html("<div class='msg'><span class='ui-icon ui-icon-check'></span>"+data+"</div>"); });
$('#ajaxMsg').addClass('success').fadeIn().delay(3000).fadeOut();
});
$('#boxcar_test').click(function () {
-$.get("/testboxcar",
+$.get("testboxcar",
function (data) { $('#ajaxMsg').html("<div class='msg'><span class='ui-icon ui-icon-check'></span>"+data+"</div>"); });
$('#ajaxMsg').addClass('success').fadeIn().delay(3000).fadeOut();
});
$('#pushbullet_test').click(function () {
-$.get("/testpushbullet",
+$.get("testpushbullet",
function (data) { $('#ajaxMsg').html("<div class='msg'><span class='ui-icon ui-icon-check'></span>"+data+"</div>"); });
$('#ajaxMsg').addClass('success').fadeIn().delay(3000).fadeOut();
});
@@ -1396,7 +1390,6 @@
initConfigCheckbox("#enable_torznab");
initConfigCheckbox("#usenzbsu");
initConfigCheckbox("#usedognzb");
-initConfigCheckbox("#useomgwtfnzbs");
initConfigCheckbox("#enable_torrents");
initConfigCheckbox("#torrent_local");
initConfigCheckbox("#torrent_seedbox");

View File

@@ -351,6 +351,92 @@ table.display tr:last-child td {
table.display tr td#add .ui-icon { display: inline-block; }
+/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * *
+* DataTables row classes
+*/
+table.display_no_select tr.odd.gradeA {
+background-color: #ddffdd;
+}
+table.display_no_select tr.even.gradeA {
+background-color: #ddffdd;
+}
+table.display_no_select tr.odd.gradeC {
+background-color: #ebf5ff;
+}
+table.display_no_select tr.even.gradeC {
+background-color: #ebf5ff;
+}
+table.display_no_select tr.even.gradeH{
+background-color: #FFF5CC;
+}
+table.display_no_select tr.odd.gradeH {
+background-color: #FFF5CC;
+}
+table.display_no_select tr.even.gradeL {
+background-color: #ebf5ff;
+}
+table.display_no_select tr.odd.gradeL {
+background-color: #ebf5ff;
+}
+table.display_no_select tr.odd.gradeX {
+background-color: #ffdddd;
+}
+table.display_no_select tr.even.gradeX {
+background-color: #ffdddd;
+}
+table.display_no_select tr.odd.gradeU {
+background-color: #ddd;
+}
+table.display_no_select tr.even.gradeU {
+background-color: #eee;
+}
+table.display_no_select tr.odd.gradeP {
+background-color: #68FC68;
+}
+table.display_no_select tr.even.gradeP {
+background-color: #68FC68;
+}
+table.display_no_select tr.odd.gradeD {
+background-color: #C7EFC7;
+}
+table.display_no_select tr.odd.gradeZ {
+background-color: #FAFAFA;
+}
+table.display_no_select tr.even.gradeZ {
+background-color: white;
+}
+table.display_no_select tr.gradeL #status {
+background: url("../images/loader_black.gif") no-repeat scroll 15px center transparent;
+font-size: 11px;
+text-indent: -3000px;
+}
+table.display_no_select tr.gradeA td,
+table.display_no_select tr.gradeC td,
+table.display_no_select tr.gradeX td,
+table.display_no_select tr.gradeU td,
+table.display_no_select tr.gradeP td,
+table.display_no_select tr.gradeD td,
+table.display_no_select tr.gradeZ td {border-bottom: 1px solid #FFF;}
+table.display_no_select tr:last-child td {
+border-bottom: 1px solid #eee;
+}
+table.display_no_select tr td#add .ui-icon { display: inline-block; }
/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * *
* Misc

View File

@@ -286,8 +286,7 @@ input,
select,
form .checkbox input,
.configtable td#middle,
-#series_table td#have,
-#album_table td#have {
+#series_table td#have {
vertical-align: middle;
}
input[type="radio"] {
@@ -1228,7 +1227,8 @@ div#artistheader h2 a {
min-width: 275px;
text-align: left;
}
-#series_table th#year {
+#series_table th#year,
+#series_table th#have_percent {
max-width: 25px;
text-align: left;
}
@@ -1256,7 +1256,8 @@ div#artistheader h2 a {
text-align: left;
vertical-align: middle;
}
-#series_table td#year {
+#series_table td#year,
+#series_table td#have_percent {
max-width: 25px;
text-align: left;
vertical-align: middle;

View File

@@ -15,6 +15,7 @@
<th id="year">Year</th>
<th id="issue">Last Issue</th>
<th id="published">Published</th>
+<th id="have_percent">Have %</th>
<th id="have">Have</th>
<th id="status">Status</th>
<th id="active">Active</th>
@@ -46,6 +47,7 @@
<td id="year"><span title="${comic['ComicYear']}"></span>${comic['ComicYear']}</td>
<td id="issue"><span title="${comic['LatestIssue']}"></span># ${comic['LatestIssue']}</td>
<td id="published">${comic['LatestDate']}</td>
+<td id="have_percent">${comic['percent']}</td>
<td id="have"><span title="${comic['percent']}"></span>${css}<div style="width:${comic['percent']}%"><span class="progressbar-front-text">${comic['haveissues']}/${comic['totalissues']}</span></div></td>
<td id="status">${comic['recentstatus']}</td>
<td id="active" align="center">
@@ -74,6 +76,12 @@
$('#series_table').dataTable(
{
"bDestroy": true,
+"aoColumnDefs": [
+{ 'bSortable': false, 'aTargets': [5] },
+{ 'bVisible': false, 'aTargets': [5] },
+{ 'sType': 'numeric', 'aTargets': [5] },
+{ 'iDataSort': [5], 'aTargets': [6] }
+],
"aLengthMenu": [[10, 25, 50, -1], [10, 25, 50, 'All' ]],
"oLanguage": {
"sLengthMenu":"Show _MENU_ results per page",

View File

@@ -44,10 +44,6 @@
(this.value==this.defaultValue) this.value='';" name="path" size="70" />
%endif
</div>
-<!-- <div class="row checkbox">
-<input type="checkbox" name="libraryscan" id="libraryscan" value="1" ${checked(mylar.LIBRARYSCAN)}><label>Automatically Scan Library</label>
-</div>
--->
<div class="row checkbox">
<input type="checkbox" name="autoadd" id="autoadd" value="1" ${checked(mylar.ADD_COMICS)}><label>Auto-add new series</label>
</div>

View File

@@ -136,8 +136,6 @@
<%def name="javascriptIncludes()">
<script src="js/libs/jquery.dataTables.min.js"></script>
-<script src="js/libs/jquery.dataTables.rowReordering.js"></script>
<script type="text/javascript">
$("#menu_link_scan").click(function() {
$('#chkoptions').submit();
@@ -163,10 +161,6 @@
"sPaginationType": "full_numbers",
"aaSorting": []
})
-.rowReordering({
-sURL: "orderThis",
-sRequestType: "GET"
-});
resetFilters("item");
}

View File

@@ -27,7 +27,7 @@ import re
import time
import datetime
import ctversion
-import sys
+import os, sys
from bs4 import BeautifulSoup
try:
@@ -50,6 +50,13 @@ from comicvinecacher import ComicVineCacher
from genericmetadata import GenericMetadata
from issuestring import IssueString
+try:
+lib_path = os.path.join(ComicTaggerSettings.baseDir(), '..')
+sys.path.append(lib_path)
+import lib.requests as requests
+except ImportError:
+print "Unable to use requests module. This is a CRITICAL error and ComicTagger cannot proceed. Exiting."
class CVTypeID:
Volume = "4050"
Issue = "4000"
@@ -89,7 +96,7 @@ class ComicVineTalker(QObject):
def __init__(self):
QObject.__init__(self)
-self.api_base_url = "http://api.comicvine.com/"
+self.api_base_url = "http://comicvine.gamespot.com/api"
self.wait_for_rate_limit = False
# key that is registered to comictagger
@@ -100,6 +107,7 @@ class ComicVineTalker(QObject):
else:
self.api_key = ComicVineTalker.api_key
+self.cv_headers = {'User-Agent': 'ComicTagger.[ninjas.walk.alone.fork] - UserAgent + CV Rate Limiting / 1.01 - KATANA'}
self.log_func = None
def setLogFunc( self , log_func ):
@@ -129,10 +137,11 @@
def testKey( self, key ):
test_url = self.api_base_url + "/issue/1/?api_key=" + key + "&format=json&field_list=name"
-resp = urllib2.urlopen( test_url )
-content = resp.read()
-cv_response = json.loads( content )
+r = requests.get(test_url, headers=self.cv_headers)
+#resp = urllib2.urlopen( test_url )
+#content = resp.read()
+cv_response = r.json() #json.loads( r.content )
# Bogus request, but if the key is wrong, you get error 100: "Invalid API Key"
return cv_response[ 'status_code' ] != 100
@@ -149,8 +158,8 @@
while True:
time.sleep(2) #imposed by new CV rate limiting - 1 api request / second (maximum) - safetyset to 2s intervals
#self.writeLog( "Self imposed rate-limiting of 1 api request / second\n" )
-content = self.getUrlContent(url)
-cv_response = json.loads(content)
+cv_response = self.getUrlContent(url)
+#cv_response = json.loads(content)
if self.wait_for_rate_limit and cv_response[ 'status_code' ] == ComicVineTalkerException.RateLimit:
self.writeLog( "Rate limit encountered. Waiting for {0} minutes\n".format(limit_wait_time))
time.sleep(limit_wait_time * 60)
@@ -176,8 +185,9 @@
#print "ATB---", url
for tries in range(3):
try:
-resp = urllib2.urlopen( url )
-return resp.read()
+r = requests.get(url, headers=self.cv_headers)
+#resp = urllib2.urlopen( url )
+return r.json()
except urllib2.HTTPError as e:
if e.getcode() == 500:
self.writeLog( "Try #{0}: ".format(tries+1) )
@@ -641,7 +651,7 @@
# scrape the CV issue page URL to get the alternate cover URLs
resp = urllib2.urlopen( issue_page_url )
content = resp.read()
-alt_cover_url_list = self.parseOutAltCoverUrls( content)
+alt_cover_url_list = self.parseOutAltCoverUrls(content)
# cache this alt cover URL list
self.cacheAlternateCoverURLs( issue_id, alt_cover_url_list )
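Note that getUrlContent now returns the decoded JSON object (via r.json()) instead of the raw response body, so callers such as the rate-limit loop above consume the dict directly and must not call json.loads() on it again. A hedged sketch of the retry shape (exception handling simplified; the real code retries on HTTP 500 via urllib2.HTTPError):

    import time
    import requests

    def get_url_content(url, headers, tries=3):
        # Returns parsed JSON, not raw bytes.
        for attempt in range(tries):
            try:
                r = requests.get(url, headers=headers)
                return r.json()
            except requests.exceptions.RequestException:
                time.sleep(1)  # brief back-off before the next try
        raise RuntimeError('ComicVine did not respond after %d tries' % tries)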

View File

@@ -566,7 +566,9 @@ class PostProcessor(object):
self._log("The file cannot be found in the location provided for metatagging to be used [" + filename_in_error + "]. Please verify it exists, and re-run if necessary. Attempting to continue without metatagging...")
logger.error(module + ' The file cannot be found in the location provided for metatagging to be used [' + filename_in_error + ']. Please verify it exists, and re-run if necessary. Attempting to continue without metatagging...')
else:
+odir = os.path.split(metaresponse)[0]
ofilename = os.path.split(metaresponse)[1]
+ext = os.path.splitext(metaresponse)[1]
logger.info(module + ' Sucessfully wrote metadata to .cbz (' + ofilename + ') - Continuing..')
self._log('Sucessfully wrote metadata to .cbz (' + ofilename + ') - proceeding...')
@@ -809,7 +811,9 @@
self._log("The file cannot be found in the location provided for metatagging [" + filename_in_error + "]. Please verify it exists, and re-run if necessary. Attempting to continue without metatagging...")
logger.error(module + ' The file cannot be found in the location provided for metagging [' + filename_in_error + ']. Please verify it exists, and re-run if necessary. Attempting to continue without metatagging...')
else:
+odir = os.path.split(metaresponse)[0]
ofilename = os.path.split(metaresponse)[1]
+ext = os.path.splitext(metaresponse)[1]
logger.info(module + ' Sucessfully wrote metadata to .cbz (' + ofilename + ') - Continuing..')
self._log('Sucessfully wrote metadata to .cbz (' + ofilename + ') - proceeding...')
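The two added lines capture the directory and extension of the tagged file alongside its name; the directory (odir) is what the tidy-up step further down compares against self.nzb_folder before deleting an emptied work folder. For reference, the stdlib calls split a path like so (the path itself is illustrative):

    import os

    metaresponse = '/tmp/mylar_work/Some Series 001 (2016).cbz'  # hypothetical path
    odir = os.path.split(metaresponse)[0]       # '/tmp/mylar_work'
    ofilename = os.path.split(metaresponse)[1]  # 'Some Series 001 (2016).cbz'
    ext = os.path.splitext(metaresponse)[1]     # '.cbz'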
@@ -1025,6 +1029,8 @@
if '.' in issuenum:
iss_find = issuenum.find('.')
iss_b4dec = issuenum[:iss_find]
+if iss_b4dec == '':
+iss_b4dec = '0'
iss_decval = issuenum[iss_find +1:]
if iss_decval.endswith('.'): iss_decval = iss_decval[:-1]
if int(iss_decval) == 0:
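This guard is the fix for the padded-decimal post-processing problem: an issue number like '.5' splits into an empty string before the decimal point, which later int() calls would choke on. A worked instance of the two added lines:

    issuenum = '.5'
    iss_find = issuenum.find('.')          # 0
    iss_b4dec = issuenum[:iss_find]        # '' -- the broken case
    if iss_b4dec == '':
        iss_b4dec = '0'                    # treat '.5' as '0.5'
    iss_decval = issuenum[iss_find + 1:]   # '5'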
@@ -1056,16 +1062,33 @@
elif mylar.ZERO_LEVEL_N == "00x": zeroadd = "00"
logger.fdebug(module + ' Zero Suppression set to : ' + str(mylar.ZERO_LEVEL_N))
-if str(len(issueno)) > 1:
-if issueno.isalpha():
-self._log('issue detected as an alpha.')
-prettycomiss = str(issueno)
-elif int(issueno) < 0:
-self._log("issue detected is a negative")
-prettycomiss = '-' + str(zeroadd) + str(abs(issueno))
-elif int(issueno) < 10:
-self._log("issue detected less than 10")
+prettycomiss = None
+if issueno.isalpha():
+logger.fdebug('issue detected as an alpha.')
+prettycomiss = str(issueno)
+else:
+try:
+x = float(issueno)
+#validity check
+if x < 0:
+logger.info('I\'ve encountered a negative issue #: ' + str(issueno) + '. Trying to accomodate.')
+prettycomiss = '-' + str(zeroadd) + str(issueno[1:])
+elif x >= 0:
+pass
+else:
+raise ValueError
+except ValueError, e:
+logger.warn('Unable to properly determine issue number [' + str(issueno) + '] - you should probably log this on github for help.')
+return
+if prettycomiss is None and len(str(issueno)) > 0:
+#if int(issueno) < 0:
+# self._log("issue detected is a negative")
+# prettycomiss = '-' + str(zeroadd) + str(abs(issueno))
+if int(issueno) < 10:
+logger.fdebug('issue detected less than 10')
if '.' in iss:
if int(iss_decval) > 0:
issueno = str(iss)
@@ -1076,9 +1099,9 @@
prettycomiss = str(zeroadd) + str(iss)
if issue_except != 'None':
prettycomiss = str(prettycomiss) + issue_except
-self._log("Zero level supplement set to " + str(mylar.ZERO_LEVEL_N) + ". Issue will be set as : " + str(prettycomiss))
+logger.fdebug('Zero level supplement set to ' + str(mylar.ZERO_LEVEL_N) + '. Issue will be set as : ' + str(prettycomiss))
elif int(issueno) >= 10 and int(issueno) < 100:
-self._log("issue detected greater than 10, but less than 100")
+logger.fdebug('issue detected greater than 10, but less than 100')
if mylar.ZERO_LEVEL_N == "none":
zeroadd = ""
else:
@@ -1093,19 +1116,73 @@
prettycomiss = str(zeroadd) + str(iss)
if issue_except != 'None':
prettycomiss = str(prettycomiss) + issue_except
-self._log("Zero level supplement set to " + str(mylar.ZERO_LEVEL_N) + ".Issue will be set as : " + str(prettycomiss))
+logger.fdebug('Zero level supplement set to ' + str(mylar.ZERO_LEVEL_N) + '.Issue will be set as : ' + str(prettycomiss))
else:
-self._log("issue detected greater than 100")
+logger.fdebug('issue detected greater than 100')
if '.' in iss:
if int(iss_decval) > 0:
issueno = str(iss)
prettycomiss = str(issueno)
if issue_except != 'None':
prettycomiss = str(prettycomiss) + issue_except
-self._log("Zero level supplement set to " + str(mylar.ZERO_LEVEL_N) + ". Issue will be set as : " + str(prettycomiss))
+logger.fdebug('Zero level supplement set to ' + str(mylar.ZERO_LEVEL_N) + '. Issue will be set as : ' + str(prettycomiss))
-else:
+elif len(str(issueno)) == 0:
prettycomiss = str(issueno)
-self._log("issue length error - cannot determine length. Defaulting to None: " + str(prettycomiss))
+logger.fdebug('issue length error - cannot determine length. Defaulting to None: ' + str(prettycomiss))
+#start outdated?
+# if str(len(issueno)) > 1:
+# if issueno.isalpha():
+# self._log('issue detected as an alpha.')
+# prettycomiss = str(issueno)
+# elif int(issueno) < 0:
+# self._log("issue detected is a negative")
+# prettycomiss = '-' + str(zeroadd) + str(abs(issueno))
+# elif int(issueno) < 10:
+# self._log("issue detected less than 10")
+# if '.' in iss:
+# if int(iss_decval) > 0:
+# issueno = str(iss)
+# prettycomiss = str(zeroadd) + str(iss)
+# else:
+# prettycomiss = str(zeroadd) + str(int(issueno))
+# else:
+# prettycomiss = str(zeroadd) + str(iss)
+# if issue_except != 'None':
+# prettycomiss = str(prettycomiss) + issue_except
+# self._log("Zero level supplement set to " + str(mylar.ZERO_LEVEL_N) + ". Issue will be set as : " + str(prettycomiss))
+# elif int(issueno) >= 10 and int(issueno) < 100:
+# self._log("issue detected greater than 10, but less than 100")
+# if mylar.ZERO_LEVEL_N == "none":
+# zeroadd = ""
+# else:
+# zeroadd = "0"
+# if '.' in iss:
+# if int(iss_decval) > 0:
+# issueno = str(iss)
+# prettycomiss = str(zeroadd) + str(iss)
+# else:
+# prettycomiss = str(zeroadd) + str(int(issueno))
+# else:
+# prettycomiss = str(zeroadd) + str(iss)
+# if issue_except != 'None':
+# prettycomiss = str(prettycomiss) + issue_except
+# self._log("Zero level supplement set to " + str(mylar.ZERO_LEVEL_N) + ".Issue will be set as : " + str(prettycomiss))
+# else:
+# self._log("issue detected greater than 100")
+# if '.' in iss:
+# if int(iss_decval) > 0:
+# issueno = str(iss)
+# prettycomiss = str(issueno)
+# if issue_except != 'None':
+# prettycomiss = str(prettycomiss) + issue_except
+# self._log("Zero level supplement set to " + str(mylar.ZERO_LEVEL_N) + ". Issue will be set as : " + str(prettycomiss))
+# else:
+# prettycomiss = str(issueno)
+# self._log("issue length error - cannot determine length. Defaulting to None: " + str(prettycomiss))
+#--end outdated?
if annchk == "yes":
self._log("Annual detected.")
@@ -1463,7 +1540,7 @@
#tidyup old path
try:
if os.path.isdir(odir) and odir != self.nzb_folder:
-logger.fdebug(module + 'self.nzb_folder: ' + self.nzb_folder)
+logger.fdebug(module + ' self.nzb_folder: ' + self.nzb_folder)
# check to see if the directory is empty or not.
if not os.listdir(odir):
logger.fdebug(module + ' Tidying up. Deleting folder : ' + odir)

View File

@@ -135,6 +135,7 @@ SEARCH_DELAY = 1
COMICVINE_API = None
DEFAULT_CVAPI = '583939a3df0a25fc4e8b7a29934a13078002dc27'
CVAPI_RATE = 2
+CV_HEADERS = None
CHECK_GITHUB = False
CHECK_GITHUB_ON_STARTUP = False
@@ -152,14 +153,12 @@ DELETE_REMOVE_DIR = False
ADD_COMICS = False
COMIC_DIR = None
-LIBRARYSCAN = False
IMP_MOVE = False
IMP_RENAME = False
IMP_METADATA = True # should default to False - this is enabled for testing only.
SEARCH_INTERVAL = 360
NZB_STARTUP_SEARCH = False
-LIBRARYSCAN_INTERVAL = 300
DOWNLOAD_SCAN_INTERVAL = 5
FOLDER_SCAN_LOG_VERBOSE = 0
CHECK_FOLDER = None
@@ -244,18 +243,17 @@ PROVIDER_ORDER = None
NZBSU = False
NZBSU_UID = None
NZBSU_APIKEY = None
+NZBSU_VERIFY = True
DOGNZB = False
DOGNZB_APIKEY = None
+DOGNZB_VERIFY = True
-OMGWTFNZBS = False
-OMGWTFNZBS_USERNAME = None
-OMGWTFNZBS_APIKEY = None
NEWZNAB = False
NEWZNAB_NAME = None
NEWZNAB_HOST = None
NEWZNAB_APIKEY = None
+NEWZNAB_VERIFY = True
NEWZNAB_UID = None
NEWZNAB_ENABLED = False
EXTRA_NEWZNABS = []
@@ -266,6 +264,7 @@ TORZNAB_NAME = None
TORZNAB_HOST = None
TORZNAB_APIKEY = None
TORZNAB_CATEGORY = None
+TORZNAB_VERIFY = False
EXPERIMENTAL = False
ALTEXPERIMENTAL = False
@@ -350,6 +349,7 @@ SEEDBOX_WATCHDIR = None
ENABLE_TORRENT_SEARCH = 0
ENABLE_KAT = 0
KAT_PROXY = None
+KAT_VERIFY = True
ENABLE_32P = 0
MODE_32P = None #0 = legacymode, #1 = authmode
@@ -412,19 +412,19 @@ def check_setting_str(config, cfg_name, item_name, def_val, log=True):
def initialize():
with INIT_LOCK:
-global __INITIALIZED__, DBCHOICE, DBUSER, DBPASS, DBNAME, COMICVINE_API, DEFAULT_CVAPI, CVAPI_RATE, FULL_PATH, PROG_DIR, VERBOSE, DAEMON, UPCOMING_SNATCHED, COMICSORT, DATA_DIR, CONFIG_FILE, CFG, CONFIG_VERSION, LOG_DIR, CACHE_DIR, MAX_LOGSIZE, OLDCONFIG_VERSION, OS_DETECT, \
+global __INITIALIZED__, DBCHOICE, DBUSER, DBPASS, DBNAME, COMICVINE_API, DEFAULT_CVAPI, CVAPI_RATE, CV_HEADERS, FULL_PATH, PROG_DIR, VERBOSE, DAEMON, UPCOMING_SNATCHED, COMICSORT, DATA_DIR, CONFIG_FILE, CFG, CONFIG_VERSION, LOG_DIR, CACHE_DIR, MAX_LOGSIZE, OLDCONFIG_VERSION, OS_DETECT, \
queue, LOCAL_IP, EXT_IP, HTTP_PORT, HTTP_HOST, HTTP_USERNAME, HTTP_PASSWORD, HTTP_ROOT, ENABLE_HTTPS, HTTPS_CERT, HTTPS_KEY, HTTPS_FORCE_ON, HOST_RETURN, API_ENABLED, API_KEY, DOWNLOAD_APIKEY, LAUNCH_BROWSER, GIT_PATH, SAFESTART, AUTO_UPDATE, \
CURRENT_VERSION, LATEST_VERSION, CHECK_GITHUB, CHECK_GITHUB_ON_STARTUP, CHECK_GITHUB_INTERVAL, GIT_USER, GIT_BRANCH, USER_AGENT, DESTINATION_DIR, MULTIPLE_DEST_DIRS, CREATE_FOLDERS, DELETE_REMOVE_DIR, \
DOWNLOAD_DIR, USENET_RETENTION, SEARCH_INTERVAL, NZB_STARTUP_SEARCH, INTERFACE, DUPECONSTRAINT, AUTOWANT_ALL, AUTOWANT_UPCOMING, ZERO_LEVEL, ZERO_LEVEL_N, COMIC_COVER_LOCAL, HIGHCOUNT, \
-LIBRARYSCAN, LIBRARYSCAN_INTERVAL, DOWNLOAD_SCAN_INTERVAL, FOLDER_SCAN_LOG_VERBOSE, IMPORTLOCK, NZB_DOWNLOADER, USE_SABNZBD, SAB_HOST, SAB_USERNAME, SAB_PASSWORD, SAB_APIKEY, SAB_CATEGORY, SAB_PRIORITY, SAB_TO_MYLAR, SAB_DIRECTORY, USE_BLACKHOLE, BLACKHOLE_DIR, ADD_COMICS, COMIC_DIR, IMP_MOVE, IMP_RENAME, IMP_METADATA, \
+DOWNLOAD_SCAN_INTERVAL, FOLDER_SCAN_LOG_VERBOSE, IMPORTLOCK, NZB_DOWNLOADER, USE_SABNZBD, SAB_HOST, SAB_USERNAME, SAB_PASSWORD, SAB_APIKEY, SAB_CATEGORY, SAB_PRIORITY, SAB_TO_MYLAR, SAB_DIRECTORY, USE_BLACKHOLE, BLACKHOLE_DIR, ADD_COMICS, COMIC_DIR, IMP_MOVE, IMP_RENAME, IMP_METADATA, \
-USE_NZBGET, NZBGET_HOST, NZBGET_PORT, NZBGET_USERNAME, NZBGET_PASSWORD, NZBGET_CATEGORY, NZBGET_PRIORITY, NZBGET_DIRECTORY, NZBSU, NZBSU_UID, NZBSU_APIKEY, DOGNZB, DOGNZB_APIKEY, OMGWTFNZBS, OMGWTFNZBS_USERNAME, OMGWTFNZBS_APIKEY, \
+USE_NZBGET, NZBGET_HOST, NZBGET_PORT, NZBGET_USERNAME, NZBGET_PASSWORD, NZBGET_CATEGORY, NZBGET_PRIORITY, NZBGET_DIRECTORY, NZBSU, NZBSU_UID, NZBSU_APIKEY, NZBSU_VERIFY, DOGNZB, DOGNZB_APIKEY, DOGNZB_VERIFY, \
-NEWZNAB, NEWZNAB_NAME, NEWZNAB_HOST, NEWZNAB_APIKEY, NEWZNAB_UID, NEWZNAB_ENABLED, EXTRA_NEWZNABS, NEWZNAB_EXTRA, \
+NEWZNAB, NEWZNAB_NAME, NEWZNAB_HOST, NEWZNAB_APIKEY, NEWZNAB_VERIFY, NEWZNAB_UID, NEWZNAB_ENABLED, EXTRA_NEWZNABS, NEWZNAB_EXTRA, \
-ENABLE_TORZNAB, TORZNAB_NAME, TORZNAB_HOST, TORZNAB_APIKEY, TORZNAB_CATEGORY, \
+ENABLE_TORZNAB, TORZNAB_NAME, TORZNAB_HOST, TORZNAB_APIKEY, TORZNAB_CATEGORY, TORZNAB_VERIFY, \
EXPERIMENTAL, ALTEXPERIMENTAL, \
ENABLE_META, CMTAGGER_PATH, CBR2CBZ_ONLY, CT_TAG_CR, CT_TAG_CBL, CT_CBZ_OVERWRITE, UNRAR_CMD, CT_SETTINGSPATH, UPDATE_ENDED, INDIE_PUB, BIGGIE_PUB, IGNORE_HAVETOTAL, SNATCHED_HAVETOTAL, PROVIDER_ORDER, \
dbUpdateScheduler, searchScheduler, RSSScheduler, WeeklyScheduler, VersionScheduler, FolderMonitorScheduler, \
ENABLE_TORRENTS, MINSEEDS, TORRENT_LOCAL, LOCAL_WATCHDIR, TORRENT_SEEDBOX, SEEDBOX_HOST, SEEDBOX_PORT, SEEDBOX_USER, SEEDBOX_PASS, SEEDBOX_WATCHDIR, \
-ENABLE_RSS, RSS_CHECKINTERVAL, RSS_LASTRUN, FAILED_DOWNLOAD_HANDLING, FAILED_AUTO, ENABLE_TORRENT_SEARCH, ENABLE_KAT, KAT_PROXY, ENABLE_32P, MODE_32P, KEYS_32P, RSSFEED_32P, USERNAME_32P, PASSWORD_32P, AUTHKEY_32P, PASSKEY_32P, FEEDINFO_32P, VERIFY_32P, SNATCHEDTORRENT_NOTIFY, \
+ENABLE_RSS, RSS_CHECKINTERVAL, RSS_LASTRUN, FAILED_DOWNLOAD_HANDLING, FAILED_AUTO, ENABLE_TORRENT_SEARCH, ENABLE_KAT, KAT_PROXY, KAT_VERIFY, ENABLE_32P, MODE_32P, KEYS_32P, RSSFEED_32P, USERNAME_32P, PASSWORD_32P, AUTHKEY_32P, PASSKEY_32P, FEEDINFO_32P, VERIFY_32P, SNATCHEDTORRENT_NOTIFY, \
PROWL_ENABLED, PROWL_PRIORITY, PROWL_KEYS, PROWL_ONSNATCH, NMA_ENABLED, NMA_APIKEY, NMA_PRIORITY, NMA_ONSNATCH, PUSHOVER_ENABLED, PUSHOVER_PRIORITY, PUSHOVER_APIKEY, PUSHOVER_USERKEY, PUSHOVER_ONSNATCH, BOXCAR_ENABLED, BOXCAR_ONSNATCH, BOXCAR_TOKEN, \
PUSHBULLET_ENABLED, PUSHBULLET_APIKEY, PUSHBULLET_DEVICEID, PUSHBULLET_ONSNATCH, LOCMOVE, NEWCOM_DIR, FFTONEWCOM_DIR, \
PREFERRED_QUALITY, MOVE_FILES, RENAME_FILES, LOWERCASE_FILENAMES, USE_MINSIZE, MINSIZE, USE_MAXSIZE, MAXSIZE, CORRECT_METADATA, FOLDER_FORMAT, FILE_FORMAT, REPLACE_CHAR, REPLACE_SPACES, ADD_TO_CSV, CVINFO, LOG_LEVEL, POST_PROCESSING, POST_PROCESSING_SCRIPT, FILE_OPTS, SEARCH_DELAY, GRABBAG_DIR, READ2FILENAME, SEND2READ, TAB_ENABLE, TAB_HOST, TAB_USER, TAB_PASS, TAB_DIRECTORY, STORYARCDIR, COPY2ARCDIR, CVURL, CHECK_FOLDER, ENABLE_CHECK_FOLDER, \
@@ -439,7 +439,6 @@ def initialize():
CheckSection('NZBGet')
CheckSection('NZBsu')
CheckSection('DOGnzb')
-CheckSection('OMGWTFNZBS')
CheckSection('Experimental')
CheckSection('Newznab')
CheckSection('Torznab')
@@ -486,8 +485,7 @@ def initialize():
MAX_LOGSIZE = 1000000
GIT_PATH = check_setting_str(CFG, 'General', 'git_path', '')
LOG_DIR = check_setting_str(CFG, 'General', 'log_dir', '')
-if not CACHE_DIR:
-CACHE_DIR = check_setting_str(CFG, 'General', 'cache_dir', '')
+CACHE_DIR = check_setting_str(CFG, 'General', 'cache_dir', '')
CHECK_GITHUB = bool(check_setting_int(CFG, 'General', 'check_github', 1))
CHECK_GITHUB_ON_STARTUP = bool(check_setting_int(CFG, 'General', 'check_github_on_startup', 1))
@@ -506,8 +504,6 @@ def initialize():
ALT_PULL = bool(check_setting_int(CFG, 'General', 'alt_pull', 0))
SEARCH_INTERVAL = check_setting_int(CFG, 'General', 'search_interval', 360)
NZB_STARTUP_SEARCH = bool(check_setting_int(CFG, 'General', 'nzb_startup_search', 0))
-LIBRARYSCAN = bool(check_setting_int(CFG, 'General', 'libraryscan', 1))
-LIBRARYSCAN_INTERVAL = check_setting_int(CFG, 'General', 'libraryscan_interval', 300)
ADD_COMICS = bool(check_setting_int(CFG, 'General', 'add_comics', 0))
COMIC_DIR = check_setting_str(CFG, 'General', 'comic_dir', '')
IMP_MOVE = bool(check_setting_int(CFG, 'General', 'imp_move', 0))
@@ -646,6 +642,7 @@ def initialize():
ENABLE_TORRENT_SEARCH = bool(check_setting_int(CFG, 'Torrents', 'enable_torrent_search', 0))
ENABLE_KAT = bool(check_setting_int(CFG, 'Torrents', 'enable_kat', 0))
KAT_PROXY = check_setting_str(CFG, 'Torrents', 'kat_proxy', '')
+KAT_VERIFY = bool(check_setting_int(CFG, 'Torrents', 'kat_verify', 1))
ENABLE_CBT = check_setting_str(CFG, 'Torrents', 'enable_cbt', '-1')
if ENABLE_CBT != '-1':
@@ -730,23 +727,18 @@ def initialize():
NZBSU = bool(check_setting_int(CFG, 'NZBsu', 'nzbsu', 0))
NZBSU_UID = check_setting_str(CFG, 'NZBsu', 'nzbsu_uid', '')
NZBSU_APIKEY = check_setting_str(CFG, 'NZBsu', 'nzbsu_apikey', '')
+NZBSU_VERIFY = bool(check_setting_int(CFG, 'NZBsu', 'nzbsu_verify', 1))
if NZBSU:
PR.append('nzb.su')
PR_NUM +=1
DOGNZB = bool(check_setting_int(CFG, 'DOGnzb', 'dognzb', 0))
DOGNZB_APIKEY = check_setting_str(CFG, 'DOGnzb', 'dognzb_apikey', '')
+DOGNZB_VERIFY = bool(check_setting_int(CFG, 'DOGnzb', 'dognzb_verify', 1))
if DOGNZB:
PR.append('dognzb')
PR_NUM +=1
-OMGWTFNZBS = bool(check_setting_int(CFG, 'OMGWTFNZBS', 'omgwtfnzbs', 0))
-OMGWTFNZBS_USERNAME = check_setting_str(CFG, 'OMGWTFNZBS', 'omgwtfnzbs_username', '')
-OMGWTFNZBS_APIKEY = check_setting_str(CFG, 'OMGWTFNZBS', 'omgwtfnzbs_apikey', '')
-if OMGWTFNZBS:
-PR.append('OMGWTFNZBS')
-PR_NUM +=1
EXPERIMENTAL = bool(check_setting_int(CFG, 'Experimental', 'experimental', 0))
ALTEXPERIMENTAL = bool(check_setting_int(CFG, 'Experimental', 'altexperimental', 1))
if EXPERIMENTAL:
@@ -757,6 +749,8 @@ def initialize():
TORZNAB_NAME = check_setting_str(CFG, 'Torznab', 'torznab_name', '')
TORZNAB_HOST = check_setting_str(CFG, 'Torznab', 'torznab_host', '')
TORZNAB_APIKEY = check_setting_str(CFG, 'Torznab', 'torznab_apikey', '')
+TORZNAB_VERIFY = bool(check_setting_int(CFG, 'Torznab', 'torznab_verify', 0))
TORZNAB_CATEGORY = check_setting_str(CFG, 'Torznab', 'torznab_category', '')
if ENABLE_TORZNAB:
PR.append('Torznab')
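Each provider now carries its own verify flag, so an indexer behind a self-signed certificate can opt out of certificate checking without disabling it globally. The flag is presumably threaded into the provider's search request roughly like this (a sketch; the search code itself is not part of this diff):

    import requests

    def provider_get(url, params, verify_ssl):
        # verify=False skips certificate validation for this provider only,
        # e.g. verify_ssl=mylar.NZBSU_VERIFY for nzb.su queries.
        return requests.get(url, params=params, verify=verify_ssl, timeout=30)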
@@ -777,6 +771,8 @@ def initialize():
elif CONFIG_VERSION == '5':
NEWZNAB_UID = check_setting_str(CFG, 'Newznab', 'newznab_uid', '')
NEWZNAB_NAME = check_setting_str(CFG, 'Newznab', 'newznab_name', '')
+elif CONFIG_VERSION == '6':
+NEWZNAB_VERIFY = bool(check_setting_int(CFG, 'Newznab', 'newznab_verify', 0))
# this gets nasty
# if configv is != 4, then the NewznabName doesn't exist so we need to create and add it and
@@ -789,32 +785,36 @@ def initialize():
EN_NUM = 4 #EN_NUM is the number of iterations of itertools to use
elif CONFIG_VERSION == '5':
EN_NUM = 5 #addition of Newznab UID
+elif CONFIG_VERSION == '6':
+EN_NUM = 6
else:
EN_NUM = 3
EXTRA_NEWZNABS = list(itertools.izip(*[itertools.islice(flattened_newznabs, i, None, EN_NUM) for i in range(EN_NUM)]))
#if ConfigV3 add the nzb_name to it..
-if CONFIG_VERSION != '5': #just bump it up to V5 and throw in the UID too.
+if CONFIG_VERSION != '6': #just bump it up to V6 and throw in the VERIFY too.
ENABS = []
for en in EXTRA_NEWZNABS:
#set newznabname to newznab address initially so doesn't bomb.
if CONFIG_VERSION == '4':
-ENABS.append((en[0], en[1], en[2], '1', en[3])) #0=name,1=host,2=api,3=enabled/disabled
+ENABS.append((en[0], en[1], '0', en[2], '1', en[3])) #0=name,1=host,2=api,3=enabled/disabled
+elif CONFIG_VERSION == '5':
+ENABS.append((en[0], en[1], '0', en[2], en[3], en[4])) #0=name,1=host,2=api,3=enabled/disabled
else:
-ENABS.append((en[0], en[0], en[1], '1', en[2])) #0=host,1=api,2=enabled/disabled
+ENABS.append((en[0], en[0], '0', en[1], '1', en[2])) #0=name,1=host,2=api,3=uid,4=enabled/disabled
#now we hammer the EXTRA_NEWZNABS with the corrected version
EXTRA_NEWZNABS = ENABS
#update the configV and write the config.
-CONFIG_VERSION = '5'
+CONFIG_VERSION = '6'
config_write()
#to counteract the loss of the 1st newznab entry because of a switch, let's rewrite to the tuple
if NEWZNAB_HOST and CONFIG_VERSION:
-EXTRA_NEWZNABS.append((NEWZNAB_NAME, NEWZNAB_HOST, NEWZNAB_APIKEY, NEWZNAB_UID, int(NEWZNAB_ENABLED)))
+EXTRA_NEWZNABS.append((NEWZNAB_NAME, NEWZNAB_HOST, NEWZNAB_VERIFY, NEWZNAB_APIKEY, NEWZNAB_UID, int(NEWZNAB_ENABLED)))
#PR_NUM +=1
# Need to rewrite config here and bump up config version
-CONFIG_VERSION = '5'
+CONFIG_VERSION = '6'
config_write()
#print 'PR_NUM:' + str(PR_NUM)
@ -822,7 +822,7 @@ def initialize():
for ens in EXTRA_NEWZNABS: for ens in EXTRA_NEWZNABS:
#print ens[0] #print ens[0]
#print 'enabled:' + str(ens[4]) #print 'enabled:' + str(ens[4])
if ens[4] == '1': # if newznabs are enabled if ens[5] == '1': # if newznabs are enabled
if ens[0] == "": if ens[0] == "":
PR.append(ens[1]) PR.append(ens[1])
else: else:
@@ -985,18 +985,22 @@ def initialize():
USER_AGENT = 'Mylar/' +str(hash) +'(' +vers +') +http://www.github.com/evilhero/mylar/'
+CV_HEADERS = {'User-Agent': 'Mozilla/5.0 (Windows NT 6.1; WOW64; rv:40.0) Gecko/20100101 Firefox/40.1'}
# verbatim DB module.
logger.info('[DB Module] Loading : ' + DBCHOICE + ' as the database module to use.')
+logger.info('[Cache Check] Cache directory currently set to : ' + CACHE_DIR)
# Put the cache dir in the data dir for now
if not CACHE_DIR:
CACHE_DIR = os.path.join(str(DATA_DIR), 'cache')
+logger.info('[Cache Check] Cache directory not found in configuration. Defaulting location to : ' + CACHE_DIR)
if not os.path.exists(CACHE_DIR):
try:
os.makedirs(CACHE_DIR)
except OSError:
-logger.error('Could not create cache dir. Check permissions of datadir: ' + DATA_DIR)
+logger.error('[Cache Check] Could not create cache dir. Check permissions of datadir: ' + DATA_DIR)
#ComicVine API Check
if COMICVINE_API is None or COMICVINE_API == '':
@@ -1045,7 +1049,7 @@ def initialize():
logger.info('Synology Parsing Fix already implemented. No changes required at this time.')
#set the default URL for ComicVine API here.
-CVURL = 'http://www.comicvine.com/api/'
+CVURL = 'http://comicvine.gamespot.com/api/'
#comictagger - force to use included version if option is enabled.
if ENABLE_META:
@@ -1241,8 +1245,6 @@ def config_write():
new_config['General']['alt_pull'] = int(ALT_PULL)
new_config['General']['search_interval'] = SEARCH_INTERVAL
new_config['General']['nzb_startup_search'] = int(NZB_STARTUP_SEARCH)
-new_config['General']['libraryscan'] = int(LIBRARYSCAN)
-new_config['General']['libraryscan_interval'] = LIBRARYSCAN_INTERVAL
new_config['General']['add_comics'] = int(ADD_COMICS)
new_config['General']['comic_dir'] = COMIC_DIR
new_config['General']['imp_move'] = int(IMP_MOVE)
@@ -1349,6 +1351,7 @@ def config_write():
new_config['Torrents']['enable_torrent_search'] = int(ENABLE_TORRENT_SEARCH)
new_config['Torrents']['enable_kat'] = int(ENABLE_KAT)
new_config['Torrents']['kat_proxy'] = KAT_PROXY
+new_config['Torrents']['kat_verify'] = KAT_VERIFY
new_config['Torrents']['enable_32p'] = int(ENABLE_32P)
new_config['Torrents']['mode_32p'] = int(MODE_32P)
new_config['Torrents']['passkey_32p'] = PASSKEY_32P
@@ -1382,15 +1385,12 @@ def config_write():
new_config['NZBsu']['nzbsu'] = int(NZBSU)
new_config['NZBsu']['nzbsu_uid'] = NZBSU_UID
new_config['NZBsu']['nzbsu_apikey'] = NZBSU_APIKEY
+new_config['NZBsu']['nzbsu_verify'] = NZBSU_VERIFY
new_config['DOGnzb'] = {}
new_config['DOGnzb']['dognzb'] = int(DOGNZB)
new_config['DOGnzb']['dognzb_apikey'] = DOGNZB_APIKEY
+new_config['DOGnzb']['dognzb_verify'] = DOGNZB_VERIFY
-new_config['OMGWTFNZBS'] = {}
-new_config['OMGWTFNZBS']['omgwtfnzbs'] = int(OMGWTFNZBS)
-new_config['OMGWTFNZBS']['omgwtfnzbs_username'] = OMGWTFNZBS_USERNAME
-new_config['OMGWTFNZBS']['omgwtfnzbs_apikey'] = OMGWTFNZBS_APIKEY
new_config['Experimental'] = {}
new_config['Experimental']['experimental'] = int(EXPERIMENTAL)
@@ -1402,6 +1402,7 @@ def config_write():
new_config['Torznab']['torznab_host'] = TORZNAB_HOST
new_config['Torznab']['torznab_apikey'] = TORZNAB_APIKEY
new_config['Torznab']['torznab_category'] = TORZNAB_CATEGORY
+new_config['Torznab']['torznab_verify'] = TORZNAB_VERIFY
new_config['Newznab'] = {}
new_config['Newznab']['newznab'] = int(NEWZNAB)
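# --- Illustrative sketch (not part of the commit): how the v4/v5 -> v6 newznab
# tuple migration above behaves. The field layout is an assumption read off the
# indices used in this diff: (name, host, verify, apikey, uid, enabled).
def migrate_newznab_entry(en, config_version):
    if config_version == '4':
        # (name, host, apikey, enabled) -> insert verify='0' and a default uid='1'
        return (en[0], en[1], '0', en[2], '1', en[3])
    elif config_version == '5':
        # (name, host, apikey, uid, enabled) -> insert verify='0'
        return (en[0], en[1], '0', en[2], en[3], en[4])
    # v3 and older: (host, apikey, enabled) -> reuse host as name, default verify/uid
    return (en[0], en[0], '0', en[1], '1', en[2])

# e.g. migrate_newznab_entry(('indexer', 'https://indexer.example/', 'abc123', '1'), '4')
# -> ('indexer', 'https://indexer.example/', '0', 'abc123', '1', '1')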
View File
@@ -71,13 +71,13 @@ def run(dirName, nzbName=None, issueid=None, comversion=None, manual=None, filen
logger.fdebug(module + ' UNRAR path set to : ' + unrar_cmd)
#check for dependencies here - configparser
try:
import configparser
except ImportError:
logger.warn(module + ' configparser not found on system. Please install manually in order to write metadata')
logger.warn(module + ' continuing with PostProcessing, but I am not using metadata.')
return "fail"
if not os.path.exists(unrar_cmd):
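# --- Illustrative sketch (not part of the commit): the dependency probe above now
# runs on every platform rather than only linux. The helper name is hypothetical.
def has_configparser():
    try:
        import configparser  # required for writing metadata via ComicTagger
        return True
    except ImportError:
        return False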
View File
@@ -25,7 +25,7 @@ import mylar
import platform
from bs4 import BeautifulSoup as Soup
import httplib
+import lib.requests as requests
def patch_http_response_read(func):
def inner(*args):
@@ -84,13 +84,23 @@ def pulldetails(comicid, type, issueid=None, offset=1, arclist=None, comicidlist
time.sleep(mylar.CVAPI_RATE)
#download the file:
-file = urllib2.urlopen(PULLURL)
-#convert to string:
-data = file.read()
-#close file because we dont need it anymore:
-file.close()
-#parse the xml you downloaded
-dom = parseString(data)
+#set payload to None for now...
+payload = None
+verify = False
+try:
+r = requests.get(PULLURL, params=payload, verify=verify, headers=mylar.CV_HEADERS)
+except Exception, e:
+logger.warn('Error fetching data from ComicVine: %s' % (e))
+return
+#file = urllib2.urlopen(PULLURL)
+#convert to string:
+#data = file.read()
+#close file because we dont need it anymore:
+#file.close()
+#parse the xml you downloaded
+dom = parseString(r.content) #(data)
return dom
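# --- Illustrative sketch (not part of the commit): the fetch pattern that
# pulldetails() switches to above - every ComicVine query now goes through
# requests with the shared mylar.CV_HEADERS User-Agent and SSL verification off.
# The function name is hypothetical.
def fetch_cv_dom(url):
    try:
        r = requests.get(url, params=None, verify=False, headers=mylar.CV_HEADERS)
    except Exception, e:
        logger.warn('Error fetching data from ComicVine: %s' % (e))
        return None
    return parseString(r.content)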
View File
@@ -21,7 +21,7 @@ import zlib
import pprint
import subprocess
import re
-#import logger
+import hashlib
import mylar
from mylar import logger, helpers
import unicodedata
@@ -226,7 +226,7 @@ def listFiles(dir, watchcomic, Publisher, AlternateSearch=None, manual=None, sar
altsearchcomic = "127372873872871091383 abdkhjhskjhkjdhakajhf"
AS_Alt.append(altsearchcomic)
-for i in watchcomic.split():
+for i in u_watchcomic.split():
if i.isdigit():
numberinseries = 'True'
else:
@@ -262,10 +262,10 @@ def listFiles(dir, watchcomic, Publisher, AlternateSearch=None, manual=None, sar
#logger.fdebug('[FILECHECKER] i : ' + str(i))
if ('(' in i):
bracketsinseries = 'True'
-bracket_length_st = watchcomic.find('(')
+bracket_length_st = u_watchcomic.find('(')
-bracket_length_en = watchcomic.find(')', bracket_length_st)
+bracket_length_en = u_watchcomic.find(')', bracket_length_st)
bracket_length = bracket_length_en - bracket_length_st
-bracket_word = watchcomic[bracket_length_st:bracket_length_en +1]
+bracket_word = u_watchcomic[bracket_length_st:bracket_length_en +1]
if mylar.FOLDER_SCAN_LOG_VERBOSE:
logger.fdebug('[FILECHECKER] bracketinseries: ' + str(bracket_word))
@@ -306,7 +306,7 @@ def listFiles(dir, watchcomic, Publisher, AlternateSearch=None, manual=None, sar
#if the series has digits this f's it up.
if numberinseries == 'True' or decimalinseries == 'True':
#we need to remove the series from the subname and then search the remainder.
-watchname = re.sub('[\:\;\!\'\/\?\+\=\_\%\-]', '', watchcomic) #remove spec chars for watchcomic match.
+watchname = re.sub('[\:\;\!\'\/\?\+\=\_\%\-]', '', u_watchcomic) #remove spec chars for watchcomic match.
if mylar.FOLDER_SCAN_LOG_VERBOSE:
logger.fdebug('[FILECHECKER] watch-cleaned: ' + watchname)
subthis = re.sub('.cbr', '', subname)
@@ -335,7 +335,7 @@ def listFiles(dir, watchcomic, Publisher, AlternateSearch=None, manual=None, sar
logger.fdebug('[FILECHECKER] year detected: ' + str(tmpi))
subname = re.sub('(19\d{2}|20\d{2})(.*)', '\\2 (\\1)', subthis)
subname = re.sub('\(\)', '', subname).strip()
-subname = watchcomic + ' ' + subname
+subname = u_watchcomic + ' ' + subname
if mylar.FOLDER_SCAN_LOG_VERBOSE:
logger.fdebug('[FILECHECKER] new subname reversed: ' + subname)
break
@@ -389,11 +389,11 @@ def listFiles(dir, watchcomic, Publisher, AlternateSearch=None, manual=None, sar
subthis = re.sub('.cbr', '', subname)
subthis = re.sub('.cbz', '', subthis)
if decimalinseries == 'True':
-watchname = re.sub('[\:\;\!\'\/\?\+\=\_\%\-]', '', watchcomic) #remove spec chars for watchcomic match.
+watchname = re.sub('[\:\;\!\'\/\?\+\=\_\%\-]', '', u_watchcomic) #remove spec chars for watchcomic match.
subthis = re.sub('[\:\;\!\'\/\?\+\=\_\%\-]', '', subthis)
else:
# in order to get series like Earth 2 scanned in that contain a decimal, I removed the \. from the re.subs below - 28-08-2014
-watchname = re.sub('[\:\;\!\'\/\?\+\=\_\%\-]', '', watchcomic) #remove spec chars for watchcomic match.
+watchname = re.sub('[\:\;\!\'\/\?\+\=\_\%\-]', '', u_watchcomic) #remove spec chars for watchcomic match.
subthis = re.sub('[\:\;\!\'\/\?\+\=\_\%\-]', '', subthis)
if mylar.FOLDER_SCAN_LOG_VERBOSE:
logger.fdebug('[FILECHECKER] watch-cleaned: ' + watchname)
@@ -538,7 +538,7 @@ def listFiles(dir, watchcomic, Publisher, AlternateSearch=None, manual=None, sar
detneg = "no"
leavehyphen = False
should_restart = True
-lenwatch = len(watchcomic) # because subname gets replaced dynamically, the length will change and things go wrong.
+lenwatch = len(u_watchcomic) # because subname gets replaced dynamically, the length will change and things go wrong.
while should_restart:
should_restart = False
for nono in not_these:
@@ -560,7 +560,7 @@ def listFiles(dir, watchcomic, Publisher, AlternateSearch=None, manual=None, sar
logger.fdebug('[FILECHECKER] possible negative issue detected.')
nonocount = nonocount + subcnt - 1
detneg = "yes"
-elif '-' in watchcomic and j < lenwatch:
+elif (slashcoloninseries or '-' in u_watchcomic) and j < lenwatch:
lenwatch -=1
if mylar.FOLDER_SCAN_LOG_VERBOSE:
logger.fdebug('[FILECHECKER] - appears in series title.')
@@ -1412,7 +1412,9 @@ def crc(filename):
#return "%X"%(prev & 0xFFFFFFFF)
#speed in lieu of memory (file into memory entirely)
-return "%X" % (zlib.crc32(open(filename, "rb").read()) & 0xFFFFFFFF)
+#return "%X" % (zlib.crc32(open(filename, "rb").read()) & 0xFFFFFFFF)
+return hashlib.md5(filename).hexdigest()
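# --- Illustrative note (not part of the commit): hashlib.md5(filename) above
# digests the *path string*, not the file contents, so it serves as a cheap,
# stable identifier rather than an integrity check. A contents digest, if ever
# wanted instead, would read the file in chunks:
def md5_of_contents(filename, blocksize=65536):
    m = hashlib.md5()
    with open(filename, 'rb') as f:
        for chunk in iter(lambda: f.read(blocksize), b''):
            m.update(chunk)
    return m.hexdigest()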
def setperms(path, dir=False):
View File
@@ -216,7 +216,7 @@ def addComictoDB(comicid, mismatch=None, pullupd=None, imported=None, ogcname=No
#else:
#sresults = mb.findComic(annComicName, mode, issue=annissues, limityear=annualval['AnnualYear'])
#print "annualyear: " + str(annualval['AnnualYear'])
-annual_types_ignore = {'paperback', 'collecting', 'reprints', 'collected', 'print edition', 'tpb', 'available in print'}
+annual_types_ignore = {'paperback', 'collecting', 'reprints', 'collected', 'print edition', 'tpb', 'available in print', 'collects'}
logger.fdebug('[IMPORTER-ANNUAL] - Annual Year:' + str(annualyear))
sresults, explicit = mb.findComic(annComicName, mode, issue=None, explicit='all')#,explicit=True)
@@ -1486,7 +1486,7 @@ def annual_check(ComicName, SeriesYear, comicid, issuetype, issuechk, weeklyissu
sresults, explicit = mb.findComic(annComicName, mode, issue=None, explicit='all')#,explicit=True)
type='comic'
-annual_types_ignore = {'paperback', 'collecting', 'reprints', 'collected', 'print edition', 'tpb', 'available in print'}
+annual_types_ignore = {'paperback', 'collecting', 'reprints', 'collected', 'print edition', 'tpb', 'available in print', 'collects'}
if len(sresults) == 1:
logger.fdebug('[IMPORTER-ANNUAL] - 1 result')
View File
@@ -189,6 +189,7 @@ def libraryScan(dir=None, append=False, ComicID=None, ComicName=None, cron=None)
if issueinfo is None:
pass
else:
+issuenotes_id = None
logger.info('Successfully retrieved some tags. Lets see what I can figure out.')
comicname = issueinfo[0]['series']
logger.fdebug('Series Name: ' + comicname)
@@ -208,9 +209,19 @@ def libraryScan(dir=None, append=False, ComicID=None, ComicName=None, cron=None)
if issuenotes is not None:
if 'Issue ID' in issuenotes:
st_find = issuenotes.find('Issue ID')
-issuenotes_id = re.sub("[^0-9]", " ", issuenotes[st_find:]).strip()
+tmp_issuenotes_id = re.sub("[^0-9]", " ", issuenotes[st_find:]).strip()
-if issuenotes_id.isdigit():
+if tmp_issuenotes_id.isdigit():
+issuenotes_id = tmp_issuenotes_id
logger.fdebug('Successfully retrieved CV IssueID for ' + comicname + ' #' + str(issue_number) + ' [' + str(issuenotes_id) + ']')
+elif 'CVDB' in issuenotes:
+st_find = issuenotes.find('CVDB')
+tmp_issuenotes_id = re.sub("[^0-9]", " ", issuenotes[st_find:]).strip()
+if tmp_issuenotes_id.isdigit():
+issuenotes_id = tmp_issuenotes_id
+logger.fdebug('Successfully retrieved CV IssueID for ' + comicname + ' #' + str(issue_number) + ' [' + str(issuenotes_id) + ']')
+else:
+logger.fdebug('Unable to retrieve IssueID from meta-tagging. If there is other metadata present I will use that.')
logger.fdebug("adding " + comicname + " to the import-queue!")
impid = comicname + '-' + str(issueyear) + '-' + str(issue_number) #com_NAME + "-" + str(result_comyear) + "-" + str(comiss)
logger.fdebug("impid: " + str(impid))
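# --- Illustrative sketch (not part of the commit): the 'Issue ID' / 'CVDB'
# branches above folded into one hypothetical helper. It returns the digits that
# follow a known marker in the tag notes, or None when nothing usable is found.
def id_from_notes(issuenotes):
    for marker in ('Issue ID', 'CVDB'):
        st_find = issuenotes.find(marker)
        if st_find == -1:
            continue
        candidate = re.sub("[^0-9]", " ", issuenotes[st_find:]).strip()
        if candidate.isdigit():
            return candidate
    return None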
View File
@@ -21,6 +21,7 @@ import threading
import platform
import urllib, urllib2
from xml.dom.minidom import parseString, Element
+import lib.requests as requests
import mylar
from mylar import logger, db, cv
@@ -69,18 +70,26 @@ def pullsearch(comicapi, comicquery, offset, explicit, type):
time.sleep(mylar.CVAPI_RATE)
#download the file:
-try:
-file = urllib2.urlopen(PULLURL)
-except urllib2.HTTPError, err:
-logger.error('err : ' + str(err))
-logger.error("There was a major problem retrieving data from ComicVine - on their end. You'll have to try again later most likely.")
-return
-#convert to string:
-data = file.read()
-#close file because we dont need it anymore:
-file.close()
-#parse the xml you downloaded
-dom = parseString(data)
+payload = None
+verify = False
+try:
+r = requests.get(PULLURL, params=payload, verify=verify, headers=mylar.CV_HEADERS)
+except Exception, e:
+logger.warn('Error fetching data from ComicVine: %s' % (e))
+return
+# try:
+#     file = urllib2.urlopen(PULLURL)
+# except urllib2.HTTPError, err:
+#     logger.error('err : ' + str(err))
+#     logger.error("There was a major problem retrieving data from ComicVine - on their end. You'll have to try again later most likely.")
+#     return
+# #convert to string:
+# data = file.read()
+# #close file because we dont need it anymore:
+# file.close()
+# #parse the xml you downloaded
+dom = parseString(r.content) #(data)
return dom
def findComic(name, mode, issue, limityear=None, explicit=None, type=None):
@@ -367,16 +376,24 @@ def storyarcinfo(xmlid):
else:
time.sleep(mylar.CVAPI_RATE)
-try:
-file = urllib2.urlopen(ARCPULL_URL)
-except urllib2.HTTPError, err:
-logger.error('err : ' + str(err))
-logger.error('There was a major problem retrieving data from ComicVine - on their end.')
-return
-arcdata = file.read()
-file.close()
-arcdom = parseString(arcdata)
+#download the file:
+payload = None
+verify = False
+try:
+r = requests.get(ARCPULL_URL, params=payload, verify=verify, headers=mylar.CV_HEADERS)
+except Exception, e:
+logger.warn('Error fetching data from ComicVine: %s' % (e))
+return
+# try:
+#     file = urllib2.urlopen(ARCPULL_URL)
+# except urllib2.HTTPError, err:
+#     logger.error('err : ' + str(err))
+#     logger.error('There was a major problem retrieving data from ComicVine - on their end.')
+#     return
+# arcdata = file.read()
+# file.close()
+arcdom = parseString(r.content) #(arcdata)
try:
logger.fdebug('story_arc ascension')
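# --- Illustrative sketch (not part of the commit): pullsearch() and
# storyarcinfo() now share the same shape - sleep to respect the ComicVine rate
# limit, then fetch via requests with the common headers. A hypothetical
# consolidation (assumes the module-level time/requests/parseString imports):
def rate_limited_cv_get(url):
    time.sleep(mylar.CVAPI_RATE)  # stay under the CV api rate cap
    try:
        r = requests.get(url, params=None, verify=False, headers=mylar.CV_HEADERS)
    except Exception, e:
        logger.warn('Error fetching data from ComicVine: %s' % (e))
        return None
    return parseString(r.content)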
View File
@@ -80,11 +80,14 @@ def torrents(pickfeed=None, seriesname=None, issue=None, feedinfo=None):
if pickfeed == "1" and mylar.ENABLE_32P: # 32pages new releases feed.
feed = 'https://32pag.es/feeds.php?feed=torrents_all&user=' + feedinfo['user'] + '&auth=' + feedinfo['auth'] + '&passkey=' + feedinfo['passkey'] + '&authkey=' + feedinfo['authkey']
feedtype = ' from the New Releases RSS Feed for comics'
+verify = bool(mylar.VERIFY_32P)
elif pickfeed == "2" and srchterm is not None: # kat.ph search
feed = kat_url + "usearch/" + str(srchterm) + "%20category%3Acomics%20seeds%3A" + str(mylar.MINSEEDS) + "/?rss=1"
+verify = bool(mylar.KAT_VERIFY)
elif pickfeed == "3": # kat.ph rss feed
feed = kat_url + "usearch/category%3Acomics%20seeds%3A" + str(mylar.MINSEEDS) + "/?rss=1"
feedtype = ' from the New Releases RSS Feed for comics'
+verify = bool(mylar.KAT_VERIFY)
elif pickfeed == "4": #32p search
if any([mylar.USERNAME_32P is None, mylar.USERNAME_32P == '', mylar.PASSWORD_32P is None, mylar.PASSWORD_32P == '']):
logger.error('[RSS] Warning - you NEED to enter in your 32P Username and Password to use this option.')
@@ -97,29 +100,39 @@ def torrents(pickfeed=None, seriesname=None, issue=None, feedinfo=None):
return
elif pickfeed == "5" and srchterm is not None: # kat.ph search (category:other since some 0-day comics initially get thrown there until categorized)
feed = kat_url + "usearch/" + str(srchterm) + "%20category%3Aother%20seeds%3A1/?rss=1"
+verify = bool(mylar.KAT_VERIFY)
elif pickfeed == "6": # kat.ph rss feed (category:other so that we can get them quicker if need-be)
feed = kat_url + "usearch/.cbr%20category%3Aother%20seeds%3A" + str(mylar.MINSEEDS) + "/?rss=1"
feedtype = ' from the New Releases for category Other RSS Feed that contain comics'
+verify = bool(mylar.KAT_VERIFY)
elif int(pickfeed) >= 7 and feedinfo is not None:
#personal 32P notification feeds.
#get the info here
feed = 'https://32pag.es/feeds.php?feed=' + feedinfo['feed'] + '&user=' + feedinfo['user'] + '&auth=' + feedinfo['auth'] + '&passkey=' + feedinfo['passkey'] + '&authkey=' + feedinfo['authkey'] + '&name=' + feedinfo['feedname']
feedtype = ' from your Personal Notification Feed : ' + feedinfo['feedname']
+verify = bool(mylar.VERIFY_32P)
else:
logger.error('invalid pickfeed denoted...')
return
-#logger.info('[' + str(pickfeed) + '] feed URL: ' + str(feed))
-if pickfeed != '4':
-feedme = feedparser.parse(feed)
if pickfeed == "3" or pickfeed == "6" or pickfeed == "2" or pickfeed == "5":
picksite = 'KAT'
elif pickfeed == "1" or pickfeed == "4" or int(pickfeed) > 7:
picksite = '32P'
+if pickfeed != '4':
+payload = None
+try:
+r = requests.get(feed, params=payload, verify=verify)
+except Exception, e:
+logger.warn('Error fetching RSS Feed Data from %s: %s' % (picksite, e))
+return
+feedme = feedparser.parse(r.content)
+#feedme = feedparser.parse(feed)
i = 0
if pickfeed == '4':
@@ -282,10 +295,6 @@ def nzbs(provider=None, forcerss=False):
num_items = "&num=100" if forcerss else "" # default is 25
_parse_feed('dognzb', 'https://dognzb.cr/rss.cfm?r=' + mylar.DOGNZB_APIKEY + '&t=7030' + num_items)
-if mylar.OMGWTFNZBS == 1:
-num_items = "&num=100" if forcerss else "" # default is 25
-_parse_feed('omgwtfnzbs', 'http://api.omgwtfnzbs.org/rss?t=7030&dl=1&i=' + (mylar.OMGWTFNZBS_USERNAME or '1') + '&r=' + mylar.OMGWTFNZBS_APIKEY + num_items)
for newznab_host in newznab_hosts:
site = newznab_host[0].rstrip()
(newznabuid, _, newznabcat) = (newznab_host[3] or '').partition('#')
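# --- Illustrative sketch (not part of the commit): the per-feed 'verify' flags
# set above, summarized. Pickfeeds 2/3/5/6 are KAT feeds and honour KAT_VERIFY;
# 1 and the personal feeds (>= 7) are 32P and honour VERIFY_32P. Roughly:
def verify_for_pickfeed(pickfeed):
    if pickfeed in ('2', '3', '5', '6'):
        return bool(mylar.KAT_VERIFY)
    if pickfeed == '1' or int(pickfeed) >= 7:
        return bool(mylar.VERIFY_32P)
    return False  # pickfeed '4' (32P search) does not go through feedparser here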
View File
@@ -19,6 +19,7 @@ import mylar
from mylar import logger, db, updater, helpers, parseit, findcomicfeed, notifiers, rsscheck, Failed, filechecker
import lib.feedparser as feedparser
+import lib.requests as requests
import urllib
import os, errno
import string
@@ -105,9 +106,6 @@ def search_init(ComicName, IssueNumber, ComicYear, SeriesYear, Publisher, IssueD
if mylar.DOGNZB == 1:
nzbprovider.append('dognzb')
nzbp+=1
-if mylar.OMGWTFNZBS == 1:
-nzbprovider.append('omgwtfnzbs')
-nzbp+=1
# --------
# Xperimental
@@ -122,7 +120,7 @@ def search_init(ComicName, IssueNumber, ComicYear, SeriesYear, Publisher, IssueD
if mylar.NEWZNAB == 1:
#if len(mylar.EXTRA_NEWZNABS > 0):
for newznab_host in mylar.EXTRA_NEWZNABS:
-if newznab_host[4] == '1' or newznab_host[4] == 1:
+if newznab_host[5] == '1' or newznab_host[5] == 1:
newznab_hosts.append(newznab_host)
#if newznab_host[0] == newznab_host[1]:
# nzbprovider.append('newznab')
@@ -294,20 +292,22 @@ def NZB_SEARCH(ComicName, IssueNumber, ComicYear, SeriesYear, Publisher, IssueDa
if nzbprov == 'nzb.su':
apikey = mylar.NZBSU_APIKEY
+verify = bool(mylar.NZBSU_VERIFY)
elif nzbprov == 'dognzb':
apikey = mylar.DOGNZB_APIKEY
+verify = bool(mylar.DOGNZB_VERIFY)
-elif nzbprov == 'omgwtfnzbs':
-apikey = mylar.OMGWTFNZBS_APIKEY
elif nzbprov == 'experimental':
apikey = 'none'
+verify = False
elif nzbprov == 'newznab':
#updated to include Newznab Name now
name_newznab = newznab_host[0].rstrip()
host_newznab = newznab_host[1].rstrip()
-apikey = newznab_host[2].rstrip()
+apikey = newznab_host[3].rstrip()
+verify = bool(newznab_host[2].rstrip())
-if '#' in newznab_host[3].rstrip():
+if '#' in newznab_host[4].rstrip():
-catstart = newznab_host[3].find('#')
+catstart = newznab_host[4].find('#')
-category_newznab = newznab_host[3][catstart +1:]
+category_newznab = newznab_host[4][catstart +1:]
logger.fdebug('non-default Newznab category set to :' + str(category_newznab))
else:
category_newznab = '7030'
@@ -512,8 +512,6 @@ def NZB_SEARCH(ComicName, IssueNumber, ComicYear, SeriesYear, Publisher, IssueDa
findurl = "https://api.dognzb.cr/api?t=search&q=" + str(comsearch) + "&o=xml&cat=7030"
elif nzbprov == 'nzb.su':
findurl = "https://api.nzb.su/api?t=search&q=" + str(comsearch) + "&o=xml&cat=7030"
-elif nzbprov == 'omgwtfnzbs':
-findurl = "https://api.omgwtfnzbs.org/xml/?search=" + str(comsearch) + "&user=" + mylar.OMGWTFNZBS_USERNAME + "&o=xml&catid=9"
elif nzbprov == 'newznab':
#let's make sure the host has a '/' at the end, if not add it.
if host_newznab[len(host_newznab) -1:len(host_newznab)] != '/':
@@ -530,26 +528,13 @@ def NZB_SEARCH(ComicName, IssueNumber, ComicYear, SeriesYear, Publisher, IssueDa
apikey = mylar.TORZNAB_APIKEY
if nzbprov != 'nzbx':
# helper function to replace apikey here so we avoid logging it ;)
-if nzbprov == 'omgwtfnzbs':
-findurl = findurl + "&api=" + str(apikey)
-else:
-findurl = findurl + "&apikey=" + str(apikey)
+findurl = findurl + "&apikey=" + str(apikey)
logsearch = helpers.apiremove(str(findurl), 'nzb')
-logger.fdebug("search-url: " + str(logsearch))
### IF USENET_RETENTION is set, honour it
### For newznab sites, that means appending "&maxage=<whatever>" on the URL
if mylar.USENET_RETENTION != None and nzbprov != 'torznab':
-if nzbprov == 'omgwtfnzbs':
-findurl = findurl + "&retention=" + str(mylar.USENET_RETENTION)
-else:
-findurl = findurl + "&maxage=" + str(mylar.USENET_RETENTION)
+findurl = findurl + "&maxage=" + str(mylar.USENET_RETENTION)
-# Add a user-agent
-#print ("user-agent:" + str(mylar.USER_AGENT))
-request = urllib2.Request(findurl)
-request.add_header('User-Agent', str(mylar.USER_AGENT))
-opener = urllib2.build_opener()
#set a delay between searches here. Default is for 60 seconds...
#changing this to lower could result in a ban from your nzb source due to hammering.
@@ -579,8 +564,25 @@ def NZB_SEARCH(ComicName, IssueNumber, ComicYear, SeriesYear, Publisher, IssueDa
logger.info("pausing for " + str(pause_the_search) + " seconds before continuing to avoid hammering")
time.sleep(pause_the_search)
+# Add a user-agent
+headers = {'User-Agent': str(mylar.USER_AGENT)}
+payload = None
+if findurl.startswith('https'):
+try:
+from lib.requests.packages.urllib3 import disable_warnings
+disable_warnings()
+except:
+logger.warn('Unable to disable https warnings. Expect some spam if using https nzb providers.')
+else:
+verify = False
+#logger.fdebug('[SSL: ' + str(verify) + '] Search URL: ' + findurl)
+logger.fdebug('[SSL: ' + str(verify) + '] Search URL: ' + str(logsearch))
try:
-data = opener.open(request).read()
+r = requests.get(findurl, params=payload, verify=verify, headers=headers)
except Exception, e:
logger.warn('Error fetching data from %s: %s' % (nzbprov, e))
if 'HTTP Error 503' in e:
@@ -589,6 +591,13 @@ def NZB_SEARCH(ComicName, IssueNumber, ComicYear, SeriesYear, Publisher, IssueDa
break
data = False
+logger.info(r.content)
+if str(r.status_code) != '200':
+logger.warn('Unable to download from ' + nzbprov + ' [Status Code returned: ' + str(r.status_code) + ']')
+else:
+data = r.content
if data:
bb = feedparser.parse(data)
else:
@@ -1795,13 +1804,13 @@ def searcher(nzbprov, nzbname, comicinfo, link, IssueID, ComicID, tmpprov, direc
logger.fdebug('NZBMegaSearch url detected. Adjusting...')
nzbmega = True
else:
-apikey = newznab[2].rstrip()
+apikey = newznab[3].rstrip()
down_url = host_newznab_fix + 'api'
-verify = False
+verify = bool(newznab[2])
else:
-down_url = 'https://api.nzb.su/api?'
+down_url = 'https://api.nzb.su/api'
apikey = mylar.NZBSU_APIKEY
-verify = True #unsure if verify should be set to True for nzb.su or not.
+verify = bool(mylar.NZBSU_VERIFY)
if nzbmega == True:
down_url = link
@@ -1816,15 +1825,7 @@ def searcher(nzbprov, nzbname, comicinfo, link, IssueID, ComicID, tmpprov, direc
elif nzbprov == 'dognzb':
#dognzb - need to add back in the dog apikey
down_url = urljoin(link, str(mylar.DOGNZB_APIKEY))
-verify = False
+verify = bool(mylar.DOGNZB_VERIFY)
-elif nzbprov == 'omgwtfnzbs':
-#omgwtfnzbs.
-down_url = 'https://api.omgwtfnzbs.org/sn.php?'
-payload = {'id': str(nzbid),
-'user': str(mylar.OMGWTFNZBS_USERNAME),
-'api': str(mylar.OMGWTFNZBS_APIKEY)}
-verify = True
else:
#experimental - direct link.
@@ -1837,7 +1838,6 @@ def searcher(nzbprov, nzbname, comicinfo, link, IssueID, ComicID, tmpprov, direc
else:
logger.info('Download URL: ' + down_url + '?' + urllib.urlencode(payload) + ' [VerifySSL:' + str(verify) + ']')
-import lib.requests as requests
if down_url.startswith('https'):
try:
@@ -2157,8 +2157,6 @@ def searcher(nzbprov, nzbname, comicinfo, link, IssueID, ComicID, tmpprov, direc
except:
logger.warn('Unable to disable https warnings. Expect some spam if using https nzb providers.')
-import lib.requests as requests
try:
requests.put(tmpapi, verify=False)
except:
@@ -2397,10 +2395,6 @@ def generate_id(nzbprov, link):
url_parts = urlparse.urlparse(link)
path_parts = url_parts[2].rpartition('/')
nzbid = path_parts[0].rsplit('/', 1)[1]
-elif nzbprov == 'omgwtfnzbs':
-url_parts = urlparse.urlparse(link)
-path_parts = url_parts[4].split('&')
-nzbid = path_parts[0].rsplit('=',1)[1]
elif nzbprov == 'newznab':
#if in format of http://newznab/getnzb/<id>.nzb&i=1&r=apikey
tmpid = urlparse.urlparse(link)[4] #param 4 is the query string from the url.
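# --- Illustrative sketch (not part of the commit): what the surviving newznab
# branch of generate_id() handles - links shaped like
# http://host/getnzb/<id>.nzb&i=1&r=apikey, or an explicit id= query parameter.
# This standalone version is a rough, hypothetical equivalent.
import re
import urlparse

def newznab_id_from_link(link):
    match = re.search(r'/getnzb/(\w+)\.nzb', link)
    if match:
        return match.group(1)
    query = urlparse.urlparse(link)[4]  # param 4 is the query string from the url
    return urlparse.parse_qs(query).get('id', [None])[0]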
View File
@@ -951,7 +951,7 @@ class WebInterface(object):
force_rss.exposed = True
def markannuals(self, ann_action=None, **args):
-self.markissues(action=ann_action, **args)
+self.markissues(ann_action, **args)
markannuals.exposed = True
def markissues(self, action=None, **args):
@@ -1014,7 +1014,6 @@ class WebInterface(object):
threading.Thread(target=search.searchIssueIDList, args=[issuesToAdd]).start()
raise cherrypy.HTTPRedirect("comicDetails?ComicID=%s" % mi['ComicID'])
markissues.exposed = True
def markentries(self, action=None, **args):
@@ -1873,22 +1872,18 @@ class WebInterface(object):
renameiss = helpers.rename_param(comicid, comicname, issue['Issue_Number'], filename, comicyear=None, issueid=None, annualize=annualize)
nfilename = renameiss['nfilename']
srciss = os.path.join(comicdir, filename)
-if mylar.LOWERCASE_FILENAMES:
-dstiss = os.path.join(comicdir, nfilename).lower()
-else:
-dstiss = os.path.join(comicdir, nfilename)
if filename != nfilename:
-logger.info("Renaming " + str(filename) + " ... to ... " + str(nfilename))
+logger.info('Renaming ' + filename + ' ... to ... ' + renameiss['nfilename'])
try:
-shutil.move(srciss, dstiss)
+shutil.move(srciss, renameiss['destination_dir'])
except (OSError, IOError):
-logger.error("Failed to move files - check directories and manually re-run.")
+logger.error('Failed to move files - check directories and manually re-run.')
return
filefind+=1
else:
-logger.info("Not renaming " + str(filename) + " as it is in desired format already.")
+logger.info('Not renaming ' + filename + ' as it is in desired format already.')
#continue
-logger.info("I have renamed " + str(filefind) + " issues of " + comicname)
+logger.info('I have renamed ' + str(filefind) + ' issues of ' + comicname)
updater.forceRescan(comicid)
manualRename.exposed = True
@@ -2749,14 +2744,6 @@ class WebInterface(object):
# return serve_template(templatename="importlog.html", title="Log", implog=implog)
importLog.exposed = True
-# def logs(self, log_level=None):
-#if mylar.LOG_LEVEL is None or mylar.LOG_LEVEL == '' or log_level is None:
-# mylar.LOG_LEVEL = 'INFO'
-#else:
-# mylar.LOG_LEVEL = log_level
-# return serve_template(templatename="logs.html", title="Log", lineList=mylar.LOG_LIST, loglevel=mylar.LOG_LEVEL)
-# logs.exposed = True
def logs(self):
return serve_template(templatename="logs.html", title="Log", lineList=mylar.LOG_LIST)
logs.exposed = True
@@ -2955,7 +2942,6 @@ class WebInterface(object):
queue = Queue.Queue()
#save the values so they stick.
-mylar.LIBRARYSCAN = libraryscan
mylar.ADD_COMICS = autoadd
mylar.COMIC_DIR = path
mylar.IMP_MOVE = imp_move
@@ -3360,7 +3346,6 @@ class WebInterface(object):
"download_scan_interval": mylar.DOWNLOAD_SCAN_INTERVAL,
"nzb_search_interval": mylar.SEARCH_INTERVAL,
"nzb_startup_search": helpers.checked(mylar.NZB_STARTUP_SEARCH),
-"libraryscan_interval": mylar.LIBRARYSCAN_INTERVAL,
"search_delay": mylar.SEARCH_DELAY,
"nzb_downloader_sabnzbd": helpers.radio(mylar.NZB_DOWNLOADER, 0),
"nzb_downloader_nzbget": helpers.radio(mylar.NZB_DOWNLOADER, 1),
@@ -3385,11 +3370,10 @@ class WebInterface(object):
"use_nzbsu": helpers.checked(mylar.NZBSU),
"nzbsu_uid": mylar.NZBSU_UID,
"nzbsu_api": mylar.NZBSU_APIKEY,
+"nzbsu_verify": helpers.checked(mylar.NZBSU_VERIFY),
"use_dognzb": helpers.checked(mylar.DOGNZB),
"dognzb_api": mylar.DOGNZB_APIKEY,
+"dognzb_verify": helpers.checked(mylar.DOGNZB_VERIFY),
-"use_omgwtfnzbs": helpers.checked(mylar.OMGWTFNZBS),
-"omgwtfnzbs_username": mylar.OMGWTFNZBS_USERNAME,
-"omgwtfnzbs_api": mylar.OMGWTFNZBS_APIKEY,
"use_experimental": helpers.checked(mylar.EXPERIMENTAL),
"enable_torznab": helpers.checked(mylar.ENABLE_TORZNAB),
"torznab_name": mylar.TORZNAB_NAME,
@@ -3399,6 +3383,7 @@ class WebInterface(object):
"use_newznab": helpers.checked(mylar.NEWZNAB),
"newznab_host": mylar.NEWZNAB_HOST,
"newznab_name": mylar.NEWZNAB_NAME,
+"newznab_verify": helpers.checked(mylar.NEWZNAB_VERIFY),
"newznab_api": mylar.NEWZNAB_APIKEY,
"newznab_uid": mylar.NEWZNAB_UID,
"newznab_enabled": helpers.checked(mylar.NEWZNAB_ENABLED),
@@ -3694,10 +3679,10 @@ class WebInterface(object):
readOptions.exposed = True
-def configUpdate(self, comicvine_api=None, http_host='0.0.0.0', http_username=None, http_port=8090, http_password=None, enable_https=0, https_cert=None, https_key=None, api_enabled=0, api_key=None, launch_browser=0, auto_update=0, annuals_on=0, max_logsize=None, download_scan_interval=None, nzb_search_interval=None, nzb_startup_search=0, libraryscan_interval=None,
+def configUpdate(self, comicvine_api=None, http_host='0.0.0.0', http_username=None, http_port=8090, http_password=None, enable_https=0, https_cert=None, https_key=None, api_enabled=0, api_key=None, launch_browser=0, auto_update=0, annuals_on=0, max_logsize=None, download_scan_interval=None, nzb_search_interval=None, nzb_startup_search=0,
nzb_downloader=0, sab_host=None, sab_username=None, sab_apikey=None, sab_password=None, sab_category=None, sab_priority=None, sab_directory=None, sab_to_mylar=0, log_dir=None, log_level=0, blackhole_dir=None,
nzbget_host=None, nzbget_port=None, nzbget_username=None, nzbget_password=None, nzbget_category=None, nzbget_priority=None, nzbget_directory=None,
-usenet_retention=None, nzbsu=0, nzbsu_uid=None, nzbsu_apikey=None, dognzb=0, dognzb_apikey=None, omgwtfnzbs=0, omgwtfnzbs_username=None, omgwtfnzbs_apikey=None, newznab=0, newznab_host=None, newznab_name=None, newznab_apikey=None, newznab_uid=None, newznab_enabled=0,
+usenet_retention=None, nzbsu=0, nzbsu_uid=None, nzbsu_apikey=None, nzbsu_verify=0, dognzb=0, dognzb_apikey=None, dognzb_verify=0, newznab=0, newznab_host=None, newznab_name=None, newznab_verify=0, newznab_apikey=None, newznab_uid=None, newznab_enabled=0,
enable_torznab=0, torznab_name=None, torznab_host=None, torznab_apikey=None, torznab_category=None, experimental=0, check_folder=None, enable_check_folder=0,
enable_meta=0, cbr2cbz_only=0, cmtagger_path=None, ct_tag_cr=0, ct_tag_cbl=0, ct_cbz_overwrite=0, unrar_cmd=None, enable_rss=0, rss_checkinterval=None, failed_download_handling=0, failed_auto=0, enable_torrent_search=0, enable_kat=0, enable_32p=0, mode_32p=0, rssfeed_32p=None, passkey_32p=None, username_32p=None, password_32p=None, snatchedtorrent_notify=0,
enable_torrents=0, minseeds=0, torrent_local=0, local_watchdir=None, torrent_seedbox=0, seedbox_watchdir=None, seedbox_user=None, seedbox_pass=None, seedbox_host=None, seedbox_port=None,
@@ -3724,7 +3709,6 @@ class WebInterface(object):
mylar.DOWNLOAD_SCAN_INTERVAL = download_scan_interval
mylar.SEARCH_INTERVAL = nzb_search_interval
mylar.NZB_STARTUP_SEARCH = nzb_startup_search
-mylar.LIBRARYSCAN_INTERVAL = libraryscan_interval
mylar.SEARCH_DELAY = search_delay
mylar.NZB_DOWNLOADER = int(nzb_downloader)
if tsab:
@@ -3750,11 +3734,10 @@ class WebInterface(object):
mylar.NZBSU = nzbsu
mylar.NZBSU_UID = nzbsu_uid
mylar.NZBSU_APIKEY = nzbsu_apikey
+mylar.NZBSU_VERIFY = nzbsu_verify
mylar.DOGNZB = dognzb
mylar.DOGNZB_APIKEY = dognzb_apikey
+mylar.DOGNZB_VERIFY = dognzb_verify
-mylar.OMGWTFNZBS = omgwtfnzbs
-mylar.OMGWTFNZBS_USERNAME = omgwtfnzbs_username
-mylar.OMGWTFNZBS_APIKEY = omgwtfnzbs_apikey
mylar.ENABLE_TORZNAB = enable_torznab
mylar.TORZNAB_NAME = torznab_name
mylar.TORZNAB_HOST = torznab_host
@@ -3859,7 +3842,7 @@ class WebInterface(object):
#changing this for simplicty - adding all newznabs into extra_newznabs
if newznab_host is not None:
#this
-mylar.EXTRA_NEWZNABS.append((newznab_name, newznab_host, newznab_apikey, newznab_uid, int(newznab_enabled)))
+mylar.EXTRA_NEWZNABS.append((newznab_name, newznab_host, newznab_verify, newznab_apikey, newznab_uid, int(newznab_enabled)))
for kwarg in kwargs:
if kwarg.startswith('newznab_name'):
@@ -3871,14 +3854,18 @@ class WebInterface(object):
logger.fdebug('Blank newznab provider has been entered - removing.')
continue
newznab_host = kwargs['newznab_host' + newznab_number]
+try:
+newznab_verify = kwargs['newznab_verify' + newznab_number]
+except:
+newznab_verify = 0
newznab_api = kwargs['newznab_api' + newznab_number]
newznab_uid = kwargs['newznab_uid' + newznab_number]
try:
newznab_enabled = int(kwargs['newznab_enabled' + newznab_number])
except KeyError:
newznab_enabled = 0
-mylar.EXTRA_NEWZNABS.append((newznab_name, newznab_host, newznab_api, newznab_uid, newznab_enabled))
+mylar.EXTRA_NEWZNABS.append((newznab_name, newznab_host, newznab_verify, newznab_api, newznab_uid, newznab_enabled))
# Sanity checking
if mylar.COMICVINE_API == 'None' or mylar.COMICVINE_API == '' or mylar.COMICVINE_API == mylar.DEFAULT_CVAPI:
@@ -4245,3 +4232,8 @@ class WebInterface(object):
else:
return "Error sending test message to Pushbullet"
testpushbullet.exposed = True
+def orderThis(self, **kwargs):
+logger.info('here')
+return
+orderThis.exposed = True
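# --- Illustrative sketch (not part of the commit): how the configUpdate() loop
# above gathers the numbered newznab_* form fields (newznab_name2, newznab_host2,
# ...) into the new 6-field tuples, with 'verify' defaulting to 0 when the
# checkbox is absent. The helper name is hypothetical.
def collect_newznabs(kwargs):
    entries = []
    for kwarg in [k for k in kwargs if k.startswith('newznab_name')]:
        number = kwarg[len('newznab_name'):]
        name = kwargs['newznab_name' + number]
        if name == "":
            continue  # blank provider rows are dropped
        host = kwargs['newznab_host' + number]
        verify = kwargs.get('newznab_verify' + number, 0)
        api = kwargs['newznab_api' + number]
        uid = kwargs['newznab_uid' + number]
        try:
            enabled = int(kwargs['newznab_enabled' + number])
        except KeyError:
            enabled = 0
        entries.append((name, host, verify, api, uid, enabled))
    return entries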
View File
@@ -120,7 +120,7 @@ def initialize(options):
# Prevent time-outs
cherrypy.engine.timeout_monitor.unsubscribe()
-cherrypy.tree.mount(WebInterface(), options['http_root'], config = conf)
+cherrypy.tree.mount(WebInterface(), str(options['http_root']), config = conf)
try:
cherrypy.process.servers.check_port(options['http_host'], options['http_port'])
View File
@@ -127,7 +127,7 @@ def pullit(forcecheck=None):
prevcomic = ""
previssue = ""
-newrl = mylar.CACHE_DIR + "/newreleases.txt"
+newrl = os.path.join(mylar.CACHE_DIR, 'newreleases.txt')
if mylar.ALT_PULL:
#logger.info('[PULL-LIST] The Alt-Pull method is currently broken. Defaulting back to the normal method of grabbing the pull-list.')
@@ -137,10 +137,13 @@ def pullit(forcecheck=None):
logger.info('[PULL-LIST] Populating & Loading pull-list data from file')
f = urllib.urlretrieve(PULLURL, newrl)
+#set newrl to a manual file to pull in against that particular file
+#newrl = '/mylar/tmp/newreleases.txt'
#newtxtfile header info ("SHIPDATE\tPUBLISHER\tISSUE\tCOMIC\tEXTRA\tSTATUS\n")
#STATUS denotes default status to be applied to pulllist in Mylar (default = Skipped)
-newfl = mylar.CACHE_DIR + "/Clean-newreleases.txt"
+newfl = os.path.join(mylar.CACHE_DIR, 'Clean-newreleases.txt')
newtxtfile = open(newfl, 'wb')
if check(newrl, 'Service Unavailable'):
@@ -412,7 +415,7 @@ def pullit(forcecheck=None):
if "MAGAZINES" in row: break
if "BOOK" in row: break
try:
-logger.debug("Row: %s" % row)
+#logger.debug("Row: %s" % row)
controlValueDict = {'COMIC': row[3],
'ISSUE': row[2],
'EXTRA': row[4]}
@@ -428,9 +431,8 @@ def pullit(forcecheck=None):
csvfile.close()
logger.info(u"Weekly Pull List successfully loaded.")
#let's delete the files
-pullpath = str(mylar.CACHE_DIR) + "/"
-os.remove(str(pullpath) + "Clean-newreleases.txt")
-os.remove(str(pullpath) + "newreleases.txt")
+os.remove(os.path.join(mylar.CACHE_DIR, 'Clean-newreleases.txt'))
+os.remove(os.path.join(mylar.CACHE_DIR, 'newreleases.txt'))
pullitcheck(forcecheck=forcecheck)
def pullitcheck(comic1off_name=None, comic1off_id=None, forcecheck=None, futurepull=None, issue=None):
@@ -1054,6 +1056,8 @@ def future_check():
#limit the search to just the 'current year' since if it's anything but a #1, it should have associated data already.
#limittheyear = []
#limittheyear.append(cf['IssueDate'][-4:])
+search_results = []
for ser in cflist:
matched = False
theissdate = ser['IssueDate'][-4:]
@@ -1085,6 +1089,11 @@ def future_check():
tmpsername = re.sub(' & ', '', tmpsername.lower()).strip()
tmpsrname = re.sub(' and ', '', tmpsrname.lower()).strip()
tmpsrname = re.sub(' & ', '', tmpsrname.lower()).strip()
+#append the cleaned-up name to get searched later against if necessary.
+search_results.append({'name': tmpsrname,
+'comicid': sr['comicid']})
tmpsername = re.sub('\s', '', tmpsername).strip()
tmpsrname = re.sub('\s', '', tmpsrname).strip()
@@ -1100,29 +1109,47 @@ def future_check():
matched = True
else:
logger.info('Unable to determine a successful match at this time (this is still a WIP so it will eventually work). Not going to attempt auto-adding at this time.')
-# for pos_match in matches:
-# length_match = len(pos_match['name']) / len(ser['ComicName'])
-# logger.fdebug('length match differential set for an allowance of 20%')
-# logger.fdebug('actual differential in length between result and series title: ' + str((length_match * 100)-100) + '%')
-# split_match = pos_match['name'].lower().split()
-# split_series = ser['ComicName'].lower().split()
-# word_match = 0
-# i = 0
-# for ss in split_series:
-# logger.fdebug('ss value: ' + str(ss))
-# try:
-# matchword = split_match[i].lower()
-# except:
-# break
-# if split_match.index(ss) == split_series.index(ss):
-# #will return word position in string.
-# logger.fdebug('word match to position found in both strings at position : ' + str(split_match.index(ss)))
-# word_match+=1
-# elif any(['the', 'and', '&'] == matchword.lower()):
-# logger.fdebug('common word detected of : ' + matchword)
-# word_match+=.5
-# i+=1
-# logger.info('word match score of : ' + str(word_match) + ' / ' + str(len(split_series)))
+catch_words = ('the', 'and', '&', 'to')
+for pos_match in search_results:
+logger.info(pos_match)
+length_match = len(pos_match['name']) / len(ser['ComicName'])
+logger.fdebug('length match differential set for an allowance of 20%')
+logger.fdebug('actual differential in length between result and series title: ' + str((length_match * 100)-100) + '%')
+if ((length_match * 100)-100) > 20:
+logger.fdebug('there are too many extra words to consider this as match for the given title. Ignoring this result.')
+continue
+new_match = pos_match['name'].lower()
+split_series = ser['ComicName'].lower().split()
+for cw in catch_words:
+for x in new_match.split():
+#logger.fdebug('comparing x: ' + str(x) + ' to cw: ' + str(cw))
+if x == cw:
+new_match = re.sub(x, '', new_match)
+split_match = new_match.split()
+word_match = 0
+i = 0
+for ss in split_series:
+try:
+matchword = split_match[i].lower()
+except:
+break
+if split_match.index(ss) == split_series.index(ss):
+#will return word position in string.
+#logger.fdebug('word match to position found in both strings at position : ' + str(split_match.index(ss)))
+word_match+=1
+elif any([x == matchword for x in catch_words]):
+#logger.fdebug('[MW] common word detected of : ' + matchword)
+word_match+=.5
+elif any([cw == ss for cw in catch_words]):
+#logger.fdebug('[CW] common word detected of : ' + matchword)
+word_match+=.5
+i+=1
+logger.fdebug('word match score of : ' + str(word_match) + ' / ' + str(len(split_series)))
+if word_match == len(split_series) or (word_match / len(split_series)) > 0.80:
+logger.fdebug('[' + pos_match['name'] + '] considered a match - word matching percentage is greater than 80%. Attempting to auto-add series into watchlist.')
+cid = pos_match['comicid']
+matched = True
if matched:
#we should probably load all additional issues for the series on the futureupcoming list that are marked as Wanted and then
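# --- Illustrative sketch (not part of the commit): the position-based word
# scoring introduced above, as a simplified standalone function. Matching words
# score a point, filler words half a point, and the caller would treat a ratio
# above 0.80 as a match (the 80% cut-off used above).
def word_match_score(result_name, series_name, catch_words=('the', 'and', '&', 'to')):
    split_match = result_name.lower().split()
    split_series = series_name.lower().split()
    score = 0.0
    for i, ss in enumerate(split_series):
        if i >= len(split_match):
            break
        if split_match[i] == ss:
            score += 1        # same word at the same position
        elif split_match[i] in catch_words or ss in catch_words:
            score += 0.5      # a common filler word on either side counts half
    return score / len(split_series)

# e.g. word_match_score('amazing spiderman', 'amazing spiderman') -> 1.0, while a
# shifted title like 'the amazing spiderman' scores lower because matching is
# positional.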