Merge branch 'development'

Commit d3cb7f87c5 by evilhero, 2019-02-14 14:34:43 -05:00
29 changed files with 2197 additions and 1008 deletions


@ -2,7 +2,7 @@
font-family: 'Lato'; font-family: 'Lato';
font-style: normal; font-style: normal;
font-weight: 400; font-weight: 400;
src: local('Lato Regular'), local('Lato-Regular'), url(http://themes.googleusercontent.com/static/fonts/lato/v7/qIIYRU-oROkIk8vfvxw6QvesZW2xOQ-xsNqO47m55DA.woff) format('woff'); src: local('Lato Regular'), local('Lato-Regular'), url(https://themes.googleusercontent.com/static/fonts/lato/v7/qIIYRU-oROkIk8vfvxw6QvesZW2xOQ-xsNqO47m55DA.woff) format('woff');
} }
body { body {


@ -397,20 +397,36 @@
</div> </div>
</div> </div>
<div class="row checkbox left clearfix"> <div class="row checkbox left clearfix" id="sab_cdh" style="display:unset;">
<input type="checkbox" id="sab_client_post_processing" onclick="initConfigCheckbox($this);" name="sab_client_post_processing" value="1" ${config['sab_client_post_processing']} /><label>Enable Completed Download Handling</label> <input type="checkbox" id="sab_client_post_processing" onclick="initConfigCheckbox($this);" name="sab_client_post_processing" value="1" ${config['sab_client_post_processing']} /><label>Enable Completed Download Handling</label>
<div id="sabcompletedinfo"> <div id="sabcompletedinfo">
<div class="row"> <div class="row">
<small class="heading"><span style="float: left; margin-right: .3em; margin-top: 4px;" class="ui-icon ui-icon-info"></span> <small class="heading"><span style="float: left; margin-right: .3em; margin-top: 4px;" class="ui-icon ui-icon-info"></span>
ComicRN script cannot be used with this enabled</small> ComicRN script cannot be used with this enabled & required SAB version > 0.8.0</small>
</div>
</div>
</div>
<div class="row checkbox left clearfix" id="sab_nocdh" style="display:none;">
<div>
<div class="row">
<small class="heading"><span style="float: left; margin-right: .3em; margin-top: 4px;" class="ui-icon ui-icon-info"></span>
Completed Download Handling is not available as your version of SABnzbd is not above 0.8.0</small>
</div> </div>
</div> </div>
</div> </div>
<div align="center" class="row"> <div align="center" class="row">
<img name="sabnzbd_statusicon" id="sabnzbd_statusicon" src="interfaces/default/images/successs.png" style="float:right;visibility:hidden;" height="20" width="20" /> <img name="sabnzbd_statusicon" id="sabnzbd_statusicon" src="interfaces/default/images/successs.png" style="float:right;visibility:hidden;" height="20" width="20" />
<input type="button" value="Test SABnzbd" id="test_sab" style="float:center" /></br> <input type="button" value="Test SABnzbd" id="test_sab" style="float:center" /></br>
<input type="text" name="sabstatus" style="text-align:center; font-size:11px;" id="sabstatus" size="50" DISABLED /> <input type="text" name="sabstatus" style="text-align:center; font-size:11px;" id="sabstatus" size="50" DISABLED />
<div name="sabversion" id="sabversion" style="font-size:11px;" align="center">
<%
if mylar.CONFIG.SAB_VERSION is not None:
sabv = 'last tested version: %s' % mylar.CONFIG.SAB_VERSION
else:
sabv = ''
%>
<span>${sabv}</span>
</div>
</div> </div>
</fieldset> </fieldset>
@ -667,15 +683,15 @@
<fieldset id="qbittorrent_options"> <fieldset id="qbittorrent_options">
<div class="row"> <div class="row">
<label>qBittorrent Host:Port </label> <label>qBittorrent Host:Port </label>
<input type="text" name="qbittorrent_host" value="${config['qbittorrent_host']}" size="30"> <input type="text" name="qbittorrent_host" id="qbittorrent_host" value="${config['qbittorrent_host']}" size="30">
</div> </div>
<div class="row"> <div class="row">
<label>qBittorrent Username</label> <label>qBittorrent Username</label>
<input type="text" name="qbittorrent_username" value="${config['qbittorrent_username']}" size="30"> <input type="text" name="qbittorrent_username" id="qbittorrent_username" value="${config['qbittorrent_username']}" size="30">
</div> </div>
<div class="row"> <div class="row">
<label>qBittorrent Password</label> <label>qBittorrent Password</label>
<input type="password" name="qbittorrent_password" value="${config['qbittorrent_password']}" size="30"> <input type="password" name="qbittorrent_password" id="qbittorrent_password" value="${config['qbittorrent_password']}" size="30">
</div> </div>
<div class="row"> <div class="row">
<label>qBittorrent Label</label> <label>qBittorrent Label</label>
@ -687,10 +703,24 @@
<input type="text" name="qbittorrent_folder" value="${config['qbittorrent_folder']}" size="30"><br/> <input type="text" name="qbittorrent_folder" value="${config['qbittorrent_folder']}" size="30"><br/>
<small>Folder path where torrents will be assigned to</small> <small>Folder path where torrents will be assigned to</small>
</div> </div>
<div class="row checkbox left clearfix"> <div class="row">
<input id="qbittorrent_startonload" type="checkbox" name="qbittorrent_startonload" value="1" ${config['qbittorrent_startonload']} /><label>Start Torrent on Successful Load</label> <label>Add torrent action:</label>
<small>Automatically start torrent on successful loading within qBittorrent client</small> <select name="qbittorrent_loadaction">
</div> %for x in ['default', 'force_start', 'pause']:
<%
if config['qbittorrent_loadaction'] == x:
outputselect = 'selected'
else:
outputselect = ''
%>
<option value=${x} ${outputselect}>${x}</option>
%endfor
</select>
</div>
<div class="row">
<img name="qbittorrent_statusicon" id="qbittorrent_statusicon" src="interfaces/default/images/successs.png" style="float:right;visibility:hidden;" height="20" width="20" />
<input type="button" value="Test Connection" id="qbittorrent_test" />
</div>
</fieldset> </fieldset>
</div> </div>
</td> </td>
@ -763,13 +793,11 @@
<small class="heading"><span style="float: left; margin-right: .3em; margin-top: 4px;" class="ui-icon ui-icon-info"></span>Note: this is an experimental search - results may be better/worse.</small> <small class="heading"><span style="float: left; margin-right: .3em; margin-top: 4px;" class="ui-icon ui-icon-info"></span>Note: this is an experimental search - results may be better/worse.</small>
</div> </div>
</fieldset> </fieldset>
<!--
<fieldset> <fieldset>
<div class="row checkbox left clearfix"> <div class="row checkbox left clearfix">
<input type="checkbox" id="enable_ddl" name="enable_ddl" value=1 ${config['enable_ddl']} /><legend>Enable DDL (GetComics)</legend> <input type="checkbox" id="enable_ddl" name="enable_ddl" value=1 ${config['enable_ddl']} /><legend>Enable DDL (GetComics)</legend>
</div> </div>
</fieldset> </fieldset>
-->
<fieldset> <fieldset>
<div class="row checkbox left clearfix"> <div class="row checkbox left clearfix">
<input id="enable_torrent_search" type="checkbox" onclick="initConfigCheckbox($(this));" name="enable_torrent_search" value=1 ${config['enable_torrent_search']} /><legend>Torrents</legned> <input id="enable_torrent_search" type="checkbox" onclick="initConfigCheckbox($(this));" name="enable_torrent_search" value=1 ${config['enable_torrent_search']} /><legend>Torrents</legned>
@ -1951,7 +1979,9 @@
function numberWithCommas(x) { function numberWithCommas(x) {
return x.toString().replace(/\B(?=(\d{3})+(?!\d))/g, ","); return x.toString().replace(/\B(?=(\d{3})+(?!\d))/g, ",");
}; };
function numberWithDecimals(x) {
return x.toString().replace(/\B(?=(\d{3})+(?!\d))/g, ".");
};
$("#test_32p").click(function(){ $("#test_32p").click(function(){
var imagechk = document.getElementById("test32p_statusicon"); var imagechk = document.getElementById("test32p_statusicon");
$.get('test_32p', $.get('test_32p',
@ -1992,8 +2022,25 @@
alert(data.error); alert(data.error);
return; return;
} }
$('#sabstatus').val(data); var obj = JSON.parse(data);
$('#ajaxMsg').html("<div class='msg'><span class='ui-icon ui-icon-check'></span>"+data+"</div>"); var versionsab = obj['version'];
vsab = numberWithDecimals(versionsab);
$('#sabstatus').val(obj['status']);
$('#sabversion span').text('SABnzbd version: '+versionsab);
if ( vsab < "0.8.0" ){
scdh = document.getElementById("sab_cdh");
scdh.style.display = "none";
nocdh = document.getElementById("sab_nocdh");
nocdh.style.display = "unset";
scdh_line = document.getElementById("sab_client_post_processing");
scdh_line.value = 0;
} else {
scdh = document.getElementById("sab_cdh");
scdh.style.display = "unset";
nocdh = document.getElementById("sab_nocdh");
nocdh.style.display = "none";
}
$('#ajaxMsg').html("<div class='msg'><span class='ui-icon ui-icon-check'></span>"+obj['status']+"</div>");
if ( data.indexOf("Successfully") > -1){ if ( data.indexOf("Successfully") > -1){
imagechk.src = ""; imagechk.src = "";
imagechk.src = "interfaces/default/images/success.png"; imagechk.src = "interfaces/default/images/success.png";
@ -2116,6 +2163,32 @@
$("#add_torznab").before(torformfields); $("#add_torznab").before(torformfields);
}); });
$('#qbittorrent_test').click(function () {
var imagechk = document.getElementById("qbittorrent_statusicon");
var host = document.getElementById("qbittorrent_host").value;
var username = document.getElementById("qbittorrent_username").value;
var password = document.getElementById("qbittorrent_password").value;
$.get("testqbit",
{ host: host, username: username, password: password },
function(data){
if (data.error != undefined) {
alert(data.error);
return;
}
$('#ajaxMsg').html("<div class='msg'><span class='ui-icon ui-icon-check'></span>"+data+"</div>");
if ( data.indexOf("Successfully") > -1){
imagechk.src = "";
imagechk.src = "interfaces/default/images/success.png";
imagechk.style.visibility = "visible";
} else {
imagechk.src = "";
imagechk.src = "interfaces/default/images/fail.png";
imagechk.style.visibility = "visible";
}
});
$('#ajaxMsg').addClass('success').fadeIn().delay(3000).fadeOut();
});
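The new Test Connection button posts the host, username and password fields to a `testqbit` endpoint and looks for "Successfully" in the reply. A hedged sketch of what such a handler could do against the Web API v2 follows; the endpoint name comes from the JavaScript above, while everything else is an assumption rather than Mylar's actual code.

```python
import requests

def testqbit(host, username, password):
    # Illustrative connection test against qBittorrent's Web API v2 (qBittorrent >= 4.1).
    if not host.startswith('http'):
        host = 'http://%s' % host
    base = host.rstrip('/')
    try:
        s = requests.Session()
        r = s.post(base + '/api/v2/auth/login',
                   data={'username': username, 'password': password}, timeout=10)
        if r.text == 'Ok.':
            version = s.get(base + '/api/v2/app/version', timeout=10).text
            return 'Successfully connected to qBittorrent %s' % version
        return 'Login to qBittorrent failed: %s' % r.text
    except Exception as e:
        return 'Unable to reach qBittorrent: %s' % e
```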
function addAction() { function addAction() {
$('#autoadd').append('<input type="hidden" name="tsab" value=1 />'); $('#autoadd').append('<input type="hidden" name="tsab" value=1 />');
}; };


@ -60,14 +60,14 @@
<input type="checkbox" style="vertical-align: middle; margin: 3px; margin-top: -1px;" /><label>Show Downloaded Story Arc Issues on ReadingList tab</label><br/> <input type="checkbox" style="vertical-align: middle; margin: 3px; margin-top: -1px;" /><label>Show Downloaded Story Arc Issues on ReadingList tab</label><br/>
<input type="checkbox" style="vertical-align: middle; margin: 3px; margin-top: -1px;" /><label>Enforce Renaming/MetaTagging options (if enabled)</label><br/> <input type="checkbox" style="vertical-align: middle; margin: 3px; margin-top: -1px;" /><label>Enforce Renaming/MetaTagging options (if enabled)</label><br/>
--> -->
<input type="checkbox" style="vertical-align: middle; margin: 3px; margin-top: -1px;" name="read2filename" id="read2filename" value="1" ${checked(mylar.CONFIG.READ2FILENAME)} /><label>Prepend Reading# to filename</label><br/> <input type="checkbox" style="vertical-align: middle; margin: 3px; margin-top: -1px;" name="read2filename" id="read2filename" value="1" ${checked(mylar.CONFIG.READ2FILENAME)} disabled/><label>Prepend Reading# to filename</label><br/>
<% <%
if mylar.CONFIG.STORYARCDIR: if mylar.CONFIG.STORYARCDIR:
carcdir = 'StoryArc' carcdir = 'StoryArc'
else: else:
carcdir = 'GrabBag' carcdir = 'GrabBag'
%> %>
<input type="checkbox" style="vertical-align: middle; margin: 3px; margin-top: -1px;" name="copy2arcdir" id="copy2arcdir" value="1" ${checked(mylar.CONFIG.COPY2ARCDIR)} /><label>Copy watchlisted issues to ${carcdir} Directory</label> <input type="checkbox" style="vertical-align: middle; margin: 3px; margin-top: -1px;" name="copy2arcdir" id="copy2arcdir" value="1" ${checked(mylar.CONFIG.COPY2ARCDIR)} disabled /><label>Copy watchlisted issues to ${carcdir} Directory</label>
<input type="hidden" name="StoryArcID" value="${storyarcid}"> <input type="hidden" name="StoryArcID" value="${storyarcid}">
<input type="hidden" name="StoryArcName" value="${storyarcname}"> <input type="hidden" name="StoryArcName" value="${storyarcname}">
@ -75,7 +75,10 @@
</fieldset> </fieldset>
</form> </form>
<div style="display:block;position:relative;top:10px;"> <div style="display:block;position:relative;top:10px;">
&nbsp;
<!--
<input type="submit" value="Update"/> <input type="submit" value="Update"/>
-->
</div> </div>
<div style="display:block;float:right;position:relative;text-color:black;top:-130px;"> <div style="display:block;float:right;position:relative;text-color:black;top:-130px;">
<h1><p style="display:inline;float:right;">${storyarcname}</h1> <h1><p style="display:inline;float:right;">${storyarcname}</h1>
@ -111,8 +114,9 @@
</div> </div>
</div> </div>
<!--
<button type="button" onclick="">Finalize & Rename</button> <button type="button" onclick="">Finalize & Rename</button>
-->
<table class="display" id="arc_detail"> <table class="display" id="arc_detail">
<thead> <thead>
<tr> <tr>
@ -315,6 +319,7 @@
"sInfoFiltered":"(filtered from _MAX_ total items)"}, "sInfoFiltered":"(filtered from _MAX_ total items)"},
"iDisplayLength": 25, "iDisplayLength": 25,
"sPaginationType": "full_numbers", "sPaginationType": "full_numbers",
"stateDuration": 0,
"aaSorting": [] "aaSorting": []
}) })
resetFilters("item"); resetFilters("item");


@ -62,7 +62,7 @@
%endif %endif
</div> </div>
<div class="row checkbox left clearfix"> <div class="row checkbox left clearfix">
<input type="checkbox" style="vertical-align: middle; margin: 3px; margin-top: -1px;" name="read2filename" id="read2filename" value="1" ${checked(mylar.CONFIG.READ2FILENAME)} /><label>Prepend Reading# to filename</label> <input type="checkbox" style="vertical-align: middle; margin: 3px; margin-top: -1px;" name="read2filename" id="read2filename" value="1" ${checked(mylar.CONFIG.READ2FILENAME)} disabled /><label>Prepend Reading# to filename</label>
<% <%
if mylar.CONFIG.STORYARCDIR: if mylar.CONFIG.STORYARCDIR:
carcdir = 'StoryArc' carcdir = 'StoryArc'
@ -71,12 +71,15 @@
%> %>
</div> </div>
<div class="row checkbox left clearfix"> <div class="row checkbox left clearfix">
<input type="checkbox" style="vertical-align: middle; margin: 3px; margin-top: -1px;" name="copy2arcdir" id="copy2arcdir" value="1" ${checked(mylar.CONFIG.COPY2ARCDIR)} /><label>Copy watchlisted issues to ${carcdir} Directory</label> <input type="checkbox" style="vertical-align: middle; margin: 3px; margin-top: -1px;" name="copy2arcdir" id="copy2arcdir" value="1" ${checked(mylar.CONFIG.COPY2ARCDIR)} disabled/><label>Copy watchlisted issues to ${carcdir} Directory</label>
</div> </div>
<input type="hidden" name="StoryArcID" value="${storyarcid}"> <input type="hidden" name="StoryArcID" value="${storyarcid}">
<input type="hidden" name="StoryArcName" value="${storyarcname}"> <input type="hidden" name="StoryArcName" value="${storyarcname}">
<div style="display:inline;position:relative;top:0px;"> <div style="display:inline;position:relative;top:0px;">
&nbsp;
<!--
<input type="submit" value="Update"/> <input type="submit" value="Update"/>
-->
</div> </div>
</form> </form>
</div> </div>


@ -1,11 +1,11 @@
Instructions on setting up mylar as a systemd serivce that will run on startup/via systemctl commands... Instructions on setting up mylar as a systemd service that will run on startup/via systemctl commands...
1 - copy the mylar.service to /lib/systemd/system/mylar.service 1 - copy the mylar.service to /lib/systemd/system/mylar.service
2 - create a symbolic link to it: ln -s /lib/systemd/system/mylar.service /etc/systemd/system/mylar.service 2 - create a symbolic link to it: ln -s /lib/systemd/system/mylar.service /etc/systemd/system/mylar.service
3 - copy mylar.default to /etc/default/mylar (make sure it's renamed from mylar.default to just mylar) 3 - copy mylar.default to /etc/default/mylar (make sure it's renamed from mylar.default to just mylar)
4 - copy mylar.initd to /etc/init.d/mylar (rename it to just mylar) and then 'sudo chmod +x /etc/init.d/mylar' 4 - copy mylar.initd to /etc/init.d/mylar (rename it to just mylar) and then 'sudo chmod +x /etc/init.d/mylar'
5 - edit the /etc/default/mylar file to your defaults (make sure to set MYLAR_USER & MYLAR_HOME as they're required) 5 - edit the /etc/default/mylar file to your defaults (make sure to set MYLAR_USER & MYLAR_HOME as they're required)
6 - make systemd aware of new services: sudo sytemctl daemon-reload 6 - make systemd aware of new services: sudo systemctl daemon-reload
7 - sudo systemctl enable mylar 7 - sudo systemctl enable mylar
8 - sudo systemctl start mylar 8 - sudo systemctl start mylar
9 - to check to see if running/status - sudo sytemctl status mylar 9 - to check to see if running/status - sudo systemctl status mylar


@ -1,7 +1,6 @@
import requests import requests
import json import json
class LoginRequired(Exception): class LoginRequired(Exception):
def __str__(self): def __str__(self):
return 'Please login first.' return 'Please login first.'
@ -15,7 +14,7 @@ class Client(object):
self.url = url self.url = url
session = requests.Session() session = requests.Session()
check_prefs = session.get(url+'query/preferences') check_prefs = session.get(url+'api/v2/app/preferences')
if check_prefs.status_code == 200: if check_prefs.status_code == 200:
self._is_authenticated = True self._is_authenticated = True
@ -24,9 +23,9 @@ class Client(object):
elif check_prefs.status_code == 404: elif check_prefs.status_code == 404:
self._is_authenticated = False self._is_authenticated = False
raise RuntimeError(""" raise RuntimeError("""
This wrapper only supports qBittorrent applications This wrapper only supports qBittorrent applications with
with version higher than 3.1.x. version higher than 4.1.0 (which implemented Web API v2.0).
Please use the latest qBittorrent release. Please use the latest qBittorrent release.
""") """)
else: else:
@ -104,7 +103,7 @@ class Client(object):
:return: Response to login request to the API. :return: Response to login request to the API.
""" """
self.session = requests.Session() self.session = requests.Session()
login = self.session.post(self.url+'login', login = self.session.post(self.url+'api/v2/auth/login',
data={'username': username, data={'username': username,
'password': password}) 'password': password})
if login.text == 'Ok.': if login.text == 'Ok.':
@ -116,7 +115,7 @@ class Client(object):
""" """
Logout the current session. Logout the current session.
""" """
response = self._get('logout') response = self._get('api/v2/auth/logout')
self._is_authenticated = False self._is_authenticated = False
return response return response
@ -125,27 +124,20 @@ class Client(object):
""" """
Get qBittorrent version. Get qBittorrent version.
""" """
return self._get('version/qbittorrent') return self._get('api/v2/app/version')
@property @property
def api_version(self): def api_version(self):
""" """
Get WEB API version. Get WEB API version.
""" """
return self._get('version/api') return self._get('api/v2/app/webapiVersion')
@property
def api_min_version(self):
"""
Get minimum WEB API version.
"""
return self._get('version/api_min')
def shutdown(self): def shutdown(self):
""" """
Shutdown qBittorrent. Shutdown qBittorrent.
""" """
return self._get('command/shutdown') return self._get('api/v2/app/shutdown')
def torrents(self, **filters): def torrents(self, **filters):
""" """
@ -157,6 +149,7 @@ class Client(object):
:param reverse: Enable reverse sorting. :param reverse: Enable reverse sorting.
:param limit: Limit the number of torrents returned. :param limit: Limit the number of torrents returned.
:param offset: Set offset (if less than 0, offset from end). :param offset: Set offset (if less than 0, offset from end).
:param hashes: Filter by hashes. Can contain multiple hashes separated by |.
:return: list() of torrent with matching filter. :return: list() of torrent with matching filter.
""" """
@ -166,7 +159,7 @@ class Client(object):
name = 'filter' if name == 'status' else name name = 'filter' if name == 'status' else name
params[name] = value params[name] = value
return self._get('query/torrents', params=params) return self._get('api/v2/torrents/info', params=params)
def get_torrent(self, infohash): def get_torrent(self, infohash):
""" """
@ -174,7 +167,7 @@ class Client(object):
:param infohash: INFO HASH of the torrent. :param infohash: INFO HASH of the torrent.
""" """
return self._get('query/propertiesGeneral/' + infohash.lower()) return self._get('api/v2/torrents/properties', params={'hash': infohash.lower()})
def get_torrent_trackers(self, infohash): def get_torrent_trackers(self, infohash):
""" """
@ -182,7 +175,7 @@ class Client(object):
:param infohash: INFO HASH of the torrent. :param infohash: INFO HASH of the torrent.
""" """
return self._get('query/propertiesTrackers/' + infohash.lower()) return self._get('api/v2/torrents/trackers', params={'hash': infohash.lower()})
def get_torrent_webseeds(self, infohash): def get_torrent_webseeds(self, infohash):
""" """
@ -190,7 +183,7 @@ class Client(object):
:param infohash: INFO HASH of the torrent. :param infohash: INFO HASH of the torrent.
""" """
return self._get('query/propertiesWebSeeds/' + infohash.lower()) return self._get('api/v2/torrents/webseeds', params={'hash': infohash.lower()})
def get_torrent_files(self, infohash): def get_torrent_files(self, infohash):
""" """
@ -198,14 +191,14 @@ class Client(object):
:param infohash: INFO HASH of the torrent. :param infohash: INFO HASH of the torrent.
""" """
return self._get('query/propertiesFiles/' + infohash.lower()) return self._get('api/v2/torrents/files', params={'hash': infohash.lower()})
@property @property
def global_transfer_info(self): def global_transfer_info(self):
""" """
Get JSON data of the global transfer info of qBittorrent. Get JSON data of the global transfer info of qBittorrent.
""" """
return self._get('query/transferInfo') return self._get('api/v2/transfer/info')
@property @property
def preferences(self): def preferences(self):
@ -228,7 +221,7 @@ class Client(object):
qb.preferences() qb.preferences()
""" """
prefs = self._get('query/preferences') prefs = self._get('api/v2/app/preferences')
class Proxy(Client): class Proxy(Client):
""" """
@ -270,11 +263,11 @@ class Client(object):
def sync(self, rid=0): def sync(self, rid=0):
""" """
Sync the torrents by supplied LAST RESPONSE ID. Sync the torrents by supplied LAST RESPONSE ID.
Read more @ http://git.io/vEgXr Read more @ https://git.io/fxgB8
:param rid: Response ID of last request. :param rid: Response ID of last request.
""" """
return self._get('sync/maindata', params={'rid': rid}) return self._get('api/v2/sync/maindata', params={'rid': rid})
def download_from_link(self, link, **kwargs): def download_from_link(self, link, **kwargs):
""" """
@ -286,22 +279,20 @@ class Client(object):
:return: Empty JSON data. :return: Empty JSON data.
""" """
# old:new format # qBittorrent requires adds to be done with multipath/form-data
old_arg_map = {'save_path': 'savepath'} # , 'label': 'category'} # POST requests for both URLs and .torrent files. Info on this
# can be found here, and here:
# convert old option names to new option names # http://docs.python-requests.org/en/master/user/quickstart/#post-a-multipart-encoded-file
options = kwargs.copy() # http://docs.python-requests.org/en/master/user/advanced/#post-multiple-multipart-encoded-files
for old_arg, new_arg in old_arg_map.items(): if isinstance(link, list):
if options.get(old_arg) and not options.get(new_arg): links = '\n'.join(link)
options[new_arg] = options[old_arg] else:
links = link
options['urls'] = link torrent_data = {}
torrent_data['urls'] = (None, links)
# workaround to send multipart/formdata request for k, v in kwargs.iteritems():
# http://stackoverflow.com/a/23131823/4726598 torrent_data[k] = (None, v)
dummy_file = {'_dummy': (None, '_dummy')} return self._post('api/v2/torrents/add', data=None, files=torrent_data)
return self._post('command/download', data=options, files=dummy_file)
def download_from_file(self, file_buffer, **kwargs): def download_from_file(self, file_buffer, **kwargs):
""" """
@ -313,18 +304,23 @@ class Client(object):
:return: Empty JSON data. :return: Empty JSON data.
""" """
# qBittorrent requires adds to be done with multipath/form-data
# POST requests for both URLs and .torrent files. Info on this
# can be found here, and here:
# http://docs.python-requests.org/en/master/user/quickstart/#post-a-multipart-encoded-file
# http://docs.python-requests.org/en/master/user/advanced/#post-multiple-multipart-encoded-files
if isinstance(file_buffer, list): if isinstance(file_buffer, list):
torrent_files = {} torrent_data = []
for i, f in enumerate(file_buffer): for f in file_buffer:
torrent_files.update({'torrents%s' % i: f}) fname = f.name
torrent_data.append(('torrents', (fname, f)))
else: else:
torrent_files = {'torrents': file_buffer} fname = file_buffer.name
torrent_data = [('torrents', (fname, file_buffer))]
for k, v in kwargs.iteritems():
torrent_data.append((k, (None, v)))
data = kwargs.copy() return self._post('api/v2/torrents/add', data=None, files=torrent_data)
if data.get('save_path'):
data.update({'savepath': data['save_path']})
return self._post('command/upload', data=data, files=torrent_files)
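As the new comments note, qBittorrent's v2 add endpoint wants a multipart/form-data POST for both URL and .torrent uploads. A standalone requests sketch of that request shape, with host, credentials and paths as placeholders:

```python
import requests

session = requests.Session()
session.post('http://localhost:8080/api/v2/auth/login',
             data={'username': 'admin', 'password': 'adminadmin'})

with open('/path/to/file.torrent', 'rb') as fh:
    # Plain form fields are sent as (None, value) tuples so requests encodes
    # everything into one multipart/form-data body, as the wrapper does above.
    files = [('torrents', ('file.torrent', fh)),
             ('savepath', (None, '/downloads')),
             ('category', (None, 'comics'))]
    session.post('http://localhost:8080/api/v2/torrents/add', files=files)
```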
def add_trackers(self, infohash, trackers): def add_trackers(self, infohash, trackers):
""" """
@ -335,7 +331,7 @@ class Client(object):
""" """
data = {'hash': infohash.lower(), data = {'hash': infohash.lower(),
'urls': trackers} 'urls': trackers}
return self._post('command/addTrackers', data=data) return self._post('api/v2/torrents/addTrackers', data=data)
@staticmethod @staticmethod
def _process_infohash_list(infohash_list): def _process_infohash_list(infohash_list):
@ -356,13 +352,13 @@ class Client(object):
:param infohash: INFO HASH of torrent. :param infohash: INFO HASH of torrent.
""" """
return self._post('command/pause', data={'hash': infohash.lower()}) return self._post('api/v2/torrents/pause', data={'hashes': infohash.lower()})
def pause_all(self): def pause_all(self):
""" """
Pause all torrents. Pause all torrents.
""" """
return self._get('command/pauseAll') return self._post('api/v2/torrents/pause', data={'hashes': 'all'})
def pause_multiple(self, infohash_list): def pause_multiple(self, infohash_list):
""" """
@ -371,18 +367,7 @@ class Client(object):
:param infohash_list: Single or list() of infohashes. :param infohash_list: Single or list() of infohashes.
""" """
data = self._process_infohash_list(infohash_list) data = self._process_infohash_list(infohash_list)
return self._post('command/pauseAll', data=data) return self._post('api/v2/torrents/pause', data=data)
def set_label(self, infohash_list, label):
"""
Set the label on multiple torrents.
IMPORTANT: OLD API method, kept as it is to avoid breaking stuffs.
:param infohash_list: Single or list() of infohashes.
"""
data = self._process_infohash_list(infohash_list)
data['label'] = label
return self._post('command/setLabel', data=data)
def set_category(self, infohash_list, category): def set_category(self, infohash_list, category):
""" """
@ -392,7 +377,7 @@ class Client(object):
""" """
data = self._process_infohash_list(infohash_list) data = self._process_infohash_list(infohash_list)
data['category'] = category data['category'] = category
return self._post('command/setCategory', data=data) return self._post('api/v2/torrents/setCategory', data=data)
def resume(self, infohash): def resume(self, infohash):
""" """
@ -400,13 +385,13 @@ class Client(object):
:param infohash: INFO HASH of torrent. :param infohash: INFO HASH of torrent.
""" """
return self._post('command/resume', data={'hash': infohash.lower()}) return self._post('api/v2/torrents/resume', data={'hashes': infohash.lower()})
def resume_all(self): def resume_all(self):
""" """
Resume all torrents. Resume all torrents.
""" """
return self._get('command/resumeAll') return self._post('api/v2/torrents/resume', data={'hashes': 'all'})
def resume_multiple(self, infohash_list): def resume_multiple(self, infohash_list):
""" """
@ -415,7 +400,7 @@ class Client(object):
:param infohash_list: Single or list() of infohashes. :param infohash_list: Single or list() of infohashes.
""" """
data = self._process_infohash_list(infohash_list) data = self._process_infohash_list(infohash_list)
return self._post('command/resumeAll', data=data) return self._post('api/v2/torrents/resume', data=data)
def delete(self, infohash_list): def delete(self, infohash_list):
""" """
@ -424,16 +409,21 @@ class Client(object):
:param infohash_list: Single or list() of infohashes. :param infohash_list: Single or list() of infohashes.
""" """
data = self._process_infohash_list(infohash_list) data = self._process_infohash_list(infohash_list)
return self._post('command/delete', data=data) data['deleteFiles'] = 'false'
return self._post('api/v2/torrents/delete', data=data)
def delete_permanently(self, infohash_list): def delete_permanently(self, infohash_list):
""" """
Permanently delete torrents. Permanently delete torrents.
*** WARNING : This will instruct qBittorrent to delete files
*** from your hard disk. Use with caution.
:param infohash_list: Single or list() of infohashes. :param infohash_list: Single or list() of infohashes.
""" """
data = self._process_infohash_list(infohash_list) data = self._process_infohash_list(infohash_list)
return self._post('command/deletePerm', data=data) data['deleteFiles'] = 'true'
return self._post('api/v2/torrents/delete', data=data)
def recheck(self, infohash_list): def recheck(self, infohash_list):
""" """
@ -442,7 +432,7 @@ class Client(object):
:param infohash_list: Single or list() of infohashes. :param infohash_list: Single or list() of infohashes.
""" """
data = self._process_infohash_list(infohash_list) data = self._process_infohash_list(infohash_list)
return self._post('command/recheck', data=data) return self._post('api/v2/torrents/recheck', data=data)
def increase_priority(self, infohash_list): def increase_priority(self, infohash_list):
""" """
@ -451,7 +441,7 @@ class Client(object):
:param infohash_list: Single or list() of infohashes. :param infohash_list: Single or list() of infohashes.
""" """
data = self._process_infohash_list(infohash_list) data = self._process_infohash_list(infohash_list)
return self._post('command/increasePrio', data=data) return self._post('api/v2/torrents/increasePrio', data=data)
def decrease_priority(self, infohash_list): def decrease_priority(self, infohash_list):
""" """
@ -460,7 +450,7 @@ class Client(object):
:param infohash_list: Single or list() of infohashes. :param infohash_list: Single or list() of infohashes.
""" """
data = self._process_infohash_list(infohash_list) data = self._process_infohash_list(infohash_list)
return self._post('command/decreasePrio', data=data) return self._post('api/v2/torrents/decreasePrio', data=data)
def set_max_priority(self, infohash_list): def set_max_priority(self, infohash_list):
""" """
@ -469,7 +459,7 @@ class Client(object):
:param infohash_list: Single or list() of infohashes. :param infohash_list: Single or list() of infohashes.
""" """
data = self._process_infohash_list(infohash_list) data = self._process_infohash_list(infohash_list)
return self._post('command/topPrio', data=data) return self._post('api/v2/torrents/topPrio', data=data)
def set_min_priority(self, infohash_list): def set_min_priority(self, infohash_list):
""" """
@ -478,7 +468,7 @@ class Client(object):
:param infohash_list: Single or list() of infohashes. :param infohash_list: Single or list() of infohashes.
""" """
data = self._process_infohash_list(infohash_list) data = self._process_infohash_list(infohash_list)
return self._post('command/bottomPrio', data=data) return self._post('api/v2/torrents/bottomPrio', data=data)
def set_file_priority(self, infohash, file_id, priority): def set_file_priority(self, infohash, file_id, priority):
""" """
@ -488,7 +478,7 @@ class Client(object):
:param file_id: ID of the file to set priority. :param file_id: ID of the file to set priority.
:param priority: Priority level of the file. :param priority: Priority level of the file.
""" """
if priority not in [0, 1, 2, 7]: if priority not in [0, 1, 6, 7]:
raise ValueError("Invalid priority, refer WEB-UI docs for info.") raise ValueError("Invalid priority, refer WEB-UI docs for info.")
elif not isinstance(file_id, int): elif not isinstance(file_id, int):
raise TypeError("File ID must be an int") raise TypeError("File ID must be an int")
@ -497,7 +487,7 @@ class Client(object):
'id': file_id, 'id': file_id,
'priority': priority} 'priority': priority}
return self._post('command/setFilePrio', data=data) return self._post('api/v2/torrents/filePrio', data=data)
# Get-set global download and upload speed limits. # Get-set global download and upload speed limits.
@ -505,7 +495,7 @@ class Client(object):
""" """
Get global download speed limit. Get global download speed limit.
""" """
return self._get('command/getGlobalDlLimit') return self._get('api/v2/transfer/downloadLimit')
def set_global_download_limit(self, limit): def set_global_download_limit(self, limit):
""" """
@ -513,7 +503,7 @@ class Client(object):
:param limit: Speed limit in bytes. :param limit: Speed limit in bytes.
""" """
return self._post('command/setGlobalDlLimit', data={'limit': limit}) return self._post('api/v2/transfer/setDownloadLimit', data={'limit': limit})
global_download_limit = property(get_global_download_limit, global_download_limit = property(get_global_download_limit,
set_global_download_limit) set_global_download_limit)
@ -522,7 +512,7 @@ class Client(object):
""" """
Get global upload speed limit. Get global upload speed limit.
""" """
return self._get('command/getGlobalUpLimit') return self._get('api/v2/transfer/uploadLimit')
def set_global_upload_limit(self, limit): def set_global_upload_limit(self, limit):
""" """
@ -530,7 +520,7 @@ class Client(object):
:param limit: Speed limit in bytes. :param limit: Speed limit in bytes.
""" """
return self._post('command/setGlobalUpLimit', data={'limit': limit}) return self._post('api/v2/transfer/setUploadLimit', data={'limit': limit})
global_upload_limit = property(get_global_upload_limit, global_upload_limit = property(get_global_upload_limit,
set_global_upload_limit) set_global_upload_limit)
@ -543,7 +533,7 @@ class Client(object):
:param infohash_list: Single or list() of infohashes. :param infohash_list: Single or list() of infohashes.
""" """
data = self._process_infohash_list(infohash_list) data = self._process_infohash_list(infohash_list)
return self._post('command/getTorrentsDlLimit', data=data) return self._post('api/v2/torrents/downloadLimit', data=data)
def set_torrent_download_limit(self, infohash_list, limit): def set_torrent_download_limit(self, infohash_list, limit):
""" """
@ -554,7 +544,7 @@ class Client(object):
""" """
data = self._process_infohash_list(infohash_list) data = self._process_infohash_list(infohash_list)
data.update({'limit': limit}) data.update({'limit': limit})
return self._post('command/setTorrentsDlLimit', data=data) return self._post('api/v2/torrents/setDownloadLimit', data=data)
def get_torrent_upload_limit(self, infohash_list): def get_torrent_upload_limit(self, infohash_list):
""" """
@ -563,7 +553,7 @@ class Client(object):
:param infohash_list: Single or list() of infohashes. :param infohash_list: Single or list() of infohashes.
""" """
data = self._process_infohash_list(infohash_list) data = self._process_infohash_list(infohash_list)
return self._post('command/getTorrentsUpLimit', data=data) return self._post('api/v2/torrents/uploadLimit', data=data)
def set_torrent_upload_limit(self, infohash_list, limit): def set_torrent_upload_limit(self, infohash_list, limit):
""" """
@ -574,26 +564,26 @@ class Client(object):
""" """
data = self._process_infohash_list(infohash_list) data = self._process_infohash_list(infohash_list)
data.update({'limit': limit}) data.update({'limit': limit})
return self._post('command/setTorrentsUpLimit', data=data) return self._post('api/v2/torrents/setUploadLimit', data=data)
# setting preferences # setting preferences
def set_preferences(self, **kwargs): def set_preferences(self, **kwargs):
""" """
Set preferences of qBittorrent. Set preferences of qBittorrent.
Read all possible preferences @ http://git.io/vEgDQ Read all possible preferences @ https://git.io/fx2Y9
:param kwargs: set preferences in kwargs form. :param kwargs: set preferences in kwargs form.
""" """
json_data = "json={}".format(json.dumps(kwargs)) json_data = "json={}".format(json.dumps(kwargs))
headers = {'content-type': 'application/x-www-form-urlencoded'} headers = {'content-type': 'application/x-www-form-urlencoded'}
return self._post('command/setPreferences', data=json_data, return self._post('api/v2/app/setPreferences', data=json_data,
headers=headers) headers=headers)
def get_alternative_speed_status(self): def get_alternative_speed_status(self):
""" """
Get Alternative speed limits. (1/0) Get Alternative speed limits. (1/0)
""" """
return self._get('command/alternativeSpeedLimitsEnabled') return self._get('api/v2/transfer/speedLimitsMode')
alternative_speed_status = property(get_alternative_speed_status) alternative_speed_status = property(get_alternative_speed_status)
@ -601,7 +591,7 @@ class Client(object):
""" """
Toggle alternative speed limits. Toggle alternative speed limits.
""" """
return self._get('command/toggleAlternativeSpeedLimits') return self._get('api/v2/transfer/toggleSpeedLimitsMode')
def toggle_sequential_download(self, infohash_list): def toggle_sequential_download(self, infohash_list):
""" """
@ -610,7 +600,7 @@ class Client(object):
:param infohash_list: Single or list() of infohashes. :param infohash_list: Single or list() of infohashes.
""" """
data = self._process_infohash_list(infohash_list) data = self._process_infohash_list(infohash_list)
return self._post('command/toggleSequentialDownload', data=data) return self._post('api/v2/torrents/toggleSequentialDownload', data=data)
def toggle_first_last_piece_priority(self, infohash_list): def toggle_first_last_piece_priority(self, infohash_list):
""" """
@ -619,7 +609,7 @@ class Client(object):
:param infohash_list: Single or list() of infohashes. :param infohash_list: Single or list() of infohashes.
""" """
data = self._process_infohash_list(infohash_list) data = self._process_infohash_list(infohash_list)
return self._post('command/toggleFirstLastPiecePrio', data=data) return self._post('api/v2/torrents/toggleFirstLastPiecePrio', data=data)
def force_start(self, infohash_list, value=True): def force_start(self, infohash_list, value=True):
""" """
@ -630,4 +620,4 @@ class Client(object):
""" """
data = self._process_infohash_list(infohash_list) data = self._process_infohash_list(infohash_list)
data.update({'value': json.dumps(value)}) data.update({'value': json.dumps(value)})
return self._post('command/setForceStart', data=data) return self._post('api/v2/torrents/setForceStart', data=data)
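Taken together, the wrapper now targets the qBittorrent Web API v2 introduced in qBittorrent 4.1. A minimal usage sketch of the updated client is shown below; the import path, URL and credentials are placeholders, and the module's actual location inside Mylar may differ.

```python
from client import Client  # placeholder import path

qb = Client('http://localhost:8080/')   # keep the trailing slash: endpoints are appended directly
qb.login('admin', 'adminadmin')

# Extra keyword arguments become multipart form fields on /api/v2/torrents/add.
qb.download_from_link('magnet:?xt=urn:btih:...', savepath='/downloads', category='comics')

for torrent in qb.torrents(filter='downloading'):
    print(torrent['name'])

qb.pause('INFOHASH')   # /api/v2/torrents/pause with hashes=<infohash>
qb.logout()
```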

(File diff suppressed because it is too large)


@ -123,10 +123,13 @@ USE_WATCHDIR = False
SNPOOL = None SNPOOL = None
NZBPOOL = None NZBPOOL = None
SEARCHPOOL = None SEARCHPOOL = None
PPPOOL = None
DDLPOOL = None
SNATCHED_QUEUE = Queue.Queue() SNATCHED_QUEUE = Queue.Queue()
NZB_QUEUE = Queue.Queue() NZB_QUEUE = Queue.Queue()
PP_QUEUE = Queue.Queue() PP_QUEUE = Queue.Queue()
SEARCH_QUEUE = Queue.Queue() SEARCH_QUEUE = Queue.Queue()
DDL_QUEUE = Queue.Queue()
SEARCH_TIER_DATE = None SEARCH_TIER_DATE = None
COMICSORT = None COMICSORT = None
PULLBYFILE = False PULLBYFILE = False
@ -142,6 +145,7 @@ LOCAL_IP = None
DOWNLOAD_APIKEY = None DOWNLOAD_APIKEY = None
APILOCK = False APILOCK = False
SEARCHLOCK = False SEARCHLOCK = False
DDL_LOCK = False
CMTAGGER_PATH = None CMTAGGER_PATH = None
STATIC_COMICRN_VERSION = "1.01" STATIC_COMICRN_VERSION = "1.01"
STATIC_APC_VERSION = "2.04" STATIC_APC_VERSION = "2.04"
@ -162,11 +166,11 @@ def initialize(config_file):
with INIT_LOCK: with INIT_LOCK:
global CONFIG, _INITIALIZED, QUIET, CONFIG_FILE, OS_DETECT, MAINTENANCE, CURRENT_VERSION, LATEST_VERSION, COMMITS_BEHIND, INSTALL_TYPE, IMPORTLOCK, PULLBYFILE, INKDROPS_32P, \ global CONFIG, _INITIALIZED, QUIET, CONFIG_FILE, OS_DETECT, MAINTENANCE, CURRENT_VERSION, LATEST_VERSION, COMMITS_BEHIND, INSTALL_TYPE, IMPORTLOCK, PULLBYFILE, INKDROPS_32P, \
DONATEBUTTON, CURRENT_WEEKNUMBER, CURRENT_YEAR, UMASK, USER_AGENT, SNATCHED_QUEUE, NZB_QUEUE, PP_QUEUE, SEARCH_QUEUE, PULLNEW, COMICSORT, WANTED_TAB_OFF, CV_HEADERS, \ DONATEBUTTON, CURRENT_WEEKNUMBER, CURRENT_YEAR, UMASK, USER_AGENT, SNATCHED_QUEUE, NZB_QUEUE, PP_QUEUE, SEARCH_QUEUE, DDL_QUEUE, PULLNEW, COMICSORT, WANTED_TAB_OFF, CV_HEADERS, \
IMPORTBUTTON, IMPORT_FILES, IMPORT_TOTALFILES, IMPORT_CID_COUNT, IMPORT_PARSED_COUNT, IMPORT_FAILURE_COUNT, CHECKENABLED, CVURL, DEMURL, WWTURL, WWT_CF_COOKIEVALUE, \ IMPORTBUTTON, IMPORT_FILES, IMPORT_TOTALFILES, IMPORT_CID_COUNT, IMPORT_PARSED_COUNT, IMPORT_FAILURE_COUNT, CHECKENABLED, CVURL, DEMURL, WWTURL, WWT_CF_COOKIEVALUE, \
USE_SABNZBD, USE_NZBGET, USE_BLACKHOLE, USE_RTORRENT, USE_UTORRENT, USE_QBITTORRENT, USE_DELUGE, USE_TRANSMISSION, USE_WATCHDIR, SAB_PARAMS, \ USE_SABNZBD, USE_NZBGET, USE_BLACKHOLE, USE_RTORRENT, USE_UTORRENT, USE_QBITTORRENT, USE_DELUGE, USE_TRANSMISSION, USE_WATCHDIR, SAB_PARAMS, \
PROG_DIR, DATA_DIR, CMTAGGER_PATH, DOWNLOAD_APIKEY, LOCAL_IP, STATIC_COMICRN_VERSION, STATIC_APC_VERSION, KEYS_32P, AUTHKEY_32P, FEED_32P, FEEDINFO_32P, \ PROG_DIR, DATA_DIR, CMTAGGER_PATH, DOWNLOAD_APIKEY, LOCAL_IP, STATIC_COMICRN_VERSION, STATIC_APC_VERSION, KEYS_32P, AUTHKEY_32P, FEED_32P, FEEDINFO_32P, \
MONITOR_STATUS, SEARCH_STATUS, RSS_STATUS, WEEKLY_STATUS, VERSION_STATUS, UPDATER_STATUS, DBUPDATE_INTERVAL, LOG_LANG, LOG_CHARSET, APILOCK, SEARCHLOCK, LOG_LEVEL, \ MONITOR_STATUS, SEARCH_STATUS, RSS_STATUS, WEEKLY_STATUS, VERSION_STATUS, UPDATER_STATUS, DBUPDATE_INTERVAL, LOG_LANG, LOG_CHARSET, APILOCK, SEARCHLOCK, DDL_LOCK, LOG_LEVEL, \
SCHED_RSS_LAST, SCHED_WEEKLY_LAST, SCHED_MONITOR_LAST, SCHED_SEARCH_LAST, SCHED_VERSION_LAST, SCHED_DBUPDATE_LAST, COMICINFO, SEARCH_TIER_DATE SCHED_RSS_LAST, SCHED_WEEKLY_LAST, SCHED_MONITOR_LAST, SCHED_SEARCH_LAST, SCHED_VERSION_LAST, SCHED_DBUPDATE_LAST, COMICINFO, SEARCH_TIER_DATE
cc = mylar.config.Config(config_file) cc = mylar.config.Config(config_file)
@ -367,6 +371,9 @@ def start():
search_diff = datetime.datetime.utcfromtimestamp(helpers.utctimestamp() + ((int(CONFIG.SEARCH_INTERVAL) * 60) - (duration_diff*60))) search_diff = datetime.datetime.utcfromtimestamp(helpers.utctimestamp() + ((int(CONFIG.SEARCH_INTERVAL) * 60) - (duration_diff*60)))
logger.fdebug('[AUTO-SEARCH] Scheduling next run @ %s every %s minutes' % (search_diff, CONFIG.SEARCH_INTERVAL)) logger.fdebug('[AUTO-SEARCH] Scheduling next run @ %s every %s minutes' % (search_diff, CONFIG.SEARCH_INTERVAL))
SCHED.add_job(func=ss.run, id='search', name='Auto-Search', next_run_time=search_diff, trigger=IntervalTrigger(hours=0, minutes=CONFIG.SEARCH_INTERVAL, timezone='UTC')) SCHED.add_job(func=ss.run, id='search', name='Auto-Search', next_run_time=search_diff, trigger=IntervalTrigger(hours=0, minutes=CONFIG.SEARCH_INTERVAL, timezone='UTC'))
else:
ss = searchit.CurrentSearcher()
SCHED.add_job(func=ss.run, id='search', name='Auto-Search', next_run_time=None, trigger=IntervalTrigger(hours=0, minutes=CONFIG.SEARCH_INTERVAL, timezone='UTC'))
if all([CONFIG.ENABLE_TORRENTS, CONFIG.AUTO_SNATCH, OS_DETECT != 'Windows']) and any([CONFIG.TORRENT_DOWNLOADER == 2, CONFIG.TORRENT_DOWNLOADER == 4]): if all([CONFIG.ENABLE_TORRENTS, CONFIG.AUTO_SNATCH, OS_DETECT != 'Windows']) and any([CONFIG.TORRENT_DOWNLOADER == 2, CONFIG.TORRENT_DOWNLOADER == 4]):
logger.info('[AUTO-SNATCHER] Auto-Snatch of completed torrents enabled & attempting to background load....') logger.info('[AUTO-SNATCHER] Auto-Snatch of completed torrents enabled & attempting to background load....')
@ -390,12 +397,18 @@ def start():
SEARCHPOOL = threading.Thread(target=helpers.search_queue, args=(SEARCH_QUEUE,), name="SEARCH-QUEUE") SEARCHPOOL = threading.Thread(target=helpers.search_queue, args=(SEARCH_QUEUE,), name="SEARCH-QUEUE")
SEARCHPOOL.start() SEARCHPOOL.start()
if all([CONFIG.POST_PROCESSING is True, CONFIG.API_ENABLED is True]): if CONFIG.POST_PROCESSING is True:
logger.info('[POST-PROCESS-QUEUE] Post Process queue enabled & monitoring for api requests....') logger.info('[POST-PROCESS-QUEUE] Post Process queue enabled & monitoring for api requests....')
PPPOOL = threading.Thread(target=helpers.postprocess_main, args=(PP_QUEUE,), name="POST-PROCESS-QUEUE") PPPOOL = threading.Thread(target=helpers.postprocess_main, args=(PP_QUEUE,), name="POST-PROCESS-QUEUE")
PPPOOL.start() PPPOOL.start()
logger.info('[POST-PROCESS-QUEUE] Succesfully started Post-Processing Queuer....') logger.info('[POST-PROCESS-QUEUE] Succesfully started Post-Processing Queuer....')
if CONFIG.ENABLE_DDL is True:
logger.info('[DDL-QUEUE] DDL Download queue enabled & monitoring for requests....')
DDLPOOL = threading.Thread(target=helpers.ddl_downloader, args=(DDL_QUEUE,), name="DDL-QUEUE")
DDLPOOL.start()
logger.info('[DDL-QUEUE] Succesfully started DDL Download Queuer....')
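The new DDL pool mirrors the other worker threads: a consumer that blocks on the queue and stops when it receives the 'exit' sentinel used in halt() further down. The actual helpers.ddl_downloader body is not part of this hunk; the sketch below only illustrates the assumed shape of such a consumer.

```python
import Queue  # Python 2 module, matching the queues created above

def ddl_downloader(queue):
    # Illustrative consumer loop; not the actual helpers.ddl_downloader.
    while True:
        item = queue.get(True)   # block until a request (or the 'exit' sentinel) arrives
        if item == 'exit':
            break
        # handle the queued request here: fetch the link, verify the remote
        # filesize, then hand the result off to post-processing.

# e.g. DDL_QUEUE = Queue.Queue(); DDL_QUEUE.put({'link': '...', 'issueid': '...'})
```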
helpers.latestdate_fix() helpers.latestdate_fix()
if CONFIG.ALT_PULL == 2: if CONFIG.ALT_PULL == 2:
@ -491,7 +504,7 @@ def dbcheck():
c.execute('SELECT ReleaseDate from storyarcs') c.execute('SELECT ReleaseDate from storyarcs')
except sqlite3.OperationalError: except sqlite3.OperationalError:
try: try:
c.execute('CREATE TABLE IF NOT EXISTS storyarcs(StoryArcID TEXT, ComicName TEXT, IssueNumber TEXT, SeriesYear TEXT, IssueYEAR TEXT, StoryArc TEXT, TotalIssues TEXT, Status TEXT, inCacheDir TEXT, Location TEXT, IssueArcID TEXT, ReadingOrder INT, IssueID TEXT, ComicID TEXT, ReleaseDate TEXT, IssueDate TEXT, Publisher TEXT, IssuePublisher TEXT, IssueName TEXT, CV_ArcID TEXT, Int_IssueNumber INT, DynamicComicName TEXT, Volume TEXT, Manual TEXT, DateAdded TEXT, DigitalDate TEXT)') c.execute('CREATE TABLE IF NOT EXISTS storyarcs(StoryArcID TEXT, ComicName TEXT, IssueNumber TEXT, SeriesYear TEXT, IssueYEAR TEXT, StoryArc TEXT, TotalIssues TEXT, Status TEXT, inCacheDir TEXT, Location TEXT, IssueArcID TEXT, ReadingOrder INT, IssueID TEXT, ComicID TEXT, ReleaseDate TEXT, IssueDate TEXT, Publisher TEXT, IssuePublisher TEXT, IssueName TEXT, CV_ArcID TEXT, Int_IssueNumber INT, DynamicComicName TEXT, Volume TEXT, Manual TEXT, DateAdded TEXT, DigitalDate TEXT, Type TEXT, Aliases TEXT)')
c.execute('INSERT INTO storyarcs(StoryArcID, ComicName, IssueNumber, SeriesYear, IssueYEAR, StoryArc, TotalIssues, Status, inCacheDir, Location, IssueArcID, ReadingOrder, IssueID, ComicID, ReleaseDate, IssueDate, Publisher, IssuePublisher, IssueName, CV_ArcID, Int_IssueNumber, DynamicComicName, Volume, Manual) SELECT StoryArcID, ComicName, IssueNumber, SeriesYear, IssueYEAR, StoryArc, TotalIssues, Status, inCacheDir, Location, IssueArcID, ReadingOrder, IssueID, ComicID, StoreDate, IssueDate, Publisher, IssuePublisher, IssueName, CV_ArcID, Int_IssueNumber, DynamicComicName, Volume, Manual FROM readinglist') c.execute('INSERT INTO storyarcs(StoryArcID, ComicName, IssueNumber, SeriesYear, IssueYEAR, StoryArc, TotalIssues, Status, inCacheDir, Location, IssueArcID, ReadingOrder, IssueID, ComicID, ReleaseDate, IssueDate, Publisher, IssuePublisher, IssueName, CV_ArcID, Int_IssueNumber, DynamicComicName, Volume, Manual) SELECT StoryArcID, ComicName, IssueNumber, SeriesYear, IssueYEAR, StoryArc, TotalIssues, Status, inCacheDir, Location, IssueArcID, ReadingOrder, IssueID, ComicID, StoreDate, IssueDate, Publisher, IssuePublisher, IssueName, CV_ArcID, Int_IssueNumber, DynamicComicName, Volume, Manual FROM readinglist')
c.execute('DROP TABLE readinglist') c.execute('DROP TABLE readinglist')
except sqlite3.OperationalError: except sqlite3.OperationalError:
@ -514,7 +527,8 @@ def dbcheck():
c.execute('CREATE TABLE IF NOT EXISTS oneoffhistory (ComicName TEXT, IssueNumber TEXT, ComicID TEXT, IssueID TEXT, Status TEXT, weeknumber TEXT, year TEXT)') c.execute('CREATE TABLE IF NOT EXISTS oneoffhistory (ComicName TEXT, IssueNumber TEXT, ComicID TEXT, IssueID TEXT, Status TEXT, weeknumber TEXT, year TEXT)')
c.execute('CREATE TABLE IF NOT EXISTS jobhistory (JobName TEXT, prev_run_datetime timestamp, prev_run_timestamp REAL, next_run_datetime timestamp, next_run_timestamp REAL, last_run_completed TEXT, successful_completions TEXT, failed_completions TEXT, status TEXT)') c.execute('CREATE TABLE IF NOT EXISTS jobhistory (JobName TEXT, prev_run_datetime timestamp, prev_run_timestamp REAL, next_run_datetime timestamp, next_run_timestamp REAL, last_run_completed TEXT, successful_completions TEXT, failed_completions TEXT, status TEXT)')
c.execute('CREATE TABLE IF NOT EXISTS manualresults (provider TEXT, id TEXT, kind TEXT, comicname TEXT, volume TEXT, oneoff TEXT, fullprov TEXT, issuenumber TEXT, modcomicname TEXT, name TEXT, link TEXT, size TEXT, pack_numbers TEXT, pack_issuelist TEXT, comicyear TEXT, issuedate TEXT, tmpprov TEXT, pack TEXT, issueid TEXT, comicid TEXT, sarc TEXT, issuearcid TEXT)') c.execute('CREATE TABLE IF NOT EXISTS manualresults (provider TEXT, id TEXT, kind TEXT, comicname TEXT, volume TEXT, oneoff TEXT, fullprov TEXT, issuenumber TEXT, modcomicname TEXT, name TEXT, link TEXT, size TEXT, pack_numbers TEXT, pack_issuelist TEXT, comicyear TEXT, issuedate TEXT, tmpprov TEXT, pack TEXT, issueid TEXT, comicid TEXT, sarc TEXT, issuearcid TEXT)')
c.execute('CREATE TABLE IF NOT EXISTS storyarcs(StoryArcID TEXT, ComicName TEXT, IssueNumber TEXT, SeriesYear TEXT, IssueYEAR TEXT, StoryArc TEXT, TotalIssues TEXT, Status TEXT, inCacheDir TEXT, Location TEXT, IssueArcID TEXT, ReadingOrder INT, IssueID TEXT, ComicID TEXT, ReleaseDate TEXT, IssueDate TEXT, Publisher TEXT, IssuePublisher TEXT, IssueName TEXT, CV_ArcID TEXT, Int_IssueNumber INT, DynamicComicName TEXT, Volume TEXT, Manual TEXT, DateAdded TEXT, DigitalDate TEXT)') c.execute('CREATE TABLE IF NOT EXISTS storyarcs(StoryArcID TEXT, ComicName TEXT, IssueNumber TEXT, SeriesYear TEXT, IssueYEAR TEXT, StoryArc TEXT, TotalIssues TEXT, Status TEXT, inCacheDir TEXT, Location TEXT, IssueArcID TEXT, ReadingOrder INT, IssueID TEXT, ComicID TEXT, ReleaseDate TEXT, IssueDate TEXT, Publisher TEXT, IssuePublisher TEXT, IssueName TEXT, CV_ArcID TEXT, Int_IssueNumber INT, DynamicComicName TEXT, Volume TEXT, Manual TEXT, DateAdded TEXT, DigitalDate TEXT, Type TEXT, Aliases TEXT)')
c.execute('CREATE TABLE IF NOT EXISTS ddl_info (ID TEXT UNIQUE, series TEXT, year TEXT, filename TEXT, size TEXT, issueid TEXT, comicid TEXT, link TEXT, status TEXT, remote_filesize TEXT, updated_date TEXT, mainlink TEXT)')
conn.commit conn.commit
c.close c.close
@ -1024,6 +1038,16 @@ def dbcheck():
except sqlite3.OperationalError: except sqlite3.OperationalError:
c.execute('ALTER TABLE storyarcs ADD COLUMN DigitalDate TEXT') c.execute('ALTER TABLE storyarcs ADD COLUMN DigitalDate TEXT')
try:
c.execute('SELECT Type from storyarcs')
except sqlite3.OperationalError:
c.execute('ALTER TABLE storyarcs ADD COLUMN Type TEXT')
try:
c.execute('SELECT Aliases from storyarcs')
except sqlite3.OperationalError:
c.execute('ALTER TABLE storyarcs ADD COLUMN Aliases TEXT')
## -- searchresults Table -- ## -- searchresults Table --
try: try:
c.execute('SELECT SRID from searchresults') c.execute('SELECT SRID from searchresults')
@ -1075,6 +1099,22 @@ def dbcheck():
except sqlite3.OperationalError: except sqlite3.OperationalError:
c.execute('ALTER TABLE jobhistory ADD COLUMN status TEXT') c.execute('ALTER TABLE jobhistory ADD COLUMN status TEXT')
## -- DDL_info Table --
try:
c.execute('SELECT remote_filesize from ddl_info')
except sqlite3.OperationalError:
c.execute('ALTER TABLE ddl_info ADD COLUMN remote_filesize TEXT')
try:
c.execute('SELECT updated_date from ddl_info')
except sqlite3.OperationalError:
c.execute('ALTER TABLE ddl_info ADD COLUMN updated_date TEXT')
try:
c.execute('SELECT mainlink from ddl_info')
except sqlite3.OperationalError:
c.execute('ALTER TABLE ddl_info ADD COLUMN mainlink TEXT')
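These blocks reuse dbcheck()'s existing probe-then-ALTER idiom: attempt a SELECT on the column and add it only when SQLite raises OperationalError. The same pattern could be factored into a small helper; the helper name below is illustrative, not something dbcheck() actually defines.

```python
import sqlite3

def ensure_column(cursor, table, column, coltype='TEXT'):
    # Illustrative helper for the probe-then-ALTER migration idiom used above.
    try:
        cursor.execute('SELECT %s FROM %s' % (column, table))
    except sqlite3.OperationalError:
        cursor.execute('ALTER TABLE %s ADD COLUMN %s %s' % (table, column, coltype))

# e.g. ensure_column(c, 'ddl_info', 'remote_filesize')
```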
#if it's prior to Wednesday, the issue counts will be inflated by one as the online db's everywhere #if it's prior to Wednesday, the issue counts will be inflated by one as the online db's everywhere
#prepare for the next 'new' release of a series. It's caught in updater.py, so let's just store the #prepare for the next 'new' release of a series. It's caught in updater.py, so let's just store the
#value in the sql so we can display it in the details screen for everyone to wonder at. #value in the sql so we can display it in the details screen for everyone to wonder at.
@ -1223,6 +1263,29 @@ def halt():
SEARCHPOOL.join(5) SEARCHPOOL.join(5)
except AssertionError: except AssertionError:
os._exit(0) os._exit(0)
if PPPOOL is not None:
logger.info('Terminating the post-processing queue thread.')
try:
PPPOOL.join(10)
logger.info('Joined pool for termination - successful')
except KeyboardInterrupt:
PP_QUEUE.put('exit')
PPPOOL.join(5)
except AssertionError:
os._exit(0)
if DDLPOOL is not None:
logger.info('Terminating the DDL download queue thread.')
try:
DDLPOOL.join(10)
logger.info('Joined pool for termination - successful')
except KeyboardInterrupt:
DDL_QUEUE.put('exit')
DDLPOOL.join(5)
except AssertionError:
os._exit(0)
_INITIALIZED = False _INITIALIZED = False
def shutdown(restart=False, update=False, maintenance=False): def shutdown(restart=False, update=False, maintenance=False):


@ -358,6 +358,11 @@ class Api(object):
else: else:
comicid = kwargs['comicid'] comicid = kwargs['comicid']
if 'ddl' not in kwargs:
ddl = False
else:
ddl = True
if 'apc_version' not in kwargs: if 'apc_version' not in kwargs:
logger.info('Received API Request for PostProcessing %s [%s]. Queueing...' % (self.nzb_name, self.nzb_folder)) logger.info('Received API Request for PostProcessing %s [%s]. Queueing...' % (self.nzb_name, self.nzb_folder))
mylar.PP_QUEUE.put({'nzb_name': self.nzb_name, mylar.PP_QUEUE.put({'nzb_name': self.nzb_name,
@ -365,7 +370,8 @@ class Api(object):
'issueid': issueid, 'issueid': issueid,
'failed': failed, 'failed': failed,
'comicid': comicid, 'comicid': comicid,
'apicall': True}) 'apicall': True,
'ddl': ddl})
self.data = 'Successfully submitted request for post-processing for %s' % self.nzb_name self.data = 'Successfully submitted request for post-processing for %s' % self.nzb_name
#fp = process.Process(self.nzb_name, self.nzb_folder, issueid=issueid, failed=failed, comicid=comicid, apicall=True) #fp = process.Process(self.nzb_name, self.nzb_folder, issueid=issueid, failed=failed, comicid=comicid, apicall=True)
#self.data = fp.post_process() #self.data = fp.post_process()


@ -1,3 +1,18 @@
# This file is part of Mylar.
#
# Mylar is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Mylar is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Mylar. If not, see <http://www.gnu.org/licenses/>.
import urllib2 import urllib2
import json import json
import re import re
@ -13,7 +28,7 @@ import cfscrape
from operator import itemgetter from operator import itemgetter
import mylar import mylar
from mylar import logger, filechecker, helpers from mylar import db, logger, filechecker, helpers
class info32p(object): class info32p(object):
@ -35,7 +50,6 @@ class info32p(object):
self.method = None self.method = None
lses = self.LoginSession(mylar.CONFIG.USERNAME_32P, mylar.CONFIG.PASSWORD_32P) lses = self.LoginSession(mylar.CONFIG.USERNAME_32P, mylar.CONFIG.PASSWORD_32P)
if not lses.login(): if not lses.login():
if not self.test: if not self.test:
logger.error('%s [LOGIN FAILED] Disabling 32P provider until login error(s) can be fixed in order to avoid temporary bans.' % self.module) logger.error('%s [LOGIN FAILED] Disabling 32P provider until login error(s) can be fixed in order to avoid temporary bans.' % self.module)
@ -49,6 +63,7 @@ class info32p(object):
logger.fdebug('%s [LOGIN SUCCESS] Now preparing for the use of 32P keyed authentication...' % self.module) logger.fdebug('%s [LOGIN SUCCESS] Now preparing for the use of 32P keyed authentication...' % self.module)
self.authkey = lses.authkey self.authkey = lses.authkey
self.passkey = lses.passkey self.passkey = lses.passkey
self.session = lses.ses
self.uid = lses.uid self.uid = lses.uid
try: try:
mylar.INKDROPS_32P = int(math.floor(float(lses.inkdrops['results'][0]['inkdrops']))) mylar.INKDROPS_32P = int(math.floor(float(lses.inkdrops['results'][0]['inkdrops'])))
@ -67,26 +82,26 @@ class info32p(object):
feedinfo = [] feedinfo = []
try: try:
with cfscrape.create_scraper() as s: # with cfscrape.create_scraper(delay=15) as s:
s.headers = self.headers # s.headers = self.headers
cj = LWPCookieJar(os.path.join(mylar.CONFIG.SECURE_DIR, ".32p_cookies.dat")) # cj = LWPCookieJar(os.path.join(mylar.CONFIG.SECURE_DIR, ".32p_cookies.dat"))
cj.load() # cj.load()
s.cookies = cj # s.cookies = cj
if mylar.CONFIG.VERIFY_32P == 1 or mylar.CONFIG.VERIFY_32P == True: if mylar.CONFIG.VERIFY_32P == 1 or mylar.CONFIG.VERIFY_32P == True:
verify = True verify = True
else: else:
verify = False verify = False
logger.fdebug('[32P] Verify SSL set to : %s' % verify) # logger.fdebug('[32P] Verify SSL set to : %s' % verify)
if not verify: if not verify:
#32P throws back an insecure warning because it can't validate against the CA. The below suppresses the message just for 32P instead of being displa$ # #32P throws back an insecure warning because it can't validate against the CA. The below suppresses the message just for 32P instead of being displa$
from requests.packages.urllib3.exceptions import InsecureRequestWarning from requests.packages.urllib3.exceptions import InsecureRequestWarning
requests.packages.urllib3.disable_warnings(InsecureRequestWarning) requests.packages.urllib3.disable_warnings(InsecureRequestWarning)
# post to the login form # post to the login form
r = s.post(self.url, verify=verify, allow_redirects=True) r = self.session.post(self.url, verify=verify, allow_redirects=True)
#logger.debug(self.module + " Content session reply" + r.text) #logger.debug(self.module + " Content session reply" + r.text)
@ -246,147 +261,177 @@ class info32p(object):
logger.warn('No results found for search on 32P.') logger.warn('No results found for search on 32P.')
return "no results" return "no results"
with cfscrape.create_scraper() as s: # with cfscrape.create_scraper(delay=15) as s:
s.headers = self.headers # s.headers = self.headers
cj = LWPCookieJar(os.path.join(mylar.CONFIG.SECURE_DIR, ".32p_cookies.dat")) # cj = LWPCookieJar(os.path.join(mylar.CONFIG.SECURE_DIR, ".32p_cookies.dat"))
cj.load() # cj.load()
s.cookies = cj # s.cookies = cj
data = [] data = []
pdata = [] pdata = []
pubmatch = False pubmatch = False
if any([series_search.startswith('0-Day Comics Pack'), torrentid is not None]): if any([series_search.startswith('0-Day Comics Pack'), torrentid is not None]):
data.append({"id": torrentid, data.append({"id": torrentid,
"series": series_search}) "series": series_search})
else: else:
if any([not chk_id, mylar.CONFIG.DEEP_SEARCH_32P is True]): if any([not chk_id, mylar.CONFIG.DEEP_SEARCH_32P is True]):
if mylar.CONFIG.SEARCH_32P is True:
url = 'https://32pag.es/torrents.php' #?action=serieslist&filter=' + series_search #&filter=F
params = {'action': 'serieslist', 'filter': series_search}
time.sleep(1) #just to make sure we don't hammer, 1s pause.
t = self.session.get(url, params=params, verify=True, allow_redirects=True)
soup = BeautifulSoup(t.content, "html.parser")
results = soup.find_all("a", {"class":"object-qtip"},{"data-type":"torrentgroup"})
for r in results:
if mylar.CONFIG.SEARCH_32P is True: if mylar.CONFIG.SEARCH_32P is True:
url = 'https://32pag.es/torrents.php' #?action=serieslist&filter=' + series_search #&filter=F torrentid = r['data-id']
params = {'action': 'serieslist', 'filter': series_search} torrentname = r.findNext(text=True)
time.sleep(1) #just to make sure we don't hammer, 1s pause. torrentname = torrentname.strip()
t = s.get(url, params=params, verify=True, allow_redirects=True) else:
soup = BeautifulSoup(t.content, "html.parser") torrentid = r['id']
results = soup.find_all("a", {"class":"object-qtip"},{"data-type":"torrentgroup"}) torrentname = r['series']
for r in results: as_d = filechecker.FileChecker()
if mylar.CONFIG.SEARCH_32P is True: as_dinfo = as_d.dynamic_replace(torrentname)
torrentid = r['data-id'] seriesresult = re.sub('\|','', as_dinfo['mod_seriesname']).strip()
torrentname = r.findNext(text=True) logger.fdebug('searchresult: %s --- %s [%s]' % (seriesresult, mod_series, publisher_search))
torrentname = torrentname.strip() if seriesresult.lower() == mod_series.lower():
else: logger.fdebug('[MATCH] %s [%s]' % (torrentname, torrentid))
torrentid = r['id'] data.append({"id": torrentid,
torrentname = r['series'] "series": torrentname})
elif publisher_search.lower() in seriesresult.lower():
as_d = filechecker.FileChecker() logger.fdebug('[MATCH] Publisher match.')
as_dinfo = as_d.dynamic_replace(torrentname) tmp_torrentname = re.sub(publisher_search.lower(), '', seriesresult.lower()).strip()
seriesresult = re.sub('\|','', as_dinfo['mod_seriesname']).strip() as_t = filechecker.FileChecker()
logger.fdebug('searchresult: %s --- %s [%s]' % (seriesresult, mod_series, publisher_search)) as_tinfo = as_t.dynamic_replace(tmp_torrentname)
if seriesresult.lower() == mod_series.lower(): if re.sub('\|', '', as_tinfo['mod_seriesname']).strip() == mod_series.lower():
logger.fdebug('[MATCH] %s [%s]' % (torrentname, torrentid)) logger.fdebug('[MATCH] %s [%s]' % (torrentname, torrentid))
data.append({"id": torrentid, pdata.append({"id": torrentid,
"series": torrentname}) "series": torrentname})
elif publisher_search.lower() in seriesresult.lower(): pubmatch = True
logger.fdebug('[MATCH] Publisher match.')
tmp_torrentname = re.sub(publisher_search.lower(), '', seriesresult.lower()).strip()
as_t = filechecker.FileChecker()
as_tinfo = as_t.dynamic_replace(tmp_torrentname)
if re.sub('\|', '', as_tinfo['mod_seriesname']).strip() == mod_series.lower():
logger.fdebug('[MATCH] %s [%s]' % (torrentname, torrentid))
pdata.append({"id": torrentid,
"series": torrentname})
pubmatch = True
logger.fdebug('%s series listed for searching that match.' % len(data)) logger.fdebug('%s series listed for searching that match.' % len(data))
else:
logger.fdebug('Exact series ID already discovered previously. Setting to : %s [%s]' % (chk_id['series'], chk_id['id']))
pdata.append({"id": chk_id['id'],
"series": chk_id['series']})
pubmatch = True
if all([len(data) == 0, len(pdata) == 0]):
return "no results"
else: else:
dataset = [] logger.fdebug('Exact series ID already discovered previously. Setting to : %s [%s]' % (chk_id['series'], chk_id['id']))
if len(data) > 0: pdata.append({"id": chk_id['id'],
dataset += data "series": chk_id['series']})
if len(pdata) > 0: pubmatch = True
dataset += pdata
logger.fdebug(str(len(dataset)) + ' series match the title being searched for on 32P...')
if all([chk_id is None, not series_search.startswith('0-Day Comics Pack'), self.searchterm['torrentid_32p'] is not None, self.searchterm['torrentid_32p'] != 'None']) and any([len(data) == 1, len(pdata) == 1]): if all([len(data) == 0, len(pdata) == 0]):
#update the 32p_reference so we avoid doing a url lookup next time return "no results"
helpers.checkthe_id(comic_id, dataset) else:
dataset = []
if len(data) > 0:
dataset += data
if len(pdata) > 0:
dataset += pdata
logger.fdebug(str(len(dataset)) + ' series match the title being searched for on 32P...')
if all([chk_id is None, not series_search.startswith('0-Day Comics Pack'), self.searchterm['torrentid_32p'] is not None, self.searchterm['torrentid_32p'] != 'None']) and any([len(data) == 1, len(pdata) == 1]):
#update the 32p_reference so we avoid doing a url lookup next time
helpers.checkthe_id(comic_id, dataset)
else:
if all([not series_search.startswith('0-Day Comics Pack'), self.searchterm['torrentid_32p'] is not None, self.searchterm['torrentid_32p'] != 'None']):
pass
else: else:
if all([not series_search.startswith('0-Day Comics Pack'), self.searchterm['torrentid_32p'] is not None, self.searchterm['torrentid_32p'] != 'None']): logger.debug('Unable to properly verify reference on 32P - will update the 32P reference point once the issue has been successfully matched against.')
pass
else:
logger.debug('Unable to properly verify reference on 32P - will update the 32P reference point once the issue has been successfully matched against.')
results32p = [] results32p = []
resultlist = {} resultlist = {}
for x in dataset: for x in dataset:
#for 0-day packs, issue=week#, volume=month, id=0-day year pack (ie.issue=21&volume=2 for feb.21st) #for 0-day packs, issue=week#, volume=month, id=0-day year pack (ie.issue=21&volume=2 for feb.21st)
payload = {"action": "groupsearch", payload = {"action": "groupsearch",
"id": x['id'], #searchid, "id": x['id'], #searchid,
"issue": issue_search} "issue": issue_search}
#in order to match up against 0-day stuff, volume has to be none at this point #in order to match up against 0-day stuff, volume has to be none at this point
#when doing other searches tho, this should be allowed to go through #when doing other searches tho, this should be allowed to go through
#if all([volume_search != 'None', volume_search is not None]): #if all([volume_search != 'None', volume_search is not None]):
# payload.update({'volume': re.sub('v', '', volume_search).strip()}) # payload.update({'volume': re.sub('v', '', volume_search).strip()})
if series_search.startswith('0-Day Comics Pack'): if series_search.startswith('0-Day Comics Pack'):
payload.update({"volume": volume_search}) payload.update({"volume": volume_search})
payload = json.dumps(payload) payload = json.dumps(payload)
payload = json.loads(payload) payload = json.loads(payload)
logger.fdebug('payload: %s' % payload) logger.fdebug('payload: %s' % payload)
url = 'https://32pag.es/ajax.php' url = 'https://32pag.es/ajax.php'
time.sleep(1) #just to make sure we don't hammer, 1s pause. time.sleep(1) #just to make sure we don't hammer, 1s pause.
try: try:
d = s.get(url, params=payload, verify=True, allow_redirects=True) d = self.session.get(url, params=payload, verify=True, allow_redirects=True)
except Exception as e: except Exception as e:
logger.error('%s [%s] Could not POST URL %s' % (self.module, e, url)) logger.error('%s [%s] Could not POST URL %s' % (self.module, e, url))
try: try:
searchResults = d.json() searchResults = d.json()
except: except Exception as e:
searchResults = d.text searchResults = d.text
logger.debug('%s Search Result did not return valid JSON, falling back on text: %s' % (self.module, searchResults.text)) logger.debug('[%s] %s Search Result did not return valid JSON, falling back on text: %s' % (e, self.module, searchResults.text))
return False return False
if searchResults['status'] == 'success' and searchResults['count'] > 0: if searchResults['status'] == 'success' and searchResults['count'] > 0:
logger.fdebug('successfully retrieved %s search results' % searchResults['count']) logger.fdebug('successfully retrieved %s search results' % searchResults['count'])
for a in searchResults['details']: for a in searchResults['details']:
if series_search.startswith('0-Day Comics Pack'): if series_search.startswith('0-Day Comics Pack'):
title = series_search title = series_search
else: else:
title = self.searchterm['series'] + ' v' + a['volume'] + ' #' + a['issues'] title = self.searchterm['series'] + ' v' + a['volume'] + ' #' + a['issues']
results32p.append({'link': a['id'], results32p.append({'link': a['id'],
'title': title, 'title': title,
'filesize': a['size'], 'filesize': a['size'],
'issues': a['issues'], 'issues': a['issues'],
'pack': a['pack'], 'pack': a['pack'],
'format': a['format'], 'format': a['format'],
'language': a['language'], 'language': a['language'],
'seeders': a['seeders'], 'seeders': a['seeders'],
'leechers': a['leechers'], 'leechers': a['leechers'],
'scanner': a['scanner'], 'scanner': a['scanner'],
'chkit': {'id': x['id'], 'series': x['series']}, 'chkit': {'id': x['id'], 'series': x['series']},
'pubdate': datetime.datetime.fromtimestamp(float(a['upload_time'])).strftime('%a, %d %b %Y %H:%M:%S'), 'pubdate': datetime.datetime.fromtimestamp(float(a['upload_time'])).strftime('%a, %d %b %Y %H:%M:%S'),
'int_pubdate': float(a['upload_time'])}) 'int_pubdate': float(a['upload_time'])})
else:
logger.fdebug('32P did not return any valid search results.')
if len(results32p) > 0:
resultlist['entries'] = sorted(results32p, key=itemgetter('pack','title'), reverse=False)
logger.debug('%s Resultslist: %s' % (self.module, resultlist))
else: else:
resultlist = 'no results' logger.fdebug('32P did not return any valid search results.')
if len(results32p) > 0:
resultlist['entries'] = sorted(results32p, key=itemgetter('pack','title'), reverse=False)
logger.debug('%s Resultslist: %s' % (self.module, resultlist))
else:
resultlist = 'no results'
return resultlist return resultlist
def downloadfile(self, payload, filepath):
url = 'https://32pag.es/torrents.php'
try:
r = self.session.get(url, params=payload, verify=True, stream=True, allow_redirects=True)
except Exception as e:
logger.error('%s [%s] Could not POST URL %s' % ('[32P-DOWNLOADER]', e, url))
return False
if str(r.status_code) != '200':
logger.warn('Unable to download torrent from 32P [Status Code returned: %s]' % r.status_code)
if str(r.status_code) == '404' and site == '32P':
logger.warn('[32P-CACHED_ENTRY] Entry found in 32P cache - incorrect. Torrent has probably been merged into a pack, or another series id. Removing from cache.')
helpers.delete_cache_entry(linkit)
else:
logger.info('content: %s' % r.content)
return False
with open(filepath, 'wb') as f:
for chunk in r.iter_content(chunk_size=1024):
if chunk: # filter out keep-alive new chunks
f.write(chunk)
f.flush()
return True
def delete_cache_entry(self, id):
myDB = db.DBConnection()
myDB.action("DELETE FROM rssdb WHERE link=? AND Site='32P'", [id])
class LoginSession(object): class LoginSession(object):
def __init__(self, un, pw, session_path=None): def __init__(self, un, pw, session_path=None):
''' '''
@ -399,7 +444,7 @@ class info32p(object):
''' '''
self.module = '[32P-AUTHENTICATION]' self.module = '[32P-AUTHENTICATION]'
try: try:
self.ses = cfscrape.create_scraper() self.ses = cfscrape.create_scraper(delay=15)
except Exception as e: except Exception as e:
logger.error('%s Can\'t create session with cfscrape' % self.module) logger.error('%s Can\'t create session with cfscrape' % self.module)
@ -466,7 +511,7 @@ class info32p(object):
if r.status_code != 200: if r.status_code != 200:
if r.status_code == 302: if r.status_code == 302:
newloc = r.headers.get('location', '') newloc = r.headers.get('Location', '')
logger.warn('Got redirect from the POST-ajax action=login GET: %s' % newloc) logger.warn('Got redirect from the POST-ajax action=login GET: %s' % newloc)
self.error = {'status':'redirect-error', 'message':'got redirect from POST-ajax login action : ' + newloc} self.error = {'status':'redirect-error', 'message':'got redirect from POST-ajax login action : ' + newloc}
else: else:
@ -614,16 +659,19 @@ class info32p(object):
if (self.test_skey_valid()): if (self.test_skey_valid()):
logger.fdebug('%s Session key-based login was good.' % self.module) logger.fdebug('%s Session key-based login was good.' % self.module)
self.method = 'Session Cookie retrieved OK.' self.method = 'Session Cookie retrieved OK.'
return True return {'ses': self.ses,
'status': True}
if (self.test_login()): if (self.test_login()):
logger.fdebug('%s Credential-based login was good.' % self.module) logger.fdebug('%s Credential-based login was good.' % self.module)
self.method = 'Credential-based login OK.' self.method = 'Credential-based login OK.'
return True return {'ses': self.ses,
'status': True}
logger.warn('%s Both session key and credential-based logins failed.' % self.module) logger.warn('%s Both session key and credential-based logins failed.' % self.module)
self.method = 'Both session key & credential login failed.' self.method = 'Both session key & credential login failed.'
return False return {'ses': self.ses,
'status': False}
#if __name__ == '__main__': #if __name__ == '__main__':
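
Note that login() now returns a dict of the form {'ses': <cfscrape session>, 'status': <bool>} rather than a bare boolean, while the caller shown earlier still tests it with a plain truthiness check (if not lses.login():), which a non-empty dict will always pass. A minimal sketch, assuming a logger is available, of how a caller could consume the new return value:

def check_32p_login(lses):
    # 'lses' is an info32p.LoginSession instance; login() returns {'ses': ..., 'status': ...}
    verified = lses.login()
    if not verified['status']:
        logger.error('[32P-AUTHENTICATION] login failed: %s' % lses.error)
        return None
    return verified['ses']   # cfscrape session reused for the search/download requests above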

View File

@ -58,8 +58,8 @@ def run(dirName, nzbName=None, issueid=None, comversion=None, manual=None, filen
else: else:
shutil.copy(filepath, new_filepath) shutil.copy(filepath, new_filepath)
filepath = new_filepath filepath = new_filepath
except: except Exception as e:
logger.warn(module + ' Unexpected Error: %s' % sys.exc_info()[0]) logger.warn('%s Unexpected Error: %s [%s]' % (module, sys.exc_info()[0], e))
logger.warn(module + ' Unable to create temporary directory to perform meta-tagging. Processing without metatagging.') logger.warn(module + ' Unable to create temporary directory to perform meta-tagging. Processing without metatagging.')
tidyup(og_filepath, new_filepath, new_folder, manualmeta) tidyup(og_filepath, new_filepath, new_folder, manualmeta)
return "fail" return "fail"

View File

@ -75,6 +75,7 @@ _CONFIG_DEFINITIONS = OrderedDict({
'ALTERNATE_LATEST_SERIES_COVERS': (bool, 'General', False), 'ALTERNATE_LATEST_SERIES_COVERS': (bool, 'General', False),
'SHOW_ICONS': (bool, 'General', False), 'SHOW_ICONS': (bool, 'General', False),
'FORMAT_BOOKTYPE': (bool, 'General', False), 'FORMAT_BOOKTYPE': (bool, 'General', False),
'CLEANUP_CACHE': (bool, 'General', False),
'SECURE_DIR': (str, 'General', None), 'SECURE_DIR': (str, 'General', None),
'RSS_CHECKINTERVAL': (int, 'Scheduler', 20), 'RSS_CHECKINTERVAL': (int, 'Scheduler', 20),
@ -210,6 +211,7 @@ _CONFIG_DEFINITIONS = OrderedDict({
'SAB_PRIORITY': (str, 'SABnzbd', "Default"), 'SAB_PRIORITY': (str, 'SABnzbd', "Default"),
'SAB_TO_MYLAR': (bool, 'SABnzbd', False), 'SAB_TO_MYLAR': (bool, 'SABnzbd', False),
'SAB_DIRECTORY': (str, 'SABnzbd', None), 'SAB_DIRECTORY': (str, 'SABnzbd', None),
'SAB_VERSION': (str, 'SABnzbd', None),
'SAB_CLIENT_POST_PROCESSING': (bool, 'SABnzbd', False), #0/False: ComicRN.py, #1/True: Completed Download Handling 'SAB_CLIENT_POST_PROCESSING': (bool, 'SABnzbd', False), #0/False: ComicRN.py, #1/True: Completed Download Handling
'NZBGET_HOST': (str, 'NZBGet', None), 'NZBGET_HOST': (str, 'NZBGet', None),
@ -346,7 +348,7 @@ _CONFIG_DEFINITIONS = OrderedDict({
'QBITTORRENT_PASSWORD': (str, 'qBittorrent', None), 'QBITTORRENT_PASSWORD': (str, 'qBittorrent', None),
'QBITTORRENT_LABEL': (str, 'qBittorrent', None), 'QBITTORRENT_LABEL': (str, 'qBittorrent', None),
'QBITTORRENT_FOLDER': (str, 'qBittorrent', None), 'QBITTORRENT_FOLDER': (str, 'qBittorrent', None),
'QBITTORRENT_STARTONLOAD': (bool, 'qBittorrent', False), 'QBITTORRENT_LOADACTION': (str, 'qBittorrent', 'default'), #default, force_start, paused
'OPDS_ENABLE': (bool, 'OPDS', False), 'OPDS_ENABLE': (bool, 'OPDS', False),
'OPDS_AUTHENTICATION': (bool, 'OPDS', False), 'OPDS_AUTHENTICATION': (bool, 'OPDS', False),
@ -791,6 +793,26 @@ class Config(object):
logger.error('[SECURE-DIR-MOVE] Unable to move cookies file into secure location. This is a fatal error.') logger.error('[SECURE-DIR-MOVE] Unable to move cookies file into secure location. This is a fatal error.')
sys.exit() sys.exit()
if self.CLEANUP_CACHE is True:
logger.fdebug('[Cache Cleanup] Cache Cleanup initiated. Will delete items from cache that are no longer needed.')
cache_types = ['*.nzb', '*.torrent', '*.zip', '*.html', 'mylar_*']
cntr = 0
for x in cache_types:
for f in glob.glob(os.path.join(self.CACHE_DIR,x)):
try:
if os.path.isdir(f):
shutil.rmtree(f)
else:
os.remove(f)
except Exception as e:
logger.warn('[ERROR] Unable to remove %s from cache. Possible permissions issue?' % f)
cntr+=1
if cntr > 1:
logger.fdebug('[Cache Cleanup] Cache Cleanup finished. Cleaned %s items' % cntr)
else:
logger.fdebug('[Cache Cleanup] Cache Cleanup finished. Nothing to clean!')
if all([self.GRABBAG_DIR is None, self.DESTINATION_DIR is not None]): if all([self.GRABBAG_DIR is None, self.DESTINATION_DIR is not None]):
self.GRABBAG_DIR = os.path.join(self.DESTINATION_DIR, 'Grabbag') self.GRABBAG_DIR = os.path.join(self.DESTINATION_DIR, 'Grabbag')
logger.fdebug('[Grabbag Directory] Setting One-Off directory to default location: %s' % self.GRABBAG_DIR) logger.fdebug('[Grabbag Directory] Setting One-Off directory to default location: %s' % self.GRABBAG_DIR)
@ -816,7 +838,6 @@ class Config(object):
mylar.RSS_STATUS = 'Waiting' mylar.RSS_STATUS = 'Waiting'
elif self.ENABLE_RSS is False and mylar.RSS_STATUS == 'Waiting': elif self.ENABLE_RSS is False and mylar.RSS_STATUS == 'Waiting':
mylar.RSS_STATUS = 'Paused' mylar.RSS_STATUS = 'Paused'
logger.info('self.enable_rss is %s [%s]' % (self.ENABLE_RSS, mylar.RSS_STATUS))
if not helpers.is_number(self.CHMOD_DIR): if not helpers.is_number(self.CHMOD_DIR):
logger.fdebug("CHMOD Directory value is not a valid numeric - please correct. Defaulting to 0777") logger.fdebug("CHMOD Directory value is not a valid numeric - please correct. Defaulting to 0777")
@ -864,8 +885,10 @@ class Config(object):
else: else:
logger.fdebug('Successfully created ComicTagger Settings location.') logger.fdebug('Successfully created ComicTagger Settings location.')
if self.DDL_LOCATION is None: if not self.DDL_LOCATION:
self.DDL_LOCATION = self.CACHE_DIR self.DDL_LOCATION = self.CACHE_DIR
if self.ENABLE_DDL is True:
logger.info('DDL Location set to: %s' % self.DDL_LOCATION)
if self.MODE_32P is False and self.RSSFEED_32P is not None: if self.MODE_32P is False and self.RSSFEED_32P is not None:
mylar.KEYS_32P = self.parse_32pfeed(self.RSSFEED_32P) mylar.KEYS_32P = self.parse_32pfeed(self.RSSFEED_32P)
@ -896,6 +919,12 @@ class Config(object):
elif self.SAB_PRIORITY == "4": self.SAB_PRIORITY = "Paused" elif self.SAB_PRIORITY == "4": self.SAB_PRIORITY = "Paused"
else: self.SAB_PRIORITY = "Default" else: self.SAB_PRIORITY = "Default"
if self.SAB_VERSION is not None:
config.set('SABnzbd', 'sab_version', self.SAB_VERSION)
if int(re.sub("[^0-9]", '', self.SAB_VERSION).strip()) < int(re.sub("[^0-9]", '', '0.8.0').strip()) and self.SAB_CLIENT_POST_PROCESSING is True:
logger.warn('Your SABnzbd client is less than 0.8.0, and does not support Completed Download Handling which is enabled. Disabling CDH.')
self.SAB_CLIENT_POST_PROCESSING = False
mylar.USE_WATCHDIR = False mylar.USE_WATCHDIR = False
mylar.USE_UTORRENT = False mylar.USE_UTORRENT = False
mylar.USE_RTORRENT = False mylar.USE_RTORRENT = False
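
One caveat with the SAB version gate added above: stripping all non-digits collapses the version string into a single integer, so '0.7.20' becomes 720 and would compare as newer than '0.8.0' (080). A minimal sketch of a tuple-based comparison that avoids this, assuming SAB_VERSION is a dotted numeric string (the helper name is illustrative only):

import re

def sab_version_ok(version, minimum='0.8.0'):
    # '0.7.20' -> (0, 7, 20), which correctly sorts below (0, 8, 0)
    def as_tuple(v):
        parts = []
        for piece in v.split('.'):
            digits = re.sub('[^0-9]', '', piece)
            parts.append(int(digits) if digits else 0)
        return tuple(parts)
    return as_tuple(version) >= as_tuple(minimum)

The CDH toggle would then read: if not sab_version_ok(self.SAB_VERSION): self.SAB_CLIENT_POST_PROCESSING = False.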

View File

@ -72,7 +72,7 @@ def pulldetails(comicid, type, issueid=None, offset=1, arclist=None, comicidlist
elif type == 'storyarc': elif type == 'storyarc':
PULLURL = mylar.CVURL + 'story_arcs/?api_key=' + str(comicapi) + '&format=xml&filter=name:' + str(issueid) + '&field_list=cover_date' PULLURL = mylar.CVURL + 'story_arcs/?api_key=' + str(comicapi) + '&format=xml&filter=name:' + str(issueid) + '&field_list=cover_date'
elif type == 'comicyears': elif type == 'comicyears':
PULLURL = mylar.CVURL + 'volumes/?api_key=' + str(comicapi) + '&format=xml&filter=id:' + str(comicidlist) + '&field_list=name,id,start_year,publisher,description,deck&offset=' + str(offset) PULLURL = mylar.CVURL + 'volumes/?api_key=' + str(comicapi) + '&format=xml&filter=id:' + str(comicidlist) + '&field_list=name,id,start_year,publisher,description,deck,aliases&offset=' + str(offset)
elif type == 'import': elif type == 'import':
PULLURL = mylar.CVURL + 'issues/?api_key=' + str(comicapi) + '&format=xml&filter=id:' + (comicidlist) + '&field_list=cover_date,id,issue_number,name,date_last_updated,store_date,volume' + '&offset=' + str(offset) PULLURL = mylar.CVURL + 'issues/?api_key=' + str(comicapi) + '&format=xml&filter=id:' + (comicidlist) + '&field_list=cover_date,id,issue_number,name,date_last_updated,store_date,volume' + '&offset=' + str(offset)
elif type == 'update_dates': elif type == 'update_dates':
@ -271,7 +271,7 @@ def GetComicInfo(comicid, dom, safechk=None):
comic['ComicYear'] = '0000' comic['ComicYear'] = '0000'
#safety check, cause you know, dufus'... #safety check, cause you know, dufus'...
if comic['ComicYear'][-1:] == '-': if any([comic['ComicYear'][-1:] == '-', comic['ComicYear'][-1:] == '?']):
comic['ComicYear'] = comic['ComicYear'][:-1] comic['ComicYear'] = comic['ComicYear'][:-1]
try: try:
@ -340,10 +340,10 @@ def GetComicInfo(comicid, dom, safechk=None):
comic['Type'] = 'TPB' comic['Type'] = 'TPB'
elif 'hardcover' in comic_desc[:60].lower() and 'hardcover can be found' not in comic_desc.lower(): elif 'hardcover' in comic_desc[:60].lower() and 'hardcover can be found' not in comic_desc.lower():
comic['Type'] = 'HC' comic['Type'] = 'HC'
elif any(['one-shot' in comic_desc[:60].lower(), 'one shot' in comic_desc[:60].lower()]) and 'can be found' not in comic_desc.lower(): elif any(['one-shot' in comic_desc[:60].lower(), 'one shot' in comic_desc[:60].lower()]) and any(['can be found' not in comic_desc.lower(), 'following the' not in comic_desc.lower()]):
i = 0 i = 0
comic['Type'] = 'One-Shot' comic['Type'] = 'One-Shot'
avoidwords = ['preceding', 'after the special'] avoidwords = ['preceding', 'after the special', 'following the']
while i < 2: while i < 2:
if i == 0: if i == 0:
cbd = 'one-shot' cbd = 'one-shot'
@ -374,16 +374,22 @@ def GetComicInfo(comicid, dom, safechk=None):
#if it's point form bullets, ignore it cause it's not the current volume stuff. #if it's point form bullets, ignore it cause it's not the current volume stuff.
test_it = desc_soup.find('ul') test_it = desc_soup.find('ul')
if test_it: if test_it:
for x in test_it.findAll('a'): for x in test_it.findAll('li'):
micdrop.append(x['data-ref-id']) if any(['Next' in x.findNext(text=True), 'Previous' in x.findNext(text=True)]):
mic_check = x.find('a')
micdrop.append(mic_check['data-ref-id'])
for fc in desclinks: for fc in desclinks:
#logger.info('fc: %s' % fc) try:
fc_id = fc['data-ref-id'] fc_id = fc['data-ref-id']
#logger.info('fc_id: %s' % fc_id) except:
continue
if fc_id in micdrop: if fc_id in micdrop:
continue continue
fc_name = fc.findNext(text=True) fc_name = fc.findNext(text=True)
if fc_id.startswith('4000'): if fc_id.startswith('4000'):
fc_cid = None fc_cid = None
fc_isid = fc_id fc_isid = fc_id
@ -394,17 +400,24 @@ def GetComicInfo(comicid, dom, safechk=None):
fc_cid = fc_id fc_cid = fc_id
fc_isid = None fc_isid = None
issuerun = fc.next_sibling issuerun = fc.next_sibling
lines = re.sub("[^0-9]", ' ', issuerun).strip().split(' ') if issuerun is not None:
if len(lines) > 0: lines = re.sub("[^0-9]", ' ', issuerun).strip().split(' ')
for x in sorted(lines, reverse=True): if len(lines) > 0:
srchline = issuerun.rfind(x) for x in sorted(lines, reverse=True):
if srchline != -1: srchline = issuerun.rfind(x)
try: if srchline != -1:
if issuerun[srchline+len(x)] == ',' or issuerun[srchline+len(x)] == '.' or issuerun[srchline+len(x)] == ' ': try:
issuerun = issuerun[:srchline+len(x)] if issuerun[srchline+len(x)] == ',' or issuerun[srchline+len(x)] == '.' or issuerun[srchline+len(x)] == ' ':
break issuerun = issuerun[:srchline+len(x)]
except: break
continue except Exception as e:
logger.warn('[ERROR] %s' % e)
continue
else:
iss_start = fc_name.find('#')
issuerun = fc_name[iss_start:].strip()
fc_name = fc_name[:iss_start].strip()
if issuerun.endswith('.') or issuerun.endswith(','): if issuerun.endswith('.') or issuerun.endswith(','):
#logger.fdebug('Changed issuerun from %s to %s' % (issuerun, issuerun[:-1])) #logger.fdebug('Changed issuerun from %s to %s' % (issuerun, issuerun[:-1]))
issuerun = issuerun[:-1] issuerun = issuerun[:-1]
@ -412,7 +425,8 @@ def GetComicInfo(comicid, dom, safechk=None):
issuerun = issuerun[:-4].strip() issuerun = issuerun[:-4].strip()
elif issuerun.endswith(' and'): elif issuerun.endswith(' and'):
issuerun = issuerun[:-3].strip() issuerun = issuerun[:-3].strip()
else:
continue
# except: # except:
# pass # pass
issue_list.append({'series': fc_name, issue_list.append({'series': fc_name,
@ -422,7 +436,10 @@ def GetComicInfo(comicid, dom, safechk=None):
#first_collect = cis #first_collect = cis
logger.info('Collected issues in volume: %s' % issue_list) logger.info('Collected issues in volume: %s' % issue_list)
comic['Issue_List'] = issue_list if len(issue_list) == 0:
comic['Issue_List'] = 'None'
else:
comic['Issue_List'] = issue_list
else: else:
comic['Issue_List'] = 'None' comic['Issue_List'] = 'None'
@ -708,11 +725,12 @@ def GetSeriesYears(dom):
tempseries['SeriesYear'] = tempseries['SeriesYear'][:-1] tempseries['SeriesYear'] = tempseries['SeriesYear'][:-1]
desdeck = 0 desdeck = 0
tempseries['Volume'] = 'None'
#the description field actually holds the Volume# - so let's grab it #the description field actually holds the Volume# - so let's grab it
desc_soup = None
try: try:
descchunk = dm.getElementsByTagName('description')[0].firstChild.wholeText descchunk = dm.getElementsByTagName('description')[0].firstChild.wholeText
desc_soup = Soup(descchunk, "html.parser")
desclinks = desc_soup.findAll('a')
comic_desc = drophtml(descchunk) comic_desc = drophtml(descchunk)
desdeck +=1 desdeck +=1
except: except:
@ -726,6 +744,139 @@ def GetSeriesYears(dom):
except: except:
comic_deck = 'None' comic_deck = 'None'
#comic['ComicDescription'] = comic_desc
try:
tempseries['Aliases'] = dm.getElementsByTagName('aliases')[0].firstChild.wholeText
tempseries['Aliases'] = re.sub('\n', '##', tempseries['Aliases']).strip()
if tempseries['Aliases'][-2:] == '##':
tempseries['Aliases'] = tempseries['Aliases'][:-2]
#logger.fdebug('Aliases: ' + str(aliases))
except:
tempseries['Aliases'] = 'None'
tempseries['Volume'] = 'None' #noversion'
#figure out if it's a print / digital edition.
tempseries['Type'] = 'None'
if comic_deck != 'None':
if any(['print' in comic_deck.lower(), 'digital' in comic_deck.lower(), 'paperback' in comic_deck.lower(), 'one shot' in re.sub('-', '', comic_deck.lower()).strip(), 'hardcover' in comic_deck.lower()]):
if 'print' in comic_deck.lower():
tempseries['Type'] = 'Print'
elif 'digital' in comic_deck.lower():
tempseries['Type'] = 'Digital'
elif 'paperback' in comic_deck.lower():
tempseries['Type'] = 'TPB'
elif 'hardcover' in comic_deck.lower():
tempseries['Type'] = 'HC'
elif 'oneshot' in re.sub('-', '', comic_deck.lower()).strip():
tempseries['Type'] = 'One-Shot'
if comic_desc != 'None' and tempseries['Type'] == 'None':
if 'print' in comic_desc[:60].lower() and 'print edition can be found' not in comic_desc.lower():
tempseries['Type'] = 'Print'
elif 'digital' in comic_desc[:60].lower() and 'digital edition can be found' not in comic_desc.lower():
tempseries['Type'] = 'Digital'
elif all(['paperback' in comic_desc[:60].lower(), 'paperback can be found' not in comic_desc.lower()]) or 'collects' in comic_desc[:60].lower():
tempseries['Type'] = 'TPB'
elif 'hardcover' in comic_desc[:60].lower() and 'hardcover can be found' not in comic_desc.lower():
tempseries['Type'] = 'HC'
elif any(['one-shot' in comic_desc[:60].lower(), 'one shot' in comic_desc[:60].lower()]) and any(['can be found' not in comic_desc.lower(), 'following the' not in comic_desc.lower()]):
i = 0
tempseries['Type'] = 'One-Shot'
avoidwords = ['preceding', 'after the special', 'following the']
while i < 2:
if i == 0:
cbd = 'one-shot'
elif i == 1:
cbd = 'one shot'
tmp1 = comic_desc[:60].lower().find(cbd)
if tmp1 != -1:
for x in avoidwords:
tmp2 = comic_desc[:tmp1].lower().find(x)
if tmp2 != -1:
logger.fdebug('FAKE NEWS: caught incorrect reference to one-shot. Forcing to Print')
tempseries['Type'] = 'Print'
i = 3
break
i+=1
else:
tempseries['Type'] = 'Print'
if all([comic_desc != 'None', 'trade paperback' in comic_desc[:30].lower(), 'collecting' in comic_desc[:40].lower()]):
#ie. Trade paperback collecting Marvel Team-Up #9-11, 48-51, 72, 110 & 145.
first_collect = comic_desc.lower().find('collecting')
#logger.info('first_collect: %s' % first_collect)
#logger.info('comic_desc: %s' % comic_desc)
#logger.info('desclinks: %s' % desclinks)
issue_list = []
micdrop = []
if desc_soup is not None:
#if it's point form bullets, ignore it cause it's not the current volume stuff.
test_it = desc_soup.find('ul')
if test_it:
for x in test_it.findAll('li'):
if any(['Next' in x.findNext(text=True), 'Previous' in x.findNext(text=True)]):
mic_check = x.find('a')
micdrop.append(mic_check['data-ref-id'])
for fc in desclinks:
#logger.info('fc: %s' % fc)
fc_id = fc['data-ref-id']
#logger.info('fc_id: %s' % fc_id)
if fc_id in micdrop:
continue
fc_name = fc.findNext(text=True)
if fc_id.startswith('4000'):
fc_cid = None
fc_isid = fc_id
iss_start = fc_name.find('#')
issuerun = fc_name[iss_start:].strip()
fc_name = fc_name[:iss_start].strip()
elif fc_id.startswith('4050'):
fc_cid = fc_id
fc_isid = None
issuerun = fc.next_sibling
if issuerun is not None:
lines = re.sub("[^0-9]", ' ', issuerun).strip().split(' ')
if len(lines) > 0:
for x in sorted(lines, reverse=True):
srchline = issuerun.rfind(x)
if srchline != -1:
try:
if issuerun[srchline+len(x)] == ',' or issuerun[srchline+len(x)] == '.' or issuerun[srchline+len(x)] == ' ':
issuerun = issuerun[:srchline+len(x)]
break
except Exception as e:
logger.warn('[ERROR] %s' % e)
continue
else:
iss_start = fc_name.find('#')
issuerun = fc_name[iss_start:].strip()
fc_name = fc_name[:iss_start].strip()
if issuerun.endswith('.') or issuerun.endswith(','):
#logger.fdebug('Changed issuerun from %s to %s' % (issuerun, issuerun[:-1]))
issuerun = issuerun[:-1]
if issuerun.endswith(' and '):
issuerun = issuerun[:-4].strip()
elif issuerun.endswith(' and'):
issuerun = issuerun[:-3].strip()
else:
continue
# except:
# pass
issue_list.append({'series': fc_name,
'comicid': fc_cid,
'issueid': fc_isid,
'issues': issuerun})
#first_collect = cis
logger.info('Collected issues in volume: %s' % issue_list)
tempseries['Issue_List'] = issue_list
else:
tempseries['Issue_List'] = 'None'
while (desdeck > 0): while (desdeck > 0):
if desdeck == 1: if desdeck == 1:
if comic_desc == 'None': if comic_desc == 'None':
@ -750,11 +901,11 @@ def GetSeriesYears(dom):
if i == 0: if i == 0:
vfind = comicDes[v_find:v_find +15] #if it's volume 5 format vfind = comicDes[v_find:v_find +15] #if it's volume 5 format
basenums = {'zero': '0', 'one': '1', 'two': '2', 'three': '3', 'four': '4', 'five': '5', 'six': '6', 'seven': '7', 'eight': '8', 'nine': '9', 'ten': '10', 'i': '1', 'ii': '2', 'iii': '3', 'iv': '4', 'v': '5'} basenums = {'zero': '0', 'one': '1', 'two': '2', 'three': '3', 'four': '4', 'five': '5', 'six': '6', 'seven': '7', 'eight': '8', 'nine': '9', 'ten': '10', 'i': '1', 'ii': '2', 'iii': '3', 'iv': '4', 'v': '5'}
logger.fdebug('volume X format - ' + str(i) + ': ' + vfind) logger.fdebug('volume X format - %s: %s' % (i, vfind))
else: else:
vfind = comicDes[:v_find] # if it's fifth volume format vfind = comicDes[:v_find] # if it's fifth volume format
basenums = {'zero': '0', 'first': '1', 'second': '2', 'third': '3', 'fourth': '4', 'fifth': '5', 'sixth': '6', 'seventh': '7', 'eighth': '8', 'nineth': '9', 'tenth': '10', 'i': '1', 'ii': '2', 'iii': '3', 'iv': '4', 'v': '5'} basenums = {'zero': '0', 'first': '1', 'second': '2', 'third': '3', 'fourth': '4', 'fifth': '5', 'sixth': '6', 'seventh': '7', 'eighth': '8', 'nineth': '9', 'tenth': '10', 'i': '1', 'ii': '2', 'iii': '3', 'iv': '4', 'v': '5'}
logger.fdebug('X volume format - ' + str(i) + ': ' + vfind) logger.fdebug('X volume format - %s: %s' % (i, vfind))
volconv = '' volconv = ''
for nums in basenums: for nums in basenums:
if nums in vfind.lower(): if nums in vfind.lower():
@ -763,6 +914,7 @@ def GetSeriesYears(dom):
break break
#logger.info('volconv: ' + str(volconv)) #logger.info('volconv: ' + str(volconv))
#now we attempt to find the character position after the word 'volume'
if i == 0: if i == 0:
volthis = vfind.lower().find('volume') volthis = vfind.lower().find('volume')
volthis = volthis + 6 # add on the actual word to the position so that we can grab the subsequent digit volthis = volthis + 6 # add on the actual word to the position so that we can grab the subsequent digit
@ -780,7 +932,7 @@ def GetSeriesYears(dom):
ledigit = re.sub("[^0-9]", "", vf[0]) ledigit = re.sub("[^0-9]", "", vf[0])
if ledigit != '': if ledigit != '':
tempseries['Volume'] = ledigit tempseries['Volume'] = ledigit
logger.fdebug("Volume information found! Adding to series record : volume " + tempseries['Volume']) logger.fdebug("Volume information found! Adding to series record : volume %s" % tempseries['Volume'])
break break
except: except:
pass pass
@ -790,7 +942,7 @@ def GetSeriesYears(dom):
i += 1 i += 1
if tempseries['Volume'] == 'None': if tempseries['Volume'] == 'None':
logger.fdebug('tempseries[Volume]:' + str(tempseries['Volume'])) logger.fdebug('tempseries[Volume]: %s' % tempseries['Volume'])
desdeck -= 1 desdeck -= 1
else: else:
break break
@ -800,7 +952,9 @@ def GetSeriesYears(dom):
"ComicName": tempseries['Series'], "ComicName": tempseries['Series'],
"SeriesYear": tempseries['SeriesYear'], "SeriesYear": tempseries['SeriesYear'],
"Publisher": tempseries['Publisher'], "Publisher": tempseries['Publisher'],
"Volume": tempseries['Volume']}) "Volume": tempseries['Volume'],
"Aliases": tempseries['Aliases'],
"Type": tempseries['Type']})
return serieslist return serieslist
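
The aliases value comes back from ComicVine as a newline-separated string; the hunk above stores it with '##' as the delimiter (or the literal 'None' when absent). A minimal sketch of turning that stored value back into a list on the consuming side (the helper name is hypothetical):

def split_aliases(stored):
    # 'Alias One##Alias Two' -> ['Alias One', 'Alias Two']; 'None' or empty -> []
    if not stored or stored == 'None':
        return []
    return [a.strip() for a in stored.split('##') if a.strip()]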

View File

@ -26,8 +26,7 @@ import time
import Queue import Queue
import mylar import mylar
import logger
from mylar import logger
db_lock = threading.Lock() db_lock = threading.Lock()
mylarQueue = Queue.Queue() mylarQueue = Queue.Queue()

View File

@ -50,7 +50,8 @@ class FileChecker(object):
self.watchcomic = re.sub('\?', '', watchcomic).strip() #strip the ? separately since it affects the regex. self.watchcomic = re.sub('\?', '', watchcomic).strip() #strip the ? separately since it affects the regex.
self.watchcomic = re.sub(u'\u2014', ' - ', watchcomic).strip() #replace the \u2014 with a normal - because this world is f'd up enough to have something like that. self.watchcomic = re.sub(u'\u2014', ' - ', watchcomic).strip() #replace the \u2014 with a normal - because this world is f'd up enough to have something like that.
self.watchcomic = re.sub(u'\u2013', ' - ', watchcomic).strip() #replace the \u2013 with a normal - because again, people are dumb. self.watchcomic = re.sub(u'\u2013', ' - ', watchcomic).strip() #replace the \u2013 with a normal - because again, people are dumb.
self.watchcomic = unicodedata.normalize('NFKD', self.watchcomic).encode('ASCII', 'ignore') if type(self.watchcomic) != str:
self.watchcomic = unicodedata.normalize('NFKD', self.watchcomic).encode('ASCII', 'ignore')
else: else:
self.watchcomic = None self.watchcomic = None
@ -107,7 +108,6 @@ class FileChecker(object):
self.AS_Alt = AS_Alternates['AS_Alt'] self.AS_Alt = AS_Alternates['AS_Alt']
self.AS_Tuple = AS_Alternates['AS_Tuple'] self.AS_Tuple = AS_Alternates['AS_Tuple']
def listFiles(self): def listFiles(self):
comiclist = [] comiclist = []
watchmatch = {} watchmatch = {}
@ -122,6 +122,7 @@ class FileChecker(object):
'comiclocation': runresults['comiclocation'], 'comiclocation': runresults['comiclocation'],
'series_name': runresults['series_name'], 'series_name': runresults['series_name'],
'series_name_decoded': runresults['series_name_decoded'], 'series_name_decoded': runresults['series_name_decoded'],
'issueid': runresults['issueid'],
'dynamic_name': runresults['dynamic_name'], 'dynamic_name': runresults['dynamic_name'],
'series_volume': runresults['series_volume'], 'series_volume': runresults['series_volume'],
'alt_series': runresults['alt_series'], 'alt_series': runresults['alt_series'],
@ -129,7 +130,8 @@ class FileChecker(object):
'issue_year': runresults['issue_year'], 'issue_year': runresults['issue_year'],
'issue_number': runresults['issue_number'], 'issue_number': runresults['issue_number'],
'scangroup': runresults['scangroup'], 'scangroup': runresults['scangroup'],
'reading_order': runresults['reading_order'] 'reading_order': runresults['reading_order'],
'booktype': runresults['booktype']
} }
else: else:
filelist = self.traverse_directories(self.dir) filelist = self.traverse_directories(self.dir)
@ -159,6 +161,7 @@ class FileChecker(object):
'comiclocation': runresults['comiclocation'], 'comiclocation': runresults['comiclocation'],
'series_name': runresults['series_name'], 'series_name': runresults['series_name'],
'series_name_decoded': runresults['series_name_decoded'], 'series_name_decoded': runresults['series_name_decoded'],
'issueid': runresults['issueid'],
'alt_series': runresults['alt_series'], 'alt_series': runresults['alt_series'],
'alt_issue': runresults['alt_issue'], 'alt_issue': runresults['alt_issue'],
'dynamic_name': runresults['dynamic_name'], 'dynamic_name': runresults['dynamic_name'],
@ -166,7 +169,8 @@ class FileChecker(object):
'issue_year': runresults['issue_year'], 'issue_year': runresults['issue_year'],
'issue_number': runresults['issue_number'], 'issue_number': runresults['issue_number'],
'scangroup': runresults['scangroup'], 'scangroup': runresults['scangroup'],
'reading_order': runresults['reading_order'] 'reading_order': runresults['reading_order'],
'booktype': runresults['booktype']
}) })
else: else:
comiclist.append({ comiclist.append({
@ -179,7 +183,9 @@ class FileChecker(object):
'IssueYear': runresults['issue_year'], 'IssueYear': runresults['issue_year'],
'JusttheDigits': runresults['justthedigits'], 'JusttheDigits': runresults['justthedigits'],
'AnnualComicID': runresults['annual_comicid'], 'AnnualComicID': runresults['annual_comicid'],
'scangroup': runresults['scangroup'] 'issueid': runresults['issueid'],
'scangroup': runresults['scangroup'],
'booktype': runresults['booktype']
}) })
comiccnt +=1 comiccnt +=1
else: else:
@ -194,7 +200,9 @@ class FileChecker(object):
'alt_issue': runresults['alt_issue'], 'alt_issue': runresults['alt_issue'],
'issue_year': runresults['issue_year'], 'issue_year': runresults['issue_year'],
'issue_number': runresults['issue_number'], 'issue_number': runresults['issue_number'],
'scangroup': runresults['scangroup'] 'issueid': runresults['issueid'],
'scangroup': runresults['scangroup'],
'booktype': runresults['booktype']
}) })
watchmatch['comiccount'] = comiccnt watchmatch['comiccount'] = comiccnt
@ -226,13 +234,12 @@ class FileChecker(object):
ab = len(path) ab = len(path)
tmppath = subpath[ab:] tmppath = subpath[ab:]
else: else:
tmppath = re.sub(path, '', subpath).strip() tmppath = subpath.replace(path, '').strip()
path_list = os.path.normpath(tmppath) path_list = os.path.normpath(tmppath)
if '/' == path_list[0] or '\\' == path_list[0]: if '/' == path_list[0] or '\\' == path_list[0]:
#need to remove any leading slashes so the os join can properly join the components #need to remove any leading slashes so the os join can properly join the components
path_list = path_list[1:] path_list = path_list[1:]
#path_list = tmppath.split(os.sep)[-1]
logger.fdebug('[SUB-PATH] subpath set to : ' + path_list) logger.fdebug('[SUB-PATH] subpath set to : ' + path_list)
@ -283,6 +290,16 @@ class FileChecker(object):
modfilename = modfilename.replace('()','').strip() modfilename = modfilename.replace('()','').strip()
issueid = None
x = modfilename.find('[__')
if x != -1:
y = modfilename.find('__]', x)
if y != -1:
issueid = modfilename[x+3:y]
logger.fdebug('issueid: %s' % issueid)
modfilename = ('%s %s' % (modfilename[:x], modfilename[y+3:])).strip()
logger.fdebug('issueid %s removed successfully: %s' % (issueid, modfilename))
#here we take a snapshot of the current modfilename, the intent is that we will remove characters that match #here we take a snapshot of the current modfilename, the intent is that we will remove characters that match
#as we discover them - namely volume, issue #, years, etc #as we discover them - namely volume, issue #, years, etc
#the remaining strings should be the series title and/or issue title if present (has to be detected properly) #the remaining strings should be the series title and/or issue title if present (has to be detected properly)
@ -390,7 +407,7 @@ class FileChecker(object):
lastmod_position = 0 lastmod_position = 0
booktype = 'issue' booktype = 'issue'
#exceptions that are considered alpha-numeric issue numbers #exceptions that are considered alpha-numeric issue numbers
exceptions = ('NOW', 'AI', 'AU', 'X', 'A', 'B', 'C', 'INH', 'MU') exceptions = ('NOW', 'AI', 'AU', 'X', 'A', 'B', 'C', 'INH', 'MU', 'SUMMER', 'SPRING', 'FALL', 'WINTER')
#unicode characters, followed by int value #unicode characters, followed by int value
# num_exceptions = [{iss:u'\xbd',val:.5},{iss:u'\xbc',val:.25}, {iss:u'\xe',val:.75}, {iss:u'\221e',val:'infinity'}] # num_exceptions = [{iss:u'\xbd',val:.5},{iss:u'\xbc',val:.25}, {iss:u'\xe',val:.75}, {iss:u'\221e',val:'infinity'}]
@ -444,7 +461,14 @@ class FileChecker(object):
'position': split_file.index(sf), 'position': split_file.index(sf),
'mod_position': self.char_file_position(modfilename, sf, lastmod_position), 'mod_position': self.char_file_position(modfilename, sf, lastmod_position),
'validcountchk': validcountchk}) 'validcountchk': validcountchk})
else:
test_position = modfilename[self.char_file_position(modfilename, sf,lastmod_position)-1]
if test_position == '#':
possible_issuenumbers.append({'number': sf,
'position': split_file.index(sf),
'mod_position': self.char_file_position(modfilename, sf, lastmod_position),
'validcountchk': validcountchk})
if sf == 'XCV': if sf == 'XCV':
# new 2016-09-19 \ attempt to check for XCV which replaces any unicode above # new 2016-09-19 \ attempt to check for XCV which replaces any unicode above
for x in list(wrds): for x in list(wrds):
@ -1052,6 +1076,7 @@ class FileChecker(object):
'comiclocation': self.dir, 'comiclocation': self.dir,
'series_name': series_name, 'series_name': series_name,
'series_name_decoded': series_name_decoded, 'series_name_decoded': series_name_decoded,
'issueid': issueid,
'alt_series': alt_series, 'alt_series': alt_series,
'alt_issue': alt_issue, 'alt_issue': alt_issue,
'dynamic_name': dreplace, 'dynamic_name': dreplace,
@ -1061,6 +1086,7 @@ class FileChecker(object):
'issue_year': issue_year, 'issue_year': issue_year,
'annual_comicid': None, 'annual_comicid': None,
'scangroup': scangroup, 'scangroup': scangroup,
'booktype': booktype,
'reading_order': None} 'reading_order': None}
if self.justparse: if self.justparse:
@ -1071,6 +1097,7 @@ class FileChecker(object):
'comiclocation': self.dir, 'comiclocation': self.dir,
'series_name': series_name, 'series_name': series_name,
'series_name_decoded': series_name_decoded, 'series_name_decoded': series_name_decoded,
'issueid': issueid,
'alt_series': alt_series, 'alt_series': alt_series,
'alt_issue': alt_issue, 'alt_issue': alt_issue,
'dynamic_name': self.dynamic_replace(series_name)['mod_seriesname'], 'dynamic_name': self.dynamic_replace(series_name)['mod_seriesname'],
@ -1078,6 +1105,7 @@ class FileChecker(object):
'issue_year': issue_year, 'issue_year': issue_year,
'issue_number': issue_number, 'issue_number': issue_number,
'scangroup': scangroup, 'scangroup': scangroup,
'booktype': booktype,
'reading_order': reading_order} 'reading_order': reading_order}
series_info = {} series_info = {}
@ -1087,12 +1115,14 @@ class FileChecker(object):
'comiclocation': self.dir, 'comiclocation': self.dir,
'series_name': series_name, 'series_name': series_name,
'series_name_decoded': series_name_decoded, 'series_name_decoded': series_name_decoded,
'issueid': issueid,
'alt_series': alt_series, 'alt_series': alt_series,
'alt_issue': alt_issue, 'alt_issue': alt_issue,
'series_volume': issue_volume, 'series_volume': issue_volume,
'issue_year': issue_year, 'issue_year': issue_year,
'issue_number': issue_number, 'issue_number': issue_number,
'scangroup': scangroup} 'scangroup': scangroup,
'booktype': booktype}
return self.matchIT(series_info) return self.matchIT(series_info)
@ -1252,9 +1282,11 @@ class FileChecker(object):
'alt_series': series_info['alt_series'], 'alt_series': series_info['alt_series'],
'alt_issue': series_info['alt_issue'], 'alt_issue': series_info['alt_issue'],
'issue_year': series_info['issue_year'], 'issue_year': series_info['issue_year'],
'issueid': series_info['issueid'],
'justthedigits': justthedigits, 'justthedigits': justthedigits,
'annual_comicid': annual_comicid, 'annual_comicid': annual_comicid,
'scangroup': series_info['scangroup']} 'scangroup': series_info['scangroup'],
'booktype': series_info['booktype']}
else: else:
#logger.fdebug('[NO MATCH] ' + filename + ' [WATCHLIST:' + self.watchcomic + ']') #logger.fdebug('[NO MATCH] ' + filename + ' [WATCHLIST:' + self.watchcomic + ']')
@ -1263,12 +1295,14 @@ class FileChecker(object):
'sub': series_info['sub'], 'sub': series_info['sub'],
'comiclocation': series_info['comiclocation'], 'comiclocation': series_info['comiclocation'],
'series_name': series_info['series_name'], 'series_name': series_info['series_name'],
'alt_series': series_info['alt_series'], 'alt_series': series_info['alt_series'],
'alt_issue': series_info['alt_issue'], 'alt_issue': series_info['alt_issue'],
'issue_number': series_info['issue_number'], 'issue_number': series_info['issue_number'],
'series_volume': series_info['series_volume'], 'series_volume': series_info['series_volume'],
'issue_year': series_info['issue_year'], 'issue_year': series_info['issue_year'],
'scangroup': series_info['scangroup']} 'issueid': series_info['issueid'],
'scangroup': series_info['scangroup'],
'booktype': series_info['booktype']}
def char_file_position(self, file, findchar, lastpos): def char_file_position(self, file, findchar, lastpos):
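
The issueid handling added above relies on a [__<issueid>__] marker embedded in the filename, which is read and then stripped before the rest of the name is parsed. A regex-based sketch of the same convention (an equivalent of the find()-based code above, not the code itself):

import re

def pull_issueid(filename):
    # 'Batman 001 [__123456__] (2016).cbz' -> ('123456', 'Batman 001 (2016).cbz')
    m = re.search(r'\[__(.+?)__\]', filename)
    if not m:
        return None, filename
    cleaned = (filename[:m.start()] + ' ' + filename[m.end():]).strip()
    return m.group(1), re.sub(r'\s{2,}', ' ', cleaned)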

View File

@ -10,7 +10,7 @@ import mylar
import unicodedata import unicodedata
import urllib import urllib
def Startit(searchName, searchIssue, searchYear, ComicVersion, IssDateFix): def Startit(searchName, searchIssue, searchYear, ComicVersion, IssDateFix, booktype=None):
cName = searchName cName = searchName
#clean up searchName due to webparse/redudant naming that would return too specific of results. #clean up searchName due to webparse/redudant naming that would return too specific of results.
@ -39,7 +39,12 @@ def Startit(searchName, searchIssue, searchYear, ComicVersion, IssDateFix):
encodeSearch = urllib.quote_plus(searchName) encodeSearch = urllib.quote_plus(searchName)
splitSearch = encodeSearch.split(" ") splitSearch = encodeSearch.split(" ")
if len(searchIssue) == 1: tmpsearchIssue = searchIssue
if any([booktype == 'One-Shot', booktype == 'TPB']):
tmpsearchIssue = '1'
loop = 4
elif len(searchIssue) == 1:
loop = 3 loop = 3
elif len(searchIssue) == 2: elif len(searchIssue) == 2:
loop = 2 loop = 2
@ -71,17 +76,24 @@ def Startit(searchName, searchIssue, searchYear, ComicVersion, IssDateFix):
i = 1 i = 1
while (i <= loop): while (i <= loop):
if i == 1: if i == 1:
searchmethod = searchIssue searchmethod = tmpsearchIssue
elif i == 2: elif i == 2:
searchmethod = '0' + searchIssue searchmethod = '0' + tmpsearchIssue
elif i == 3: elif i == 3:
searchmethod = '00' + searchIssue searchmethod = '00' + tmpsearchIssue
elif i == 4:
searchmethod = tmpsearchIssue
else: else:
break break
joinSearch = "+".join(splitSearch) + "+" +searchmethod if i == 4:
logger.fdebug('Now searching experimental for %s to try and ensure all the bases are covered' % cName)
joinSearch = "+".join(splitSearch)
else:
logger.fdebug('Now searching experimental for issue number: %s to try and ensure all the bases are covered' % searchmethod)
joinSearch = "+".join(splitSearch) + "+" +searchmethod
logger.fdebug('Now searching experimental for issue number: %s to try and ensure all the bases are covered' % searchmethod)
if mylar.CONFIG.PREFERRED_QUALITY == 1: joinSearch = joinSearch + " .cbr" if mylar.CONFIG.PREFERRED_QUALITY == 1: joinSearch = joinSearch + " .cbr"
elif mylar.CONFIG.PREFERRED_QUALITY == 2: joinSearch = joinSearch + " .cbz" elif mylar.CONFIG.PREFERRED_QUALITY == 2: joinSearch = joinSearch + " .cbz"
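
For clarity, a sketch of the search-term variations the loop above produces: the issue number is tried as-is and then zero-padded, and for TPB or One-Shot booktypes the issue number is forced to '1' with a fourth pass that searches on the series name alone (the function name is illustrative only):

def experimental_terms(split_search, issue, booktype=None):
    # mirrors the i == 1..4 branches above
    if booktype in ('One-Shot', 'TPB'):
        issue, loop = '1', 4
    elif len(issue) == 1:
        loop = 3
    elif len(issue) == 2:
        loop = 2
    else:
        loop = 1
    terms = []
    for i in range(1, loop + 1):
        if i == 4:
            terms.append("+".join(split_search))                      # series name only
        else:
            terms.append("+".join(split_search) + "+" + ('0' * (i - 1)) + issue)
    return terms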

mylar/getcomics.py (new file, 354 lines)
View File

@ -0,0 +1,354 @@
# -*- coding: utf-8 -*-
# This file is part of Mylar.
#
# Mylar is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Mylar is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Mylar. If not, see <http://www.gnu.org/licenses/>.
from StringIO import StringIO
import urllib
from threading import Thread
import os
import sys
import re
import gzip
import time
import datetime
import json
from bs4 import BeautifulSoup
import requests
import cfscrape
import zipfile
import logger
import mylar
from mylar import db
class GC(object):
def __init__(self, query=None, issueid=None, comicid=None):
self.valreturn = []
self.url = 'https://getcomics.info'
self.query = query
self.comicid = comicid
self.issueid = issueid
self.local_filename = os.path.join(mylar.CONFIG.CACHE_DIR, "getcomics.html")
self.headers = {'Accept-encoding': 'gzip', 'User-Agent': 'Mozilla/5.0 (Windows NT 6.1; WOW64; rv:40.0) Gecko/20100101 Firefox/40.1', 'Referer': 'https://getcomics.info/'}
def search(self):
with cfscrape.create_scraper() as s:
cf_cookievalue, cf_user_agent = s.get_tokens(self.url, headers=self.headers)
t = s.get(self.url+'/', params={'s': self.query}, verify=True, cookies=cf_cookievalue, headers=self.headers, stream=True)
with open(self.local_filename, 'wb') as f:
for chunk in t.iter_content(chunk_size=1024):
if chunk: # filter out keep-alive new chunks
f.write(chunk)
f.flush()
return self.search_results()
def loadsite(self, id, link):
title = os.path.join(mylar.CONFIG.CACHE_DIR, 'getcomics-' + id)
with cfscrape.create_scraper() as s:
self.cf_cookievalue, cf_user_agent = s.get_tokens(link, headers=self.headers)
t = s.get(link, verify=True, cookies=self.cf_cookievalue, headers=self.headers, stream=True)
with open(title+'.html', 'wb') as f:
for chunk in t.iter_content(chunk_size=1024):
if chunk: # filter out keep-alive new chunks
f.write(chunk)
f.flush()
def search_results(self):
results = {}
resultlist = []
soup = BeautifulSoup(open(self.local_filename), 'html.parser')
resultline = soup.find("span", {"class": "cover-article-count"}).get_text(strip=True)
logger.info('There are %s results' % re.sub('Articles', '', resultline).strip())
for f in soup.findAll("article"):
id = f['id']
lk = f.find('a')
link = lk['href']
titlefind = f.find("h1", {"class": "post-title"})
title = titlefind.get_text(strip=True)
title = re.sub(u'\u2013', '-', title).strip()
filename = title
issues = None
pack = False
#see if it's a pack type
issfind_st = title.find('#')
issfind_en = title.find('-', issfind_st)
if issfind_en != -1:
if all([title[issfind_en+1] == ' ', title[issfind_en+2].isdigit()]):
iss_en = title.find(' ', issfind_en+2)
if iss_en != -1:
issues = title[issfind_st+1:iss_en]
pack = True
if title[issfind_en+1].isdigit():
iss_en = title.find(' ', issfind_en+1)
if iss_en != -1:
issues = title[issfind_st+1:iss_en]
pack = True
# if it's a pack - remove the issue-range and the possible issue years (cause it most likely will span) and pass thru as separate items
if pack is True:
title = re.sub(issues, '', title).strip()
if title.endswith('#'):
title = title[:-1].strip()
option_find = f.find("p", {"style": "text-align: center;"})
i = 0
while i <= 2:
option_find = option_find.findNext(text=True)
if 'Year' in option_find:
year = option_find.findNext(text=True)
year = re.sub('\|', '', year).strip()
if pack is True and '-' in year:
title = re.sub('\('+year+'\)', '', title).strip()
else:
size = option_find.findNext(text=True)
if all([re.sub(':', '', size).strip() != 'Size', len(re.sub('[^0-9]', '', size).strip()) > 0]):
if 'MB' in size:
size = re.sub('MB', 'M', size).strip()
elif 'GB' in size:
size = re.sub('GB', 'G', size).strip()
if '//' in size:
nwsize = size.find('//')
size = re.sub('\[', '', size[:nwsize]).strip()
else:
size = '0 M'
i+=1
dateline = f.find('time')
datefull = dateline['datetime']
datestamp = time.mktime(time.strptime(datefull, "%Y-%m-%d"))
resultlist.append({"title": title,
"pubdate": datetime.datetime.fromtimestamp(float(datestamp)).strftime('%a, %d %b %Y %H:%M:%S'),
"filename": filename,
"size": re.sub(' ', '', size).strip(),
"pack": pack,
"issues": issues,
"link": link,
"year": year,
"id": re.sub('post-', '', id).strip(),
"site": 'DDL'})
logger.fdebug('%s [%s]' % (title, size))
results['entries'] = resultlist
return results
def parse_downloadresults(self, id, mainlink):
myDB = db.DBConnection()
title = os.path.join(mylar.CONFIG.CACHE_DIR, 'getcomics-' + id)
soup = BeautifulSoup(open(title+'.html'), 'html.parser')
orig_find = soup.find("p", {"style": "text-align: center;"})
i = 0
option_find = orig_find
while True: #i <= 10:
prev_option = option_find
option_find = option_find.findNext(text=True)
if i == 0:
series = option_find
elif 'Year' in option_find:
year = option_find.findNext(text=True)
year = re.sub('\|', '', year).strip()
else:
if 'Size' in prev_option:
size = option_find #.findNext(text=True)
possible_more = orig_find.next_sibling
break
i+=1
logger.fdebug('Now downloading: %s [%s] / %s ... this can take a while (go get some take-out)...' % (series, year, size))
link = None
for f in soup.findAll("div", {"class": "aio-pulse"}):
lk = f.find('a')
if lk['title'] == 'Download Now':
link = lk['href']
site = lk['title']
break #get the first link just to test
if link is None:
logger.warn('Unable to retrieve any valid immediate download links. They might not exist.')
return
links = []
if possible_more.name == 'ul':
bb = possible_more.findAll('li')
for x in bb:
volume = x.findNext(text=True)
if u'\u2013' in volume:
volume = re.sub(u'\u2013', '-', volume)
linkline = x.find('a')
link = linkline['href']
site = linkline.findNext(text=True)
links.append({"volume": volume,
"site": site,
"link": link})
else:
check_extras = soup.findAll("h3")
for sb in check_extras:
header = sb.findNext(text=True)
if header == 'TPBs':
nxt = sb.next_sibling
if nxt.name == 'ul':
bb = nxt.findAll('li')
for x in bb:
volume = x.findNext(text=True)
if u'\u2013' in volume:
volume = re.sub(u'\u2013', '-', volume)
linkline = x.find('a')
link = linkline['href']
site = linkline.findNext(text=True)
links.append({"volume": volume,
"site": site,
"link": link})
if link is None:
logger.warn('Unable to retrieve any valid immediate download links. They might not exist.')
return {'success': False}
for x in links:
logger.fdebug('[%s] %s - %s' % (x['site'], x['volume'], x['link']))
ctrlval = {'id': id}
vals = {'series': series,
'year': year,
'size': size,
'issueid': self.issueid,
'comicid': self.comicid,
'link': link,
'status': 'Queued'}
myDB.upsert('ddl_info', vals, ctrlval)
mylar.DDL_QUEUE.put({'link': link,
'mainlink': mainlink,
'series': series,
'year': year,
'size': size,
'comicid': self.comicid,
'issueid': self.issueid,
'id': id})
return {'success': True}
def downloadit(self, id, link, mainlink):
if mylar.DDL_LOCK is True:
logger.fdebug('[DDL] Another item is currently downloading via DDL. Only one item can be downloaded at a time using DDL. Patience.')
return
else:
mylar.DDL_LOCK = True
myDB = db.DBConnection()
filename = None
try:
with cfscrape.create_scraper() as s:
cf_cookievalue, cf_user_agent = s.get_tokens(mainlink, headers=self.headers)
t = s.get(link, verify=True, cookies=cf_cookievalue, headers=self.headers, stream=True)
filename = os.path.basename(urllib.unquote(t.url).decode('utf-8'))
path = os.path.join(mylar.CONFIG.DDL_LOCATION, filename)
#write the filename to the db for tracking purposes...
myDB.upsert('ddl_info', {'filename': filename}, {'id': id})
if t.headers.get('content-encoding') == 'gzip': #.get('Content-Encoding') == 'gzip':
buf = StringIO(t.content)
f = gzip.GzipFile(fileobj=buf)
with open(path, 'wb') as f:
for chunk in t.iter_content(chunk_size=1024):
if chunk: # filter out keep-alive new chunks
f.write(chunk)
f.flush()
except Exception as e:
logger.error('[ERROR] %s' % e)
mylar.DDL_LOCK = False
return ({"success": False,
"filename": filename,
"path": None})
else:
mylar.DDL_LOCK = False
if os.path.isfile(path):
if path.endswith('.zip'):
new_path = os.path.join(mylar.CONFIG.DDL_LOCATION, re.sub('.zip', '', filename).strip())
logger.info('Zip file detected. Unzipping into new modified path location: %s' % new_path)
try:
zip_f = zipfile.ZipFile(path, 'r')
zip_f.extractall(new_path)
zip_f.close()
except Exception as e:
logger.warn('[ERROR: %s] Unable to extract zip file: %s' % (e, new_path))
return ({"success": False,
"filename": filename,
"path": None})
else:
try:
os.remove(path)
except Exception as e:
logger.warn('[ERROR: %s] Unable to remove zip file from %s after extraction.' % (e, path))
filename = None
else:
new_path = path
return ({"success": True,
"filename": filename,
"path": new_path})
def issue_list(self, pack):
#packlist = [x.strip() for x in pack.split(',')]
packlist = pack.replace('+', ' ').replace(',', ' ').split()
print packlist
plist = []
pack_issues = []
for pl in packlist:
if '-' in pl:
plist.append(range(int(pl[:pl.find('-')]),int(pl[pl.find('-')+1:])+1))
else:
if 'TPBs' not in pl:
plist.append(int(pl))
else:
plist.append('TPBs')
for pi in plist:
if type(pi) == list:
for x in pi:
pack_issues.append(x)
else:
pack_issues.append(pi)
pack_issues.sort()
print "pack_issues: %s" % pack_issues
#if __name__ == '__main__':
# ab = GC(sys.argv[1]) #'justice league aquaman') #sys.argv[0])
# #c = ab.search()
# b = ab.loadsite('test', sys.argv[2])
# c = ab.parse_downloadresults('test', '60MB')
# #c = ab.issue_list(sys.argv[2])
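issue_list() above only prints its expansion, but the logic is easy to exercise on its own. A hedged sketch that returns the expanded list instead (written for Python 2, matching this codebase: range() must produce a list and the mixed int/str sort relies on Python 2 ordering):

def expand_pack(pack):
    # Same splitting/expansion as issue_list() above, returned instead of printed.
    packlist = pack.replace('+', ' ').replace(',', ' ').split()
    plist = []
    for pl in packlist:
        if '-' in pl:
            # '1-5' expands to [1, 2, 3, 4, 5]
            plist.append(range(int(pl[:pl.find('-')]), int(pl[pl.find('-') + 1:]) + 1))
        elif 'TPBs' not in pl:
            plist.append(int(pl))
        else:
            plist.append('TPBs')
    pack_issues = []
    for pi in plist:
        if type(pi) == list:
            pack_issues.extend(pi)
        else:
            pack_issues.append(pi)
    pack_issues.sort()
    return pack_issues

print(expand_pack('1-3, 5 + TPBs'))   # [1, 2, 3, 5, 'TPBs']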

View File

@ -21,6 +21,7 @@ from datetime import timedelta, date
import subprocess import subprocess
import requests import requests
import shlex import shlex
import Queue
import json import json
import re import re
import sys import sys
@ -37,7 +38,7 @@ from apscheduler.triggers.interval import IntervalTrigger
import mylar import mylar
import logger import logger
from mylar import sabnzbd, nzbget, process from mylar import db, sabnzbd, nzbget, process, getcomics
def multikeysort(items, columns): def multikeysort(items, columns):
@ -183,12 +184,15 @@ def human2bytes(s):
num = re.sub(',', '', s[:-1]) num = re.sub(',', '', s[:-1])
#assert num.isdigit() and letter in symbols #assert num.isdigit() and letter in symbols
#use below assert statement to handle sizes with decimal places #use below assert statement to handle sizes with decimal places
assert float(num) and letter in symbols if num != '0':
num = float(num) assert float(num) and letter in symbols
prefix = {symbols[0]: 1} num = float(num)
for i, s in enumerate(symbols[1:]): prefix = {symbols[0]: 1}
prefix[s] = 1 << (i +1) *10 for i, s in enumerate(symbols[1:]):
return int(num * prefix[letter]) prefix[s] = 1 << (i +1) *10
return int(num * prefix[letter])
else:
return 0
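The added guard lets the '0M' placeholder size produced by the DDL search results pass through cleanly instead of tripping the assert. A self-contained approximation of the resulting helper (the symbols/letter setup sits outside this hunk, so it is assumed here):

import re

def human2bytes(s):
    # Approximation of the modified helper above: a zero size short-circuits to 0.
    symbols = ('B', 'K', 'M', 'G', 'T', 'P', 'E', 'Z', 'Y')
    letter = s[-1:].strip().upper()
    num = re.sub(',', '', s[:-1])
    if num == '0':
        return 0
    assert float(num) and letter in symbols
    num = float(num)
    prefix = {symbols[0]: 1}
    for i, sym in enumerate(symbols[1:]):
        prefix[sym] = 1 << (i + 1) * 10
    return int(num * prefix[letter])

print(human2bytes('150M'))   # 157286400
print(human2bytes('0M'))     # 0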
def replace_all(text, dic): def replace_all(text, dic):
for i, j in dic.iteritems(): for i, j in dic.iteritems():
@ -263,7 +267,7 @@ def decimal_issue(iss):
return deciss, dec_except return deciss, dec_except
def rename_param(comicid, comicname, issue, ofilename, comicyear=None, issueid=None, annualize=None, arc=False): def rename_param(comicid, comicname, issue, ofilename, comicyear=None, issueid=None, annualize=None, arc=False):
import db #import db
myDB = db.DBConnection() myDB = db.DBConnection()
comicid = str(comicid) # it's coming in unicoded... comicid = str(comicid) # it's coming in unicoded...
@ -715,7 +719,7 @@ def ComicSort(comicorder=None, sequence=None, imported=None):
if sequence: if sequence:
# if it's on startup, load the sql into a tuple for use to avoid record-locking # if it's on startup, load the sql into a tuple for use to avoid record-locking
i = 0 i = 0
import db #import db
myDB = db.DBConnection() myDB = db.DBConnection()
comicsort = myDB.select("SELECT * FROM comics ORDER BY ComicSortName COLLATE NOCASE") comicsort = myDB.select("SELECT * FROM comics ORDER BY ComicSortName COLLATE NOCASE")
comicorderlist = [] comicorderlist = []
@ -800,7 +804,7 @@ def updateComicLocation():
# - set NEWCOMDIR = new ComicLocation # - set NEWCOMDIR = new ComicLocation
#after running, set ComicLocation to new location in Configuration GUI #after running, set ComicLocation to new location in Configuration GUI
import db #import db
myDB = db.DBConnection() myDB = db.DBConnection()
if mylar.CONFIG.NEWCOM_DIR is not None: if mylar.CONFIG.NEWCOM_DIR is not None:
logger.info('Performing a one-time mass update to Comic Location') logger.info('Performing a one-time mass update to Comic Location')
@ -932,7 +936,7 @@ def cleanhtml(raw_html):
def issuedigits(issnum): def issuedigits(issnum):
import db #import db
int_issnum = None int_issnum = None
@ -1092,8 +1096,16 @@ def issuedigits(issnum):
a+=1 a+=1
int_issnum = (int(issno) * 1000) + ordtot int_issnum = (int(issno) * 1000) + ordtot
elif invchk == "true": elif invchk == "true":
logger.fdebug('this does not have an issue # that I can parse properly.') if any([issnum.lower() == 'fall', issnum.lower() == 'spring', issnum.lower() == 'summer', issnum.lower() == 'winter']):
return 999999999999999 inu = 0
ordtot = 0
while (inu < len(issnum)):
ordtot += ord(issnum[inu].lower()) #lower-case the letters for simplicity
inu+=1
int_issnum = ordtot
else:
logger.fdebug('this does not have an issue # that I can parse properly.')
return 999999999999999
else: else:
if issnum == '9-5': if issnum == '9-5':
issnum = u'9\xbd' issnum = u'9\xbd'
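The seasonal branch added above maps season-named 'issue numbers' to a stable integer by summing character ordinals, so they can still be sorted and compared. A quick illustration of what that produces (values computed here for illustration):

def season_ordinal(issnum):
    # Same ord() summation as the added branch above.
    ordtot = 0
    inu = 0
    while inu < len(issnum):
        ordtot += ord(issnum[inu].lower())
        inu += 1
    return ordtot

print(season_ordinal('Fall'))    # 415
print(season_ordinal('Winter'))  # 665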
@ -1118,7 +1130,7 @@ def issuedigits(issnum):
def checkthepub(ComicID): def checkthepub(ComicID):
import db #import db
myDB = db.DBConnection() myDB = db.DBConnection()
publishers = ['marvel', 'dc', 'darkhorse'] publishers = ['marvel', 'dc', 'darkhorse']
pubchk = myDB.selectone("SELECT * FROM comics WHERE ComicID=?", [ComicID]).fetchone() pubchk = myDB.selectone("SELECT * FROM comics WHERE ComicID=?", [ComicID]).fetchone()
@ -1135,7 +1147,7 @@ def checkthepub(ComicID):
return mylar.CONFIG.INDIE_PUB return mylar.CONFIG.INDIE_PUB
def annual_update(): def annual_update():
import db #import db
myDB = db.DBConnection() myDB = db.DBConnection()
annuallist = myDB.select('SELECT * FROM annuals') annuallist = myDB.select('SELECT * FROM annuals')
if annuallist is None: if annuallist is None:
@ -1191,7 +1203,7 @@ def renamefile_readingorder(readorder):
return readord return readord
def latestdate_fix(): def latestdate_fix():
import db #import db
datefix = [] datefix = []
cnupdate = [] cnupdate = []
myDB = db.DBConnection() myDB = db.DBConnection()
@ -1243,7 +1255,7 @@ def latestdate_fix():
return return
def upgrade_dynamic(): def upgrade_dynamic():
import db #import db
dynamic_comiclist = [] dynamic_comiclist = []
myDB = db.DBConnection() myDB = db.DBConnection()
#update the comicdb to include the Dynamic Names (and any futher changes as required) #update the comicdb to include the Dynamic Names (and any futher changes as required)
@ -1282,7 +1294,6 @@ def upgrade_dynamic():
def checkFolder(folderpath=None): def checkFolder(folderpath=None):
from mylar import PostProcessor from mylar import PostProcessor
import Queue
queue = Queue.Queue() queue = Queue.Queue()
#monitor a selected folder for 'snatched' files that haven't been processed #monitor a selected folder for 'snatched' files that haven't been processed
@ -1328,7 +1339,7 @@ def LoadAlternateSearchNames(seriesname_alt, comicid):
return Alternate_Names return Alternate_Names
def havetotals(refreshit=None): def havetotals(refreshit=None):
import db #import db
comics = [] comics = []
myDB = db.DBConnection() myDB = db.DBConnection()
@ -1816,7 +1827,7 @@ def IssueDetails(filelocation, IssueID=None, justinfo=False):
return issuedetails return issuedetails
def get_issue_title(IssueID=None, ComicID=None, IssueNumber=None, IssueArcID=None): def get_issue_title(IssueID=None, ComicID=None, IssueNumber=None, IssueArcID=None):
import db #import db
myDB = db.DBConnection() myDB = db.DBConnection()
if IssueID: if IssueID:
issue = myDB.selectone('SELECT * FROM issues WHERE IssueID=?', [IssueID]).fetchone() issue = myDB.selectone('SELECT * FROM issues WHERE IssueID=?', [IssueID]).fetchone()
@ -1848,7 +1859,7 @@ def int_num(s):
return float(s) return float(s)
def listPull(weeknumber, year): def listPull(weeknumber, year):
import db #import db
library = {} library = {}
myDB = db.DBConnection() myDB = db.DBConnection()
# Get individual comics # Get individual comics
@ -1858,7 +1869,7 @@ def listPull(weeknumber, year):
return library return library
def listLibrary(comicid=None): def listLibrary(comicid=None):
import db #import db
library = {} library = {}
myDB = db.DBConnection() myDB = db.DBConnection()
if comicid is None: if comicid is None:
@ -1885,7 +1896,7 @@ def listLibrary(comicid=None):
return library return library
def listStoryArcs(): def listStoryArcs():
import db #import db
library = {} library = {}
myDB = db.DBConnection() myDB = db.DBConnection()
# Get Distinct Arc IDs # Get Distinct Arc IDs
@ -1899,7 +1910,7 @@ def listStoryArcs():
return library return library
def listoneoffs(weeknumber, year): def listoneoffs(weeknumber, year):
import db #import db
library = [] library = []
myDB = db.DBConnection() myDB = db.DBConnection()
# Get Distinct one-off issues from the pullist that have already been downloaded / snatched # Get Distinct one-off issues from the pullist that have already been downloaded / snatched
@ -1915,7 +1926,7 @@ def listoneoffs(weeknumber, year):
return library return library
def manualArc(issueid, reading_order, storyarcid): def manualArc(issueid, reading_order, storyarcid):
import db #import db
if issueid.startswith('4000-'): if issueid.startswith('4000-'):
issueid = issueid[5:] issueid = issueid[5:]
@ -2051,7 +2062,7 @@ def manualArc(issueid, reading_order, storyarcid):
return return
def listIssues(weeknumber, year): def listIssues(weeknumber, year):
import db #import db
library = [] library = []
myDB = db.DBConnection() myDB = db.DBConnection()
# Get individual issues # Get individual issues
@ -2096,7 +2107,7 @@ def listIssues(weeknumber, year):
return library return library
def incr_snatched(ComicID): def incr_snatched(ComicID):
import db #import db
myDB = db.DBConnection() myDB = db.DBConnection()
incr_count = myDB.selectone("SELECT Have FROM Comics WHERE ComicID=?", [ComicID]).fetchone() incr_count = myDB.selectone("SELECT Have FROM Comics WHERE ComicID=?", [ComicID]).fetchone()
logger.fdebug('Incrementing HAVE count total to : ' + str(incr_count['Have'] + 1)) logger.fdebug('Incrementing HAVE count total to : ' + str(incr_count['Have'] + 1))
@ -2112,7 +2123,7 @@ def duplicate_filecheck(filename, ComicID=None, IssueID=None, StoryArcID=None, r
#storyarcid = the storyarcid of the issue that's being checked for duplication. #storyarcid = the storyarcid of the issue that's being checked for duplication.
#rtnval = the return value of a previous duplicate_filecheck that's re-running against new values #rtnval = the return value of a previous duplicate_filecheck that's re-running against new values
# #
import db #import db
myDB = db.DBConnection() myDB = db.DBConnection()
logger.info('[DUPECHECK] Duplicate check for ' + filename) logger.info('[DUPECHECK] Duplicate check for ' + filename)
@ -2390,7 +2401,7 @@ def humanize_time(amount, units = 'seconds'):
return buf return buf
def issue_status(IssueID): def issue_status(IssueID):
import db #import db
myDB = db.DBConnection() myDB = db.DBConnection()
IssueID = str(IssueID) IssueID = str(IssueID)
@ -2424,7 +2435,7 @@ def crc(filename):
return hashlib.md5(filename).hexdigest() return hashlib.md5(filename).hexdigest()
def issue_find_ids(ComicName, ComicID, pack, IssueNumber): def issue_find_ids(ComicName, ComicID, pack, IssueNumber):
import db #import db
myDB = db.DBConnection() myDB = db.DBConnection()
@ -2551,7 +2562,7 @@ def cleanHost(host, protocol = True, ssl = False, username = None, password = No
return host return host
def checkthe_id(comicid=None, up_vals=None): def checkthe_id(comicid=None, up_vals=None):
import db #import db
myDB = db.DBConnection() myDB = db.DBConnection()
if not up_vals: if not up_vals:
chk = myDB.selectone("SELECT * from ref32p WHERE ComicID=?", [comicid]).fetchone() chk = myDB.selectone("SELECT * from ref32p WHERE ComicID=?", [comicid]).fetchone()
@ -2582,7 +2593,7 @@ def checkthe_id(comicid=None, up_vals=None):
myDB.upsert("ref32p", newVal, ctrlVal) myDB.upsert("ref32p", newVal, ctrlVal)
def updatearc_locs(storyarcid, issues): def updatearc_locs(storyarcid, issues):
import db #import db
myDB = db.DBConnection() myDB = db.DBConnection()
issuelist = [] issuelist = []
for x in issues: for x in issues:
@ -2672,7 +2683,7 @@ def updatearc_locs(storyarcid, issues):
def spantheyears(storyarcid): def spantheyears(storyarcid):
import db #import db
myDB = db.DBConnection() myDB = db.DBConnection()
totalcnt = myDB.select("SELECT * FROM storyarcs WHERE StoryArcID=?", [storyarcid]) totalcnt = myDB.select("SELECT * FROM storyarcs WHERE StoryArcID=?", [storyarcid])
@ -2736,7 +2747,7 @@ def arcformat(arc, spanyears, publisher):
return dstloc return dstloc
def torrentinfo(issueid=None, torrent_hash=None, download=False, monitor=False): def torrentinfo(issueid=None, torrent_hash=None, download=False, monitor=False):
import db #import db
from base64 import b16encode, b32decode from base64 import b16encode, b32decode
#check the status of the issueid to make sure it's in Snatched status and was grabbed via torrent. #check the status of the issueid to make sure it's in Snatched status and was grabbed via torrent.
@ -2998,7 +3009,7 @@ def weekly_info(week=None, year=None, current=None):
return weekinfo return weekinfo
def latestdate_update(): def latestdate_update():
import db #import db
myDB = db.DBConnection() myDB = db.DBConnection()
ccheck = myDB.select('SELECT a.ComicID, b.IssueID, a.LatestDate, b.ReleaseDate, b.Issue_Number from comics as a left join issues as b on a.comicid=b.comicid where a.LatestDate < b.ReleaseDate or a.LatestDate like "%Unknown%" group by a.ComicID') ccheck = myDB.select('SELECT a.ComicID, b.IssueID, a.LatestDate, b.ReleaseDate, b.Issue_Number from comics as a left join issues as b on a.comicid=b.comicid where a.LatestDate < b.ReleaseDate or a.LatestDate like "%Unknown%" group by a.ComicID')
if ccheck is None or len(ccheck) == 0: if ccheck is None or len(ccheck) == 0:
@ -3019,6 +3030,57 @@ def latestdate_update():
logger.info('updating latest date for : ' + a['ComicID'] + ' to ' + a['LatestDate'] + ' #' + a['LatestIssue']) logger.info('updating latest date for : ' + a['ComicID'] + ' to ' + a['LatestDate'] + ' #' + a['LatestIssue'])
myDB.upsert("comics", newVal, ctrlVal) myDB.upsert("comics", newVal, ctrlVal)
def ddl_downloader(queue):
myDB = db.DBConnection()
while True:
if mylar.DDL_LOCK is True:
time.sleep(5)
elif mylar.DDL_LOCK is False and queue.qsize() >= 1:
item = queue.get(True)
if item == 'exit':
logger.info('Cleaning up workers for shutdown')
break
logger.info('Now loading request from DDL queue: %s' % item['series'])
#write this to the table so we have a record of what's going on.
ctrlval = {'id': item['id']}
val = {'status': 'Downloading'}
myDB.upsert('ddl_info', val, ctrlval)
ddz = getcomics.GC()
ddzstat = ddz.downloadit(item['id'], item['link'], item['mainlink'])
nval = {'status': 'Completed'}
myDB.upsert('ddl_info', nval, ctrlval)
if all([ddzstat['success'] is True, mylar.CONFIG.POST_PROCESSING is True]):
try:
if ddzstat['filename'] is None:
logger.info('%s successfully downloaded - now initiating post-processing.' % (os.path.basename(ddzstat['path'])))
mylar.PP_QUEUE.put({'nzb_name': ddzstat['filename'],
'nzb_folder': ddzstat['path'],
'failed': False,
'issueid': None,
'comicid': item['comicid'],
'apicall': True,
'ddl': True})
else:
logger.info('%s successfully downloaded - now initiating post-processing.' % (ddzstat['filename']))
mylar.PP_QUEUE.put({'nzb_name': ddzstat['filename'],
'nzb_folder': ddzstat['path'],
'failed': False,
'issueid': item['issueid'],
'comicid': item['comicid'],
'apicall': True,
'ddl': True})
except Exception as e:
logger.info('process error: %s [%s]' %(e, ddzstat))
elif ddzstat['success'] is True:
logger.info('File successfully downloaded. Post Processing is not enabled - item retained here: %s' % os.path.join(ddzstat['path'],ddzstat['filename']))
else:
logger.info('[Status: %s] Failed to download: %s ' % (ddzstat['success'], ddzstat))
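The new ddl_downloader() follows the same long-running queue-consumer pattern as Mylar's other worker threads. A stripped-down, self-contained sketch of that loop (the download stand-in and the sample item are hypothetical):

import Queue
import time

DDL_LOCK = False                     # stand-in for mylar.DDL_LOCK

def download(item):
    # stand-in for getcomics.GC().downloadit(...)
    return {'success': True, 'filename': '%s.cbz' % item['series'], 'path': '/tmp'}

def ddl_worker(queue):
    # Honour the single-download lock, block for the next queued item,
    # and exit on the 'exit' sentinel -- mirroring ddl_downloader() above.
    while True:
        if DDL_LOCK:
            time.sleep(5)
            continue
        item = queue.get(True)
        if item == 'exit':
            break
        result = download(item)
        print('%s -> %s' % (item['series'], result['success']))

q = Queue.Queue()
q.put({'series': 'Hypothetical Series', 'id': '1234'})
q.put('exit')
ddl_worker(q)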
def postprocess_main(queue): def postprocess_main(queue):
while True: while True:
if mylar.APILOCK is True: if mylar.APILOCK is True:
@ -3032,7 +3094,10 @@ def postprocess_main(queue):
break break
if mylar.APILOCK is False: if mylar.APILOCK is False:
pprocess = process.Process(item['nzb_name'], item['nzb_folder'], item['failed'], item['issueid'], item['comicid'], item['apicall']) try:
pprocess = process.Process(item['nzb_name'], item['nzb_folder'], item['failed'], item['issueid'], item['comicid'], item['apicall'], item['ddl'])
except:
pprocess = process.Process(item['nzb_name'], item['nzb_folder'], item['failed'], item['issueid'], item['comicid'], item['apicall'])
pp = pprocess.post_process() pp = pprocess.post_process()
time.sleep(5) #arbitrary sleep to let the process attempt to finish pp'ing time.sleep(5) #arbitrary sleep to let the process attempt to finish pp'ing
@ -3114,7 +3179,8 @@ def nzb_monitor(queue):
'failed': nzstat['failed'], 'failed': nzstat['failed'],
'issueid': nzstat['issueid'], 'issueid': nzstat['issueid'],
'comicid': nzstat['comicid'], 'comicid': nzstat['comicid'],
'apicall': nzstat['apicall']}) 'apicall': nzstat['apicall'],
'ddl': False})
#cc = process.Process(nzstat['name'], nzstat['location'], failed=nzstat['failed']) #cc = process.Process(nzstat['name'], nzstat['location'], failed=nzstat['failed'])
#nzpp = cc.post_process() #nzpp = cc.post_process()
except Exception as e: except Exception as e:
@ -3276,7 +3342,7 @@ def date_conversion(originaldate):
def job_management(write=False, job=None, last_run_completed=None, current_run=None, status=None): def job_management(write=False, job=None, last_run_completed=None, current_run=None, status=None):
jobresults = [] jobresults = []
import db #import db
myDB = db.DBConnection() myDB = db.DBConnection()
if job is None: if job is None:
@ -3493,7 +3559,7 @@ def job_management(write=False, job=None, last_run_completed=None, current_run=N
def stupidchk(): def stupidchk():
import db #import db
myDB = db.DBConnection() myDB = db.DBConnection()
CCOMICS = myDB.select("SELECT COUNT(*) FROM comics WHERE Status='Active'") CCOMICS = myDB.select("SELECT COUNT(*) FROM comics WHERE Status='Active'")
ens = myDB.select("SELECT COUNT(*) FROM comics WHERE Status='Loading' OR Status='Paused'") ens = myDB.select("SELECT COUNT(*) FROM comics WHERE Status='Loading' OR Status='Paused'")
@ -3807,7 +3873,7 @@ def publisherImages(publisher):
return comicpublisher return comicpublisher
def lookupthebitches(filelist, folder, nzbname, nzbid, prov, hash, pulldate): def lookupthebitches(filelist, folder, nzbname, nzbid, prov, hash, pulldate):
import db #import db
myDB = db.DBConnection() myDB = db.DBConnection()
watchlist = listLibrary() watchlist = listLibrary()
matchlist = [] matchlist = []
@ -3847,7 +3913,7 @@ def lookupthebitches(filelist, folder, nzbname, nzbid, prov, hash, pulldate):
def DateAddedFix(): def DateAddedFix():
import db #import db
myDB = db.DBConnection() myDB = db.DBConnection()
DA_A = datetime.datetime.today() DA_A = datetime.datetime.today()
DateAdded = DA_A.strftime('%Y-%m-%d') DateAdded = DA_A.strftime('%Y-%m-%d')
@ -3858,8 +3924,6 @@ def DateAddedFix():
for an in annuals: for an in annuals:
myDB.upsert("annuals", {'DateAdded': DateAdded}, {'IssueID': an[0]}) myDB.upsert("annuals", {'DateAdded': DateAdded}, {'IssueID': an[0]})
def file_ops(path,dst,arc=False,one_off=False): def file_ops(path,dst,arc=False,one_off=False):
# # path = source path + filename # # path = source path + filename
# # dst = destination path + filename # # dst = destination path + filename
@ -4004,7 +4068,6 @@ def file_ops(path,dst,arc=False,one_off=False):
else: else:
return False return False
from threading import Thread from threading import Thread
class ThreadWithReturnValue(Thread): class ThreadWithReturnValue(Thread):

View File

@ -240,7 +240,7 @@ def addComictoDB(comicid, mismatch=None, pullupd=None, imported=None, ogcname=No
if mylar.CONFIG.ALTERNATE_LATEST_SERIES_COVERS is False: if mylar.CONFIG.ALTERNATE_LATEST_SERIES_COVERS is False:
PRComicImage = os.path.join('cache', str(comicid) + ".jpg") PRComicImage = os.path.join('cache', str(comicid) + ".jpg")
ComicImage = helpers.replacetheslash(PRComicImage) ComicImage = helpers.replacetheslash(PRComicImage)
if os.path.isfile(os.path.join(comlocation, 'cover.jpg')) is True: if os.path.isfile(PRComicImage) is True:
logger.fdebug('Cover already exists for series. Not redownloading.') logger.fdebug('Cover already exists for series. Not redownloading.')
else: else:
covercheck = helpers.getImage(comicid, comic['ComicImage']) covercheck = helpers.getImage(comicid, comic['ComicImage'])
@ -248,18 +248,15 @@ def addComictoDB(comicid, mismatch=None, pullupd=None, imported=None, ogcname=No
logger.info('Attempting to retrieve alternate comic image for the series.') logger.info('Attempting to retrieve alternate comic image for the series.')
covercheck = helpers.getImage(comicid, comic['ComicImageALT']) covercheck = helpers.getImage(comicid, comic['ComicImageALT'])
PRComicImage = os.path.join('cache', str(comicid) + ".jpg") #if the comic cover local is checked, save a cover.jpg to the series folder.
ComicImage = helpers.replacetheslash(PRComicImage) if all([mylar.CONFIG.COMIC_COVER_LOCAL is True, os.path.isdir(comlocation) is True, os.path.isfile(PRComicImage) is False]):
try:
#if the comic cover local is checked, save a cover.jpg to the series folder. comiclocal = os.path.join(comlocation, 'cover.jpg')
if all([mylar.CONFIG.COMIC_COVER_LOCAL is True, os.path.isdir(comlocation) is True]): shutil.copyfile(PRComicImage, comiclocal)
try: if mylar.CONFIG.ENFORCE_PERMS:
comiclocal = os.path.join(comlocation, 'cover.jpg') filechecker.setperms(comiclocal)
shutil.copyfile(os.path.join(mylar.CONFIG.CACHE_DIR, str(comicid) + '.jpg'), comiclocal) except IOError as e:
if mylar.CONFIG.ENFORCE_PERMS: logger.error('Unable to save cover (' + str(comiclocal) + ') into series directory (' + str(comlocation) + ') at this time.')
filechecker.setperms(comiclocal)
except IOError as e:
logger.error('Unable to save cover (' + str(comiclocal) + ') into series directory (' + str(comlocation) + ') at this time.')
else: else:
ComicImage = None ComicImage = None
@ -350,8 +347,21 @@ def addComictoDB(comicid, mismatch=None, pullupd=None, imported=None, ogcname=No
if anndata: if anndata:
manualAnnual(annchk=anndata) manualAnnual(annchk=anndata)
if all([mylar.CONFIG.ALTERNATE_LATEST_SERIES_COVERS is True, lastissueid != importantdates['LatestIssueID']]): if mylar.CONFIG.ALTERNATE_LATEST_SERIES_COVERS is True: #, lastissueid != importantdates['LatestIssueID']]):
image_it(comicid, importantdates['LatestIssueID'], comlocation, comic['ComicImage']) if os.path.isfile(os.path.join(mylar.CONFIG.CACHE_DIR, comicid + '.jpg')) is True:
cover_modtime = datetime.datetime.utcfromtimestamp(os.path.getmtime(os.path.join(mylar.CONFIG.CACHE_DIR, comicid + '.jpg')))
cover_mtime = datetime.datetime.strftime(cover_modtime, '%Y-%m-%d')
if importantdates['LatestStoreDate'] != '0000-00-00':
lsd = re.sub('-', '', importantdates['LatestStoreDate']).strip()
else:
lsd = re.sub('-', '', importantdates['LatestDate']).strip()
if re.sub('-', '', cover_mtime).strip() < lsd:
logger.info('Attempting to retrieve new issue cover for display')
image_it(comicid, importantdates['LatestIssueID'], comlocation, comic['ComicImage'])
else:
logger.fdebug('no update required - lastissueid [%s] = latestissueid [%s]' % (lastissueid, importantdates['LatestIssueID']))
else:
image_it(comicid, importantdates['LatestIssueID'], comlocation, comic['ComicImage'])
else: else:
logger.fdebug('no update required - lastissueid [%s] = latestissueid [%s]' % (lastissueid, importantdates['LatestIssueID'])) logger.fdebug('no update required - lastissueid [%s] = latestissueid [%s]' % (lastissueid, importantdates['LatestIssueID']))
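The cover-refresh branch above only re-grabs the latest issue cover when the cached image predates the newest store date; the comparison reduces to a 'YYYYMMDD' string compare. Roughly (an approximation, not the committed routine; the sample path is hypothetical):

import datetime
import os
import re

def cover_needs_refresh(cover_path, latest_store_date):
    # cover_path: existing cached cover image; latest_store_date: 'YYYY-MM-DD'.
    cover_modtime = datetime.datetime.utcfromtimestamp(os.path.getmtime(cover_path))
    cover_mtime = datetime.datetime.strftime(cover_modtime, '%Y-%m-%d')
    return re.sub('-', '', cover_mtime).strip() < re.sub('-', '', latest_store_date).strip()

# cover_needs_refresh('/path/to/cache/12345.jpg', '2019-02-13') -> True when
# the cached cover is older than the latest store date.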
@ -1070,6 +1080,7 @@ def updateissuedata(comicid, comicname=None, issued=None, comicIssues=None, call
#let's start issue #'s at 0 -- thanks to DC for the new 52 reboot! :) #let's start issue #'s at 0 -- thanks to DC for the new 52 reboot! :)
latestiss = "0" latestiss = "0"
latestdate = "0000-00-00" latestdate = "0000-00-00"
latest_stdate = "0000-00-00"
latestissueid = None latestissueid = None
firstiss = "10000000" firstiss = "10000000"
firstdate = "2099-00-00" firstdate = "2099-00-00"
@ -1195,8 +1206,17 @@ def updateissuedata(comicid, comicname=None, issued=None, comicIssues=None, call
a+=1 a+=1
int_issnum = (int(issno) * 1000) + ordtot int_issnum = (int(issno) * 1000) + ordtot
elif invchk == "true": elif invchk == "true":
logger.fdebug('this does not have an issue # that I can parse properly.') if any([issnum.lower() == 'fall 2005', issnum.lower() == 'spring 2005', issnum.lower() == 'summer 2006', issnum.lower() == 'winter 2009']):
return issnum = re.sub('[0-9]+', '', issnum).strip()
inu = 0
ordtot = 0
while (inu < len(issnum)):
ordtot += ord(issnum[inu].lower()) #lower-case the letters for simplicity
inu+=1
int_issnum = ordtot
else:
logger.fdebug('this does not have an issue # that I can parse properly.')
return
else: else:
if int_issnum is not None: if int_issnum is not None:
pass pass
@ -1232,8 +1252,10 @@ def updateissuedata(comicid, comicname=None, issued=None, comicIssues=None, call
latestissueid = issid latestissueid = issid
if firstval['Issue_Date'] != '0000-00-00': if firstval['Issue_Date'] != '0000-00-00':
latestdate = str(firstval['Issue_Date']) latestdate = str(firstval['Issue_Date'])
latest_stdate = storedate
else: else:
latestdate = storedate latestdate = storedate
latest_stdate = storedate
if firstval['Issue_Date'] < firstdate and firstval['Issue_Date'] != '0000-00-00': if firstval['Issue_Date'] < firstdate and firstval['Issue_Date'] != '0000-00-00':
firstiss = issnum firstiss = issnum
@ -1281,7 +1303,12 @@ def updateissuedata(comicid, comicname=None, issued=None, comicIssues=None, call
styear = str(SeriesYear) styear = str(SeriesYear)
if firstdate is not None: if firstdate is not None:
if SeriesYear != firstdate[:4]: if SeriesYear != firstdate[:4]:
logger.fdebug('Series start date (%s) crosses over into different year (%s) - assuming store date of first issue (%s) as Start Year (even though CV will say previous year - it\'s all gravy).' % (SeriesYear, firstdate[:4], firstdate)) if firstdate[:4] == '2099':
logger.fdebug('Series start date (%s) differs from First Issue start date as First Issue date is unknown - assuming Series Year as Start Year (even though CV might say previous year - it\'s all gravy).' % (SeriesYear))
else:
logger.fdebug('Series start date (%s) cannot be properly determined and/or it might cross over into a different year (%s) - assuming store date of first issue (%s) as Start Year (even though CV might say previous year - it\'s all gravy).' % (SeriesYear, firstdate[:4], firstdate))
if firstdate == '2099-00-00':
firstdate = '%s-01-01' % SeriesYear
styear = str(firstdate[:4]) styear = str(firstdate[:4])
if firstdate[5:7] == '00': if firstdate[5:7] == '00':
@ -1311,7 +1338,15 @@ def updateissuedata(comicid, comicname=None, issued=None, comicIssues=None, call
if recentchk <= 55: if recentchk <= 55:
lastpubdate = 'Present' lastpubdate = 'Present'
else: else:
lastpubdate = str(ltmonth) + ' ' + str(ltyear) if ltmonth == '?':
if ltyear == '0000':
lastpubdate = '?'
else:
lastpubdate = str(ltyear)
elif ltyear == '0000':
lastpubdate = '?'
else:
lastpubdate = str(ltmonth) + ' ' + str(ltyear)
if stmonth == '?' and ('?' in lastpubdate and '0000' in lastpubdate): if stmonth == '?' and ('?' in lastpubdate and '0000' in lastpubdate):
lastpubdate = 'Present' lastpubdate = 'Present'
@ -1348,6 +1383,7 @@ def updateissuedata(comicid, comicname=None, issued=None, comicIssues=None, call
importantdates['LatestIssue'] = latestiss importantdates['LatestIssue'] = latestiss
importantdates['LatestIssueID'] = latestissueid importantdates['LatestIssueID'] = latestissueid
importantdates['LatestDate'] = latestdate importantdates['LatestDate'] = latestdate
importantdates['LatestStoreDate'] = latest_stdate
importantdates['LastPubDate'] = lastpubdate importantdates['LastPubDate'] = lastpubdate
importantdates['SeriesStatus'] = 'Active' importantdates['SeriesStatus'] = 'Active'
@ -1552,10 +1588,10 @@ def image_it(comicid, latestissueid, comlocation, ComicImage):
ComicImage = helpers.replacetheslash(PRComicImage) ComicImage = helpers.replacetheslash(PRComicImage)
#if the comic cover local is checked, save a cover.jpg to the series folder. #if the comic cover local is checked, save a cover.jpg to the series folder.
if all([mylar.CONFIG.COMIC_COVER_LOCAL is True, os.path.isdir(comlocation) is True]): if all([mylar.CONFIG.COMIC_COVER_LOCAL is True, os.path.isdir(comlocation) is True, os.path.isfile(PRComicImage)]):
try: try:
comiclocal = os.path.join(comlocation, 'cover.jpg') comiclocal = os.path.join(comlocation, 'cover.jpg')
shutil.copyfile(os.path.join(mylar.CONFIG.CACHE_DIR, str(comicid) + '.jpg'), comiclocal) shutil.copyfile(PRComicImage, comiclocal)
if mylar.CONFIG.ENFORCE_PERMS: if mylar.CONFIG.ENFORCE_PERMS:
filechecker.setperms(comiclocal) filechecker.setperms(comiclocal)
except IOError as e: except IOError as e:

View File

@ -28,7 +28,7 @@ def locg(pulldate=None,weeknumber=None,year=None):
if pulldate is None or pulldate == '00000000': if pulldate is None or pulldate == '00000000':
weeknumber = todaydate.strftime("%U") weeknumber = todaydate.strftime("%U")
elif '-' in pulldate: elif '-' in pulldate:
#find the week number #find the week number
weektmp = datetime.date(*(int(s) for s in pulldate.split('-'))) weektmp = datetime.date(*(int(s) for s in pulldate.split('-')))
weeknumber = weektmp.strftime("%U") weeknumber = weektmp.strftime("%U")
#we need to now make sure we default to the correct week #we need to now make sure we default to the correct week
@ -58,82 +58,90 @@ def locg(pulldate=None,weeknumber=None,year=None):
logger.warn(e) logger.warn(e)
return {'status': 'failure'} return {'status': 'failure'}
if r.status_code == '619': if str(r.status_code) == '619':
logger.warn('[' + str(r.status_code) + '] No date supplied, or an invalid date was provided [' + str(pulldate) + ']') logger.warn('[' + str(r.status_code) + '] No date supplied, or an invalid date was provided [' + str(pulldate) + ']')
return {'status': 'failure'} return {'status': 'failure'}
elif r.status_code == '999' or r.status_code == '111': elif str(r.status_code) == '999' or str(r.status_code) == '111':
logger.warn('[' + str(r.status_code) + '] Unable to retrieve data from site - this is a site-specific issue [' + str(pulldate) + ']') logger.warn('[' + str(r.status_code) + '] Unable to retrieve data from site - this is a site-specific issue [' + str(pulldate) + ']')
return {'status': 'failure'} return {'status': 'failure'}
elif str(r.status_code) == '200':
data = r.json()
data = r.json() logger.info('[WEEKLY-PULL] There are ' + str(len(data)) + ' issues for the week of ' + str(weeknumber) + ', ' + str(year))
pull = []
logger.info('[WEEKLY-PULL] There are ' + str(len(data)) + ' issues for the week of ' + str(weeknumber) + ', ' + str(year)) for x in data:
pull = [] pull.append({'series': x['series'],
'alias': x['alias'],
'issue': x['issue'],
'publisher': x['publisher'],
'shipdate': x['shipdate'],
'coverdate': x['coverdate'],
'comicid': x['comicid'],
'issueid': x['issueid'],
'weeknumber': x['weeknumber'],
'annuallink': x['link'],
'year': x['year'],
'volume': x['volume'],
'seriesyear': x['seriesyear'],
'format': x['type']})
shipdate = x['shipdate']
for x in data: myDB = db.DBConnection()
pull.append({'series': x['series'],
'alias': x['alias'],
'issue': x['issue'],
'publisher': x['publisher'],
'shipdate': x['shipdate'],
'coverdate': x['coverdate'],
'comicid': x['comicid'],
'issueid': x['issueid'],
'weeknumber': x['weeknumber'],
'annuallink': x['link'],
'year': x['year'],
'volume': x['volume'],
'seriesyear': x['seriesyear'],
'format': x['type']})
shipdate = x['shipdate']
myDB = db.DBConnection() myDB.action("CREATE TABLE IF NOT EXISTS weekly (SHIPDATE, PUBLISHER text, ISSUE text, COMIC VARCHAR(150), EXTRA text, STATUS text, ComicID text, IssueID text, CV_Last_Update text, DynamicName text, weeknumber text, year text, volume text, seriesyear text, annuallink text, format text, rowid INTEGER PRIMARY KEY)")
myDB.action("CREATE TABLE IF NOT EXISTS weekly (SHIPDATE, PUBLISHER text, ISSUE text, COMIC VARCHAR(150), EXTRA text, STATUS text, ComicID text, IssueID text, CV_Last_Update text, DynamicName text, weeknumber text, year text, volume text, seriesyear text, annuallink text, format text, rowid INTEGER PRIMARY KEY)") #clear out the upcoming table here so they show the new values properly.
if pulldate == '00000000':
logger.info('Re-creating pullist to ensure everything\'s fresh.')
myDB.action('DELETE FROM weekly WHERE weeknumber=? AND year=?',[int(weeknumber), int(year)])
#clear out the upcoming table here so they show the new values properly. for x in pull:
if pulldate == '00000000': comicid = None
logger.info('Re-creating pullist to ensure everything\'s fresh.') issueid = None
myDB.action('DELETE FROM weekly WHERE weeknumber=? AND year=?',[int(weeknumber), int(year)]) comicname = x['series']
if x['comicid'] is not None:
comicid = x['comicid']
if x['issueid'] is not None:
issueid= x['issueid']
if x['alias'] is not None:
comicname = x['alias']
for x in pull: cl_d = mylar.filechecker.FileChecker()
comicid = None cl_dyninfo = cl_d.dynamic_replace(comicname)
issueid = None dynamic_name = re.sub('[\|\s]','', cl_dyninfo['mod_seriesname'].lower()).strip()
comicname = x['series']
if x['comicid'] is not None:
comicid = x['comicid']
if x['issueid'] is not None:
issueid= x['issueid']
if x['alias'] is not None:
comicname = x['alias']
cl_d = mylar.filechecker.FileChecker() controlValueDict = {'DYNAMICNAME': dynamic_name,
cl_dyninfo = cl_d.dynamic_replace(comicname) 'ISSUE': re.sub('#', '', x['issue']).strip()}
dynamic_name = re.sub('[\|\s]','', cl_dyninfo['mod_seriesname'].lower()).strip()
controlValueDict = {'DYNAMICNAME': dynamic_name, newValueDict = {'SHIPDATE': x['shipdate'],
'ISSUE': re.sub('#', '', x['issue']).strip()} 'PUBLISHER': x['publisher'],
'STATUS': 'Skipped',
newValueDict = {'SHIPDATE': x['shipdate'], 'COMIC': comicname,
'PUBLISHER': x['publisher'], 'COMICID': comicid,
'STATUS': 'Skipped', 'ISSUEID': issueid,
'COMIC': comicname, 'WEEKNUMBER': x['weeknumber'],
'COMICID': comicid, 'ANNUALLINK': x['annuallink'],
'ISSUEID': issueid, 'YEAR': x['year'],
'WEEKNUMBER': x['weeknumber'], 'VOLUME': x['volume'],
'ANNUALLINK': x['annuallink'], 'SERIESYEAR': x['seriesyear'],
'YEAR': x['year'], 'FORMAT': x['format']}
'VOLUME': x['volume'], myDB.upsert("weekly", newValueDict, controlValueDict)
'SERIESYEAR': x['seriesyear'],
'FORMAT': x['format']}
myDB.upsert("weekly", newValueDict, controlValueDict)
logger.info('[PULL-LIST] Successfully populated pull-list into Mylar for the week of: ' + str(weeknumber)) logger.info('[PULL-LIST] Successfully populated pull-list into Mylar for the week of: ' + str(weeknumber))
#set the last poll date/time here so that we don't start overwriting stuff too much... #set the last poll date/time here so that we don't start overwriting stuff too much...
mylar.CONFIG.PULL_REFRESH = todaydate mylar.CONFIG.PULL_REFRESH = todaydate
return {'status': 'success', return {'status': 'success',
'count': len(data), 'count': len(data),
'weeknumber': weeknumber, 'weeknumber': weeknumber,
'year': year} 'year': year}
else:
if str(r.status_code) == '666':
logger.warn('[%s] The error returned is: %s' % (r.status_code, r.headers))
return {'status': 'update_required'}
else:
logger.warn('[%s] The error returned is: %s' % (r.status_code, r.headers))
return {'status': 'failure'}
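The str() casts added around r.status_code in the weeklypull hunk above matter because requests exposes status_code as an integer, so the earlier string comparisons could never match:

status_code = 619                   # what requests.Response.status_code holds (an int)
print(status_code == '619')         # False -- an int never equals a str here
print(str(status_code) == '619')    # True, hence the added casts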

View File

@ -228,7 +228,8 @@ class NZBGet(object):
'failed': False, 'failed': False,
'issueid': nzbinfo['issueid'], 'issueid': nzbinfo['issueid'],
'comicid': nzbinfo['comicid'], 'comicid': nzbinfo['comicid'],
'apicall': True} 'apicall': True,
'ddl': False}
else: else:
logger.warn('Could not find completed NZBID %s in history' % nzbid) logger.warn('Could not find completed NZBID %s in history' % nzbid)
return {'status': False} return {'status': False}

View File

@ -21,13 +21,14 @@ import logger
class Process(object): class Process(object):
def __init__(self, nzb_name, nzb_folder, failed=False, issueid=None, comicid=None, apicall=False): def __init__(self, nzb_name, nzb_folder, failed=False, issueid=None, comicid=None, apicall=False, ddl=False):
self.nzb_name = nzb_name self.nzb_name = nzb_name
self.nzb_folder = nzb_folder self.nzb_folder = nzb_folder
self.failed = failed self.failed = failed
self.issueid = issueid self.issueid = issueid
self.comicid = comicid self.comicid = comicid
self.apicall = apicall self.apicall = apicall
self.ddl = ddl
def post_process(self): def post_process(self):
if self.failed == '0': if self.failed == '0':
@ -39,7 +40,7 @@ class Process(object):
retry_outside = False retry_outside = False
if self.failed is False: if self.failed is False:
PostProcess = mylar.PostProcessor.PostProcessor(self.nzb_name, self.nzb_folder, self.issueid, queue=queue, comicid=self.comicid, apicall=self.apicall) PostProcess = mylar.PostProcessor.PostProcessor(self.nzb_name, self.nzb_folder, self.issueid, queue=queue, comicid=self.comicid, apicall=self.apicall, ddl=self.ddl)
if any([self.nzb_name == 'Manual Run', self.nzb_name == 'Manual+Run', self.apicall is True, self.issueid is not None]): if any([self.nzb_name == 'Manual Run', self.nzb_name == 'Manual+Run', self.apicall is True, self.issueid is not None]):
threading.Thread(target=PostProcess.Process).start() threading.Thread(target=PostProcess.Process).start()
else: else:

View File

@ -920,33 +920,12 @@ def torsend2client(seriesname, issue, seriesyear, linkit, site, pubhash=None):
'authkey': mylar.AUTHKEY_32P, 'authkey': mylar.AUTHKEY_32P,
'id': linkit} 'id': linkit}
headers = None #{'Accept-encoding': 'gzip', dfile = auth32p.info32p()
# 'User-Agent': str(mylar.USER_AGENT)} file_download = dfile.downloadfile(payload, filepath)
#elif site == 'TPSE': if file_download is False:
# pass return "fail"
#linkit should be the magnet link since it's TPSE
#url = linkit
#url = helpers.torrent_create('TPSE', linkit) logger.fdebug('[%s] Saved torrent file to : %s' % (site, filepath))
#if url.startswith('https'):
# tpse_referrer = 'https://torrentproject.se/'
#else:
# tpse_referrer = 'http://torrentproject.se/'
#try:
# scraper = cfscrape.create_scraper()
# cf_cookievalue, cf_user_agent = scraper.get_tokens(url)
# headers = {'Accept-encoding': 'gzip',
# 'User-Agent': cf_user_agent}
#except Exception, e:
# return "fail"
#logger.fdebug('Grabbing torrent from url:' + str(url))
#payload = None
#verify = False
elif site == 'DEM': elif site == 'DEM':
url = helpers.torrent_create('DEM', linkit) url = helpers.torrent_create('DEM', linkit)
@ -991,7 +970,7 @@ def torsend2client(seriesname, issue, seriesyear, linkit, site, pubhash=None):
payload = None payload = None
verify = False verify = False
if site != 'Public Torrents': if site != 'Public Torrents' and site != '32P':
if not verify: if not verify:
#32P throws back an insecure warning because it can't validate against the CA. The below suppresses the message just for 32P instead of being displayed. #32P throws back an insecure warning because it can't validate against the CA. The below suppresses the message just for 32P instead of being displayed.
#disable SSL warnings - too many 'warning' messages about invalid certificates #disable SSL warnings - too many 'warning' messages about invalid certificates
@ -1008,6 +987,7 @@ def torsend2client(seriesname, issue, seriesyear, linkit, site, pubhash=None):
except ImportError: except ImportError:
logger.warn('[EPIC FAILURE] Cannot load the requests module') logger.warn('[EPIC FAILURE] Cannot load the requests module')
return "fail" return "fail"
try: try:
scraper = cfscrape.create_scraper() scraper = cfscrape.create_scraper()
if site == 'WWT': if site == 'WWT':
@ -1020,31 +1000,31 @@ def torsend2client(seriesname, issue, seriesyear, linkit, site, pubhash=None):
#r = requests.get(url, params=payload, verify=verify, stream=True, headers=headers) #r = requests.get(url, params=payload, verify=verify, stream=True, headers=headers)
except Exception, e: except Exception, e:
logger.warn('Error fetching data from %s (%s): %s' % (site, url, e)) logger.warn('Error fetching data from %s (%s): %s' % (site, url, e))
if site == '32P': # if site == '32P':
logger.info('[TOR2CLIENT-32P] Retrying with 32P') # logger.info('[TOR2CLIENT-32P] Retrying with 32P')
if mylar.CONFIG.MODE_32P == 1: # if mylar.CONFIG.MODE_32P == 1:
logger.info('[TOR2CLIENT-32P] Attempting to re-authenticate against 32P and poll new keys as required.') # logger.info('[TOR2CLIENT-32P] Attempting to re-authenticate against 32P and poll new keys as required.')
feed32p = auth32p.info32p(reauthenticate=True) # feed32p = auth32p.info32p(reauthenticate=True)
feedinfo = feed32p.authenticate() # feedinfo = feed32p.authenticate()
if feedinfo == "disable": # if feedinfo == "disable":
helpers.disable_provider('32P') # helpers.disable_provider('32P')
return "fail" # return "fail"
logger.debug('[TOR2CLIENT-32P] Creating CF Scraper') # logger.debug('[TOR2CLIENT-32P] Creating CF Scraper')
scraper = cfscrape.create_scraper() # scraper = cfscrape.create_scraper()
try: # try:
r = scraper.get(url, params=payload, verify=verify, allow_redirects=True) # r = scraper.get(url, params=payload, verify=verify, allow_redirects=True)
except Exception, e: # except Exception, e:
logger.warn('[TOR2CLIENT-32P] Unable to GET %s (%s): %s' % (site, url, e)) # logger.warn('[TOR2CLIENT-32P] Unable to GET %s (%s): %s' % (site, url, e))
return "fail" # return "fail"
else: # else:
logger.warn('[TOR2CLIENT-32P] Unable to authenticate using existing RSS Feed given. Make sure that you have provided a CURRENT feed from 32P') # logger.warn('[TOR2CLIENT-32P] Unable to authenticate using existing RSS Feed given. Make sure that you have provided a CURRENT feed from 32P')
return "fail" # return "fail"
else: # else:
return "fail" # return "fail"
if any([site == 'DEM', site == 'WWT']) and any([str(r.status_code) == '403', str(r.status_code) == '404', str(r.status_code) == '503']): if any([site == 'DEM', site == 'WWT']) and any([str(r.status_code) == '403', str(r.status_code) == '404', str(r.status_code) == '503']):
if str(r.status_code) != '503': if str(r.status_code) != '503':
@ -1069,15 +1049,6 @@ def torsend2client(seriesname, issue, seriesyear, linkit, site, pubhash=None):
except Exception, e: except Exception, e:
return "fail" return "fail"
if str(r.status_code) != '200':
logger.warn('Unable to download torrent from ' + site + ' [Status Code returned: ' + str(r.status_code) + ']')
if str(r.status_code) == '404' and site == '32P':
logger.warn('[32P-CACHED_ENTRY] Entry found in 32P cache - incorrect. Torrent has probably been merged into a pack, or another series id. Removing from cache.')
delete_cache_entry(linkit)
else:
logger.info('content: %s' % r.content)
return "fail"
if any([site == 'DEM', site == 'WWT']): if any([site == 'DEM', site == 'WWT']):
if r.headers.get('Content-Encoding') == 'gzip': if r.headers.get('Content-Encoding') == 'gzip':
buf = StringIO(r.content) buf = StringIO(r.content)
@ -1091,8 +1062,9 @@ def torsend2client(seriesname, issue, seriesyear, linkit, site, pubhash=None):
logger.fdebug('[' + site + '] Saved torrent file to : ' + filepath) logger.fdebug('[' + site + '] Saved torrent file to : ' + filepath)
else: else:
#tpse is magnet links only... if site != '32P':
filepath = linkit #tpse is magnet links only...
filepath = linkit
if mylar.USE_UTORRENT: if mylar.USE_UTORRENT:
uTC = utorrent.utorrentclient() uTC = utorrent.utorrentclient()

View File

@ -131,7 +131,8 @@ class SABnzbd(object):
'failed': False, 'failed': False,
'issueid': nzbinfo['issueid'], 'issueid': nzbinfo['issueid'],
'comicid': nzbinfo['comicid'], 'comicid': nzbinfo['comicid'],
'apicall': True} 'apicall': True,
'ddl': False}
break break
else: else:
logger.info('no file found where it should be @ %s - is there another script that moves things after completion ?' % hq['storage']) logger.info('no file found where it should be @ %s - is there another script that moves things after completion ?' % hq['storage'])
@ -152,7 +153,8 @@ class SABnzbd(object):
'failed': True, 'failed': True,
'issueid': sendresponse['issueid'], 'issueid': sendresponse['issueid'],
'comicid': sendresponse['comicid'], 'comicid': sendresponse['comicid'],
'apicall': True} 'apicall': True,
'ddl': False}
break break
break break

View File

@ -16,7 +16,7 @@
from __future__ import division from __future__ import division
import mylar import mylar
from mylar import logger, db, updater, helpers, parseit, findcomicfeed, notifiers, rsscheck, Failed, filechecker, auth32p, sabnzbd, nzbget, wwt #, getcomics from mylar import logger, db, updater, helpers, parseit, findcomicfeed, notifiers, rsscheck, Failed, filechecker, auth32p, sabnzbd, nzbget, wwt, getcomics
import feedparser import feedparser
import requests import requests
@ -44,7 +44,7 @@ def search_init(ComicName, IssueNumber, ComicYear, SeriesYear, Publisher, IssueD
unaltered_ComicName = None unaltered_ComicName = None
if filesafe: if filesafe:
if filesafe != ComicName and mode != 'want_ann': if filesafe != ComicName and mode != 'want_ann':
logger.info('[SEARCH] Special Characters exist within Series Title. Enabling search-safe Name : ' + filesafe) logger.info('[SEARCH] Special Characters exist within Series Title. Enabling search-safe Name : %s' % filesafe)
if AlternateSearch is None or AlternateSearch == 'None': if AlternateSearch is None or AlternateSearch == 'None':
AlternateSearch = filesafe AlternateSearch = filesafe
else: else:
@ -60,7 +60,7 @@ def search_init(ComicName, IssueNumber, ComicYear, SeriesYear, Publisher, IssueD
if Publisher: if Publisher:
if Publisher == 'IDW Publishing': if Publisher == 'IDW Publishing':
Publisher = 'IDW' Publisher = 'IDW'
logger.fdebug('Publisher is : ' + Publisher) logger.fdebug('Publisher is : %s' % Publisher)
if IssueArcID and not IssueID: if IssueArcID and not IssueID:
issuetitle = helpers.get_issue_title(IssueArcID) issuetitle = helpers.get_issue_title(IssueArcID)
@ -68,7 +68,7 @@ def search_init(ComicName, IssueNumber, ComicYear, SeriesYear, Publisher, IssueD
issuetitle = helpers.get_issue_title(IssueID) issuetitle = helpers.get_issue_title(IssueID)
if issuetitle: if issuetitle:
logger.info('Issue Title given as : ' + issuetitle) logger.fdebug('Issue Title given as : %s' % issuetitle)
else: else:
logger.fdebug('Issue Title not found. Setting to None.') logger.fdebug('Issue Title not found. Setting to None.')
@ -91,8 +91,8 @@ def search_init(ComicName, IssueNumber, ComicYear, SeriesYear, Publisher, IssueD
oneoff = True oneoff = True
if SARC: if SARC:
logger.fdebug("Story-ARC Search parameters:") logger.fdebug("Story-ARC Search parameters:")
logger.fdebug("Story-ARC: " + str(SARC)) logger.fdebug("Story-ARC: %s" % SARC)
logger.fdebug("IssueArcID: " + str(IssueArcID)) logger.fdebug("IssueArcID: %s" % IssueArcID)
torprovider = [] torprovider = []
torp = 0 torp = 0
@ -177,14 +177,18 @@ def search_init(ComicName, IssueNumber, ComicYear, SeriesYear, Publisher, IssueD
prov_order, torznab_info, newznab_info = provider_sequence(nzbprovider, torprovider, newznab_hosts, torznab_hosts, ddlprovider) prov_order, torznab_info, newznab_info = provider_sequence(nzbprovider, torprovider, newznab_hosts, torznab_hosts, ddlprovider)
# end provider order sequencing # end provider order sequencing
logger.info('search provider order is ' + str(prov_order)) logger.fdebug('search provider order is ' + str(prov_order))
#fix for issue dates between Nov-Dec/(Jan-Feb-Mar) #fix for issue dates between Nov-Dec/(Jan-Feb-Mar)
IssDt = str(IssueDate)[5:7] IssDt = str(IssueDate)[5:7]
if IssDt == "12" or IssDt == "11" or IssDt == "01" or IssDt == "02" or IssDt == "03": if any([IssDt == "12", IssDt == "11", IssDt == "01", IssDt == "02", IssDt == "03"]):
IssDateFix = IssDt IssDateFix = IssDt
else: else:
IssDateFix = "no" IssDateFix = "no"
if StoreDate is not None:
StDt = str(StoreDate)[5:7]
if any([StDt == "10", StDt == "12", StDt == "11", StDt == "01", StDt == "02", StDt == "03"]):
IssDateFix = StDt
searchcnt = 0 searchcnt = 0
srchloop = 1 srchloop = 1
@ -326,10 +330,13 @@ def search_init(ComicName, IssueNumber, ComicYear, SeriesYear, Publisher, IssueD
#sure it's not disabled (it gets auto-disabled on maxing out the API hits) #sure it's not disabled (it gets auto-disabled on maxing out the API hits)
prov_count+=1 prov_count+=1
continue continue
elif all([searchprov == '32P', checked_once is True]) or all ([searchprov == 'Public Torrents', checked_once is True]) or all([searchprov == 'experimental', checked_once is True]) or all([searchprov == 'DDL', checked_once is True]): elif all([searchprov == '32P', checked_once is True]) or all([searchprov == 'DDL', checked_once is True]) or all ([searchprov == 'Public Torrents', checked_once is True]) or all([searchprov == 'experimental', checked_once is True]) or all([searchprov == 'DDL', checked_once is True]):
prov_count+=1 prov_count+=1
continue continue
if searchmode == 'rss': if searchmode == 'rss':
if searchprov.lower() == 'ddl':
prov_count+=1
continue
findit = NZB_SEARCH(ComicName, IssueNumber, ComicYear, SeriesYear, Publisher, IssueDate, StoreDate, searchprov, send_prov_count, IssDateFix, IssueID, UseFuzzy, newznab_host, ComicVersion=ComicVersion, SARC=SARC, IssueArcID=IssueArcID, RSS="yes", ComicID=ComicID, issuetitle=issuetitle, unaltered_ComicName=unaltered_ComicName, oneoff=oneoff, cmloopit=cmloopit, manual=manual, torznab_host=torznab_host, digitaldate=digitaldate, booktype=booktype) findit = NZB_SEARCH(ComicName, IssueNumber, ComicYear, SeriesYear, Publisher, IssueDate, StoreDate, searchprov, send_prov_count, IssDateFix, IssueID, UseFuzzy, newznab_host, ComicVersion=ComicVersion, SARC=SARC, IssueArcID=IssueArcID, RSS="yes", ComicID=ComicID, issuetitle=issuetitle, unaltered_ComicName=unaltered_ComicName, oneoff=oneoff, cmloopit=cmloopit, manual=manual, torznab_host=torznab_host, digitaldate=digitaldate, booktype=booktype)
if findit['status'] is False: if findit['status'] is False:
if AlternateSearch is not None and AlternateSearch != "None": if AlternateSearch is not None and AlternateSearch != "None":
@ -351,7 +358,7 @@ def search_init(ComicName, IssueNumber, ComicYear, SeriesYear, Publisher, IssueD
else: else:
findit = NZB_SEARCH(ComicName, IssueNumber, ComicYear, SeriesYear, Publisher, IssueDate, StoreDate, searchprov, send_prov_count, IssDateFix, IssueID, UseFuzzy, newznab_host, ComicVersion=ComicVersion, SARC=SARC, IssueArcID=IssueArcID, RSS="no", ComicID=ComicID, issuetitle=issuetitle, unaltered_ComicName=unaltered_ComicName, allow_packs=allow_packs, oneoff=oneoff, cmloopit=cmloopit, manual=manual, torznab_host=torznab_host, torrentid_32p=torrentid_32p, digitaldate=digitaldate, booktype=booktype) findit = NZB_SEARCH(ComicName, IssueNumber, ComicYear, SeriesYear, Publisher, IssueDate, StoreDate, searchprov, send_prov_count, IssDateFix, IssueID, UseFuzzy, newznab_host, ComicVersion=ComicVersion, SARC=SARC, IssueArcID=IssueArcID, RSS="no", ComicID=ComicID, issuetitle=issuetitle, unaltered_ComicName=unaltered_ComicName, allow_packs=allow_packs, oneoff=oneoff, cmloopit=cmloopit, manual=manual, torznab_host=torznab_host, torrentid_32p=torrentid_32p, digitaldate=digitaldate, booktype=booktype)
if all([searchprov == '32P', checked_once is False]) or all([searchprov == 'Public Torrents', checked_once is False]) or all([searchprov == 'experimental', checked_once is False]): if all([searchprov == '32P', checked_once is False]) or all([searchprov.lower() == 'ddl', checked_once is False]) or all([searchprov == 'Public Torrents', checked_once is False]) or all([searchprov == 'experimental', checked_once is False]):
checked_once = True checked_once = True
if findit['status'] is False: if findit['status'] is False:
if AlternateSearch is not None and AlternateSearch != "None": if AlternateSearch is not None and AlternateSearch != "None":
@ -417,7 +424,7 @@ def search_init(ComicName, IssueNumber, ComicYear, SeriesYear, Publisher, IssueD
searchprov = mylar.TMP_PROV searchprov = mylar.TMP_PROV
return findit, searchprov return findit, searchprov
else: else:
logger.info('findit: %s' % findit) logger.fdebug('findit: %s' % findit)
#if searchprov == '32P': #if searchprov == '32P':
# pass # pass
if manualsearch is None: if manualsearch is None:
@ -615,9 +622,9 @@ def NZB_SEARCH(ComicName, IssueNumber, ComicYear, SeriesYear, Publisher, IssueDa
if nzbprov == 'ddl': if nzbprov == 'ddl':
cmname = re.sub("%20", " ", str(comsrc)) cmname = re.sub("%20", " ", str(comsrc))
logger.fdebug('Sending request to DDL site for : %s %s' % (findcomic, isssearch)) logger.fdebug('Sending request to DDL site for : %s %s' % (findcomic, isssearch))
#b = getcomics.GC(query=findcomic + ' ' + isssearch) b = getcomics.GC(query='%s %s' % (findcomic, isssearch))
#bb = b.search() bb = b.search()
logger.info('bb returned from DDL: %s' % bb) #logger.info('bb returned from DDL: %s' % bb)
elif RSS == "yes": elif RSS == "yes":
if nzbprov == '32P' or nzbprov == 'Public Torrents': if nzbprov == '32P' or nzbprov == 'Public Torrents':
cmname = re.sub("%20", " ", str(comsrc)) cmname = re.sub("%20", " ", str(comsrc))
@ -644,7 +651,7 @@ def NZB_SEARCH(ComicName, IssueNumber, ComicYear, SeriesYear, Publisher, IssueDa
if ComicName[:17] == '0-Day Comics Pack': if ComicName[:17] == '0-Day Comics Pack':
searchterm = {'series': ComicName, 'issue': StoreDate[8:10], 'volume': StoreDate[5:7], 'torrentid_32p': None} searchterm = {'series': ComicName, 'issue': StoreDate[8:10], 'volume': StoreDate[5:7], 'torrentid_32p': None}
else: else:
searchterm = {'series': ComicName, 'id': ComicID, 'issue': findcomiciss, 'volume': ComicVersion, 'publisher': Publisher, 'torrentid_32p': torrentid_32p} searchterm = {'series': ComicName, 'id': ComicID, 'issue': findcomiciss, 'volume': ComicVersion, 'publisher': Publisher, 'torrentid_32p': torrentid_32p, 'booktype': booktype}
#first we find the id on the serieslist of 32P #first we find the id on the serieslist of 32P
#then we call the ajax against the id and issue# and volume (if exists) #then we call the ajax against the id and issue# and volume (if exists)
a = auth32p.info32p(searchterm=searchterm) a = auth32p.info32p(searchterm=searchterm)
@ -798,7 +805,7 @@ def NZB_SEARCH(ComicName, IssueNumber, ComicYear, SeriesYear, Publisher, IssueDa
elif nzbprov == 'experimental': elif nzbprov == 'experimental':
#bb = parseit.MysterBinScrape(comsearch[findloop], comyear) #bb = parseit.MysterBinScrape(comsearch[findloop], comyear)
logger.info('sending %s to experimental search' % findcomic) logger.info('sending %s to experimental search' % findcomic)
bb = findcomicfeed.Startit(findcomic, isssearch, comyear, ComicVersion, IssDateFix) bb = findcomicfeed.Startit(findcomic, isssearch, comyear, ComicVersion, IssDateFix, booktype)
# since the regexs in findcomicfeed do the 3 loops, lets force the exit after # since the regexs in findcomicfeed do the 3 loops, lets force the exit after
cmloopit == 1 cmloopit == 1
@ -834,7 +841,7 @@ def NZB_SEARCH(ComicName, IssueNumber, ComicYear, SeriesYear, Publisher, IssueDa
pack_warning = True pack_warning = True
continue continue
logger.fdebug("checking search result: " + entry['title']) logger.fdebug("checking search result: %s" % entry['title'])
#some nzbsites feel that comics don't deserve a nice regex to strip the crap from the header, the end result is that we're #some nzbsites feel that comics don't deserve a nice regex to strip the crap from the header, the end result is that we're
#dealing with the actual raw header which causes incorrect matches below. #dealing with the actual raw header which causes incorrect matches below.
#this is a temporary cut from the experimental search option (findcomicfeed) as it does this part well usually. #this is a temporary cut from the experimental search option (findcomicfeed) as it does this part well usually.
@ -889,7 +896,7 @@ def NZB_SEARCH(ComicName, IssueNumber, ComicYear, SeriesYear, Publisher, IssueDa
comsize_b = entry['size'] comsize_b = entry['size']
elif entry['site'] == 'DDL': elif entry['site'] == 'DDL':
comsize_b = helpers.human2bytes(entry['size']) comsize_b = helpers.human2bytes(entry['size'])
except: except Exception as e:
tmpsz = entry.enclosures[0] tmpsz = entry.enclosures[0]
comsize_b = tmpsz['length'] comsize_b = tmpsz['length']
@ -930,20 +937,20 @@ def NZB_SEARCH(ComicName, IssueNumber, ComicYear, SeriesYear, Publisher, IssueDa
else: else:
if entry['title'][:17] != '0-Day Comics Pack': if entry['title'][:17] != '0-Day Comics Pack':
comsize_m = helpers.human_size(comsize_b) comsize_m = helpers.human_size(comsize_b)
logger.fdebug("size given as: " + str(comsize_m)) logger.fdebug('size given as: %s' % comsize_m)
#----size constraints. #----size constraints.
#if it's not within size constaints - dump it now and save some time. #if it's not within size constaints - dump it now and save some time.
if mylar.CONFIG.USE_MINSIZE: if mylar.CONFIG.USE_MINSIZE:
conv_minsize = helpers.human2bytes(mylar.CONFIG.MINSIZE + "M") conv_minsize = helpers.human2bytes(mylar.CONFIG.MINSIZE + "M")
logger.fdebug("comparing Min threshold " + str(conv_minsize) + " .. to .. nzb " + str(comsize_b)) logger.fdebug('comparing Min threshold %s .. to .. nzb %s' % (conv_minsize, comsize_b))
if int(conv_minsize) > int(comsize_b): if int(conv_minsize) > int(comsize_b):
logger.fdebug("Failure to meet the Minimum size threshold - skipping") logger.fdebug('Failure to meet the Minimum size threshold - skipping')
continue continue
if mylar.CONFIG.USE_MAXSIZE: if mylar.CONFIG.USE_MAXSIZE:
conv_maxsize = helpers.human2bytes(mylar.CONFIG.MAXSIZE + "M") conv_maxsize = helpers.human2bytes(mylar.CONFIG.MAXSIZE + "M")
logger.fdebug("comparing Max threshold " + str(conv_maxsize) + " .. to .. nzb " + str(comsize_b)) logger.fdebug('comparing Max threshold %s .. to .. nzb %s' % (conv_maxsize, comsize_b))
if int(comsize_b) > int(conv_maxsize): if int(comsize_b) > int(conv_maxsize):
logger.fdebug("Failure to meet the Maximium size threshold - skipping") logger.fdebug('Failure to meet the Maximium size threshold - skipping')
continue continue
#---- date constaints. #---- date constaints.
@ -1006,7 +1013,7 @@ def NZB_SEARCH(ComicName, IssueNumber, ComicYear, SeriesYear, Publisher, IssueDa
else: else:
postdate_int = time.mktime(dateconv[:len(dateconv) -1]) postdate_int = time.mktime(dateconv[:len(dateconv) -1])
except: except:
logger.warn('Unable to parse posting date from provider result set for :' + entry['title']) logger.warn('Unable to parse posting date from provider result set for : %s' % entry['title'])
continue continue
if all([digitaldate != '0000-00-00', digitaldate is not None]): if all([digitaldate != '0000-00-00', digitaldate is not None]):
@ -1016,7 +1023,6 @@ def NZB_SEARCH(ComicName, IssueNumber, ComicYear, SeriesYear, Publisher, IssueDa
i = 1 i = 1
while i <= 1: while i <= 1:
logger.info('i: %s' % i)
if i == 0: if i == 0:
usedate = digitaldate usedate = digitaldate
else: else:
@ -1061,23 +1067,23 @@ def NZB_SEARCH(ComicName, IssueNumber, ComicYear, SeriesYear, Publisher, IssueDa
#logger.info('dateconv2: %s' % dateconv2.date()) #logger.info('dateconv2: %s' % dateconv2.date())
#logger.info('digconv2: %s' % digconv2.date()) #logger.info('digconv2: %s' % digconv2.date())
if digitaldate != '0000-00-00' and dateconv2.date() >= digconv2.date(): if digitaldate != '0000-00-00' and dateconv2.date() >= digconv2.date():
logger.fdebug(str(pubdate) + ' is after DIGITAL store date of ' + str(digitaldate)) logger.fdebug('%s is after DIGITAL store date of %s' % (pubdate, digitaldate))
elif dateconv2.date() < issconv2.date(): elif dateconv2.date() < issconv2.date():
logger.fdebug('[CONV]pubdate: %s < storedate: %s' % (dateconv2.date(), issconv2.date())) logger.fdebug('[CONV]pubdate: %s < storedate: %s' % (dateconv2.date(), issconv2.date()))
logger.fdebug(str(pubdate) + ' is before store date of ' + str(stdate) + '. Ignoring search result as this is not the right issue.') logger.fdebug('%s is before store date of %s. Ignoring search result as this is not the right issue.' % (pubdate, stdate))
continue continue
else: else:
logger.fdebug(str(pubdate) + ' is after store date of ' + str(stdate)) logger.fdebug('%s is after store date of %s' % (pubdate, stdate))
except: except:
#if the above fails, drop down to the integer compare method as a failsafe. #if the above fails, drop down to the integer compare method as a failsafe.
if digitaldate != '0000-00-00' and postdate_int >= digitaldate_int: if digitaldate != '0000-00-00' and postdate_int >= digitaldate_int:
logger.fdebug(str(pubdate) + ' is after DIGITAL store date of ' + str(digitaldate)) logger.fdebug('%s is after DIGITAL store date of %s' % (pubdate, digitaldate))
elif postdate_int < issuedate_int: elif postdate_int < issuedate_int:
logger.fdebug('[INT]pubdate: %s < storedate: %s' % (postdate_int, issuedate_int)) logger.fdebug('[INT]pubdate: %s < storedate: %s' % (postdate_int, issuedate_int))
logger.fdebug(str(pubdate) + ' is before store date of ' + str(stdate) + '. Ignoring search result as this is not the right issue.') logger.fdebug('%s is before store date of %s. Ignoring search result as this is not the right issue.' % (pubdate, stdate))
continue continue
else: else:
logger.fdebug(str(pubdate) + ' is after store date of ' + str(stdate)) logger.fdebug('%s is after store date of %s' % (pubdate, stdate))
# -- end size constaints. # -- end size constaints.
if '(digital first)' in ComicTitle.lower(): #entry['title'].lower(): if '(digital first)' in ComicTitle.lower(): #entry['title'].lower():
@ -1088,7 +1094,7 @@ def NZB_SEARCH(ComicName, IssueNumber, ComicYear, SeriesYear, Publisher, IssueDa
else: else:
thisentry = ComicTitle #entry['title'] thisentry = ComicTitle #entry['title']
logger.fdebug("Entry: " + thisentry) logger.fdebug('Entry: %s' % thisentry)
cleantitle = thisentry cleantitle = thisentry
if 'mixed format' in cleantitle.lower(): if 'mixed format' in cleantitle.lower():
@ -1106,7 +1112,7 @@ def NZB_SEARCH(ComicName, IssueNumber, ComicYear, SeriesYear, Publisher, IssueDa
parsed_comic = p_comic.listFiles() parsed_comic = p_comic.listFiles()
logger.fdebug('parsed_info: %s' % parsed_comic) logger.fdebug('parsed_info: %s' % parsed_comic)
if parsed_comic['parse_status'] == 'success': if parsed_comic['parse_status'] == 'success' and (all([booktype is None, parsed_comic['booktype'] == 'issue']) or all([booktype == 'Print', parsed_comic['booktype'] == 'issue']) or all([booktype == 'One-Shot', parsed_comic['booktype'] == 'issue']) or booktype == parsed_comic['booktype']):
try: try:
fcomic = filechecker.FileChecker(watchcomic=ComicName) fcomic = filechecker.FileChecker(watchcomic=ComicName)
filecomic = fcomic.matchIT(parsed_comic) filecomic = fcomic.matchIT(parsed_comic)
@ -1115,8 +1121,12 @@ def NZB_SEARCH(ComicName, IssueNumber, ComicYear, SeriesYear, Publisher, IssueDa
continue continue
else: else:
logger.fdebug('match_check: %s' % filecomic) logger.fdebug('match_check: %s' % filecomic)
elif booktype != parsed_comic['booktype']:
logger.fdebug('Booktypes do not match. Looking for %s, this is a %s. Ignoring this result.' % (booktype, parsed_comic['booktype']))
continue
else: else:
logger.fdebug('Unable to parse name properly: %s' % filecomic) logger.fdebug('Unable to parse name properly: %s. Ignoring this result' % filecomic)
continue
#adjust for covers only by removing them entirely... #adjust for covers only by removing them entirely...
vers4year = "no" vers4year = "no"
@ -1172,7 +1182,7 @@ def NZB_SEARCH(ComicName, IssueNumber, ComicYear, SeriesYear, Publisher, IssueDa
elif ComVersChk == 0: elif ComVersChk == 0:
logger.fdebug("Series version detected as V1 (only series in existance with that title). Bypassing Year/Volume check") logger.fdebug("Series version detected as V1 (only series in existance with that title). Bypassing Year/Volume check")
yearmatch = "true" yearmatch = "true"
elif UseFuzzy == "0" or UseFuzzy == "2" or UseFuzzy is None or IssDateFix != "no": elif any([UseFuzzy == "0", UseFuzzy == "2", UseFuzzy is None, IssDateFix != "no"]) and parsed_comic['issue_year'] is not None:
if parsed_comic['issue_year'][:-2] == '19' or parsed_comic['issue_year'][:-2] == '20': if parsed_comic['issue_year'][:-2] == '19' or parsed_comic['issue_year'][:-2] == '20':
logger.fdebug('year detected: %s' % parsed_comic['issue_year']) logger.fdebug('year detected: %s' % parsed_comic['issue_year'])
result_comyear = parsed_comic['issue_year'] result_comyear = parsed_comic['issue_year']
@ -1267,6 +1277,11 @@ def NZB_SEARCH(ComicName, IssueNumber, ComicYear, SeriesYear, Publisher, IssueDa
downloadit = False downloadit = False
#-------------------------------------fix this! #-------------------------------------fix this!
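#not every provider result carries a 'pack' key, so probe for it defensively before the pack-handling checks below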
try:
pack_test = entry['pack']
except Exception as e:
pack_test = False
if nzbprov == 'Public Torrents' and any([entry['site'] == 'WWT', entry['site'] == 'DEM']): if nzbprov == 'Public Torrents' and any([entry['site'] == 'WWT', entry['site'] == 'DEM']):
if entry['site'] == 'WWT': if entry['site'] == 'WWT':
nzbprov = 'WWT' nzbprov = 'WWT'
@ -1275,7 +1290,7 @@ def NZB_SEARCH(ComicName, IssueNumber, ComicYear, SeriesYear, Publisher, IssueDa
if all([nzbprov == '32P', allow_packs == True, RSS == 'no']): if all([nzbprov == '32P', allow_packs == True, RSS == 'no']):
logger.fdebug('pack:' + entry['pack']) logger.fdebug('pack:' + entry['pack'])
if all([nzbprov == '32P', RSS == 'no', allow_packs == True]) and any([entry['pack'] == '1', entry['pack'] == '2']): if (all([nzbprov == '32P', RSS == 'no', allow_packs == True]) and any([entry['pack'] == '1', entry['pack'] == '2'])) or (all([nzbprov == 'ddl', pack_test is True])): #allow_packs is True
if nzbprov == '32P': if nzbprov == '32P':
if entry['pack'] == '2': if entry['pack'] == '2':
logger.fdebug('[PACK-QUEUE] Diamond FreeLeech Pack detected.') logger.fdebug('[PACK-QUEUE] Diamond FreeLeech Pack detected.')
@ -1283,21 +1298,26 @@ def NZB_SEARCH(ComicName, IssueNumber, ComicYear, SeriesYear, Publisher, IssueDa
logger.fdebug('[PACK-QUEUE] Normal Pack detected. Checking available inkdrops prior to downloading.') logger.fdebug('[PACK-QUEUE] Normal Pack detected. Checking available inkdrops prior to downloading.')
else: else:
logger.fdebug('[PACK-QUEUE] Invalid Pack.') logger.fdebug('[PACK-QUEUE] Invalid Pack.')
else:
logger.fdebug('[PACK-QUEUE] DDL Pack detected for %s.' % entry['filename'])
#find the pack range. #find the pack range.
pack_issuelist = None pack_issuelist = None
issueid_info = None issueid_info = None
if not entry['title'].startswith('0-Day Comics Pack'): if not entry['title'].startswith('0-Day Comics Pack'):
pack_issuelist = entry['issues'] pack_issuelist = entry['issues']
issueid_info = helpers.issue_find_ids(ComicName, ComicID, pack_issuelist, IssueNumber) issueid_info = helpers.issue_find_ids(ComicName, ComicID, pack_issuelist, IssueNumber)
if issueid_info['valid'] == True: if issueid_info['valid'] == True:
logger.info('Issue Number ' + IssueNumber + ' exists within pack. Continuing.') logger.info('Issue Number %s exists within pack. Continuing.' % IssueNumber)
else: else:
logger.fdebug('Issue Number ' + IssueNumber + ' does NOT exist within this pack. Skipping') logger.fdebug('Issue Number %s does NOT exist within this pack. Skipping' % IssueNumber)
continue continue
#pack support. #pack support.
nowrite = False nowrite = False
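#getcomics (DDL) results already supply their own id, so reuse it rather than deriving one from the link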
nzbid = generate_id(nzbprov, entry['link']) if all([nzbprov == 'ddl', 'getcomics' in entry['link']]):
nzbid = entry['id']
else:
nzbid = generate_id(nzbprov, entry['link'])
if manual is not True: if manual is not True:
downloadit = True downloadit = True
else: else:
@ -1349,7 +1369,9 @@ def NZB_SEARCH(ComicName, IssueNumber, ComicYear, SeriesYear, Publisher, IssueDa
logger.fdebug("issue we are looking for is : %s" % findcomiciss) logger.fdebug("issue we are looking for is : %s" % findcomiciss)
logger.fdebug("integer value of issue we are looking for : %s" % intIss) logger.fdebug("integer value of issue we are looking for : %s" % intIss)
else: else:
if intIss is None: if intIss is None and all([booktype == 'One-Shot', helpers.issuedigits(parsed_comic['issue_number']) == 1000]):
intIss = 1000
else:
intIss = 9999999999 intIss = 9999999999
if parsed_comic['issue_number'] is not None: if parsed_comic['issue_number'] is not None:
logger.fdebug("issue we found for is : %s" % parsed_comic['issue_number']) logger.fdebug("issue we found for is : %s" % parsed_comic['issue_number'])
@ -1362,8 +1384,7 @@ def NZB_SEARCH(ComicName, IssueNumber, ComicYear, SeriesYear, Publisher, IssueDa
if parsed_comic['issue_number'] is None: if parsed_comic['issue_number'] is None:
pc_in = None pc_in = None
else: else:
pc_in = int(parsed_comic['issue_number']) pc_in = helpers.issuedigits(parsed_comic['issue_number'])
#issue comparison now as well #issue comparison now as well
if int(intIss) == int(comintIss) or all([cmloopit == 4, findcomiciss is None, pc_in is None]) or all([cmloopit == 4, findcomiciss is None, pc_in == 1]): if int(intIss) == int(comintIss) or all([cmloopit == 4, findcomiciss is None, pc_in is None]) or all([cmloopit == 4, findcomiciss is None, pc_in == 1]):
nowrite = False nowrite = False
@ -1371,6 +1392,7 @@ def NZB_SEARCH(ComicName, IssueNumber, ComicYear, SeriesYear, Publisher, IssueDa
nzbid = generate_id(nzbprov, entry['id']) nzbid = generate_id(nzbprov, entry['id'])
elif all([nzbprov == 'ddl', 'getcomics' in entry['link']]): elif all([nzbprov == 'ddl', 'getcomics' in entry['link']]):
nzbid = entry['id'] nzbid = entry['id']
entry['title'] = entry['filename']
else: else:
nzbid = generate_id(nzbprov, entry['link']) nzbid = generate_id(nzbprov, entry['link'])
if manual is not True: if manual is not True:
@ -1457,10 +1479,10 @@ def NZB_SEARCH(ComicName, IssueNumber, ComicYear, SeriesYear, Publisher, IssueDa
links = entry['link'] links = entry['link']
searchresult = searcher(nzbprov, nzbname, mylar.COMICINFO, links, IssueID, ComicID, tmpprov, newznab=newznab_host, torznab=torznab_host, rss=RSS) searchresult = searcher(nzbprov, nzbname, mylar.COMICINFO, links, IssueID, ComicID, tmpprov, newznab=newznab_host, torznab=torznab_host, rss=RSS)
if searchresult == 'downloadchk-fail' or searchresult == 'double-pp': if any([searchresult == 'downloadchk-fail', searchresult == 'double-pp']):
foundc['status'] = False foundc['status'] = False
continue continue
elif searchresult == 'torrent-fail' or searchresult == 'nzbget-fail' or searchresult == 'sab-fail' or searchresult == 'blackhole-fail': elif any([searchresult == 'torrent-fail', searchresult == 'nzbget-fail', searchresult == 'sab-fail', searchresult == 'blackhole-fail', searchresult == 'ddl-fail']):
foundc['status'] = False foundc['status'] = False
return foundc return foundc
@ -1492,9 +1514,12 @@ def NZB_SEARCH(ComicName, IssueNumber, ComicYear, SeriesYear, Publisher, IssueDa
if 'Public Torrents' in tmpprov and any([nzbprov == 'WWT', nzbprov == 'DEM']): if 'Public Torrents' in tmpprov and any([nzbprov == 'WWT', nzbprov == 'DEM']):
tmpprov = re.sub('Public Torrents', nzbprov, tmpprov) tmpprov = re.sub('Public Torrents', nzbprov, tmpprov)
foundcomic.append("yes") foundcomic.append("yes")
logger.info('mylar.COMICINFO: %s' % mylar.COMICINFO)
if mylar.COMICINFO[0]['pack']: if mylar.COMICINFO[0]['pack'] is True:
issinfo = mylar.COMICINFO[0]['pack_issuelist'] try:
issinfo = mylar.COMICINFO[0]['pack_issuelist']
except:
issinfo = mylar.COMICINFO['pack_issuelist']
if issinfo is not None: if issinfo is not None:
#we need to get EVERY issue ID within the pack and update the log to reflect that they're being downloaded via a pack. #we need to get EVERY issue ID within the pack and update the log to reflect that they're being downloaded via a pack.
logger.fdebug("Found matching comic within pack...preparing to send to Updater with IssueIDs: " + str(issueid_info) + " and nzbname of " + str(nzbname)) logger.fdebug("Found matching comic within pack...preparing to send to Updater with IssueIDs: " + str(issueid_info) + " and nzbname of " + str(nzbname))
@ -1502,9 +1527,9 @@ def NZB_SEARCH(ComicName, IssueNumber, ComicYear, SeriesYear, Publisher, IssueDa
for isid in issinfo['issues']: for isid in issinfo['issues']:
updater.nzblog(isid['issueid'], nzbname, ComicName, SARC=SARC, IssueArcID=IssueArcID, id=nzbid, prov=tmpprov, oneoff=oneoff) updater.nzblog(isid['issueid'], nzbname, ComicName, SARC=SARC, IssueArcID=IssueArcID, id=nzbid, prov=tmpprov, oneoff=oneoff)
updater.foundsearch(ComicID, isid['issueid'], mode='series', provider=tmpprov) updater.foundsearch(ComicID, isid['issueid'], mode='series', provider=tmpprov)
notify_snatch(nzbname, sent_to, mylar.COMICINFO[0]['modcomicname'], mylar.COMICINFO[0]['comyear'], mylar.COMICINFO[0]['pack_numbers'], nzbprov) notify_snatch(sent_to, mylar.COMICINFO[0]['ComicName'], mylar.COMICINFO[0]['comyear'], mylar.COMICINFO[0]['pack_numbers'], nzbprov, True)
else: else:
notify_snatch(nzbname, sent_to, mylar.COMICINFO[0]['modcomicname'], mylar.COMICINFO[0]['comyear'], None, nzbprov) notify_snatch(sent_to, mylar.COMICINFO[0]['ComicName'], mylar.COMICINFO[0]['comyear'], None, nzbprov, True)
else: else:
if alt_nzbname is None or alt_nzbname == '': if alt_nzbname is None or alt_nzbname == '':
@ -1520,7 +1545,7 @@ def NZB_SEARCH(ComicName, IssueNumber, ComicYear, SeriesYear, Publisher, IssueDa
cyear = ComicYear cyear = ComicYear
else: else:
cyear = comyear cyear = comyear
notify_snatch(nzbname, sent_to, helpers.filesafe(modcomicname), cyear, IssueNumber, nzbprov) notify_snatch(sent_to, ComicName, cyear, IssueNumber, nzbprov, False)
prov_count == 0 prov_count == 0
mylar.TMP_PROV = nzbprov mylar.TMP_PROV = nzbprov
@ -1676,7 +1701,7 @@ def searchforissue(issueid=None, new=False, rsscheck=None, manual=False):
UseFuzzy = None UseFuzzy = None
ComicVersion = comic['Volume'] ComicVersion = comic['Volume']
TorrentID_32p = None TorrentID_32p = None
booktype = None booktype = comic['Type']
else: else:
Comicname_filesafe = comic['ComicName_Filesafe'] Comicname_filesafe = comic['ComicName_Filesafe']
SeriesYear = comic['ComicYear'] SeriesYear = comic['ComicYear']
@ -1770,7 +1795,7 @@ def searchforissue(issueid=None, new=False, rsscheck=None, manual=False):
StoreDate = result['ReleaseDate'] StoreDate = result['ReleaseDate']
DigitalDate = result['DigitalDate'] DigitalDate = result['DigitalDate']
TorrentID_32p = None TorrentID_32p = None
booktype = None booktype = result['Type']
elif mode == 'pullwant': elif mode == 'pullwant':
ComicName = result['COMIC'] ComicName = result['COMIC']
Comicname_filesafe = helpers.filesafe(ComicName) Comicname_filesafe = helpers.filesafe(ComicName)
@ -1787,7 +1812,7 @@ def searchforissue(issueid=None, new=False, rsscheck=None, manual=False):
IssueDate = result['SHIPDATE'] IssueDate = result['SHIPDATE']
StoreDate = IssueDate StoreDate = IssueDate
DigitalDate = '0000-00-00' DigitalDate = '0000-00-00'
booktype = None booktype = result['format']
else: else:
comic = myDB.selectone('SELECT * FROM comics where ComicID=?', [ComicID]).fetchone() comic = myDB.selectone('SELECT * FROM comics where ComicID=?', [ComicID]).fetchone()
if mode == 'want_ann': if mode == 'want_ann':
@ -1865,6 +1890,7 @@ def searchIssueIDList(issuelist):
UseFuzzy = comic['UseFuzzy'] UseFuzzy = comic['UseFuzzy']
ComicVersion = comic['ComicVersion'] ComicVersion = comic['ComicVersion']
TorrentID_32p = comic['TorrentID_32P'] TorrentID_32p = comic['TorrentID_32P']
booktype = comic['Type']
if issue['IssueDate'] == None: if issue['IssueDate'] == None:
IssueYear = comic['ComicYear'] IssueYear = comic['ComicYear']
else: else:
@ -1874,7 +1900,7 @@ def searchIssueIDList(issuelist):
else: else:
AllowPacks = False AllowPacks = False
foundNZB, prov = search_init(comic['ComicName'], issue['Issue_Number'], str(IssueYear), comic['ComicYear'], Publisher, issue['IssueDate'], issue['ReleaseDate'], issue['IssueID'], AlternateSearch, UseFuzzy, ComicVersion, SARC=None, IssueArcID=None, mode=mode, ComicID=issue['ComicID'], filesafe=comic['ComicName_Filesafe'], allow_packs=AllowPacks, torrentid_32p=TorrentID_32p, digitaldate=issue['DigitalDate']) foundNZB, prov = search_init(comic['ComicName'], issue['Issue_Number'], str(IssueYear), comic['ComicYear'], Publisher, issue['IssueDate'], issue['ReleaseDate'], issue['IssueID'], AlternateSearch, UseFuzzy, ComicVersion, SARC=None, IssueArcID=None, mode=mode, ComicID=issue['ComicID'], filesafe=comic['ComicName_Filesafe'], allow_packs=AllowPacks, torrentid_32p=TorrentID_32p, digitaldate=issue['DigitalDate'], booktype=booktype)
if foundNZB['status'] is True: if foundNZB['status'] is True:
updater.foundsearch(ComicID=issue['ComicID'], IssueID=issue['IssueID'], mode=mode, provider=prov, hash=foundNZB['info']['t_hash']) updater.foundsearch(ComicID=issue['ComicID'], IssueID=issue['IssueID'], mode=mode, provider=prov, hash=foundNZB['info']['t_hash'])
logger.info('Completed search request.') logger.info('Completed search request.')
@ -2289,13 +2315,16 @@ def searcher(nzbprov, nzbname, comicinfo, link, IssueID, ComicID, tmpprov, direc
sent_to = None sent_to = None
t_hash = None t_hash = None
if mylar.CONFIG.ENABLE_DDL is True and nzbprov == 'ddl': if mylar.CONFIG.ENABLE_DDL is True and nzbprov == 'ddl':
ggc = getcomics.GC('nope') ggc = getcomics.GC(issueid=IssueID, comicid=ComicID)
sendsite = ggc.loadsite(os.path.join(mylar.CONFIG.CACHE_DIR, 'getcomics-' + nzbid), link) sendsite = ggc.loadsite(nzbid, link)
ddl_it = ggc.parse_downloadresults(os.path.join(mylar.CONFIG.CACHE_DIR, 'getcomics-' + nzbid)) ddl_it = ggc.parse_downloadresults(nzbid, link)
logger.info("ddl status response: %s" % ddl_it) logger.info("ddl status response: %s" % ddl_it)
if ddl_it[0]['status'] == 'success': if ddl_it['success'] is True:
nzbname = ddl_it[0]['filename'] logger.info('Successfully snatched %s from DDL site. It is currently being queued to download in position %s' % (nzbname, mylar.DDL_QUEUE.qsize()))
logger.info('Successfully retrieved %s from DDL site' % (nzbname)) else:
logger.info('Failed to retrieve %s from the DDL site.' % nzbname)
return "ddl-fail"
sent_to = "is downloading it directly via DDL" sent_to = "is downloading it directly via DDL"
elif mylar.USE_BLACKHOLE and all([nzbprov != '32P', nzbprov != 'WWT', nzbprov != 'DEM', nzbprov != 'torznab']): elif mylar.USE_BLACKHOLE and all([nzbprov != '32P', nzbprov != 'WWT', nzbprov != 'DEM', nzbprov != 'torznab']):
@ -2670,37 +2699,41 @@ def searcher(nzbprov, nzbname, comicinfo, link, IssueID, ComicID, tmpprov, direc
if '[RSS]' in tmpprov: tmpprov = re.sub('\[RSS\]', '', tmpprov).strip() if '[RSS]' in tmpprov: tmpprov = re.sub('\[RSS\]', '', tmpprov).strip()
updater.nzblog(IssueID, nzbname, ComicName, SARC=SARC, IssueArcID=IssueArcID, id=nzbid, prov=tmpprov, alt_nzbname=alt_nzbname, oneoff=oneoff) updater.nzblog(IssueID, nzbname, ComicName, SARC=SARC, IssueArcID=IssueArcID, id=nzbid, prov=tmpprov, alt_nzbname=alt_nzbname, oneoff=oneoff)
#send out notifications for on snatch after the updater incase notification fails (it would bugger up the updater/pp scripts) #send out notifications for on snatch after the updater incase notification fails (it would bugger up the updater/pp scripts)
notify_snatch(nzbname, sent_to, helpers.filesafe(modcomicname), comyear, IssueNumber, nzbprov) notify_snatch(sent_to, ComicName, comyear, IssueNumber, nzbprov, False)
mylar.TMP_PROV = nzbprov mylar.TMP_PROV = nzbprov
return return_val return return_val
def notify_snatch(nzbname, sent_to, modcomicname, comyear, IssueNumber, nzbprov): def notify_snatch(sent_to, comicname, comyear, IssueNumber, nzbprov, pack):
if pack is False:
if IssueNumber is not None: snline = 'Issue snatched!'
snline = '%s (%s) #%s snatched!' % (modcomicname, comyear, IssueNumber)
else: else:
snline = '%s (%s) snatched!' % (modcomicname, comyear) snline = 'Pack snatched!'
if IssueNumber is not None:
snatched_name = '%s (%s) #%s' % (comicname, comyear, IssueNumber)
else:
snatched_name = '%s (%s)' % (comicname, comyear)
if mylar.CONFIG.PROWL_ENABLED and mylar.CONFIG.PROWL_ONSNATCH: if mylar.CONFIG.PROWL_ENABLED and mylar.CONFIG.PROWL_ONSNATCH:
logger.info(u"Sending Prowl notification") logger.info(u"Sending Prowl notification")
prowl = notifiers.PROWL() prowl = notifiers.PROWL()
prowl.notify(nzbname, "Download started using " + sent_to) prowl.notify(snatched_name, "Download started using " + sent_to)
if mylar.CONFIG.NMA_ENABLED and mylar.CONFIG.NMA_ONSNATCH: if mylar.CONFIG.NMA_ENABLED and mylar.CONFIG.NMA_ONSNATCH:
logger.info(u"Sending NMA notification") logger.info(u"Sending NMA notification")
nma = notifiers.NMA() nma = notifiers.NMA()
nma.notify(snline=snline, snatched_nzb=nzbname, sent_to=sent_to, prov=nzbprov) nma.notify(snline=snline, snatched_nzb=snatched_name, sent_to=sent_to, prov=nzbprov)
if mylar.CONFIG.PUSHOVER_ENABLED and mylar.CONFIG.PUSHOVER_ONSNATCH: if mylar.CONFIG.PUSHOVER_ENABLED and mylar.CONFIG.PUSHOVER_ONSNATCH:
logger.info(u"Sending Pushover notification") logger.info(u"Sending Pushover notification")
pushover = notifiers.PUSHOVER() pushover = notifiers.PUSHOVER()
pushover.notify(snline, snatched_nzb=nzbname, sent_to=sent_to, prov=nzbprov) pushover.notify(snline, snatched_nzb=snatched_name, prov=nzbprov, sent_to=sent_to)
if mylar.CONFIG.BOXCAR_ENABLED and mylar.CONFIG.BOXCAR_ONSNATCH: if mylar.CONFIG.BOXCAR_ENABLED and mylar.CONFIG.BOXCAR_ONSNATCH:
logger.info(u"Sending Boxcar notification") logger.info(u"Sending Boxcar notification")
boxcar = notifiers.BOXCAR() boxcar = notifiers.BOXCAR()
boxcar.notify(snatched_nzb=nzbname, sent_to=sent_to, snline=snline) boxcar.notify(snatched_nzb=snatched_name, sent_to=sent_to, snline=snline)
if mylar.CONFIG.PUSHBULLET_ENABLED and mylar.CONFIG.PUSHBULLET_ONSNATCH: if mylar.CONFIG.PUSHBULLET_ENABLED and mylar.CONFIG.PUSHBULLET_ONSNATCH:
logger.info(u"Sending Pushbullet notification") logger.info(u"Sending Pushbullet notification")
pushbullet = notifiers.PUSHBULLET() pushbullet = notifiers.PUSHBULLET()
pushbullet.notify(snline=snline, snatched=nzbname, sent_to=sent_to, prov=nzbprov, method='POST') pushbullet.notify(snline=snline, snatched=snatched_name, sent_to=sent_to, prov=nzbprov, method='POST')
if mylar.CONFIG.TELEGRAM_ENABLED and mylar.CONFIG.TELEGRAM_ONSNATCH: if mylar.CONFIG.TELEGRAM_ENABLED and mylar.CONFIG.TELEGRAM_ONSNATCH:
logger.info(u"Sending Telegram notification") logger.info(u"Sending Telegram notification")
telegram = notifiers.TELEGRAM() telegram = notifiers.TELEGRAM()
@ -2708,7 +2741,7 @@ def notify_snatch(nzbname, sent_to, modcomicname, comyear, IssueNumber, nzbprov)
if mylar.CONFIG.SLACK_ENABLED and mylar.CONFIG.SLACK_ONSNATCH: if mylar.CONFIG.SLACK_ENABLED and mylar.CONFIG.SLACK_ONSNATCH:
logger.info(u"Sending Slack notification") logger.info(u"Sending Slack notification")
slack = notifiers.SLACK() slack = notifiers.SLACK()
slack.notify("Snatched", snline, snatched_nzb=nzbname, sent_to=sent_to, prov=nzbprov) slack.notify("Snatched", snline, snatched_nzb=snatched_name, sent_to=sent_to, prov=nzbprov)
return return

View File

@ -11,29 +11,33 @@ class TorrentClient(object):
def __init__(self): def __init__(self):
self.conn = None self.conn = None
def connect(self, host, username, password): def connect(self, host, username, password, test=False):
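#when test is True, connect returns a status dict (including the client version) instead of the raw qBittorrent client object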
if self.conn is not None: if self.conn is not None:
return self.connect return self.connect
if not host: if not host:
return {'status': False} return {'status': False, 'error': 'host not specified'}
try: try:
logger.info(host)
self.client = client.Client(host) self.client = client.Client(host)
except Exception as e: except Exception as e:
logger.error('Could not create qBittorrent Object' + str(e)) logger.error('Could not create qBittorrent Object %s' % e)
return {'status': False} return {'status': False, 'error': e}
else: else:
try: try:
self.client.login(username, password) self.client.login(username, password)
except Exception as e: except Exception as e:
logger.error('Could not connect to qBittorrent ' + host) logger.error('Could not connect to qBittorrent: %s' % host)
return {'status': False, 'error': e}
else: else:
return self.client if test is True:
version = self.client.qbittorrent_version
return {'status': True, 'version': version}
else:
return self.client
def find_torrent(self, hash): def find_torrent(self, hash):
logger.debug('Finding Torrent hash: ' + hash) logger.debug('Finding Torrent hash: %s' % hash)
torrent_info = self.get_torrent(hash) torrent_info = self.get_torrent(hash)
if torrent_info: if torrent_info:
return True return True
@ -41,11 +45,11 @@ class TorrentClient(object):
return False return False
def get_torrent(self, hash): def get_torrent(self, hash):
logger.debug('Getting Torrent info hash: ' + hash) logger.debug('Getting Torrent info hash: %s' % hash)
try: try:
torrent_info = self.client.get_torrent(hash) torrent_info = self.client.get_torrent(hash)
except Exception as e: except Exception as e:
logger.error('Could not get torrent info for ' + hash) logger.error('Could not get torrent info for %s' % hash)
return False return False
else: else:
logger.info('Successfully located information for torrent') logger.info('Successfully located information for torrent')
@ -55,7 +59,7 @@ class TorrentClient(object):
def load_torrent(self, filepath): def load_torrent(self, filepath):
if not filepath.startswith('magnet'): if not filepath.startswith('magnet'):
logger.info('filepath to torrent file set to : ' + filepath) logger.info('filepath to torrent file set to : %s' % filepath)
if self.client._is_authenticated is True: if self.client._is_authenticated is True:
logger.info('Checking if Torrent Exists!') logger.info('Checking if Torrent Exists!')
@ -68,67 +72,66 @@ class TorrentClient(object):
logger.debug('Magnet (load_torrent) initiating') logger.debug('Magnet (load_torrent) initiating')
else: else:
hash = self.get_the_hash(filepath) hash = self.get_the_hash(filepath)
logger.debug('FileName (load_torrent): ' + str(os.path.basename(filepath))) logger.debug('FileName (load_torrent): %s' % os.path.basename(filepath))
logger.debug('Torrent Hash (load_torrent): "' + hash + '"') logger.debug('Torrent Hash (load_torrent): "%s"' % hash)
#Check if torrent already added #Check if torrent already added
if self.find_torrent(hash): if self.find_torrent(hash):
logger.info('load_torrent: Torrent already exists!') logger.info('load_torrent: Torrent already exists!')
return {'status': False} return {'status': False, 'error': 'Torrent already exists'}
#should set something here to denote that it's already loaded, and then the failed download checker not run so it doesn't download #should set something here to denote that it's already loaded, and then the failed download checker not run so it doesn't download
#multiple copies of the same issues that's already downloaded #multiple copies of the same issues that's already downloaded
else: else:
logger.info('Torrent not added yet, trying to add it now!') logger.info('Torrent not added yet, trying to add it now!')
if any([mylar.CONFIG.QBITTORRENT_FOLDER is None, mylar.CONFIG.QBITTORRENT_FOLDER == '', mylar.CONFIG.QBITTORRENT_FOLDER == 'None']):
down_dir = None
else:
down_dir = mylar.CONFIG.QBITTORRENT_FOLDER
logger.info('Forcing Download location to: %s' % down_dir)
# Build an arg dict based on user prefs.
addargs = {}
if not any([mylar.CONFIG.QBITTORRENT_LABEL is None, mylar.CONFIG.QBITTORRENT_LABEL == '', mylar.CONFIG.QBITTORRENT_LABEL == 'None']):
addargs.update( { 'category': str(mylar.CONFIG.QBITTORRENT_LABEL) } )
logger.info('Setting download label to: %s' % mylar.CONFIG.QBITTORRENT_LABEL)
if not any([mylar.CONFIG.QBITTORRENT_FOLDER is None, mylar.CONFIG.QBITTORRENT_FOLDER == '', mylar.CONFIG.QBITTORRENT_FOLDER == 'None']):
addargs.update( { 'savepath': str(mylar.CONFIG.QBITTORRENT_FOLDER) } )
logger.info('Forcing download location to: %s' % mylar.CONFIG.QBITTORRENT_FOLDER)
if mylar.CONFIG.QBITTORRENT_LOADACTION == 'pause':
addargs.update( { 'paused': 'true' } )
logger.info('Attempting to add torrent in paused state')
if filepath.startswith('magnet'): if filepath.startswith('magnet'):
try: try:
if down_dir is not None: tid = self.client.download_from_link(filepath, **addargs)
tid = self.client.download_from_link(filepath, savepath=str(down_dir), category=str(mylar.CONFIG.QBITTORRENT_LABEL))
else:
tid = self.client.download_from_link(filepath, category=str(mylar.CONFIG.QBITTORRENT_LABEL))
except Exception as e: except Exception as e:
logger.debug('Torrent not added') logger.error('Torrent not added')
return {'status': False} return {'status': False, 'error': e}
else: else:
logger.debug('Successfully submitted for add as a magnet. Verifying item is now on client.') logger.debug('Successfully submitted for add as a magnet. Verifying item is now on client.')
else: else:
try: try:
torrent_content = open(filepath, 'rb') torrent_content = open(filepath, 'rb')
if down_dir is not None: tid = self.client.download_from_file(torrent_content, **addargs)
tid = self.client.download_from_file(torrent_content, savepath=str(down_dir), category=str(mylar.CONFIG.QBITTORRENT_LABEL))
else:
tid = self.client.download_from_file(torrent_content, category=str(mylar.CONFIG.QBITTORRENT_LABEL))
except Exception as e: except Exception as e:
logger.debug('Torrent not added') logger.error('Torrent not added')
return {'status': False} return {'status': False, 'error': e}
else: else:
logger.debug('Successfully submitted for add via file. Verifying item is now on client.') logger.debug('Successfully submitted for add via file. Verifying item is now on client.')
if mylar.CONFIG.QBITTORRENT_STARTONLOAD: if mylar.CONFIG.QBITTORRENT_LOADACTION == 'force_start':
logger.info('attempting to start') logger.info('Attempting to force start torrent')
startit = self.client.force_start(hash)
logger.info('startit returned:' + str(startit))
else:
logger.info('attempting to pause torrent incase it starts')
try: try:
startit = self.client.pause(hash) startit = self.client.force_start(hash)
logger.info('startit paused:' + str(startit)) logger.info('startit returned: %s' % startit)
except: except:
logger.warn('Unable to pause torrent - possibly already paused?') logger.warn('Unable to force start torrent - please check your client.')
else:
logger.info('Client default add action selected. Doing nothing.')
try: try:
time.sleep(5) # wait 5 in case it's not populated yet. time.sleep(5) # wait 5 in case it's not populated yet.
tinfo = self.get_torrent(hash) tinfo = self.get_torrent(hash)
except Exception as e: except Exception as e:
logger.warn('Torrent was not added! Please check logs') logger.warn('Torrent was not added! Please check logs')
return {'status': False} return {'status': False, 'error': e}
else: else:
logger.info('Torrent successfully added!') logger.info('Torrent successfully added!')
filelist = self.client.get_torrent_files(hash) filelist = self.client.get_torrent_files(hash)
@ -160,6 +163,5 @@ class TorrentClient(object):
metainfo = bencode.decode(torrent_file.read()) metainfo = bencode.decode(torrent_file.read())
info = metainfo['info'] info = metainfo['info']
thehash = hashlib.sha1(bencode.encode(info)).hexdigest().upper() thehash = hashlib.sha1(bencode.encode(info)).hexdigest().upper()
logger.debug('Hash: ' + thehash)
return thehash return thehash

View File

@ -114,12 +114,12 @@ def dbUpdate(ComicIDList=None, calledfrom=None, sched=False):
#logger.fdebug('%s [%s] Was refreshed less than %s hours ago. Skipping Refresh at this time.' % (ComicName, ComicID, cache_hours)) #logger.fdebug('%s [%s] Was refreshed less than %s hours ago. Skipping Refresh at this time.' % (ComicName, ComicID, cache_hours))
cnt +=1 cnt +=1
continue continue
logger.info('[' + str(cnt) + '/' + str(len(comiclist)) + '] Refreshing :' + ComicName + ' (' + str(dspyear) + ') [' + str(ComicID) + ']') logger.info('[%s/%s] Refreshing :%s (%s) [%s]' % (cnt, len(comiclist), ComicName, dspyear, ComicID))
else: else:
ComicID = comic['ComicID'] ComicID = comic['ComicID']
ComicName = comic['ComicName'] ComicName = comic['ComicName']
logger.fdebug('Refreshing: ' + ComicName + ' (' + str(dspyear) + ') [' + str(ComicID) + ']') logger.info('Refreshing/Updating: %s (%s) [%s]' % (ComicName, dspyear, ComicID))
mismatch = "no" mismatch = "no"
if not mylar.CONFIG.CV_ONLY or ComicID[:1] == "G": if not mylar.CONFIG.CV_ONLY or ComicID[:1] == "G":
@ -1104,6 +1104,8 @@ def forceRescan(ComicID, archive=None, module=None, recheck=False):
temploc = '1' temploc = '1'
else: else:
temploc = None temploc = None
logger.warn('The filename [%s] does not have a valid issue number, and the Edition of the series is %s. You might need to Forcibly Mark the Series as TPB/GN and try this again.' % (tmpfc['ComicFilename'], rescan['Type']))
return
if all(['annual' not in temploc.lower(), 'special' not in temploc.lower()]): if all(['annual' not in temploc.lower(), 'special' not in temploc.lower()]):
#remove the extension here #remove the extension here
@ -1119,6 +1121,7 @@ def forceRescan(ComicID, archive=None, module=None, recheck=False):
while True: while True:
try: try:
reiss = reissues[n] reiss = reissues[n]
int_iss = None
except IndexError: except IndexError:
break break
int_iss = helpers.issuedigits(reiss['Issue_Number']) int_iss = helpers.issuedigits(reiss['Issue_Number'])

View File

@ -645,6 +645,8 @@ class WebInterface(object):
seriesYear = cid['SeriesYear'] seriesYear = cid['SeriesYear']
issuePublisher = cid['Publisher'] issuePublisher = cid['Publisher']
seriesVolume = cid['Volume'] seriesVolume = cid['Volume']
bookType = cid['Type']
seriesAliases = cid['Aliases']
if storyarcpublisher is None: if storyarcpublisher is None:
#assume that the arc is the same #assume that the arc is the same
storyarcpublisher = issuePublisher storyarcpublisher = issuePublisher
@ -670,6 +672,8 @@ class WebInterface(object):
"IssuePublisher": issuePublisher, "IssuePublisher": issuePublisher,
"CV_ArcID": arcid, "CV_ArcID": arcid,
"Int_IssueNumber": AD['Int_IssueNumber'], "Int_IssueNumber": AD['Int_IssueNumber'],
"Type": bookType,
"Aliases": seriesAliases,
"Manual": AD['Manual']} "Manual": AD['Manual']}
myDB.upsert("storyarcs", newVals, newCtrl) myDB.upsert("storyarcs", newVals, newCtrl)
@ -2194,6 +2198,41 @@ class WebInterface(object):
annualDelete.exposed = True annualDelete.exposed = True
def queueManage(self): # **args):
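#summarizes the DDL queue for the UI: the item currently marked 'Downloading' in ddl_info plus any entries still waiting to be processed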
myDB = db.DBConnection()
activelist = 'There are currently no items downloading via Direct Download (DDL).'
active = myDB.selectone("SELECT * FROM DDL_INFO WHERE STATUS = 'Downloading'").fetchone()
if active is not None:
activelist = {'series': active['series'],
'year': active['year'],
'size': active['size'],
'filename': active['filename'],
'status': active['status'],
'id': active['id']}
resultlist = 'There are currently no items waiting in the Direct Download (DDL) Queue for processing.'
s_info = myDB.select("SELECT a.ComicName, a.ComicVersion, a.ComicID, a.ComicYear, b.Issue_Number, b.IssueID, c.size, c.status, c.id FROM comics as a INNER JOIN issues as b ON a.ComicID = b.ComicID INNER JOIN ddl_info as c ON b.IssueID = c.IssueID WHERE c.status != 'Downloading'")
if s_info:
resultlist = []
for si in s_info:
issue = si['Issue_Number']
if issue is not None:
issue = '#%s' % issue
resultlist.append({'series': si['ComicName'],
'issue': issue,
'id': si['id'],
'volume': si['ComicVersion'],
'year': si['ComicYear'],
'size': si['size'].strip(),
'comicid': si['ComicID'],
'issueid': si['IssueID'],
'status': si['status']})
logger.info('resultlist: %s' % resultlist)
return serve_template(templatename="queue_management.html", title="Queue Management", activelist=activelist, resultlist=resultlist)
queueManage.exposed = True
def previewRename(self, **args): #comicid=None, comicidlist=None): def previewRename(self, **args): #comicid=None, comicidlist=None):
file_format = mylar.CONFIG.FILE_FORMAT file_format = mylar.CONFIG.FILE_FORMAT
myDB = db.DBConnection() myDB = db.DBConnection()
@ -4104,7 +4143,7 @@ class WebInterface(object):
import random import random
SRID = str(random.randint(100000, 999999)) SRID = str(random.randint(100000, 999999))
logger.info('[IMPORT] Issues found with valid ComicID information for : ' + comicinfo['ComicName'] + ' [' + str(comicinfo['ComicID']) + ']') logger.info('[IMPORT] Issues found with valid ComicID information for : %s [%s]' % (comicinfo['ComicName'], comicinfo['ComicID']))
imported = {'ComicName': comicinfo['ComicName'], imported = {'ComicName': comicinfo['ComicName'],
'DynamicName': comicinfo['DynamicName'], 'DynamicName': comicinfo['DynamicName'],
'Volume': comicinfo['Volume'], 'Volume': comicinfo['Volume'],
@ -4127,7 +4166,7 @@ class WebInterface(object):
# "ComicName": comicinfo['ComicName'], # "ComicName": comicinfo['ComicName'],
# "DynamicName": comicinfo['DynamicName']} # "DynamicName": comicinfo['DynamicName']}
# myDB.upsert("importresults", newVal, ctrlVal) # myDB.upsert("importresults", newVal, ctrlVal)
logger.info('[IMPORT] Successfully verified import sequence data for : ' + comicinfo['ComicName'] + '. Currently adding to your watchlist.') logger.info('[IMPORT] Successfully verified import sequence data for : %s. Currently adding to your watchlist.' % comicinfo['ComicName'])
RemoveIDS.append(comicinfo['ComicID']) RemoveIDS.append(comicinfo['ComicID'])
#we need to remove these items from the comiclist now, so they don't get processed again #we need to remove these items from the comiclist now, so they don't get processed again
@ -4200,9 +4239,10 @@ class WebInterface(object):
else: else:
raise cherrypy.HTTPRedirect("importResults") raise cherrypy.HTTPRedirect("importResults")
else: else:
comicstoIMP.append(result['ComicLocation'])#.decode(mylar.SYS_ENCODING, 'replace')) #logger.fdebug('result: %s' % result)
comicstoIMP.append(result['ComicLocation']) #.decode(mylar.SYS_ENCODING, 'replace'))
getiss = result['IssueNumber'] getiss = result['IssueNumber']
#logger.info('getiss:' + getiss) #logger.fdebug('getiss: %s' % getiss)
if 'annual' in getiss.lower(): if 'annual' in getiss.lower():
tmpiss = re.sub('[^0-9]','', getiss).strip() tmpiss = re.sub('[^0-9]','', getiss).strip()
if any([tmpiss.startswith('19'), tmpiss.startswith('20')]) and len(tmpiss) == 4: if any([tmpiss.startswith('19'), tmpiss.startswith('20')]) and len(tmpiss) == 4:
@ -4217,10 +4257,10 @@ class WebInterface(object):
miniss_num = helpers.issuedigits(minISSUE) miniss_num = helpers.issuedigits(minISSUE)
startiss_num = helpers.issuedigits(startISSUE) startiss_num = helpers.issuedigits(startISSUE)
if int(getiss_num) > int(miniss_num): if int(getiss_num) > int(miniss_num):
#logger.fdebug('Minimum issue now set to : ' + getiss + ' - it was : ' + minISSUE) logger.fdebug('Minimum issue now set to : %s - it was %s' % (getiss, minISSUE))
minISSUE = getiss minISSUE = getiss
if int(getiss_num) < int(startiss_num): if int(getiss_num) < int(startiss_num):
#logger.fdebug('Start issue now set to : ' + getiss + ' - it was : ' + startISSUE) logger.fdebug('Start issue now set to : %s - it was %s' % (getiss, startISSUE))
startISSUE = str(getiss) startISSUE = str(getiss)
if helpers.issuedigits(startISSUE) == 1000 and result['ComicYear'] is not None: # if it's an issue #1, get the year and assume that's the start. if helpers.issuedigits(startISSUE) == 1000 and result['ComicYear'] is not None: # if it's an issue #1, get the year and assume that's the start.
startyear = result['ComicYear'] startyear = result['ComicYear']
@ -4545,13 +4585,20 @@ class WebInterface(object):
#---- #----
# to be implemented in the future. # to be implemented in the future.
if mylar.INSTALL_TYPE == 'git': if mylar.INSTALL_TYPE == 'git':
branch_history, err = mylar.versioncheck.runGit("log --pretty=format:'%h - %cr - %an - %s' -n 5") try:
#here we pass the branch_history to the pretty_git module to break it down branch_history, err = mylar.versioncheck.runGit('log --encoding=UTF-8 --pretty=format:"%h - %cr - %an - %s" -n 5')
if branch_history: #here we pass the branch_history to the pretty_git module to break it down
br_hist = self.pretty_git(branch_history) if branch_history:
#br_hist = branch_history.replace("\n", "<br />\n") br_hist = self.pretty_git(branch_history)
else: try:
br_hist = err br_hist = u"" + br_hist.decode('utf-8')
except:
br_hist = br_hist
else:
br_hist = err
except Exception as e:
logger.fdebug('[ERROR] Unable to retrieve git revision history for some reason: %s' % e)
br_hist = 'This would be a nice place to see revision history...'
else: else:
br_hist = 'This would be a nice place to see revision history...' br_hist = 'This would be a nice place to see revision history...'
#---- #----
@ -4649,6 +4696,7 @@ class WebInterface(object):
"sab_priority": mylar.CONFIG.SAB_PRIORITY, "sab_priority": mylar.CONFIG.SAB_PRIORITY,
"sab_directory": mylar.CONFIG.SAB_DIRECTORY, "sab_directory": mylar.CONFIG.SAB_DIRECTORY,
"sab_to_mylar": helpers.checked(mylar.CONFIG.SAB_TO_MYLAR), "sab_to_mylar": helpers.checked(mylar.CONFIG.SAB_TO_MYLAR),
"sab_version": mylar.CONFIG.SAB_VERSION,
"sab_client_post_processing": helpers.checked(mylar.CONFIG.SAB_CLIENT_POST_PROCESSING), "sab_client_post_processing": helpers.checked(mylar.CONFIG.SAB_CLIENT_POST_PROCESSING),
"nzbget_host": mylar.CONFIG.NZBGET_HOST, "nzbget_host": mylar.CONFIG.NZBGET_HOST,
"nzbget_port": mylar.CONFIG.NZBGET_PORT, "nzbget_port": mylar.CONFIG.NZBGET_PORT,
@ -4691,7 +4739,7 @@ class WebInterface(object):
"qbittorrent_password": mylar.CONFIG.QBITTORRENT_PASSWORD, "qbittorrent_password": mylar.CONFIG.QBITTORRENT_PASSWORD,
"qbittorrent_label": mylar.CONFIG.QBITTORRENT_LABEL, "qbittorrent_label": mylar.CONFIG.QBITTORRENT_LABEL,
"qbittorrent_folder": mylar.CONFIG.QBITTORRENT_FOLDER, "qbittorrent_folder": mylar.CONFIG.QBITTORRENT_FOLDER,
"qbittorrent_startonload": helpers.checked(mylar.CONFIG.QBITTORRENT_STARTONLOAD), "qbittorrent_loadaction": mylar.CONFIG.QBITTORRENT_LOADACTION,
"blackhole_dir": mylar.CONFIG.BLACKHOLE_DIR, "blackhole_dir": mylar.CONFIG.BLACKHOLE_DIR,
"usenet_retention": mylar.CONFIG.USENET_RETENTION, "usenet_retention": mylar.CONFIG.USENET_RETENTION,
"nzbsu": helpers.checked(mylar.CONFIG.NZBSU), "nzbsu": helpers.checked(mylar.CONFIG.NZBSU),
@ -5056,7 +5104,7 @@ class WebInterface(object):
def configUpdate(self, **kwargs): def configUpdate(self, **kwargs):
checked_configs = ['enable_https', 'launch_browser', 'syno_fix', 'auto_update', 'annuals_on', 'api_enabled', 'nzb_startup_search', checked_configs = ['enable_https', 'launch_browser', 'syno_fix', 'auto_update', 'annuals_on', 'api_enabled', 'nzb_startup_search',
'enforce_perms', 'sab_to_mylar', 'torrent_local', 'torrent_seedbox', 'rtorrent_ssl', 'rtorrent_verify', 'rtorrent_startonload', 'enforce_perms', 'sab_to_mylar', 'torrent_local', 'torrent_seedbox', 'rtorrent_ssl', 'rtorrent_verify', 'rtorrent_startonload',
'enable_torrents', 'qbittorrent_startonload', 'enable_rss', 'nzbsu', 'nzbsu_verify', 'enable_torrents', 'enable_rss', 'nzbsu', 'nzbsu_verify',
'dognzb', 'dognzb_verify', 'experimental', 'enable_torrent_search', 'enable_public', 'enable_32p', 'enable_torznab', 'dognzb', 'dognzb_verify', 'experimental', 'enable_torrent_search', 'enable_public', 'enable_32p', 'enable_torznab',
'newznab', 'use_minsize', 'use_maxsize', 'ddump', 'failed_download_handling', 'sab_client_post_processing', 'nzbget_client_post_processing', 'newznab', 'use_minsize', 'use_maxsize', 'ddump', 'failed_download_handling', 'sab_client_post_processing', 'nzbget_client_post_processing',
'failed_auto', 'post_processing', 'enable_check_folder', 'enable_pre_scripts', 'enable_snatch_script', 'enable_extra_scripts', 'failed_auto', 'post_processing', 'enable_check_folder', 'enable_pre_scripts', 'enable_snatch_script', 'enable_extra_scripts',
@ -5064,7 +5112,7 @@ class WebInterface(object):
'lowercase_filenames', 'autowant_upcoming', 'autowant_all', 'comic_cover_local', 'alternate_latest_series_covers', 'cvinfo', 'snatchedtorrent_notify', 'lowercase_filenames', 'autowant_upcoming', 'autowant_all', 'comic_cover_local', 'alternate_latest_series_covers', 'cvinfo', 'snatchedtorrent_notify',
'prowl_enabled', 'prowl_onsnatch', 'nma_enabled', 'nma_onsnatch', 'pushover_enabled', 'pushover_onsnatch', 'boxcar_enabled', 'prowl_enabled', 'prowl_onsnatch', 'nma_enabled', 'nma_onsnatch', 'pushover_enabled', 'pushover_onsnatch', 'boxcar_enabled',
'boxcar_onsnatch', 'pushbullet_enabled', 'pushbullet_onsnatch', 'telegram_enabled', 'telegram_onsnatch', 'slack_enabled', 'slack_onsnatch', 'boxcar_onsnatch', 'pushbullet_enabled', 'pushbullet_onsnatch', 'telegram_enabled', 'telegram_onsnatch', 'slack_enabled', 'slack_onsnatch',
'opds_enable', 'opds_authentication', 'opds_metainfo'] #, 'enable_ddl'] 'opds_enable', 'opds_authentication', 'opds_metainfo', 'enable_ddl']
for checked_config in checked_configs: for checked_config in checked_configs:
if checked_config not in kwargs: if checked_config not in kwargs:
@ -5168,7 +5216,12 @@ class WebInterface(object):
else: else:
verify = False verify = False
version = 'Unknown'
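#ask SABnzbd for its version first so it can be stored (SAB_VERSION) and echoed back with the test result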
try: try:
v = requests.get(querysab, params={'mode': 'version'}, verify=verify)
if str(v.status_code) == '200':
logger.fdebug('sabnzbd version: %s' % v.content)
version = v.text
r = requests.get(querysab, params=payload, verify=verify) r = requests.get(querysab, params=payload, verify=verify)
except Exception, e: except Exception, e:
logger.warn('Error fetching data from %s: %s' % (querysab, e)) logger.warn('Error fetching data from %s: %s' % (querysab, e))
@ -5183,6 +5236,10 @@ class WebInterface(object):
verify = False verify = False
try: try:
v = requests.get(querysab, params={'mode': 'version'}, verify=verify)
if str(v.status_code) == '200':
logger.fdebug('sabnzbd version: %s' % v.text)
version = v.text
r = requests.get(querysab, params=payload, verify=verify) r = requests.get(querysab, params=payload, verify=verify)
except Exception, e: except Exception, e:
logger.warn('Error fetching data from %s: %s' % (sabhost, e)) logger.warn('Error fetching data from %s: %s' % (sabhost, e))
@ -5191,7 +5248,7 @@ class WebInterface(object):
return 'Unable to retrieve data from SABnzbd' return 'Unable to retrieve data from SABnzbd'
logger.info('status code: ' + str(r.status_code)) logger.fdebug('status code: ' + str(r.status_code))
if str(r.status_code) != '200': if str(r.status_code) != '200':
logger.warn('Unable to properly query SABnzbd @' + sabhost + ' [Status Code returned: ' + str(r.status_code) + ']') logger.warn('Unable to properly query SABnzbd @' + sabhost + ' [Status Code returned: ' + str(r.status_code) + ']')
@ -5215,7 +5272,9 @@ class WebInterface(object):
mylar.CONFIG.SAB_APIKEY = q_apikey mylar.CONFIG.SAB_APIKEY = q_apikey
logger.info('APIKey provided is the FULL APIKey which is the correct key. You still need to SAVE the config for the changes to be applied.') logger.info('APIKey provided is the FULL APIKey which is the correct key. You still need to SAVE the config for the changes to be applied.')
logger.info('Connection to SABnzbd tested sucessfully') logger.info('Connection to SABnzbd tested sucessfully')
return "Successfully verified APIkey" mylar.CONFIG.SAB_VERSION = version
return json.dumps({"status": "Successfully verified APIkey.", "version": str(version)})
SABtest.exposed = True SABtest.exposed = True
def NZBGet_test(self, nzbhost=None, nzbport=None, nzbusername=None, nzbpassword=None): def NZBGet_test(self, nzbhost=None, nzbport=None, nzbusername=None, nzbpassword=None):
@ -5627,6 +5686,21 @@ class WebInterface(object):
return "Successfully validated connection to %s" % host return "Successfully validated connection to %s" % host
testrtorrent.exposed = True testrtorrent.exposed = True
def testqbit(self, host, username, password):
import torrent.clients.qbittorrent as QbitClient
qc = QbitClient.TorrentClient()
qclient = qc.connect(host, username, password, True)
if not qclient:
logger.warn('[qBittorrent] Could not establish connection to %s' % host)
return 'Error establishing connection to qBittorrent'
else:
if qclient['status'] is False:
logger.warn('[qBittorrent] Could not establish connection to %s. Error returned: %s' % (host, qclient['error']))
return 'Error establishing connection to qBittorrent'
else:
logger.info('[qBittorrent] Successfully validated connection to %s [%s]' % (host, qclient['version']))
return 'Successfully validated qBittorrent connection'
testqbit.exposed = True
def testnewznab(self, name, host, ssl, apikey): def testnewznab(self, name, host, ssl, apikey):
result = helpers.newznab_test(name, host, ssl, apikey) result = helpers.newznab_test(name, host, ssl, apikey)

View File

@ -81,7 +81,9 @@ def pullit(forcecheck=None, weeknumber=None, year=None):
elif chk_locg['status'] == 'success': elif chk_locg['status'] == 'success':
logger.info('[PULL-LIST] Weekly Pull List successfully loaded with ' + str(chk_locg['count']) + ' issues.') logger.info('[PULL-LIST] Weekly Pull List successfully loaded with ' + str(chk_locg['count']) + ' issues.')
return new_pullcheck(chk_locg['weeknumber'],chk_locg['year']) return new_pullcheck(chk_locg['weeknumber'],chk_locg['year'])
elif chk_locg['status'] == 'update_required':
logger.warn('[PULL-LIST] Your version of Mylar is not up-to-date. You MUST update before this works')
return
else: else:
logger.info('[PULL-LIST] Unable to retrieve weekly pull-list. Dropping down to legacy method of PW-file') logger.info('[PULL-LIST] Unable to retrieve weekly pull-list. Dropping down to legacy method of PW-file')
mylar.PULLBYFILE = pull_the_file(newrl) mylar.PULLBYFILE = pull_the_file(newrl)