mirror of https://github.com/evilhero/mylar
Merge branch 'development'
This commit is contained in: d3cb7f87c5
@@ -2,7 +2,7 @@
font-family: 'Lato';
font-style: normal;
font-weight: 400;
src: local('Lato Regular'), local('Lato-Regular'), url(http://themes.googleusercontent.com/static/fonts/lato/v7/qIIYRU-oROkIk8vfvxw6QvesZW2xOQ-xsNqO47m55DA.woff) format('woff');
src: local('Lato Regular'), local('Lato-Regular'), url(https://themes.googleusercontent.com/static/fonts/lato/v7/qIIYRU-oROkIk8vfvxw6QvesZW2xOQ-xsNqO47m55DA.woff) format('woff');
}

body {
@@ -397,20 +397,36 @@
</div>
</div>

<div class="row checkbox left clearfix">
<div class="row checkbox left clearfix" id="sab_cdh" style="display:unset;">
<input type="checkbox" id="sab_client_post_processing" onclick="initConfigCheckbox($this);" name="sab_client_post_processing" value="1" ${config['sab_client_post_processing']} /><label>Enable Completed Download Handling</label>
<div id="sabcompletedinfo">
<div class="row">
<small class="heading"><span style="float: left; margin-right: .3em; margin-top: 4px;" class="ui-icon ui-icon-info"></span>
ComicRN script cannot be used with this enabled</small>
ComicRN script cannot be used with this enabled & required SAB version > 0.8.0</small>
</div>
</div>
</div>
<div class="row checkbox left clearfix" id="sab_nocdh" style="display:none;">
<div>
<div class="row">
<small class="heading"><span style="float: left; margin-right: .3em; margin-top: 4px;" class="ui-icon ui-icon-info"></span>
Completed Download Handling is not available as your version of SABnzbd is not above 0.8.0</small>
</div>
</div>
</div>

<div align="center" class="row">
<img name="sabnzbd_statusicon" id="sabnzbd_statusicon" src="interfaces/default/images/successs.png" style="float:right;visibility:hidden;" height="20" width="20" />
<input type="button" value="Test SABnzbd" id="test_sab" style="float:center" /></br>
<input type="text" name="sabstatus" style="text-align:center; font-size:11px;" id="sabstatus" size="50" DISABLED />
<div name="sabversion" id="sabversion" style="font-size:11px;" align="center">
<%
if mylar.CONFIG.SAB_VERSION is not None:
sabv = 'last tested version: %s' % mylar.CONFIG.SAB_VERSION
else:
sabv = ''
%>
<span>${sabv}</span>
</div>
</div>

</fieldset>
@@ -667,15 +683,15 @@
<fieldset id="qbittorrent_options">
<div class="row">
<label>qBittorrent Host:Port </label>
<input type="text" name="qbittorrent_host" value="${config['qbittorrent_host']}" size="30">
<input type="text" name="qbittorrent_host" id="qbittorrent_host" value="${config['qbittorrent_host']}" size="30">
</div>
<div class="row">
<label>qBittorrent Username</label>
<input type="text" name="qbittorrent_username" value="${config['qbittorrent_username']}" size="30">
<input type="text" name="qbittorrent_username" id="qbittorrent_username" value="${config['qbittorrent_username']}" size="30">
</div>
<div class="row">
<label>qBittorrent Password</label>
<input type="password" name="qbittorrent_password" value="${config['qbittorrent_password']}" size="30">
<input type="password" name="qbittorrent_password" id="qbittorrent_password" value="${config['qbittorrent_password']}" size="30">
</div>
<div class="row">
<label>qBittorrent Label</label>

@@ -687,10 +703,24 @@
<input type="text" name="qbittorrent_folder" value="${config['qbittorrent_folder']}" size="30"><br/>
<small>Folder path where torrents will be assigned to</small>
</div>
<div class="row checkbox left clearfix">
<input id="qbittorrent_startonload" type="checkbox" name="qbittorrent_startonload" value="1" ${config['qbittorrent_startonload']} /><label>Start Torrent on Successful Load</label>
<small>Automatically start torrent on successful loading within qBittorrent client</small>
</div>
<div class="row">
<label>Add torrent action:</label>
<select name="qbittorrent_loadaction">
%for x in ['default', 'force_start', 'pause']:
<%
if config['qbittorrent_loadaction'] == x:
outputselect = 'selected'
else:
outputselect = ''
%>
<option value=${x} ${outputselect}>${x}</option>
%endfor
</select>
</div>
<div class="row">
<img name="qbittorrent_statusicon" id="qbittorrent_statusicon" src="interfaces/default/images/successs.png" style="float:right;visibility:hidden;" height="20" width="20" />
<input type="button" value="Test Connection" id="qbittorrent_test" />
</div>
</fieldset>
</div>
</td>
@@ -763,13 +793,11 @@
<small class="heading"><span style="float: left; margin-right: .3em; margin-top: 4px;" class="ui-icon ui-icon-info"></span>Note: this is an experimental search - results may be better/worse.</small>
</div>
</fieldset>
<!--
<fieldset>
<div class="row checkbox left clearfix">
<input type="checkbox" id="enable_ddl" name="enable_ddl" value=1 ${config['enable_ddl']} /><legend>Enable DDL (GetComics)</legend>
</div>
</fieldset>
-->
<fieldset>
<div class="row checkbox left clearfix">
<input id="enable_torrent_search" type="checkbox" onclick="initConfigCheckbox($(this));" name="enable_torrent_search" value=1 ${config['enable_torrent_search']} /><legend>Torrents</legned>

@@ -1951,7 +1979,9 @@
function numberWithCommas(x) {
return x.toString().replace(/\B(?=(\d{3})+(?!\d))/g, ",");
};

function numberWithDecimals(x) {
return x.toString().replace(/\B(?=(\d{3})+(?!\d))/g, ".");
};
$("#test_32p").click(function(){
var imagechk = document.getElementById("test32p_statusicon");
$.get('test_32p',
@@ -1992,8 +2022,25 @@
alert(data.error);
return;
}
$('#sabstatus').val(data);
$('#ajaxMsg').html("<div class='msg'><span class='ui-icon ui-icon-check'></span>"+data+"</div>");
var obj = JSON.parse(data);
var versionsab = obj['version'];
vsab = numberWithDecimals(versionsab);
$('#sabstatus').val(obj['status']);
$('#sabversion span').text('SABnzbd version: '+versionsab);
if ( vsab < "0.8.0" ){
scdh = document.getElementById("sab_cdh");
scdh.style.display = "none";
nocdh = document.getElementById("sab_nocdh");
nocdh.style.display = "unset";
scdh_line = document.getElementById("sab_client_post_processing");
scdh_line.value = 0;
} else {
scdh = document.getElementById("sab_cdh");
scdh.style.display = "unset";
nocdh = document.getElementById("sab_nocdh");
nocdh.style.display = "none";
}
$('#ajaxMsg').html("<div class='msg'><span class='ui-icon ui-icon-check'></span>"+obj['status']+"</div>");
if ( data.indexOf("Successfully") > -1){
imagechk.src = "";
imagechk.src = "interfaces/default/images/success.png";

@@ -2116,6 +2163,32 @@
$("#add_torznab").before(torformfields);
});

$('#qbittorrent_test').click(function () {
var imagechk = document.getElementById("qbittorrent_statusicon");
var host = document.getElementById("qbittorrent_host").value;
var username = document.getElementById("qbittorrent_username").value;
var password = document.getElementById("qbittorrent_password").value;
$.get("testqbit",
{ host: host, username: username, password: password },
function(data){
if (data.error != undefined) {
alert(data.error);
return;
}
$('#ajaxMsg').html("<div class='msg'><span class='ui-icon ui-icon-check'></span>"+data+"</div>");
if ( data.indexOf("Successfully") > -1){
imagechk.src = "";
imagechk.src = "interfaces/default/images/success.png";
imagechk.style.visibility = "visible";
} else {
imagechk.src = "";
imagechk.src = "interfaces/default/images/fail.png";
imagechk.style.visibility = "visible";
}
});
$('#ajaxMsg').addClass('success').fadeIn().delay(3000).fadeOut();
});

function addAction() {
$('#autoadd').append('<input type="hidden" name="tsab" value=1 />');
};
@@ -60,14 +60,14 @@
<input type="checkbox" style="vertical-align: middle; margin: 3px; margin-top: -1px;" /><label>Show Downloaded Story Arc Issues on ReadingList tab</label><br/>
<input type="checkbox" style="vertical-align: middle; margin: 3px; margin-top: -1px;" /><label>Enforce Renaming/MetaTagging options (if enabled)</label><br/>
-->
<input type="checkbox" style="vertical-align: middle; margin: 3px; margin-top: -1px;" name="read2filename" id="read2filename" value="1" ${checked(mylar.CONFIG.READ2FILENAME)} /><label>Prepend Reading# to filename</label><br/>
<input type="checkbox" style="vertical-align: middle; margin: 3px; margin-top: -1px;" name="read2filename" id="read2filename" value="1" ${checked(mylar.CONFIG.READ2FILENAME)} disabled/><label>Prepend Reading# to filename</label><br/>
<%
if mylar.CONFIG.STORYARCDIR:
carcdir = 'StoryArc'
else:
carcdir = 'GrabBag'
%>
<input type="checkbox" style="vertical-align: middle; margin: 3px; margin-top: -1px;" name="copy2arcdir" id="copy2arcdir" value="1" ${checked(mylar.CONFIG.COPY2ARCDIR)} /><label>Copy watchlisted issues to ${carcdir} Directory</label>
<input type="checkbox" style="vertical-align: middle; margin: 3px; margin-top: -1px;" name="copy2arcdir" id="copy2arcdir" value="1" ${checked(mylar.CONFIG.COPY2ARCDIR)} disabled /><label>Copy watchlisted issues to ${carcdir} Directory</label>

<input type="hidden" name="StoryArcID" value="${storyarcid}">
<input type="hidden" name="StoryArcName" value="${storyarcname}">

@@ -75,7 +75,10 @@
</fieldset>
</form>
<div style="display:block;position:relative;top:10px;">

<!--
<input type="submit" value="Update"/>
-->
</div>
<div style="display:block;float:right;position:relative;text-color:black;top:-130px;">
<h1><p style="display:inline;float:right;">${storyarcname}</h1>

@@ -111,8 +114,9 @@
</div>
</div>

<!--
<button type="button" onclick="">Finalize & Rename</button>

-->
<table class="display" id="arc_detail">
<thead>
<tr>

@@ -315,6 +319,7 @@
"sInfoFiltered":"(filtered from _MAX_ total items)"},
"iDisplayLength": 25,
"sPaginationType": "full_numbers",
"stateDuration": 0,
"aaSorting": []
})
resetFilters("item");
@@ -62,7 +62,7 @@
%endif
</div>
<div class="row checkbox left clearfix">
<input type="checkbox" style="vertical-align: middle; margin: 3px; margin-top: -1px;" name="read2filename" id="read2filename" value="1" ${checked(mylar.CONFIG.READ2FILENAME)} /><label>Prepend Reading# to filename</label>
<input type="checkbox" style="vertical-align: middle; margin: 3px; margin-top: -1px;" name="read2filename" id="read2filename" value="1" ${checked(mylar.CONFIG.READ2FILENAME)} disabled /><label>Prepend Reading# to filename</label>
<%
if mylar.CONFIG.STORYARCDIR:
carcdir = 'StoryArc'

@@ -71,12 +71,15 @@
%>
</div>
<div class="row checkbox left clearfix">
<input type="checkbox" style="vertical-align: middle; margin: 3px; margin-top: -1px;" name="copy2arcdir" id="copy2arcdir" value="1" ${checked(mylar.CONFIG.COPY2ARCDIR)} /><label>Copy watchlisted issues to ${carcdir} Directory</label>
<input type="checkbox" style="vertical-align: middle; margin: 3px; margin-top: -1px;" name="copy2arcdir" id="copy2arcdir" value="1" ${checked(mylar.CONFIG.COPY2ARCDIR)} disabled/><label>Copy watchlisted issues to ${carcdir} Directory</label>
</div>
<input type="hidden" name="StoryArcID" value="${storyarcid}">
<input type="hidden" name="StoryArcName" value="${storyarcname}">
<div style="display:inline;position:relative;top:0px;">

<!--
<input type="submit" value="Update"/>
-->
</div>
</form>
</div>
@@ -1,11 +1,11 @@
Instructions on setting up mylar as a systemd serivce that will run on startup/via systemctl commands...
Instructions on setting up mylar as a systemd service that will run on startup/via systemctl commands...

1 - copy the mylar.service to /lib/systemd/system/mylar.service
2 - create a symbolic link to it: ln -s /lib/systemd/system/mylar.service /etc/systemd/system/mylar.service
3 - copy mylar.default to /etc/default/mylar (make sure it's renamed from mylar.default to just mylar)
4 - copy mylar.initd to /etc/init.d/mylar (rename it to just mylar) and then 'sudo chmod +x /etc/init.d/mylar'
5 - edit the /etc/default/mylar file to your defaults (make sure to set MYLAR_USER & MYLAR_HOME as they're required)
6 - make systemd aware of new services: sudo sytemctl daemon-reload
6 - make systemd aware of new services: sudo systemctl daemon-reload
7 - sudo systemctl enable mylar
8 - sudo systemctl start mylar
9 - to check to see if running/status - sudo sytemctl status mylar
9 - to check to see if running/status - sudo systemctl status mylar
@@ -1,7 +1,6 @@
import requests
import json

class LoginRequired(Exception):
def __str__(self):
return 'Please login first.'

@@ -15,7 +14,7 @@ class Client(object):
self.url = url

session = requests.Session()
check_prefs = session.get(url+'query/preferences')
check_prefs = session.get(url+'api/v2/app/preferences')

if check_prefs.status_code == 200:
self._is_authenticated = True

@@ -24,9 +23,9 @@ class Client(object):
elif check_prefs.status_code == 404:
self._is_authenticated = False
raise RuntimeError("""
This wrapper only supports qBittorrent applications
with version higher than 3.1.x.
Please use the latest qBittorrent release.
This wrapper only supports qBittorrent applications with
version higher than 4.1.0 (which implemented Web API v2.0).
Please use the latest qBittorrent release.
""")

else:

@@ -104,7 +103,7 @@ class Client(object):
:return: Response to login request to the API.
"""
self.session = requests.Session()
login = self.session.post(self.url+'login',
login = self.session.post(self.url+'api/v2/auth/login',
data={'username': username,
'password': password})
if login.text == 'Ok.':
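The hunk above moves the login call from the old /login endpoint to api/v2/auth/login. As a rough, standalone illustration of what that endpoint expects (the host and credentials below are placeholders, not values from this commit), a minimal login check with requests might look like:

# Minimal sketch of a qBittorrent Web API v2 login; QB_URL and the
# credentials are hypothetical and only illustrate the endpoint shape.
import requests

QB_URL = 'http://localhost:8080/'
session = requests.Session()
resp = session.post(QB_URL + 'api/v2/auth/login',
                    data={'username': 'admin', 'password': 'adminadmin'})

# qBittorrent answers with the literal text 'Ok.' on success and keeps the
# authenticated state in the SID cookie stored on the session object.
if resp.text == 'Ok.':
    print('authenticated, SID cookie: %s' % session.cookies.get('SID'))
else:
    print('login failed: %s' % resp.text)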
@@ -116,7 +115,7 @@ class Client(object):
"""
Logout the current session.
"""
response = self._get('logout')
response = self._get('api/v2/auth/logout')
self._is_authenticated = False
return response

@@ -125,27 +124,20 @@ class Client(object):
"""
Get qBittorrent version.
"""
return self._get('version/qbittorrent')
return self._get('api/v2/app/version')

@property
def api_version(self):
"""
Get WEB API version.
"""
return self._get('version/api')

@property
def api_min_version(self):
"""
Get minimum WEB API version.
"""
return self._get('version/api_min')
return self._get('api/v2/app/webapiVersion')

def shutdown(self):
"""
Shutdown qBittorrent.
"""
return self._get('command/shutdown')
return self._get('api/v2/app/shutdown')

def torrents(self, **filters):
"""

@@ -157,6 +149,7 @@ class Client(object):
:param reverse: Enable reverse sorting.
:param limit: Limit the number of torrents returned.
:param offset: Set offset (if less than 0, offset from end).
:param hashes: Filter by hashes. Can contain multiple hashes separated by |.

:return: list() of torrent with matching filter.
"""

@@ -166,7 +159,7 @@ class Client(object):
name = 'filter' if name == 'status' else name
params[name] = value

return self._get('query/torrents', params=params)
return self._get('api/v2/torrents/info', params=params)

def get_torrent(self, infohash):
"""

@@ -174,7 +167,7 @@ class Client(object):
:param infohash: INFO HASH of the torrent.
"""
return self._get('query/propertiesGeneral/' + infohash.lower())
return self._get('api/v2/torrents/properties', params={'hash': infohash.lower()})

def get_torrent_trackers(self, infohash):
"""

@@ -182,7 +175,7 @@ class Client(object):
:param infohash: INFO HASH of the torrent.
"""
return self._get('query/propertiesTrackers/' + infohash.lower())
return self._get('api/v2/torrents/trackers', params={'hash': infohash.lower()})

def get_torrent_webseeds(self, infohash):
"""

@@ -190,7 +183,7 @@ class Client(object):
:param infohash: INFO HASH of the torrent.
"""
return self._get('query/propertiesWebSeeds/' + infohash.lower())
return self._get('api/v2/torrents/webseeds', params={'hash': infohash.lower()})

def get_torrent_files(self, infohash):
"""

@@ -198,14 +191,14 @@ class Client(object):
:param infohash: INFO HASH of the torrent.
"""
return self._get('query/propertiesFiles/' + infohash.lower())
return self._get('api/v2/torrents/files', params={'hash': infohash.lower()})

@property
def global_transfer_info(self):
"""
Get JSON data of the global transfer info of qBittorrent.
"""
return self._get('query/transferInfo')
return self._get('api/v2/transfer/info')

@property
def preferences(self):
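With the version endpoints consolidated onto api/v2/app/version and api/v2/app/webapiVersion, callers read both values off the client the same way as before. A hypothetical usage sketch (the import path, property names and credentials are assumptions, not taken from this commit):

# Hypothetical usage of the version properties after the v2 migration.
from qbittorrent.client import Client   # import path is an assumption

qb = Client('http://localhost:8080/')
qb.login('admin', 'adminadmin')          # placeholder credentials
print(qb.qbittorrent_version)            # served by api/v2/app/version
print(qb.api_version)                    # served by api/v2/app/webapiVersion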
@@ -228,7 +221,7 @@ class Client(object):
qb.preferences()

"""
prefs = self._get('query/preferences')
prefs = self._get('api/v2/app/preferences')

class Proxy(Client):
"""

@@ -270,11 +263,11 @@ class Client(object):
def sync(self, rid=0):
"""
Sync the torrents by supplied LAST RESPONSE ID.
Read more @ http://git.io/vEgXr
Read more @ https://git.io/fxgB8

:param rid: Response ID of last request.
"""
return self._get('sync/maindata', params={'rid': rid})
return self._get('api/v2/sync/maindata', params={'rid': rid})

def download_from_link(self, link, **kwargs):
"""

@@ -286,22 +279,20 @@ class Client(object):
:return: Empty JSON data.
"""
# old:new format
old_arg_map = {'save_path': 'savepath'} # , 'label': 'category'}

# convert old option names to new option names
options = kwargs.copy()
for old_arg, new_arg in old_arg_map.items():
if options.get(old_arg) and not options.get(new_arg):
options[new_arg] = options[old_arg]

options['urls'] = link

# workaround to send multipart/formdata request
# http://stackoverflow.com/a/23131823/4726598
dummy_file = {'_dummy': (None, '_dummy')}

return self._post('command/download', data=options, files=dummy_file)
# qBittorrent requires adds to be done with multipath/form-data
# POST requests for both URLs and .torrent files. Info on this
# can be found here, and here:
# http://docs.python-requests.org/en/master/user/quickstart/#post-a-multipart-encoded-file
# http://docs.python-requests.org/en/master/user/advanced/#post-multiple-multipart-encoded-files
if isinstance(link, list):
links = '\n'.join(link)
else:
links = link
torrent_data = {}
torrent_data['urls'] = (None, links)
for k, v in kwargs.iteritems():
torrent_data[k] = (None, v)
return self._post('api/v2/torrents/add', data=None, files=torrent_data)

def download_from_file(self, file_buffer, **kwargs):
"""
@@ -313,18 +304,23 @@ class Client(object):
:return: Empty JSON data.
"""
# qBittorrent requires adds to be done with multipath/form-data
# POST requests for both URLs and .torrent files. Info on this
# can be found here, and here:
# http://docs.python-requests.org/en/master/user/quickstart/#post-a-multipart-encoded-file
# http://docs.python-requests.org/en/master/user/advanced/#post-multiple-multipart-encoded-files
if isinstance(file_buffer, list):
torrent_files = {}
for i, f in enumerate(file_buffer):
torrent_files.update({'torrents%s' % i: f})
torrent_data = []
for f in file_buffer:
fname = f.name
torrent_data.append(('torrents', (fname, f)))
else:
torrent_files = {'torrents': file_buffer}
fname = file_buffer.name
torrent_data = [('torrents', (fname, file_buffer))]
for k, v in kwargs.iteritems():
torrent_data.append((k, (None, v)))

data = kwargs.copy()

if data.get('save_path'):
data.update({'savepath': data['save_path']})
return self._post('command/upload', data=data, files=torrent_files)
return self._post('api/v2/torrents/add', data=None, files=torrent_data)

def add_trackers(self, infohash, trackers):
"""
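Both download_from_link and download_from_file now post to api/v2/torrents/add as multipart/form-data, wrapping every non-file field as a (None, value) tuple so requests encodes it as a form part. A self-contained sketch of the same idea, assuming an already-authenticated session and placeholder values:

# Hedged sketch of the multipart add used above: each non-file field is
# wrapped as (None, value) so requests emits it as a form-data part.
import requests

QB_URL = 'http://localhost:8080/'            # placeholder WebUI address
session = requests.Session()                 # assumes the SID cookie is already set

torrent_data = {
    'urls': (None, 'magnet:?xt=urn:btih:...'),   # placeholder link
    'savepath': (None, '/data/comics'),          # optional qBittorrent options
    'category': (None, 'mylar'),
}
resp = session.post(QB_URL + 'api/v2/torrents/add', files=torrent_data)
print(resp.status_code, resp.text)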
@@ -335,7 +331,7 @@ class Client(object):
"""
data = {'hash': infohash.lower(),
'urls': trackers}
return self._post('command/addTrackers', data=data)
return self._post('api/v2/torrents/addTrackers', data=data)

@staticmethod
def _process_infohash_list(infohash_list):

@@ -356,13 +352,13 @@ class Client(object):
:param infohash: INFO HASH of torrent.
"""
return self._post('command/pause', data={'hash': infohash.lower()})
return self._post('api/v2/torrents/pause', data={'hashes': infohash.lower()})

def pause_all(self):
"""
Pause all torrents.
"""
return self._get('command/pauseAll')
return self._post('api/v2/torrents/pause', data={'hashes': 'all'})

def pause_multiple(self, infohash_list):
"""

@@ -371,18 +367,7 @@ class Client(object):
:param infohash_list: Single or list() of infohashes.
"""
data = self._process_infohash_list(infohash_list)
return self._post('command/pauseAll', data=data)

def set_label(self, infohash_list, label):
"""
Set the label on multiple torrents.
IMPORTANT: OLD API method, kept as it is to avoid breaking stuffs.

:param infohash_list: Single or list() of infohashes.
"""
data = self._process_infohash_list(infohash_list)
data['label'] = label
return self._post('command/setLabel', data=data)
return self._post('api/v2/torrents/pause', data=data)

def set_category(self, infohash_list, category):
"""

@@ -392,7 +377,7 @@ class Client(object):
"""
data = self._process_infohash_list(infohash_list)
data['category'] = category
return self._post('command/setCategory', data=data)
return self._post('api/v2/torrents/setCategory', data=data)

def resume(self, infohash):
"""

@@ -400,13 +385,13 @@ class Client(object):
:param infohash: INFO HASH of torrent.
"""
return self._post('command/resume', data={'hash': infohash.lower()})
return self._post('api/v2/torrents/resume', data={'hashes': infohash.lower()})

def resume_all(self):
"""
Resume all torrents.
"""
return self._get('command/resumeAll')
return self._post('api/v2/torrents/resume', data={'hashes': 'all'})

def resume_multiple(self, infohash_list):
"""

@@ -415,7 +400,7 @@ class Client(object):
:param infohash_list: Single or list() of infohashes.
"""
data = self._process_infohash_list(infohash_list)
return self._post('command/resumeAll', data=data)
return self._post('api/v2/torrents/resume', data=data)

def delete(self, infohash_list):
"""

@@ -424,16 +409,21 @@ class Client(object):
:param infohash_list: Single or list() of infohashes.
"""
data = self._process_infohash_list(infohash_list)
return self._post('command/delete', data=data)
data['deleteFiles'] = 'false'
return self._post('api/v2/torrents/delete', data=data)

def delete_permanently(self, infohash_list):
"""
Permanently delete torrents.

*** WARNING : This will instruct qBittorrent to delete files
*** from your hard disk. Use with caution.

:param infohash_list: Single or list() of infohashes.
"""
data = self._process_infohash_list(infohash_list)
return self._post('command/deletePerm', data=data)
data['deleteFiles'] = 'true'
return self._post('api/v2/torrents/delete', data=data)

def recheck(self, infohash_list):
"""
@@ -442,7 +432,7 @@ class Client(object):
:param infohash_list: Single or list() of infohashes.
"""
data = self._process_infohash_list(infohash_list)
return self._post('command/recheck', data=data)
return self._post('api/v2/torrents/recheck', data=data)

def increase_priority(self, infohash_list):
"""

@@ -451,7 +441,7 @@ class Client(object):
:param infohash_list: Single or list() of infohashes.
"""
data = self._process_infohash_list(infohash_list)
return self._post('command/increasePrio', data=data)
return self._post('api/v2/torrents/increasePrio', data=data)

def decrease_priority(self, infohash_list):
"""

@@ -460,7 +450,7 @@ class Client(object):
:param infohash_list: Single or list() of infohashes.
"""
data = self._process_infohash_list(infohash_list)
return self._post('command/decreasePrio', data=data)
return self._post('api/v2/torrents/decreasePrio', data=data)

def set_max_priority(self, infohash_list):
"""

@@ -469,7 +459,7 @@ class Client(object):
:param infohash_list: Single or list() of infohashes.
"""
data = self._process_infohash_list(infohash_list)
return self._post('command/topPrio', data=data)
return self._post('api/v2/torrents/topPrio', data=data)

def set_min_priority(self, infohash_list):
"""

@@ -478,7 +468,7 @@ class Client(object):
:param infohash_list: Single or list() of infohashes.
"""
data = self._process_infohash_list(infohash_list)
return self._post('command/bottomPrio', data=data)
return self._post('api/v2/torrents/bottomPrio', data=data)

def set_file_priority(self, infohash, file_id, priority):
"""

@@ -488,7 +478,7 @@ class Client(object):
:param file_id: ID of the file to set priority.
:param priority: Priority level of the file.
"""
if priority not in [0, 1, 2, 7]:
if priority not in [0, 1, 6, 7]:
raise ValueError("Invalid priority, refer WEB-UI docs for info.")
elif not isinstance(file_id, int):
raise TypeError("File ID must be an int")

@@ -497,7 +487,7 @@ class Client(object):
'id': file_id,
'priority': priority}

return self._post('command/setFilePrio', data=data)
return self._post('api/v2/torrents/filePrio', data=data)

# Get-set global download and upload speed limits.

@@ -505,7 +495,7 @@ class Client(object):
"""
Get global download speed limit.
"""
return self._get('command/getGlobalDlLimit')
return self._get('api/v2/transfer/downloadLimit')

def set_global_download_limit(self, limit):
"""

@@ -513,7 +503,7 @@ class Client(object):
:param limit: Speed limit in bytes.
"""
return self._post('command/setGlobalDlLimit', data={'limit': limit})
return self._post('api/v2/transfer/setDownloadLimit', data={'limit': limit})

global_download_limit = property(get_global_download_limit,
set_global_download_limit)

@@ -522,7 +512,7 @@ class Client(object):
"""
Get global upload speed limit.
"""
return self._get('command/getGlobalUpLimit')
return self._get('api/v2/transfer/uploadLimit')

def set_global_upload_limit(self, limit):
"""

@@ -530,7 +520,7 @@ class Client(object):
:param limit: Speed limit in bytes.
"""
return self._post('command/setGlobalUpLimit', data={'limit': limit})
return self._post('api/v2/transfer/setUploadLimit', data={'limit': limit})

global_upload_limit = property(get_global_upload_limit,
set_global_upload_limit)
@@ -543,7 +533,7 @@ class Client(object):
:param infohash_list: Single or list() of infohashes.
"""
data = self._process_infohash_list(infohash_list)
return self._post('command/getTorrentsDlLimit', data=data)
return self._post('api/v2/torrents/downloadLimit', data=data)

def set_torrent_download_limit(self, infohash_list, limit):
"""

@@ -554,7 +544,7 @@ class Client(object):
"""
data = self._process_infohash_list(infohash_list)
data.update({'limit': limit})
return self._post('command/setTorrentsDlLimit', data=data)
return self._post('api/v2/torrents/setDownloadLimit', data=data)

def get_torrent_upload_limit(self, infohash_list):
"""

@@ -563,7 +553,7 @@ class Client(object):
:param infohash_list: Single or list() of infohashes.
"""
data = self._process_infohash_list(infohash_list)
return self._post('command/getTorrentsUpLimit', data=data)
return self._post('api/v2/torrents/uploadLimit', data=data)

def set_torrent_upload_limit(self, infohash_list, limit):
"""

@@ -574,26 +564,26 @@ class Client(object):
"""
data = self._process_infohash_list(infohash_list)
data.update({'limit': limit})
return self._post('command/setTorrentsUpLimit', data=data)
return self._post('api/v2/torrents/setUploadLimit', data=data)

# setting preferences
def set_preferences(self, **kwargs):
"""
Set preferences of qBittorrent.
Read all possible preferences @ http://git.io/vEgDQ
Read all possible preferences @ https://git.io/fx2Y9

:param kwargs: set preferences in kwargs form.
"""
json_data = "json={}".format(json.dumps(kwargs))
headers = {'content-type': 'application/x-www-form-urlencoded'}
return self._post('command/setPreferences', data=json_data,
return self._post('api/v2/app/setPreferences', data=json_data,
headers=headers)

def get_alternative_speed_status(self):
"""
Get Alternative speed limits. (1/0)
"""
return self._get('command/alternativeSpeedLimitsEnabled')
return self._get('api/v2/transfer/speedLimitsMode')

alternative_speed_status = property(get_alternative_speed_status)

@@ -601,7 +591,7 @@ class Client(object):
"""
Toggle alternative speed limits.
"""
return self._get('command/toggleAlternativeSpeedLimits')
return self._get('api/v2/transfer/toggleSpeedLimitsMode')

def toggle_sequential_download(self, infohash_list):
"""

@@ -610,7 +600,7 @@ class Client(object):
:param infohash_list: Single or list() of infohashes.
"""
data = self._process_infohash_list(infohash_list)
return self._post('command/toggleSequentialDownload', data=data)
return self._post('api/v2/torrents/toggleSequentialDownload', data=data)

def toggle_first_last_piece_priority(self, infohash_list):
"""

@@ -619,7 +609,7 @@ class Client(object):
:param infohash_list: Single or list() of infohashes.
"""
data = self._process_infohash_list(infohash_list)
return self._post('command/toggleFirstLastPiecePrio', data=data)
return self._post('api/v2/torrents/toggleFirstLastPiecePrio', data=data)

def force_start(self, infohash_list, value=True):
"""

@@ -630,4 +620,4 @@ class Client(object):
"""
data = self._process_infohash_list(infohash_list)
data.update({'value': json.dumps(value)})
return self._post('command/setForceStart', data=data)
return self._post('api/v2/torrents/setForceStart', data=data)
File diff suppressed because it is too large
@@ -123,10 +123,13 @@ USE_WATCHDIR = False
SNPOOL = None
NZBPOOL = None
SEARCHPOOL = None
PPPOOL = None
DDLPOOL = None
SNATCHED_QUEUE = Queue.Queue()
NZB_QUEUE = Queue.Queue()
PP_QUEUE = Queue.Queue()
SEARCH_QUEUE = Queue.Queue()
DDL_QUEUE = Queue.Queue()
SEARCH_TIER_DATE = None
COMICSORT = None
PULLBYFILE = False

@@ -142,6 +145,7 @@ LOCAL_IP = None
DOWNLOAD_APIKEY = None
APILOCK = False
SEARCHLOCK = False
DDL_LOCK = False
CMTAGGER_PATH = None
STATIC_COMICRN_VERSION = "1.01"
STATIC_APC_VERSION = "2.04"

@@ -162,11 +166,11 @@ def initialize(config_file):
with INIT_LOCK:

global CONFIG, _INITIALIZED, QUIET, CONFIG_FILE, OS_DETECT, MAINTENANCE, CURRENT_VERSION, LATEST_VERSION, COMMITS_BEHIND, INSTALL_TYPE, IMPORTLOCK, PULLBYFILE, INKDROPS_32P, \
DONATEBUTTON, CURRENT_WEEKNUMBER, CURRENT_YEAR, UMASK, USER_AGENT, SNATCHED_QUEUE, NZB_QUEUE, PP_QUEUE, SEARCH_QUEUE, PULLNEW, COMICSORT, WANTED_TAB_OFF, CV_HEADERS, \
DONATEBUTTON, CURRENT_WEEKNUMBER, CURRENT_YEAR, UMASK, USER_AGENT, SNATCHED_QUEUE, NZB_QUEUE, PP_QUEUE, SEARCH_QUEUE, DDL_QUEUE, PULLNEW, COMICSORT, WANTED_TAB_OFF, CV_HEADERS, \
IMPORTBUTTON, IMPORT_FILES, IMPORT_TOTALFILES, IMPORT_CID_COUNT, IMPORT_PARSED_COUNT, IMPORT_FAILURE_COUNT, CHECKENABLED, CVURL, DEMURL, WWTURL, WWT_CF_COOKIEVALUE, \
USE_SABNZBD, USE_NZBGET, USE_BLACKHOLE, USE_RTORRENT, USE_UTORRENT, USE_QBITTORRENT, USE_DELUGE, USE_TRANSMISSION, USE_WATCHDIR, SAB_PARAMS, \
PROG_DIR, DATA_DIR, CMTAGGER_PATH, DOWNLOAD_APIKEY, LOCAL_IP, STATIC_COMICRN_VERSION, STATIC_APC_VERSION, KEYS_32P, AUTHKEY_32P, FEED_32P, FEEDINFO_32P, \
MONITOR_STATUS, SEARCH_STATUS, RSS_STATUS, WEEKLY_STATUS, VERSION_STATUS, UPDATER_STATUS, DBUPDATE_INTERVAL, LOG_LANG, LOG_CHARSET, APILOCK, SEARCHLOCK, LOG_LEVEL, \
MONITOR_STATUS, SEARCH_STATUS, RSS_STATUS, WEEKLY_STATUS, VERSION_STATUS, UPDATER_STATUS, DBUPDATE_INTERVAL, LOG_LANG, LOG_CHARSET, APILOCK, SEARCHLOCK, DDL_LOCK, LOG_LEVEL, \
SCHED_RSS_LAST, SCHED_WEEKLY_LAST, SCHED_MONITOR_LAST, SCHED_SEARCH_LAST, SCHED_VERSION_LAST, SCHED_DBUPDATE_LAST, COMICINFO, SEARCH_TIER_DATE

cc = mylar.config.Config(config_file)
@@ -367,6 +371,9 @@ def start():
search_diff = datetime.datetime.utcfromtimestamp(helpers.utctimestamp() + ((int(CONFIG.SEARCH_INTERVAL) * 60) - (duration_diff*60)))
logger.fdebug('[AUTO-SEARCH] Scheduling next run @ %s every %s minutes' % (search_diff, CONFIG.SEARCH_INTERVAL))
SCHED.add_job(func=ss.run, id='search', name='Auto-Search', next_run_time=search_diff, trigger=IntervalTrigger(hours=0, minutes=CONFIG.SEARCH_INTERVAL, timezone='UTC'))
else:
ss = searchit.CurrentSearcher()
SCHED.add_job(func=ss.run, id='search', name='Auto-Search', next_run_time=None, trigger=IntervalTrigger(hours=0, minutes=CONFIG.SEARCH_INTERVAL, timezone='UTC'))

if all([CONFIG.ENABLE_TORRENTS, CONFIG.AUTO_SNATCH, OS_DETECT != 'Windows']) and any([CONFIG.TORRENT_DOWNLOADER == 2, CONFIG.TORRENT_DOWNLOADER == 4]):
logger.info('[AUTO-SNATCHER] Auto-Snatch of completed torrents enabled & attempting to background load....')

@@ -390,12 +397,18 @@ def start():
SEARCHPOOL = threading.Thread(target=helpers.search_queue, args=(SEARCH_QUEUE,), name="SEARCH-QUEUE")
SEARCHPOOL.start()

if all([CONFIG.POST_PROCESSING is True, CONFIG.API_ENABLED is True]):
if CONFIG.POST_PROCESSING is True:
logger.info('[POST-PROCESS-QUEUE] Post Process queue enabled & monitoring for api requests....')
PPPOOL = threading.Thread(target=helpers.postprocess_main, args=(PP_QUEUE,), name="POST-PROCESS-QUEUE")
PPPOOL.start()
logger.info('[POST-PROCESS-QUEUE] Succesfully started Post-Processing Queuer....')

if CONFIG.ENABLE_DDL is True:
logger.info('[DDL-QUEUE] DDL Download queue enabled & monitoring for requests....')
DDLPOOL = threading.Thread(target=helpers.ddl_downloader, args=(DDL_QUEUE,), name="DDL-QUEUE")
DDLPOOL.start()
logger.info('[DDL-QUEUE] Succesfully started DDL Download Queuer....')

helpers.latestdate_fix()

if CONFIG.ALT_PULL == 2:
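The new DDL queue mirrors the other worker pools: a module-level Queue.Queue() plus a background thread that blocks on get() and stops when it receives the 'exit' sentinel that halt() pushes further down. A minimal sketch of that pattern, independent of Mylar's helpers module:

# Sketch of the queue/worker pattern behind DDL_QUEUE and DDLPOOL
# (Python 2 style, matching the surrounding code).
import Queue
import threading

DDL_QUEUE = Queue.Queue()

def ddl_downloader(queue):
    while True:
        item = queue.get()
        if item == 'exit':      # sentinel pushed by halt() to stop the thread
            break
        print('processing %s' % item)   # a real worker would download here

DDLPOOL = threading.Thread(target=ddl_downloader, args=(DDL_QUEUE,), name='DDL-QUEUE')
DDLPOOL.start()

DDL_QUEUE.put({'link': 'http://example.com/comic'})   # placeholder work item
DDL_QUEUE.put('exit')
DDLPOOL.join()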
@@ -491,7 +504,7 @@ def dbcheck():
c.execute('SELECT ReleaseDate from storyarcs')
except sqlite3.OperationalError:
try:
c.execute('CREATE TABLE IF NOT EXISTS storyarcs(StoryArcID TEXT, ComicName TEXT, IssueNumber TEXT, SeriesYear TEXT, IssueYEAR TEXT, StoryArc TEXT, TotalIssues TEXT, Status TEXT, inCacheDir TEXT, Location TEXT, IssueArcID TEXT, ReadingOrder INT, IssueID TEXT, ComicID TEXT, ReleaseDate TEXT, IssueDate TEXT, Publisher TEXT, IssuePublisher TEXT, IssueName TEXT, CV_ArcID TEXT, Int_IssueNumber INT, DynamicComicName TEXT, Volume TEXT, Manual TEXT, DateAdded TEXT, DigitalDate TEXT)')
c.execute('CREATE TABLE IF NOT EXISTS storyarcs(StoryArcID TEXT, ComicName TEXT, IssueNumber TEXT, SeriesYear TEXT, IssueYEAR TEXT, StoryArc TEXT, TotalIssues TEXT, Status TEXT, inCacheDir TEXT, Location TEXT, IssueArcID TEXT, ReadingOrder INT, IssueID TEXT, ComicID TEXT, ReleaseDate TEXT, IssueDate TEXT, Publisher TEXT, IssuePublisher TEXT, IssueName TEXT, CV_ArcID TEXT, Int_IssueNumber INT, DynamicComicName TEXT, Volume TEXT, Manual TEXT, DateAdded TEXT, DigitalDate TEXT, Type TEXT, Aliases TEXT)')
c.execute('INSERT INTO storyarcs(StoryArcID, ComicName, IssueNumber, SeriesYear, IssueYEAR, StoryArc, TotalIssues, Status, inCacheDir, Location, IssueArcID, ReadingOrder, IssueID, ComicID, ReleaseDate, IssueDate, Publisher, IssuePublisher, IssueName, CV_ArcID, Int_IssueNumber, DynamicComicName, Volume, Manual) SELECT StoryArcID, ComicName, IssueNumber, SeriesYear, IssueYEAR, StoryArc, TotalIssues, Status, inCacheDir, Location, IssueArcID, ReadingOrder, IssueID, ComicID, StoreDate, IssueDate, Publisher, IssuePublisher, IssueName, CV_ArcID, Int_IssueNumber, DynamicComicName, Volume, Manual FROM readinglist')
c.execute('DROP TABLE readinglist')
except sqlite3.OperationalError:

@@ -514,7 +527,8 @@ def dbcheck():
c.execute('CREATE TABLE IF NOT EXISTS oneoffhistory (ComicName TEXT, IssueNumber TEXT, ComicID TEXT, IssueID TEXT, Status TEXT, weeknumber TEXT, year TEXT)')
c.execute('CREATE TABLE IF NOT EXISTS jobhistory (JobName TEXT, prev_run_datetime timestamp, prev_run_timestamp REAL, next_run_datetime timestamp, next_run_timestamp REAL, last_run_completed TEXT, successful_completions TEXT, failed_completions TEXT, status TEXT)')
c.execute('CREATE TABLE IF NOT EXISTS manualresults (provider TEXT, id TEXT, kind TEXT, comicname TEXT, volume TEXT, oneoff TEXT, fullprov TEXT, issuenumber TEXT, modcomicname TEXT, name TEXT, link TEXT, size TEXT, pack_numbers TEXT, pack_issuelist TEXT, comicyear TEXT, issuedate TEXT, tmpprov TEXT, pack TEXT, issueid TEXT, comicid TEXT, sarc TEXT, issuearcid TEXT)')
c.execute('CREATE TABLE IF NOT EXISTS storyarcs(StoryArcID TEXT, ComicName TEXT, IssueNumber TEXT, SeriesYear TEXT, IssueYEAR TEXT, StoryArc TEXT, TotalIssues TEXT, Status TEXT, inCacheDir TEXT, Location TEXT, IssueArcID TEXT, ReadingOrder INT, IssueID TEXT, ComicID TEXT, ReleaseDate TEXT, IssueDate TEXT, Publisher TEXT, IssuePublisher TEXT, IssueName TEXT, CV_ArcID TEXT, Int_IssueNumber INT, DynamicComicName TEXT, Volume TEXT, Manual TEXT, DateAdded TEXT, DigitalDate TEXT)')
c.execute('CREATE TABLE IF NOT EXISTS storyarcs(StoryArcID TEXT, ComicName TEXT, IssueNumber TEXT, SeriesYear TEXT, IssueYEAR TEXT, StoryArc TEXT, TotalIssues TEXT, Status TEXT, inCacheDir TEXT, Location TEXT, IssueArcID TEXT, ReadingOrder INT, IssueID TEXT, ComicID TEXT, ReleaseDate TEXT, IssueDate TEXT, Publisher TEXT, IssuePublisher TEXT, IssueName TEXT, CV_ArcID TEXT, Int_IssueNumber INT, DynamicComicName TEXT, Volume TEXT, Manual TEXT, DateAdded TEXT, DigitalDate TEXT, Type TEXT, Aliases TEXT)')
c.execute('CREATE TABLE IF NOT EXISTS ddl_info (ID TEXT UNIQUE, series TEXT, year TEXT, filename TEXT, size TEXT, issueid TEXT, comicid TEXT, link TEXT, status TEXT, remote_filesize TEXT, updated_date TEXT, mainlink TEXT)')
conn.commit
c.close

@@ -1024,6 +1038,16 @@ def dbcheck():
except sqlite3.OperationalError:
c.execute('ALTER TABLE storyarcs ADD COLUMN DigitalDate TEXT')

try:
c.execute('SELECT Type from storyarcs')
except sqlite3.OperationalError:
c.execute('ALTER TABLE storyarcs ADD COLUMN Type TEXT')

try:
c.execute('SELECT Aliases from storyarcs')
except sqlite3.OperationalError:
c.execute('ALTER TABLE storyarcs ADD COLUMN Aliases TEXT')

## -- searchresults Table --
try:
c.execute('SELECT SRID from searchresults')
@@ -1075,6 +1099,22 @@ def dbcheck():
except sqlite3.OperationalError:
c.execute('ALTER TABLE jobhistory ADD COLUMN status TEXT')

## -- DDL_info Table --
try:
c.execute('SELECT remote_filesize from ddl_info')
except sqlite3.OperationalError:
c.execute('ALTER TABLE ddl_info ADD COLUMN remote_filesize TEXT')

try:
c.execute('SELECT updated_date from ddl_info')
except sqlite3.OperationalError:
c.execute('ALTER TABLE ddl_info ADD COLUMN updated_date TEXT')

try:
c.execute('SELECT mainlink from ddl_info')
except sqlite3.OperationalError:
c.execute('ALTER TABLE ddl_info ADD COLUMN mainlink TEXT')

#if it's prior to Wednesday, the issue counts will be inflated by one as the online db's everywhere
#prepare for the next 'new' release of a series. It's caught in updater.py, so let's just store the
#value in the sql so we can display it in the details screen for everyone to wonder at.
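The ddl_info changes above reuse dbcheck()'s usual migration idiom: probe for a column with a SELECT and, when sqlite3 raises OperationalError, ALTER the table to add it. A self-contained sketch of that idiom using the same table and column names:

# Standalone illustration of the "add column if missing" idiom used in dbcheck().
import sqlite3

conn = sqlite3.connect(':memory:')   # placeholder database
c = conn.cursor()
c.execute('CREATE TABLE IF NOT EXISTS ddl_info (ID TEXT UNIQUE, series TEXT)')

try:
    # The probe fails with OperationalError when the column does not exist yet.
    c.execute('SELECT remote_filesize from ddl_info')
except sqlite3.OperationalError:
    c.execute('ALTER TABLE ddl_info ADD COLUMN remote_filesize TEXT')

conn.commit()
conn.close()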
@@ -1223,6 +1263,29 @@ def halt():
SEARCHPOOL.join(5)
except AssertionError:
os._exit(0)

if PPPOOL is not None:
logger.info('Terminating the post-processing queue thread.')
try:
PPPOOL.join(10)
logger.info('Joined pool for termination - successful')
except KeyboardInterrupt:
PP_QUEUE.put('exit')
PPPOOL.join(5)
except AssertionError:
os._exit(0)

if DDLPOOL is not None:
logger.info('Terminating the DDL download queue thread.')
try:
DDLPOOL.join(10)
logger.info('Joined pool for termination - successful')
except KeyboardInterrupt:
DDL_QUEUE.put('exit')
DDLPOOL.join(5)
except AssertionError:
os._exit(0)

_INITIALIZED = False

def shutdown(restart=False, update=False, maintenance=False):
@@ -358,6 +358,11 @@ class Api(object):
else:
comicid = kwargs['comicid']

if 'ddl' not in kwargs:
ddl = False
else:
ddl = True

if 'apc_version' not in kwargs:
logger.info('Received API Request for PostProcessing %s [%s]. Queueing...' % (self.nzb_name, self.nzb_folder))
mylar.PP_QUEUE.put({'nzb_name': self.nzb_name,

@@ -365,7 +370,8 @@ class Api(object):
'issueid': issueid,
'failed': failed,
'comicid': comicid,
'apicall': True})
'apicall': True,
'ddl': ddl})
self.data = 'Successfully submitted request for post-processing for %s' % self.nzb_name
#fp = process.Process(self.nzb_name, self.nzb_folder, issueid=issueid, failed=failed, comicid=comicid, apicall=True)
#self.data = fp.post_process()
mylar/auth32p.py (322 changed lines)
@@ -1,3 +1,18 @@
# This file is part of Mylar.
#
# Mylar is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Mylar is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Mylar. If not, see <http://www.gnu.org/licenses/>.

import urllib2
import json
import re

@@ -13,7 +28,7 @@ import cfscrape
from operator import itemgetter

import mylar
from mylar import logger, filechecker, helpers
from mylar import db, logger, filechecker, helpers

class info32p(object):

@@ -35,7 +50,6 @@ class info32p(object):
self.method = None

lses = self.LoginSession(mylar.CONFIG.USERNAME_32P, mylar.CONFIG.PASSWORD_32P)

if not lses.login():
if not self.test:
logger.error('%s [LOGIN FAILED] Disabling 32P provider until login error(s) can be fixed in order to avoid temporary bans.' % self.module)

@@ -49,6 +63,7 @@ class info32p(object):
logger.fdebug('%s [LOGIN SUCCESS] Now preparing for the use of 32P keyed authentication...' % self.module)
self.authkey = lses.authkey
self.passkey = lses.passkey
self.session = lses.ses
self.uid = lses.uid
try:
mylar.INKDROPS_32P = int(math.floor(float(lses.inkdrops['results'][0]['inkdrops'])))
@@ -67,26 +82,26 @@ class info32p(object):
feedinfo = []

try:
with cfscrape.create_scraper() as s:
s.headers = self.headers
cj = LWPCookieJar(os.path.join(mylar.CONFIG.SECURE_DIR, ".32p_cookies.dat"))
cj.load()
s.cookies = cj
# with cfscrape.create_scraper(delay=15) as s:
# s.headers = self.headers
# cj = LWPCookieJar(os.path.join(mylar.CONFIG.SECURE_DIR, ".32p_cookies.dat"))
# cj.load()
# s.cookies = cj

if mylar.CONFIG.VERIFY_32P == 1 or mylar.CONFIG.VERIFY_32P == True:
verify = True
else:
verify = False

logger.fdebug('[32P] Verify SSL set to : %s' % verify)
# logger.fdebug('[32P] Verify SSL set to : %s' % verify)

if not verify:
#32P throws back an insecure warning because it can't validate against the CA. The below suppresses the message just for 32P instead of being displa$
# #32P throws back an insecure warning because it can't validate against the CA. The below suppresses the message just for 32P instead of being displa$
from requests.packages.urllib3.exceptions import InsecureRequestWarning
requests.packages.urllib3.disable_warnings(InsecureRequestWarning)

# post to the login form
r = s.post(self.url, verify=verify, allow_redirects=True)
r = self.session.post(self.url, verify=verify, allow_redirects=True)

#logger.debug(self.module + " Content session reply" + r.text)
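The auth32p changes drop the per-call cfscrape.create_scraper() blocks in favour of the self.session object captured at login, so every 32P request reuses one authenticated, Cloudflare-aware session. A rough sketch of that idea (the cookie path and request parameters below are placeholders):

# Rough sketch: build one cfscrape session, load the saved 32P cookies into
# it, and reuse it for every request instead of creating a new scraper each time.
import cfscrape
from cookielib import LWPCookieJar   # Python 2, as in the module being patched

session = cfscrape.create_scraper()
cookies = LWPCookieJar('/path/to/.32p_cookies.dat')   # placeholder path
cookies.load()
session.cookies = cookies

# Later calls ride the same session (and its cookies/headers):
r = session.get('https://32pag.es/torrents.php', params={'action': 'serieslist'})
d = session.get('https://32pag.es/ajax.php', params={'action': 'groupsearch'})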
@ -246,147 +261,177 @@ class info32p(object):
|
|||
logger.warn('No results found for search on 32P.')
|
||||
return "no results"
|
||||
|
||||
with cfscrape.create_scraper() as s:
|
||||
s.headers = self.headers
|
||||
cj = LWPCookieJar(os.path.join(mylar.CONFIG.SECURE_DIR, ".32p_cookies.dat"))
|
||||
cj.load()
|
||||
s.cookies = cj
|
||||
data = []
|
||||
pdata = []
|
||||
pubmatch = False
|
||||
# with cfscrape.create_scraper(delay=15) as s:
|
||||
# s.headers = self.headers
|
||||
# cj = LWPCookieJar(os.path.join(mylar.CONFIG.SECURE_DIR, ".32p_cookies.dat"))
|
||||
# cj.load()
|
||||
# s.cookies = cj
|
||||
data = []
|
||||
pdata = []
|
||||
pubmatch = False
|
||||
|
||||
if any([series_search.startswith('0-Day Comics Pack'), torrentid is not None]):
|
||||
data.append({"id": torrentid,
|
||||
"series": series_search})
|
||||
else:
|
||||
if any([not chk_id, mylar.CONFIG.DEEP_SEARCH_32P is True]):
|
||||
if any([series_search.startswith('0-Day Comics Pack'), torrentid is not None]):
|
||||
data.append({"id": torrentid,
|
||||
"series": series_search})
|
||||
else:
|
||||
if any([not chk_id, mylar.CONFIG.DEEP_SEARCH_32P is True]):
|
||||
if mylar.CONFIG.SEARCH_32P is True:
|
||||
url = 'https://32pag.es/torrents.php' #?action=serieslist&filter=' + series_search #&filter=F
|
||||
params = {'action': 'serieslist', 'filter': series_search}
|
||||
time.sleep(1) #just to make sure we don't hammer, 1s pause.
|
||||
t = self.session.get(url, params=params, verify=True, allow_redirects=True)
|
||||
soup = BeautifulSoup(t.content, "html.parser")
|
||||
results = soup.find_all("a", {"class":"object-qtip"},{"data-type":"torrentgroup"})
|
||||
|
||||
for r in results:
|
||||
if mylar.CONFIG.SEARCH_32P is True:
|
||||
url = 'https://32pag.es/torrents.php' #?action=serieslist&filter=' + series_search #&filter=F
|
||||
params = {'action': 'serieslist', 'filter': series_search}
|
||||
time.sleep(1) #just to make sure we don't hammer, 1s pause.
|
||||
t = s.get(url, params=params, verify=True, allow_redirects=True)
|
||||
soup = BeautifulSoup(t.content, "html.parser")
|
||||
results = soup.find_all("a", {"class":"object-qtip"},{"data-type":"torrentgroup"})
|
||||
torrentid = r['data-id']
|
||||
torrentname = r.findNext(text=True)
|
||||
torrentname = torrentname.strip()
|
||||
else:
|
||||
torrentid = r['id']
|
||||
torrentname = r['series']
|
||||
|
||||
for r in results:
|
||||
if mylar.CONFIG.SEARCH_32P is True:
|
||||
torrentid = r['data-id']
|
||||
torrentname = r.findNext(text=True)
|
||||
torrentname = torrentname.strip()
|
||||
else:
|
||||
torrentid = r['id']
|
||||
torrentname = r['series']
|
||||
|
||||
as_d = filechecker.FileChecker()
|
||||
as_dinfo = as_d.dynamic_replace(torrentname)
|
||||
seriesresult = re.sub('\|','', as_dinfo['mod_seriesname']).strip()
|
||||
logger.fdebug('searchresult: %s --- %s [%s]' % (seriesresult, mod_series, publisher_search))
|
||||
if seriesresult.lower() == mod_series.lower():
|
||||
as_d = filechecker.FileChecker()
|
||||
as_dinfo = as_d.dynamic_replace(torrentname)
|
||||
seriesresult = re.sub('\|','', as_dinfo['mod_seriesname']).strip()
|
||||
logger.fdebug('searchresult: %s --- %s [%s]' % (seriesresult, mod_series, publisher_search))
|
||||
if seriesresult.lower() == mod_series.lower():
|
||||
logger.fdebug('[MATCH] %s [%s]' % (torrentname, torrentid))
|
||||
data.append({"id": torrentid,
|
||||
"series": torrentname})
|
||||
elif publisher_search.lower() in seriesresult.lower():
|
||||
logger.fdebug('[MATCH] Publisher match.')
|
||||
tmp_torrentname = re.sub(publisher_search.lower(), '', seriesresult.lower()).strip()
|
||||
as_t = filechecker.FileChecker()
|
||||
as_tinfo = as_t.dynamic_replace(tmp_torrentname)
|
||||
if re.sub('\|', '', as_tinfo['mod_seriesname']).strip() == mod_series.lower():
|
||||
logger.fdebug('[MATCH] %s [%s]' % (torrentname, torrentid))
|
||||
data.append({"id": torrentid,
|
||||
"series": torrentname})
|
||||
elif publisher_search.lower() in seriesresult.lower():
|
||||
logger.fdebug('[MATCH] Publisher match.')
|
||||
tmp_torrentname = re.sub(publisher_search.lower(), '', seriesresult.lower()).strip()
|
||||
as_t = filechecker.FileChecker()
|
||||
as_tinfo = as_t.dynamic_replace(tmp_torrentname)
|
||||
if re.sub('\|', '', as_tinfo['mod_seriesname']).strip() == mod_series.lower():
|
||||
logger.fdebug('[MATCH] %s [%s]' % (torrentname, torrentid))
|
||||
pdata.append({"id": torrentid,
|
||||
"series": torrentname})
|
||||
pubmatch = True
|
||||
pdata.append({"id": torrentid,
|
||||
"series": torrentname})
|
||||
pubmatch = True
|
||||
|
||||
logger.fdebug('%s series listed for searching that match.' % len(data))
|
||||
else:
|
||||
logger.fdebug('Exact series ID already discovered previously. Setting to : %s [%s]' % (chk_id['series'], chk_id['id']))
|
||||
pdata.append({"id": chk_id['id'],
|
||||
"series": chk_id['series']})
|
||||
pubmatch = True
|
||||
|
||||
if all([len(data) == 0, len(pdata) == 0]):
|
||||
return "no results"
|
||||
logger.fdebug('%s series listed for searching that match.' % len(data))
|
||||
else:
|
||||
dataset = []
|
||||
if len(data) > 0:
|
||||
dataset += data
|
||||
if len(pdata) > 0:
|
||||
dataset += pdata
|
||||
logger.fdebug(str(len(dataset)) + ' series match the tile being searched for on 32P...')
|
||||
logger.fdebug('Exact series ID already discovered previously. Setting to : %s [%s]' % (chk_id['series'], chk_id['id']))
|
||||
pdata.append({"id": chk_id['id'],
|
||||
"series": chk_id['series']})
|
||||
pubmatch = True
|
||||
|
||||
if all([chk_id is None, not series_search.startswith('0-Day Comics Pack'), self.searchterm['torrentid_32p'] is not None, self.searchterm['torrentid_32p'] != 'None']) and any([len(data) == 1, len(pdata) == 1]):
|
||||
#update the 32p_reference so we avoid doing a url lookup next time
|
||||
helpers.checkthe_id(comic_id, dataset)
|
||||
if all([len(data) == 0, len(pdata) == 0]):
|
||||
return "no results"
|
||||
else:
|
||||
dataset = []
|
||||
if len(data) > 0:
|
||||
dataset += data
|
||||
if len(pdata) > 0:
|
||||
dataset += pdata
|
||||
logger.fdebug(str(len(dataset)) + ' series match the tile being searched for on 32P...')
|
||||
|
||||
if all([chk_id is None, not series_search.startswith('0-Day Comics Pack'), self.searchterm['torrentid_32p'] is not None, self.searchterm['torrentid_32p'] != 'None']) and any([len(data) == 1, len(pdata) == 1]):
|
||||
#update the 32p_reference so we avoid doing a url lookup next time
|
||||
helpers.checkthe_id(comic_id, dataset)
|
||||
else:
|
||||
if all([not series_search.startswith('0-Day Comics Pack'), self.searchterm['torrentid_32p'] is not None, self.searchterm['torrentid_32p'] != 'None']):
|
||||
pass
|
||||
else:
|
||||
if all([not series_search.startswith('0-Day Comics Pack'), self.searchterm['torrentid_32p'] is not None, self.searchterm['torrentid_32p'] != 'None']):
|
||||
pass
|
||||
else:
|
||||
logger.debug('Unable to properly verify reference on 32P - will update the 32P reference point once the issue has been successfully matched against.')
|
||||
logger.debug('Unable to properly verify reference on 32P - will update the 32P reference point once the issue has been successfully matched against.')
|
||||
|
||||
results32p = []
|
||||
resultlist = {}
|
||||
results32p = []
|
||||
resultlist = {}
|
||||
|
||||
for x in dataset:
#for 0-day packs, issue=week#, volume=month, id=0-day year pack (ie.issue=21&volume=2 for feb.21st)
payload = {"action": "groupsearch",
"id": x['id'], #searchid,
"issue": issue_search}
#in order to match up against 0-day stuff, volume has to be none at this point
#when doing other searches tho, this should be allowed to go through
#if all([volume_search != 'None', volume_search is not None]):
#    payload.update({'volume': re.sub('v', '', volume_search).strip()})
if series_search.startswith('0-Day Comics Pack'):
payload.update({"volume": volume_search})

payload = json.dumps(payload)
payload = json.loads(payload)

logger.fdebug('payload: %s' % payload)
url = 'https://32pag.es/ajax.php'
time.sleep(1) #just to make sure we don't hammer, 1s pause.
try:
d = s.get(url, params=payload, verify=True, allow_redirects=True)
d = self.session.get(url, params=payload, verify=True, allow_redirects=True)
except Exception as e:
logger.error('%s [%s] Could not POST URL %s' % (self.module, e, url))

try:
searchResults = d.json()
except:
except Exception as e:
searchResults = d.text
logger.debug('%s Search Result did not return valid JSON, falling back on text: %s' % (self.module, searchResults.text))
logger.debug('[%s] %s Search Result did not return valid JSON, falling back on text: %s' % (e, self.module, searchResults.text))
return False
if searchResults['status'] == 'success' and searchResults['count'] > 0:
logger.fdebug('successfully retrieved %s search results' % searchResults['count'])
for a in searchResults['details']:
if series_search.startswith('0-Day Comics Pack'):
title = series_search
else:
title = self.searchterm['series'] + ' v' + a['volume'] + ' #' + a['issues']
results32p.append({'link': a['id'],
'title': title,
'filesize': a['size'],
'issues': a['issues'],
'pack': a['pack'],
'format': a['format'],
'language': a['language'],
'seeders': a['seeders'],
'leechers': a['leechers'],
'scanner': a['scanner'],
'chkit': {'id': x['id'], 'series': x['series']},
'pubdate': datetime.datetime.fromtimestamp(float(a['upload_time'])).strftime('%a, %d %b %Y %H:%M:%S'),
'int_pubdate': float(a['upload_time'])})

else:
logger.fdebug('32P did not return any valid search results.')

if len(results32p) > 0:
resultlist['entries'] = sorted(results32p, key=itemgetter('pack','title'), reverse=False)
logger.debug('%s Resultslist: %s' % (self.module, resultlist))
else:
resultlist = 'no results'

return resultlist
def downloadfile(self, payload, filepath):
url = 'https://32pag.es/torrents.php'
try:
r = self.session.get(url, params=payload, verify=True, stream=True, allow_redirects=True)
except Exception as e:
logger.error('%s [%s] Could not POST URL %s' % ('[32P-DOWNLOADER]', e, url))
return False

if str(r.status_code) != '200':
logger.warn('Unable to download torrent from 32P [Status Code returned: %s]' % r.status_code)
if str(r.status_code) == '404' and site == '32P':
logger.warn('[32P-CACHED_ENTRY] Entry found in 32P cache - incorrect. Torrent has probably been merged into a pack, or another series id. Removing from cache.')
helpers.delete_cache_entry(linkit)
else:
logger.info('content: %s' % r.content)
return False

with open(filepath, 'wb') as f:
for chunk in r.iter_content(chunk_size=1024):
if chunk: # filter out keep-alive new chunks
f.write(chunk)
f.flush()

return True

def delete_cache_entry(self, id):
myDB = db.DBConnection()
myDB.action("DELETE FROM rssdb WHERE link=? AND Site='32P'", [id])
class LoginSession(object):
def __init__(self, un, pw, session_path=None):
'''

@ -399,7 +444,7 @@ class info32p(object):
'''
self.module = '[32P-AUTHENTICATION]'
try:
self.ses = cfscrape.create_scraper()
self.ses = cfscrape.create_scraper(delay=15)
except Exception as e:
logger.error('%s Can\'t create session with cfscrape' % self.module)

@ -466,7 +511,7 @@ class info32p(object):

if r.status_code != 200:
if r.status_code == 302:
newloc = r.headers.get('location', '')
newloc = r.headers.get('Location', '')
logger.warn('Got redirect from the POST-ajax action=login GET: %s' % newloc)
self.error = {'status':'redirect-error', 'message':'got redirect from POST-ajax login action : ' + newloc}
else:

@ -614,16 +659,19 @@ class info32p(object):
if (self.test_skey_valid()):
logger.fdebug('%s Session key-based login was good.' % self.module)
self.method = 'Session Cookie retrieved OK.'
return True
return {'ses': self.ses,
'status': True}

if (self.test_login()):
logger.fdebug('%s Credential-based login was good.' % self.module)
self.method = 'Credential-based login OK.'
return True
return {'ses': self.ses,
'status': True}

logger.warn('%s Both session key and credential-based logins failed.' % self.module)
self.method = 'Both session key & credential login failed.'
return False
return {'ses': self.ses,
'status': False}

#if __name__ == '__main__':
@ -58,8 +58,8 @@ def run(dirName, nzbName=None, issueid=None, comversion=None, manual=None, filen
else:
shutil.copy(filepath, new_filepath)
filepath = new_filepath
except:
logger.warn(module + ' Unexpected Error: %s' % sys.exc_info()[0])
except Exception as e:
logger.warn('%s Unexpected Error: %s [%s]' % (module, sys.exc_info()[0], e))
logger.warn(module + ' Unable to create temporary directory to perform meta-tagging. Processing without metatagging.')
tidyup(og_filepath, new_filepath, new_folder, manualmeta)
return "fail"
@ -75,6 +75,7 @@ _CONFIG_DEFINITIONS = OrderedDict({
'ALTERNATE_LATEST_SERIES_COVERS': (bool, 'General', False),
'SHOW_ICONS': (bool, 'General', False),
'FORMAT_BOOKTYPE': (bool, 'General', False),
'CLEANUP_CACHE': (bool, 'General', False),
'SECURE_DIR': (str, 'General', None),

'RSS_CHECKINTERVAL': (int, 'Scheduler', 20),

@ -210,6 +211,7 @@ _CONFIG_DEFINITIONS = OrderedDict({
'SAB_PRIORITY': (str, 'SABnzbd', "Default"),
'SAB_TO_MYLAR': (bool, 'SABnzbd', False),
'SAB_DIRECTORY': (str, 'SABnzbd', None),
'SAB_VERSION': (str, 'SABnzbd', None),
'SAB_CLIENT_POST_PROCESSING': (bool, 'SABnzbd', False), #0/False: ComicRN.py, #1/True: Completed Download Handling

'NZBGET_HOST': (str, 'NZBGet', None),

@ -346,7 +348,7 @@ _CONFIG_DEFINITIONS = OrderedDict({
'QBITTORRENT_PASSWORD': (str, 'qBittorrent', None),
'QBITTORRENT_LABEL': (str, 'qBittorrent', None),
'QBITTORRENT_FOLDER': (str, 'qBittorrent', None),
'QBITTORRENT_STARTONLOAD': (bool, 'qBittorrent', False),
'QBITTORRENT_LOADACTION': (str, 'qBittorrent', 'default'), #default, force_start, paused

'OPDS_ENABLE': (bool, 'OPDS', False),
'OPDS_AUTHENTICATION': (bool, 'OPDS', False),
@ -791,6 +793,26 @@ class Config(object):
logger.error('SECURE-DIR-MOVE] Unable to move cookies file into secure location. This is a fatal error.')
sys.exit()

if self.CLEANUP_CACHE is True:
logger.fdebug('[Cache Cleanup] Cache Cleanup initiated. Will delete items from cache that are no longer needed.')
cache_types = ['*.nzb', '*.torrent', '*.zip', '*.html', 'mylar_*']
cntr = 0
for x in cache_types:
for f in glob.glob(os.path.join(self.CACHE_DIR,x)):
try:
if os.path.isdir(f):
shutil.rmtree(f)
else:
os.remove(f)
except Exception as e:
logger.warn('[ERROR] Unable to remove %s from cache. Could be a possible permissions issue ?' % f)
cntr+=1

if cntr > 1:
logger.fdebug('[Cache Cleanup] Cache Cleanup finished. Cleaned %s items' % cntr)
else:
logger.fdebug('[Cache Cleanup] Cache Cleanup finished. Nothing to clean!')

if all([self.GRABBAG_DIR is None, self.DESTINATION_DIR is not None]):
self.GRABBAG_DIR = os.path.join(self.DESTINATION_DIR, 'Grabbag')
logger.fdebug('[Grabbag Directory] Setting One-Off directory to default location: %s' % self.GRABBAG_DIR)

@ -816,7 +838,6 @@ class Config(object):
mylar.RSS_STATUS = 'Waiting'
elif self.ENABLE_RSS is False and mylar.RSS_STATUS == 'Waiting':
mylar.RSS_STATUS = 'Paused'
logger.info('self.enable_rss is %s [%s]' % (self.ENABLE_RSS, mylar.RSS_STATUS))

if not helpers.is_number(self.CHMOD_DIR):
logger.fdebug("CHMOD Directory value is not a valid numeric - please correct. Defaulting to 0777")

@ -864,8 +885,10 @@ class Config(object):
else:
logger.fdebug('Successfully created ComicTagger Settings location.')

if self.DDL_LOCATION is None:
if not self.DDL_LOCATION:
self.DDL_LOCATION = self.CACHE_DIR
if self.ENABLE_DDL is True:
logger.info('Setting DDL Location set to : %s' % self.DDL_LOCATION)

if self.MODE_32P is False and self.RSSFEED_32P is not None:
mylar.KEYS_32P = self.parse_32pfeed(self.RSSFEED_32P)

@ -896,6 +919,12 @@ class Config(object):
elif self.SAB_PRIORITY == "4": self.SAB_PRIORITY = "Paused"
else: self.SAB_PRIORITY = "Default"

if self.SAB_VERSION is not None:
config.set('SABnzbd', 'sab_version', self.SAB_VERSION)
if int(re.sub("[^0-9]", '', self.SAB_VERSION).strip()) < int(re.sub("[^0-9]", '', '0.8.0').strip()) and self.SAB_CLIENT_POST_PROCESSING is True:
logger.warn('Your SABnzbd client is less than 0.8.0, and does not support Completed Download Handling which is enabled. Disabling CDH.')
self.SAB_CLIENT_POST_PROCESSING = False

mylar.USE_WATCHDIR = False
mylar.USE_UTORRENT = False
mylar.USE_RTORRENT = False
212
mylar/cv.py
@ -72,7 +72,7 @@ def pulldetails(comicid, type, issueid=None, offset=1, arclist=None, comicidlist
elif type == 'storyarc':
PULLURL = mylar.CVURL + 'story_arcs/?api_key=' + str(comicapi) + '&format=xml&filter=name:' + str(issueid) + '&field_list=cover_date'
elif type == 'comicyears':
PULLURL = mylar.CVURL + 'volumes/?api_key=' + str(comicapi) + '&format=xml&filter=id:' + str(comicidlist) + '&field_list=name,id,start_year,publisher,description,deck&offset=' + str(offset)
PULLURL = mylar.CVURL + 'volumes/?api_key=' + str(comicapi) + '&format=xml&filter=id:' + str(comicidlist) + '&field_list=name,id,start_year,publisher,description,deck,aliases&offset=' + str(offset)
elif type == 'import':
PULLURL = mylar.CVURL + 'issues/?api_key=' + str(comicapi) + '&format=xml&filter=id:' + (comicidlist) + '&field_list=cover_date,id,issue_number,name,date_last_updated,store_date,volume' + '&offset=' + str(offset)
elif type == 'update_dates':
@ -271,7 +271,7 @@ def GetComicInfo(comicid, dom, safechk=None):
comic['ComicYear'] = '0000'

#safety check, cause you known, dufus'...
if comic['ComicYear'][-1:] == '-':
if any([comic['ComicYear'][-1:] == '-', comic['ComicYear'][-1:] == '?']):
comic['ComicYear'] = comic['ComicYear'][:-1]

try:
@ -340,10 +340,10 @@ def GetComicInfo(comicid, dom, safechk=None):
comic['Type'] = 'TPB'
elif 'hardcover' in comic_desc[:60].lower() and 'hardcover can be found' not in comic_desc.lower():
comic['Type'] = 'HC'
elif any(['one-shot' in comic_desc[:60].lower(), 'one shot' in comic_desc[:60].lower()]) and 'can be found' not in comic_desc.lower():
elif any(['one-shot' in comic_desc[:60].lower(), 'one shot' in comic_desc[:60].lower()]) and any(['can be found' not in comic_desc.lower(), 'following the' not in comic_desc.lower()]):
i = 0
comic['Type'] = 'One-Shot'
avoidwords = ['preceding', 'after the special']
avoidwords = ['preceding', 'after the special', 'following the']
while i < 2:
if i == 0:
cbd = 'one-shot'
@ -374,16 +374,22 @@ def GetComicInfo(comicid, dom, safechk=None):
#if it's point form bullets, ignore it cause it's not the current volume stuff.
test_it = desc_soup.find('ul')
if test_it:
for x in test_it.findAll('a'):
micdrop.append(x['data-ref-id'])
for x in test_it.findAll('li'):
if any(['Next' in x.findNext(text=True), 'Previous' in x.findNext(text=True)]):
mic_check = x.find('a')
micdrop.append(mic_check['data-ref-id'])

for fc in desclinks:
#logger.info('fc: %s' % fc)
fc_id = fc['data-ref-id']
#logger.info('fc_id: %s' % fc_id)
try:
fc_id = fc['data-ref-id']
except:
continue

if fc_id in micdrop:
continue

fc_name = fc.findNext(text=True)

if fc_id.startswith('4000'):
fc_cid = None
fc_isid = fc_id
@ -394,17 +400,24 @@ def GetComicInfo(comicid, dom, safechk=None):
fc_cid = fc_id
fc_isid = None
issuerun = fc.next_sibling
lines = re.sub("[^0-9]", ' ', issuerun).strip().split(' ')
if len(lines) > 0:
for x in sorted(lines, reverse=True):
srchline = issuerun.rfind(x)
if srchline != -1:
try:
if issuerun[srchline+len(x)] == ',' or issuerun[srchline+len(x)] == '.' or issuerun[srchline+len(x)] == ' ':
issuerun = issuerun[:srchline+len(x)]
break
except:
continue
if issuerun is not None:
lines = re.sub("[^0-9]", ' ', issuerun).strip().split(' ')
if len(lines) > 0:
for x in sorted(lines, reverse=True):
srchline = issuerun.rfind(x)
if srchline != -1:
try:
if issuerun[srchline+len(x)] == ',' or issuerun[srchline+len(x)] == '.' or issuerun[srchline+len(x)] == ' ':
issuerun = issuerun[:srchline+len(x)]
break
except Exception as e:
logger.warn('[ERROR] %s' % e)
continue
else:
iss_start = fc_name.find('#')
issuerun = fc_name[iss_start:].strip()
fc_name = fc_name[:iss_start].strip()

if issuerun.endswith('.') or issuerun.endswith(','):
#logger.fdebug('Changed issuerun from %s to %s' % (issuerun, issuerun[:-1]))
issuerun = issuerun[:-1]
|
||||
|
@ -412,7 +425,8 @@ def GetComicInfo(comicid, dom, safechk=None):
issuerun = issuerun[:-4].strip()
elif issuerun.endswith(' and'):
issuerun = issuerun[:-3].strip()

else:
continue
# except:
# pass
issue_list.append({'series': fc_name,
|
||||
|
@ -422,7 +436,10 @@ def GetComicInfo(comicid, dom, safechk=None):
#first_collect = cis

logger.info('Collected issues in volume: %s' % issue_list)
comic['Issue_List'] = issue_list
if len(issue_list) == 0:
comic['Issue_List'] = 'None'
else:
comic['Issue_List'] = issue_list
else:
comic['Issue_List'] = 'None'
|
||||
|
||||
|
@ -708,11 +725,12 @@ def GetSeriesYears(dom):
tempseries['SeriesYear'] = tempseries['SeriesYear'][:-1]

desdeck = 0
tempseries['Volume'] = 'None'

#the description field actually holds the Volume# - so let's grab it
desc_soup = None
try:
descchunk = dm.getElementsByTagName('description')[0].firstChild.wholeText
desc_soup = Soup(descchunk, "html.parser")
desclinks = desc_soup.findAll('a')
comic_desc = drophtml(descchunk)
desdeck +=1
except:
|
||||
|
@ -726,6 +744,139 @@ def GetSeriesYears(dom):
|
|||
except:
|
||||
comic_deck = 'None'
|
||||
|
||||
#comic['ComicDescription'] = comic_desc
|
||||
|
||||
try:
|
||||
tempseries['Aliases'] = dm.getElementsByTagName('aliases')[0].firstChild.wholeText
|
||||
tempseries['Aliases'] = re.sub('\n', '##', tempseries['Aliases']).strip()
|
||||
if tempseries['Aliases'][-2:] == '##':
|
||||
tempseries['Aliases'] = tempseries['Aliases'][:-2]
|
||||
#logger.fdebug('Aliases: ' + str(aliases))
|
||||
except:
|
||||
tempseries['Aliases'] = 'None'
|
||||
|
||||
tempseries['Volume'] = 'None' #noversion'
|
||||
|
||||
#figure out if it's a print / digital edition.
|
||||
tempseries['Type'] = 'None'
|
||||
if comic_deck != 'None':
|
||||
if any(['print' in comic_deck.lower(), 'digital' in comic_deck.lower(), 'paperback' in comic_deck.lower(), 'one shot' in re.sub('-', '', comic_deck.lower()).strip(), 'hardcover' in comic_deck.lower()]):
|
||||
if 'print' in comic_deck.lower():
|
||||
tempseries['Type'] = 'Print'
|
||||
elif 'digital' in comic_deck.lower():
|
||||
tempseries['Type'] = 'Digital'
|
||||
elif 'paperback' in comic_deck.lower():
|
||||
tempseries['Type'] = 'TPB'
|
||||
elif 'hardcover' in comic_deck.lower():
|
||||
tempseries['Type'] = 'HC'
|
||||
elif 'oneshot' in re.sub('-', '', comic_deck.lower()).strip():
|
||||
tempseries['Type'] = 'One-Shot'
|
||||
|
||||
if comic_desc != 'None' and tempseries['Type'] == 'None':
|
||||
if 'print' in comic_desc[:60].lower() and 'print edition can be found' not in comic_desc.lower():
|
||||
tempseries['Type'] = 'Print'
|
||||
elif 'digital' in comic_desc[:60].lower() and 'digital edition can be found' not in comic_desc.lower():
|
||||
tempseries['Type'] = 'Digital'
|
||||
elif all(['paperback' in comic_desc[:60].lower(), 'paperback can be found' not in comic_desc.lower()]) or 'collects' in comic_desc[:60].lower():
|
||||
tempseries['Type'] = 'TPB'
|
||||
elif 'hardcover' in comic_desc[:60].lower() and 'hardcover can be found' not in comic_desc.lower():
|
||||
tempseries['Type'] = 'HC'
|
||||
elif any(['one-shot' in comic_desc[:60].lower(), 'one shot' in comic_desc[:60].lower()]) and any(['can be found' not in comic_desc.lower(), 'following the' not in comic_desc.lower()]):
|
||||
i = 0
|
||||
tempseries['Type'] = 'One-Shot'
|
||||
avoidwords = ['preceding', 'after the special', 'following the']
|
||||
while i < 2:
|
||||
if i == 0:
|
||||
cbd = 'one-shot'
|
||||
elif i == 1:
|
||||
cbd = 'one shot'
|
||||
tmp1 = comic_desc[:60].lower().find(cbd)
|
||||
if tmp1 != -1:
|
||||
for x in avoidwords:
|
||||
tmp2 = comic_desc[:tmp1].lower().find(x)
|
||||
if tmp2 != -1:
|
||||
logger.fdebug('FAKE NEWS: caught incorrect reference to one-shot. Forcing to Print')
|
||||
tempseries['Type'] = 'Print'
|
||||
i = 3
|
||||
break
|
||||
i+=1
|
||||
else:
|
||||
tempseries['Type'] = 'Print'
|
||||
|
||||
if all([comic_desc != 'None', 'trade paperback' in comic_desc[:30].lower(), 'collecting' in comic_desc[:40].lower()]):
|
||||
#ie. Trade paperback collecting Marvel Team-Up #9-11, 48-51, 72, 110 & 145.
|
||||
first_collect = comic_desc.lower().find('collecting')
|
||||
#logger.info('first_collect: %s' % first_collect)
|
||||
#logger.info('comic_desc: %s' % comic_desc)
|
||||
#logger.info('desclinks: %s' % desclinks)
|
||||
issue_list = []
|
||||
micdrop = []
|
||||
if desc_soup is not None:
|
||||
#if it's point form bullets, ignore it cause it's not the current volume stuff.
|
||||
test_it = desc_soup.find('ul')
|
||||
if test_it:
|
||||
for x in test_it.findAll('li'):
|
||||
if any(['Next' in x.findNext(text=True), 'Previous' in x.findNext(text=True)]):
|
||||
mic_check = x.find('a')
|
||||
micdrop.append(mic_check['data-ref-id'])
|
||||
|
||||
for fc in desclinks:
|
||||
#logger.info('fc: %s' % fc)
|
||||
fc_id = fc['data-ref-id']
|
||||
#logger.info('fc_id: %s' % fc_id)
|
||||
if fc_id in micdrop:
|
||||
continue
|
||||
fc_name = fc.findNext(text=True)
|
||||
if fc_id.startswith('4000'):
|
||||
fc_cid = None
|
||||
fc_isid = fc_id
|
||||
iss_start = fc_name.find('#')
|
||||
issuerun = fc_name[iss_start:].strip()
|
||||
fc_name = fc_name[:iss_start].strip()
|
||||
elif fc_id.startswith('4050'):
|
||||
fc_cid = fc_id
|
||||
fc_isid = None
|
||||
issuerun = fc.next_sibling
|
||||
if issuerun is not None:
|
||||
lines = re.sub("[^0-9]", ' ', issuerun).strip().split(' ')
|
||||
if len(lines) > 0:
|
||||
for x in sorted(lines, reverse=True):
|
||||
srchline = issuerun.rfind(x)
|
||||
if srchline != -1:
|
||||
try:
|
||||
if issuerun[srchline+len(x)] == ',' or issuerun[srchline+len(x)] == '.' or issuerun[srchline+len(x)] == ' ':
|
||||
issuerun = issuerun[:srchline+len(x)]
|
||||
break
|
||||
except Exception as e:
|
||||
logger.warn('[ERROR] %s' % e)
|
||||
continue
|
||||
else:
|
||||
iss_start = fc_name.find('#')
|
||||
issuerun = fc_name[iss_start:].strip()
|
||||
fc_name = fc_name[:iss_start].strip()
|
||||
|
||||
if issuerun.endswith('.') or issuerun.endswith(','):
|
||||
#logger.fdebug('Changed issuerun from %s to %s' % (issuerun, issuerun[:-1]))
|
||||
issuerun = issuerun[:-1]
|
||||
if issuerun.endswith(' and '):
|
||||
issuerun = issuerun[:-4].strip()
|
||||
elif issuerun.endswith(' and'):
|
||||
issuerun = issuerun[:-3].strip()
|
||||
else:
|
||||
continue
|
||||
# except:
|
||||
# pass
|
||||
issue_list.append({'series': fc_name,
|
||||
'comicid': fc_cid,
|
||||
'issueid': fc_isid,
|
||||
'issues': issuerun})
|
||||
#first_collect = cis
|
||||
|
||||
logger.info('Collected issues in volume: %s' % issue_list)
|
||||
tempseries['Issue_List'] = issue_list
|
||||
else:
|
||||
tempseries['Issue_List'] = 'None'
|
||||
|
||||
while (desdeck > 0):
|
||||
if desdeck == 1:
|
||||
if comic_desc == 'None':
|
||||
|
@ -750,11 +901,11 @@ def GetSeriesYears(dom):
if i == 0:
vfind = comicDes[v_find:v_find +15] #if it's volume 5 format
basenums = {'zero': '0', 'one': '1', 'two': '2', 'three': '3', 'four': '4', 'five': '5', 'six': '6', 'seven': '7', 'eight': '8', 'nine': '9', 'ten': '10', 'i': '1', 'ii': '2', 'iii': '3', 'iv': '4', 'v': '5'}
logger.fdebug('volume X format - ' + str(i) + ': ' + vfind)
logger.fdebug('volume X format - %s: %s' % (i, vfind))
else:
vfind = comicDes[:v_find] # if it's fifth volume format
basenums = {'zero': '0', 'first': '1', 'second': '2', 'third': '3', 'fourth': '4', 'fifth': '5', 'sixth': '6', 'seventh': '7', 'eighth': '8', 'nineth': '9', 'tenth': '10', 'i': '1', 'ii': '2', 'iii': '3', 'iv': '4', 'v': '5'}
logger.fdebug('X volume format - ' + str(i) + ': ' + vfind)
logger.fdebug('X volume format - %s: %s' % (i, vfind))
volconv = ''
for nums in basenums:
if nums in vfind.lower():
|
||||
|
@ -763,6 +914,7 @@ def GetSeriesYears(dom):
break
#logger.info('volconv: ' + str(volconv))

#now we attempt to find the character position after the word 'volume'
if i == 0:
volthis = vfind.lower().find('volume')
volthis = volthis + 6 # add on the actual word to the position so that we can grab the subsequent digit
|
||||
|
@ -780,7 +932,7 @@ def GetSeriesYears(dom):
ledigit = re.sub("[^0-9]", "", vf[0])
if ledigit != '':
tempseries['Volume'] = ledigit
logger.fdebug("Volume information found! Adding to series record : volume " + tempseries['Volume'])
logger.fdebug("Volume information found! Adding to series record : volume %s" % tempseries['Volume'])
break
except:
pass
|
||||
|
@ -790,7 +942,7 @@ def GetSeriesYears(dom):
i += 1

if tempseries['Volume'] == 'None':
logger.fdebug('tempseries[Volume]:' + str(tempseries['Volume']))
logger.fdebug('tempseries[Volume]: %s' % tempseries['Volume'])
desdeck -= 1
else:
break
|
||||
|
@ -800,7 +952,9 @@ def GetSeriesYears(dom):
"ComicName": tempseries['Series'],
"SeriesYear": tempseries['SeriesYear'],
"Publisher": tempseries['Publisher'],
"Volume": tempseries['Volume']})
"Volume": tempseries['Volume'],
"Aliases": tempseries['Aliases'],
"Type": tempseries['Type']})

return serieslist
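The volume detection feeding the serieslist above leans on two lookup tables, one for "volume five" phrasing and one for "fifth volume" phrasing in the ComicVine description. A rough standalone sketch of that lookup, with an illustrative helper name and simplified tables (this is not the actual GetSeriesYears code):

# Illustrative sketch only - not part of mylar/cv.py.
import re

def guess_volume(comic_desc):
    basenums_post = {'one': '1', 'two': '2', 'three': '3', 'four': '4', 'five': '5'}    # "volume five"
    basenums_pre = {'first': '1', 'second': '2', 'third': '3', 'fourth': '4', 'fifth': '5'}  # "fifth volume"
    lowdesc = comic_desc.lower()
    v_find = lowdesc.find('volume')
    if v_find == -1:
        return 'None'
    after = lowdesc[v_find:v_find + 15]        # "volume X" form: look just after the keyword
    for word, num in basenums_post.items():
        if word in after:
            return num
    before = lowdesc[:v_find]                  # "X volume" form: look before the keyword
    for word, num in basenums_pre.items():
        if word in before:
            return num
    digits = re.sub('[^0-9]', '', after)       # fall back to a bare digit after the keyword
    return digits if digits != '' else 'None'

print(guess_volume('This is the fifth volume of the series.'))   # 5
print(guess_volume('Volume 3 of the relaunched title.'))         # 3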
|
||||
|
||||
|
|
|
@ -26,8 +26,7 @@ import time
import Queue

import mylar

from mylar import logger
import logger

db_lock = threading.Lock()
mylarQueue = Queue.Queue()
|
||||
|
|
|
@ -50,7 +50,8 @@ class FileChecker(object):
self.watchcomic = re.sub('\?', '', watchcomic).strip() #strip the ? sepearte since it affects the regex.
self.watchcomic = re.sub(u'\u2014', ' - ', watchcomic).strip() #replace the \u2014 with a normal - because this world is f'd up enough to have something like that.
self.watchcomic = re.sub(u'\u2013', ' - ', watchcomic).strip() #replace the \u2013 with a normal - because again, people are dumb.
self.watchcomic = unicodedata.normalize('NFKD', self.watchcomic).encode('ASCII', 'ignore')
if type(self.watchcomic) != str:
self.watchcomic = unicodedata.normalize('NFKD', self.watchcomic).encode('ASCII', 'ignore')
else:
self.watchcomic = None
|
||||
|
||||
|
@ -107,7 +108,6 @@ class FileChecker(object):
|
|||
self.AS_Alt = AS_Alternates['AS_Alt']
|
||||
self.AS_Tuple = AS_Alternates['AS_Tuple']
|
||||
|
||||
|
||||
def listFiles(self):
|
||||
comiclist = []
|
||||
watchmatch = {}
|
||||
|
@ -122,6 +122,7 @@ class FileChecker(object):
|
|||
'comiclocation': runresults['comiclocation'],
|
||||
'series_name': runresults['series_name'],
|
||||
'series_name_decoded': runresults['series_name_decoded'],
|
||||
'issueid': runresults['issueid'],
|
||||
'dynamic_name': runresults['dynamic_name'],
|
||||
'series_volume': runresults['series_volume'],
|
||||
'alt_series': runresults['alt_series'],
|
||||
|
@ -129,7 +130,8 @@ class FileChecker(object):
|
|||
'issue_year': runresults['issue_year'],
|
||||
'issue_number': runresults['issue_number'],
|
||||
'scangroup': runresults['scangroup'],
|
||||
'reading_order': runresults['reading_order']
|
||||
'reading_order': runresults['reading_order'],
|
||||
'booktype': runresults['booktype']
|
||||
}
|
||||
else:
|
||||
filelist = self.traverse_directories(self.dir)
|
||||
|
@ -159,6 +161,7 @@ class FileChecker(object):
|
|||
'comiclocation': runresults['comiclocation'],
|
||||
'series_name': runresults['series_name'],
|
||||
'series_name_decoded': runresults['series_name_decoded'],
|
||||
'issueid': runresults['issueid'],
|
||||
'alt_series': runresults['alt_series'],
|
||||
'alt_issue': runresults['alt_issue'],
|
||||
'dynamic_name': runresults['dynamic_name'],
|
||||
|
@ -166,7 +169,8 @@ class FileChecker(object):
|
|||
'issue_year': runresults['issue_year'],
|
||||
'issue_number': runresults['issue_number'],
|
||||
'scangroup': runresults['scangroup'],
|
||||
'reading_order': runresults['reading_order']
|
||||
'reading_order': runresults['reading_order'],
|
||||
'booktype': runresults['booktype']
|
||||
})
|
||||
else:
|
||||
comiclist.append({
|
||||
|
@ -179,7 +183,9 @@ class FileChecker(object):
|
|||
'IssueYear': runresults['issue_year'],
|
||||
'JusttheDigits': runresults['justthedigits'],
|
||||
'AnnualComicID': runresults['annual_comicid'],
|
||||
'scangroup': runresults['scangroup']
|
||||
'issueid': runresults['issueid'],
|
||||
'scangroup': runresults['scangroup'],
|
||||
'booktype': runresults['booktype']
|
||||
})
|
||||
comiccnt +=1
|
||||
else:
|
||||
|
@ -194,7 +200,9 @@ class FileChecker(object):
|
|||
'alt_issue': runresults['alt_issue'],
|
||||
'issue_year': runresults['issue_year'],
|
||||
'issue_number': runresults['issue_number'],
|
||||
'scangroup': runresults['scangroup']
|
||||
'issueid': runresults['issueid'],
|
||||
'scangroup': runresults['scangroup'],
|
||||
'booktype': runresults['booktype']
|
||||
})
|
||||
|
||||
watchmatch['comiccount'] = comiccnt
|
||||
|
@ -226,13 +234,12 @@ class FileChecker(object):
|
|||
ab = len(path)
|
||||
tmppath = subpath[ab:]
|
||||
else:
|
||||
tmppath = re.sub(path, '', subpath).strip()
|
||||
tmppath = subpath.replace(path, '').strip()
|
||||
|
||||
path_list = os.path.normpath(tmppath)
|
||||
if '/' == path_list[0] or '\\' == path_list[0]:
|
||||
#need to remove any leading slashes so the os join can properly join the components
|
||||
path_list = path_list[1:]
|
||||
#path_list = tmppath.split(os.sep)[-1]
|
||||
logger.fdebug('[SUB-PATH] subpath set to : ' + path_list)
|
||||
|
||||
|
||||
|
@ -283,6 +290,16 @@ class FileChecker(object):

modfilename = modfilename.replace('()','').strip()

issueid = None
x = modfilename.find('[__')
if x != -1:
y = modfilename.find('__]', x)
if y != -1:
issueid = modfilename[x+3:y]
logger.fdebug('issueid: %s' % issueid)
modfilename = '%s %s'.strip() % (modfilename[:x], modfilename[y+3:])
logger.fdebug('issueid %s removed successsfully: %s' % (issueid, modfilename))

#here we take a snapshot of the current modfilename, the intent is that we will remove characters that match
#as we discover them - namely volume, issue #, years, etc
#the remaining strings should be the series title and/or issue title if present (has to be detected properly)
|
||||
|
@ -390,7 +407,7 @@ class FileChecker(object):
lastmod_position = 0
booktype = 'issue'
#exceptions that are considered alpha-numeric issue numbers
exceptions = ('NOW', 'AI', 'AU', 'X', 'A', 'B', 'C', 'INH', 'MU')
exceptions = ('NOW', 'AI', 'AU', 'X', 'A', 'B', 'C', 'INH', 'MU', 'SUMMER', 'SPRING', 'FALL', 'WINTER')

#unicode characters, followed by int value
# num_exceptions = [{iss:u'\xbd',val:.5},{iss:u'\xbc',val:.25}, {iss:u'\xe',val:.75}, {iss:u'\221e',val:'infinity'}]
|
||||
|
@ -444,7 +461,14 @@ class FileChecker(object):
|
|||
'position': split_file.index(sf),
|
||||
'mod_position': self.char_file_position(modfilename, sf, lastmod_position),
|
||||
'validcountchk': validcountchk})
|
||||
|
||||
else:
|
||||
test_position = modfilename[self.char_file_position(modfilename, sf,lastmod_position)-1]
|
||||
if test_position == '#':
|
||||
possible_issuenumbers.append({'number': sf,
|
||||
'position': split_file.index(sf),
|
||||
'mod_position': self.char_file_position(modfilename, sf, lastmod_position),
|
||||
'validcountchk': validcountchk})
|
||||
|
||||
if sf == 'XCV':
|
||||
# new 2016-09-19 \ attempt to check for XCV which replaces any unicode above
|
||||
for x in list(wrds):
|
||||
|
@ -1052,6 +1076,7 @@ class FileChecker(object):
|
|||
'comiclocation': self.dir,
|
||||
'series_name': series_name,
|
||||
'series_name_decoded': series_name_decoded,
|
||||
'issueid': issueid,
|
||||
'alt_series': alt_series,
|
||||
'alt_issue': alt_issue,
|
||||
'dynamic_name': dreplace,
|
||||
|
@ -1061,6 +1086,7 @@ class FileChecker(object):
|
|||
'issue_year': issue_year,
|
||||
'annual_comicid': None,
|
||||
'scangroup': scangroup,
|
||||
'booktype': booktype,
|
||||
'reading_order': None}
|
||||
|
||||
if self.justparse:
|
||||
|
@ -1071,6 +1097,7 @@ class FileChecker(object):
|
|||
'comiclocation': self.dir,
|
||||
'series_name': series_name,
|
||||
'series_name_decoded': series_name_decoded,
|
||||
'issueid': issueid,
|
||||
'alt_series': alt_series,
|
||||
'alt_issue': alt_issue,
|
||||
'dynamic_name': self.dynamic_replace(series_name)['mod_seriesname'],
|
||||
|
@ -1078,6 +1105,7 @@ class FileChecker(object):
|
|||
'issue_year': issue_year,
|
||||
'issue_number': issue_number,
|
||||
'scangroup': scangroup,
|
||||
'booktype': booktype,
|
||||
'reading_order': reading_order}
|
||||
|
||||
series_info = {}
|
||||
|
@ -1087,12 +1115,14 @@ class FileChecker(object):
|
|||
'comiclocation': self.dir,
|
||||
'series_name': series_name,
|
||||
'series_name_decoded': series_name_decoded,
|
||||
'issueid': issueid,
|
||||
'alt_series': alt_series,
|
||||
'alt_issue': alt_issue,
|
||||
'series_volume': issue_volume,
|
||||
'issue_year': issue_year,
|
||||
'issue_number': issue_number,
|
||||
'scangroup': scangroup}
|
||||
'scangroup': scangroup,
|
||||
'booktype': booktype}
|
||||
|
||||
return self.matchIT(series_info)
|
||||
|
||||
|
@ -1252,9 +1282,11 @@ class FileChecker(object):
|
|||
'alt_series': series_info['alt_series'],
|
||||
'alt_issue': series_info['alt_issue'],
|
||||
'issue_year': series_info['issue_year'],
|
||||
'issueid': series_info['issueid'],
|
||||
'justthedigits': justthedigits,
|
||||
'annual_comicid': annual_comicid,
|
||||
'scangroup': series_info['scangroup']}
|
||||
'scangroup': series_info['scangroup'],
|
||||
'booktype': series_info['booktype']}
|
||||
|
||||
else:
|
||||
#logger.fdebug('[NO MATCH] ' + filename + ' [WATCHLIST:' + self.watchcomic + ']')
|
||||
|
@ -1263,12 +1295,14 @@ class FileChecker(object):
|
|||
'sub': series_info['sub'],
|
||||
'comiclocation': series_info['comiclocation'],
|
||||
'series_name': series_info['series_name'],
|
||||
'alt_series': series_info['alt_series'],
|
||||
'alt_issue': series_info['alt_issue'],
|
||||
'alt_series': series_info['alt_series'],
|
||||
'alt_issue': series_info['alt_issue'],
|
||||
'issue_number': series_info['issue_number'],
|
||||
'series_volume': series_info['series_volume'],
|
||||
'issue_year': series_info['issue_year'],
|
||||
'scangroup': series_info['scangroup']}
|
||||
'issueid': series_info['issueid'],
|
||||
'scangroup': series_info['scangroup'],
|
||||
'booktype': series_info['booktype']}
|
||||
|
||||
|
||||
def char_file_position(self, file, findchar, lastpos):
|
||||
|
|
|
@ -10,7 +10,7 @@ import mylar
import unicodedata
import urllib

def Startit(searchName, searchIssue, searchYear, ComicVersion, IssDateFix):
def Startit(searchName, searchIssue, searchYear, ComicVersion, IssDateFix, booktype=None):
cName = searchName

#clean up searchName due to webparse/redudant naming that would return too specific of results.
|
||||
|
@ -39,7 +39,12 @@ def Startit(searchName, searchIssue, searchYear, ComicVersion, IssDateFix):
encodeSearch = urllib.quote_plus(searchName)
splitSearch = encodeSearch.split(" ")

if len(searchIssue) == 1:
tmpsearchIssue = searchIssue

if any([booktype == 'One-Shot', booktype == 'TPB']):
tmpsearchIssue = '1'
loop = 4
elif len(searchIssue) == 1:
loop = 3
elif len(searchIssue) == 2:
loop = 2
|
||||
|
@ -71,17 +76,24 @@ def Startit(searchName, searchIssue, searchYear, ComicVersion, IssDateFix):
i = 1
while (i <= loop):
if i == 1:
searchmethod = searchIssue
searchmethod = tmpsearchIssue
elif i == 2:
searchmethod = '0' + searchIssue
searchmethod = '0' + tmpsearchIssue
elif i == 3:
searchmethod = '00' + searchIssue
searchmethod = '00' + tmpsearchIssue
elif i == 4:
searchmethod = tmpsearchIssue
else:
break

joinSearch = "+".join(splitSearch) + "+" +searchmethod
if i == 4:
logger.fdebug('Now searching experimental for %s to try and ensure all the bases are covered' % cName)
joinSearch = "+".join(splitSearch)
else:
logger.fdebug('Now searching experimental for issue number: %s to try and ensure all the bases are covered' % searchmethod)
joinSearch = "+".join(splitSearch) + "+" +searchmethod

logger.fdebug('Now searching experimental for issue number: %s to try and ensure all the bases are covered' % searchmethod)

if mylar.CONFIG.PREFERRED_QUALITY == 1: joinSearch = joinSearch + " .cbr"
elif mylar.CONFIG.PREFERRED_QUALITY == 2: joinSearch = joinSearch + " .cbz"
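The loop above tries the issue number as-is, then zero-padded to two and three digits, and the new fourth pass drops the issue number entirely for TPB/One-Shot books. A hedged sketch of that same expansion, using illustrative names rather than the actual Startit() code:

# Illustrative sketch only - not the real experimental search builder.
def experimental_terms(split_search, issue, booktype=None):
    terms = []
    if booktype in ('One-Shot', 'TPB'):
        issue, loop = '1', 4
    elif len(issue) == 1:
        loop = 3
    elif len(issue) == 2:
        loop = 2
    else:
        loop = 1
    for i in range(1, loop + 1):
        if i == 1:
            method = issue
        elif i == 2:
            method = '0' + issue
        elif i == 3:
            method = '00' + issue
        else:
            method = None  # fourth pass: series title only, no issue number
        if method is None:
            terms.append("+".join(split_search))
        else:
            terms.append("+".join(split_search) + "+" + method)
    return terms

print(experimental_terms(['Saga'], '7'))                       # ['Saga+7', 'Saga+07', 'Saga+007']
print(experimental_terms(['Monstress'], '1', booktype='TPB'))  # ['Monstress+1', 'Monstress+01', 'Monstress+001', 'Monstress']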
|
||||
|
|
|
@ -0,0 +1,354 @@
# -*- coding: utf-8 -*-
# This file is part of Mylar.
#
# Mylar is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Mylar is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Mylar. If not, see <http://www.gnu.org/licenses/>.

from StringIO import StringIO
import urllib
from threading import Thread
import os
import sys
import re
import gzip
import time
import datetime
import json
from bs4 import BeautifulSoup
import requests
import cfscrape
import zipfile
import logger
import mylar
from mylar import db

class GC(object):

def __init__(self, query=None, issueid=None, comicid=None):

self.valreturn = []

self.url = 'https://getcomics.info'

self.query = query

self.comicid = comicid

self.issueid = issueid

self.local_filename = os.path.join(mylar.CONFIG.CACHE_DIR, "getcomics.html")

self.headers = {'Accept-encoding': 'gzip', 'User-Agent': 'Mozilla/5.0 (Windows NT 6.1; WOW64; rv:40.0) Gecko/20100101 Firefox/40.1', 'Referer': 'https://getcomics.info/'}

def search(self):

with cfscrape.create_scraper() as s:
cf_cookievalue, cf_user_agent = s.get_tokens(self.url, headers=self.headers)

t = s.get(self.url+'/', params={'s': self.query}, verify=True, cookies=cf_cookievalue, headers=self.headers, stream=True)

with open(self.local_filename, 'wb') as f:
for chunk in t.iter_content(chunk_size=1024):
if chunk: # filter out keep-alive new chunks
f.write(chunk)
f.flush()

return self.search_results()

def loadsite(self, id, link):
title = os.path.join(mylar.CONFIG.CACHE_DIR, 'getcomics-' + id)
with cfscrape.create_scraper() as s:
self.cf_cookievalue, cf_user_agent = s.get_tokens(link, headers=self.headers)

t = s.get(link, verify=True, cookies=self.cf_cookievalue, headers=self.headers, stream=True)

with open(title+'.html', 'wb') as f:
for chunk in t.iter_content(chunk_size=1024):
if chunk: # filter out keep-alive new chunks
f.write(chunk)
f.flush()
def search_results(self):
|
||||
results = {}
|
||||
resultlist = []
|
||||
soup = BeautifulSoup(open(self.local_filename), 'html.parser')
|
||||
|
||||
resultline = soup.find("span", {"class": "cover-article-count"}).get_text(strip=True)
|
||||
logger.info('There are %s results' % re.sub('Articles', '', resultline).strip())
|
||||
|
||||
for f in soup.findAll("article"):
|
||||
id = f['id']
|
||||
lk = f.find('a')
|
||||
link = lk['href']
|
||||
titlefind = f.find("h1", {"class": "post-title"})
|
||||
title = titlefind.get_text(strip=True)
|
||||
title = re.sub(u'\u2013', '-', title).strip()
|
||||
filename = title
|
||||
issues = None
|
||||
pack = False
|
||||
#see if it's a pack type
|
||||
issfind_st = title.find('#')
|
||||
issfind_en = title.find('-', issfind_st)
|
||||
if issfind_en != -1:
|
||||
if all([title[issfind_en+1] == ' ', title[issfind_en+2].isdigit()]):
|
||||
iss_en = title.find(' ', issfind_en+2)
|
||||
if iss_en != -1:
|
||||
issues = title[issfind_st+1:iss_en]
|
||||
pack = True
|
||||
if title[issfind_en+1].isdigit():
|
||||
iss_en = title.find(' ', issfind_en+1)
|
||||
if iss_en != -1:
|
||||
issues = title[issfind_st+1:iss_en]
|
||||
pack = True
|
||||
|
||||
# if it's a pack - remove the issue-range and the possible issue years (cause it most likely will span) and pass thru as separate items
|
||||
if pack is True:
|
||||
title = re.sub(issues, '', title).strip()
|
||||
if title.endswith('#'):
|
||||
title = title[:-1].strip()
|
||||
|
||||
option_find = f.find("p", {"style": "text-align: center;"})
|
||||
i = 0
|
||||
while i <= 2:
|
||||
option_find = option_find.findNext(text=True)
|
||||
if 'Year' in option_find:
|
||||
year = option_find.findNext(text=True)
|
||||
year = re.sub('\|', '', year).strip()
|
||||
if pack is True and '-' in year:
|
||||
title = re.sub('\('+year+'\)', '', title).strip()
|
||||
else:
|
||||
size = option_find.findNext(text=True)
|
||||
if all([re.sub(':', '', size).strip() != 'Size', len(re.sub('[^0-9]', '', size).strip()) > 0]):
|
||||
if 'MB' in size:
|
||||
size = re.sub('MB', 'M', size).strip()
|
||||
elif 'GB' in size:
|
||||
size = re.sub('GB', 'G', size).strip()
|
||||
if '//' in size:
|
||||
nwsize = size.find('//')
|
||||
size = re.sub('\[', '', size[:nwsize]).strip()
|
||||
else:
|
||||
size = '0 M'
|
||||
i+=1
|
||||
dateline = f.find('time')
|
||||
datefull = dateline['datetime']
|
||||
datestamp = time.mktime(time.strptime(datefull, "%Y-%m-%d"))
|
||||
resultlist.append({"title": title,
|
||||
"pubdate": datetime.datetime.fromtimestamp(float(datestamp)).strftime('%a, %d %b %Y %H:%M:%S'),
|
||||
"filename": filename,
|
||||
"size": re.sub(' ', '', size).strip(),
|
||||
"pack": pack,
|
||||
"issues": issues,
|
||||
"link": link,
|
||||
"year": year,
|
||||
"id": re.sub('post-', '', id).strip(),
|
||||
"site": 'DDL'})
|
||||
|
||||
logger.fdebug('%s [%s]' % (title, size))
|
||||
|
||||
results['entries'] = resultlist
|
||||
|
||||
return results
|
||||
|
||||
def parse_downloadresults(self, id, mainlink):
|
||||
myDB = db.DBConnection()
|
||||
title = os.path.join(mylar.CONFIG.CACHE_DIR, 'getcomics-' + id)
|
||||
soup = BeautifulSoup(open(title+'.html'), 'html.parser')
|
||||
orig_find = soup.find("p", {"style": "text-align: center;"})
|
||||
i = 0
|
||||
option_find = orig_find
|
||||
while True: #i <= 10:
|
||||
prev_option = option_find
|
||||
option_find = option_find.findNext(text=True)
|
||||
if i == 0:
|
||||
series = option_find
|
||||
elif 'Year' in option_find:
|
||||
year = option_find.findNext(text=True)
|
||||
year = re.sub('\|', '', year).strip()
|
||||
else:
|
||||
if 'Size' in prev_option:
|
||||
size = option_find #.findNext(text=True)
|
||||
possible_more = orig_find.next_sibling
|
||||
break
|
||||
i+=1
|
||||
|
||||
logger.fdebug('Now downloading: %s [%s] / %s ... this can take a while (go get some take-out)...' % (series, year, size))
|
||||
|
||||
link = None
|
||||
for f in soup.findAll("div", {"class": "aio-pulse"}):
|
||||
lk = f.find('a')
|
||||
if lk['title'] == 'Download Now':
|
||||
link = lk['href']
|
||||
site = lk['title']
|
||||
break #get the first link just to test
|
||||
|
||||
if link is None:
|
||||
logger.warn('Unable to retrieve any valid immediate download links. They might not exist.')
|
||||
return
|
||||
|
||||
links = []
|
||||
|
||||
if possible_more.name == 'ul':
|
||||
bb = possible_more.findAll('li')
|
||||
for x in bb:
|
||||
volume = x.findNext(text=True)
|
||||
if u'\u2013' in volume:
|
||||
volume = re.sub(u'\u2013', '-', volume)
|
||||
linkline = x.find('a')
|
||||
link = linkline['href']
|
||||
site = linkline.findNext(text=True)
|
||||
links.append({"volume": volume,
|
||||
"site": site,
|
||||
"link": link})
|
||||
else:
|
||||
check_extras = soup.findAll("h3")
|
||||
for sb in check_extras:
|
||||
header = sb.findNext(text=True)
|
||||
if header == 'TPBs':
|
||||
nxt = sb.next_sibling
|
||||
if nxt.name == 'ul':
|
||||
bb = nxt.findAll('li')
|
||||
for x in bb:
|
||||
volume = x.findNext(text=True)
|
||||
if u'\u2013' in volume:
|
||||
volume = re.sub(u'\u2013', '-', volume)
|
||||
linkline = x.find('a')
|
||||
link = linkline['href']
|
||||
site = linkline.findNext(text=True)
|
||||
links.append({"volume": volume,
|
||||
"site": site,
|
||||
"link": link})
|
||||
|
||||
if link is None:
|
||||
logger.warn('Unable to retrieve any valid immediate download links. They might not exist.')
|
||||
return {'success': False}
|
||||
|
||||
for x in links:
|
||||
logger.fdebug('[%s] %s - %s' % (x['site'], x['volume'], x['link']))
|
||||
|
||||
ctrlval = {'id': id}
|
||||
vals = {'series': series,
|
||||
'year': year,
|
||||
'size': size,
|
||||
'issueid': self.issueid,
|
||||
'comicid': self.comicid,
|
||||
'link': link,
|
||||
'status': 'Queued'}
|
||||
myDB.upsert('ddl_info', vals, ctrlval)
|
||||
|
||||
mylar.DDL_QUEUE.put({'link': link,
|
||||
'mainlink': mainlink,
|
||||
'series': series,
|
||||
'year': year,
|
||||
'size': size,
|
||||
'comicid': self.comicid,
|
||||
'issueid': self.issueid,
|
||||
'id': id})
|
||||
|
||||
return {'success': True}
|
||||
|
||||
def downloadit(self, id, link, mainlink):
|
||||
if mylar.DDL_LOCK is True:
|
||||
logger.fdebug('[DDL] Another item is currently downloading via DDL. Only one item can be downloaded at a time using DDL. Patience.')
|
||||
return
|
||||
else:
|
||||
mylar.DDL_LOCK = True
|
||||
|
||||
myDB = db.DBConnection()
|
||||
filename = None
|
||||
try:
|
||||
with cfscrape.create_scraper() as s:
|
||||
cf_cookievalue, cf_user_agent = s.get_tokens(mainlink, headers=self.headers)
|
||||
t = s.get(link, verify=True, cookies=cf_cookievalue, headers=self.headers, stream=True)
|
||||
|
||||
filename = os.path.basename(urllib.unquote(t.url).decode('utf-8'))
|
||||
|
||||
path = os.path.join(mylar.CONFIG.DDL_LOCATION, filename)
|
||||
|
||||
#write the filename to the db for tracking purposes...
|
||||
myDB.upsert('ddl_info', {'filename': filename}, {'id': id})
|
||||
|
||||
if t.headers.get('content-encoding') == 'gzip': #.get('Content-Encoding') == 'gzip':
|
||||
buf = StringIO(t.content)
|
||||
f = gzip.GzipFile(fileobj=buf)
|
||||
|
||||
with open(path, 'wb') as f:
|
||||
for chunk in t.iter_content(chunk_size=1024):
|
||||
if chunk: # filter out keep-alive new chunks
|
||||
f.write(chunk)
|
||||
f.flush()
|
||||
|
||||
except Exception as e:
|
||||
logger.error('[ERROR] %s' % e)
|
||||
mylar.DDL_LOCK = False
|
||||
return ({"success": False,
|
||||
"filename": filename,
|
||||
"path": None})
|
||||
|
||||
else:
|
||||
mylar.DDL_LOCK = False
|
||||
if os.path.isfile(path):
|
||||
if path.endswith('.zip'):
|
||||
new_path = os.path.join(mylar.CONFIG.DDL_LOCATION, re.sub('.zip', '', filename).strip())
|
||||
logger.info('Zip file detected. Unzipping into new modified path location: %s' % new_path)
|
||||
try:
|
||||
zip_f = zipfile.ZipFile(path, 'r')
|
||||
zip_f.extractall(new_path)
|
||||
zip_f.close()
|
||||
except Exception as e:
|
||||
logger.warn('[ERROR: %s] Unable to extract zip file: %s' % (e, new_path))
|
||||
return ({"success": False,
|
||||
"filename": filename,
|
||||
"path": None})
|
||||
else:
|
||||
try:
|
||||
os.remove(path)
|
||||
except Exception as e:
|
||||
logger.warn('[ERROR: %s] Unable to remove zip file from %s after extraction.' % (e, path))
|
||||
filename = None
|
||||
else:
|
||||
new_path = path
|
||||
return ({"success": True,
|
||||
"filename": filename,
|
||||
"path": new_path})
|
||||
|
||||
def issue_list(self, pack):
#packlist = [x.strip() for x in pack.split(',)]
packlist = pack.replace('+', ' ').replace(',', ' ').split()
print packlist
plist = []
pack_issues = []
for pl in packlist:
if '-' in pl:
plist.append(range(int(pl[:pl.find('-')]),int(pl[pl.find('-')+1:])+1))
else:
if 'TPBs' not in pl:
plist.append(int(pl))
else:
plist.append('TPBs')

for pi in plist:
if type(pi) == list:
for x in pi:
pack_issues.append(x)
else:
pack_issues.append(pi)

pack_issues.sort()
print "pack_issues: %s" % pack_issues

#if __name__ == '__main__':
# ab = GC(sys.argv[1]) #'justice league aquaman') #sys.argv[0])
# #c = ab.search()
# b = ab.loadsite('test', sys.argv[2])
# c = ab.parse_downloadresults('test', '60MB')
# #c = ab.issue_list(sys.argv[2])
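issue_list() above turns a pack description such as "1-5, 8, 12" into individual issue numbers so a pack grab can be matched against wanted issues. A minimal sketch of that expansion under the same assumptions (illustrative helper name, not the method itself):

# Illustrative sketch only - mirrors the range expansion used by issue_list() above.
def expand_pack(pack):
    pack_issues = []
    for pl in pack.replace('+', ' ').replace(',', ' ').split():
        if '-' in pl:
            start, end = pl.split('-', 1)
            pack_issues.extend(range(int(start), int(end) + 1))
        elif pl != 'TPBs':
            pack_issues.append(int(pl))
    return sorted(pack_issues)

print(expand_pack('1-5, 8, 12'))   # [1, 2, 3, 4, 5, 8, 12]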
151
mylar/helpers.py
|
@ -21,6 +21,7 @@ from datetime import timedelta, date
import subprocess
import requests
import shlex
import Queue
import json
import re
import sys
|
||||
|
@ -37,7 +38,7 @@ from apscheduler.triggers.interval import IntervalTrigger

import mylar
import logger
from mylar import sabnzbd, nzbget, process
from mylar import db, sabnzbd, nzbget, process, getcomics

def multikeysort(items, columns):
|
||||
|
||||
|
@ -183,12 +184,15 @@ def human2bytes(s):
num = re.sub(',', '', s[:-1])
#assert num.isdigit() and letter in symbols
#use below assert statement to handle sizes with decimal places
assert float(num) and letter in symbols
num = float(num)
prefix = {symbols[0]: 1}
for i, s in enumerate(symbols[1:]):
prefix[s] = 1 << (i +1) *10
return int(num * prefix[letter])
if num != '0':
assert float(num) and letter in symbols
num = float(num)
prefix = {symbols[0]: 1}
for i, s in enumerate(symbols[1:]):
prefix[s] = 1 << (i +1) *10
return int(num * prefix[letter])
else:
return 0

def replace_all(text, dic):
for i, j in dic.iteritems():
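The human2bytes() change above keeps the binary-prefix conversion but returns 0 outright for a literal '0' size instead of tripping the assert. A rough standalone sketch of that behaviour (illustrative function and sample values, not the helper itself):

# Illustrative sketch only - not mylar.helpers.human2bytes.
def to_bytes(size):
    symbols = ('B', 'K', 'M', 'G', 'T')
    letter = size[-1:].upper()
    num = size[:-1].replace(',', '')
    if num == '0':
        return 0
    prefix = {symbols[0]: 1}
    for i, s in enumerate(symbols[1:]):
        prefix[s] = 1 << (i + 1) * 10
    return int(float(num) * prefix[letter])

print(to_bytes('19.3M'))   # 20237516, i.e. int(19.3 * 2**20)
print(to_bytes('0M'))      # 0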
|
||||
|
@ -263,7 +267,7 @@ def decimal_issue(iss):
|
|||
return deciss, dec_except
|
||||
|
||||
def rename_param(comicid, comicname, issue, ofilename, comicyear=None, issueid=None, annualize=None, arc=False):
|
||||
import db
|
||||
#import db
|
||||
myDB = db.DBConnection()
|
||||
comicid = str(comicid) # it's coming in unicoded...
|
||||
|
||||
|
@ -715,7 +719,7 @@ def ComicSort(comicorder=None, sequence=None, imported=None):
|
|||
if sequence:
|
||||
# if it's on startup, load the sql into a tuple for use to avoid record-locking
|
||||
i = 0
|
||||
import db
|
||||
#import db
|
||||
myDB = db.DBConnection()
|
||||
comicsort = myDB.select("SELECT * FROM comics ORDER BY ComicSortName COLLATE NOCASE")
|
||||
comicorderlist = []
|
||||
|
@ -800,7 +804,7 @@ def updateComicLocation():
|
|||
# - set NEWCOMDIR = new ComicLocation
|
||||
#after running, set ComicLocation to new location in Configuration GUI
|
||||
|
||||
import db
|
||||
#import db
|
||||
myDB = db.DBConnection()
|
||||
if mylar.CONFIG.NEWCOM_DIR is not None:
|
||||
logger.info('Performing a one-time mass update to Comic Location')
|
||||
|
@ -932,7 +936,7 @@ def cleanhtml(raw_html):
|
|||
|
||||
|
||||
def issuedigits(issnum):
|
||||
import db
|
||||
#import db
|
||||
|
||||
int_issnum = None
|
||||
|
||||
|
@ -1092,8 +1096,16 @@ def issuedigits(issnum):
a+=1
int_issnum = (int(issno) * 1000) + ordtot
elif invchk == "true":
logger.fdebug('this does not have an issue # that I can parse properly.')
return 999999999999999
if any([issnum.lower() == 'fall', issnum.lower() == 'spring', issnum.lower() == 'summer', issnum.lower() == 'winter']):
inu = 0
ordtot = 0
while (inu < len(issnum)):
ordtot += ord(issnum[inu].lower()) #lower-case the letters for simplicty
inu+=1
int_issnum = ordtot
else:
logger.fdebug('this does not have an issue # that I can parse properly.')
return 999999999999999
else:
if issnum == '9-5':
issnum = u'9\xbd'
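The new branch above gives seasonal "issue numbers" (Spring, Summer, Fall, Winter) a sortable integer by summing the ordinals of their lower-cased letters, in line with how other alpha issues are handled. A hedged sketch of that mapping (illustrative helper, not the real issuedigits()):

# Illustrative sketch only.
def season_to_int(issnum):
    if issnum.lower() in ('fall', 'spring', 'summer', 'winter'):
        return sum(ord(c) for c in issnum.lower())
    return None

print(season_to_int('Spring'))   # 659
print(season_to_int('Fall'))     # 415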
|
||||
|
@ -1118,7 +1130,7 @@ def issuedigits(issnum):
|
|||
|
||||
|
||||
def checkthepub(ComicID):
|
||||
import db
|
||||
#import db
|
||||
myDB = db.DBConnection()
|
||||
publishers = ['marvel', 'dc', 'darkhorse']
|
||||
pubchk = myDB.selectone("SELECT * FROM comics WHERE ComicID=?", [ComicID]).fetchone()
|
||||
|
@ -1135,7 +1147,7 @@ def checkthepub(ComicID):
|
|||
return mylar.CONFIG.INDIE_PUB
|
||||
|
||||
def annual_update():
|
||||
import db
|
||||
#import db
|
||||
myDB = db.DBConnection()
|
||||
annuallist = myDB.select('SELECT * FROM annuals')
|
||||
if annuallist is None:
|
||||
|
@ -1191,7 +1203,7 @@ def renamefile_readingorder(readorder):
|
|||
return readord
|
||||
|
||||
def latestdate_fix():
|
||||
import db
|
||||
#import db
|
||||
datefix = []
|
||||
cnupdate = []
|
||||
myDB = db.DBConnection()
|
||||
|
@ -1243,7 +1255,7 @@ def latestdate_fix():
|
|||
return
|
||||
|
||||
def upgrade_dynamic():
|
||||
import db
|
||||
#import db
|
||||
dynamic_comiclist = []
|
||||
myDB = db.DBConnection()
|
||||
#update the comicdb to include the Dynamic Names (and any futher changes as required)
|
||||
|
@ -1282,7 +1294,6 @@ def upgrade_dynamic():
|
|||
|
||||
def checkFolder(folderpath=None):
|
||||
from mylar import PostProcessor
|
||||
import Queue
|
||||
|
||||
queue = Queue.Queue()
|
||||
#monitor a selected folder for 'snatched' files that haven't been processed
|
||||
|
@ -1328,7 +1339,7 @@ def LoadAlternateSearchNames(seriesname_alt, comicid):
|
|||
return Alternate_Names
|
||||
|
||||
def havetotals(refreshit=None):
|
||||
import db
|
||||
#import db
|
||||
|
||||
comics = []
|
||||
myDB = db.DBConnection()
|
||||
|
@ -1816,7 +1827,7 @@ def IssueDetails(filelocation, IssueID=None, justinfo=False):
|
|||
return issuedetails
|
||||
|
||||
def get_issue_title(IssueID=None, ComicID=None, IssueNumber=None, IssueArcID=None):
|
||||
import db
|
||||
#import db
|
||||
myDB = db.DBConnection()
|
||||
if IssueID:
|
||||
issue = myDB.selectone('SELECT * FROM issues WHERE IssueID=?', [IssueID]).fetchone()
|
||||
|
@ -1848,7 +1859,7 @@ def int_num(s):
|
|||
return float(s)
|
||||
|
||||
def listPull(weeknumber, year):
|
||||
import db
|
||||
#import db
|
||||
library = {}
|
||||
myDB = db.DBConnection()
|
||||
# Get individual comics
|
||||
|
@ -1858,7 +1869,7 @@ def listPull(weeknumber, year):
|
|||
return library
|
||||
|
||||
def listLibrary(comicid=None):
|
||||
import db
|
||||
#import db
|
||||
library = {}
|
||||
myDB = db.DBConnection()
|
||||
if comicid is None:
|
||||
|
@ -1885,7 +1896,7 @@ def listLibrary(comicid=None):
|
|||
return library
|
||||
|
||||
def listStoryArcs():
|
||||
import db
|
||||
#import db
|
||||
library = {}
|
||||
myDB = db.DBConnection()
|
||||
# Get Distinct Arc IDs
|
||||
|
@ -1899,7 +1910,7 @@ def listStoryArcs():
|
|||
return library
|
||||
|
||||
def listoneoffs(weeknumber, year):
|
||||
import db
|
||||
#import db
|
||||
library = []
|
||||
myDB = db.DBConnection()
|
||||
# Get Distinct one-off issues from the pullist that have already been downloaded / snatched
|
||||
|
@ -1915,7 +1926,7 @@ def listoneoffs(weeknumber, year):
|
|||
return library
|
||||
|
||||
def manualArc(issueid, reading_order, storyarcid):
|
||||
import db
|
||||
#import db
|
||||
if issueid.startswith('4000-'):
|
||||
issueid = issueid[5:]
|
||||
|
||||
|
@ -2051,7 +2062,7 @@ def manualArc(issueid, reading_order, storyarcid):
|
|||
return
|
||||
|
||||
def listIssues(weeknumber, year):
|
||||
import db
|
||||
#import db
|
||||
library = []
|
||||
myDB = db.DBConnection()
|
||||
# Get individual issues
|
||||
|
@ -2096,7 +2107,7 @@ def listIssues(weeknumber, year):
|
|||
return library
|
||||
|
||||
def incr_snatched(ComicID):
|
||||
import db
|
||||
#import db
|
||||
myDB = db.DBConnection()
|
||||
incr_count = myDB.selectone("SELECT Have FROM Comics WHERE ComicID=?", [ComicID]).fetchone()
|
||||
logger.fdebug('Incrementing HAVE count total to : ' + str(incr_count['Have'] + 1))
|
||||
|
@ -2112,7 +2123,7 @@ def duplicate_filecheck(filename, ComicID=None, IssueID=None, StoryArcID=None, r
|
|||
#storyarcid = the storyarcid of the issue that's being checked for duplication.
|
||||
#rtnval = the return value of a previous duplicate_filecheck that's re-running against new values
|
||||
#
|
||||
import db
|
||||
#import db
|
||||
myDB = db.DBConnection()
|
||||
|
||||
logger.info('[DUPECHECK] Duplicate check for ' + filename)
|
||||
|
@ -2390,7 +2401,7 @@ def humanize_time(amount, units = 'seconds'):
|
|||
return buf
|
||||
|
||||
def issue_status(IssueID):
|
||||
import db
|
||||
#import db
|
||||
myDB = db.DBConnection()
|
||||
|
||||
IssueID = str(IssueID)
|
||||
|
@ -2424,7 +2435,7 @@ def crc(filename):
|
|||
return hashlib.md5(filename).hexdigest()
|
||||
|
||||
def issue_find_ids(ComicName, ComicID, pack, IssueNumber):
|
||||
import db
|
||||
#import db
|
||||
|
||||
myDB = db.DBConnection()
|
||||
|
||||
|
@ -2551,7 +2562,7 @@ def cleanHost(host, protocol = True, ssl = False, username = None, password = No
|
|||
return host
|
||||
|
||||
def checkthe_id(comicid=None, up_vals=None):
|
||||
import db
|
||||
#import db
|
||||
myDB = db.DBConnection()
|
||||
if not up_vals:
|
||||
chk = myDB.selectone("SELECT * from ref32p WHERE ComicID=?", [comicid]).fetchone()
|
||||
|
@ -2582,7 +2593,7 @@ def checkthe_id(comicid=None, up_vals=None):
|
|||
myDB.upsert("ref32p", newVal, ctrlVal)
|
||||
|
||||
def updatearc_locs(storyarcid, issues):
|
||||
import db
|
||||
#import db
|
||||
myDB = db.DBConnection()
|
||||
issuelist = []
|
||||
for x in issues:
|
||||
|
@ -2672,7 +2683,7 @@ def updatearc_locs(storyarcid, issues):
|
|||
|
||||
|
||||
def spantheyears(storyarcid):
|
||||
import db
|
||||
#import db
|
||||
myDB = db.DBConnection()
|
||||
|
||||
totalcnt = myDB.select("SELECT * FROM storyarcs WHERE StoryArcID=?", [storyarcid])
|
||||
|
@ -2736,7 +2747,7 @@ def arcformat(arc, spanyears, publisher):
|
|||
return dstloc
|
||||
|
||||
def torrentinfo(issueid=None, torrent_hash=None, download=False, monitor=False):
|
||||
import db
|
||||
#import db
|
||||
from base64 import b16encode, b32decode
|
||||
|
||||
#check the status of the issueid to make sure it's in Snatched status and was grabbed via torrent.
|
||||
|
@ -2998,7 +3009,7 @@ def weekly_info(week=None, year=None, current=None):
|
|||
return weekinfo
|
||||
|
||||
def latestdate_update():
|
||||
import db
|
||||
#import db
|
||||
myDB = db.DBConnection()
|
||||
ccheck = myDB.select('SELECT a.ComicID, b.IssueID, a.LatestDate, b.ReleaseDate, b.Issue_Number from comics as a left join issues as b on a.comicid=b.comicid where a.LatestDate < b.ReleaseDate or a.LatestDate like "%Unknown%" group by a.ComicID')
|
||||
if ccheck is None or len(ccheck) == 0:
|
||||
|
@ -3019,6 +3030,57 @@ def latestdate_update():
|
|||
logger.info('updating latest date for : ' + a['ComicID'] + ' to ' + a['LatestDate'] + ' #' + a['LatestIssue'])
|
||||
myDB.upsert("comics", newVal, ctrlVal)
|
||||
|
||||
def ddl_downloader(queue):
myDB = db.DBConnection()
while True:
if mylar.DDL_LOCK is True:
time.sleep(5)

elif mylar.DDL_LOCK is False and queue.qsize() >= 1:
item = queue.get(True)
logger.info('Now loading request from DDL queue: %s' % item['series'])
if item == 'exit':
logger.info('Cleaning up workers for shutdown')
break

#write this to the table so we have a record of what's going on.
ctrlval = {'id': item['id']}
val = {'status': 'Downloading'}
myDB.upsert('ddl_info', val, ctrlval)

ddz = getcomics.GC()
ddzstat = ddz.downloadit(item['id'], item['link'], item['mainlink'])

nval = {'status': 'Completed'}
myDB.upsert('ddl_info', nval, ctrlval)

if all([ddzstat['success'] is True, mylar.CONFIG.POST_PROCESSING is True]):
try:
if ddzstat['filename'] is None:
logger.info('%s successfully downloaded - now initiating post-processing.' % (os.path.basename(ddzstat['path'])))
mylar.PP_QUEUE.put({'nzb_name': ddzstat['filename'],
'nzb_folder': ddzstat['path'],
'failed': False,
'issueid': None,
'comicid': item['comicid'],
'apicall': True,
'ddl': True})
else:
logger.info('%s successfully downloaded - now initiating post-processing.' % (ddzstat['filename']))
mylar.PP_QUEUE.put({'nzb_name': ddzstat['filename'],
'nzb_folder': ddzstat['path'],
'failed': False,
'issueid': item['issueid'],
'comicid': item['comicid'],
'apicall': True,
'ddl': True})
except Exception as e:
logger.info('process error: %s [%s]' %(e, ddzstat))
elif mylar.CONFIG.POST_PROCESSING is True:
logger.info('File successfully downloaded. Post Processing is not enabled - item retained here: %s' % os.path.join(ddzstat['path'],ddzstat['filename']))
else:
logger.info('[Status: %s] Failed to download: %s ' % (ddzstat['success'], ddzstat))
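The ddl_downloader worker above is the heart of the new DDL queue: it idles while DDL_LOCK is held, pops one request at a time, records the state change in the ddl_info table, runs the download, then hands the finished file to the post-processing queue. A minimal, self-contained sketch of that worker pattern in Python 3 (the payload keys and the fake_download stand-in are illustrative only, not Mylar's API):

import queue
import threading
import time

def fake_download(item):
    # stand-in for getcomics.GC().downloadit(); returns a status dict
    return {'success': True, 'filename': '%s.cbz' % item['series'], 'path': '/tmp'}

def ddl_worker(work_q, pp_q, lock):
    while True:
        if lock.locked():
            time.sleep(5)            # another download is in flight; poll again
            continue
        item = work_q.get()          # blocks until a request arrives
        if item == 'exit':           # sentinel used for clean shutdown
            break
        with lock:                   # mark the downloader busy while it runs
            result = fake_download(item)
        if result['success']:
            pp_q.put({'nzb_name': result['filename'],
                      'nzb_folder': result['path'],
                      'ddl': True})

work_q, pp_q, lock = queue.Queue(), queue.Queue(), threading.Lock()
worker = threading.Thread(target=ddl_worker, args=(work_q, pp_q, lock))
worker.daemon = True
worker.start()
work_q.put({'series': 'Example Series'})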
def postprocess_main(queue):
while True:
if mylar.APILOCK is True:
@ -3032,7 +3094,10 @@ def postprocess_main(queue):
break

if mylar.APILOCK is False:
pprocess = process.Process(item['nzb_name'], item['nzb_folder'], item['failed'], item['issueid'], item['comicid'], item['apicall'])
try:
pprocess = process.Process(item['nzb_name'], item['nzb_folder'], item['failed'], item['issueid'], item['comicid'], item['apicall'], item['ddl'])
except:
pprocess = process.Process(item['nzb_name'], item['nzb_folder'], item['failed'], item['issueid'], item['comicid'], item['apicall'])
pp = pprocess.post_process()
time.sleep(5) #arbitrary sleep to let the process attempt to finish pp'ing
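The hunk above wraps the Process() call so that queue payloads written before the 'ddl' key existed (or an older Process signature) still post-process cleanly. The bare except is the broadest way to express that; a narrower sketch of the same fallback, with the class and payload keys treated as placeholders:

def call_with_optional_ddl(process_cls, payload):
    # try the newer signature first, fall back when it is not accepted
    try:
        return process_cls(payload['nzb_name'], payload['nzb_folder'],
                           payload['failed'], payload['issueid'],
                           payload['comicid'], payload['apicall'],
                           payload.get('ddl', False))
    except TypeError:
        return process_cls(payload['nzb_name'], payload['nzb_folder'],
                           payload['failed'], payload['issueid'],
                           payload['comicid'], payload['apicall'])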
@ -3114,7 +3179,8 @@ def nzb_monitor(queue):
'failed': nzstat['failed'],
'issueid': nzstat['issueid'],
'comicid': nzstat['comicid'],
'apicall': nzstat['apicall']})
'apicall': nzstat['apicall'],
'ddl': False})
#cc = process.Process(nzstat['name'], nzstat['location'], failed=nzstat['failed'])
#nzpp = cc.post_process()
except Exception as e:
@ -3276,7 +3342,7 @@ def date_conversion(originaldate):
|
|||
def job_management(write=False, job=None, last_run_completed=None, current_run=None, status=None):
|
||||
jobresults = []
|
||||
|
||||
import db
|
||||
#import db
|
||||
myDB = db.DBConnection()
|
||||
|
||||
if job is None:
|
||||
|
@ -3493,7 +3559,7 @@ def job_management(write=False, job=None, last_run_completed=None, current_run=N
|
|||
|
||||
|
||||
def stupidchk():
|
||||
import db
|
||||
#import db
|
||||
myDB = db.DBConnection()
|
||||
CCOMICS = myDB.select("SELECT COUNT(*) FROM comics WHERE Status='Active'")
|
||||
ens = myDB.select("SELECT COUNT(*) FROM comics WHERE Status='Loading' OR Status='Paused'")
|
||||
|
@ -3807,7 +3873,7 @@ def publisherImages(publisher):
|
|||
return comicpublisher
|
||||
|
||||
def lookupthebitches(filelist, folder, nzbname, nzbid, prov, hash, pulldate):
|
||||
import db
|
||||
#import db
|
||||
myDB = db.DBConnection()
|
||||
watchlist = listLibrary()
|
||||
matchlist = []
|
||||
|
@ -3847,7 +3913,7 @@ def lookupthebitches(filelist, folder, nzbname, nzbid, prov, hash, pulldate):
|
|||
|
||||
|
||||
def DateAddedFix():
|
||||
import db
|
||||
#import db
|
||||
myDB = db.DBConnection()
|
||||
DA_A = datetime.datetime.today()
|
||||
DateAdded = DA_A.strftime('%Y-%m-%d')
|
||||
|
@ -3858,8 +3924,6 @@ def DateAddedFix():
|
|||
for an in annuals:
|
||||
myDB.upsert("annuals", {'DateAdded': DateAdded}, {'IssueID': an[0]})
|
||||
|
||||
|
||||
|
||||
def file_ops(path,dst,arc=False,one_off=False):
|
||||
# # path = source path + filename
|
||||
# # dst = destination path + filename
|
||||
|
@ -4004,7 +4068,6 @@ def file_ops(path,dst,arc=False,one_off=False):
|
|||
else:
|
||||
return False
|
||||
|
||||
|
||||
from threading import Thread
|
||||
|
||||
class ThreadWithReturnValue(Thread):
|
||||
|
|
|
@ -240,7 +240,7 @@ def addComictoDB(comicid, mismatch=None, pullupd=None, imported=None, ogcname=No
|
|||
if mylar.CONFIG.ALTERNATE_LATEST_SERIES_COVERS is False:
|
||||
PRComicImage = os.path.join('cache', str(comicid) + ".jpg")
|
||||
ComicImage = helpers.replacetheslash(PRComicImage)
|
||||
if os.path.isfile(os.path.join(comlocation, 'cover.jpg')) is True:
|
||||
if os.path.isfile(PRComicImage) is True:
|
||||
logger.fdebug('Cover already exists for series. Not redownloading.')
|
||||
else:
|
||||
covercheck = helpers.getImage(comicid, comic['ComicImage'])
|
||||
|
@ -248,18 +248,15 @@ def addComictoDB(comicid, mismatch=None, pullupd=None, imported=None, ogcname=No
|
|||
logger.info('Attempting to retrieve alternate comic image for the series.')
|
||||
covercheck = helpers.getImage(comicid, comic['ComicImageALT'])
|
||||
|
||||
PRComicImage = os.path.join('cache', str(comicid) + ".jpg")
|
||||
ComicImage = helpers.replacetheslash(PRComicImage)
|
||||
|
||||
#if the comic cover local is checked, save a cover.jpg to the series folder.
|
||||
if all([mylar.CONFIG.COMIC_COVER_LOCAL is True, os.path.isdir(comlocation) is True]):
|
||||
try:
|
||||
comiclocal = os.path.join(comlocation, 'cover.jpg')
|
||||
shutil.copyfile(os.path.join(mylar.CONFIG.CACHE_DIR, str(comicid) + '.jpg'), comiclocal)
|
||||
if mylar.CONFIG.ENFORCE_PERMS:
|
||||
filechecker.setperms(comiclocal)
|
||||
except IOError as e:
|
||||
logger.error('Unable to save cover (' + str(comiclocal) + ') into series directory (' + str(comlocation) + ') at this time.')
|
||||
#if the comic cover local is checked, save a cover.jpg to the series folder.
|
||||
if all([mylar.CONFIG.COMIC_COVER_LOCAL is True, os.path.isdir(comlocation) is True, os.path.isfile(PRComicImage) is False]):
|
||||
try:
|
||||
comiclocal = os.path.join(comlocation, 'cover.jpg')
|
||||
shutil.copyfile(PRComicImage, comiclocal)
|
||||
if mylar.CONFIG.ENFORCE_PERMS:
|
||||
filechecker.setperms(comiclocal)
|
||||
except IOError as e:
|
||||
logger.error('Unable to save cover (' + str(comiclocal) + ') into series directory (' + str(comlocation) + ') at this time.')
|
||||
else:
|
||||
ComicImage = None
|
||||
|
||||
|
@ -350,8 +347,21 @@ def addComictoDB(comicid, mismatch=None, pullupd=None, imported=None, ogcname=No
if anndata:
manualAnnual(annchk=anndata)

if all([mylar.CONFIG.ALTERNATE_LATEST_SERIES_COVERS is True, lastissueid != importantdates['LatestIssueID']]):
image_it(comicid, importantdates['LatestIssueID'], comlocation, comic['ComicImage'])
if mylar.CONFIG.ALTERNATE_LATEST_SERIES_COVERS is True: #, lastissueid != importantdates['LatestIssueID']]):
if os.path.isfile(os.path.join(mylar.CONFIG.CACHE_DIR, comicid + '.jpg')) is True:
cover_modtime = datetime.datetime.utcfromtimestamp(os.path.getmtime(os.path.join(mylar.CONFIG.CACHE_DIR, comicid + '.jpg')))
cover_mtime = datetime.datetime.strftime(cover_modtime, '%Y-%m-%d')
if importantdates['LatestStoreDate'] != '0000-00-00':
lsd = re.sub('-', '', importantdates['LatestStoreDate']).strip()
else:
lsd = re.sub('-', '', importantdates['LatestDate']).strip()
if re.sub('-', '', cover_mtime).strip() < lsd:
logger.info('Attempting to retrieve new issue cover for display')
image_it(comicid, importantdates['LatestIssueID'], comlocation, comic['ComicImage'])
else:
logger.fdebug('no update required - lastissueid [%s] = latestissueid [%s]' % (lastissueid, importantdates['LatestIssueID']))
else:
image_it(comicid, importantdates['LatestIssueID'], comlocation, comic['ComicImage'])
else:
logger.fdebug('no update required - lastissueid [%s] = latestissueid [%s]' % (lastissueid, importantdates['LatestIssueID']))
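The rewritten block above stops re-fetching the series cover on every refresh: a new image is only pulled when the cached file's modification date is older than the latest issue's store date. A small helper expressing the same staleness test (the path and date are examples, and this version compares real dates rather than digit strings):

import datetime
import os

def cover_is_stale(cover_path, latest_store_date):
    # latest_store_date is an ISO 'YYYY-MM-DD' string
    if not os.path.isfile(cover_path):
        return True   # nothing cached yet, so fetch it
    mtime = datetime.datetime.utcfromtimestamp(os.path.getmtime(cover_path)).date()
    store = datetime.datetime.strptime(latest_store_date, '%Y-%m-%d').date()
    return mtime < store

# cover_is_stale('/cache/12345.jpg', '2019-01-30') -> fetch a fresh cover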
@ -1070,6 +1080,7 @@ def updateissuedata(comicid, comicname=None, issued=None, comicIssues=None, call
|
|||
#let's start issue #'s at 0 -- thanks to DC for the new 52 reboot! :)
|
||||
latestiss = "0"
|
||||
latestdate = "0000-00-00"
|
||||
latest_stdate = "0000-00-00"
|
||||
latestissueid = None
|
||||
firstiss = "10000000"
|
||||
firstdate = "2099-00-00"
|
||||
|
@ -1195,8 +1206,17 @@ def updateissuedata(comicid, comicname=None, issued=None, comicIssues=None, call
|
|||
a+=1
|
||||
int_issnum = (int(issno) * 1000) + ordtot
|
||||
elif invchk == "true":
|
||||
logger.fdebug('this does not have an issue # that I can parse properly.')
|
||||
return
|
||||
if any([issnum.lower() == 'fall 2005', issnum.lower() == 'spring 2005', issnum.lower() == 'summer 2006', issnum.lower() == 'winter 2009']):
|
||||
issnum = re.sub('[0-9]+', '', issnum).strip()
|
||||
inu = 0
|
||||
ordtot = 0
|
||||
while (inu < len(issnum)):
|
||||
ordtot += ord(issnum[inu].lower()) #lower-case the letters for simplicty
|
||||
inu+=1
|
||||
int_issnum = ordtot
|
||||
else:
|
||||
logger.fdebug('this does not have an issue # that I can parse properly.')
|
||||
return
|
||||
else:
|
||||
if int_issnum is not None:
|
||||
pass
|
||||
|
@ -1232,8 +1252,10 @@ def updateissuedata(comicid, comicname=None, issued=None, comicIssues=None, call
|
|||
latestissueid = issid
|
||||
if firstval['Issue_Date'] != '0000-00-00':
|
||||
latestdate = str(firstval['Issue_Date'])
|
||||
latest_stdate = storedate
|
||||
else:
|
||||
latestdate = storedate
|
||||
latest_stdate = storedate
|
||||
|
||||
if firstval['Issue_Date'] < firstdate and firstval['Issue_Date'] != '0000-00-00':
|
||||
firstiss = issnum
|
||||
|
@ -1281,7 +1303,12 @@ def updateissuedata(comicid, comicname=None, issued=None, comicIssues=None, call
|
|||
styear = str(SeriesYear)
|
||||
if firstdate is not None:
|
||||
if SeriesYear != firstdate[:4]:
|
||||
logger.fdebug('Series start date (%s) crosses over into different year (%s) - assuming store date of first issue (%s) as Start Year (even though CV will say previous year - it\'s all gravy).' % (SeriesYear, firstdate[:4], firstdate))
|
||||
if firstdate[:4] == '2099':
|
||||
logger.fdebug('Series start date (%s) differs from First Issue start date as First Issue date is unknown - assuming Series Year as Start Year (even though CV might say previous year - it\'s all gravy).' % (SeriesYear))
|
||||
else:
|
||||
logger.fdebug('Series start date (%s) cannot be properly determined and/or it might cross over into different year (%s) - assuming store date of first issue (%s) as Start Year (even though CV might say previous year - it\'s all gravy).' % (SeriesYear, firstdate[:4], firstdate))
|
||||
if firstdate == '2099-00-00':
|
||||
firstdate = '%s-01-01' % SeriesYear
|
||||
styear = str(firstdate[:4])
|
||||
|
||||
if firstdate[5:7] == '00':
|
||||
|
@ -1311,7 +1338,15 @@ def updateissuedata(comicid, comicname=None, issued=None, comicIssues=None, call
|
|||
if recentchk <= 55:
|
||||
lastpubdate = 'Present'
|
||||
else:
|
||||
lastpubdate = str(ltmonth) + ' ' + str(ltyear)
|
||||
if ltmonth == '?':
|
||||
if ltyear == '0000':
|
||||
lastpubdate = '?'
|
||||
else:
|
||||
lastpubdate = str(ltyear)
|
||||
elif ltyear == '0000':
|
||||
lastpubdate = '?'
|
||||
else:
|
||||
lastpubdate = str(ltmonth) + ' ' + str(ltyear)
|
||||
|
||||
if stmonth == '?' and ('?' in lastpubdate and '0000' in lastpubdate):
|
||||
lastpubdate = 'Present'
|
||||
|
@ -1348,6 +1383,7 @@ def updateissuedata(comicid, comicname=None, issued=None, comicIssues=None, call
|
|||
importantdates['LatestIssue'] = latestiss
|
||||
importantdates['LatestIssueID'] = latestissueid
|
||||
importantdates['LatestDate'] = latestdate
|
||||
importantdates['LatestStoreDate'] = latest_stdate
|
||||
importantdates['LastPubDate'] = lastpubdate
|
||||
importantdates['SeriesStatus'] = 'Active'
|
||||
|
||||
|
@ -1552,10 +1588,10 @@ def image_it(comicid, latestissueid, comlocation, ComicImage):
|
|||
ComicImage = helpers.replacetheslash(PRComicImage)
|
||||
|
||||
#if the comic cover local is checked, save a cover.jpg to the series folder.
|
||||
if all([mylar.CONFIG.COMIC_COVER_LOCAL is True, os.path.isdir(comlocation) is True]):
|
||||
if all([mylar.CONFIG.COMIC_COVER_LOCAL is True, os.path.isdir(comlocation) is True, os.path.isfile(PRComicImage)]):
|
||||
try:
|
||||
comiclocal = os.path.join(comlocation, 'cover.jpg')
|
||||
shutil.copyfile(os.path.join(mylar.CONFIG.CACHE_DIR, str(comicid) + '.jpg'), comiclocal)
|
||||
shutil.copyfile(PRComicImage, comiclocal)
|
||||
if mylar.CONFIG.ENFORCE_PERMS:
|
||||
filechecker.setperms(comiclocal)
|
||||
except IOError as e:
|
||||
|
|
140  mylar/locg.py
|
@ -28,7 +28,7 @@ def locg(pulldate=None,weeknumber=None,year=None):
|
|||
if pulldate is None or pulldate == '00000000':
|
||||
weeknumber = todaydate.strftime("%U")
|
||||
elif '-' in pulldate:
|
||||
#find the week number
|
||||
#find the week number
|
||||
weektmp = datetime.date(*(int(s) for s in pulldate.split('-')))
|
||||
weeknumber = weektmp.strftime("%U")
|
||||
#we need to now make sure we default to the correct week
|
||||
|
@ -28,7 +28,7 @@ def locg(pulldate=None,weeknumber=None,year=None):
if pulldate is None or pulldate == '00000000':
weeknumber = todaydate.strftime("%U")
elif '-' in pulldate:
#find the week number
#find the week number
weektmp = datetime.date(*(int(s) for s in pulldate.split('-')))
weeknumber = weektmp.strftime("%U")
#we need to now make sure we default to the correct week
|
||||
data = r.json()
|
||||
logger.info('[WEEKLY-PULL] There are ' + str(len(data)) + ' issues for the week of ' + str(weeknumber) + ', ' + str(year))
|
||||
pull = []
|
||||
|
||||
logger.info('[WEEKLY-PULL] There are ' + str(len(data)) + ' issues for the week of ' + str(weeknumber) + ', ' + str(year))
|
||||
pull = []
|
||||
for x in data:
|
||||
pull.append({'series': x['series'],
|
||||
'alias': x['alias'],
|
||||
'issue': x['issue'],
|
||||
'publisher': x['publisher'],
|
||||
'shipdate': x['shipdate'],
|
||||
'coverdate': x['coverdate'],
|
||||
'comicid': x['comicid'],
|
||||
'issueid': x['issueid'],
|
||||
'weeknumber': x['weeknumber'],
|
||||
'annuallink': x['link'],
|
||||
'year': x['year'],
|
||||
'volume': x['volume'],
|
||||
'seriesyear': x['seriesyear'],
|
||||
'format': x['type']})
|
||||
shipdate = x['shipdate']
|
||||
|
||||
for x in data:
|
||||
pull.append({'series': x['series'],
|
||||
'alias': x['alias'],
|
||||
'issue': x['issue'],
|
||||
'publisher': x['publisher'],
|
||||
'shipdate': x['shipdate'],
|
||||
'coverdate': x['coverdate'],
|
||||
'comicid': x['comicid'],
|
||||
'issueid': x['issueid'],
|
||||
'weeknumber': x['weeknumber'],
|
||||
'annuallink': x['link'],
|
||||
'year': x['year'],
|
||||
'volume': x['volume'],
|
||||
'seriesyear': x['seriesyear'],
|
||||
'format': x['type']})
|
||||
shipdate = x['shipdate']
|
||||
myDB = db.DBConnection()
|
||||
|
||||
myDB = db.DBConnection()
|
||||
myDB.action("CREATE TABLE IF NOT EXISTS weekly (SHIPDATE, PUBLISHER text, ISSUE text, COMIC VARCHAR(150), EXTRA text, STATUS text, ComicID text, IssueID text, CV_Last_Update text, DynamicName text, weeknumber text, year text, volume text, seriesyear text, annuallink text, format text, rowid INTEGER PRIMARY KEY)")
|
||||
|
||||
myDB.action("CREATE TABLE IF NOT EXISTS weekly (SHIPDATE, PUBLISHER text, ISSUE text, COMIC VARCHAR(150), EXTRA text, STATUS text, ComicID text, IssueID text, CV_Last_Update text, DynamicName text, weeknumber text, year text, volume text, seriesyear text, annuallink text, format text, rowid INTEGER PRIMARY KEY)")
|
||||
#clear out the upcoming table here so they show the new values properly.
|
||||
if pulldate == '00000000':
|
||||
logger.info('Re-creating pullist to ensure everything\'s fresh.')
|
||||
myDB.action('DELETE FROM weekly WHERE weeknumber=? AND year=?',[int(weeknumber), int(year)])
|
||||
|
||||
#clear out the upcoming table here so they show the new values properly.
|
||||
if pulldate == '00000000':
|
||||
logger.info('Re-creating pullist to ensure everything\'s fresh.')
|
||||
myDB.action('DELETE FROM weekly WHERE weeknumber=? AND year=?',[int(weeknumber), int(year)])
|
||||
for x in pull:
|
||||
comicid = None
|
||||
issueid = None
|
||||
comicname = x['series']
|
||||
if x['comicid'] is not None:
|
||||
comicid = x['comicid']
|
||||
if x['issueid'] is not None:
|
||||
issueid= x['issueid']
|
||||
if x['alias'] is not None:
|
||||
comicname = x['alias']
|
||||
|
||||
for x in pull:
|
||||
comicid = None
|
||||
issueid = None
|
||||
comicname = x['series']
|
||||
if x['comicid'] is not None:
|
||||
comicid = x['comicid']
|
||||
if x['issueid'] is not None:
|
||||
issueid= x['issueid']
|
||||
if x['alias'] is not None:
|
||||
comicname = x['alias']
|
||||
cl_d = mylar.filechecker.FileChecker()
|
||||
cl_dyninfo = cl_d.dynamic_replace(comicname)
|
||||
dynamic_name = re.sub('[\|\s]','', cl_dyninfo['mod_seriesname'].lower()).strip()
|
||||
|
||||
cl_d = mylar.filechecker.FileChecker()
|
||||
cl_dyninfo = cl_d.dynamic_replace(comicname)
|
||||
dynamic_name = re.sub('[\|\s]','', cl_dyninfo['mod_seriesname'].lower()).strip()
|
||||
controlValueDict = {'DYNAMICNAME': dynamic_name,
|
||||
'ISSUE': re.sub('#', '', x['issue']).strip()}
|
||||
|
||||
controlValueDict = {'DYNAMICNAME': dynamic_name,
|
||||
'ISSUE': re.sub('#', '', x['issue']).strip()}
|
||||
|
||||
newValueDict = {'SHIPDATE': x['shipdate'],
|
||||
'PUBLISHER': x['publisher'],
|
||||
'STATUS': 'Skipped',
|
||||
'COMIC': comicname,
|
||||
'COMICID': comicid,
|
||||
'ISSUEID': issueid,
|
||||
'WEEKNUMBER': x['weeknumber'],
|
||||
'ANNUALLINK': x['annuallink'],
|
||||
'YEAR': x['year'],
|
||||
'VOLUME': x['volume'],
|
||||
'SERIESYEAR': x['seriesyear'],
|
||||
'FORMAT': x['format']}
|
||||
myDB.upsert("weekly", newValueDict, controlValueDict)
|
||||
newValueDict = {'SHIPDATE': x['shipdate'],
|
||||
'PUBLISHER': x['publisher'],
|
||||
'STATUS': 'Skipped',
|
||||
'COMIC': comicname,
|
||||
'COMICID': comicid,
|
||||
'ISSUEID': issueid,
|
||||
'WEEKNUMBER': x['weeknumber'],
|
||||
'ANNUALLINK': x['annuallink'],
|
||||
'YEAR': x['year'],
|
||||
'VOLUME': x['volume'],
|
||||
'SERIESYEAR': x['seriesyear'],
|
||||
'FORMAT': x['format']}
|
||||
myDB.upsert("weekly", newValueDict, controlValueDict)
|
||||
|
||||
logger.info('[PULL-LIST] Successfully populated pull-list into Mylar for the week of: ' + str(weeknumber))
|
||||
#set the last poll date/time here so that we don't start overwriting stuff too much...
|
||||
mylar.CONFIG.PULL_REFRESH = todaydate
|
||||
logger.info('[PULL-LIST] Successfully populated pull-list into Mylar for the week of: ' + str(weeknumber))
|
||||
#set the last poll date/time here so that we don't start overwriting stuff too much...
|
||||
mylar.CONFIG.PULL_REFRESH = todaydate
|
||||
|
||||
return {'status': 'success',
|
||||
'count': len(data),
|
||||
'weeknumber': weeknumber,
|
||||
'year': year}
|
||||
return {'status': 'success',
|
||||
'count': len(data),
|
||||
'weeknumber': weeknumber,
|
||||
'year': year}
|
||||
|
||||
else:
if str(r.status_code) == '666':
logger.warn('[%s] The error returned is: %s' % (r.status_code, r.headers))
return {'status': 'update_required'}
else:
logger.warn('[%s] The error returned is: %s' % (r.status_code, r.headers))
return {'status': 'failure'}
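The locg() changes in this file fix the status-code checks (requests exposes status_code as an int, so the old comparisons against bare string literals could never match) and add an explicit branch for the site's 666 "update required" response. A compact sketch of the same fetch-and-check flow using integer comparisons (the URL, params and return keys are placeholders, not the real endpoint):

import requests

def fetch_pull_list(url, params):
    try:
        r = requests.get(url, params=params, timeout=30)
    except requests.RequestException as e:
        return {'status': 'failure', 'error': str(e)}
    if r.status_code == 200:
        return {'status': 'success', 'data': r.json()}
    if r.status_code == 666:
        return {'status': 'update_required'}
    return {'status': 'failure', 'code': r.status_code}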
@ -228,7 +228,8 @@ class NZBGet(object):
'failed': False,
'issueid': nzbinfo['issueid'],
'comicid': nzbinfo['comicid'],
'apicall': True}
'apicall': True,
'ddl': False}
else:
logger.warn('Could not find completed NZBID %s in history' % nzbid)
return {'status': False}
|
|
@ -21,13 +21,14 @@ import logger

class Process(object):

def __init__(self, nzb_name, nzb_folder, failed=False, issueid=None, comicid=None, apicall=False):
def __init__(self, nzb_name, nzb_folder, failed=False, issueid=None, comicid=None, apicall=False, ddl=False):
self.nzb_name = nzb_name
self.nzb_folder = nzb_folder
self.failed = failed
self.issueid = issueid
self.comicid = comicid
self.apicall = apicall
self.ddl = ddl

def post_process(self):
if self.failed == '0':
@ -39,7 +40,7 @@ class Process(object):
retry_outside = False

if self.failed is False:
PostProcess = mylar.PostProcessor.PostProcessor(self.nzb_name, self.nzb_folder, self.issueid, queue=queue, comicid=self.comicid, apicall=self.apicall)
PostProcess = mylar.PostProcessor.PostProcessor(self.nzb_name, self.nzb_folder, self.issueid, queue=queue, comicid=self.comicid, apicall=self.apicall, ddl=self.ddl)
if any([self.nzb_name == 'Manual Run', self.nzb_name == 'Manual+Run', self.apicall is True, self.issueid is not None]):
threading.Thread(target=PostProcess.Process).start()
else:
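Process gains the ddl flag as a keyword argument with a False default, so every existing caller keeps working and the flag simply rides along to the PostProcessor. The general shape of that pattern, with stand-in classes rather than Mylar's own:

class Downstream(object):
    # stand-in for mylar.PostProcessor.PostProcessor
    def __init__(self, name, folder, ddl=False):
        self.name, self.folder, self.ddl = name, folder, ddl

class Wrapper(object):
    # optional flag with a safe default: old callers never notice it,
    # new callers opt in and the value is forwarded unchanged
    def __init__(self, nzb_name, nzb_folder, ddl=False):
        self.nzb_name = nzb_name
        self.nzb_folder = nzb_folder
        self.ddl = ddl

    def run(self):
        return Downstream(self.nzb_name, self.nzb_folder, ddl=self.ddl)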
|
@ -920,33 +920,12 @@ def torsend2client(seriesname, issue, seriesyear, linkit, site, pubhash=None):
|
|||
'authkey': mylar.AUTHKEY_32P,
|
||||
'id': linkit}
|
||||
|
||||
headers = None #{'Accept-encoding': 'gzip',
|
||||
# 'User-Agent': str(mylar.USER_AGENT)}
|
||||
#elif site == 'TPSE':
|
||||
# pass
|
||||
#linkit should be the magnet link since it's TPSE
|
||||
#url = linkit
|
||||
dfile = auth32p.info32p()
|
||||
file_download = dfile.downloadfile(payload, filepath)
|
||||
if file_download is False:
|
||||
return "fail"
|
||||
|
||||
#url = helpers.torrent_create('TPSE', linkit)
|
||||
|
||||
#if url.startswith('https'):
|
||||
# tpse_referrer = 'https://torrentproject.se/'
|
||||
#else:
|
||||
# tpse_referrer = 'http://torrentproject.se/'
|
||||
|
||||
#try:
|
||||
# scraper = cfscrape.create_scraper()
|
||||
# cf_cookievalue, cf_user_agent = scraper.get_tokens(url)
|
||||
# headers = {'Accept-encoding': 'gzip',
|
||||
# 'User-Agent': cf_user_agent}
|
||||
|
||||
#except Exception, e:
|
||||
# return "fail"
|
||||
|
||||
#logger.fdebug('Grabbing torrent from url:' + str(url))
|
||||
|
||||
#payload = None
|
||||
#verify = False
|
||||
logger.fdebug('[%s] Saved torrent file to : %s' % (site, filepath))
|
||||
|
||||
elif site == 'DEM':
|
||||
url = helpers.torrent_create('DEM', linkit)
|
||||
|
@ -991,7 +970,7 @@ def torsend2client(seriesname, issue, seriesyear, linkit, site, pubhash=None):
|
|||
payload = None
|
||||
verify = False
|
||||
|
||||
if site != 'Public Torrents':
|
||||
if site != 'Public Torrents' and site != '32P':
|
||||
if not verify:
|
||||
#32P throws back an insecure warning because it can't validate against the CA. The below suppresses the message just for 32P instead of being displayed.
|
||||
#disable SSL warnings - too many 'warning' messages about invalid certificates
|
||||
|
@ -1008,6 +987,7 @@ def torsend2client(seriesname, issue, seriesyear, linkit, site, pubhash=None):
|
|||
except ImportError:
|
||||
logger.warn('[EPIC FAILURE] Cannot load the requests module')
|
||||
return "fail"
|
||||
|
||||
try:
|
||||
scraper = cfscrape.create_scraper()
|
||||
if site == 'WWT':
|
||||
|
@ -1020,31 +1000,31 @@ def torsend2client(seriesname, issue, seriesyear, linkit, site, pubhash=None):
|
|||
#r = requests.get(url, params=payload, verify=verify, stream=True, headers=headers)
|
||||
except Exception, e:
|
||||
logger.warn('Error fetching data from %s (%s): %s' % (site, url, e))
|
||||
if site == '32P':
|
||||
logger.info('[TOR2CLIENT-32P] Retrying with 32P')
|
||||
if mylar.CONFIG.MODE_32P == 1:
|
||||
# if site == '32P':
|
||||
# logger.info('[TOR2CLIENT-32P] Retrying with 32P')
|
||||
# if mylar.CONFIG.MODE_32P == 1:
|
||||
|
||||
logger.info('[TOR2CLIENT-32P] Attempting to re-authenticate against 32P and poll new keys as required.')
|
||||
feed32p = auth32p.info32p(reauthenticate=True)
|
||||
feedinfo = feed32p.authenticate()
|
||||
# logger.info('[TOR2CLIENT-32P] Attempting to re-authenticate against 32P and poll new keys as required.')
|
||||
# feed32p = auth32p.info32p(reauthenticate=True)
|
||||
# feedinfo = feed32p.authenticate()
|
||||
|
||||
if feedinfo == "disable":
|
||||
helpers.disable_provider('32P')
|
||||
return "fail"
|
||||
# if feedinfo == "disable":
|
||||
# helpers.disable_provider('32P')
|
||||
# return "fail"
|
||||
|
||||
logger.debug('[TOR2CLIENT-32P] Creating CF Scraper')
|
||||
scraper = cfscrape.create_scraper()
|
||||
# logger.debug('[TOR2CLIENT-32P] Creating CF Scraper')
|
||||
# scraper = cfscrape.create_scraper()
|
||||
|
||||
try:
|
||||
r = scraper.get(url, params=payload, verify=verify, allow_redirects=True)
|
||||
except Exception, e:
|
||||
logger.warn('[TOR2CLIENT-32P] Unable to GET %s (%s): %s' % (site, url, e))
|
||||
return "fail"
|
||||
else:
|
||||
logger.warn('[TOR2CLIENT-32P] Unable to authenticate using existing RSS Feed given. Make sure that you have provided a CURRENT feed from 32P')
|
||||
return "fail"
|
||||
else:
|
||||
return "fail"
|
||||
# try:
|
||||
# r = scraper.get(url, params=payload, verify=verify, allow_redirects=True)
|
||||
# except Exception, e:
|
||||
# logger.warn('[TOR2CLIENT-32P] Unable to GET %s (%s): %s' % (site, url, e))
|
||||
# return "fail"
|
||||
# else:
|
||||
# logger.warn('[TOR2CLIENT-32P] Unable to authenticate using existing RSS Feed given. Make sure that you have provided a CURRENT feed from 32P')
|
||||
# return "fail"
|
||||
# else:
|
||||
# return "fail"
|
||||
|
||||
if any([site == 'DEM', site == 'WWT']) and any([str(r.status_code) == '403', str(r.status_code) == '404', str(r.status_code) == '503']):
|
||||
if str(r.status_code) != '503':
|
||||
|
@ -1069,15 +1049,6 @@ def torsend2client(seriesname, issue, seriesyear, linkit, site, pubhash=None):
|
|||
except Exception, e:
|
||||
return "fail"
|
||||
|
||||
if str(r.status_code) != '200':
|
||||
logger.warn('Unable to download torrent from ' + site + ' [Status Code returned: ' + str(r.status_code) + ']')
|
||||
if str(r.status_code) == '404' and site == '32P':
|
||||
logger.warn('[32P-CACHED_ENTRY] Entry found in 32P cache - incorrect. Torrent has probably been merged into a pack, or another series id. Removing from cache.')
|
||||
delete_cache_entry(linkit)
|
||||
else:
|
||||
logger.info('content: %s' % r.content)
|
||||
return "fail"
|
||||
|
||||
if any([site == 'DEM', site == 'WWT']):
|
||||
if r.headers.get('Content-Encoding') == 'gzip':
|
||||
buf = StringIO(r.content)
|
||||
|
@ -1091,8 +1062,9 @@ def torsend2client(seriesname, issue, seriesyear, linkit, site, pubhash=None):
|
|||
|
||||
logger.fdebug('[' + site + '] Saved torrent file to : ' + filepath)
|
||||
else:
|
||||
#tpse is magnet links only...
|
||||
filepath = linkit
|
||||
if site != '32P':
|
||||
#tpse is magnet links only...
|
||||
filepath = linkit
|
||||
|
||||
if mylar.USE_UTORRENT:
|
||||
uTC = utorrent.utorrentclient()
|
||||
|
|
|
@ -131,7 +131,8 @@ class SABnzbd(object):
|
|||
'failed': False,
|
||||
'issueid': nzbinfo['issueid'],
|
||||
'comicid': nzbinfo['comicid'],
|
||||
'apicall': True}
|
||||
'apicall': True,
|
||||
'ddl': False}
|
||||
break
|
||||
else:
|
||||
logger.info('no file found where it should be @ %s - is there another script that moves things after completion ?' % hq['storage'])
|
||||
|
@ -152,7 +153,8 @@ class SABnzbd(object):
|
|||
'failed': True,
|
||||
'issueid': sendresponse['issueid'],
|
||||
'comicid': sendresponse['comicid'],
|
||||
'apicall': True}
|
||||
'apicall': True,
|
||||
'ddl': False}
|
||||
break
|
||||
break
|
||||
|
||||
|
|
195  mylar/search.py
|
@ -16,7 +16,7 @@
|
|||
from __future__ import division
|
||||
|
||||
import mylar
|
||||
from mylar import logger, db, updater, helpers, parseit, findcomicfeed, notifiers, rsscheck, Failed, filechecker, auth32p, sabnzbd, nzbget, wwt #, getcomics
|
||||
from mylar import logger, db, updater, helpers, parseit, findcomicfeed, notifiers, rsscheck, Failed, filechecker, auth32p, sabnzbd, nzbget, wwt, getcomics
|
||||
|
||||
import feedparser
|
||||
import requests
|
||||
|
@ -44,7 +44,7 @@ def search_init(ComicName, IssueNumber, ComicYear, SeriesYear, Publisher, IssueD
|
|||
unaltered_ComicName = None
|
||||
if filesafe:
|
||||
if filesafe != ComicName and mode != 'want_ann':
|
||||
logger.info('[SEARCH] Special Characters exist within Series Title. Enabling search-safe Name : ' + filesafe)
|
||||
logger.info('[SEARCH] Special Characters exist within Series Title. Enabling search-safe Name : %s' % filesafe)
|
||||
if AlternateSearch is None or AlternateSearch == 'None':
|
||||
AlternateSearch = filesafe
|
||||
else:
|
||||
|
@ -60,7 +60,7 @@ def search_init(ComicName, IssueNumber, ComicYear, SeriesYear, Publisher, IssueD
|
|||
if Publisher:
|
||||
if Publisher == 'IDW Publishing':
|
||||
Publisher = 'IDW'
|
||||
logger.fdebug('Publisher is : ' + Publisher)
|
||||
logger.fdebug('Publisher is : %s' % Publisher)
|
||||
|
||||
if IssueArcID and not IssueID:
|
||||
issuetitle = helpers.get_issue_title(IssueArcID)
|
||||
|
@ -68,7 +68,7 @@ def search_init(ComicName, IssueNumber, ComicYear, SeriesYear, Publisher, IssueD
|
|||
issuetitle = helpers.get_issue_title(IssueID)
|
||||
|
||||
if issuetitle:
|
||||
logger.info('Issue Title given as : ' + issuetitle)
|
||||
logger.fdebug('Issue Title given as : %s' % issuetitle)
|
||||
else:
|
||||
logger.fdebug('Issue Title not found. Setting to None.')
|
||||
|
||||
|
@ -91,8 +91,8 @@ def search_init(ComicName, IssueNumber, ComicYear, SeriesYear, Publisher, IssueD
|
|||
oneoff = True
|
||||
if SARC:
|
||||
logger.fdebug("Story-ARC Search parameters:")
|
||||
logger.fdebug("Story-ARC: " + str(SARC))
|
||||
logger.fdebug("IssueArcID: " + str(IssueArcID))
|
||||
logger.fdebug("Story-ARC: %s" % SARC)
|
||||
logger.fdebug("IssueArcID: %s" % IssueArcID)
|
||||
|
||||
torprovider = []
|
||||
torp = 0
|
||||
|
@ -177,14 +177,18 @@ def search_init(ComicName, IssueNumber, ComicYear, SeriesYear, Publisher, IssueD

prov_order, torznab_info, newznab_info = provider_sequence(nzbprovider, torprovider, newznab_hosts, torznab_hosts, ddlprovider)
# end provider order sequencing
logger.info('search provider order is ' + str(prov_order))
logger.fdebug('search provider order is ' + str(prov_order))

#fix for issue dates between Nov-Dec/(Jan-Feb-Mar)
IssDt = str(IssueDate)[5:7]
if IssDt == "12" or IssDt == "11" or IssDt == "01" or IssDt == "02" or IssDt == "03":
if any([IssDt == "12", IssDt == "11", IssDt == "01", IssDt == "02", IssDt == "03"]):
IssDateFix = IssDt
else:
IssDateFix = "no"
if StoreDate is not None:
StDt = str(StoreDate)[5:7]
if any([StDt == "10", StDt == "12", StDt == "11", StDt == "01", StDt == "02", StDt == "03"]):
IssDateFix = StDt

searchcnt = 0
srchloop = 1
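The date-fix block above slices the month out of the ISO issue date and, as of this change, also out of the store date, so issues cover-dated around a year boundary are not rejected by the later year match. The same rule as a standalone helper (the names and month set mirror the [5:7] slice above, but the function itself is illustrative):

def issue_date_fix(issue_date, store_date=None):
    # dates are ISO 'YYYY-MM-DD' strings; returns the month to allow, else 'no'
    fix = 'no'
    month = str(issue_date)[5:7]
    if month in ('11', '12', '01', '02', '03'):
        fix = month
    if store_date is not None:
        st_month = str(store_date)[5:7]
        if st_month in ('10', '11', '12', '01', '02', '03'):
            fix = st_month
    return fix

# issue_date_fix('2018-12-01', '2019-01-09') -> '01'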
@ -326,10 +330,13 @@ def search_init(ComicName, IssueNumber, ComicYear, SeriesYear, Publisher, IssueD
|
|||
#sure it's not disabled (it gets auto-disabled on maxing out the API hits)
|
||||
prov_count+=1
|
||||
continue
|
||||
elif all([searchprov == '32P', checked_once is True]) or all ([searchprov == 'Public Torrents', checked_once is True]) or all([searchprov == 'experimental', checked_once is True]) or all([searchprov == 'DDL', checked_once is True]):
|
||||
elif all([searchprov == '32P', checked_once is True]) or all([searchprov == 'DDL', checked_once is True]) or all ([searchprov == 'Public Torrents', checked_once is True]) or all([searchprov == 'experimental', checked_once is True]) or all([searchprov == 'DDL', checked_once is True]):
|
||||
prov_count+=1
|
||||
continue
|
||||
if searchmode == 'rss':
|
||||
if searchprov.lower() == 'ddl':
|
||||
prov_count+=1
|
||||
continue
|
||||
findit = NZB_SEARCH(ComicName, IssueNumber, ComicYear, SeriesYear, Publisher, IssueDate, StoreDate, searchprov, send_prov_count, IssDateFix, IssueID, UseFuzzy, newznab_host, ComicVersion=ComicVersion, SARC=SARC, IssueArcID=IssueArcID, RSS="yes", ComicID=ComicID, issuetitle=issuetitle, unaltered_ComicName=unaltered_ComicName, oneoff=oneoff, cmloopit=cmloopit, manual=manual, torznab_host=torznab_host, digitaldate=digitaldate, booktype=booktype)
|
||||
if findit['status'] is False:
|
||||
if AlternateSearch is not None and AlternateSearch != "None":
|
||||
|
@ -351,7 +358,7 @@ def search_init(ComicName, IssueNumber, ComicYear, SeriesYear, Publisher, IssueD
|
|||
|
||||
else:
|
||||
findit = NZB_SEARCH(ComicName, IssueNumber, ComicYear, SeriesYear, Publisher, IssueDate, StoreDate, searchprov, send_prov_count, IssDateFix, IssueID, UseFuzzy, newznab_host, ComicVersion=ComicVersion, SARC=SARC, IssueArcID=IssueArcID, RSS="no", ComicID=ComicID, issuetitle=issuetitle, unaltered_ComicName=unaltered_ComicName, allow_packs=allow_packs, oneoff=oneoff, cmloopit=cmloopit, manual=manual, torznab_host=torznab_host, torrentid_32p=torrentid_32p, digitaldate=digitaldate, booktype=booktype)
|
||||
if all([searchprov == '32P', checked_once is False]) or all([searchprov == 'Public Torrents', checked_once is False]) or all([searchprov == 'experimental', checked_once is False]):
|
||||
if all([searchprov == '32P', checked_once is False]) or all([searchprov.lower() == 'ddl', checked_once is False]) or all([searchprov == 'Public Torrents', checked_once is False]) or all([searchprov == 'experimental', checked_once is False]):
|
||||
checked_once = True
|
||||
if findit['status'] is False:
|
||||
if AlternateSearch is not None and AlternateSearch != "None":
|
||||
|
@ -417,7 +424,7 @@ def search_init(ComicName, IssueNumber, ComicYear, SeriesYear, Publisher, IssueD
|
|||
searchprov = mylar.TMP_PROV
|
||||
return findit, searchprov
|
||||
else:
|
||||
logger.info('findit: %s' % findit)
|
||||
logger.fdebug('findit: %s' % findit)
|
||||
#if searchprov == '32P':
|
||||
# pass
|
||||
if manualsearch is None:
|
||||
|
@ -615,9 +622,9 @@ def NZB_SEARCH(ComicName, IssueNumber, ComicYear, SeriesYear, Publisher, IssueDa
|
|||
if nzbprov == 'ddl':
|
||||
cmname = re.sub("%20", " ", str(comsrc))
|
||||
logger.fdebug('Sending request to DDL site for : %s %s' % (findcomic, isssearch))
|
||||
#b = getcomics.GC(query=findcomic + ' ' + isssearch)
|
||||
#bb = b.search()
|
||||
logger.info('bb returned from DDL: %s' % bb)
|
||||
b = getcomics.GC(query='%s %s' % (findcomic, isssearch))
|
||||
bb = b.search()
|
||||
#logger.info('bb returned from DDL: %s' % bb)
|
||||
elif RSS == "yes":
|
||||
if nzbprov == '32P' or nzbprov == 'Public Torrents':
|
||||
cmname = re.sub("%20", " ", str(comsrc))
|
||||
|
@ -644,7 +651,7 @@ def NZB_SEARCH(ComicName, IssueNumber, ComicYear, SeriesYear, Publisher, IssueDa
|
|||
if ComicName[:17] == '0-Day Comics Pack':
|
||||
searchterm = {'series': ComicName, 'issue': StoreDate[8:10], 'volume': StoreDate[5:7], 'torrentid_32p': None}
|
||||
else:
|
||||
searchterm = {'series': ComicName, 'id': ComicID, 'issue': findcomiciss, 'volume': ComicVersion, 'publisher': Publisher, 'torrentid_32p': torrentid_32p}
|
||||
searchterm = {'series': ComicName, 'id': ComicID, 'issue': findcomiciss, 'volume': ComicVersion, 'publisher': Publisher, 'torrentid_32p': torrentid_32p, 'booktype': booktype}
|
||||
#first we find the id on the serieslist of 32P
|
||||
#then we call the ajax against the id and issue# and volume (if exists)
|
||||
a = auth32p.info32p(searchterm=searchterm)
|
||||
|
@ -798,7 +805,7 @@ def NZB_SEARCH(ComicName, IssueNumber, ComicYear, SeriesYear, Publisher, IssueDa
|
|||
elif nzbprov == 'experimental':
|
||||
#bb = parseit.MysterBinScrape(comsearch[findloop], comyear)
|
||||
logger.info('sending %s to experimental search' % findcomic)
|
||||
bb = findcomicfeed.Startit(findcomic, isssearch, comyear, ComicVersion, IssDateFix)
|
||||
bb = findcomicfeed.Startit(findcomic, isssearch, comyear, ComicVersion, IssDateFix, booktype)
|
||||
# since the regexs in findcomicfeed do the 3 loops, lets force the exit after
|
||||
cmloopit == 1
|
||||
|
||||
|
@ -834,7 +841,7 @@ def NZB_SEARCH(ComicName, IssueNumber, ComicYear, SeriesYear, Publisher, IssueDa
|
|||
pack_warning = True
|
||||
continue
|
||||
|
||||
logger.fdebug("checking search result: " + entry['title'])
|
||||
logger.fdebug("checking search result: %s" % entry['title'])
|
||||
#some nzbsites feel that comics don't deserve a nice regex to strip the crap from the header, the end result is that we're
|
||||
#dealing with the actual raw header which causes incorrect matches below.
|
||||
#this is a temporary cut from the experimental search option (findcomicfeed) as it does this part well usually.
|
||||
|
@ -889,7 +896,7 @@ def NZB_SEARCH(ComicName, IssueNumber, ComicYear, SeriesYear, Publisher, IssueDa
|
|||
comsize_b = entry['size']
|
||||
elif entry['site'] == 'DDL':
|
||||
comsize_b = helpers.human2bytes(entry['size'])
|
||||
except:
|
||||
except Exception as e:
|
||||
tmpsz = entry.enclosures[0]
|
||||
comsize_b = tmpsz['length']
|
||||
|
||||
|
@ -930,20 +937,20 @@ def NZB_SEARCH(ComicName, IssueNumber, ComicYear, SeriesYear, Publisher, IssueDa
else:
if entry['title'][:17] != '0-Day Comics Pack':
comsize_m = helpers.human_size(comsize_b)
logger.fdebug("size given as: " + str(comsize_m))
logger.fdebug('size given as: %s' % comsize_m)
#----size constraints.
#if it's not within size constraints - dump it now and save some time.
if mylar.CONFIG.USE_MINSIZE:
conv_minsize = helpers.human2bytes(mylar.CONFIG.MINSIZE + "M")
logger.fdebug("comparing Min threshold " + str(conv_minsize) + " .. to .. nzb " + str(comsize_b))
logger.fdebug('comparing Min threshold %s .. to .. nzb %s' % (conv_minsize, comsize_b))
if int(conv_minsize) > int(comsize_b):
logger.fdebug("Failure to meet the Minimum size threshold - skipping")
logger.fdebug('Failure to meet the Minimum size threshold - skipping')
continue
if mylar.CONFIG.USE_MAXSIZE:
conv_maxsize = helpers.human2bytes(mylar.CONFIG.MAXSIZE + "M")
logger.fdebug("comparing Max threshold " + str(conv_maxsize) + " .. to .. nzb " + str(comsize_b))
logger.fdebug('comparing Max threshold %s .. to .. nzb %s' % (conv_maxsize, comsize_b))
if int(comsize_b) > int(conv_maxsize):
logger.fdebug("Failure to meet the Maximum size threshold - skipping")
logger.fdebug('Failure to meet the Maximum size threshold - skipping')
continue

#---- date constraints.
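The size-constraint block converts the configured MINSIZE/MAXSIZE values (megabytes) into bytes and drops any result outside that window before doing the more expensive matching. A sketch of the same filter with a tiny human2bytes stand-in (the real helper lives in helpers.py and accepts more suffixes):

def human2bytes(s):
    # '50M' -> 52428800; deliberately minimal
    units = {'K': 1024, 'M': 1024 ** 2, 'G': 1024 ** 3}
    return int(float(s[:-1]) * units[s[-1].upper()])

def within_size_window(size_bytes, min_mb=None, max_mb=None):
    if min_mb and size_bytes < human2bytes('%sM' % min_mb):
        return False   # too small, likely a sample or a lone cover
    if max_mb and size_bytes > human2bytes('%sM' % max_mb):
        return False   # too big, probably a pack or the wrong format
    return True

# within_size_window(30 * 1024 ** 2, min_mb=10, max_mb=100) -> True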
@ -1006,7 +1013,7 @@ def NZB_SEARCH(ComicName, IssueNumber, ComicYear, SeriesYear, Publisher, IssueDa
|
|||
else:
|
||||
postdate_int = time.mktime(dateconv[:len(dateconv) -1])
|
||||
except:
|
||||
logger.warn('Unable to parse posting date from provider result set for :' + entry['title'])
|
||||
logger.warn('Unable to parse posting date from provider result set for : %s' % entry['title'])
|
||||
continue
|
||||
|
||||
if all([digitaldate != '0000-00-00', digitaldate is not None]):
|
||||
|
@ -1016,7 +1023,6 @@ def NZB_SEARCH(ComicName, IssueNumber, ComicYear, SeriesYear, Publisher, IssueDa
|
|||
i = 1
|
||||
|
||||
while i <= 1:
|
||||
logger.info('i: %s' % i)
|
||||
if i == 0:
|
||||
usedate = digitaldate
|
||||
else:
|
||||
|
@ -1061,23 +1067,23 @@ def NZB_SEARCH(ComicName, IssueNumber, ComicYear, SeriesYear, Publisher, IssueDa
|
|||
#logger.info('dateconv2: %s' % dateconv2.date())
|
||||
#logger.info('digconv2: %s' % digconv2.date())
|
||||
if digitaldate != '0000-00-00' and dateconv2.date() >= digconv2.date():
|
||||
logger.fdebug(str(pubdate) + ' is after DIGITAL store date of ' + str(digitaldate))
|
||||
logger.fdebug('%s is after DIGITAL store date of %s' % (pubdate, digitaldate))
|
||||
elif dateconv2.date() < issconv2.date():
|
||||
logger.fdebug('[CONV]pubdate: %s < storedate: %s' % (dateconv2.date(), issconv2.date()))
|
||||
logger.fdebug(str(pubdate) + ' is before store date of ' + str(stdate) + '. Ignoring search result as this is not the right issue.')
|
||||
logger.fdebug('%s is before store date of %s. Ignoring search result as this is not the right issue.' % (pubdate, stdate))
|
||||
continue
|
||||
else:
|
||||
logger.fdebug(str(pubdate) + ' is after store date of ' + str(stdate))
|
||||
logger.fdebug('%s is after store date of %s' % (pubdate, stdate))
|
||||
except:
|
||||
#if the above fails, drop down to the integer compare method as a failsafe.
|
||||
if digitaldate != '0000-00-00' and postdate_int >= digitaldate_int:
|
||||
logger.fdebug(str(pubdate) + ' is after DIGITAL store date of ' + str(digitaldate))
|
||||
logger.fdebug('%s is after DIGITAL store date of %s' % (pubdate, digitaldate))
|
||||
elif postdate_int < issuedate_int:
|
||||
logger.fdebug('[INT]pubdate: %s < storedate: %s' % (postdate_int, issuedate_int))
|
||||
logger.fdebug(str(pubdate) + ' is before store date of ' + str(stdate) + '. Ignoring search result as this is not the right issue.')
|
||||
logger.fdebug('%s is before store date of %s. Ignoring search result as this is not the right issue.' % (pubdate, stdate))
|
||||
continue
|
||||
else:
|
||||
logger.fdebug(str(pubdate) + ' is after store date of ' + str(stdate))
|
||||
logger.fdebug('%s is after store date of %s' % (pubdate, stdate))
|
||||
# -- end size constaints.
|
||||
|
||||
if '(digital first)' in ComicTitle.lower(): #entry['title'].lower():
|
||||
|
@ -1088,7 +1094,7 @@ def NZB_SEARCH(ComicName, IssueNumber, ComicYear, SeriesYear, Publisher, IssueDa
|
|||
else:
|
||||
thisentry = ComicTitle #entry['title']
|
||||
|
||||
logger.fdebug("Entry: " + thisentry)
|
||||
logger.fdebug('Entry: %s' % thisentry)
|
||||
cleantitle = thisentry
|
||||
|
||||
if 'mixed format' in cleantitle.lower():
|
||||
|
@ -1106,7 +1112,7 @@ def NZB_SEARCH(ComicName, IssueNumber, ComicYear, SeriesYear, Publisher, IssueDa
parsed_comic = p_comic.listFiles()

logger.fdebug('parsed_info: %s' % parsed_comic)
if parsed_comic['parse_status'] == 'success':
if parsed_comic['parse_status'] == 'success' and (all([booktype is None, parsed_comic['booktype'] == 'issue']) or all([booktype == 'Print', parsed_comic['booktype'] == 'issue']) or all([booktype == 'One-Shot', parsed_comic['booktype'] == 'issue']) or booktype == parsed_comic['booktype']):
try:
fcomic = filechecker.FileChecker(watchcomic=ComicName)
filecomic = fcomic.matchIT(parsed_comic)
@ -1115,8 +1121,12 @@ def NZB_SEARCH(ComicName, IssueNumber, ComicYear, SeriesYear, Publisher, IssueDa
continue
else:
logger.fdebug('match_check: %s' % filecomic)
elif booktype != parsed_comic['booktype']:
logger.fdebug('Booktypes do not match. Looking for %s, this is a %s. Ignoring this result.' % (booktype, parsed_comic['booktype']))
continue
else:
logger.fdebug('Unable to parse name properly: %s' % filecomic)
logger.fdebug('Unable to parse name properly: %s. Ignoring this result' % filecomic)
continue

#adjust for covers only by removing them entirely...
vers4year = "no"
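The widened condition above only lets a result through when the parsed booktype agrees with the series type, where a series typed as None, 'Print' or 'One-Shot' accepts an ordinary 'issue' result. The same rule reduced to a predicate (an illustrative helper, not part of the diff):

def booktype_matches(wanted, parsed):
    # wanted is the series' Type column, parsed comes from the filename parser
    if parsed == 'issue' and wanted in (None, 'Print', 'One-Shot'):
        return True
    return wanted == parsed

# booktype_matches('TPB', 'issue') -> False, so that result is skipped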
@ -1172,7 +1182,7 @@ def NZB_SEARCH(ComicName, IssueNumber, ComicYear, SeriesYear, Publisher, IssueDa
|
|||
elif ComVersChk == 0:
|
||||
logger.fdebug("Series version detected as V1 (only series in existance with that title). Bypassing Year/Volume check")
|
||||
yearmatch = "true"
|
||||
elif UseFuzzy == "0" or UseFuzzy == "2" or UseFuzzy is None or IssDateFix != "no":
|
||||
elif any([UseFuzzy == "0", UseFuzzy == "2", UseFuzzy is None, IssDateFix != "no"]) and parsed_comic['issue_year'] is not None:
|
||||
if parsed_comic['issue_year'][:-2] == '19' or parsed_comic['issue_year'][:-2] == '20':
|
||||
logger.fdebug('year detected: %s' % parsed_comic['issue_year'])
|
||||
result_comyear = parsed_comic['issue_year']
|
||||
|
@ -1267,6 +1277,11 @@ def NZB_SEARCH(ComicName, IssueNumber, ComicYear, SeriesYear, Publisher, IssueDa
|
|||
|
||||
downloadit = False
|
||||
#-------------------------------------fix this!
|
||||
try:
|
||||
pack_test = entry['pack']
|
||||
except Exception as e:
|
||||
pack_test = False
|
||||
|
||||
if nzbprov == 'Public Torrents' and any([entry['site'] == 'WWT', entry['site'] == 'DEM']):
|
||||
if entry['site'] == 'WWT':
|
||||
nzbprov = 'WWT'
|
||||
|
@ -1275,7 +1290,7 @@ def NZB_SEARCH(ComicName, IssueNumber, ComicYear, SeriesYear, Publisher, IssueDa
|
|||
|
||||
if all([nzbprov == '32P', allow_packs == True, RSS == 'no']):
|
||||
logger.fdebug('pack:' + entry['pack'])
|
||||
if all([nzbprov == '32P', RSS == 'no', allow_packs == True]) and any([entry['pack'] == '1', entry['pack'] == '2']):
|
||||
if (all([nzbprov == '32P', RSS == 'no', allow_packs == True]) and any([entry['pack'] == '1', entry['pack'] == '2'])) or (all([nzbprov == 'ddl', pack_test is True])): #allow_packs is True
|
||||
if nzbprov == '32P':
|
||||
if entry['pack'] == '2':
|
||||
logger.fdebug('[PACK-QUEUE] Diamond FreeLeech Pack detected.')
|
||||
|
@ -1283,21 +1298,26 @@ def NZB_SEARCH(ComicName, IssueNumber, ComicYear, SeriesYear, Publisher, IssueDa
|
|||
logger.fdebug('[PACK-QUEUE] Normal Pack detected. Checking available inkdrops prior to downloading.')
|
||||
else:
|
||||
logger.fdebug('[PACK-QUEUE] Invalid Pack.')
|
||||
else:
|
||||
logger.fdebug('[PACK-QUEUE] DDL Pack detected for %s.' % entry['filename'])
|
||||
|
||||
#find the pack range.
|
||||
pack_issuelist = None
|
||||
issueid_info = None
|
||||
if not entry['title'].startswith('0-Day Comics Pack'):
|
||||
pack_issuelist = entry['issues']
|
||||
issueid_info = helpers.issue_find_ids(ComicName, ComicID, pack_issuelist, IssueNumber)
|
||||
if issueid_info['valid'] == True:
|
||||
logger.info('Issue Number ' + IssueNumber + ' exists within pack. Continuing.')
|
||||
else:
|
||||
logger.fdebug('Issue Number ' + IssueNumber + ' does NOT exist within this pack. Skipping')
|
||||
continue
|
||||
#find the pack range.
|
||||
pack_issuelist = None
|
||||
issueid_info = None
|
||||
if not entry['title'].startswith('0-Day Comics Pack'):
|
||||
pack_issuelist = entry['issues']
|
||||
issueid_info = helpers.issue_find_ids(ComicName, ComicID, pack_issuelist, IssueNumber)
|
||||
if issueid_info['valid'] == True:
|
||||
logger.info('Issue Number %s exists within pack. Continuing.' % IssueNumber)
|
||||
else:
|
||||
logger.fdebug('Issue Number %s does NOT exist within this pack. Skipping' % IssueNumber)
|
||||
continue
|
||||
#pack support.
|
||||
nowrite = False
|
||||
nzbid = generate_id(nzbprov, entry['link'])
|
||||
if all([nzbprov == 'ddl', 'getcomics' in entry['link']]):
|
||||
nzbid = entry['id']
|
||||
else:
|
||||
nzbid = generate_id(nzbprov, entry['link'])
|
||||
if manual is not True:
|
||||
downloadit = True
|
||||
else:
|
||||
|
@ -1349,7 +1369,9 @@ def NZB_SEARCH(ComicName, IssueNumber, ComicYear, SeriesYear, Publisher, IssueDa
|
|||
logger.fdebug("issue we are looking for is : %s" % findcomiciss)
|
||||
logger.fdebug("integer value of issue we are looking for : %s" % intIss)
|
||||
else:
|
||||
if intIss is None:
|
||||
if intIss is None and all([booktype == 'One-Shot', helpers.issuedigits(parsed_comic['issue_number']) == 1000]):
|
||||
intIss = 1000
|
||||
else:
|
||||
intIss = 9999999999
|
||||
if parsed_comic['issue_number'] is not None:
|
||||
logger.fdebug("issue we found for is : %s" % parsed_comic['issue_number'])
|
||||
|
@ -1362,8 +1384,7 @@ def NZB_SEARCH(ComicName, IssueNumber, ComicYear, SeriesYear, Publisher, IssueDa
|
|||
if parsed_comic['issue_number'] is None:
|
||||
pc_in = None
|
||||
else:
|
||||
pc_in = int(parsed_comic['issue_number'])
|
||||
|
||||
pc_in = helpers.issuedigits(parsed_comic['issue_number'])
|
||||
#issue comparison now as well
|
||||
if int(intIss) == int(comintIss) or all([cmloopit == 4, findcomiciss is None, pc_in is None]) or all([cmloopit == 4, findcomiciss is None, pc_in == 1]):
|
||||
nowrite = False
|
||||
|
@ -1371,6 +1392,7 @@ def NZB_SEARCH(ComicName, IssueNumber, ComicYear, SeriesYear, Publisher, IssueDa
|
|||
nzbid = generate_id(nzbprov, entry['id'])
|
||||
elif all([nzbprov == 'ddl', 'getcomics' in entry['link']]):
|
||||
nzbid = entry['id']
|
||||
entry['title'] = entry['filename']
|
||||
else:
|
||||
nzbid = generate_id(nzbprov, entry['link'])
|
||||
if manual is not True:
|
||||
|
@ -1457,10 +1479,10 @@ def NZB_SEARCH(ComicName, IssueNumber, ComicYear, SeriesYear, Publisher, IssueDa
|
|||
links = entry['link']
|
||||
searchresult = searcher(nzbprov, nzbname, mylar.COMICINFO, links, IssueID, ComicID, tmpprov, newznab=newznab_host, torznab=torznab_host, rss=RSS)
|
||||
|
||||
if searchresult == 'downloadchk-fail' or searchresult == 'double-pp':
|
||||
if any([searchresult == 'downloadchk-fail', searchresult == 'double-pp']):
|
||||
foundc['status'] = False
|
||||
continue
|
||||
elif searchresult == 'torrent-fail' or searchresult == 'nzbget-fail' or searchresult == 'sab-fail' or searchresult == 'blackhole-fail':
|
||||
elif any([searchresult == 'torrent-fail', searchresult == 'nzbget-fail', searchresult == 'sab-fail', searchresult == 'blackhole-fail', searchresult == 'ddl-fail']):
|
||||
foundc['status'] = False
|
||||
return foundc
|
||||
|
||||
|
@ -1492,9 +1514,12 @@ def NZB_SEARCH(ComicName, IssueNumber, ComicYear, SeriesYear, Publisher, IssueDa
|
|||
if 'Public Torrents' in tmpprov and any([nzbprov == 'WWT', nzbprov == 'DEM']):
|
||||
tmpprov = re.sub('Public Torrents', nzbprov, tmpprov)
|
||||
foundcomic.append("yes")
|
||||
|
||||
if mylar.COMICINFO[0]['pack']:
|
||||
issinfo = mylar.COMICINFO[0]['pack_issuelist']
|
||||
logger.info('mylar.COMICINFO: %s' % mylar.COMICINFO)
|
||||
if mylar.COMICINFO[0]['pack'] is True:
|
||||
try:
|
||||
issinfo = mylar.COMICINFO[0]['pack_issuelist']
|
||||
except:
|
||||
issinfo = mylar.COMICINFO['pack_issuelist']
|
||||
if issinfo is not None:
|
||||
#we need to get EVERY issue ID within the pack and update the log to reflect that they're being downloaded via a pack.
|
||||
logger.fdebug("Found matching comic within pack...preparing to send to Updater with IssueIDs: " + str(issueid_info) + " and nzbname of " + str(nzbname))
|
||||
|
@ -1502,9 +1527,9 @@ def NZB_SEARCH(ComicName, IssueNumber, ComicYear, SeriesYear, Publisher, IssueDa
|
|||
for isid in issinfo['issues']:
|
||||
updater.nzblog(isid['issueid'], nzbname, ComicName, SARC=SARC, IssueArcID=IssueArcID, id=nzbid, prov=tmpprov, oneoff=oneoff)
|
||||
updater.foundsearch(ComicID, isid['issueid'], mode='series', provider=tmpprov)
|
||||
notify_snatch(nzbname, sent_to, mylar.COMICINFO[0]['modcomicname'], mylar.COMICINFO[0]['comyear'], mylar.COMICINFO[0]['pack_numbers'], nzbprov)
|
||||
notify_snatch(sent_to, mylar.COMICINFO[0]['ComicName'], mylar.COMICINFO[0]['comyear'], mylar.COMICINFO[0]['pack_numbers'], nzbprov, True)
|
||||
else:
|
||||
notify_snatch(nzbname, sent_to, mylar.COMICINFO[0]['modcomicname'], mylar.COMICINFO[0]['comyear'], None, nzbprov)
|
||||
notify_snatch(sent_to, mylar.COMICINFO[0]['ComicName'], mylar.COMICINFO[0]['comyear'], None, nzbprov, True)
|
||||
|
||||
else:
|
||||
if alt_nzbname is None or alt_nzbname == '':
|
||||
|
@ -1520,7 +1545,7 @@ def NZB_SEARCH(ComicName, IssueNumber, ComicYear, SeriesYear, Publisher, IssueDa
|
|||
cyear = ComicYear
|
||||
else:
|
||||
cyear = comyear
|
||||
notify_snatch(nzbname, sent_to, helpers.filesafe(modcomicname), cyear, IssueNumber, nzbprov)
|
||||
notify_snatch(sent_to, ComicName, cyear, IssueNumber, nzbprov, False)
|
||||
prov_count == 0
|
||||
mylar.TMP_PROV = nzbprov
|
||||
|
||||
|
@ -1676,7 +1701,7 @@ def searchforissue(issueid=None, new=False, rsscheck=None, manual=False):
|
|||
UseFuzzy = None
|
||||
ComicVersion = comic['Volume']
|
||||
TorrentID_32p = None
|
||||
booktype = None
|
||||
booktype = comic['Type']
|
||||
else:
|
||||
Comicname_filesafe = comic['ComicName_Filesafe']
|
||||
SeriesYear = comic['ComicYear']
|
||||
|
@ -1770,7 +1795,7 @@ def searchforissue(issueid=None, new=False, rsscheck=None, manual=False):
StoreDate = result['ReleaseDate']
DigitalDate = result['DigitalDate']
TorrentID_32p = None
booktype = None
booktype = result['Type']
elif mode == 'pullwant':
ComicName = result['COMIC']
Comicname_filesafe = helpers.filesafe(ComicName)
@ -1787,7 +1812,7 @@ def searchforissue(issueid=None, new=False, rsscheck=None, manual=False):
IssueDate = result['SHIPDATE']
StoreDate = IssueDate
DigitalDate = '0000-00-00'
booktype = None
booktype = result['format']
else:
comic = myDB.selectone('SELECT * FROM comics where ComicID=?', [ComicID]).fetchone()
if mode == 'want_ann':
@ -1865,6 +1890,7 @@ def searchIssueIDList(issuelist):
UseFuzzy = comic['UseFuzzy']
ComicVersion = comic['ComicVersion']
TorrentID_32p = comic['TorrentID_32P']
booktype = comic['Type']
if issue['IssueDate'] == None:
IssueYear = comic['ComicYear']
else:
@ -1874,7 +1900,7 @@ def searchIssueIDList(issuelist):
else:
AllowPacks = False
foundNZB, prov = search_init(comic['ComicName'], issue['Issue_Number'], str(IssueYear), comic['ComicYear'], Publisher, issue['IssueDate'], issue['ReleaseDate'], issue['IssueID'], AlternateSearch, UseFuzzy, ComicVersion, SARC=None, IssueArcID=None, mode=mode, ComicID=issue['ComicID'], filesafe=comic['ComicName_Filesafe'], allow_packs=AllowPacks, torrentid_32p=TorrentID_32p, digitaldate=issue['DigitalDate'])
foundNZB, prov = search_init(comic['ComicName'], issue['Issue_Number'], str(IssueYear), comic['ComicYear'], Publisher, issue['IssueDate'], issue['ReleaseDate'], issue['IssueID'], AlternateSearch, UseFuzzy, ComicVersion, SARC=None, IssueArcID=None, mode=mode, ComicID=issue['ComicID'], filesafe=comic['ComicName_Filesafe'], allow_packs=AllowPacks, torrentid_32p=TorrentID_32p, digitaldate=issue['DigitalDate'], booktype=booktype)
if foundNZB['status'] is True:
updater.foundsearch(ComicID=issue['ComicID'], IssueID=issue['IssueID'], mode=mode, provider=prov, hash=foundNZB['info']['t_hash'])
logger.info('Completed search request.')
@ -2289,13 +2315,16 @@ def searcher(nzbprov, nzbname, comicinfo, link, IssueID, ComicID, tmpprov, direc
sent_to = None
t_hash = None
if mylar.CONFIG.ENABLE_DDL is True and nzbprov == 'ddl':
ggc = getcomics.GC('nope')
sendsite = ggc.loadsite(os.path.join(mylar.CONFIG.CACHE_DIR, 'getcomics-' + nzbid), link)
ddl_it = ggc.parse_downloadresults(os.path.join(mylar.CONFIG.CACHE_DIR, 'getcomics-' + nzbid))
ggc = getcomics.GC(issueid=IssueID, comicid=ComicID)
sendsite = ggc.loadsite(nzbid, link)
ddl_it = ggc.parse_downloadresults(nzbid, link)
logger.info("ddl status response: %s" % ddl_it)
if ddl_it[0]['status'] == 'success':
nzbname = ddl_it[0]['filename']
logger.info('Successfully retrieved %s from DDL site' % (nzbname))
if ddl_it['success'] is True:
logger.info('Successfully snatched %s from DDL site. It is currently being queued to download in position %s' % (nzbname, mylar.DDL_QUEUE.qsize()))
else:
logger.info('Failed to retrieve %s from the DDL site.' % (nzbname))
return "ddl-fail"
sent_to = "is downloading it directly via DDL"
elif mylar.USE_BLACKHOLE and all([nzbprov != '32P', nzbprov != 'WWT', nzbprov != 'DEM', nzbprov != 'torznab']):
@ -2670,37 +2699,41 @@ def searcher(nzbprov, nzbname, comicinfo, link, IssueID, ComicID, tmpprov, direc
if '[RSS]' in tmpprov: tmpprov = re.sub('\[RSS\]', '', tmpprov).strip()
updater.nzblog(IssueID, nzbname, ComicName, SARC=SARC, IssueArcID=IssueArcID, id=nzbid, prov=tmpprov, alt_nzbname=alt_nzbname, oneoff=oneoff)
#send out notifications for on snatch after the updater incase notification fails (it would bugger up the updater/pp scripts)
notify_snatch(nzbname, sent_to, helpers.filesafe(modcomicname), comyear, IssueNumber, nzbprov)
notify_snatch(sent_to, ComicName, comyear, IssueNumber, nzbprov, False)
mylar.TMP_PROV = nzbprov
return return_val
def notify_snatch(nzbname, sent_to, modcomicname, comyear, IssueNumber, nzbprov):
if IssueNumber is not None:
snline = '%s (%s) #%s snatched!' % (modcomicname, comyear, IssueNumber)
def notify_snatch(sent_to, comicname, comyear, IssueNumber, nzbprov, pack):
if pack is False:
snline = 'Issue snatched!'
else:
snline = '%s (%s) snatched!' % (modcomicname, comyear)
snline = 'Pack snatched!'
if IssueNumber is not None:
snatched_name = '%s (%s) #%s' % (comicname, comyear, IssueNumber)
else:
snatched_name= '%s (%s)' % (comicname, comyear)
if mylar.CONFIG.PROWL_ENABLED and mylar.CONFIG.PROWL_ONSNATCH:
logger.info(u"Sending Prowl notification")
prowl = notifiers.PROWL()
prowl.notify(nzbname, "Download started using " + sent_to)
prowl.notify(snatched_name, "Download started using " + sent_to)
if mylar.CONFIG.NMA_ENABLED and mylar.CONFIG.NMA_ONSNATCH:
logger.info(u"Sending NMA notification")
nma = notifiers.NMA()
nma.notify(snline=snline, snatched_nzb=nzbname, sent_to=sent_to, prov=nzbprov)
nma.notify(snline=snline, snatched_nzb=snatched_name, sent_to=sent_to, prov=nzbprov)
if mylar.CONFIG.PUSHOVER_ENABLED and mylar.CONFIG.PUSHOVER_ONSNATCH:
logger.info(u"Sending Pushover notification")
pushover = notifiers.PUSHOVER()
pushover.notify(snline, snatched_nzb=nzbname, sent_to=sent_to, prov=nzbprov)
pushover.notify(snline, snatched_nzb=snatched_name, prov=nzbprov, sent_to=sent_to)
if mylar.CONFIG.BOXCAR_ENABLED and mylar.CONFIG.BOXCAR_ONSNATCH:
logger.info(u"Sending Boxcar notification")
boxcar = notifiers.BOXCAR()
boxcar.notify(snatched_nzb=nzbname, sent_to=sent_to, snline=snline)
boxcar.notify(snatched_nzb=snatched_name, sent_to=sent_to, snline=snline)
if mylar.CONFIG.PUSHBULLET_ENABLED and mylar.CONFIG.PUSHBULLET_ONSNATCH:
logger.info(u"Sending Pushbullet notification")
pushbullet = notifiers.PUSHBULLET()
pushbullet.notify(snline=snline, snatched=nzbname, sent_to=sent_to, prov=nzbprov, method='POST')
pushbullet.notify(snline=snline, snatched=snatched_name, sent_to=sent_to, prov=nzbprov, method='POST')
if mylar.CONFIG.TELEGRAM_ENABLED and mylar.CONFIG.TELEGRAM_ONSNATCH:
logger.info(u"Sending Telegram notification")
telegram = notifiers.TELEGRAM()
@ -2708,7 +2741,7 @@ def notify_snatch(nzbname, sent_to, modcomicname, comyear, IssueNumber, nzbprov)
if mylar.CONFIG.SLACK_ENABLED and mylar.CONFIG.SLACK_ONSNATCH:
logger.info(u"Sending Slack notification")
slack = notifiers.SLACK()
slack.notify("Snatched", snline, snatched_nzb=nzbname, sent_to=sent_to, prov=nzbprov)
slack.notify("Snatched", snline, snatched_nzb=snatched_name, sent_to=sent_to, prov=nzbprov)
return
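For readers tracing the notify_snatch() refactor above, here is a small standalone sketch (not Mylar code) of how the new argument order assembles the notification text; the series name and values are made-up examples.

# Mirrors the snline / snatched_name logic introduced above; purely illustrative.
def build_notification(comicname, comyear, issuenumber, pack):
    snline = 'Pack snatched!' if pack else 'Issue snatched!'
    if issuenumber is not None:
        snatched_name = '%s (%s) #%s' % (comicname, comyear, issuenumber)
    else:
        snatched_name = '%s (%s)' % (comicname, comyear)
    return snline, snatched_name

print(build_notification('Invincible', '2003', '112', False))  # ('Issue snatched!', 'Invincible (2003) #112')
print(build_notification('Invincible', '2003', None, True))    # ('Pack snatched!', 'Invincible (2003)')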
@ -11,29 +11,33 @@ class TorrentClient(object):
def __init__(self):
self.conn = None
def connect(self, host, username, password):
def connect(self, host, username, password, test=False):
if self.conn is not None:
return self.connect
if not host:
return {'status': False}
return {'status': False, 'error': 'host not specified'}
try:
logger.info(host)
self.client = client.Client(host)
except Exception as e:
logger.error('Could not create qBittorrent Object' + str(e))
return {'status': False}
logger.error('Could not create qBittorrent Object %s' % e)
return {'status': False, 'error': e}
else:
try:
self.client.login(username, password)
except Exception as e:
logger.error('Could not connect to qBittorrent ' + host)
logger.error('Could not connect to qBittorrent: %s' % host)
return {'status': False, 'error': e}
else:
return self.client
if test is True:
version = self.client.qbittorrent_version
return {'status': True, 'version': version}
else:
return self.client
def find_torrent(self, hash):
logger.debug('Finding Torrent hash: ' + hash)
logger.debug('Finding Torrent hash: %s' % hash)
torrent_info = self.get_torrent(hash)
if torrent_info:
return True
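As a rough usage sketch, the new test flag on connect() can be exercised like this; it relies only on the Client/login/qbittorrent_version calls visible in the diff (python-qbittorrent), and the host and credentials are placeholders.

# Hypothetical helper, assuming the same python-qbittorrent Client API used above.
from qbittorrent import client

def check_qbittorrent(host, username, password):
    try:
        qb = client.Client(host)              # e.g. 'http://localhost:8080'
        qb.login(username, password)
    except Exception as e:
        return {'status': False, 'error': e}
    # equivalent of connect(..., test=True): return the daemon version, not the client object
    return {'status': True, 'version': qb.qbittorrent_version}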
@ -41,11 +45,11 @@ class TorrentClient(object):
return False
def get_torrent(self, hash):
logger.debug('Getting Torrent info hash: ' + hash)
logger.debug('Getting Torrent info hash: %s' % hash)
try:
torrent_info = self.client.get_torrent(hash)
except Exception as e:
logger.error('Could not get torrent info for ' + hash)
logger.error('Could not get torrent info for %s' % hash)
return False
else:
logger.info('Successfully located information for torrent')
@ -55,7 +59,7 @@ class TorrentClient(object):
def load_torrent(self, filepath):
if not filepath.startswith('magnet'):
logger.info('filepath to torrent file set to : ' + filepath)
logger.info('filepath to torrent file set to : %s' % filepath)
if self.client._is_authenticated is True:
logger.info('Checking if Torrent Exists!')
@ -68,67 +72,66 @@ class TorrentClient(object):
logger.debug('Magnet (load_torrent) initiating')
else:
hash = self.get_the_hash(filepath)
logger.debug('FileName (load_torrent): ' + str(os.path.basename(filepath)))
logger.debug('FileName (load_torrent): %s' % os.path.basename(filepath))
logger.debug('Torrent Hash (load_torrent): "' + hash + '"')
logger.debug('Torrent Hash (load_torrent): "%s"' % hash)
#Check if torrent already added
if self.find_torrent(hash):
logger.info('load_torrent: Torrent already exists!')
return {'status': False}
return {'status': False, 'error': 'Torrent already exists'}
#should set something here to denote that it's already loaded, and then the failed download checker not run so it doesn't download
#multiple copies of the same issues that's already downloaded
else:
logger.info('Torrent not added yet, trying to add it now!')
if any([mylar.CONFIG.QBITTORRENT_FOLDER is None, mylar.CONFIG.QBITTORRENT_FOLDER == '', mylar.CONFIG.QBITTORRENT_FOLDER == 'None']):
down_dir = None
else:
down_dir = mylar.CONFIG.QBITTORRENT_FOLDER
logger.info('Forcing Download location to: %s' % down_dir)
# Build an arg dict based on user prefs.
addargs = {}
if not any([mylar.CONFIG.QBITTORRENT_LABEL is None, mylar.CONFIG.QBITTORRENT_LABEL == '', mylar.CONFIG.QBITTORRENT_LABEL == 'None']):
addargs.update( { 'category': str(mylar.CONFIG.QBITTORRENT_LABEL) } )
logger.info('Setting download label to: %s' % mylar.CONFIG.QBITTORRENT_LABEL)
if not any([mylar.CONFIG.QBITTORRENT_FOLDER is None, mylar.CONFIG.QBITTORRENT_FOLDER == '', mylar.CONFIG.QBITTORRENT_FOLDER == 'None']):
addargs.update( { 'savepath': str(mylar.CONFIG.QBITTORRENT_FOLDER) } )
logger.info('Forcing download location to: %s' % mylar.CONFIG.QBITTORRENT_FOLDER)
if mylar.CONFIG.QBITTORRENT_LOADACTION == 'pause':
addargs.update( { 'paused': 'true' } )
logger.info('Attempting to add torrent in paused state')
if filepath.startswith('magnet'):
try:
if down_dir is not None:
tid = self.client.download_from_link(filepath, savepath=str(down_dir), category=str(mylar.CONFIG.QBITTORRENT_LABEL))
else:
tid = self.client.download_from_link(filepath, category=str(mylar.CONFIG.QBITTORRENT_LABEL))
tid = self.client.download_from_link(filepath, **addargs)
except Exception as e:
logger.debug('Torrent not added')
return {'status': False}
logger.error('Torrent not added')
return {'status': False, 'error': e}
else:
logger.debug('Successfully submitted for add as a magnet. Verifying item is now on client.')
else:
try:
torrent_content = open(filepath, 'rb')
if down_dir is not None:
tid = self.client.download_from_file(torrent_content, savepath=str(down_dir), category=str(mylar.CONFIG.QBITTORRENT_LABEL))
else:
tid = self.client.download_from_file(torrent_content, category=str(mylar.CONFIG.QBITTORRENT_LABEL))
tid = self.client.download_from_file(torrent_content, **addargs)
except Exception as e:
logger.debug('Torrent not added')
return {'status': False}
logger.error('Torrent not added')
return {'status': False, 'error': e}
else:
logger.debug('Successfully submitted for add via file. Verifying item is now on client.')
if mylar.CONFIG.QBITTORRENT_STARTONLOAD:
logger.info('attempting to start')
startit = self.client.force_start(hash)
logger.info('startit returned:' + str(startit))
else:
logger.info('attempting to pause torrent incase it starts')
if mylar.CONFIG.QBITTORRENT_LOADACTION == 'force_start':
logger.info('Attempting to force start torrent')
try:
startit = self.client.pause(hash)
logger.info('startit paused:' + str(startit))
startit = self.client.force_start(hash)
logger.info('startit returned: %s' % startit)
except:
logger.warn('Unable to pause torrent - possibly already paused?')
logger.warn('Unable to force start torrent - please check your client.')
else:
logger.info('Client default add action selected. Doing nothing.')
try:
time.sleep(5) # wait 5 in case it's not populated yet.
tinfo = self.get_torrent(hash)
except Exception as e:
logger.warn('Torrent was not added! Please check logs')
return {'status': False}
return {'status': False, 'error': e}
else:
logger.info('Torrent successfully added!')
filelist = self.client.get_torrent_files(hash)
@ -160,6 +163,5 @@ class TorrentClient(object):
metainfo = bencode.decode(torrent_file.read())
info = metainfo['info']
thehash = hashlib.sha1(bencode.encode(info)).hexdigest().upper()
logger.debug('Hash: ' + thehash)
return thehash
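The load_torrent() change above collapses the separate savepath/category call variants into one argument dict passed with **. A stripped-down sketch of that pattern, with placeholder settings rather than Mylar's config object:

# Build kwargs only for options that are actually set, then hand them to the
# client in a single call; mirrors the addargs dict introduced in load_torrent().
def build_add_args(label=None, folder=None, loadaction='default'):
    addargs = {}
    if label:
        addargs['category'] = str(label)
    if folder:
        addargs['savepath'] = str(folder)
    if loadaction == 'pause':
        addargs['paused'] = 'true'
    return addargs

print(build_add_args(label='comics', loadaction='pause'))
# {'category': 'comics', 'paused': 'true'} - only the keys that were set
# e.g. qb.download_from_link(magnet_uri, **build_add_args(label='comics', folder='/data/torrents'))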
@ -114,12 +114,12 @@ def dbUpdate(ComicIDList=None, calledfrom=None, sched=False):
#logger.fdebug('%s [%s] Was refreshed less than %s hours ago. Skipping Refresh at this time.' % (ComicName, ComicID, cache_hours))
cnt +=1
continue
logger.info('[' + str(cnt) + '/' + str(len(comiclist)) + '] Refreshing :' + ComicName + ' (' + str(dspyear) + ') [' + str(ComicID) + ']')
logger.info('[%s/%s] Refreshing :%s (%s) [%s]' % (cnt, len(comiclist), ComicName, dspyear, ComicID))
else:
ComicID = comic['ComicID']
ComicName = comic['ComicName']
logger.fdebug('Refreshing: ' + ComicName + ' (' + str(dspyear) + ') [' + str(ComicID) + ']')
logger.info('Refreshing/Updating: %s (%s) [%s]' % (ComicName, dspyear, ComicID))
mismatch = "no"
if not mylar.CONFIG.CV_ONLY or ComicID[:1] == "G":
@ -1104,6 +1104,8 @@ def forceRescan(ComicID, archive=None, module=None, recheck=False):
temploc = '1'
else:
temploc = None
logger.warn('The filename [%s] does not have a valid issue number, and the Edition of the series is %s. You might need to Forcibly Mark the Series as TPB/GN and try this again.' % (tmpfc['ComicFilename'], rescan['Type']))
return
if all(['annual' not in temploc.lower(), 'special' not in temploc.lower()]):
#remove the extension here
@ -1119,6 +1121,7 @@ def forceRescan(ComicID, archive=None, module=None, recheck=False):
while True:
try:
reiss = reissues[n]
int_iss = None
except IndexError:
break
int_iss = helpers.issuedigits(reiss['Issue_Number'])
@ -645,6 +645,8 @@ class WebInterface(object):
seriesYear = cid['SeriesYear']
issuePublisher = cid['Publisher']
seriesVolume = cid['Volume']
bookType = cid['Type']
seriesAliases = cid['Aliases']
if storyarcpublisher is None:
#assume that the arc is the same
storyarcpublisher = issuePublisher
@ -670,6 +672,8 @@ class WebInterface(object):
"IssuePublisher": issuePublisher,
"CV_ArcID": arcid,
"Int_IssueNumber": AD['Int_IssueNumber'],
"Type": bookType,
"Aliases": seriesAliases,
"Manual": AD['Manual']}
myDB.upsert("storyarcs", newVals, newCtrl)
@ -2194,6 +2198,41 @@ class WebInterface(object):
annualDelete.exposed = True
def queueManage(self): # **args):
myDB = db.DBConnection()
activelist = 'There are currently no items downloading via Direct Download (DDL).'
active = myDB.selectone("SELECT * FROM DDL_INFO WHERE STATUS = 'Downloading'").fetchone()
if active is not None:
activelist ={'series': active['series'],
'year': active['year'],
'size': active['size'],
'filename': active['filename'],
'status': active['status'],
'id': active['id']}
resultlist = 'There are currently no items waiting in the Direct Download (DDL) Queue for processing.'
s_info = myDB.select("SELECT a.ComicName, a.ComicVersion, a.ComicID, a.ComicYear, b.Issue_Number, b.IssueID, c.size, c.status, c.id FROM comics as a INNER JOIN issues as b ON a.ComicID = b.ComicID INNER JOIN ddl_info as c ON b.IssueID = c.IssueID WHERE c.status != 'Downloading'")
if s_info:
resultlist = []
for si in s_info:
issue = si['Issue_Number']
if issue is not None:
issue = '#%s' % issue
resultlist.append({'series': si['ComicName'],
'issue': issue,
'id': si['id'],
'volume': si['ComicVersion'],
'year': si['ComicYear'],
'size': si['size'].strip(),
'comicid': si['ComicID'],
'issueid': si['IssueID'],
'status': si['status']})
logger.info('resultlist: %s' % resultlist)
return serve_template(templatename="queue_management.html", title="Queue Management", activelist=activelist, resultlist=resultlist)
queueManage.exposed = True
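queueManage() above reads the DDL queue straight out of the database. For ad-hoc inspection the same rows can be pulled with sqlite3 against the ddl_info table named in that query; the database path below is a placeholder for wherever mylar.db actually lives.

# Quick look at the DDL queue outside of Mylar, using columns shown in queueManage().
import sqlite3

conn = sqlite3.connect('/opt/mylar/mylar.db')   # placeholder path
conn.row_factory = sqlite3.Row
for row in conn.execute("SELECT id, filename, size, status FROM ddl_info ORDER BY id"):
    print(dict(row))
conn.close()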
def previewRename(self, **args): #comicid=None, comicidlist=None):
file_format = mylar.CONFIG.FILE_FORMAT
myDB = db.DBConnection()
@ -4104,7 +4143,7 @@ class WebInterface(object):
import random
SRID = str(random.randint(100000, 999999))
logger.info('[IMPORT] Issues found with valid ComicID information for : ' + comicinfo['ComicName'] + ' [' + str(comicinfo['ComicID']) + ']')
logger.info('[IMPORT] Issues found with valid ComicID information for : %s [%s]' % (comicinfo['ComicName'], comicinfo['ComicID']))
imported = {'ComicName': comicinfo['ComicName'],
'DynamicName': comicinfo['DynamicName'],
'Volume': comicinfo['Volume'],
@ -4127,7 +4166,7 @@ class WebInterface(object):
# "ComicName": comicinfo['ComicName'],
# "DynamicName": comicinfo['DynamicName']}
# myDB.upsert("importresults", newVal, ctrlVal)
logger.info('[IMPORT] Successfully verified import sequence data for : ' + comicinfo['ComicName'] + '. Currently adding to your watchlist.')
logger.info('[IMPORT] Successfully verified import sequence data for : %s. Currently adding to your watchlist.' % comicinfo['ComicName'])
RemoveIDS.append(comicinfo['ComicID'])
#we need to remove these items from the comiclist now, so they don't get processed again
@ -4200,9 +4239,10 @@ class WebInterface(object):
else:
raise cherrypy.HTTPRedirect("importResults")
else:
comicstoIMP.append(result['ComicLocation'])#.decode(mylar.SYS_ENCODING, 'replace'))
#logger.fdebug('result: %s' % result)
comicstoIMP.append(result['ComicLocation']) #.decode(mylar.SYS_ENCODING, 'replace'))
getiss = result['IssueNumber']
#logger.info('getiss:' + getiss)
#logger.fdebug('getiss: %s' % getiss)
if 'annual' in getiss.lower():
tmpiss = re.sub('[^0-9]','', getiss).strip()
if any([tmpiss.startswith('19'), tmpiss.startswith('20')]) and len(tmpiss) == 4:
@ -4217,10 +4257,10 @@ class WebInterface(object):
miniss_num = helpers.issuedigits(minISSUE)
startiss_num = helpers.issuedigits(startISSUE)
if int(getiss_num) > int(miniss_num):
#logger.fdebug('Minimum issue now set to : ' + getiss + ' - it was : ' + minISSUE)
logger.fdebug('Minimum issue now set to : %s - it was %s' % (getiss, minISSUE))
minISSUE = getiss
if int(getiss_num) < int(startiss_num):
#logger.fdebug('Start issue now set to : ' + getiss + ' - it was : ' + startISSUE)
logger.fdebug('Start issue now set to : %s - it was %s' % (getiss, startISSUE))
startISSUE = str(getiss)
if helpers.issuedigits(startISSUE) == 1000 and result['ComicYear'] is not None: # if it's an issue #1, get the year and assume that's the start.
startyear = result['ComicYear']
@ -4545,13 +4585,20 @@ class WebInterface(object):
#----
# to be implemented in the future.
if mylar.INSTALL_TYPE == 'git':
branch_history, err = mylar.versioncheck.runGit("log --pretty=format:'%h - %cr - %an - %s' -n 5")
#here we pass the branch_history to the pretty_git module to break it down
if branch_history:
br_hist = self.pretty_git(branch_history)
#br_hist = branch_history.replace("\n", "<br />\n")
else:
br_hist = err
try:
branch_history, err = mylar.versioncheck.runGit('log --encoding=UTF-8 --pretty=format:"%h - %cr - %an - %s" -n 5')
#here we pass the branch_history to the pretty_git module to break it down
if branch_history:
br_hist = self.pretty_git(branch_history)
try:
br_hist = u"" + br_hist.decode('utf-8')
except:
br_hist = br_hist
else:
br_hist = err
except Exception as e:
logger.fdebug('[ERROR] Unable to retrieve git revision history for some reason: %s' % e)
br_hist = 'This would be a nice place to see revision history...'
else:
br_hist = 'This would be a nice place to see revision history...'
#----
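The history block now shells out to git with an explicit UTF-8 encoding and decodes the result defensively. A minimal equivalent using subprocess directly, for reproducing the same five-commit summary outside Mylar's runGit wrapper:

# Same git invocation as above; the decode mirrors the utf-8 fallback in the diff.
import subprocess

cmd = ['git', 'log', '--encoding=UTF-8',
       '--pretty=format:%h - %cr - %an - %s', '-n', '5']
out = subprocess.check_output(cmd)
try:
    history = out.decode('utf-8')
except UnicodeDecodeError:
    history = out.decode('utf-8', 'replace')
print(history)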
@ -4649,6 +4696,7 @@ class WebInterface(object):
"sab_priority": mylar.CONFIG.SAB_PRIORITY,
"sab_directory": mylar.CONFIG.SAB_DIRECTORY,
"sab_to_mylar": helpers.checked(mylar.CONFIG.SAB_TO_MYLAR),
"sab_version": mylar.CONFIG.SAB_VERSION,
"sab_client_post_processing": helpers.checked(mylar.CONFIG.SAB_CLIENT_POST_PROCESSING),
"nzbget_host": mylar.CONFIG.NZBGET_HOST,
"nzbget_port": mylar.CONFIG.NZBGET_PORT,
@ -4691,7 +4739,7 @@ class WebInterface(object):
"qbittorrent_password": mylar.CONFIG.QBITTORRENT_PASSWORD,
"qbittorrent_label": mylar.CONFIG.QBITTORRENT_LABEL,
"qbittorrent_folder": mylar.CONFIG.QBITTORRENT_FOLDER,
"qbittorrent_startonload": helpers.checked(mylar.CONFIG.QBITTORRENT_STARTONLOAD),
"qbittorrent_loadaction": mylar.CONFIG.QBITTORRENT_LOADACTION,
"blackhole_dir": mylar.CONFIG.BLACKHOLE_DIR,
"usenet_retention": mylar.CONFIG.USENET_RETENTION,
"nzbsu": helpers.checked(mylar.CONFIG.NZBSU),
@ -5056,7 +5104,7 @@ class WebInterface(object):
def configUpdate(self, **kwargs):
checked_configs = ['enable_https', 'launch_browser', 'syno_fix', 'auto_update', 'annuals_on', 'api_enabled', 'nzb_startup_search',
'enforce_perms', 'sab_to_mylar', 'torrent_local', 'torrent_seedbox', 'rtorrent_ssl', 'rtorrent_verify', 'rtorrent_startonload',
'enable_torrents', 'qbittorrent_startonload', 'enable_rss', 'nzbsu', 'nzbsu_verify',
'enable_torrents', 'enable_rss', 'nzbsu', 'nzbsu_verify',
'dognzb', 'dognzb_verify', 'experimental', 'enable_torrent_search', 'enable_public', 'enable_32p', 'enable_torznab',
'newznab', 'use_minsize', 'use_maxsize', 'ddump', 'failed_download_handling', 'sab_client_post_processing', 'nzbget_client_post_processing',
'failed_auto', 'post_processing', 'enable_check_folder', 'enable_pre_scripts', 'enable_snatch_script', 'enable_extra_scripts',
@ -5064,7 +5112,7 @@ class WebInterface(object):
'lowercase_filenames', 'autowant_upcoming', 'autowant_all', 'comic_cover_local', 'alternate_latest_series_covers', 'cvinfo', 'snatchedtorrent_notify',
'prowl_enabled', 'prowl_onsnatch', 'nma_enabled', 'nma_onsnatch', 'pushover_enabled', 'pushover_onsnatch', 'boxcar_enabled',
'boxcar_onsnatch', 'pushbullet_enabled', 'pushbullet_onsnatch', 'telegram_enabled', 'telegram_onsnatch', 'slack_enabled', 'slack_onsnatch',
'opds_enable', 'opds_authentication', 'opds_metainfo'] #, 'enable_ddl']
'opds_enable', 'opds_authentication', 'opds_metainfo', 'enable_ddl']
for checked_config in checked_configs:
if checked_config not in kwargs:
@ -5168,7 +5216,12 @@ class WebInterface(object):
else:
verify = False
version = 'Unknown'
try:
v = requests.get(querysab, params={'mode': 'version'}, verify=verify)
if str(v.status_code) == '200':
logger.fdebug('sabnzbd version: %s' % v.content)
version = v.text
r = requests.get(querysab, params=payload, verify=verify)
except Exception, e:
logger.warn('Error fetching data from %s: %s' % (querysab, e))
@ -5183,6 +5236,10 @@ class WebInterface(object):
verify = False
try:
v = requests.get(querysab, params={'mode': 'version'}, verify=verify)
if str(v.status_code) == '200':
logger.fdebug('sabnzbd version: %s' % v.text)
version = v.text
r = requests.get(querysab, params=payload, verify=verify)
except Exception, e:
logger.warn('Error fetching data from %s: %s' % (sabhost, e))
@ -5191,7 +5248,7 @@ class WebInterface(object):
return 'Unable to retrieve data from SABnzbd'
logger.info('status code: ' + str(r.status_code))
logger.fdebug('status code: ' + str(r.status_code))
if str(r.status_code) != '200':
logger.warn('Unable to properly query SABnzbd @' + sabhost + ' [Status Code returned: ' + str(r.status_code) + ']')
@ -5215,7 +5272,9 @@ class WebInterface(object):
mylar.CONFIG.SAB_APIKEY = q_apikey
logger.info('APIKey provided is the FULL APIKey which is the correct key. You still need to SAVE the config for the changes to be applied.')
logger.info('Connection to SABnzbd tested successfully')
return "Successfully verified APIkey"
mylar.CONFIG.SAB_VERSION = version
return json.dumps({"status": "Successfully verified APIkey.", "version": str(version)})
SABtest.exposed = True
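SABtest() now records the SABnzbd version by calling the API with mode=version before the usual api-key check. A compact standalone version of that probe; the host URL is a placeholder, and as in the diff no API key is passed for the version query.

# Standalone SABnzbd version probe matching the requests call added above.
import requests

def sab_version(sab_host, verify=True):
    querysab = '%s/api' % sab_host.rstrip('/')
    v = requests.get(querysab, params={'mode': 'version'}, verify=verify)
    return v.text.strip() if v.status_code == 200 else 'Unknown'

# e.g. sab_version('http://localhost:8080/sabnzbd')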
def NZBGet_test(self, nzbhost=None, nzbport=None, nzbusername=None, nzbpassword=None):
@ -5627,6 +5686,21 @@ class WebInterface(object):
return "Successfully validated connection to %s" % host
testrtorrent.exposed = True
def testqbit(self, host, username, password):
import torrent.clients.qbittorrent as QbitClient
qc = QbitClient.TorrentClient()
qclient = qc.connect(host, username, password, True)
if not qclient:
logger.warn('[qBittorrent] Could not establish connection to %s' % host)
return 'Error establishing connection to Qbittorrent'
else:
if qclient['status'] is False:
logger.warn('[qBittorrent] Could not establish connection to %s. Error returned: %s' % (host, qclient['error']))
return 'Error establishing connection to Qbittorrent'
else:
logger.info('[qBittorrent] Successfully validated connection to %s [%s]' % (host, qclient['version']))
return 'Successfully validated qBittorrent connection'
testqbit.exposed = True
def testnewznab(self, name, host, ssl, apikey):
result = helpers.newznab_test(name, host, ssl, apikey)
@ -81,7 +81,9 @@ def pullit(forcecheck=None, weeknumber=None, year=None):
elif chk_locg['status'] == 'success':
logger.info('[PULL-LIST] Weekly Pull List successfully loaded with ' + str(chk_locg['count']) + ' issues.')
return new_pullcheck(chk_locg['weeknumber'],chk_locg['year'])
elif chk_locg['status'] == 'update_required':
logger.warn('[PULL-LIST] Your version of Mylar is not up-to-date. You MUST update before this works')
return
else:
logger.info('[PULL-LIST] Unable to retrieve weekly pull-list. Dropping down to legacy method of PW-file')
mylar.PULLBYFILE = pull_the_file(newrl)