mirror of https://github.com/evilhero/mylar
Merge branch 'development'
This commit is contained in: commit a825a8959a

@@ -70,8 +70,7 @@
<div id="tabs">
<ul>
<li><a href="#tabs-1">Comic Details</a></li>
<li><a href="#tabs-2">Download settings</a></li>
<li><a href="#tabs-3">Edit Settings</a></li>
<li><a href="#tabs-2">Edit Settings</a></li>
</ul>

<div id="tabs-1">

@@ -193,85 +192,7 @@
</tr>
</table>
</div>
<div id="tabs-2">
<table class="comictable" summary="Download Settings">
<tr>
<td id="mainimg">
<fieldset>
<div id="artistImg">
<img src="${comic['ComicImage']}" alt="" height="400" width="263" />
</div>
</fieldset>
</td>
<td width="100%" padding="10">
%if comic['ComicPublisher'] == 'DC Comics':
<img src="interfaces/default/images/publisherlogos/logo-dccomics.png" align="right" alt="DC" height="50" width="50">
%elif comic['ComicPublisher'] == 'Marvel':
<img src="interfaces/default/images/publisherlogos/logo-marvel.jpg" align="right" alt="Marvel" height="50" width="100">
%elif comic['ComicPublisher'] == 'Image':
<img src="interfaces/default/images/publisherlogos/logo-imagecomics.png" align="right" alt="Image" height="100" width="50"/>
%elif comic['ComicPublisher'] == 'Dark Horse Comics':
<img src="interfaces/default/images/publisherlogos/logo-darkhorse.png" align="right" alt="Darkhorse" height="75" width="50"/>
%elif comic['ComicPublisher'] == 'IDW Publishing':
<img src="interfaces/default/images/publisherlogos/logo-idwpublish.png" align="right" alt="IDW" height="50" width="100"/>
%endif
<fieldset>
<div>
<label><big>Alternate versions :</big><norm>${comic['ComicVersion']}</norm></label>
</div>
<div>
<label><big>Scanner :</big><norm>${comic['QUALscanner']}</norm></label>
</div>
<div>
<label><big>Type :</big><norm>${comic['QUALtype']}</norm></label>
</div>
<div>
<label><big>Quality :</big><norm>${comic['QUALquality']}</norm></label>
</div>
<div>
<label><big>Alternate Search Names :</big>
<%
import re
AS_Alternate = []
if comic['AlternateSearch'] is None:
    AS_Alternate.append("None")
else:
    chkthealt = comic['AlternateSearch'].split('##')
    if chkthealt == 0:
        AS_Alternate.append(comic['AlternateSearch'])
    for calt in chkthealt:
        AS_Alternate.append(re.sub('##','',calt))
%>
<UL><UL>
%for AS in AS_Alternate:
<LI><norm>${AS}</norm>
%endfor
</UL></UL>
</label>
</div>
<%
if comic['UseFuzzy'] == "0" or comic['UseFuzzy'] is None:
    fuzzy = "None"
    fuzzy_year = "0"
elif comic['UseFuzzy'] == "1":
    fuzzy = "Remove Year"
    fuzzy_year = "1"
elif comic['UseFuzzy'] == "2":
    fuzzy = "Fuzzy Year"
    fuzzy_year = "2"

%>

<div>
<label><big>Fuzzy Year logic : </big><norm>${fuzzy} </norm></label>
</div>

</fieldset>
</td>
</tr>
</table>
</div>
<div id="tabs-3">
<div id="tabs-2">
<table class="comictable" summary="Edit Settings">
<tr>
<td id="mainimg">

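A note on the Mako block above: it expands the '##'-delimited AlternateSearch field into a list for display. The same parse in plain Python, as a sketch only (str.split('##') already returns a one-element list for a single name, so the template's chkthealt == 0 comparison of a list against an integer can never be true; presumably len(chkthealt) == 0 was intended):

def alternate_names(alternate_search):
    # Expand the '##'-delimited AlternateSearch field, mirroring the
    # template block above, with the unreachable length check dropped.
    if alternate_search is None:
        return ["None"]
    return [name for name in alternate_search.split('##') if name]

print(alternate_names('Batman##The Dark Knight'))  # ['Batman', 'The Dark Knight']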
@@ -530,7 +451,7 @@
</div>

<table class="display_no_select" id="annual_table">
<table class="display" id="annual_table">

<thead>
<tr>

@@ -841,49 +762,50 @@
initActions();
$('#issue_table').dataTable(
{
"bDestroy": true,
"aoColumnDefs": [
{ 'bSortable': false, 'aTargets': [ 0, 3 ] },
{ 'bVisible': false, 'aTargets': [1] },
{ 'sType': 'numeric', 'aTargets': [1] },
{ 'columns.orderData': [1], 'aTargets': [2] }
"destroy": true,
"columnDefs": [
{ "orderable": false, "targets": [0, 6] },
{ "visible": false, "targets": 1 },
{ "orderData": 1, "targets": 2 },
{ "type": 'num', "targets": 1 },
{ "order": [[1, 'desc']] }
],
"aLengthMenu": [[10, 25, 50, -1], [10, 25, 50, 'All' ]],
"oLanguage": {
"sLengthMenu":"Show _MENU_ issues per page",
"sEmptyTable": "No issue information available",
"sInfo":"Showing _TOTAL_ issues",
"sInfoEmpty":"Showing 0 to 0 of 0 issues",
"sInfoFiltered":"(filtered from _MAX_ total issues)",
"sSearch": ""},
"bStateSave": true,
"iDisplayLength": 25,
"sPaginationType": "full_numbers",
"aaSorting": [[1, 'desc'],[4,'desc']]
"lengthMenu": [[10, 25, 50, -1], [10, 25, 50, 'All' ]],
"language": {
"lengthMenu":"Show _MENU_ issues per page",
"emptyTable": "No issue information available",
"info":"Showing _TOTAL_ issues",
"infoEmpty":"Showing 0 to 0 of 0 issues",
"infoFiltered":"(filtered from _MAX_ total issues)",
"search": ""},
"stateSave": true,
"searching": true,
"pageLength": 25,
"pagingType": "full_numbers"
});
$('#annual_table').dataTable(
{
"bDestroy": true,
"aoColumnDefs": [
{ 'bSortable': false, 'aTargets': [ 0, 3 ] },
{ 'bVisible': false, 'aTargets': [1] },
{ 'sType': 'numeric', 'aTargets': [1] },
{ 'columns.orderData': [1], 'aTargets': [2] }
"destroy": true,
"columnDefs": [
{ "orderable": false, "targets": [0, 6] },
{ "visible": false, "targets": 1 },
{ "orderData": 1, "targets": 2 },
{ "type": 'num', "targets": 1 },
{ "order": [[1, 'desc']] }
],
"aLengthMenu": [[10, 25, 50, -1], [10, 25, 50, 'All' ]],
"oLanguage": {
"sLengthMenu":"",
"sEmptyTable": "No issue information available",
"sInfo":"Showing _TOTAL_ issues",
"sInfoEmpty":"Showing 0 to 0 of 0 issues",
"sInfoFiltered":"",
"sSearch": ""},
"bStateSave": true,
"bFilter": false,
"sPaginationType": "full_numbers",
"iDisplayLength": 10
"lengthMenu": [[10, 25, 50, -1], [10, 25, 50, 'All' ]],
"language": {
"lengthMenu":"Show _MENU_ annuals per page",
"emptyTable": "No annual information available",
"info":"Showing _TOTAL_ annuals",
"infoEmpty":"Showing 0 to 0 of 0 annuals",
"infoFiltered":"(filtered from _MAX_ total annuals)",
"search": ""},
"stateSave": true,
"searching": false,
"pageLength": 10,
"pagingType": "full_numbers"
});

resetFilters("issue", "annual");
setTimeout(function(){
initFancybox();

@@ -891,8 +813,7 @@
}

$(document).ready(function() {
$("issue_table").dataTable();
$("annual_table").dataTable();
$('table.display').DataTable();
initThisPage();
});
</script>

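The two table initialisations above migrate from the legacy DataTables 1.9 Hungarian-notation options to the 1.10+ camelCase API. For reference, the renames this commit applies are the standard documented conversions; here is the mapping as a small Python sketch (the dict contents are DataTables conventions, not part of the commit itself):

# Legacy DataTables 1.9 option names and their 1.10+ equivalents.
LEGACY_TO_CURRENT = {
    'bDestroy': 'destroy',
    'aoColumnDefs': 'columnDefs',
    'bSortable': 'orderable',
    'bVisible': 'visible',
    'sType': 'type',
    'aTargets': 'targets',
    'aLengthMenu': 'lengthMenu',
    'oLanguage': 'language',
    'sLengthMenu': 'lengthMenu',
    'sEmptyTable': 'emptyTable',
    'sInfo': 'info',
    'sInfoEmpty': 'infoEmpty',
    'sInfoFiltered': 'infoFiltered',
    'sSearch': 'search',
    'bStateSave': 'stateSave',
    'bFilter': 'searching',
    'iDisplayLength': 'pageLength',
    'sPaginationType': 'pagingType',
    'aaSorting': 'order',
}

def modernize(options):
    # Rename legacy DataTables keys, recursing into nested dicts and lists.
    if isinstance(options, dict):
        return {LEGACY_TO_CURRENT.get(k, k): modernize(v) for k, v in options.items()}
    if isinstance(options, list):
        return [modernize(v) for v in options]
    return options

Note that the default sort moves from a top-level "aaSorting" into an "order" entry, and per-table filtering moves from "bFilter" to "searching".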
@@ -2,6 +2,7 @@
<%!
from mylar import helpers, db
import datetime
import decimal
%>

<%def name="body()">

@@ -32,6 +33,18 @@
if comic['percent'] < 100:
    css = '<div class=\"progress-container missing\">'

if any([comic['haveissues'] == 'None', comic['haveissues'] is None]):
    hissues = 0
else:
    hissues = comic['haveissues']

if any([comic['totalissues'] == 'None', comic['totalissues'] is None]):
    tissues = 0
else:
    tissues = comic['totalissues']

comic_percent = int(hissues) + decimal.Decimal(tissues) / decimal.Decimal('1000')

if comic['Status'] == 'Paused':
    grade = 'X'
elif comic['Status'] == 'Loading':

@@ -48,7 +61,7 @@
<td id="year"><span title="${comic['ComicYear']}"></span>${comic['ComicYear']}</td>
<td id="issue"><span title="${comic['LatestIssue']}"></span># ${comic['LatestIssue']}</td>
<td id="published">${comic['LatestDate']}</td>
<td class="hidden" id="have_percent">${comic['percent']}</td>
<td class="hidden" id="have_percent">${comic_percent}</td>
<td id="have"><span title="${comic['percent']}"></span>${css}<div style="width:${comic['percent']}%"><span class="progressbar-front-text">${comic['haveissues']}/${comic['totalissues']}</span></div></td>
<td id="status">${comic['recentstatus']}</td>
<td id="active" align="center">

@@ -82,26 +95,26 @@
function initThisPage() {
$('#series_table').dataTable(
{
"bDestroy": true,
"aoColumnDefs": [
{ 'bSortable': false, 'aTargets': [5, 9] },
{ 'bVisible': false, 'aTargets': [5, 9] },
{ 'sType': 'numeric', 'aTargets': [5] },
{ 'columns.orderData': [5], 'aTargets': [6] },
{ 'columns.orderData': [9], 'aTargets': [8] }
"destroy": true,
"columnDefs": [
{ "orderable": false, "targets": [5, 9] },
{ "visible": false, "targets": [5, 9] },
{ "type": 'num', "targets": 5 },
{ "orderData": 5, "targets": 6 },
{ "orderData": 9, "targets": 8 },
{ "order": [[7, 'asc'],[1, 'asc']] }
],
"aLengthMenu": [[10, 15, 25, 50, -1], [10, 15, 25, 50, 'All' ]],
"oLanguage": {
"sLengthMenu":"Show _MENU_ results per page",
"sEmptyTable": "No results",
"sInfo":"Showing _START_ to _END_ of _TOTAL_ results",
"sInfoEmpty":"Showing 0 to 0 of 0 results",
"sInfoFiltered":"(filtered from _MAX_ total results)",
"sSearch" : ""},
"bStateSave": true,
"iDisplayLength": 25,
"sPaginationType": "full_numbers",
"aaSorting": [[7,'asc'],[1,'asc']],
"lengthMenu": [[10, 15, 25, 50, -1], [10, 15, 25, 50, 'All' ]],
"language": {
"lengthMenu":"Show _MENU_ results per page",
"emptyTable": "No results",
"info":"Showing _START_ to _END_ of _TOTAL_ results",
"infoEmpty":"Showing 0 to 0 of 0 results",
"infoFiltered":"(filtered from _MAX_ total results)",
"search" : ""},
"stateSave": true,
"pageLength": 25,
"pagingType": "full_numbers",

});

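The hidden have_percent column above now stores comic_percent rather than the raw percent, so sorting that column orders first by the number of owned issues and breaks ties by the series' total issue count. A minimal sketch of the computation, mirroring the template logic:

from decimal import Decimal

def sort_key(haveissues, totalissues):
    # Treat missing counts as zero, as the template block does.
    have = 0 if haveissues in (None, 'None') else haveissues
    total = 0 if totalissues in (None, 'None') else totalissues
    # Integer part sorts by issues owned; the fractional part breaks
    # ties by total issue count (assumes totals stay below 1000).
    return int(have) + Decimal(total) / Decimal('1000')

assert sort_key(12, 50) < sort_key(12, 60) < sort_key(13, 10)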
@@ -75,8 +75,8 @@
<th id="comicname">ComicName</th>
<th id="issue">Issue</th>
<th id="issuedate">Pub Date</th>
<th id="status">Status</th>
<th id="action">Options</th>
<th id="status">Status</th>
<th id="action">Options</th>
</tr>
</thead>
<tbody>

@@ -131,7 +131,7 @@
issuedate = item['IssueDate']
else:
if item['StoreDate'] != '0000-00-00' and item['StoreDate'] is not None:
issuedate = item['IssueDate']
issuedate = item['StoreDate']
else:
# this is needed for imported cbl's
try:

@@ -327,6 +327,7 @@ ARC_FOLDERFORMAT = None
ARC_FILEOPS = 'copy'

CVURL = None
CV_VERIFY = 0
CURRENT_WEEKNUMBER = None
CURRENT_YEAR = None
PULL_REFRESH = None

@@ -501,7 +502,7 @@ def initialize():
PREFERRED_QUALITY, MOVE_FILES, RENAME_FILES, LOWERCASE_FILENAMES, USE_MINSIZE, MINSIZE, USE_MAXSIZE, MAXSIZE, CORRECT_METADATA, \
FOLDER_FORMAT, FILE_FORMAT, REPLACE_CHAR, REPLACE_SPACES, ADD_TO_CSV, CVINFO, LOG_LEVEL, POST_PROCESSING, POST_PROCESSING_SCRIPT, \
FILE_OPTS, SEARCH_DELAY, GRABBAG_DIR, READ2FILENAME, SEND2READ, MAINTAINSERIESFOLDER, TAB_ENABLE, TAB_HOST, TAB_USER, TAB_PASS, TAB_DIRECTORY, \
STORYARCDIR, COPY2ARCDIR, ARC_FOLDERFORMAT, ARC_FILEOPS, CVURL, CHECK_FOLDER, ENABLE_CHECK_FOLDER, \
STORYARCDIR, COPY2ARCDIR, ARC_FOLDERFORMAT, ARC_FILEOPS, CVURL, CV_VERIFY, CHECK_FOLDER, ENABLE_CHECK_FOLDER, \
COMIC_LOCATION, QUAL_ALTVERS, QUAL_SCANNER, QUAL_TYPE, QUAL_QUALITY, ENABLE_EXTRA_SCRIPTS, EXTRA_SCRIPTS, ENABLE_PRE_SCRIPTS, PRE_SCRIPTS, PULLNEW, ALT_PULL, PULLBYFILE, COUNT_ISSUES, COUNT_HAVES, COUNT_COMICS, \
SYNO_FIX, ENFORCE_PERMS, CHMOD_FILE, CHMOD_DIR, CHOWNER, CHGROUP, ANNUALS_ON, CV_ONLY, CV_ONETIMER, CURRENT_WEEKNUMBER, CURRENT_YEAR, PULL_REFRESH, WEEKFOLDER, WEEKFOLDER_LOC, WEEKFOLDER_FORMAT, UMASK, \
TELEGRAM_ENABLED, TELEGRAM_TOKEN, TELEGRAM_USERID

@@ -548,6 +549,7 @@ def initialize():
if not COMICVINE_API:
    COMICVINE_API = None
CVAPI_RATE = check_setting_int(CFG, 'General', 'cvapi_rate', 2)
CV_VERIFY = bool(check_setting_int(CFG, 'General', 'cv_verify', 0))
HTTP_HOST = check_setting_str(CFG, 'General', 'http_host', '0.0.0.0')
HTTP_USERNAME = check_setting_str(CFG, 'General', 'http_username', '')
HTTP_PASSWORD = check_setting_str(CFG, 'General', 'http_password', '')

@@ -1210,7 +1212,7 @@ def initialize():
logger.info('Synology Parsing Fix already implemented. No changes required at this time.')

#set the default URL for ComicVine API here.
CVURL = 'http://comicvine.gamespot.com/api/'
CVURL = 'https://comicvine.gamespot.com/api/'

#comictagger - force to use included version if option is enabled.
if ENABLE_META:

@@ -1374,6 +1376,7 @@ def config_write():
new_config['General']['comicvine_api'] = COMICVINE_API.strip()

new_config['General']['cvapi_rate'] = CVAPI_RATE
new_config['General']['cv_verify'] = int(CV_VERIFY)
new_config['General']['http_port'] = HTTP_PORT
new_config['General']['http_host'] = HTTP_HOST
new_config['General']['http_username'] = HTTP_USERNAME

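The CV_VERIFY setting introduced above is read as a 0/1 integer, coerced to a boolean, and then handed straight to requests' verify= parameter in the ComicVine fetches further down (cv.py, mb.py, importer.py). A minimal standalone sketch of that flow; the ConfigParser usage here is illustrative, since Mylar actually reads settings through its own check_setting_int() helper:

# Python 2, matching the codebase.
from ConfigParser import SafeConfigParser
import requests

cfg = SafeConfigParser()
cfg.read('config.ini')
# Default to 0 (no certificate verification) when the option is absent.
cv_verify = bool(cfg.getint('General', 'cv_verify')) if cfg.has_option('General', 'cv_verify') else False

r = requests.get('https://comicvine.gamespot.com/api/',
                 params=None,
                 verify=cv_verify,   # False skips TLS certificate checks
                 headers={'User-Agent': 'Mylar'})

This replaces the previous hard-coded verify = False, which disabled certificate checking unconditionally.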
@@ -1,35 +0,0 @@
# This file is part of Headphones.
#
# Headphones is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Headphones is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Headphones. If not, see <http://www.gnu.org/licenses/>.

from mylar import db


def getCachedArt(albumid):

    from mylar import cache

    c = cache.Cache()

    artwork_path = c.get_artwork_from_cache(ComicID=comicid)

    if not artwork_path:
        return None

    if artwork_path.startswith('http://'):
        artwork = urllib.urlopen(artwork_path).read()
        return artwork
    else:
        artwork = open(artwork_path, "r").read()
        return artwork

@@ -88,10 +88,9 @@ def pulldetails(comicid, type, issueid=None, offset=1, arclist=None, comicidlist
#download the file:
#set payload to None for now...
payload = None
verify = False

try:
    r = requests.get(PULLURL, params=payload, verify=verify, headers=mylar.CV_HEADERS)
    r = requests.get(PULLURL, params=payload, verify=mylar.CV_VERIFY, headers=mylar.CV_HEADERS)
except Exception, e:
    logger.warn('Error fetching data from ComicVine: %s' % (e))
    return

@@ -151,7 +151,8 @@ class FileChecker(object):
'series_volume': runresults['series_volume'],
'issue_year': runresults['issue_year'],
'issue_number': runresults['issue_number'],
'scangroup': runresults['scangroup']
'scangroup': runresults['scangroup'],
'reading_order': runresults['reading_order']
})
else:
comiclist.append({

@@ -230,6 +231,7 @@ class FileChecker(object):
#split the file and then get all the relevant numbers that could possibly be an issue number.
#remove the extension.
modfilename = re.sub(filetype, '', filename).strip()
reading_order = None

#if it's a story-arc, make sure to remove any leading reading order #'s
if self.sarc and mylar.READ2FILENAME:

@@ -237,6 +239,8 @@ class FileChecker(object):
if mylar.FOLDER_SCAN_LOG_VERBOSE:
    logger.fdebug('[SARC] Checking filename for Reading Order sequence - Reading Sequence Order found #: ' + str(modfilename[:removest]))
if modfilename[:removest].isdigit() and removest <= 3:
    reading_order = {'reading_sequence': str(modfilename[:removest]),
                     'filename': filename[removest+1:]}
    modfilename = modfilename[removest+1:]
    if mylar.FOLDER_SCAN_LOG_VERBOSE:
        logger.fdebug('[SARC] Removed Reading Order sequence from subname. Now set to : ' + modfilename)

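The new reading_order handling above strips a leading numeric reading-sequence (at most three digits) from story-arc filenames and remembers it for later. A small sketch of that parse, assuming a single separator character after the digits as the diff does:

def split_reading_order(filename):
    # Take the digits before the first non-digit, accept at most 3 of
    # them, and return the remainder. Sketch only; the real code works
    # on the extension-stripped name and respects mylar.READ2FILENAME.
    digits = 0
    while digits < len(filename) and filename[digits].isdigit():
        digits += 1
    if 0 < digits <= 3:
        return {'reading_sequence': filename[:digits],
                'filename': filename[digits + 1:]}  # +1 skips the separator
    return None

print(split_reading_order('023-Civil War 001.cbz'))
# {'reading_sequence': '023', 'filename': 'Civil War 001.cbz'}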
@@ -261,6 +265,8 @@ class FileChecker(object):
    break
cnt +=1

modfilename = modfilename.replace('()','').strip()

#here we take a snapshot of the current modfilename, the intent is that we will remove characters that match
#as we discover them - namely volume, issue #, years, etc
#the remaining strings should be the series title and/or issue title if present (has to be detected properly)

@@ -489,12 +495,12 @@ class FileChecker(object):

#now we try to find the series title &/or volume label.
if any( [sf.lower().startswith('v'), sf.lower().startswith('vol'), volumeprior == True, 'volume' in sf.lower(), 'vol' in sf.lower()] ) and sf.lower() not in {'one','two','three','four','five','six'}:
    if sf[1:].isdigit() or sf[3:].isdigit():# or volumeprior == True:
    if any([ split_file[split_file.index(sf)].isdigit(), split_file[split_file.index(sf)][3:].isdigit(), split_file[split_file.index(sf)][1:].isdigit() ]):
        volume = re.sub("[^0-9]", "", sf)
        if volumeprior:
            try:
                volumetmp = split_file.index(volumeprior_label, current_pos -1) #if this passes, then we're ok, otherwise will try exception
                volume_found['position'] = split_file.index(sf, current_pos)
                volume_found['position'] = split_file.index(volumeprior_label, current_pos -1) #if this passes, then we're ok, otherwise will try exception
                logger.fdebug('volume_found: ' + str(volume_found['position']))
            except:
                sep_volume = False
                continue

@@ -502,6 +508,7 @@ class FileChecker(object):
            volume_found['position'] = split_file.index(sf, current_pos)

        volume_found['volume'] = volume
        logger.fdebug('volume label detected as : Volume ' + str(volume) + ' @ position: ' + str(split_file.index(sf)))
        volumeprior = False
        volumeprior_label = None
elif 'vol' in sf.lower() and len(sf) == 3:

@@ -509,7 +516,7 @@ class FileChecker(object):
    volumeprior = True
    volumeprior_label = sf
    sep_volume = True
    #logger.fdebug('volume label detected, but vol. number is not adjacent, adjusting scope to include number.')
    logger.fdebug('volume label detected, but vol. number is not adjacent, adjusting scope to include number.')
elif 'volume' in sf.lower():
    volume = re.sub("[^0-9]", "", sf)
    if volume.isdigit():

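The hunks above refine how a volume label ('v2', 'vol2', 'volume 2', or a label split across two tokens) is matched inside the tokenized filename. The core single-token test, isolated as a simplified sketch (split labels such as 'vol' followed by '2' need the two-token volumeprior handling the diff adds):

import re

def volume_from_token(token):
    # Return the numeric volume from a single token such as 'v2',
    # 'vol2' or 'volume2', else None.
    if token.lower().startswith(('v', 'vol', 'volume')):
        digits = re.sub("[^0-9]", "", token)
        if digits.isdigit():
            return digits
    return None

print(volume_from_token('v2'), volume_from_token('volume3'), volume_from_token('various'))
# 2 3 None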
@@ -819,7 +826,8 @@ class FileChecker(object):
'series_volume': issue_volume,
'issue_year': issue_year,
'issue_number': issue_number,
'scangroup': scangroup}
'scangroup': scangroup,
'reading_order': reading_order}

series_info = {}
series_info = {'sub': path_list,

223 mylar/helpers.py
@@ -247,7 +247,7 @@ def rename_param(comicid, comicname, issue, ofilename, comicyear=None, issueid=N
import db, logger
myDB = db.DBConnection()
logger.fdebug('comicid: ' + str(comicid))
logger.fdebug('issue#: ' + str(issue))
logger.fdebug('issue#: ' + issue)
# the issue here is a non-decimalized version, we need to see if it's got a decimal and if not, add '.00'
# iss_find = issue.find('.')
# if iss_find < 0:

@@ -370,7 +370,9 @@ def rename_param(comicid, comicname, issue, ofilename, comicyear=None, issueid=N
for issexcept in issue_exceptions:
    if issexcept.lower() in issuenum.lower():
        logger.fdebug('ALPHANUMERIC EXCEPTION : [' + issexcept + ']')
        if any(v in issuenum for v in valid_spaces):
        v_chk = [v in issuenum for v in valid_spaces]
        if v_chk:
            iss_space = v_chk[0]
            logger.fdebug('character space denoted as : ' + iss_space)
        else:
            logger.fdebug('character space not denoted.')

@@ -422,7 +424,7 @@ def rename_param(comicid, comicname, issue, ofilename, comicyear=None, issueid=N
else:
    iss = issuenum
issueno = str(iss)
logger.fdebug('iss:' + str(iss))
logger.fdebug('iss:' + iss)
logger.fdebug('issueno:' + str(issueno))
# issue zero-suppression here
if mylar.ZERO_LEVEL == "0":

@@ -507,10 +509,10 @@ def rename_param(comicid, comicname, issue, ofilename, comicyear=None, issueid=N
if month_name is None:
    month_name = 'None'
logger.fdebug('Issue Year : ' + str(issueyear))
logger.fdebug('Publisher: ' + str(publisher))
logger.fdebug('Series: ' + str(series))
logger.fdebug('Publisher: ' + publisher)
logger.fdebug('Series: ' + series)
logger.fdebug('Year: ' + str(seriesyear))
logger.fdebug('Comic Location: ' + str(comlocation))
logger.fdebug('Comic Location: ' + comlocation)
if comversion is None:
    comversion = 'None'
#if comversion is None, remove it so it doesn't populate with 'None'

@@ -626,15 +628,15 @@ def rename_param(comicid, comicname, issue, ofilename, comicyear=None, issueid=N
nfilename = nfilename.replace(' ', mylar.REPLACE_CHAR)

nfilename = re.sub('[\,\:]', '', nfilename) + ext.lower()
logger.fdebug('New Filename: ' + str(nfilename))
logger.fdebug('New Filename: ' + nfilename)

if mylar.LOWERCASE_FILENAMES:
    dst = os.path.join(comlocation, nfilename.lower())
else:
    dst = os.path.join(comlocation, nfilename)

logger.fdebug('Source: ' + str(ofilename))
logger.fdebug('Destination: ' + str(dst))
logger.fdebug('Source: ' + ofilename)
logger.fdebug('Destination: ' + dst)

rename_this = {"destination_dir": dst,
               "nfilename": nfilename,

@@ -1370,93 +1372,114 @@ def filesafe(comic):

return comicname_filesafe

def IssueDetails(filelocation, IssueID=None):
def IssueDetails(filelocation, IssueID=None, justinfo=False):
import zipfile, logger
from xml.dom.minidom import parseString

dstlocation = os.path.join(mylar.CACHE_DIR, 'temp.zip')

issuedetails = []

if filelocation.endswith('.cbz'):
    logger.fdebug('CBZ file detected. Checking for .xml within file')
    shutil.copy(filelocation, dstlocation)
else:
    logger.fdebug('filename is not a cbz : ' + filelocation)
    return

cover = "notfound"
issuetag = None
pic_extensions = ('.jpg','.png','.webp')
modtime = os.path.getmtime(dstlocation)
low_infile = 999999

try:
    with zipfile.ZipFile(dstlocation, 'r') as inzipfile:
        for infile in sorted(inzipfile.namelist()):
            tmp_infile = re.sub("[^0-9]","", infile).strip()
            if tmp_infile == '':
                pass
            elif int(tmp_infile) < int(low_infile):
                low_infile = tmp_infile
                low_infile_name = infile
            if infile == 'ComicInfo.xml':
                logger.fdebug('Extracting ComicInfo.xml to display.')
                dst = os.path.join(mylar.CACHE_DIR, 'ComicInfo.xml')
                data = inzipfile.read(infile)
                #print str(data)
                issuetag = 'xml'
            #looks for the first page and assumes it's the cover. (Alternate covers handled later on)
            elif any(['000.' in infile, '00.' in infile]) and infile.endswith(pic_extensions) and cover == "notfound":
                logger.fdebug('Extracting primary image ' + infile + ' as coverfile for display.')
if justinfo is False:
    dstlocation = os.path.join(mylar.CACHE_DIR, 'temp.zip')

    if filelocation.endswith('.cbz'):
        logger.fdebug('CBZ file detected. Checking for .xml within file')
        shutil.copy(filelocation, dstlocation)
    else:
        logger.fdebug('filename is not a cbz : ' + filelocation)
        return

    cover = "notfound"
    pic_extensions = ('.jpg','.png','.webp')
    modtime = os.path.getmtime(dstlocation)
    low_infile = 999999

    try:
        with zipfile.ZipFile(dstlocation, 'r') as inzipfile:
            for infile in sorted(inzipfile.namelist()):
                tmp_infile = re.sub("[^0-9]","", infile).strip()
                if tmp_infile == '':
                    pass
                elif int(tmp_infile) < int(low_infile):
                    low_infile = tmp_infile
                    low_infile_name = infile
                if infile == 'ComicInfo.xml':
                    logger.fdebug('Extracting ComicInfo.xml to display.')
                    dst = os.path.join(mylar.CACHE_DIR, 'ComicInfo.xml')
                    data = inzipfile.read(infile)
                    #print str(data)
                    issuetag = 'xml'
                #looks for the first page and assumes it's the cover. (Alternate covers handled later on)
                elif any(['000.' in infile, '00.' in infile]) and infile.endswith(pic_extensions) and cover == "notfound":
                    logger.fdebug('Extracting primary image ' + infile + ' as coverfile for display.')
                    local_file = open(os.path.join(mylar.CACHE_DIR, 'temp.jpg'), "wb")
                    local_file.write(inzipfile.read(infile))
                    local_file.close
                    cover = "found"
                elif any(['00a' in infile, '00b' in infile, '00c' in infile, '00d' in infile, '00e' in infile]) and infile.endswith(pic_extensions) and cover == "notfound":
                    logger.fdebug('Found Alternate cover - ' + infile + ' . Extracting.')
                    altlist = ('00a', '00b', '00c', '00d', '00e')
                    for alt in altlist:
                        if alt in infile:
                            local_file = open(os.path.join(mylar.CACHE_DIR, 'temp.jpg'), "wb")
                            local_file.write(inzipfile.read(infile))
                            local_file.close
                            cover = "found"
                            break

                elif any(['001.jpg' in infile, '001.png' in infile, '001.webp' in infile, '01.jpg' in infile, '01.png' in infile, '01.webp' in infile]) and cover == "notfound":
                    logger.fdebug('Extracting primary image ' + infile + ' as coverfile for display.')
                    local_file = open(os.path.join(mylar.CACHE_DIR, 'temp.jpg'), "wb")
                    local_file.write(inzipfile.read(infile))
                    local_file.close
                    cover = "found"

            if cover != "found":
                logger.fdebug('Invalid naming sequence for jpgs discovered. Attempting to find the lowest sequence and will use as cover (it might not work). Currently : ' + str(low_infile))
                local_file = open(os.path.join(mylar.CACHE_DIR, 'temp.jpg'), "wb")
                local_file.write(inzipfile.read(infile))
                local_file.write(inzipfile.read(low_infile_name))
                local_file.close
                cover = "found"
                elif any(['00a' in infile, '00b' in infile, '00c' in infile, '00d' in infile, '00e' in infile]) and infile.endswith(pic_extensions) and cover == "notfound":
                    logger.fdebug('Found Alternate cover - ' + infile + ' . Extracting.')
                    altlist = ('00a', '00b', '00c', '00d', '00e')
                    for alt in altlist:
                        if alt in infile:
                            local_file = open(os.path.join(mylar.CACHE_DIR, 'temp.jpg'), "wb")
                            local_file.write(inzipfile.read(infile))
                            local_file.close
                            cover = "found"
                            break
                    cover = "found"

                elif any(['001.jpg' in infile, '001.png' in infile, '001.webp' in infile, '01.jpg' in infile, '01.png' in infile, '01.webp' in infile]) and cover == "notfound":
                    logger.fdebug('Extracting primary image ' + infile + ' as coverfile for display.')
                    local_file = open(os.path.join(mylar.CACHE_DIR, 'temp.jpg'), "wb")
                    local_file.write(inzipfile.read(infile))
                    local_file.close
                    cover = "found"
    except:
        logger.info('ERROR. Unable to properly retrieve the cover for displaying. It\'s probably best to re-tag this file.')
        return

            if cover != "found":
                logger.fdebug('Invalid naming sequence for jpgs discovered. Attempting to find the lowest sequence and will use as cover (it might not work). Currently : ' + str(low_infile))
                local_file = open(os.path.join(mylar.CACHE_DIR, 'temp.jpg'), "wb")
                local_file.write(inzipfile.read(low_infile_name))
                local_file.close
                cover = "found"
            ComicImage = os.path.join('cache', 'temp.jpg?' +str(modtime))
            IssueImage = replacetheslash(ComicImage)

    except:
        logger.info('ERROR. Unable to properly retrieve the cover for displaying. It\'s probably best to re-tag this file.')
        return

    ComicImage = os.path.join('cache', 'temp.jpg?' +str(modtime))
    IssueImage = replacetheslash(ComicImage)
else:
    IssueImage = "None"
    try:
        with zipfile.ZipFile(filelocation, 'r') as inzipfile:
            for infile in sorted(inzipfile.namelist()):
                if infile == 'ComicInfo.xml':
                    logger.fdebug('Found ComicInfo.xml - now retrieving information.')
                    data = inzipfile.read(infile)
                    issuetag = 'xml'
                    break
    except:
        logger.info('ERROR. Unable to properly retrieve the cover for displaying. It\'s probably best to re-tag this file.')
        return

if issuetag is None:
    import subprocess
    from subprocess import CalledProcessError, check_output
    unzip_cmd = "/usr/bin/unzip"
    data = None
    try:
        #unzip -z will extract the zip comment field.
        data = subprocess.check_output([unzip_cmd, '-z', dstlocation])
        # return data is encoded in bytes, not unicode. Need to figure out how to run check_output returning utf-8
        issuetag = 'comment'
    except CalledProcessError as e:
        dz = zipfile.ZipFile(filelocation, 'r')
        data = dz.comment
    except:
        logger.warn('Unable to extract comment field from zipfile.')
        return
    else:
        if data:
            issuetag = 'comment'
        else:
            logger.warn('No metadata available in zipfile comment field.')
            return

logger.info('Tag returned as being: ' + str(issuetag))

#logger.info('data:' + str(data))

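When no ComicInfo.xml is present, the rewritten branch above shells out to unzip -z to read the archive's zip comment and, failing that, falls back to zipfile's own comment attribute. The fallback alone needs only the standard library; a minimal sketch:

import zipfile

def read_zip_comment(path):
    # The zip "comment" field is where CBL-style tagging stores its
    # metadata; zipfile exposes it directly, so the /usr/bin/unzip -z
    # subprocess is only one of the two paths the code tries.
    with zipfile.ZipFile(path, 'r') as zf:
        comment = zf.comment
    return comment or None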
@@ -1549,28 +1572,30 @@ def IssueDetails(filelocation, IssueID=None):
except:
    pagecount = 0

i = 0
#not used atm.
#to validate a front cover if it's tagged as one within the zip (some do this)
#i = 0
#try:
#    pageinfo = result.getElementsByTagName('Page')[0].attributes
#    if pageinfo: pageinfo_test == True
#except:
#    pageinfo_test = False

try:
    pageinfo = result.getElementsByTagName('Page')[0].attributes
    if pageinfo: pageinfo_test == True
except:
    pageinfo_test = False
#if pageinfo_test:
#    while (i < int(pagecount)):
#        pageinfo = result.getElementsByTagName('Page')[i].attributes
#        attrib = pageinfo.getNamedItem('Image')
#        #logger.fdebug('Frontcover validated as being image #: ' + str(attrib.value))
#        att = pageinfo.getNamedItem('Type')
#        #logger.fdebug('pageinfo: ' + str(pageinfo))
#        if att.value == 'FrontCover':
#            #logger.fdebug('FrontCover detected. Extracting.')
#            break
#        i+=1

if pageinfo_test:
    while (i < int(pagecount)):
        pageinfo = result.getElementsByTagName('Page')[i].attributes
        attrib = pageinfo.getNamedItem('Image')
        #logger.fdebug('Frontcover validated as being image #: ' + str(attrib.value))
        att = pageinfo.getNamedItem('Type')
        logger.fdebug('pageinfo: ' + str(pageinfo))
        if att.value == 'FrontCover':
            #logger.fdebug('FrontCover detected. Extracting.')
            break
        i+=1
elif issuetag == 'comment':
    logger.info('CBL Tagging.')
    stripline = 'Archive: ' + dstlocation
    stripline = 'Archive: ' + filelocation
    data = re.sub(stripline, '', data.encode("utf-8")).strip()
    if data is None or data == '':
        return

@@ -2320,7 +2345,7 @@ def spantheyears(storyarcid):
lowyear = 9999
maxyear = 0
for la in totalcnt:
    if la['IssueDate'] is None:
    if la['IssueDate'] is None or la['IssueDate'] == '0000-00-00':
        continue
    else:
        if int(la['IssueDate'][:4]) > maxyear:

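The spantheyears fix above treats the '0000-00-00' placeholder like a missing date when computing a story arc's year span. The filter in isolation, as a sketch:

def year_span(issue_dates):
    # Return (low, high) publication years, ignoring both None and the
    # '0000-00-00' placeholder, as the fixed spantheyears() does.
    lowyear, maxyear = 9999, 0
    for d in issue_dates:
        if d is None or d == '0000-00-00':
            continue
        year = int(d[:4])
        lowyear = min(lowyear, year)
        maxyear = max(maxyear, year)
    return lowyear, maxyear

print(year_span(['2011-03-00', None, '0000-00-00', '2013-01-00']))  # (2011, 2013)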
@@ -425,7 +425,7 @@ def addComictoDB(comicid, mismatch=None, pullupd=None, imported=None, ogcname=No

logger.info('Attempting to retrieve the comic image for series')
try:
    r = requests.get(comic['ComicImage'], params=None, stream=True, headers=mylar.CV_HEADERS)
    r = requests.get(comic['ComicImage'], params=None, stream=True, verify=mylar.CV_VERIFY, headers=mylar.CV_HEADERS)

except Exception, e:
    logger.warn('Unable to download image from CV URL link: ' + comic['ComicImage'] + ' [Status Code returned: ' + str(r.status_code) + ']')

@@ -462,7 +462,7 @@ def addComictoDB(comicid, mismatch=None, pullupd=None, imported=None, ogcname=No

logger.info('Attempting to retrieve alternate comic image for the series.')
try:
    r = requests.get(comic['ComicImageALT'], params=None, stream=True, headers=mylar.CV_HEADERS)
    r = requests.get(comic['ComicImageALT'], params=None, stream=True, verify=mylar.CV_VERIFY, headers=mylar.CV_HEADERS)

except Exception, e:
    logger.warn('Unable to download image from CV URL link: ' + comic['ComicImageALT'] + ' [Status Code returned: ' + str(r.status_code) + ']')

@@ -271,146 +271,156 @@ def libraryScan(dir=None, append=False, ComicID=None, ComicName=None, cron=None,
if i['ComicLocation'].endswith('.cbz'):
logger.fdebug('[IMPORT-CBZ] Metatagging checking enabled.')
logger.info('[IMPORT-CBZ] Attempting to read tags present in filename: ' + i['ComicLocation'])
issueinfo = helpers.IssueDetails(i['ComicLocation'])
logger.info('issueinfo: ' + str(issueinfo))
if issueinfo is None:
logger.fdebug('[IMPORT-CBZ] No valid metadata contained within filename. Dropping down to parsing the filename itself.')
try:
issueinfo = helpers.IssueDetails(i['ComicLocation'], justinfo=True)
except:
logger.fdebug('[IMPORT-CBZ] Unable to retrieve metadata - possibly doesn\'t exist. Ignoring meta-retrieval')
pass
else:
issuenotes_id = None
logger.info('[IMPORT-CBZ] Successfully retrieved some tags. Lets see what I can figure out.')
comicname = issueinfo[0]['series']
if comicname is not None:
logger.fdebug('[IMPORT-CBZ] Series Name: ' + comicname)
as_d = filechecker.FileChecker()
as_dyninfo = as_d.dynamic_replace(comicname)
logger.fdebug('Dynamic-ComicName: ' + as_dyninfo['mod_seriesname'])
else:
logger.fdebug('[IMPORT-CBZ] No series name found within metadata. This is bunk - dropping down to file parsing for usable information.')
issueinfo = None
issue_number = None
logger.info('issueinfo: ' + str(issueinfo))

if issueinfo is not None:
try:
issueyear = issueinfo[0]['year']
except:
issueyear = None

#if the issue number is a non-numeric unicode string, this will screw up along with impID
issue_number = issueinfo[0]['issue_number']
if issue_number is not None:
logger.fdebug('[IMPORT-CBZ] Issue Number: ' + issue_number)
else:
issue_number = i['parsed']['issue_number']

if 'annual' in comicname.lower() or 'annual' in comfilename.lower():
if issue_number is None or issue_number == 'None':
logger.info('Annual detected with no issue number present within metadata. Assuming year as issue.')
try:
issue_number = 'Annual ' + str(issueyear)
except:
issue_number = 'Annual ' + i['parsed']['issue_year']
else:
logger.info('Annual detected with issue number present within metadata.')
if 'annual' not in issue_number.lower():
issue_number = 'Annual ' + issue_number
mod_series = re.sub('annual', '', comicname, flags=re.I).strip()
else:
mod_series = comicname

logger.fdebug('issue number SHOULD Be: ' + issue_number)

try:
issuetitle = issueinfo[0]['title']
except:
issuetitle = None
try:
issueyear = issueinfo[0]['year']
except:
issueyear = None
try:
issuevolume = str(issueinfo[0]['volume'])
if all([issuevolume is not None, issuevolume != 'None']) and not issuevolume.lower().startswith('v'):
issuevolume = 'v' + str(issuevolume)
logger.fdebug('[TRY]issue volume is: ' + str(issuevolume))
except:
logger.fdebug('[EXCEPT]issue volume is: ' + str(issuevolume))
issuevolume = None

if any([comicname is None, comicname == 'None', issue_number is None, issue_number == 'None']):
logger.fdebug('[IMPORT-CBZ] Improperly tagged file as the metatagging is invalid. Ignoring meta and just parsing the filename.')
issueinfo = None
if issueinfo is None:
logger.fdebug('[IMPORT-CBZ] No valid metadata contained within filename. Dropping down to parsing the filename itself.')
pass
else:
# if used by ComicTagger, Notes field will have the IssueID.
issuenotes = issueinfo[0]['notes']
logger.fdebug('[IMPORT-CBZ] Notes: ' + issuenotes)
if issuenotes is not None and issuenotes != 'None':
if 'Issue ID' in issuenotes:
st_find = issuenotes.find('Issue ID')
tmp_issuenotes_id = re.sub("[^0-9]", " ", issuenotes[st_find:]).strip()
if tmp_issuenotes_id.isdigit():
issuenotes_id = tmp_issuenotes_id
logger.fdebug('[IMPORT-CBZ] Successfully retrieved CV IssueID for ' + comicname + ' #' + issue_number + ' [' + str(issuenotes_id) + ']')
elif 'CVDB' in issuenotes:
st_find = issuenotes.find('CVDB')
tmp_issuenotes_id = re.sub("[^0-9]", " ", issuenotes[st_find:]).strip()
if tmp_issuenotes_id.isdigit():
issuenotes_id = tmp_issuenotes_id
logger.fdebug('[IMPORT-CBZ] Successfully retrieved CV IssueID for ' + comicname + ' #' + issue_number + ' [' + str(issuenotes_id) + ']')
else:
logger.fdebug('[IMPORT-CBZ] Unable to retrieve IssueID from meta-tagging. If there is other metadata present I will use that.')
issuenotes_id = None
logger.info('[IMPORT-CBZ] Successfully retrieved some tags. Lets see what I can figure out.')
comicname = issueinfo[0]['series']
if comicname is not None:
logger.fdebug('[IMPORT-CBZ] Series Name: ' + comicname)
as_d = filechecker.FileChecker()
as_dyninfo = as_d.dynamic_replace(comicname)
logger.fdebug('Dynamic-ComicName: ' + as_dyninfo['mod_seriesname'])
else:
logger.fdebug('[IMPORT-CBZ] No series name found within metadata. This is bunk - dropping down to file parsing for usable information.')
issueinfo = None
issue_number = None

logger.fdebug('[IMPORT-CBZ] Adding ' + comicname + ' to the import-queue!')
#impid = comicname + '-' + str(issueyear) + '-' + str(issue_number) #com_NAME + "-" + str(result_comyear) + "-" + str(comiss)
impid = str(random.randint(1000000,99999999))
logger.fdebug('[IMPORT-CBZ] impid: ' + str(impid))
#make sure we only add in those issueid's which don't already have a comicid attached via the cvinfo scan above (this is for reverse-lookup of issueids)
issuepopulated = False
if cvinfo_CID is None:
if issuenotes_id is None:
logger.info('[IMPORT-CBZ] No ComicID detected where it should be. Bypassing this metadata entry and going the parsing route [' + comfilename + ']')
else:
#we need to store the impid here as well so we can look it up.
issueid_list.append({'issueid': issuenotes_id,
'importinfo': {'impid': impid,
'comicid': None,
'comicname': comicname,
'dynamicname': as_dyninfo['mod_seriesname'],
'comicyear': issueyear,
'issuenumber': issue_number,
'volume': issuevolume,
'comfilename': comfilename,
'comlocation': comlocation.decode(mylar.SYS_ENCODING)}
})
mylar.IMPORT_CID_COUNT +=1
issuepopulated = True
if issueinfo is not None:
try:
issueyear = issueinfo[0]['year']
except:
issueyear = None

if issuepopulated == False:
if cvscanned_loc == os.path.dirname(comlocation):
cv_cid = cvinfo_CID
logger.fdebug('[IMPORT-CBZ] CVINFO_COMICID attached : ' + str(cv_cid))
#if the issue number is a non-numeric unicode string, this will screw up along with impID
issue_number = issueinfo[0]['issue_number']
if issue_number is not None:
logger.fdebug('[IMPORT-CBZ] Issue Number: ' + issue_number)
else:
cv_cid = None
import_by_comicids.append({
"impid": impid,
"comicid": cv_cid,
"watchmatch": None,
"displayname": mod_series,
"comicname": comicname,
"dynamicname": as_dyninfo['mod_seriesname'],
"comicyear": issueyear,
"issuenumber": issue_number,
"volume": issuevolume,
"issueid": issuenotes_id,
"comfilename": comfilename,
"comlocation": comlocation.decode(mylar.SYS_ENCODING)
})
issue_number = i['parsed']['issue_number']

mylar.IMPORT_CID_COUNT +=1
else:
pass
#logger.fdebug(i['ComicFilename'] + ' is not in a metatagged format (cbz). Bypassing reading of the metatags')
if 'annual' in comicname.lower() or 'annual' in comfilename.lower():
if issue_number is None or issue_number == 'None':
logger.info('Annual detected with no issue number present within metadata. Assuming year as issue.')
try:
issue_number = 'Annual ' + str(issueyear)
except:
issue_number = 'Annual ' + i['parsed']['issue_year']
else:
logger.info('Annual detected with issue number present within metadata.')
if 'annual' not in issue_number.lower():
issue_number = 'Annual ' + issue_number
mod_series = re.sub('annual', '', comicname, flags=re.I).strip()
else:
mod_series = comicname

logger.fdebug('issue number SHOULD Be: ' + issue_number)

try:
issuetitle = issueinfo[0]['title']
except:
issuetitle = None
try:
issueyear = issueinfo[0]['year']
except:
issueyear = None
try:
issuevolume = str(issueinfo[0]['volume'])
if all([issuevolume is not None, issuevolume != 'None', not issuevolume.lower().startswith('v')]):
issuevolume = 'v' + str(issuevolume)
if any([issuevolume is None, issuevolume == 'None']):
logger.info('[EXCEPT] issue volume is NONE')
issuevolume = None
else:
logger.fdebug('[TRY]issue volume is: ' + str(issuevolume))
except:
logger.fdebug('[EXCEPT]issue volume is: ' + str(issuevolume))
issuevolume = None

if any([comicname is None, comicname == 'None', issue_number is None, issue_number == 'None']):
logger.fdebug('[IMPORT-CBZ] Improperly tagged file as the metatagging is invalid. Ignoring meta and just parsing the filename.')
issueinfo = None
pass
else:
# if used by ComicTagger, Notes field will have the IssueID.
issuenotes = issueinfo[0]['notes']
logger.fdebug('[IMPORT-CBZ] Notes: ' + issuenotes)
if issuenotes is not None and issuenotes != 'None':
if 'Issue ID' in issuenotes:
st_find = issuenotes.find('Issue ID')
tmp_issuenotes_id = re.sub("[^0-9]", " ", issuenotes[st_find:]).strip()
if tmp_issuenotes_id.isdigit():
issuenotes_id = tmp_issuenotes_id
logger.fdebug('[IMPORT-CBZ] Successfully retrieved CV IssueID for ' + comicname + ' #' + issue_number + ' [' + str(issuenotes_id) + ']')
elif 'CVDB' in issuenotes:
st_find = issuenotes.find('CVDB')
tmp_issuenotes_id = re.sub("[^0-9]", " ", issuenotes[st_find:]).strip()
if tmp_issuenotes_id.isdigit():
issuenotes_id = tmp_issuenotes_id
logger.fdebug('[IMPORT-CBZ] Successfully retrieved CV IssueID for ' + comicname + ' #' + issue_number + ' [' + str(issuenotes_id) + ']')
else:
logger.fdebug('[IMPORT-CBZ] Unable to retrieve IssueID from meta-tagging. If there is other metadata present I will use that.')

logger.fdebug('[IMPORT-CBZ] Adding ' + comicname + ' to the import-queue!')
#impid = comicname + '-' + str(issueyear) + '-' + str(issue_number) #com_NAME + "-" + str(result_comyear) + "-" + str(comiss)
impid = str(random.randint(1000000,99999999))
logger.fdebug('[IMPORT-CBZ] impid: ' + str(impid))
#make sure we only add in those issueid's which don't already have a comicid attached via the cvinfo scan above (this is for reverse-lookup of issueids)
issuepopulated = False
if cvinfo_CID is None:
if issuenotes_id is None:
logger.info('[IMPORT-CBZ] No ComicID detected where it should be. Bypassing this metadata entry and going the parsing route [' + comfilename + ']')
else:
#we need to store the impid here as well so we can look it up.
issueid_list.append({'issueid': issuenotes_id,
'importinfo': {'impid': impid,
'comicid': None,
'comicname': comicname,
'dynamicname': as_dyninfo['mod_seriesname'],
'comicyear': issueyear,
'issuenumber': issue_number,
'volume': issuevolume,
'comfilename': comfilename,
'comlocation': comlocation.decode(mylar.SYS_ENCODING)}
})
mylar.IMPORT_CID_COUNT +=1
issuepopulated = True

if issuepopulated == False:
if cvscanned_loc == os.path.dirname(comlocation):
cv_cid = cvinfo_CID
logger.fdebug('[IMPORT-CBZ] CVINFO_COMICID attached : ' + str(cv_cid))
else:
cv_cid = None
import_by_comicids.append({
"impid": impid,
"comicid": cv_cid,
"watchmatch": None,
"displayname": mod_series,
"comicname": comicname,
"dynamicname": as_dyninfo['mod_seriesname'],
"comicyear": issueyear,
"issuenumber": issue_number,
"volume": issuevolume,
"issueid": issuenotes_id,
"comfilename": comfilename,
"comlocation": comlocation.decode(mylar.SYS_ENCODING)
})

mylar.IMPORT_CID_COUNT +=1
else:
pass
#logger.fdebug(i['ComicFilename'] + ' is not in a metatagged format (cbz). Bypassing reading of the metatags')

if issueinfo is None:
if i['parsedinfo']['issue_number'] is None:

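Both metadata branches above recover a ComicVine IssueID from ComicTagger's Notes field by locating the 'Issue ID' or 'CVDB' marker and keeping only the digits after it. The lookup in isolation, as a slightly simplified sketch (the real code substitutes spaces for non-digits before stripping; this version concatenates digits directly):

import re

def issueid_from_notes(notes):
    # Pull a ComicVine IssueID out of a ComicTagger Notes string.
    if not notes or notes == 'None':
        return None
    for marker in ('Issue ID', 'CVDB'):
        st_find = notes.find(marker)
        if st_find != -1:
            candidate = re.sub("[^0-9]", "", notes[st_find:]).strip()
            if candidate.isdigit():
                return candidate
    return None

print(issueid_from_notes('Tagged with ComicTagger using info from Comic Vine [Issue ID 371103].'))
# 371103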
@@ -68,10 +68,9 @@ def pullsearch(comicapi, comicquery, offset, explicit, type):

#download the file:
payload = None
verify = False

try:
    r = requests.get(PULLURL, params=payload, verify=verify, headers=mylar.CV_HEADERS)
    r = requests.get(PULLURL, params=payload, verify=mylar.CV_VERIFY, headers=mylar.CV_HEADERS)
except Exception, e:
    logger.warn('Error fetching data from ComicVine: %s' % (e))
    return

@@ -415,10 +414,9 @@ def storyarcinfo(xmlid):

#download the file:
payload = None
verify = False

try:
    r = requests.get(ARCPULL_URL, params=payload, verify=verify, headers=mylar.CV_HEADERS)
    r = requests.get(ARCPULL_URL, params=payload, verify=mylar.CV_VERIFY, headers=mylar.CV_HEADERS)
except Exception, e:
    logger.warn('Error fetching data from ComicVine: %s' % (e))
    return

@@ -10,6 +10,7 @@ import ftpsshup
import datetime
import gzip
import time
import random
from StringIO import StringIO

import mylar

@@ -144,7 +145,10 @@ def torrents(pickfeed=None, seriesname=None, issue=None, feedinfo=None):

if all([pickfeed != '4', pickfeed != '3', pickfeed != '5', pickfeed != '999']):
    payload = None

    ddos_protection = round(random.uniform(0,15),2)
    time.sleep(ddos_protection)

    try:
        cf_cookievalue = None
        scraper = cfscrape.create_scraper()

@@ -360,8 +364,12 @@ def nzbs(provider=None, forcerss=False):
newznabuid = newznabuid or '1'
newznabcat = newznabcat or '7030'

# 11-21-2014: added &num=100 to return 100 results (or maximum) - unsure of cross-reliability
_parse_feed(site, newznab_host[1].rstrip() + '/rss?t=' + str(newznabcat) + '&dl=1&i=' + str(newznabuid) + '&num=100&r=' + newznab_host[3].rstrip(), bool(newznab_host[2]))
if site[-10:] == '[nzbhydra]':
    #to allow nzbhydra to do category search by most recent (ie. rss)
    _parse_feed(site, newznab_host[1].rstrip() + '/api?t=search&cat=' + str(newznabcat) + '&dl=1&i=' + str(newznabuid) + '&num=100&apikey=' + newznab_host[3].rstrip(), bool(newznab_host[2]))
else:
    # 11-21-2014: added &num=100 to return 100 results (or maximum) - unsure of cross-reliability
    _parse_feed(site, newznab_host[1].rstrip() + '/rss?t=' + str(newznabcat) + '&dl=1&i=' + str(newznabuid) + '&num=100&r=' + newznab_host[3].rstrip(), bool(newznab_host[2]))

feeddata = []

@@ -307,7 +307,6 @@ def NZB_SEARCH(ComicName, IssueNumber, ComicYear, SeriesYear, Publisher, IssueDa

if any([allow_packs is None, allow_packs == 'None', allow_packs == 0]) and all([mylar.ENABLE_TORRENT_SEARCH, mylar.ENABLE_32P]):
    allow_packs = False
logger.info('allow_packs set to :' + str(allow_packs))

newznab_local = False

@@ -1232,19 +1231,16 @@ def NZB_SEARCH(ComicName, IssueNumber, ComicYear, SeriesYear, Publisher, IssueDa
else:
    nzbprov = 'DEM'

logger.info(nzbprov)
logger.info('rss:' + RSS)
logger.info('allow_packs:' + str(allow_packs))
if nzbprov == '32P' and allow_packs and RSS == 'no':
    logger.info('pack:' + entry['pack'])
    logger.fdebug('pack:' + entry['pack'])
if all([nzbprov == '32P', RSS == 'no', allow_packs]) and any([entry['pack'] == '1', entry['pack'] == '2']):
if nzbprov == '32P':
    if entry['pack'] == '2':
        logger.info('[PACK-QUEUE] Diamond FreeLeech Pack detected.')
        logger.fdebug('[PACK-QUEUE] Diamond FreeLeech Pack detected.')
    elif entry['pack'] == '1':
        logger.info('[PACK-QUEUE] Normal Pack detected. Checking available inkdrops prior to downloading.')
        logger.fdebug('[PACK-QUEUE] Normal Pack detected. Checking available inkdrops prior to downloading.')
    else:
        logger.info('[PACK-QUEUE] Invalid Pack.')
        logger.fdebug('[PACK-QUEUE] Invalid Pack.')

    #find the pack range.
    pack_issuelist = entry['issues']

@@ -1253,7 +1249,7 @@ def NZB_SEARCH(ComicName, IssueNumber, ComicYear, SeriesYear, Publisher, IssueDa
if issueid_info['valid'] == True:
    logger.info('Issue Number ' + IssueNumber + ' exists within pack. Continuing.')
else:
    logger.info('Issue Number ' + IssueNumber + ' does NOT exist within this pack. Skipping')
    logger.fdebug('Issue Number ' + IssueNumber + ' does NOT exist within this pack. Skipping')
    continue

#pack support.

@@ -1571,7 +1567,9 @@ def NZB_SEARCH(ComicName, IssueNumber, ComicYear, SeriesYear, Publisher, IssueDa

#generate nzbname
nzbname = nzbname_create(nzbprov, info=comicinfo, title=ComicTitle) #entry['title'])

if nzbname is None:
    logger.error('[NZBPROVIDER = NONE] Encountered an error using given provider with requested information: ' + comicinfo + '. You have a blank entry most likely in your newznabs, fix it & restart Mylar')
    continue
#generate the send-to and actually send the nzb / torrent.
searchresult = searcher(nzbprov, nzbname, comicinfo, entry['link'], IssueID, ComicID, tmpprov, newznab=newznab_host)

@@ -1877,8 +1875,9 @@ def nzbname_create(provider, title=None, info=None):
#the nzbname here is used when post-processing
# it searches nzblog which contains the nzbname to pull out the IssueID and start the post-processing
# it is also used to keep the hashinfo for the nzbname in case it fails downloading, it will get put into the failed db for future exclusions
nzbname = None

if mylar.USE_BLACKHOLE and provider != '32P' and provider != 'TPSE':
if mylar.USE_BLACKHOLE and all([provider != '32P', provider != 'TPSE', provider != 'WWT', provider != 'DEM']):
    if os.path.exists(mylar.BLACKHOLE_DIR):
        #load in the required info to generate the nzb names when required (blackhole only)
        ComicName = info[0]['ComicName']

@@ -1933,8 +1932,11 @@ def nzbname_create(provider, title=None, info=None):
    nzbname = re.sub(match.group(), '', nzbname).strip()
logger.fdebug('[SEARCHER] end nzbname: ' + nzbname)

logger.fdebug("nzbname used for post-processing:" + nzbname)
return nzbname
if nzbname is None:
    return None
else:
    logger.fdebug("nzbname used for post-processing:" + nzbname)
    return nzbname

def searcher(nzbprov, nzbname, comicinfo, link, IssueID, ComicID, tmpprov, directsend=None, newznab=None):
alt_nzbname = None

@@ -158,13 +158,13 @@ class TorrentClient(object):

def get_the_hash(self, filepath):
    import hashlib, StringIO
    from mylar import bencode
    import bencode

    # Open torrent file
    torrent_file = open(filepath, "rb")
    metainfo = bencode.bdecode(torrent_file.read())
    metainfo = bencode.decode(torrent_file.read())
    info = metainfo['info']
    thehash = hashlib.sha1(bencode.bencode(info)).hexdigest().upper()
    thehash = hashlib.sha1(bencode.encode(info)).hexdigest().upper()
    logger.debug('Hash: ' + thehash)
    return thehash

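get_the_hash above computes the standard BitTorrent info-hash: decode the .torrent, re-encode only its info dictionary, and SHA-1 the result. The commit swaps Mylar's bundled bencode module for an external one whose API is decode()/encode() rather than bdecode()/bencode(). A standalone sketch; the bencode import is an assumption, standing in for any module that exposes decode()/encode() as the new code expects:

import hashlib
import bencode  # assumed API: decode() / encode(), as in the new code above

def torrent_info_hash(filepath):
    # The info-hash trackers and clients use is the SHA-1 of the
    # bencoded 'info' dictionary, not of the whole .torrent file.
    with open(filepath, 'rb') as torrent_file:
        metainfo = bencode.decode(torrent_file.read())
    info = metainfo['info']
    return hashlib.sha1(bencode.encode(info)).hexdigest().upper()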
@@ -551,7 +551,8 @@ class WebInterface(object):

myDB.upsert("readinglist", newVals, newCtrl)

logger.info(newVals)
#logger.info(newVals)

#run the Search for Watchlist matches now.
logger.fdebug(module + ' Now searching your watchlist for matches belonging to this story arc.')
self.ArcWatchlist(storyarcid)

@@ -1594,10 +1595,17 @@ class WebInterface(object):

date_fmt = "%B %d, %Y"

try:
    con_startweek = u"" + startweek.strftime(date_fmt).decode('utf-8')
    con_endweek = u"" + endweek.strftime(date_fmt).decode('utf-8')
except:
    con_startweek = u"" + startweek.strftime(date_fmt).decode('cp1252')
    con_endweek = u"" + endweek.strftime(date_fmt).decode('cp1252')

weekinfo = {'weeknumber': weeknumber,
            'startweek': u"" + startweek.strftime(date_fmt).decode('utf-8'),
            'startweek': con_startweek,
            'midweek': midweek.strftime('%Y-%m-%d'),
            'endweek': u"" + endweek.strftime(date_fmt).decode('utf-8'),
            'endweek': con_endweek,
            'year': year,
            'prev_weeknumber': prev_week,
            'prev_year': prev_year,

@@ -2476,7 +2484,7 @@ class WebInterface(object):
'IssueNumber': la['IssueNumber'],
'ReadingOrder': la['ReadingOrder']})

if la['IssueDate'] is None:
if la['IssueDate'] is None or la['IssueDate'] == '0000-00-00':
    continue
else:
    if int(la['IssueDate'][:4]) > maxyear:

@@ -2794,40 +2802,41 @@ class WebInterface(object):
 
         logger.info('arcpub: ' + arcpub)
         dstloc = helpers.arcformat(arcdir, spanyears, arcpub)
 
-        if not os.path.isdir(dstloc):
-            if mylar.STORYARCDIR:
-                logger.info('Story Arc Directory [' + dstloc + '] does not exist! - attempting to create now.')
-            else:
-                logger.info('Story Arc Grab-Bag Directory [' + dstloc + '] does not exist! - attempting to create now.')
-            checkdirectory = filechecker.validateAndCreateDirectory(dstloc, True)
-            if not checkdirectory:
-                logger.warn('Error trying to validate/create directory. Aborting this process at this time.')
-                return
-
-        if all([mylar.CVINFO, mylar.STORYARCDIR]):
-            if not os.path.isfile(os.path.join(dstloc, "cvinfo")) or mylar.CV_ONETIMER:
-                logger.fdebug('Generating cvinfo file for story-arc.')
-                with open(os.path.join(dstloc, "cvinfo"), "w") as text_file:
-                    if any([ArcWatch[0]['StoryArcID'] == ArcWatch[0]['CV_ArcID'], ArcWatch[0]['CV_ArcID'] is None]):
-                        cvinfo_arcid = ArcWatch[0]['StoryArcID']
-                    else:
-                        cvinfo_arcid = ArcWatch[0]['CV_ArcID']
-
-                    text_file.write('https://comicvine.gamespot.com/storyarc/4045-' + str(cvinfo_arcid))
-                if mylar.ENFORCE_PERMS:
-                    filechecker.setperms(os.path.join(dstloc, 'cvinfo'))
-
-        #get the list of files within the storyarc directory, if any.
-        filelist = None
-        if mylar.STORYARCDIR:
-            fchk = filechecker.FileChecker(dir=dstloc, watchcomic=None, Publisher=None, sarc='true', justparse=True)
-            filechk = fchk.listFiles()
-            fccnt = filechk['comiccount']
-            logger.fdebug('[STORY ARC DIRECTORY] ' + str(fccnt) + ' files exist within this directory.')
-            if fccnt > 0:
-                filelist = filechk['comiclist']
-            logger.info(filechk)
+        filelist = None
+        if dstloc is not None:
+            if not os.path.isdir(dstloc):
+                if mylar.STORYARCDIR:
+                    logger.info('Story Arc Directory [' + dstloc + '] does not exist! - attempting to create now.')
+                else:
+                    logger.info('Story Arc Grab-Bag Directory [' + dstloc + '] does not exist! - attempting to create now.')
+                checkdirectory = filechecker.validateAndCreateDirectory(dstloc, True)
+                if not checkdirectory:
+                    logger.warn('Error trying to validate/create directory. Aborting this process at this time.')
+                    return
+
+            if all([mylar.CVINFO, mylar.STORYARCDIR]):
+                if not os.path.isfile(os.path.join(dstloc, "cvinfo")) or mylar.CV_ONETIMER:
+                    logger.fdebug('Generating cvinfo file for story-arc.')
+                    with open(os.path.join(dstloc, "cvinfo"), "w") as text_file:
+                        if any([ArcWatch[0]['StoryArcID'] == ArcWatch[0]['CV_ArcID'], ArcWatch[0]['CV_ArcID'] is None]):
+                            cvinfo_arcid = ArcWatch[0]['StoryArcID']
+                        else:
+                            cvinfo_arcid = ArcWatch[0]['CV_ArcID']
+
+                        text_file.write('https://comicvine.gamespot.com/storyarc/4045-' + str(cvinfo_arcid))
+                    if mylar.ENFORCE_PERMS:
+                        filechecker.setperms(os.path.join(dstloc, 'cvinfo'))
+
+            #get the list of files within the storyarc directory, if any.
+            if mylar.STORYARCDIR:
+                fchk = filechecker.FileChecker(dir=dstloc, watchcomic=None, Publisher=None, sarc='true', justparse=True)
+                filechk = fchk.listFiles()
+                fccnt = filechk['comiccount']
+                logger.fdebug('[STORY ARC DIRECTORY] ' + str(fccnt) + ' files exist within this directory.')
+                if fccnt > 0:
+                    filelist = filechk['comiclist']
+                logger.info(filechk)
 
         arc_match = []
         wantedlist = []
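The net effect of the re-indent above is that the directory checks run only when a destination was actually resolved, and filelist is initialized up front so later references can never hit an undefined name. The guard pattern in isolation, with ensure_directory as a hypothetical stand-in for filechecker.validateAndCreateDirectory:

    import os

    def ensure_directory(path):
        # Create the directory tree when missing; report success as a bool.
        if path is None:
            return False
        try:
            if not os.path.isdir(path):
                os.makedirs(path)
            return True
        except OSError:
            return False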
@@ -2891,18 +2900,19 @@ class WebInterface(object):
                     matcheroso = "yes"
                     break
             if matcheroso == "no":
-                logger.fdebug("Unable to find a match for " + arc['ComicName'] + " :#" + arc['IssueNumber'])
+                logger.fdebug("[NO WATCHLIST MATCH] Unable to find a match for " + arc['ComicName'] + " :#" + arc['IssueNumber'])
                 wantedlist.append({
                     "ComicName": arc['ComicName'],
                     "IssueNumber": arc['IssueNumber'],
                     "IssueYear": arc['IssueYear']})
 
+                if filelist is not None and mylar.STORYARCDIR:
+                    logger.fdebug("[NO WATCHLIST MATCH] Checking against local Arc directory for given issue.")
                     fn = 0
+                    valids = [x for x in filelist if re.sub('[\|\s]','', x['dynamic_name'].lower()).strip() == re.sub('[\|\s]','', arc['DynamicComicName'].lower()).strip()]
+                    logger.info('valids: ' + str(valids))
+                    if len(valids) > 0:
-                for tmpfc in filelist:
+                        for tmpfc in valids: #filelist:
                             haveissue = "no"
                             issuedupe = "no"
                             temploc = tmpfc['issue_number'].replace('_', ' ')
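The valids filter compares "dynamic" names, i.e. series names normalized so separators and spacing don't affect the match. A sketch of the same normalization (same_dynamic_name is illustrative, not a mylar helper):

    import re

    def same_dynamic_name(a, b):
        # Strip '|' separators and whitespace, lowercase, then compare.
        norm = lambda s: re.sub(r'[\|\s]', '', s.lower()).strip()
        return norm(a) == norm(b)

    # same_dynamic_name('batman|beyond', 'Batman Beyond')  ->  True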
@@ -2911,31 +2921,46 @@ class WebInterface(object):
                             if int_iss == fcdigit:
                                 logger.fdebug(arc['ComicName'] + ' Issue #' + arc['IssueNumber'] + ' already present in StoryArc directory.')
                                 #update readinglist db to reflect status.
+                                rr_rename = False
                                 if mylar.READ2FILENAME:
                                     readorder = helpers.renamefile_readingorder(arc['ReadingOrder'])
-                                    dfilename = str(readorder) + "-" + tmpfc['comicfilename']
+                                    if all([tmpfc['reading_order'] is not None, int(readorder) != int(tmpfc['reading_order']['reading_sequence'])]):
+                                        logger.warn('reading order sequence has changed for this issue from ' + str(tmpfc['reading_order']['reading_sequence']) + ' to ' + str(readorder))
+                                        rr_rename = True
+                                        dfilename = str(readorder) + '-' + tmpfc['reading_order']['filename']
+                                    elif tmpfc['reading_order'] is None:
+                                        dfilename = str(readorder) + '-' + tmpfc['comicfilename']
+                                    else:
+                                        dfilename = str(readorder) + '-' + tmpfc['reading_order']['filename']
                                 else:
                                     dfilename = tmpfc['comicfilename']
 
                                 if all([tmpfc['sub'] is not None, tmpfc['sub'] != 'None']):
-                                    loc_path = os.path.join(tmpfc['ComicLocation'], tmpfc['sub'], dfilename)
+                                    loc_path = os.path.join(tmpfc['comiclocation'], tmpfc['sub'], dfilename)
                                 else:
-                                    loc_path = os.path.join(tmpfc['ComicLocation'], dfilename)
+                                    loc_path = os.path.join(tmpfc['comiclocation'], dfilename)
 
+                                if rr_rename:
+                                    logger.fdebug('Now re-sequencing file to : ' + dfilename)
+                                    os.rename(os.path.join(tmpfc['comiclocation'], tmpfc['comicfilename']), loc_path)
+
                                 newVal = {"Status": "Downloaded",
                                           "Location": loc_path} #dfilename}
                                 ctrlVal = {"IssueArcID": arc['IssueArcID']}
                                 myDB.upsert("readinglist", newVal, ctrlVal)
                                 fn+=1
+                        else:
+                            newVal = {"Status": "Skipped"}
+                            ctrlVal = {"IssueArcID": arc['IssueArcID']}
+                            myDB.upsert("readinglist", newVal, ctrlVal)
+                    continue
 
-        logger.fdebug("we matched on " + str(len(arc_match)) + " issues")
-        newVal = {"Status": "Skipped"}
-        ctrlVal = {"IssueArcID": arc['IssueArcID']}
-        myDB.upsert("readinglist", newVal, ctrlVal)
 
+        logger.fdebug(str(len(arc_match)) + " issues currently exist on your watchlist that are within this arc. Analyzing...")
         for m_arc in arc_match:
             #now we cycle through the issues looking for a match.
-            issue = myDB.selectone("SELECT * FROM issues where ComicID=? and Issue_Number=?", [m_arc['match_id'], m_arc['match_issue']]).fetchone()
+            #issue = myDB.selectone("SELECT * FROM issues where ComicID=? and Issue_Number=?", [m_arc['match_id'], m_arc['match_issue']]).fetchone()
+            issue = myDB.selectone("SELECT a.Issue_Number, a.Status, a.IssueID, a.ComicName, a.IssueDate, a.Location, b.readingorder FROM issues AS a INNER JOIN readinglist AS b ON a.comicid = b.comicid where a.comicid=? and a.issue_number=?", [m_arc['match_id'], m_arc['match_issue']]).fetchone()
 
             if issue is None: pass
             else:
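READ2FILENAME prefixes each file with its reading-order sequence so the arc sorts correctly on disk, and a changed sequence therefore forces a rename to the new prefix. A sketch of the prefixing, assuming helpers.renamefile_readingorder zero-pads the number (the padding width here is a guess):

    def readingorder_prefix(reading_order, filename):
        # Assumed behaviour of helpers.renamefile_readingorder: zero-pad
        # the sequence so lexical sort equals reading order.
        return str(reading_order).zfill(3) + '-' + filename

    # readingorder_prefix(7, 'Secret Wars 001.cbz')  ->  '007-Secret Wars 001.cbz'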
@@ -2975,14 +3000,34 @@ class WebInterface(object):
                     logger.fdebug('Destination location set to : ' + m_arc['destination_location'])
                     logger.fdebug('Attempting to copy into StoryArc directory')
                     #copy into StoryArc directory...
 
+                    #need to make sure the file being copied over isn't already present in the directory either with a different filename,
+                    #or different reading order.
+                    rr_rename = False
                     if mylar.READ2FILENAME:
                         readorder = helpers.renamefile_readingorder(m_arc['match_readingorder'])
-                        dfilename = str(readorder) + "-" + issue['Location']
+                        if all([m_arc['match_readingorder'] is not None, int(readorder) != int(m_arc['match_readingorder'])]):
+                            logger.warn('reading order sequence has changed for this issue from ' + str(m_arc['match_reading_order']) + ' to ' + str(readorder))
+                            rr_rename = True
+                            dfilename = str(readorder) + '-' + issue['Location']
+                        elif m_arc['match_readingorder'] is None:
+                            dfilename = str(readorder) + '-' + issue['Location']
+                        else:
+                            dfilename = str(readorder) + '-' + issue['Location']
                     else:
                         dfilename = issue['Location']
 
+                    #dfilename = str(readorder) + "-" + issue['Location']
+                    #else:
+                    #dfilename = issue['Location']
+
                     dstloc = os.path.join(m_arc['destination_location'], dfilename)
 
+                    if rr_rename:
+                        logger.fdebug('Now re-sequencing COPIED file to : ' + dfilename)
+                        os.rename(issloc, dstloc)
+
                     if not os.path.isfile(dstloc):
                         logger.fdebug('Copying ' + issloc + ' to ' + dstloc)
                         try:
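The copy branch fires only when the destination file is absent, which keeps a just-re-sequenced file from being clobbered by a fresh copy. Schematically:

    import os
    import shutil

    def copy_if_missing(src, dst):
        # Copy only when the destination doesn't already exist.
        if not os.path.isfile(dst):
            shutil.copy(src, dst)
            return True
        return False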
@@ -3008,10 +3053,10 @@ class WebInterface(object):
                 else:
                     logger.fdebug("We don't have " + issue['ComicName'] + " :# " + issue['Issue_Number'])
                     ctrlVal = {"IssueArcID": m_arc['match_issuearcid']}
-                    newVal = {"Status": "Wanted",
+                    newVal = {"Status": issue['Status'], #"Wanted",
                               "IssueID": issue['IssueID']}
                     myDB.upsert("readinglist", newVal, ctrlVal)
-                    logger.info("Marked " + issue['ComicName'] + " :# " + issue['Issue_Number'] + " as Wanted.")
+                    logger.info("Marked " + issue['ComicName'] + " :# " + issue['Issue_Number'] + " as " + issue['Status'])
 
         return
@@ -5080,22 +5125,14 @@ class WebInterface(object):
             return
     orderThis.exposed = True
 
-    def torrentit(self, torrent_hash):
-        import test
-        #import lib.torrent.libs.rtorrent as rTorrent
-        from base64 import b16encode, b32decode
-        #torrent_hash # Hash of the torrent
-        logger.fdebug("Working on torrent: " + torrent_hash)
-
-        if len(torrent_hash) == 32:
-            torrent_hash = b16encode(b32decode(torrent_hash))
-
-        if not len(torrent_hash) == 40:
-            logger.error("Torrent hash is missing, or an invalid hash value has been passed")
-            return
-        else:
-            rp = test.RTorrent()
-            torrent_info = rp.main(torrent_hash, check=True)
+    def torrentit(self, issueid=None, torrent_hash=None, download=False):
+        #make sure it's bool'd here.
+        if download == 'True':
+            download = True
+        else:
+            download = False
+
+        torrent_info = helpers.torrentinfo(issueid, torrent_hash, download)
 
         if torrent_info:
             torrent_name = torrent_info['name']
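The removed validation is still worth noting: magnet links sometimes carry the infohash base32-encoded (32 characters), while rtorrent expects the 40-character hex form. The conversion the old code performed, as a standalone sketch:

    from base64 import b16encode, b32decode

    def normalize_torrent_hash(torrent_hash):
        # 32-char base32 -> 20 raw bytes -> 40-char hex
        if len(torrent_hash) == 32:
            torrent_hash = b16encode(b32decode(torrent_hash))
        if len(torrent_hash) != 40:
            raise ValueError('invalid torrent hash: %r' % torrent_hash)
        return torrent_hash.upper()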
@@ -5120,15 +5157,20 @@ class WebInterface(object):
 
             ti = '<table><tr><td>'
             ti += '<center><b>' + torrent_name + '</b></center></br>'
-            ti += '<center>' + torrent_info['hash'] + '</center>'
+            if torrent_info['completed'] and download is True:
+                ti += '<br><center><tr><td>AUTO-SNATCH ENABLED: ' + torrent_info['snatch_status'] + '</center></td></tr>'
+            ti += '<tr><td><center>Hash: ' + torrent_info['hash'] + '</center></td></tr>'
+            ti += '<tr><td><center>Location: ' + os.path.join(torrent_info['folder'], torrent_name) + '</center></td></tr></br>'
             ti += '<tr><td><center>Filesize: ' + torrent_info['filesize'] + '</center></td></tr>'
             ti += '<tr><td><center>' + torrent_info['download'] + ' DOWN / ' + torrent_info['upload'] + ' UP</center></td></tr>'
             ti += '<tr><td><center>Ratio: ' + str(torrent_info['ratio']) + '</center></td></tr>'
             ti += '<tr><td><center>Seedtime: ' + torrent_info['seedtime'] + '</center></td></tr>'
             ti += '</table>'
 
         else:
             torrent_name = 'Not Found'
             ti = 'Torrent not found (' + str(torrent_hash) + ')'
 
         return ti
 
     torrentit.exposed = True