Many updates & bug fixes - too many to list here (check the changelog @ http://forum.mylarcomics.com/viewtopic.php?f=2&t=545)

This commit is contained in:
evilhero 2015-01-16 14:40:08 -05:00
parent c814e0aa35
commit 3945ba4a0d
30 changed files with 1977 additions and 484 deletions

View File

@ -511,19 +511,23 @@
<%
if annual['Status'] == 'Skipped':
grade = 'Z'
agrade = 'Z'
elif annual['Status'] == 'Wanted':
grade = 'X'
agrade = 'X'
elif annual['Status'] == 'Snatched':
grade = 'C'
agrade = 'C'
elif annual['Status'] == 'Downloaded':
grade = 'A'
agrade = 'A'
elif annual['Status'] == 'Archived':
grade = 'A'
agrade = 'A'
elif annual['Status'] == 'Ignored':
agrade = 'A'
elif annual['Status'] == 'Failed':
agrade = 'C'
else:
grade = 'A'
agrade = 'A'
%>
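The <% %> block above is a plain status-to-grade lookup used for the row's CSS class; as a minimal sketch, the same table could be written as a dict with a default (the grade_for helper is hypothetical, not part of the template):

# Sketch of the template's Status -> grade chain as a lookup table.
STATUS_GRADE = {
    'Skipped': 'Z',
    'Wanted': 'X',
    'Snatched': 'C',
    'Downloaded': 'A',
    'Archived': 'A',
    'Ignored': 'A',
    'Failed': 'C',
}

def grade_for(status):
    # Anything unrecognized falls through to 'A', matching the else branch.
    return STATUS_GRADE.get(status, 'A')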
<tr class="${annual['Status']} grade${grade}">
<tr class="${annual['Status']} grade${agrade}">
<td id="select"><input type="checkbox" name="${annual['IssueID']}" class="checkbox" value="${annual['IssueID']}" /></td>
<%
if annual['Int_IssueNumber'] is None:

View File

@ -598,10 +598,20 @@
</fieldset>
<fieldset>
<legend>Duplicate Handling</legend>
<div class="row checkbox left clearfix">
</div>
<div class="row">
<label>Retain based on</label>
<select name="dupeconstraint">
%for x in ['filesize', 'filetype-cbr', 'filetype-cbz']:
<%
if config['dupeconstraint'] == x:
outputselect = 'selected'
else:
outputselect = ''
%>
<option value=${x} ${outputselect}>${x}</option>
%endfor
</select>
</div>
</fieldset>
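The dupeconstraint options above feed Mylar's duplicate handling; a minimal sketch of how such a constraint could be applied (the keep_existing helper and its inputs are assumptions for illustration, not Mylar's actual code):

import os

def keep_existing(existing, candidate, dupeconstraint):
    # Hypothetical: True retains the existing file, False prefers the new one.
    if dupeconstraint == 'filesize':
        # Keep whichever copy is larger.
        return os.path.getsize(existing) >= os.path.getsize(candidate)
    if dupeconstraint == 'filetype-cbr':
        # Prefer .cbr: keep the existing file only if it already is one.
        return existing.lower().endswith('.cbr')
    if dupeconstraint == 'filetype-cbz':
        return existing.lower().endswith('.cbz')
    return True  # unknown constraint: leave the library untouched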
<fieldset>
<legend>Failed Download Handling</legend>

View File

@ -34,11 +34,13 @@
<div class="row checkbox">
<input type="checkbox" style="vertical-align: middle; margin: 3px; margin-top: -1px;" name="imp_move" id="imp_move" value="1" ${checked(mylar.IMP_MOVE)}><label>Move files</label>
</div>
<div class="row checkbox">
<input type="checkbox" style="vertical-align: middle; margin: 3px; margin-top: -1px;" name="imp_rename" id="imp_rename" value="1" ${checked(mylar.IMP_RENAME)}><label>Rename Files </label>
<small>(After importing, Rename the files to configuration settings)</small>
<label>${mylar.FOLDER_FORMAT}/${mylar.FILE_FORMAT}</label>
</div>
%if mylar.RENAME_FILES:
<div class="row checkbox">
<input type="checkbox" style="vertical-align: middle; margin: 3px; margin-top: -1px;" name="imp_rename" id="imp_rename" value="1" ${checked(mylar.IMP_RENAME)}><label>Rename Files </label>
<small>(After importing, Rename the files to configuration settings)</small>
<label>${mylar.FOLDER_FORMAT}/${mylar.FILE_FORMAT}</label>
</div>
%endif
<div class="row checkbox">
<input type="checkbox" style="vertical-align: middle; margin: 3px; margin-top: -1px;" name="imp_metadata" id="imp_metadata" value="1" ${checked(mylar.IMP_METADATA)}><label>Use Existing Metadata</label>
<small>(Use existing Metadata to better locate series for import)</small>
@ -59,8 +61,8 @@
<input type="hidden" value="Go">
</div>
<table class="display" id="impresults_table">
<tr />
<tr><center><h3>To be Imported</h3></center></tr>
<tr />
<tr><center><h3>To be Imported</h3></center></tr>
<tr><center><small>(green indicates confirmed on watchlist)</small></center></tr>
<thead>
<tr>
@ -74,8 +76,8 @@
</tr>
</thead>
<tbody>
%if results:
%for result in results:
%if results:
%for result in results:
<%
if result['DisplayName'] is None:
displayname = result['ComicName']
@ -83,42 +85,54 @@
displayname = result['DisplayName']
endif
%>
<tr>
<td id="select"><input type="checkbox" style="vertical-align: middle; margin: 3px; margin-top: -1px;" name="${result['ComicName']}" value="${result['ComicName']}" class="checkbox" />
<td id="comicname"><a href="${displayname}" title="${displayname}" target="_blank">${displayname}</td>
<td id="comicyear"><title="${result['ComicYear']}">${result['ComicYear']}</td>
<td id="comicissues"><title="${result['IssueCount']}">${result['IssueCount']}</td>
<td id="status">${result['Status']}
%if result['WatchMatch'] is not None:
<img src="interfaces/default/images/green-circle.png" height="10" width="10"/>
%endif
<tr>
<td id="select"><input type="checkbox" style="vertical-align: middle; margin: 3px; margin-top: -1px;" name="${result['ComicName']}" value="${result['ComicName']}" class="checkbox" />
<td id="comicname">${displayname}</td>
<td id="comicyear"><title="${result['ComicYear']}">${result['ComicYear']}</td>
<td id="comicissues"><title="${result['IssueCount']}">${result['IssueCount']}</td>
<td id="status">
%if result['ComicID']:
<a href="comicDetails?ComicID=${result['ComicID']}">${result['Status']}</a>
%else:
${result['Status']}
%endif
%if result['WatchMatch'] is not None:
<img src="interfaces/default/images/green-circle.png" height="10" width="10"/>
%endif
</td>
<td id="importdate">${result['ImportDate']}</td>
<td id="addcomic">
%if result['Status'] == 'Not Imported':
[<a href="#" title="Import ${result['ComicName']} into your watchlist" onclick="doAjaxCall('preSearchit?ComicName=${result['ComicName']| u}&displaycomic=${displayname}| u}',$(this),'table')" data-success="Imported ${result['ComicName']}">Import</a>]
%endif
[<a href="deleteimport?ComicName=${result['ComicName']}">Remove</a>]
%if result['implog'] is not None:
[<a class="showlog" title="Display the Import log for ${result['ComicName']}" href="importLog?ComicName=${result['ComicName'] |u}&SRID=${result['SRID']}">Log</a>]
%endif
%if result['SRID'] is not None and result['Status'] != 'Imported':
[<a title="Manual intervention is required - more than one result when attempting to import" href="importresults_popup?SRID=${result['SRID']}&ComicName=${result['ComicName'] |u}&imported=yes&ogcname=${result['ComicName'] |u}">Select</a>]
%endif
</td>
</tr>
<%
myDB = db.DBConnection()
files = myDB.action("SELECT * FROM importresults WHERE ComicName=?", [result['ComicName']])
%>
%endfor
%else:
<tr>
<td colspan="6">
<center><legend>There are no results to display</legend></center>
</td>
<td id="importdate">${result['ImportDate']}</td>
<td id="addcomic">[<a href="preSearchit?ComicName=${result['ComicName']| u}&displaycomic=${displayname |u}">Import</a>]
[<a href="deleteimport?ComicName=${result['ComicName']}">Remove</a>]
<!-- [<a href="#">Rename</a>] -->
%if result['implog'] is not None:
[<a class="showlog" href="importLog?ComicName=${result['ComicName']| u}">Log</a>]
%endif
</td>
</tr>
<%
myDB = db.DBConnection()
files = myDB.action("SELECT * FROM importresults WHERE ComicName=?", [result['ComicName']])
%>
%endfor
%else:
<tr>
<td colspan="6"><center><legend>There are no results to display</legend></center></td></tr>
%endif
</tbody>
</table>
</tr>
%endif
</tbody>
</table>
</form>
<table class="display" id="impresults_table">
<tr><br /></tr>
<tr><center><h3>Already on Watchlist</h3></center></tr>
<tr><center>(you need to CONFIRM the match before doing an import!)
<tr><center>(you need to CONFIRM the match before doing an import!)</center></tr>
<thead>
<tr>
<th id="select"></th>
@ -163,8 +177,6 @@
</tbody>
</table>
</div>
</form>
</div>
</%def>
<%def name="javascriptIncludes()">

View File

@ -0,0 +1,107 @@
<%inherit file="base.html" />
<%!
import mylar
from mylar.helpers import checked
%>
<%def name="headerIncludes()">
<div id="subhead_container">
<div id="subhead_menu">
<a id="menu_link_refresh" href="importResults">Return to ImportResults</a>
</div>
</div>
</%def>
<%def name="body()">
<div id="paddingheader">
<h1 class="clearfix"><img src="interfaces/default/images/icon_search.png" alt="Search results"/>Import Search results for : ${searchtext}</h1>
<div>
<div class="table_wrapper">
<table class="display" id="searchresults_table">
<thead>
<tr>
<th id="blank"></th>
<th id="name">Comic Name</th>
<th id="publisher">Publisher</th>
<th id="comicyear">Year</th>
<th id="issues">Issues</th>
<th id="add"></th>
</tr>
</thead>
<tbody>
%if searchresults:
%for result in searchresults:
<%
if result['comicyear'] == '2015':
grade = 'A'
else:
grade = 'Z'
if result['haveit'] != "No":
grade = 'H'
%>
<tr class="grade${grade}">
<td class="blank"></td>
%if result['deck'] == 'None' or result['deck'] is None:
<td class="name"><a href="${result['url']}" target="_blank">${result['name']}</a></td>
%else:
<td class="name"><a href="${result['url']}" title="${result['deck']}" target="_blank">${result['name']}</a></td>
%endif
<td class="publisher">${result['publisher']}</a></td>
<td class="comicyear">${result['comicyear']}</a></td>
<td class="issues">${result['issues']}</td>
%if result['haveit'] == 'No':
<%
calledby = "web-import"
%>
<td class="add"><a href="addbyid?comicid=${result['comicid']}&calledby=${calledby}"><span class="ui-icon ui-icon-plus"></span>Add this Comic</a></td>
%else:
<td class="add"><span class="ui-icon ui-icon-plus"></span>Already in Library</td>
%endif
</tr>
%endfor
%endif
</tbody>
</table>
</div>
</div>
</div>
</%def>
<%def name="headIncludes()">
<link rel="stylesheet" href="interfaces/default/css/data_table.css">
</%def>
<%def name="javascriptIncludes()">
<script src="js/libs/jquery.dataTables.min.js"></script>
<script>
function initThisPage(){
initActions();
$('#searchresults_table').dataTable(
{
"bDestroy": true,
"aoColumnDefs": [
{ 'bSortable': false, 'aTargets': [ 0,3 ] }
],
"oLanguage": {
"sLengthMenu":"Show _MENU_ results per page",
"sEmptyTable": "No results",
"sInfo":"Showing _START_ to _END_ of _TOTAL_ results",
"sInfoEmpty":"Showing 0 to 0 of 0 results",
"sInfoFiltered":"(filtered from _MAX_ total results)",
"sSearch" : ""},
"iDisplayLength": 25,
"sPaginationType": "full_numbers",
"aaSorting": []
});
resetFilters("result");
setTimeout(function(){
initFancybox();
},1500);
}
$(document).ready(function() {
initThisPage();
});
</script>
</%def>

View File

@ -35,7 +35,7 @@
<tr class="grade${grade}">
<td id="publisher">${comic['ComicPublisher']}</td>
<td id="name"><span title="${comic['ComicSortName']}"></span><a href="comicDetails?ComicID=${comic['ComicID']}">${comic['ComicName']}</a></td>
<td id="year"><span title="${comic['ComicYear']}"></span>${comic['ComicYear']}</td>
<td id="year"><span title="${comic['ComicYear']}"></span>${comic['ComicYear']}</td>
<td id="issue"><span title="${comic['LatestIssue']}"></span># ${comic['LatestIssue']}</td>
<td id="published">${comic['LatestDate']}</td>
<td id="have"><span title="${comic['percent']}"></span><div class="progress-container"><div style="background-color:#a3e532; height:14px; width:${comic['percent']}%"><div class="havetracks">${comic['haveissues']}/${comic['totalissues']}</div></div></div></td>

View File

@ -33,7 +33,7 @@
<input type="checkbox" style="vertical-align: middle; margin: 3px; margin-top: -1px;" /><label>Show Downloaded Story Arc Issues on ReadingList tab</label><br/>
<input type="checkbox" style="vertical-align: middle; margin: 3px; margin-top: -1px;" name="read2filename" id="read2filename" value="1" ${checked(mylar.READ2FILENAME)} /><label>Append Reading# to filename</label><br/>
<input type="checkbox" style="vertical-align: middle; margin: 3px; margin-top: -1px;" /><label>Enforce Renaming/MetaTagging options (if enabled)</label><br/>
<input type="checkbox" style="vertical-align: middle; margin: 3px; margin-top: -1px;" /><label>Copy watchlisted issues to StoryArc Directory</label>
<input type="checkbox" style="vertical-align: middle; margin: 3px; margin-top: -1px;" name="copy2arcdir" id="copy2arcdir" value="1" ${checked(mylar.COPY2ARCDIR)} /><label>Copy watchlisted issues to StoryArc Directory</label>
<input type="hidden" name="StoryArcID" value="${storyarcid}">
<input type="hidden" name="StoryArcName" value="${storyarcname}">
@ -61,17 +61,26 @@
<tbody>
%for item in readlist:
<%
if item['Status'] == 'Downloaded':
if item['Status'] == 'Skipped':
grade = 'Z'
elif item['Status'] == 'Wanted':
grade = 'X'
elif item['Status'] == 'Snatched':
grade = 'C'
elif item['Status'] == 'Downloaded':
grade = 'A'
elif item['Status'] == 'Archived':
grade = 'A'
elif item['Status'] == 'Ignored':
grade = 'A'
elif item['Status'] == 'Failed':
grade = 'C'
elif item['Status'] == 'Read':
grade = 'C'
elif item['Status'] == 'Not Watched':
grade = 'X'
elif item['Status'] == 'Wanted':
grade = 'Y'
else:
grade = 'U'
grade = 'A'
%>
<tr id="${item['ReadingOrder']}" class="grade${grade}">
@ -98,9 +107,9 @@
<td id="status">${item['Status']}</td>
<td id="action">
%if item['Status'] is None or item['Status'] == None:
<a href="queueissue?ComicName=${item['ComicName'] | u}&ComicIssue=${item['IssueNumber']}&ComicYear=${item['IssueYear']}&mode=readlist&SARC=${item['StoryArc']}&IssueArcID=${item['IssueArcID']}"><span class="ui-icon ui-icon-plus"></span>Grab it</a>
<a href="queueissue?ComicName=${item['ComicName'] | u}&ComicIssue=${item['IssueNumber']}&ComicYear=${issueyear}&mode=readlist&SARC=${item['StoryArc']}&IssueArcID=${item['IssueArcID']}&SeriesYear=${item['SeriesYear']}"><span class="ui-icon ui-icon-plus"></span>Grab it</a>
%elif item['Status'] == 'Snatched':
<a href="#" onclick="doAjaxCall('queueissue?ComicName=${item['ComicName'] | u}&ComicIssue=${item['IssueNumber']}&ComicYear=${item['IssueYEAR']}&mode=readlist&SARC=${item['StoryArc']}&IssueArcID=${item['IssueArcID']}',$(this),'table')" data-success="Trying to Retry"><span class="ui-icon ui-icon-plus"></span>Retry</a>
<a href="#" onclick="doAjaxCall('queueissue?ComicName=${item['ComicName'] | u}&ComicIssue=${item['IssueNumber']}&ComicYear=${issueyear}&mode=readlist&SARC=${item['StoryArc']}&IssueArcID=${item['IssueArcID']}&SeriesYear=${item['SeriesYear']}',$(this),'table')" data-success="Trying to Retry"><span class="ui-icon ui-icon-plus"></span>Retry</a>
%endif
</td>
</tr>

View File

@ -63,19 +63,26 @@
grade = 'C'
else:
grade = 'A'
if weekly['AUTOWANT'] == True:
grade = 'H'
%>
<tr class="grade${grade}">
%if pullfilter is True:
<td class="publisher">${weekly['PUBLISHER']}</td>
<td class="comicname">${weekly['COMIC']}</td>
<td class="comicnumber">${weekly['ISSUE']}</td>
<td class="status">${weekly['STATUS']}
%if weekly['STATUS'] == 'Skipped':
<a href="searchit?name=${weekly['COMIC'] | u}&issue=${weekly['ISSUE']}&mode=pullseries"><span class="ui-icon ui-icon-plus"></span>add series</a>
<a href="queueissue?ComicName=${weekly['COMIC'] | u}&ComicIssue=${weekly['ISSUE']}&mode=pullwant&Publisher=${weekly['PUBLISHER']}"><span class="ui-icon ui-icon-plus"></span>one off</a>
%if weekly['ISSUE'] == '1' or weekly['ISSUE'] == '0':
<a href="#" title="Watch for this series" onclick="doAjaxCall('add2futurewatchlist?ComicName=${weekly['COMIC'] |u}&Issue=${weekly['ISSUE']}&Publisher=${weekly['PUBLISHER']}&ShipDate=${pulldate}, $(this),'table')" data-success="Successfuly Added ${weekly['COMIC']} to future Watch list.">Watch</a>
%endif
%if weekly['AUTOWANT']:
<td class="status">Auto-Want
%else:
<td class="status">${weekly['STATUS']}
%if weekly['STATUS'] == 'Skipped':
%if weekly['ISSUE'] == '1' or weekly['ISSUE'] == '0':
<a href="#" title="Watch for this series" onclick="doAjaxCall('add2futurewatchlist?ComicName=${weekly['COMIC'] |u}&Issue=${weekly['ISSUE']}&Publisher=${weekly['PUBLISHER']}&ShipDate=${pulldate}', $(this),'table')" data-success="${weekly['COMIC']} is now on auto-watch/add."><span class="ui-icon ui-icon-plus"></span>Watch</a>
%endif
<a href="searchit?name=${weekly['COMIC'] | u}&issue=${weekly['ISSUE']}&mode=pullseries"><span class="ui-icon ui-icon-plus"></span>add series</a>
<a href="queueissue?ComicName=${weekly['COMIC'] | u}&ComicIssue=${weekly['ISSUE']}&mode=pullwant&Publisher=${weekly['PUBLISHER']}"><span class="ui-icon ui-icon-plus"></span>one off</a>
%endif
%endif
</td>
%endif

View File

@ -0,0 +1,240 @@
Metadata-Version: 1.1
Name: ConcurrentLogHandler
Version: 0.9.1
Summary: Concurrent logging handler (drop-in replacement for RotatingFileHandler)
Home-page: http://launchpad.net/python-concurrent-log-handler
Author: Lowell Alleman
Author-email: lowell87@gmail.com
License: http://www.apache.org/licenses/LICENSE-2.0
Description:
Overview
========
This module provides an additional log handler for Python's standard logging
package (PEP 282). This handler will write log events to log file which is
rotated when the log file reaches a certain size. Multiple processes can
safely write to the same log file concurrently.
Details
=======
.. _portalocker: http://code.activestate.com/recipes/65203/
The ``ConcurrentRotatingFileHandler`` class is a drop-in replacement for
Python's standard log handler ``RotatingFileHandler``. This module uses file
locking so that multiple processes can concurrently log to a single file without
dropping or clobbering log events. This module provides a file rotation scheme
like that of ``RotatingFileHandler``. Extra care is taken to ensure that logs
can be safely rotated before the rotation process is started. (This module works
around the file rename issue with ``RotatingFileHandler`` on Windows, where a
rotation failure means that all subsequent log events are dropped).
This module attempts to preserve log records at all costs. This means that log
files will grow larger than the specified maximum (rotation) size. So if disk
space is tight, you may want to stick with ``RotatingFileHandler``, which will
strictly adhere to the maximum file size.
If you have multiple instances of a script (or multiple scripts) all running at
the same time and writing to the same log file, then *all* of the scripts should
be using ``ConcurrentRotatingFileHandler``. You should not attempt to mix
and match ``RotatingFileHandler`` and ``ConcurrentRotatingFileHandler``.
This package bundles `portalocker`_ to deal with file locking. Please be aware
that portalocker only supports Unix (posix) and NT platforms at this time, and
therefore this package only supports those platforms as well.
Installation
============
Use the following command to install this package::
pip install ConcurrentLogHandler
If you are installing from source, you can use::
python setup.py install
Examples
========
Simple Example
--------------
Here is an example demonstrating how to use this module directly (from within
Python code)::
from logging import getLogger, INFO
from cloghandler import ConcurrentRotatingFileHandler
import os
log = getLogger()
# Use an absolute path to prevent file rotation trouble.
logfile = os.path.abspath("mylogfile.log")
# Rotate log after reaching 512K, keep 5 old copies.
rotateHandler = ConcurrentRotatingFileHandler(logfile, "a", 512*1024, 5)
log.addHandler(rotateHandler)
log.setLevel(INFO)
log.info("Here is a very exciting log message, just for you")
Automatic fallback example
--------------------------
If you are distributing your code and are unsure whether the
`ConcurrentLogHandler` package is installed everywhere your code will run,
Python makes it easy to gracefully fall back to the built-in
`RotatingFileHandler`. Here is an example::
try:
from cloghandler import ConcurrentRotatingFileHandler as RFHandler
except ImportError:
# Next 2 lines are optional: issue a warning to the user
from warnings import warn
warn("ConcurrentLogHandler package not installed. Using builtin log handler")
from logging.handlers import RotatingFileHandler as RFHandler
log = getLogger()
rotateHandler = RFHandler("/path/to/mylogfile.log", "a", 1048576, 15)
log.addHandler(rotateHandler)
Config file example
-------------------
This example shows you how to use this log handler with the logging config file
parser. This allows you to keep your logging configuration code separate from
your application code.
Example config file: ``logging.ini``::
[loggers]
keys=root
[handlers]
keys=hand01
[formatters]
keys=form01
[logger_root]
level=NOTSET
handlers=hand01
[handler_hand01]
class=handlers.ConcurrentRotatingFileHandler
level=NOTSET
formatter=form01
args=("rotating.log", "a", 512*1024, 5)
[formatter_form01]
format=%(asctime)s %(levelname)s %(message)s
Example Python code: ``app.py``::
import logging, logging.config
import cloghandler
logging.config.fileConfig("logging.ini")
log = logging.getLogger()
log.info("Here is a very exciting log message, just for you")
Change Log
==========
.. _Red Hat Bug #858912: https://bugzilla.redhat.com/show_bug.cgi?id=858912
.. _Python Bug #15960: http://bugs.python.org/issue15960
.. _LP Bug 1199332: https://bugs.launchpad.net/python-concurrent-log-handler/+bug/1199332
.. _LP Bug 1199333: https://bugs.launchpad.net/python-concurrent-log-handler/+bug/1199333
- 0.9.1: Bug fixes - `LP Bug 1199332`_ and `LP Bug 1199333`_.
* More gracefully handle out of disk space scenarios. Prevent release() from
throwing an exception.
* Handle logging.shutdown() in Python 2.7+. Close the lock file stream via
close().
* Big thanks to Dan Callaghan for forwarding these issues and patches.
- 0.9.0: Now requires Python 2.6+
* Revamp file opening/closing and file-locking internals (inspired by
feedback from Vinay Sajip.)
* Add the 'delay' parameter (delayed log file opening) to better match the
core logging functionality in more recent versions of Python.
* For anyone still using Python 2.3-2.5, please use the latest 0.8.x release
- 0.8.6: Fixed packaging bug with test script
* Fix a small packaging bug from the 0.8.5 release. (Thanks to Björn Häuser
for bringing this to my attention.)
* Updated stresstest.py to always use the correct python version when
launching sub-processes instead of the system's default "python".
- 0.8.5: Fixed ValueError: I/O operation on closed file
* Thanks to Vince Carney, Arif Kasim, Matt Drew, Nick Coghlan, and
Dan Callaghan for bug reports. Bugs can now be filed here:
https://bugs.launchpad.net/python-concurrent-log-handler. Bugs resolved
`Red Hat Bug #858912`_ and `Python Bug #15960`_
* Updated ez_setup.py to 0.7.7
* Updated portalocker to 0.3 (now maintained by Rick van Hattem)
* Initial Python 3 support (needs more testing)
* Fixed minor spelling mistakes
- 0.8.4: Fixed lock-file naming issue
* Resolved a minor issue where lock-files would be improperly named if the
log file contained ".log" in the middle of the log name. For example, if
your log file was "/var/log/mycompany.logging.mysource.log", the lock file
would be named "/var/log/mycompany.ging.mysource.lock", which is not correct.
Thanks to Dirk Rothe for pointing this out. Since this introduces a slight
lock-file behavior difference, make sure all concurrent writers are updated
to 0.8.4 at the same time if this issue affects you.
* Updated ez_setup.py to 0.6c11
- 0.8.3: Fixed a log file rotation bug and updated docs
* Fixed a bug that happens after log rotation when multiple processes are
writing to the same log file. Each process ends up writing to its own
log file ("log.1" or "log.2" instead of "log"). The fix is simply to reopen
the log file and check the size again. I do not believe this bug results in
data loss; however, this certainly was not the desired behavior. (A big
thanks goes to Oliver Tonnhofer for finding, documenting, and providing a
patch for this bug.)
* Cleanup the docs. (aka "the page you are reading right now") I fixed some
silly mistakes and typos... who writes this stuff?
- 0.8.2: Minor bug fix release (again)
* Found and resolved another issue with older logging packages that do not
support encoding.
- 0.8.1: Minor bug fix release
* Now importing "codecs" directly; I found some slight differences in the
logging module in different Python 2.4.x releases that caused the module to
fail to load.
- 0.8.0: Minor feature release
* Add better support for using ``logging.config.fileConfig()``. This class
is now available using ``class=handlers.ConcurrentRotatingFileHandler``.
* Minor changes in how the ``filename`` parameter is handled when given a
relative path.
- 0.7.4: Minor bug fix
* Fixed a typo in the package description (incorrect class name)
* Added a change log; which you are reading now.
* Fixed the ``close()`` method to no longer assume that stream is still
open.
To-do
=====
* This module has had minimal testing in a multi-threaded process. I see no
reason why this should be an issue, but no stress-testing has been done in a
threaded situation. If this is important to you, you could always add
threading support to the ``stresstest.py`` script and send me the patch.
Keywords: logging,windows,linux,unix,rotate,portalocker
Platform: nt
Platform: posix
Classifier: Development Status :: 4 - Beta
Classifier: Topic :: System :: Logging
Classifier: Operating System :: POSIX
Classifier: Operating System :: Microsoft :: Windows
Classifier: Programming Language :: Python
Classifier: Programming Language :: Python :: 2.6
Classifier: Programming Language :: Python :: 2.7
Classifier: Programming Language :: Python :: 3
Classifier: Topic :: Software Development :: Libraries :: Python Modules
Classifier: License :: OSI Approved :: Apache Software License

View File

@ -0,0 +1,16 @@
.bzrignore
LICENSE
README
do_release.sh
ez_setup.py
pre_commit.sh
setup.cfg
setup.py
stresstest.py
src/cloghandler.py
src/portalocker.py
src/ConcurrentLogHandler.egg-info/PKG-INFO
src/ConcurrentLogHandler.egg-info/SOURCES.txt
src/ConcurrentLogHandler.egg-info/dependency_links.txt
src/ConcurrentLogHandler.egg-info/top_level.txt
src/ConcurrentLogHandler.egg-info/zip-safe

View File

@ -0,0 +1,13 @@
../cloghandler.py
../portalocker.py
../cloghandler.pyc
../portalocker.pyc
../../../../tests/stresstest.py
../../../../docs/README
../../../../docs/LICENSE
./
SOURCES.txt
zip-safe
PKG-INFO
dependency_links.txt
top_level.txt

View File

@ -0,0 +1,2 @@
cloghandler
portalocker

View File

@ -0,0 +1,349 @@
# Copyright 2013 Lowell Alleman
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may not
# use this file except in compliance with the License. You may obtain a copy
# of the License at http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
""" cloghandler.py: A smart replacement for the standard RotatingFileHandler
ConcurrentRotatingFileHandler: This class is a log handler which is a drop-in
replacement for the Python standard log handler 'RotatingFileHandler'; the primary
difference being that this handler will continue to write to the same file if
the file cannot be rotated for some reason, whereas the RotatingFileHandler will
strictly adhere to the maximum file size. Unfortunately, if you are using the
RotatingFileHandler on Windows, you will find that once an attempted rotation
fails, all subsequent log messages are dropped. The other major advantage of
this module is that multiple processes can safely write to a single log file.
To put it another way: This module's top priority is preserving your log
records, whereas the standard library attempts to limit disk usage, which can
potentially drop log messages. If you are trying to determine which module to
use, there are a number of considerations: What is most important: strict disk
space usage or preservation of log messages? What OSes are you supporting? Can
you afford to have processes blocked by file locks?
Concurrent access is handled by using file locks, which should ensure that log
messages are not dropped or clobbered. This means that a file lock is acquired
and released for every log message that is written to disk. (On Windows, you may
also run into a temporary situation where the log file must be opened and closed
for each log message.) This can have potential performance implications. In my
testing, performance was more than adequate, but if you need a high-volume or
low-latency solution, I suggest you look elsewhere.
This module currently only supports the 'nt' and 'posix' platforms due to the
usage of the portalocker module. I do not have access to any other platforms
for testing; patches are welcome.
See the README file for an example usage of this module.
This module supports Python 2.6 and later.
"""
__version__ = '0.9.1'
__revision__ = 'lowell87@gmail.com-20130711022321-doutxl7zyzuwss5a 2013-07-10 22:23:21 -0400 [0]'
__author__ = "Lowell Alleman"
__all__ = [
"ConcurrentRotatingHandler",
]
import os
import sys
from random import randint
from logging import Handler, LogRecord
from logging.handlers import BaseRotatingHandler
try:
import codecs
except ImportError:
codecs = None
# Question/TODO: Should we have a fallback mode if we can't load portalocker /
# we should still be better off than with the standard RotatingFileHandler
# class, right? We do some rename checking... that should prevent some file
# clobbering that the builtin class allows.
# sibling module that handles all the ugly platform-specific details of file locking
from portalocker import lock, unlock, LOCK_EX, LOCK_NB, LockException
# Workaround for handleError() in Python 2.7+ where record is written to stderr
class NullLogRecord(LogRecord):
def __init__(self):
pass
def __getattr__(self, attr):
return None
class ConcurrentRotatingFileHandler(BaseRotatingHandler):
"""
Handler for logging to a set of files, which switches from one file to the
next when the current file reaches a certain size. Multiple processes can
write to the log file concurrently, but this may mean that the file will
exceed the given size.
"""
def __init__(self, filename, mode='a', maxBytes=0, backupCount=0,
encoding=None, debug=True, delay=0):
"""
Open the specified file and use it as the stream for logging.
By default, the file grows indefinitely. You can specify particular
values of maxBytes and backupCount to allow the file to rollover at
a predetermined size.
Rollover occurs whenever the current log file is nearly maxBytes in
length. If backupCount is >= 1, the system will successively create
new files with the same pathname as the base file, but with extensions
".1", ".2" etc. appended to it. For example, with a backupCount of 5
and a base file name of "app.log", you would get "app.log",
"app.log.1", "app.log.2", ... through to "app.log.5". The file being
written to is always "app.log" - when it gets filled up, it is closed
and renamed to "app.log.1", and if files "app.log.1", "app.log.2" etc.
exist, then they are renamed to "app.log.2", "app.log.3" etc.
respectively.
If maxBytes is zero, rollover never occurs.
On Windows, it is not possible to rename a file that is currently opened
by another process. This means that it is not possible to rotate the
log files if multiple processes are using the same log file. In this
case, the current log file will continue to grow until the rotation can
be completed successfully. In order for rotation to be possible, all of
the other processes need to close the file first. A mechanism, called
"degraded" mode, has been created for this scenario. In degraded mode,
the log file is closed after each log message is written. So once all
processes have entered degraded mode, the next rotation attempt should
be successful and then normal logging can be resumed. Using the 'delay'
parameter may help reduce contention in some usage patterns.
This log handler assumes that all concurrent processes logging to a
single file are using only this class, and that the exact same
parameters are provided to each instance of this class. If, for
example, two different processes are using this class, but with
different values for 'maxBytes' or 'backupCount', then odd behavior is
expected. The same is true if this class is used by one application, but
the RotatingFileHandler is used by another.
"""
# Absolute file name handling done by FileHandler since Python 2.5
BaseRotatingHandler.__init__(self, filename, mode, encoding, delay)
self.delay = delay
self._rotateFailed = False
self.maxBytes = maxBytes
self.backupCount = backupCount
self._open_lockfile()
# For debug mode, swap out the "_degrade()" method with a more verbose one.
if debug:
self._degrade = self._degrade_debug
def _open_lockfile(self):
# Use 'file.lock' and not 'file.log.lock' (Only handles the normal "*.log" case.)
if self.baseFilename.endswith(".log"):
lock_file = self.baseFilename[:-4]
else:
lock_file = self.baseFilename
lock_file += ".lock"
self.stream_lock = open(lock_file,"w")
def _open(self, mode=None):
"""
Open the current base file with the (original) mode and encoding.
Return the resulting stream.
Note: Copied from stdlib. Added option to override 'mode'
"""
if mode is None:
mode = self.mode
if self.encoding is None:
stream = open(self.baseFilename, mode)
else:
stream = codecs.open(self.baseFilename, mode, self.encoding)
return stream
def _close(self):
""" Close file stream. Unlike close(), we don't tear anything down, we
expect the log to be re-opened after rotation."""
if self.stream:
try:
if not self.stream.closed:
# Flushing probably isn't technically necessary, but it feels right
self.stream.flush()
self.stream.close()
finally:
self.stream = None
def acquire(self):
""" Acquire thread and file locks. Re-opening log for 'degraded' mode.
"""
# handle thread lock
Handler.acquire(self)
# Issue a file lock. (This is inefficient for multiple active threads
# within a single process. But if you're worried about high-performance,
# you probably aren't using this log handler.)
if self.stream_lock:
# If stream_lock=None, then assume close() was called or something
# else weird and ignore all file-level locks.
if self.stream_lock.closed:
# Daemonization can close all open file descriptors, see
# https://bugzilla.redhat.com/show_bug.cgi?id=952929
# Try opening the lock file again. Should we warn() here?!?
try:
self._open_lockfile()
except Exception:
self.handleError(NullLogRecord())
# Don't try to open the stream lock again
self.stream_lock = None
return
lock(self.stream_lock, LOCK_EX)
# Stream will be opened as part of FileHandler.emit()
def release(self):
""" Release file and thread locks. If in 'degraded' mode, close the
stream to reduce contention until the log files can be rotated. """
try:
if self._rotateFailed:
self._close()
except Exception:
self.handleError(NullLogRecord())
finally:
try:
if self.stream_lock and not self.stream_lock.closed:
unlock(self.stream_lock)
except Exception:
self.handleError(NullLogRecord())
finally:
# release thread lock
Handler.release(self)
def close(self):
"""
Close log stream and stream_lock. """
try:
self._close()
if not self.stream_lock.closed:
self.stream_lock.close()
finally:
self.stream_lock = None
Handler.close(self)
def _degrade(self, degrade, msg, *args):
""" Set degrade mode or not. Ignore msg. """
self._rotateFailed = degrade
del msg, args # avoid pychecker warnings
def _degrade_debug(self, degrade, msg, *args):
""" A more colorful version of _degade(). (This is enabled by passing
"debug=True" at initialization).
"""
if degrade:
if not self._rotateFailed:
sys.stderr.write("Degrade mode - ENTERING - (pid=%d) %s\n" %
(os.getpid(), msg % args))
self._rotateFailed = True
else:
if self._rotateFailed:
sys.stderr.write("Degrade mode - EXITING - (pid=%d) %s\n" %
(os.getpid(), msg % args))
self._rotateFailed = False
def doRollover(self):
"""
Do a rollover, as described in __init__().
"""
self._close()
if self.backupCount <= 0:
# Don't keep any backups; just overwrite the existing backup file.
# Locking doesn't much matter here, since we are overwriting it anyway.
self.stream = self._open("w")
return
try:
# Determine if we can rename the log file or not. Windows refuses to
# rename an open file; Unix is inode based so it doesn't care.
# Attempt to rename logfile to tempname: There is a slight race-condition here, but it seems unavoidable
tmpname = None
while not tmpname or os.path.exists(tmpname):
tmpname = "%s.rotate.%08d" % (self.baseFilename, randint(0,99999999))
try:
# Do a rename test to determine if we can successfully rename the log file
os.rename(self.baseFilename, tmpname)
except (IOError, OSError):
exc_value = sys.exc_info()[1]
self._degrade(True, "rename failed. File in use? "
"exception=%s", exc_value)
return
# Q: Is there some way to protect this code from a KeyboardInterrupt?
# This isn't necessarily a data loss issue, but it certainly does
# break the rotation process during stress testing.
# There is currently no mechanism in place to handle the situation
# where one of these log files cannot be renamed. (Example, user
# opens "logfile.3" in notepad); we could test rename each file, but
# nobody's complained about this being an issue; so the additional
# code complexity isn't warranted.
for i in range(self.backupCount - 1, 0, -1):
sfn = "%s.%d" % (self.baseFilename, i)
dfn = "%s.%d" % (self.baseFilename, i + 1)
if os.path.exists(sfn):
#print "%s -> %s" % (sfn, dfn)
if os.path.exists(dfn):
os.remove(dfn)
os.rename(sfn, dfn)
dfn = self.baseFilename + ".1"
if os.path.exists(dfn):
os.remove(dfn)
os.rename(tmpname, dfn)
#print "%s -> %s" % (self.baseFilename, dfn)
self._degrade(False, "Rotation completed")
finally:
# Re-open the output stream, but if "delay" is enabled then wait
# until the next emit() call. This could reduce rename contention in
# some usage patterns.
if not self.delay:
self.stream = self._open()
def shouldRollover(self, record):
"""
Determine if rollover should occur.
For those keeping track: this differs from the standard
library's RotatingFileHandler class because there is no promise to keep
the file size under maxBytes; we ignore the length of the current record.
"""
del record # avoid pychecker warnings
# If the stream is not yet open, skip the rollover check. (The check will
# occur on the next message, after emit() calls _open())
if self.stream is None:
return False
if self._shouldRollover():
# If some other process already did the rollover (which is possible
# on Unix) the file our stream points at may now be named "log.1", thus
# triggering another rollover. Avoid this by closing and opening
# "log" again.
self._close()
self.stream = self._open()
return self._shouldRollover()
return False
def _shouldRollover(self):
if self.maxBytes > 0: # are we rolling over?
self.stream.seek(0, 2) #due to non-posix-compliant Windows feature
if self.stream.tell() >= self.maxBytes:
return True
else:
self._degrade(False, "Rotation done or not needed at this time")
return False
# Publish this class to the "logging.handlers" module so that it can be used
# from a logging config file via logging.config.fileConfig().
import logging.handlers
logging.handlers.ConcurrentRotatingFileHandler = ConcurrentRotatingFileHandler
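Tying the docstring's notes together, a minimal usage sketch that also enables the 'delay' option (constructor signature as defined above; the file name is illustrative):

import logging
import os

from cloghandler import ConcurrentRotatingFileHandler

log = logging.getLogger()
# delay=1 postpones opening the stream until the first emit(), which the
# docstring suggests can reduce rename contention in some usage patterns.
handler = ConcurrentRotatingFileHandler(os.path.abspath('app.log'), 'a',
                                        maxBytes=512 * 1024, backupCount=5,
                                        delay=1)
log.addHandler(handler)
log.setLevel(logging.INFO)
log.info('hello from a rotation-safe logger')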

View File

@ -0,0 +1,141 @@
# portalocker.py - Cross-platform (posix/nt) API for flock-style file locking.
# Requires python 1.5.2 or better.
"""Cross-platform (posix/nt) API for flock-style file locking.
Synopsis:
import portalocker
file = open("somefile", "r+")
portalocker.lock(file, portalocker.LOCK_EX)
file.seek(12)
file.write("foo")
file.close()
If you know what you're doing, you may choose to
portalocker.unlock(file)
before closing the file, but why?
Methods:
lock( file, flags )
unlock( file )
Constants:
LOCK_EX
LOCK_SH
LOCK_NB
Exceptions:
LockException
Notes:
For the 'nt' platform, this module requires the Python Extensions for Windows.
Be aware that this may not work as expected on Windows 95/98/ME.
History:
I learned the win32 technique for locking files from sample code
provided by John Nielsen <nielsenjf@my-deja.com> in the documentation
that accompanies the win32 modules.
Author: Jonathan Feinberg <jdf@pobox.com>,
Lowell Alleman <lalleman@mfps.com>,
Rick van Hattem <Rick.van.Hattem@Fawo.nl>
Version: 0.3
URL: https://github.com/WoLpH/portalocker
"""
__all__ = [
"lock",
"unlock",
"LOCK_EX",
"LOCK_SH",
"LOCK_NB",
"LockException",
]
import os
class LockException(Exception):
# Error codes:
LOCK_FAILED = 1
if os.name == 'nt':
import win32con
import win32file
import pywintypes
LOCK_EX = win32con.LOCKFILE_EXCLUSIVE_LOCK
LOCK_SH = 0 # the default
LOCK_NB = win32con.LOCKFILE_FAIL_IMMEDIATELY
# is there any reason not to reuse the following structure?
__overlapped = pywintypes.OVERLAPPED()
elif os.name == 'posix':
import fcntl
LOCK_EX = fcntl.LOCK_EX
LOCK_SH = fcntl.LOCK_SH
LOCK_NB = fcntl.LOCK_NB
else:
raise RuntimeError("PortaLocker only defined for nt and posix platforms")
if os.name == 'nt':
def lock(file, flags):
hfile = win32file._get_osfhandle(file.fileno())
try:
win32file.LockFileEx(hfile, flags, 0, -0x10000, __overlapped)
except pywintypes.error, exc_value:
# error: (33, 'LockFileEx', 'The process cannot access the file because another process has locked a portion of the file.')
if exc_value[0] == 33:
raise LockException(LockException.LOCK_FAILED, exc_value[2])
else:
# Q: Are there exceptions/codes we should be dealing with here?
raise
def unlock(file):
hfile = win32file._get_osfhandle(file.fileno())
try:
win32file.UnlockFileEx(hfile, 0, -0x10000, __overlapped)
except pywintypes.error, exc_value:
if exc_value[0] == 158:
# error: (158, 'UnlockFileEx', 'The segment is already unlocked.')
# To match the 'posix' implementation, silently ignore this error
pass
else:
# Q: Are there exceptions/codes we should be dealing with here?
raise
elif os.name == 'posix':
def lock(file, flags):
try:
fcntl.flock(file.fileno(), flags)
except IOError, exc_value:
# The exception code varies on different systems so we'll catch
# every IO error
raise LockException(*exc_value)
def unlock(file):
fcntl.flock(file.fileno(), fcntl.LOCK_UN)
if __name__ == '__main__':
from time import time, strftime, localtime
import sys
import portalocker
log = open('log.txt', "a+")
portalocker.lock(log, portalocker.LOCK_EX)
timestamp = strftime("%m/%d/%Y %H:%M:%S\n", localtime(time()))
log.write( timestamp )
print "Wrote lines. Hit enter to release lock."
dummy = sys.stdin.readline()
log.close()
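A non-blocking variant of the synopsis above, using only names this module exports (the file name is illustrative):

import portalocker

f = open('somefile', 'r+')
try:
    # LOCK_NB makes lock() raise instead of blocking if another
    # process already holds the lock.
    portalocker.lock(f, portalocker.LOCK_EX | portalocker.LOCK_NB)
except portalocker.LockException:
    print('somefile is locked by another process')
else:
    f.write('exclusive access\n')
    portalocker.unlock(f)
f.close()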

View File

@ -0,0 +1,287 @@
#!/usr/bin/env python
""" stresstest.py: A stress-tester for ConcurrentRotatingFileHandler
This utility spawns a bunch of processes that all try to concurrently write to
the same file. This is pretty much the worst-case scenario for my log handler.
Once all of the processes have completed writing to the log file, the output is
compared to see if any log messages have been lost.
In the future, I may also add in support for testing with each process having
multiple threads.
"""
__version__ = '$Id$'
__author__ = 'Lowell Alleman'
import os
import sys
from subprocess import call, Popen, STDOUT
from time import sleep
ROTATE_COUNT = 5000
# local lib; for testing
from cloghandler import ConcurrentRotatingFileHandler
class RotateLogStressTester:
def __init__(self, sharedfile, uniquefile, name="LogStressTester", logger_delay=0):
self.sharedfile = sharedfile
self.uniquefile = uniquefile
self.name = name
self.writeLoops = 100000
self.rotateSize = 128 * 1024
self.rotateCount = ROTATE_COUNT
self.random_sleep_mode = False
self.debug = False
self.logger_delay = logger_delay
def getLogHandler(self, fn):
""" Override this method if you want to test a different logging handler
class. """
return ConcurrentRotatingFileHandler(fn, 'a', self.rotateSize,
self.rotateCount, delay=self.logger_delay,
debug=self.debug)
# To run the test with the standard library's RotatingFileHandler:
# from logging.handlers import RotatingFileHandler
# return RotatingFileHandler(fn, 'a', self.rotateSize, self.rotateCount)
def start(self):
from logging import getLogger, FileHandler, Formatter, DEBUG
self.log = getLogger(self.name)
self.log.setLevel(DEBUG)
formatter = Formatter('%(asctime)s [%(process)d:%(threadName)s] %(levelname)-8s %(name)s: %(message)s')
# Unique log handler (single file)
handler = FileHandler(self.uniquefile, "w")
handler.setLevel(DEBUG)
handler.setFormatter(formatter)
self.log.addHandler(handler)
# If you suspect that the diff stuff isn't working, uncomment the next
# line. You should see this show up once per process.
# self.log.info("Here is a line that should only be in the first output.")
# Setup output used for testing
handler = self.getLogHandler(self.sharedfile)
handler.setLevel(DEBUG)
handler.setFormatter(formatter)
self.log.addHandler(handler)
# If this ever becomes a real "Thread", then remove this line:
self.run()
def run(self):
c = 0
from random import choice, randint
# Use a bunch of random quotes, numbers, and severity levels to mix it up a bit!
msgs = ["I found %d puppies", "There are %d cats in your hatz",
"my favorite number is %d", "I am %d years old.", "1 + 1 = %d",
"%d/0 = DivideByZero", "blah! %d thingies!", "8 15 16 23 48 %d",
"the worlds largest prime number: %d", "%d happy meals!"]
logfuncts = [self.log.debug, self.log.info, self.log.warn, self.log.error]
self.log.info("Starting to write random log message. Loop=%d", self.writeLoops)
while c <= self.writeLoops:
c += 1
msg = choice(msgs)
logfunc = choice(logfuncts)
logfunc(msg, randint(0,99999999))
if self.random_sleep_mode and c % 1000 == 0:
# Sleep from 0-15 seconds
s = randint(1,15)
print("PID %d sleeping for %d seconds" % (os.getpid(), s))
sleep(s)
# break
self.log.info("Done witting random log messages.")
def iter_lognames(logfile, count):
""" Generator for log file names based on a rotation scheme """
for i in range(count -1, 0, -1):
yield "%s.%d" % (logfile, i)
yield logfile
def iter_logs(iterable, missing_ok=False):
""" Generator to extract log entries from shared log file. """
for fn in iterable:
if os.path.exists(fn):
for line in open(fn):
yield line
elif not missing_ok:
raise ValueError("Missing log file %s" % fn)
def combine_logs(combinedlog, iterable, mode="w"):
""" write all lines (iterable) into a single log file. """
fp = open(combinedlog, mode)
for chunk in iterable:
fp.write(chunk)
fp.close()
from optparse import OptionParser
parser = OptionParser(usage="usage: %prog",
version=__version__,
description="Stress test the cloghandler module.")
parser.add_option("--log-calls", metavar="NUM",
action="store", type="int", default=50000,
help="Number of logging entries to write to each log file. "
"Default is %default")
parser.add_option("--random-sleep-mode",
action="store_true", default=False)
parser.add_option("--debug",
action="store_true", default=False)
parser.add_option("--logger-delay",
action="store_true", default=False,
help="Enable the 'delay' mode in the logger class. "
"This means that the log file will be opened on demand.")
def main_client(args):
(options, args) = parser.parse_args(args)
if len(args) != 2:
raise ValueError("Require 2 arguments. We have %d args" % len(args))
(shared, client) = args
if os.path.isfile(client):
sys.stderr.write("Already a client using output file %s\n" % client)
sys.exit(1)
tester = RotateLogStressTester(shared, client, logger_delay=options.logger_delay)
tester.random_sleep_mode = options.random_sleep_mode
tester.debug = options.debug
tester.writeLoops = options.log_calls
tester.start()
print("We are done pid=%d" % os.getpid())
class TestManager:
class ChildProc(object):
""" Very simple child container class."""
__slots__ = [ "popen", "sharedfile", "clientfile" ]
def __init__(self, **kwargs):
self.update(**kwargs)
def update(self, **kwargs):
for key, val in kwargs.items():
setattr(self, key, val)
def __init__(self):
self.tests = []
def launchPopen(self, *args, **kwargs):
proc = Popen(*args, **kwargs)
cp = self.ChildProc(popen=proc)
self.tests.append(cp)
return cp
def wait(self, check_interval=3):
""" Wait for all child test processes to complete. """
print("Waiting while children are out running and playing!")
while True:
sleep(check_interval)
waiting = []
for cp in self.tests:
if cp.popen.poll() is None:
waiting.append(cp.popen.pid)
if not waiting:
break
print("Waiting on %r " % waiting)
print("All children have stopped.")
def checkExitCodes(self):
for cp in self.tests:
if cp.popen.poll() != 0:
return False
return True
def unified_diff(a,b, out=sys.stdout):
import difflib
ai = open(a).readlines()
bi = open(b).readlines()
for line in difflib.unified_diff(ai, bi, a, b):
out.write(line)
def main_runner(args):
parser.add_option("--processes", metavar="NUM",
action="store", type="int", default=3,
help="Number of processes to spawn. Default: %default")
parser.add_option("--delay", metavar="secs",
action="store", type="float", default=2.5,
help="Wait SECS before spawning next processes. "
"Default: %default")
parser.add_option("-p", "--path", metavar="DIR",
action="store", default="test",
help="Path to a temporary directory. Default: '%default'")
this_script = args[0]
(options, args) = parser.parse_args(args)
options.path = os.path.abspath(options.path)
if not os.path.isdir(options.path):
os.makedirs(options.path)
manager = TestManager()
shared = os.path.join(options.path, "shared.log")
for client_id in range(options.processes):
client = os.path.join(options.path, "client.log_client%s.log" % client_id)
cmdline = [ sys.executable, this_script, "client", shared, client,
"--log-calls=%d" % options.log_calls ]
if options.random_sleep_mode:
cmdline.append("--random-sleep-mode")
if options.debug:
cmdline.append("--debug")
if options.logger_delay:
cmdline.append("--logger-delay")
child = manager.launchPopen(cmdline)
child.update(sharedfile=shared, clientfile=client)
sleep(options.delay)
# Wait for all of the subprocesses to exit
manager.wait()
# Check children exit codes
if not manager.checkExitCodes():
sys.stderr.write("One or more of the child process has failed.\n"
"Aborting test.\n")
sys.exit(2)
client_combo = os.path.join(options.path, "client.log.combo")
shared_combo = os.path.join(options.path, "shared.log.combo")
# Combine all of the log files...
client_files = [ child.clientfile for child in manager.tests ]
if False:
def sort_em(iterable):
return iterable
else:
sort_em = sorted
print("Writing out combined client logs...")
combine_logs(client_combo, sort_em(iter_logs(client_files)))
print("done.")
print("Writing out combined shared logs...")
shared_log_files = iter_lognames(shared, ROTATE_COUNT)
log_lines = iter_logs(shared_log_files, missing_ok=True)
combine_logs(shared_combo, sort_em(log_lines))
print("done.")
print("Running internal diff: (If the next line is 'end of diff', then the stress test passed!)")
unified_diff(client_combo, shared_combo)
print(" --- end of diff ----")
if __name__ == '__main__':
if len(sys.argv) > 1 and sys.argv[1].lower() == "client":
main_client(sys.argv[2:])
else:
main_runner(sys.argv)

View File

@ -421,12 +421,14 @@ class PostProcessor(object):
# At this point, let's just drop it into the Comic Location folder and forget about it..
if 'S' in sandwich:
self._log("One-off STORYARC mode enabled for Post-Processing for " + str(sarc))
logger.info(module + 'One-off STORYARC mode enabled for Post-Processing for ' + str(sarc))
logger.info(module + ' One-off STORYARC mode enabled for Post-Processing for ' + str(sarc))
if mylar.STORYARCDIR:
storyarcd = os.path.join(mylar.DESTINATION_DIR, "StoryArcs", sarc)
self._log("StoryArc Directory set to : " + storyarcd)
logger.info(module + ' Story Arc Directory set to : ' + storyarcd)
else:
self._log("Grab-Bag Directory set to : " + mylar.GRABBAG_DIR)
logger.info(module + ' Story Arc Directory set to : ' + mylar.GRABBAG_DIR)
else:
self._log("One-off mode enabled for Post-Processing. All I'm doing is moving the file untouched into the Grab-bag directory.")
@ -497,12 +499,13 @@ class PostProcessor(object):
grab_dst = os.path.join(grdst, ofilename)
self._log("Destination Path : " + grab_dst)
logger.info(module + ' Destination Path : ' + grab_dst)
grab_src = os.path.join(self.nzb_folder, ofilename)
self._log("Source Path : " + grab_src)
logger.info(module + ' Source Path : ' + grab_src)
logger.info(module + ' Moving ' + str(ofilename) + ' into directory : ' + str(grdst))
logger.info(module + ' Moving ' + str(ofilename) + ' into directory : ' + str(grab_dst))
try:
shutil.move(grab_src, grab_dst)
@ -623,6 +626,7 @@ class PostProcessor(object):
iss_find = issuenum.find('.')
iss_b4dec = issuenum[:iss_find]
iss_decval = issuenum[iss_find+1:]
if iss_decval.endswith('.'): iss_decval = iss_decval[:-1]
if int(iss_decval) == 0:
iss = iss_b4dec
issdec = int(iss_decval)
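# Worked example of the new guard above (sketch): for a malformed scan such as
# issuenum = '15.5.', find('.') returns 2, so iss_b4dec = '15' and
# iss_decval = '5.'; endswith('.') strips the stray dot, leaving '5' so that
# int(iss_decval) succeeds instead of raising ValueError.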
@ -771,9 +775,9 @@ class PostProcessor(object):
try:
import cmtagmylar
if ml is None:
pcheck = cmtagmylar.run(self.nzb_folder, issueid=issueid)
pcheck = cmtagmylar.run(self.nzb_folder, issueid=issueid, comversion=comversion)
else:
pcheck = cmtagmylar.run(self.nzb_folder, issueid=issueid, manual="yes", filename=ml['ComicLocation'])
pcheck = cmtagmylar.run(self.nzb_folder, issueid=issueid, comversion=comversion, manual="yes", filename=ml['ComicLocation'])
except ImportError:
logger.fdebug(module + ' comictaggerlib not found on system. Ensure the ENTIRE lib directory is located within mylar/lib/comictaggerlib/')

View File

@ -80,6 +80,12 @@ CFG = None
CONFIG_VERSION = None
DB_FILE = None
DBCHOICE = None
#these are used depending on dbchoice.
DBUSER = None
DBPASS = None
DBNAME = None
LOG_DIR = None
LOG_LIST = []
@ -142,7 +148,7 @@ DOWNLOAD_SCAN_INTERVAL = 5
CHECK_FOLDER = None
ENABLE_CHECK_FOLDER = False
INTERFACE = None
DUPECONSTRAINT = None
PREFERRED_QUALITY = 0
CORRECT_METADATA = False
MOVE_FILES = False
@ -266,6 +272,8 @@ GRABBAG_DIR = None
HIGHCOUNT = 0
READ2FILENAME = 0
STORYARCDIR = 0
COPY2ARCDIR = 0
CVAPIFIX = 0
CVURL = None
WEEKFOLDER = 0
@ -364,10 +372,10 @@ def initialize():
with INIT_LOCK:
global __INITIALIZED__, COMICVINE_API, DEFAULT_CVAPI, CVAPI_COUNT, CVAPI_TIME, CVAPI_MAX, FULL_PATH, PROG_DIR, VERBOSE, DAEMON, COMICSORT, DATA_DIR, CONFIG_FILE, CFG, CONFIG_VERSION, LOG_DIR, CACHE_DIR, MAX_LOGSIZE, LOGVERBOSE, OLDCONFIG_VERSION, OS_DETECT, OS_LANG, OS_ENCODING, \
global __INITIALIZED__, DBCHOICE, DBUSER, DBPASS, DBNAME, COMICVINE_API, DEFAULT_CVAPI, CVAPI_COUNT, CVAPI_TIME, CVAPI_MAX, FULL_PATH, PROG_DIR, VERBOSE, DAEMON, COMICSORT, DATA_DIR, CONFIG_FILE, CFG, CONFIG_VERSION, LOG_DIR, CACHE_DIR, MAX_LOGSIZE, LOGVERBOSE, OLDCONFIG_VERSION, OS_DETECT, OS_LANG, OS_ENCODING, \
queue, HTTP_PORT, HTTP_HOST, HTTP_USERNAME, HTTP_PASSWORD, HTTP_ROOT, HTTPS_FORCE_ON, API_ENABLED, API_KEY, LAUNCH_BROWSER, GIT_PATH, SAFESTART, \
CURRENT_VERSION, LATEST_VERSION, CHECK_GITHUB, CHECK_GITHUB_ON_STARTUP, CHECK_GITHUB_INTERVAL, USER_AGENT, DESTINATION_DIR, MULTIPLE_DEST_DIRS, CREATE_FOLDERS, \
DOWNLOAD_DIR, USENET_RETENTION, SEARCH_INTERVAL, NZB_STARTUP_SEARCH, INTERFACE, AUTOWANT_ALL, AUTOWANT_UPCOMING, ZERO_LEVEL, ZERO_LEVEL_N, COMIC_COVER_LOCAL, HIGHCOUNT, \
DOWNLOAD_DIR, USENET_RETENTION, SEARCH_INTERVAL, NZB_STARTUP_SEARCH, INTERFACE, DUPECONSTRAINT, AUTOWANT_ALL, AUTOWANT_UPCOMING, ZERO_LEVEL, ZERO_LEVEL_N, COMIC_COVER_LOCAL, HIGHCOUNT, \
LIBRARYSCAN, LIBRARYSCAN_INTERVAL, DOWNLOAD_SCAN_INTERVAL, NZB_DOWNLOADER, USE_SABNZBD, SAB_HOST, SAB_USERNAME, SAB_PASSWORD, SAB_APIKEY, SAB_CATEGORY, SAB_PRIORITY, SAB_DIRECTORY, USE_BLACKHOLE, BLACKHOLE_DIR, ADD_COMICS, COMIC_DIR, IMP_MOVE, IMP_RENAME, IMP_METADATA, \
USE_NZBGET, NZBGET_HOST, NZBGET_PORT, NZBGET_USERNAME, NZBGET_PASSWORD, NZBGET_CATEGORY, NZBGET_PRIORITY, NZBGET_DIRECTORY, NZBSU, NZBSU_UID, NZBSU_APIKEY, DOGNZB, DOGNZB_UID, DOGNZB_APIKEY, \
NEWZNAB, NEWZNAB_NAME, NEWZNAB_HOST, NEWZNAB_APIKEY, NEWZNAB_UID, NEWZNAB_ENABLED, EXTRA_NEWZNABS, NEWZNAB_EXTRA, \
@ -378,7 +386,7 @@ def initialize():
ENABLE_RSS, RSS_CHECKINTERVAL, RSS_LASTRUN, FAILED_DOWNLOAD_HANDLING, FAILED_AUTO, ENABLE_TORRENT_SEARCH, ENABLE_KAT, KAT_PROXY, ENABLE_CBT, CBT_PASSKEY, SNATCHEDTORRENT_NOTIFY, \
PROWL_ENABLED, PROWL_PRIORITY, PROWL_KEYS, PROWL_ONSNATCH, NMA_ENABLED, NMA_APIKEY, NMA_PRIORITY, NMA_ONSNATCH, PUSHOVER_ENABLED, PUSHOVER_PRIORITY, PUSHOVER_APIKEY, PUSHOVER_USERKEY, PUSHOVER_ONSNATCH, BOXCAR_ENABLED, BOXCAR_ONSNATCH, BOXCAR_TOKEN, \
PUSHBULLET_ENABLED, PUSHBULLET_APIKEY, PUSHBULLET_DEVICEID, PUSHBULLET_ONSNATCH, LOCMOVE, NEWCOM_DIR, FFTONEWCOM_DIR, \
PREFERRED_QUALITY, MOVE_FILES, RENAME_FILES, LOWERCASE_FILENAMES, USE_MINSIZE, MINSIZE, USE_MAXSIZE, MAXSIZE, CORRECT_METADATA, FOLDER_FORMAT, FILE_FORMAT, REPLACE_CHAR, REPLACE_SPACES, ADD_TO_CSV, CVINFO, LOG_LEVEL, POST_PROCESSING, POST_PROCESSING_SCRIPT, SEARCH_DELAY, GRABBAG_DIR, READ2FILENAME, STORYARCDIR, CVURL, CVAPIFIX, CHECK_FOLDER, ENABLE_CHECK_FOLDER, \
PREFERRED_QUALITY, MOVE_FILES, RENAME_FILES, LOWERCASE_FILENAMES, USE_MINSIZE, MINSIZE, USE_MAXSIZE, MAXSIZE, CORRECT_METADATA, FOLDER_FORMAT, FILE_FORMAT, REPLACE_CHAR, REPLACE_SPACES, ADD_TO_CSV, CVINFO, LOG_LEVEL, POST_PROCESSING, POST_PROCESSING_SCRIPT, SEARCH_DELAY, GRABBAG_DIR, READ2FILENAME, STORYARCDIR, COPY2ARCDIR, CVURL, CVAPIFIX, CHECK_FOLDER, ENABLE_CHECK_FOLDER, \
COMIC_LOCATION, QUAL_ALTVERS, QUAL_SCANNER, QUAL_TYPE, QUAL_QUALITY, ENABLE_EXTRA_SCRIPTS, EXTRA_SCRIPTS, ENABLE_PRE_SCRIPTS, PRE_SCRIPTS, PULLNEW, ALT_PULL, COUNT_ISSUES, COUNT_HAVES, COUNT_COMICS, SYNO_FIX, CHMOD_FILE, CHMOD_DIR, ANNUALS_ON, CV_ONLY, CV_ONETIMER, WEEKFOLDER, UMASK
if __INITIALIZED__:
@ -404,6 +412,11 @@ def initialize():
HTTP_PORT = 8090
CONFIG_VERSION = check_setting_str(CFG, 'General', 'config_version', '')
DBCHOICE = check_setting_str(CFG, 'General', 'dbchoice', '')
DBUSER = check_setting_str(CFG, 'General', 'dbuser', '')
DBPASS = check_setting_str(CFG, 'General', 'dbpass', '')
DBNAME = check_setting_str(CFG, 'General', 'dbname', '')
COMICVINE_API = check_setting_str(CFG, 'General', 'comicvine_api', '')
if not COMICVINE_API:
COMICVINE_API = None
@ -455,6 +468,7 @@ def initialize():
CHECK_FOLDER = check_setting_str(CFG, 'General', 'check_folder', '')
ENABLE_CHECK_FOLDER = bool(check_setting_int(CFG, 'General', 'enable_check_folder', 0))
INTERFACE = check_setting_str(CFG, 'General', 'interface', 'default')
DUPECONSTRAINT = check_setting_str(CFG, 'General', 'dupeconstraint', 'filesize')
AUTOWANT_ALL = bool(check_setting_int(CFG, 'General', 'autowant_all', 0))
AUTOWANT_UPCOMING = bool(check_setting_int(CFG, 'General', 'autowant_upcoming', 1))
COMIC_COVER_LOCAL = bool(check_setting_int(CFG, 'General', 'comic_cover_local', 0))
@ -494,6 +508,7 @@ def initialize():
if not HIGHCOUNT: HIGHCOUNT = 0
READ2FILENAME = bool(check_setting_int(CFG, 'General', 'read2filename', 0))
STORYARCDIR = bool(check_setting_int(CFG, 'General', 'storyarcdir', 0))
COPY2ARCDIR = bool(check_setting_int(CFG, 'General', 'copy2arcdir', 0))
PROWL_ENABLED = bool(check_setting_int(CFG, 'Prowl', 'prowl_enabled', 0))
PROWL_KEYS = check_setting_str(CFG, 'Prowl', 'prowl_keys', '')
PROWL_ONSNATCH = bool(check_setting_int(CFG, 'Prowl', 'prowl_onsnatch', 0))
@ -833,14 +848,18 @@ def initialize():
logger.initLogger(verbose=VERBOSE) #logger.mylar_log.initLogger(verbose=VERBOSE)
# echo back the logger being used now that it's started.
# if LOGTYPE == 'clog':
# logprog = 'Concurrent Log Handler'
# else:
# logprog = 'Rotational Log Handler (default)'
# logger.fdebug('ConcurrentLogHandler package not installed. Using builtin log handler for Rotational logs (default)')
# logger.fdebug('[Windows Users] If you are experiencing log file locking, you should install the ConcurrentLogHandler ( https://pypi.python.org/pypi/ConcurrentLogHandler/0.8.7 )')
if LOGTYPE == 'clog':
logprog = 'Concurrent Rotational Log Handler'
else:
logprog = 'Rotational Log Handler (default)'
# logger.fdebug('Logger set to use : ' + logprog)
logger.fdebug('Logger set to use : ' + logprog)
if LOGTYPE == 'log' and platform.system() == 'Windows':
logger.fdebug('ConcurrentLogHandler package not installed. Using builtin log handler for Rotational logs (default)')
logger.fdebug('[Windows Users] If you are experiencing log file locking and want this auto-enabled, you need to install Python Extensions for Windows ( http://sourceforge.net/projects/pywin32/ )')
# echo back the DB module being used.
logger.info('[DB Module] Loading : ' + DBCHOICE + ' as the database module to use.')
# Put the cache dir in the data dir for now
if not CACHE_DIR:
@ -1062,6 +1081,11 @@ def config_write():
new_config.encoding = 'UTF8'
new_config['General'] = {}
new_config['General']['config_version'] = CONFIG_VERSION
new_config['General']['dbchoice'] = DBCHOICE
new_config['General']['dbuser'] = DBUSER
new_config['General']['dbpass'] = DBPASS
new_config['General']['dbname'] = DBNAME
if COMICVINE_API is None or COMICVINE_API == '':
new_config['General']['comicvine_api'] = COMICVINE_API
else:
@ -1113,6 +1137,7 @@ def config_write():
new_config['General']['download_scan_interval'] = DOWNLOAD_SCAN_INTERVAL
new_config['General']['check_folder'] = CHECK_FOLDER
new_config['General']['interface'] = INTERFACE
new_config['General']['dupeconstraint'] = DUPECONSTRAINT
new_config['General']['autowant_all'] = int(AUTOWANT_ALL)
new_config['General']['autowant_upcoming'] = int(AUTOWANT_UPCOMING)
new_config['General']['preferred_quality'] = int(PREFERRED_QUALITY)
@ -1137,6 +1162,7 @@ def config_write():
new_config['General']['highcount'] = HIGHCOUNT
new_config['General']['read2filename'] = int(READ2FILENAME)
new_config['General']['storyarcdir'] = int(STORYARCDIR)
new_config['General']['copy2arcdir'] = int(COPY2ARCDIR)
new_config['General']['use_minsize'] = int(USE_MINSIZE)
new_config['General']['minsize'] = MINSIZE
new_config['General']['use_maxsize'] = int(USE_MAXSIZE)
@ -1352,18 +1378,24 @@ def start():
started = True
def dbcheck():
#if DBCHOICE == 'postgresql':
# import psycopg2
# conn = psycopg2.connect(database=DBNAME, user=DBUSER, password=DBPASS)
# c_error = psycopg2.DatabaseError
#else:
conn=sqlite3.connect(DB_FILE)
conn = sqlite3.connect(DB_FILE)
c_error = sqlite3.OperationalError #bind the exception class itself - a quoted string here would never match in the 'except c_error:' clauses below
c=conn.cursor()
c.execute('CREATE TABLE IF NOT EXISTS comics (ComicID TEXT UNIQUE, ComicName TEXT, ComicSortName TEXT, ComicYear TEXT, DateAdded TEXT, Status TEXT, IncludeExtras INTEGER, Have INTEGER, Total INTEGER, ComicImage TEXT, ComicPublisher TEXT, ComicLocation TEXT, ComicPublished TEXT, LatestIssue TEXT, LatestDate TEXT, Description TEXT, QUALalt_vers TEXT, QUALtype TEXT, QUALscanner TEXT, QUALquality TEXT, LastUpdated TEXT, AlternateSearch TEXT, UseFuzzy TEXT, ComicVersion TEXT, SortOrder INTEGER, ForceContinuing INTEGER, ComicName_Filesafe TEXT)')
c.execute('CREATE TABLE IF NOT EXISTS issues (IssueID TEXT, ComicName TEXT, IssueName TEXT, Issue_Number TEXT, DateAdded TEXT, Status TEXT, Type TEXT, ComicID, ArtworkURL Text, ReleaseDate TEXT, Location TEXT, IssueDate TEXT, Int_IssueNumber INT, ComicSize TEXT, AltIssueNumber TEXT, IssueDate_Edit TEXT)')
c.execute('CREATE TABLE IF NOT EXISTS comics (ComicID TEXT UNIQUE, ComicName TEXT, ComicSortName TEXT, ComicYear TEXT, DateAdded TEXT, Status TEXT, IncludeExtras INTEGER, Have INTEGER, Total INTEGER, ComicImage TEXT, ComicPublisher TEXT, ComicLocation TEXT, ComicPublished TEXT, LatestIssue TEXT, LatestDate TEXT, Description TEXT, QUALalt_vers TEXT, QUALtype TEXT, QUALscanner TEXT, QUALquality TEXT, LastUpdated TEXT, AlternateSearch TEXT, UseFuzzy TEXT, ComicVersion TEXT, SortOrder INTEGER, DetailURL TEXT, ForceContinuing INTEGER, ComicName_Filesafe TEXT)')
c.execute('CREATE TABLE IF NOT EXISTS issues (IssueID TEXT, ComicName TEXT, IssueName TEXT, Issue_Number TEXT, DateAdded TEXT, Status TEXT, Type TEXT, ComicID TEXT, ArtworkURL Text, ReleaseDate TEXT, Location TEXT, IssueDate TEXT, Int_IssueNumber INT, ComicSize TEXT, AltIssueNumber TEXT, IssueDate_Edit TEXT)')
c.execute('CREATE TABLE IF NOT EXISTS snatched (IssueID TEXT, ComicName TEXT, Issue_Number TEXT, Size INTEGER, DateAdded TEXT, Status TEXT, FolderName TEXT, ComicID TEXT, Provider TEXT)')
c.execute('CREATE TABLE IF NOT EXISTS upcoming (ComicName TEXT, IssueNumber TEXT, ComicID TEXT, IssueID TEXT, IssueDate TEXT, Status TEXT, DisplayComicName TEXT)')
c.execute('CREATE TABLE IF NOT EXISTS nzblog (IssueID TEXT, NZBName TEXT, SARC TEXT, PROVIDER TEXT, ID TEXT)')
c.execute('CREATE TABLE IF NOT EXISTS weekly (SHIPDATE text, PUBLISHER text, ISSUE text, COMIC VARCHAR(150), EXTRA text, STATUS text)')
# c.execute('CREATE TABLE IF NOT EXISTS sablog (nzo_id TEXT, ComicName TEXT, ComicYEAR TEXT, ComicIssue TEXT, name TEXT, nzo_complete TEXT)')
c.execute('CREATE TABLE IF NOT EXISTS importresults (impID TEXT, ComicName TEXT, ComicYear TEXT, Status TEXT, ImportDate TEXT, ComicFilename TEXT, ComicLocation TEXT, WatchMatch TEXT, DisplayName TEXT, SRID TEXT)')
c.execute('CREATE TABLE IF NOT EXISTS importresults (impID TEXT, ComicName TEXT, ComicYear TEXT, Status TEXT, ImportDate TEXT, ComicFilename TEXT, ComicLocation TEXT, WatchMatch TEXT, DisplayName TEXT, SRID TEXT, ComicID TEXT, IssueID TEXT)')
c.execute('CREATE TABLE IF NOT EXISTS readlist (IssueID TEXT, ComicName TEXT, Issue_Number TEXT, Status TEXT, DateAdded TEXT, Location TEXT, inCacheDir TEXT, SeriesYear TEXT, ComicID TEXT)')
c.execute('CREATE TABLE IF NOT EXISTS readinglist(StoryArcID TEXT, ComicName TEXT, IssueNumber TEXT, SeriesYear TEXT, IssueYEAR TEXT, StoryArc TEXT, TotalIssues TEXT, Status TEXT, inCacheDir TEXT, Location TEXT, IssueArcID TEXT, ReadingOrder INT, IssueID TEXT, ComicID TEXT, StoreDate TEXT, IssueDate TEXT, Publisher TEXT, IssuePublisher TEXT, IssueName TEXT)')
c.execute('CREATE TABLE IF NOT EXISTS annuals (IssueID TEXT, Issue_Number TEXT, IssueName TEXT, IssueDate TEXT, Status TEXT, ComicID TEXT, GCDComicID TEXT, Location TEXT, ComicSize TEXT, Int_IssueNumber INT, ComicName TEXT, ReleaseDate TEXT, ReleaseComicID TEXT, ReleaseComicName TEXT, IssueDate_Edit TEXT)')
@ -1383,49 +1415,49 @@ def dbcheck():
try:
c.execute('SELECT LastUpdated from comics')
except sqlite3.OperationalError:
except c_error:
c.execute('ALTER TABLE comics ADD COLUMN LastUpdated TEXT')
try:
c.execute('SELECT QUALalt_vers from comics')
except sqlite3.OperationalError:
except c_error:
c.execute('ALTER TABLE comics ADD COLUMN QUALalt_vers TEXT')
try:
c.execute('SELECT QUALtype from comics')
except sqlite3.OperationalError:
except c_error:
c.execute('ALTER TABLE comics ADD COLUMN QUALtype TEXT')
try:
c.execute('SELECT QUALscanner from comics')
except sqlite3.OperationalError:
except c_error:
c.execute('ALTER TABLE comics ADD COLUMN QUALscanner TEXT')
try:
c.execute('SELECT QUALquality from comics')
except sqlite3.OperationalError:
except c_error:
c.execute('ALTER TABLE comics ADD COLUMN QUALquality TEXT')
try:
c.execute('SELECT AlternateSearch from comics')
except sqlite3.OperationalError:
except c_error:
c.execute('ALTER TABLE comics ADD COLUMN AlternateSearch TEXT')
try:
c.execute('SELECT ComicVersion from comics')
except sqlite3.OperationalError:
except c_error:
c.execute('ALTER TABLE comics ADD COLUMN ComicVersion TEXT')
try:
c.execute('SELECT SortOrder from comics')
except sqlite3.OperationalError:
except c_error:
c.execute('ALTER TABLE comics ADD COLUMN SortOrder INTEGER')
try:
c.execute('SELECT UseFuzzy from comics')
except sqlite3.OperationalError:
except c_error:
c.execute('ALTER TABLE comics ADD COLUMN UseFuzzy TEXT')
try:
c.execute('SELECT DetailURL from comics')
except sqlite3.OperationalError:
except c_error:
c.execute('ALTER TABLE comics ADD COLUMN DetailURL TEXT')
try:
@ -1443,12 +1475,12 @@ def dbcheck():
try:
c.execute('SELECT ComicSize from issues')
except sqlite3.OperationalError:
except c_error:
c.execute('ALTER TABLE issues ADD COLUMN ComicSize TEXT')
try:
c.execute('SELECT inCacheDir from issues')
except sqlite3.OperationalError:
except c_error:
c.execute('ALTER TABLE issues ADD COLUMN inCacheDIR TEXT')
try:
@ -1466,27 +1498,27 @@ def dbcheck():
try:
c.execute('SELECT WatchMatch from importresults')
except sqlite3.OperationalError:
except c_error:
c.execute('ALTER TABLE importresults ADD COLUMN WatchMatch TEXT')
try:
c.execute('SELECT IssueCount from importresults')
except sqlite3.OperationalError:
except c_error:
c.execute('ALTER TABLE importresults ADD COLUMN IssueCount TEXT')
try:
c.execute('SELECT ComicLocation from importresults')
except sqlite3.OperationalError:
except c_error:
c.execute('ALTER TABLE importresults ADD COLUMN ComicLocation TEXT')
try:
c.execute('SELECT ComicFilename from importresults')
except sqlite3.OperationalError:
except c_error:
c.execute('ALTER TABLE importresults ADD COLUMN ComicFilename TEXT')
try:
c.execute('SELECT impID from importresults')
except sqlite3.OperationalError:
except c_error:
c.execute('ALTER TABLE importresults ADD COLUMN impID TEXT')
try:
@ -1503,31 +1535,42 @@ def dbcheck():
c.execute('SELECT SRID from importresults')
except:
c.execute('ALTER TABLE importresults ADD COLUMN SRID TEXT')
try:
c.execute('SELECT ComicID from importresults')
except:
c.execute('ALTER TABLE importresults ADD COLUMN ComicID TEXT')
try:
c.execute('SELECT IssueID from importresults')
except:
c.execute('ALTER TABLE importresults ADD COLUMN IssueID TEXT')
## -- Readlist Table --
try:
c.execute('SELECT inCacheDIR from readlist')
except sqlite3.OperationalError:
except c_error:
c.execute('ALTER TABLE readlist ADD COLUMN inCacheDIR TEXT')
try:
c.execute('SELECT Location from readlist')
except sqlite3.OperationalError:
except c_error:
c.execute('ALTER TABLE readlist ADD COLUMN Location TEXT')
try:
c.execute('SELECT IssueDate from readlist')
except sqlite3.OperationalError:
except c_error:
c.execute('ALTER TABLE readlist ADD COLUMN IssueDate TEXT')
try:
c.execute('SELECT SeriesYear from readlist')
except sqlite3.OperationalError:
except c_error:
c.execute('ALTER TABLE readlist ADD COLUMN SeriesYear TEXT')
try:
c.execute('SELECT ComicID from readlist')
except sqlite3.OperationalError:
except c_error:
c.execute('ALTER TABLE readlist ADD COLUMN ComicID TEXT')
@ -1676,7 +1719,7 @@ def dbcheck():
#value in the sql so we can display it in the details screen for everyone to wonder at.
try:
c.execute('SELECT not_updated_db from comics')
except sqlite3.OperationalError:
except c_error:
c.execute('ALTER TABLE comics ADD COLUMN not_updated_db TEXT')
# -- not implemented just yet ;)
@ -1686,12 +1729,12 @@ def dbcheck():
# MetaData will hold the MetaData itself in tuple format
# try:
# c.execute('SELECT MetaData_Present from comics')
# except sqlite3.OperationalError:
# except c_error:
# c.execute('ALTER TABLE importresults ADD COLUMN MetaData_Present TEXT')
# try:
# c.execute('SELECT MetaData from importresults')
# except sqlite3.OperationalError:
# except c_error:
# c.execute('ALTER TABLE importresults ADD COLUMN MetaData TEXT')
#let's delete errant comics that are stranded (ie. Comicname = Comic ID: )
@ -1710,7 +1753,7 @@ def dbcheck():
def csv_load():
# for redundant module calls, include this.
conn=sqlite3.connect(DB_FILE)
conn = sqlite3.connect(DB_FILE)
c=conn.cursor()
c.execute('DROP TABLE IF EXISTS exceptions')
@ -1752,11 +1795,11 @@ def csv_load():
for row in creader:
try:
c.execute("INSERT INTO exceptions VALUES (?,?,?,?);", row)
#print row.split(',')
c.execute("INSERT INTO exceptions VALUES (?,?,?,?)", row)
except Exception, e:
#print ("Error - invald arguments...-skipping")
pass
pass
csvfile.close()
i+=1
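# --- editor's sketch (not part of this commit) --------------------------
# Every schema migration in dbcheck() above repeats one idiom: probe a
# column with a SELECT and ALTER it in when the probe fails.  Assuming a
# plain sqlite3 cursor, a helper like this (the name is illustrative)
# expresses the idiom once; identifiers are interpolated because DDL
# cannot be parameterized.
import sqlite3

def ensure_column(cursor, table, column, coltype):
    try:
        #sqlite3 raises OperationalError when the column does not exist
        cursor.execute('SELECT %s FROM %s' % (column, table))
    except sqlite3.OperationalError:
        cursor.execute('ALTER TABLE %s ADD COLUMN %s %s' % (table, column, coltype))

# usage, mirroring two of the blocks above:
#   ensure_column(c, 'comics', 'LastUpdated', 'TEXT')
#   ensure_column(c, 'importresults', 'ComicID', 'TEXT')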
View File
@ -18,7 +18,7 @@ import mylar
from mylar import logger
from mylar.helpers import cvapi_check
def run (dirName, nzbName=None, issueid=None, manual=None, filename=None, module=None):
def run (dirName, nzbName=None, issueid=None, comversion=None, manual=None, filename=None, module=None):
if module is None:
module = ''
module += '[META-TAGGER]'
@ -153,11 +153,19 @@ def run (dirName, nzbName=None, issueid=None, manual=None, filename=None, module
if fcount > 1:
logger.fdebug(module + ' More than one cbr/cbz within path, performing Post-Process on first file detected: ' + f)
break
shutil.move( f, comicpath )
if f.endswith('.cbz'):
logger.fdebug(module + ' .cbz file detected. Excluding from temporary directory move at this time.')
comicpath = downloadpath
else:
shutil.move( f, comicpath )
filename = f #just the filename itself
fcount+=1
else:
# if the filename is identical to the parent folder, the entire subfolder gets copied since it's the first match, instead of just the file
#if os.path.isfile(filename):
#if the filename doesn't exist - force the path assuming it's the 'download path'
filename = os.path.join(downloadpath, filename)
logger.fdebug(module + ' The file path I was provided is probably wrong - modifying it to : ' + filename)
shutil.move( filename, comicpath )
try:
@ -217,7 +225,7 @@ def run (dirName, nzbName=None, issueid=None, manual=None, filename=None, module
shutil.rmtree( comicpath )
logger.fdebug(module + ' Successfully removed temporary directory: ' + comicpath)
else:
loggger.fdebug(module + ' Unable to remove temporary directory since it is identical to the download location : ' + comicpath)
logger.fdebug(module + ' Unable to remove temporary directory since it is identical to the download location : ' + comicpath)
logger.fdebug(module + ' new filename : ' + base)
nfilename = base
@ -301,7 +309,10 @@ def run (dirName, nzbName=None, issueid=None, manual=None, filename=None, module
logger.fdebug(module + ' absDirName: ' + os.path.abspath(dirName))
##set up default comictagger options here.
tagoptions = [ "-s", "--verbose" ]
if comversion is None or comversion == '':
comversion = 1
cvers = 'volume=' + str(comversion)
tagoptions = [ "-s", "--verbose", "-m", cvers ]
## check comictagger version - less than 1.15.beta - take your chances.
if sys_type == 'windows':
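# --- editor's sketch (not part of this commit) --------------------------
# The comversion handling above defaults a missing series volume to 1 and
# hands it to comictagger as '-m volume=N'.  Isolated for clarity (the
# function name is illustrative):
def build_tagoptions(comversion):
    if comversion is None or comversion == '':
        comversion = 1
    return ["-s", "--verbose", "-m", 'volume=' + str(comversion)]

# build_tagoptions(None) -> ['-s', '--verbose', '-m', 'volume=1']
# build_tagoptions('2')  -> ['-s', '--verbose', '-m', 'volume=2']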
View File
@ -102,6 +102,7 @@ def listFiles(dir,watchcomic,Publisher,AlternateSearch=None,manual=None,sarc=Non
#print item
#subname = os.path.join(basedir, item)
subname = item
subname = re.sub('\_', ' ', subname)
@ -115,6 +116,17 @@ def listFiles(dir,watchcomic,Publisher,AlternateSearch=None,manual=None,sarc=Non
vers4year = "no"
vers4vol = "no"
digitchk = 0
if sarc and mylar.READ2FILENAME:
logger.info('subname: ' + subname)
removest = subname.find('-') # the reading-order tag is prefixed as 'NNN-', so test up to the first hyphen
logger.info('removest: ' + str(removest))
logger.info('removestdig: ' + str(subname[:removest]))
if subname[:removest].isdigit() and removest == 3:
subname = subname[4:]
logger.info('subname set to : ' + subname)
for subit in subsplit:
if subit[0].lower() == 'v':
@ -213,8 +225,8 @@ def listFiles(dir,watchcomic,Publisher,AlternateSearch=None,manual=None,sarc=Non
subthis = re.sub('\s+',' ', subthis)
logger.fdebug('[FILECHECKER] sub-cleaned: ' + subthis)
#we need to make sure the file is part of the correct series or else will match falsely
if watchname not in subthis:
logger.fdebug('[FILECHECKER] this is a false match. Ignoring this result.')
if watchname.lower() not in subthis.lower():
logger.fdebug('[FILECHECKER] ' + watchname + ' this is a false match to ' + subthis + ' - Ignoring this result.')
continue
subthis = subthis[len(watchname):] #remove watchcomic
#we need to now check the remainder of the string for digits assuming it's a possible year
@ -265,8 +277,8 @@ def listFiles(dir,watchcomic,Publisher,AlternateSearch=None,manual=None,sarc=Non
subthis = re.sub('\s+',' ', subthis)
logger.fdebug('[FILECHECKER] sub-cleaned: ' + subthis)
#we need to make sure the file is part of the correct series or else will match falsely
if watchname not in subthis:
logger.fdebug('[FILECHECKER] this is a false match. Ignoring this result.')
if watchname.lower() not in subthis.lower():
logger.fdebug('[FILECHECKER] ' + watchname + ' this is a false match to ' + subthis + ' - Ignoring this result.')
continue
subthis = subthis[len(watchname):].strip() #remove watchcomic
#we need to now check the remainder of the string for digits assuming it's a possible year
@ -379,7 +391,7 @@ def listFiles(dir,watchcomic,Publisher,AlternateSearch=None,manual=None,sarc=Non
if nono in subname:
subcnt = subname.count(nono)
charpos = indices(subname,nono) # will return a list of char positions in subname
#print "charpos: " + str(charpos)
logger.fdebug("charpos: " + str(charpos))
if nono == '-':
i=0
while (i < len(charpos)):
@ -408,20 +420,29 @@ def listFiles(dir,watchcomic,Publisher,AlternateSearch=None,manual=None,sarc=Non
#logger.fdebug('[FILECHECKER] (str(nono) + " detected " + str(subcnt) + " times.")
# segment '.' having a . by itself will denote the entire string which we don't want
elif nono == '.':
logger.fdebug('[FILECHECKER] Decimal check.')
x = 0
fndit = 0
dcspace = 0
while x < subcnt:
fndit = subname.find(nono, fndit)
if subname[fndit-1:fndit].isdigit() and subname[fndit+1:fndit+2].isdigit():
logger.fdebug('[FILECHECKER] decimal issue detected.')
dcspace+=1
x+=1
if dcspace == 1:
nonocount = nonocount + subcnt + dcspace
else:
subname = re.sub('\.', ' ', subname)
nonocount = nonocount + subcnt - 1 #(remove the extension from the length)
while (x < len(charpos)):
for x,j in enumerate(charpos):
fndit = j
logger.fdebug('fndit: ' + str(fndit))
logger.fdebug('isdigit1: ' + subname[fndit-1:fndit])
logger.fdebug('isdigit2: ' + subname[fndit+1:fndit+2])
if subname[fndit-1:fndit].isdigit() and subname[fndit+1:fndit+2].isdigit():
logger.fdebug('[FILECHECKER] decimal issue detected.')
dcspace+=1
else:
subname = subname[:fndit] + ' ' + subname[fndit+1:]
nonocount+=1
x+=1
nonocount += (subcnt + dcspace)
#if dcspace == 1:
# nonocount = nonocount + subcnt + dcspace
#else:
# subname = re.sub('\.', ' ', subname)
# nonocount = nonocount + subcnt - 1 #(remove the extension from the length)
else:
#this is new - if it's a symbol separated by a space on each side it drags in an extra char.
x = 0
@ -581,9 +602,12 @@ def listFiles(dir,watchcomic,Publisher,AlternateSearch=None,manual=None,sarc=Non
logger.fdebug('[FILECHECKER] ' + str(j) + ' is >= ' + str(len(subname)) + ' .End reached. ignoring remainder.')
break
elif subname[j:] == '-':
if j <= len(subname) and subname[j+1].isdigit():
logger.fdebug('[FILECHECKER] negative issue detected.')
#detneg = "yes"
try:
if j <= len(subname) and subname[j+1].isdigit():
logger.fdebug('[FILECHECKER] negative issue detected.')
#detneg = "yes"
except IndexError:
logger.fdebug('[FILECHECKER] There was a problem parsing the information from this filename: ' + comicpath)
elif j > findtitlepos:
if subname[j:] == '#':
if subname[j+1].isdigit():
@ -593,6 +617,7 @@ def listFiles(dir,watchcomic,Publisher,AlternateSearch=None,manual=None,sarc=Non
elif ('-' in watchcomic or '.' in watchcomic) and j < len(watchcomic):
logger.fdebug('[FILECHECKER] - appears in series title, ignoring.')
else:
digitchk = subname[j:]
logger.fdebug('[FILECHECKER] special character appears outside of title - ignoring @ position: ' + str(charpos[i]))
nonocount-=1
@ -602,11 +627,11 @@ def listFiles(dir,watchcomic,Publisher,AlternateSearch=None,manual=None,sarc=Non
else:
jtd_len = len(cchk)# + nonocount
if sarc and mylar.READ2FILENAME:
removest = subname.find(' ') # the - gets removed above so we test for the first blank space...
if subname[:removest].isdigit():
jtd_len += removest + 1 # +1 to account for space in place of -
logger.fdebug('[FILECHECKER] adjusted jtd_len to : ' + str(removest) + ' because of story-arc reading order tags')
# if sarc and mylar.READ2FILENAME:
# removest = subname.find(' ') # the - gets removed above so we test for the first blank space...
# if subname[:removest].isdigit():
# jtd_len += removest + 1 # +1 to account for space in place of -
# logger.fdebug('[FILECHECKER] adjusted jtd_len to : ' + str(removest) + ' because of story-arc reading order tags')
logger.fdebug('[FILECHECKER] nonocount [' + str(nonocount) + '] cchk [' + cchk + '] length [' + str(len(cchk)) + ']')
@ -637,6 +662,68 @@ def listFiles(dir,watchcomic,Publisher,AlternateSearch=None,manual=None,sarc=Non
logger.fdebug('[FILECHECKER] after title removed from SUBNAME [' + justthedigits_1 + ']')
titlechk = False
if digitchk:
try:
#do the issue title check here
logger.fdebug('[FILECHECKER] Possible issue title is : ' + str(digitchk))
# see if it can float the digits
try:
st = digitchk.find('.')
logger.fdebug('st:' + str(st))
st_d = digitchk[:st]
logger.fdebug('st_d:' + str(st_d))
st_e = digitchk[st+1:]
logger.fdebug('st_e:' + str(st_e))
#x = int(float(st_d))
#logger.fdebug('x:' + str(x))
#validity check
if helpers.is_number(st_d):
#x2 = int(float(st_e))
if helpers.is_number(st_e):
logger.fdebug('[FILECHECKER] This is a decimal issue.')
else: raise ValueError
else: raise ValueError
except ValueError, e:
if digitchk.startswith('.'):
pass
else:
if len(justthedigits_1) >= len(digitchk):
logger.fdebug('[FILECHECKER] Removing issue title.')
justthedigits_1 = re.sub(digitchk,'', justthedigits_1).strip()
logger.fdebug('[FILECHECKER] After issue title removed [' + justthedigits_1 + ']')
titlechk = True
hyphensplit = digitchk
issue_firstword = digitchk.split()[0]
splitit = subname.split()
splitst = len(splitit)
logger.fdebug('[FILECHECKER] splitit :' + str(splitit))
logger.fdebug('[FILECHECKER] splitst :' + str(len(splitit)))
orignzb = item
except:
#test this out for manual post-processing items like original sin 003.3 - thor and loki 002...
#***************************************************************************************
# need to assign digitchk here for issues that don't have a title and fail the above try.
#***************************************************************************************
try:
logger.fdebug('[FILECHECKER] justthedigits_1 len : ' + str(len(justthedigits_1)))
logger.fdebug('[FILECHECKER] digitchk len : ' + str(len(digitchk)))
if len(justthedigits_1) >= len(digitchk):
logger.fdebug('[FILECHECKER] Removing issue title.')
justthedigits_1 = re.sub(digitchk,'', justthedigits_1).strip()
logger.fdebug('[FILECHECKER] After issue title removed [' + justthedigits_1 + ']')
titlechk = True
hyphensplit = digitchk
issue_firstword = digitchk.split()[0]
splitit = subname.split()
splitst = len(splitit)
logger.info('[FILECHECKER] splitit :' + str(splitit))
logger.info('[FILECHECKER] splitst :' + str(len(splitit)))
orignzb = item
except:
pass #(revert this back if above except doesn't work)
#remove the title if it appears
#findtitle = justthedigits.find('-')
#if findtitle > 0 and detneg == "no":
@ -771,31 +858,13 @@ def listFiles(dir,watchcomic,Publisher,AlternateSearch=None,manual=None,sarc=Non
#print ("there are " + str(lenm) + " words.")
cnt = 0
yearmatch = "none"
#vers4year = "no"
#vers4vol = "no"
logger.fdebug('[FILECHECKER] subsplit : ' + str(subsplit))
#for ct in subsplit:
# if ct.lower().startswith('v') and ct[1:].isdigit():
# logger.fdebug('[FILECHECKER] possible versioning..checking')
# #we hit a versioning # - account for it
# if ct[1:].isdigit():
# if len(ct[1:]) == 4: #v2013
# logger.fdebug('[FILECHECKER] Version detected as ' + str(ct))
# vers4year = "yes" #re.sub("[^0-9]", " ", str(ct)) #remove the v
# break
# else:
# if len(ct) < 4:
# logger.fdebug('[FILECHECKER] Version detected as ' + str(ct))
# vers4vol = str(ct)
# break
# logger.fdebug('[FILECHECKER] false version detection..ignoring.')
versionmatch = "false"
if vers4year is not "no" or vers4vol is not "no":
if comicvolume: #is not "None" and comicvolume is not None:
if comicvolume:
D_ComicVersion = re.sub("[^0-9]", "", comicvolume)
if D_ComicVersion == '':
D_ComicVersion = 0
@ -833,9 +902,10 @@ def listFiles(dir,watchcomic,Publisher,AlternateSearch=None,manual=None,sarc=Non
# logger.fdebug('[FILECHECKER] Series version detected as V1 (only series in existance with that title). Bypassing year check')
# yearmatch = "true"
# break
if subnm[cnt][:-2] == '19' or subnm[cnt][:-2] == '20':
if (subnm[cnt].startswith('19') or subnm[cnt].startswith('20')) and len(subnm[cnt]) == 4:
logger.fdebug('[FILECHECKER] year detected: ' + str(subnm[cnt]))
result_comyear = subnm[cnt]
##### - checking to see what removing this does for the masses
if int(result_comyear) <= int(maxyear) and int(result_comyear) >= int(comyear):
logger.fdebug('[FILECHECKER] ' + str(result_comyear) + ' is within the series range of ' + str(comyear) + '-' + str(maxyear))
#still possible for incorrect match if multiple reboots of series end/start in same year
@ -843,8 +913,9 @@ def listFiles(dir,watchcomic,Publisher,AlternateSearch=None,manual=None,sarc=Non
break
else:
logger.fdebug('[FILECHECKER] ' + str(result_comyear) + ' - not right - year not within series range of ' + str(comyear) + '-' + str(maxyear))
yearmatch = "false"
yearmatch = "false" #set to true for mass push check.
break
##### - end check
cnt+=1
if versionmatch == "false":
if yearmatch == "false":
@ -873,6 +944,32 @@ def listFiles(dir,watchcomic,Publisher,AlternateSearch=None,manual=None,sarc=Non
#if the sub has an annual, let's remove it from the modwatch as well
modwatchcomic = re.sub('annual', '', modwatchcomic.lower())
isstitle_chk = False
if titlechk:
issuetitle = helpers.get_issue_title(ComicID=manual['ComicID'], IssueNumber=justthedigits)
if issuetitle:
vals = []
watchcomic_split = watchcomic.split()
vals = mylar.search.IssueTitleCheck(issuetitle, watchcomic_split, splitit, splitst, issue_firstword, hyphensplit, orignzb=item)
logger.fdebug('vals: ' + str(vals))
if vals:
if vals[0]['status'] == 'continue':
continue
else:
logger.fdebug('Issue title status returned of : ' + str(vals[0]['status'])) # will either be OK or pass.
splitit = vals[0]['splitit']
splitst = vals[0]['splitst']
isstitle_chk = vals[0]['isstitle_chk']
possibleissue_num = vals[0]['possibleissue_num']
#if the issue title was present and it contained a numeric, it will pull that as the issue incorrectly
if isstitle_chk == True:
justthedigits = possibleissue_num
subname = re.sub(' '.join(vals[0]['isstitle_removal']),'',subname).strip()
else:
logger.fdebug('No issue title.')
#tmpitem = item[:jtd_len]
# if it's an alphanumeric with a space, rejoin, so we can remove it cleanly just below this.
substring_removal = None
@ -895,6 +992,13 @@ def listFiles(dir,watchcomic,Publisher,AlternateSearch=None,manual=None,sarc=Non
logger.fdebug('[FILECHECKER] sub_removed: ' + str(sub_removed))
split_sub = sub_removed.rsplit(' ',1)[0].split(' ') #removes last word (assuming it's the issue#)
split_mod = modwatchcomic.replace('_', ' ').split() #batman
i = 0
newc = ''
while (i < len(split_mod)) and (i < len(split_sub)): #guard against a filename splitting into fewer words than the watch name
newc += split_sub[i] + ' '
i+=1
if newc:
split_sub = newc.strip().split()
logger.fdebug('[FILECHECKER] split_sub: ' + str(split_sub))
logger.fdebug('[FILECHECKER] split_mod: ' + str(split_mod))
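# --- editor's sketch (not part of this commit) --------------------------
# Two pieces of the parsing above, condensed.  First, the reworked '.'
# pass: a dot is kept only when flanked by digits on both sides (a decimal
# issue such as '003.3'); any other dot is blanked out as a separator.
def strip_non_decimal_dots(subname):
    out = list(subname)
    for i, ch in enumerate(subname):
        if ch == '.' and not (subname[i-1:i].isdigit() and subname[i+1:i+2].isdigit()):
            out[i] = ' '
    return ''.join(out)

# strip_non_decimal_dots('Spawn.Origins.Collection.01') -> 'Spawn Origins Collection 01'
# strip_non_decimal_dots('Original Sin 003.3')          -> 'Original Sin 003.3'

# Second, the decimal-issue test applied to digitchk: both sides of the
# first '.' must be numeric for it to count as a decimal issue.
def is_decimal_issue(digitchk):
    st = digitchk.find('.')
    if st == -1:
        return False
    try:
        float(digitchk[:st])
        float(digitchk[st+1:])
        return True
    except ValueError:
        return False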
View File
@ -129,8 +129,8 @@ def Startit(searchName, searchIssue, searchYear, ComicVersion, IssDateFix):
if cName.lower().startswith('for'):
pass
else:
#this is the crap we ignore. Continue
logger.fdebug('this starts with FOR : ' + str(subs) + '. This is not present in the series - ignoring.')
#this is the crap we ignore. Continue (commented else, as it spams the logs)
#logger.fdebug('this starts with FOR : ' + str(subs) + '. This is not present in the series - ignoring.')
continue
logger.fdebug('match.')
if IssDateFix != "no":
View File
@ -518,7 +518,7 @@ def rename_param(comicid, comicname, issue, ofilename, comicyear=None, issueid=N
'$Annual': 'Annual'
}
extensions = ('.cbr', '.cbz')
extensions = ('.cbr', '.cbz', '.cb7')
if ofilename.lower().endswith(extensions):
path, ext = os.path.splitext(ofilename)
@ -574,17 +574,16 @@ def ComicSort(comicorder=None,sequence=None,imported=None):
# if it's on startup, load the sql into a tuple for use to avoid record-locking
i = 0
import logger
#if mylar.DBCHOICE == 'postgresql':
# import db_postgresql as db
# myDB = db.DBConnection()
# comicsort = myDB.select("SELECT * FROM comics ORDER BY ComicSortName COLLATE ?", [mylar.OS_LANG])
#else:
# import db
# myDB = db.DBConnection()
# comicsort = myDB.select("SELECT * FROM comics ORDER BY ComicSortName COLLATE NOCASE")
import db
myDB = db.DBConnection()
comicsort = myDB.select("SELECT * FROM comics ORDER BY ComicSortName COLLATE NOCASE")
if mylar.DBCHOICE == 'postgresql':
import db_postgresql as db
myDB = db.DBConnection()
oscollate = mylar.OS_LANG + '.UTF8'
logger.info('OS_LANG: ' + oscollate)
comicsort = myDB.select('SELECT * FROM comics ORDER BY ComicSortName')# COLLATE "%s"', [oscollate])
else:
import db
myDB = db.DBConnection()
comicsort = myDB.select("SELECT * FROM comics ORDER BY ComicSortName COLLATE NOCASE")
comicorderlist = []
comicorder = {}
comicidlist = []
@ -1077,17 +1076,15 @@ def havetotals(refreshit=None):
if refreshit is None:
#if mylar.DBCHOICE == 'postgresql':
# import db_postgresql as db
# myDB = db.DBConnection()
# comiclist = myDB.select("SELECT * from comics order by ComicSortName COLLATE ?",[mylar.OS_LANG])
#else:
# import db
# myDB = db.DBConnection()
# comiclist = myDB.select('SELECT * from comics order by ComicSortName COLLATE NOCASE')
import db
myDB = db.DBConnection()
comiclist = myDB.select('SELECT * from comics order by ComicSortName COLLATE NOCASE')
if mylar.DBCHOICE == 'postgresql':
import db_postgresql as db
myDB = db.DBConnection()
comiclist = myDB.select('SELECT * from comics order by ComicSortName')# COLLATE "%s"',[mylar.OS_LANG])
else:
import db
myDB = db.DBConnection()
comiclist = myDB.select('SELECT * from comics order by ComicSortName COLLATE NOCASE')
else:
comiclist = []
myDB = db.DBConnection()
@ -1140,7 +1137,10 @@ def havetotals(refreshit=None):
percent = 0
totalissuess = '?'
if comic['ComicPublished'] is None or comic['ComicPublished'] == '':
if comic['LatestDate'] is None:
logger.warn(comic['ComicName'] + ' has not finished loading. Nulling some values so things display properly until they can populate.')
recentstatus = 'Loading'
elif comic['ComicPublished'] is None or comic['ComicPublished'] == '' or comic['LatestDate'] is None:
recentstatus = 'Unknown'
elif comic['ForceContinuing'] == 1:
recentstatus = 'Continuing'
@ -1481,13 +1481,17 @@ def get_issue_title(IssueID=None, ComicID=None, IssueNumber=None):
if IssueID:
issue = myDB.selectone('SELECT * FROM issues WHERE IssueID=?', [IssueID]).fetchone()
if issue is None:
logger.warn('Unable to locate given IssueID within the db.')
return None
issue = myDB.selectone('SELECT * FROM annuals WHERE IssueID=?', [IssueID]).fetchone()
if issue is None:
logger.fdebug('Unable to locate given IssueID within the db. Assuming Issue Title is None.')
return None
else:
issue = myDB.selectone('SELECT * FROM issues WHERE ComicID=? AND Int_IssueNumber=?', [ComicID, issuedigits(IssueNumber)]).fetchone()
if issue is None:
logger.warn('Unable to locate given IssueID within the db.')
return None
issue = myDB.selectone('SELECT * FROM annuals WHERE ComicID=? AND Int_IssueNumber=?', [ComicID, issuedigits(IssueNumber)]).fetchone()
if issue is None:
logger.fdebug('Unable to locate the given ComicID/IssueNumber within the db. Assuming Issue Title is None.')
return None
return issue['IssueName']
@ -1530,7 +1534,7 @@ def duplicate_filecheck(filename, ComicID=None, IssueID=None, StoryArcID=None):
import db, logger
myDB = db.DBConnection()
logger.info('duplicate check for ' + filename)
logger.info('[DUPECHECK] Duplicate check for ' + filename)
filesz = os.path.getsize(filename)
if IssueID:
@ -1538,33 +1542,71 @@ def duplicate_filecheck(filename, ComicID=None, IssueID=None, StoryArcID=None):
if dupchk is None:
dupchk = myDB.selectone("SELECT * FROM annuals WHERE IssueID=?", [IssueID]).fetchone()
if dupchk is None:
logger.info('Unable to find corresponding Issue within the DB. Do you still have the series on your watchlist?')
logger.info('[DUPECHECK] Unable to find corresponding Issue within the DB. Do you still have the series on your watchlist?')
return
if any( [ dupchk['Status'] == 'Downloaded', dupchk['Status'] == 'Archived' ] ):
logger.info('Existing Status already set to ' + dupchk['Status'])
logger.info('[DUPECHECK] Existing Status already set to ' + dupchk['Status'])
dupsize = dupchk['ComicSize']
cid = []
if dupsize is None:
logger.info('Existing filesize is 0 bytes as I cannot locate the orginal entry - it is probably archived.')
rtnval = "dupe"
logger.info('[DUPECHECK] Existing filesize is 0 bytes as I cannot locate the original entry - it is probably archived.')
logger.fdebug('[DUPECHECK] Checking series for unrefreshed series syndrome (USS).')
havechk = myDB.selectone('SELECT * FROM comics WHERE ComicID=?', [ComicID]).fetchone()
if havechk:
if havechk['Have'] >= havechk['Total']:
logger.info('[DUPECHECK] Series has invalid issue totals [' + str(havechk['Have']) + '/' + str(havechk['Total']) + '] Attempting to Refresh & continue post-processing this issue.')
cid.append(ComicID)
logger.fdebug('[DUPECHECK] ComicID: ' + str(ComicID))
mylar.updater.dbUpdate(ComicIDList=cid,calledfrom='dupechk')
return duplicate_filecheck(filename, ComicID, IssueID, StoryArcID)
else:
rtnval = "dupe"
else:
rtnval = "dupe"
else:
logger.info('Existing file :' + dupchk['Location'] + ' has a filesize of : ' + str(dupsize) + ' bytes.')
logger.info('[DUPECHECK] Existing file :' + dupchk['Location'] + ' has a filesize of : ' + str(dupsize) + ' bytes.')
#keywords to force keep / delete
#this will be eventually user-controlled via the GUI once the options are enabled.
if int(dupsize) >= filesz:
logger.info('Existing filesize is greater than : ' + str(filesz) + ' bytes.')
rtnval = "dupe"
elif int(dupsize) == 0:
logger.info('Existing filesize is 0 as I cannot locate the original entry. Will assume it is Archived already.')
if int(dupsize) == 0:
logger.info('[DUPECHECK] Existing filesize is 0 as I cannot locate the original entry. Will assume it is Archived already.')
rtnval = "dupe"
else:
logger.info('Existing filesize is less than : ' + str(filesz) + ' bytes. Checking configuration if I should keep this or not.')
rtnval = "write"
logger.fdebug('[DUPECHECK] Based on duplication preferences I will retain based on : ' + mylar.DUPECONSTRAINT)
if 'cbr' in mylar.DUPECONSTRAINT or 'cbz' in mylar.DUPECONSTRAINT:
if 'cbr' in mylar.DUPECONSTRAINT:
#this has to be configured in config - either retain cbr or cbz.
if dupchk['Location'].endswith('.cbz'):
#keep dupchk['Location']
logger.info('[DUPECHECK-CBR PRIORITY] [#' + dupchk['Issue_Number'] + '] Retaining currently scanned in file : ' + dupchk['Location'])
rtnval = "dupe"
else:
#keep filename
logger.info('[DUPECHECK-CBR PRIORITY] [#' + dupchk['Issue_Number'] + '] Retaining newly scanned in file : ' + filename)
rtnval = "write"
elif 'cbz' in mylar.DUPECONSTRAINT:
if dupchk['Location'].endswith('.cbr'):
#keep dupchk['Location']
logger.info('[DUPECHECK-CBZ PRIORITY] [#' + dupchk['Issue_Number'] + '] Retaining currently scanned in filename : ' + dupchk['Location'])
rtnval = "dupe"
else:
#keep filename
logger.info('[DUPECHECK-CBZ PRIORITY] [#' + dupchk['Issue_Number'] + '] Retaining newly scanned in filename : ' + filename)
rtnval = "write"
if mylar.DUPECONSTRAINT == 'filesize':
if filesz <= dupsize:
logger.info('[DUPECHECK-FILESIZE PRIORITY] [#' + dupchk['Issue_Number'] + '] Retaining currently scanned in filename : ' + dupchk['Location'])
rtnval = "dupe"
else:
logger.info('[DUPECHECK-FILESIZE PRIORITY] [#' + dupchk['Issue_Number'] + '] Retaining newly scanned in filename : ' + filename)
rtnval = "write"
else:
logger.info('Duplication detection returned no hits. This is not a duplicate of anything currently on your watchlist.')
logger.info('[DUPECHECK] Duplication detection returned no hits. This is not a duplicate of anything that I have scanned in as of yet.')
rtnval = "write"
return rtnval
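# --- editor's sketch (not part of this commit) --------------------------
# The DUPECONSTRAINT branches above (and their twin in updater.forceRescan)
# reduce to one decision: given the file already on record and the newly
# scanned candidate, which one is retained?  A pure helper makes the three
# modes explicit; it keys the filetype modes off the new file, as the
# forceRescan variant does (names are illustrative).
def resolve_duplicate(existing_size, new_path, new_size, constraint):
    if constraint == 'filetype-cbr':
        #cbr priority: a new .cbz never displaces the existing file
        return 'dupe' if new_path.endswith('.cbz') else 'write'
    if constraint == 'filetype-cbz':
        #cbz priority: a new .cbr never displaces the existing file
        return 'dupe' if new_path.endswith('.cbr') else 'write'
    #default 'filesize': the larger copy wins
    return 'write' if int(new_size) > int(existing_size) else 'dupe'

# resolve_duplicate(1048576, 'issue.cbz', 2097152, 'filesize')     -> 'write'
# resolve_duplicate(1048576, 'issue.cbz', 2097152, 'filetype-cbr') -> 'dupe'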
View File
@ -598,6 +598,18 @@ def addComictoDB(comicid,mismatch=None,pullupd=None,imported=None,ogcname=None,c
logger.info('Returning to Future-Check module to complete the add & remove entry.')
return
if imported == 'yes':
logger.info('Successfully imported : ' + comic['ComicName'])
#now that it's moved / renamed ... we remove it from importResults or mark as completed.
results = myDB.select("SELECT * from importresults WHERE ComicName=?", [ogcname])
if results is not None:
for result in results:
controlValue = {"impID": result['impid']}
newValue = {"Status": "Imported",
"ComicID": comicid}
myDB.upsert("importresults", newValue, controlValue)
if calledfrom == 'addbyid':
logger.info('Successfully added ' + comic['ComicName'] + ' (' + str(SeriesYear) + ') by directly using the ComicVine ID')
return
View File
@ -269,10 +269,11 @@ def libraryScan(dir=None, append=False, ComicID=None, ComicName=None, cron=None)
else:
logger.fdebug("false decimal represent. Chunking to extra word.")
cn = cn + cs[i] + " "
break
issue = cs[i]
logger.fdebug("issue detected : " + str(issue))
idetected = 'yes'
#break
else:
issue = cs[i]
logger.fdebug("issue detected : " + str(issue))
idetected = 'yes'
elif '\#' in cs[i] or decimaldetect == 'yes':
logger.fdebug("issue detected: " + str(cs[i]))
View File
@ -15,12 +15,13 @@
import os
import sys
import logging
#import logging
import traceback
import threading
import platform
import mylar
from logging import handlers
from logging import getLogger, INFO, DEBUG, StreamHandler, Formatter, Handler
from mylar import helpers
@ -29,9 +30,9 @@ FILENAME = 'mylar.log'
MAX_FILES = 5
# Mylar logger
logger = logging.getLogger('mylar')
logger = getLogger('mylar')
class LogListHandler(logging.Handler):
class LogListHandler(Handler):
"""
Log handler for Web UI.
"""
@ -42,6 +43,24 @@ class LogListHandler(logging.Handler):
mylar.LOG_LIST.insert(0, (helpers.now(), message, record.levelname, record.threadName))
def initLogger(verbose=1):
#concurrentLogHandler/0.8.7 (to deal with windows locks)
#since this only happens on windows boxes, if it's nix/mac use the default logger.
if platform.system() == 'Windows':
#set the path to the lib here - just to make sure it can detect cloghandler & portalocker.
import sys
sys.path.append(os.path.join(mylar.PROG_DIR, 'lib'))
try:
from ConcurrentLogHandler.cloghandler import ConcurrentRotatingFileHandler as RFHandler
mylar.LOGTYPE = 'clog'
except ImportError:
mylar.LOGTYPE = 'log'
from logging.handlers import RotatingFileHandler as RFHandler
else:
mylar.LOGTYPE = 'log'
from logging.handlers import RotatingFileHandler as RFHandler
if mylar.MAX_LOGSIZE:
MAX_SIZE = mylar.MAX_LOGSIZE
else:
@ -58,14 +77,14 @@ def initLogger(verbose=1):
# Configure the logger to accept all messages
logger.propagate = False
logger.setLevel(logging.DEBUG)# if verbose == 2 else logging.INFO)
logger.setLevel(DEBUG)# if verbose == 2 else logging.INFO)
# Setup file logger
filename = os.path.join(mylar.LOG_DIR, FILENAME)
file_formatter = logging.Formatter('%(asctime)s - %(levelname)-7s :: %(threadName)s : %(message)s', '%d-%b-%Y %H:%M:%S')
file_handler = handlers.RotatingFileHandler(filename, maxBytes=MAX_SIZE, backupCount=MAX_FILES)
file_handler.setLevel(logging.DEBUG)
file_formatter = Formatter('%(asctime)s - %(levelname)-7s :: %(threadName)s : %(message)s', '%d-%b-%Y %H:%M:%S')
file_handler = RFHandler(filename, "a", maxBytes=MAX_SIZE, backupCount=MAX_FILES)
file_handler.setLevel(DEBUG)
file_handler.setFormatter(file_formatter)
logger.addHandler(file_handler)
@ -79,20 +98,20 @@ def initLogger(verbose=1):
#else:
# loglist_handler.setLevel(logging.INFO)
#--
loglist_handler.setLevel(logging.INFO)
loglist_handler.setLevel(INFO)
logger.addHandler(loglist_handler)
# Setup console logger
if verbose:
console_formatter = logging.Formatter('%(asctime)s - %(levelname)s :: %(threadName)s : %(message)s', '%d-%b-%Y %H:%M:%S')
console_handler = logging.StreamHandler()
console_formatter = Formatter('%(asctime)s - %(levelname)s :: %(threadName)s : %(message)s', '%d-%b-%Y %H:%M:%S')
console_handler = StreamHandler()
console_handler.setFormatter(console_formatter)
#print 'verbose is ' + str(verbose)
#if verbose == 2:
# console_handler.setLevel(logging.DEBUG)
#else:
# console_handler.setLevel(logging.INFO)
console_handler.setLevel(logging.INFO)
console_handler.setLevel(INFO)
logger.addHandler(console_handler)
View File
@ -374,7 +374,8 @@ def findComic(name, mode, issue, limityear=None, explicit=None, type=None):
})
#logger.fdebug('year: ' + str(xmlYr) + ' - constraint met: ' + str(xmlTag) + '[' + str(xmlYr) + '] --- 4050-' + str(xmlid))
else:
logger.fdebug('year: ' + str(xmlYr) + ' - contraint not met. Has to be within ' + str(limityear))
pass
#logger.fdebug('year: ' + str(xmlYr) + ' - constraint not met. Has to be within ' + str(limityear))
n+=1
#search results are limited to 100 and by pagination now...let's account for this.
countResults = countResults + 100
View File
@ -51,7 +51,6 @@ def newpull():
#918 - Marvel Comics
#952 - Comics & Graphic Novels
# - Magazines
#print ("titlet: " + str(headt))
findurl_link = headt.findAll('a', href=True)[0]
urlID = findurl_link.findNext(text=True)
issue_link = findurl_link['href']
@ -66,13 +65,13 @@ def newpull():
#logger.fdebug('publisher:' + str(isspublisher))
found_iss = headt.findAll('td')
if "Home/1/1/71/920" in issue_link:
logger.fdebug('Ignoring - menu option.')
#logger.fdebug('Ignoring - menu option.')
return
if "PREVIEWS" in headt:
logger.fdebug('Ignoring: ' + found_iss[0])
#logger.fdebug('Ignoring: ' + found_iss[0])
break
if "MAGAZINES" in headt:
logger.fdebug('End.')
#logger.fdebug('End.')
endthis = True
break
if len(found_iss) > 0:
@ -104,7 +103,7 @@ def newpull():
if pl['publisher'] == oldpub:
exceptln = str(pl['ID']) + "\t" + str(pl['name']) + "\t" + str(pl['price'])
else:
exceptln = pl['publisher']
exceptln = pl['publisher'] + "\n" + str(pl['ID']) + "\t" + str(pl['name']) + "\t" + str(pl['price'])
for lb in breakhtml:
exceptln = re.sub(lb,'', exceptln).strip()
View File
@ -1386,7 +1386,7 @@ def searchforissue(issueid=None, new=False, rsscheck=None):
result = myDB.selectone('SELECT * FROM annuals where IssueID=?', [issueid]).fetchone()
mode = 'want_ann'
if result is None:
logger.info("Unable to locate IssueID - you probably should delete/refresh the series.")
logger.fdebug("Unable to locate IssueID - you probably should delete/refresh the series.")
return
ComicID = result['ComicID']
comic = myDB.selectone('SELECT * FROM comics where ComicID=?', [ComicID]).fetchone()
View File
@ -51,8 +51,8 @@ def dbUpdate(ComicIDList=None, calledfrom=None):
else:
if CV_EXcomicid['variloop'] == '99':
mismatch = "yes"
if ComicID[:1] == "G": threading.Thread(target=importer.GCDimport, args=[ComicID]).start()
else: threading.Thread(target=importer.addComictoDB, args=[ComicID,mismatch]).start()
if ComicID[:1] == "G": mylar.importer.GCDimport(ComicID)
else: importer.addComictoDB(ComicID,mismatch)
else:
if mylar.CV_ONETIMER == 1:
logger.fdebug("CV_OneTimer option enabled...")
@ -624,16 +624,16 @@ def forceRescan(ComicID,archive=None,module=None):
if archive is None:
tmpval = filechecker.listFiles(dir=rescan['ComicLocation'], watchcomic=rescan['ComicName'], Publisher=rescan['ComicPublisher'], AlternateSearch=altnames)
comiccnt = int(tmpval['comiccount'])
logger.info('comiccnt is:' + str(comiccnt))
logger.fdebug(module + 'comiccnt is:' + str(comiccnt))
fca.append(tmpval)
if mylar.MULTIPLE_DEST_DIRS is not None and mylar.MULTIPLE_DEST_DIRS != 'None' and os.path.join(mylar.MULTIPLE_DEST_DIRS, os.path.basename(rescan['ComicLocation'])) != rescan['ComicLocation']:
logger.info('multiple_dest_dirs:' + mylar.MULTIPLE_DEST_DIRS)
logger.info('dir: ' + rescan['ComicLocation'])
logger.info('os.path.basename: ' + os.path.basename(rescan['ComicLocation']))
logger.fdebug(module + 'multiple_dest_dirs:' + mylar.MULTIPLE_DEST_DIRS)
logger.fdebug(module + 'dir: ' + rescan['ComicLocation'])
logger.fdebug(module + 'os.path.basename: ' + os.path.basename(rescan['ComicLocation']))
pathdir = os.path.join(mylar.MULTIPLE_DEST_DIRS, os.path.basename(rescan['ComicLocation']))
logger.info(module + ' Now checking files for ' + rescan['ComicName'] + ' (' + str(rescan['ComicYear']) + ') in :' + pathdir )
tmpv = filechecker.listFiles(dir=pathdir, watchcomic=rescan['ComicName'], Publisher=rescan['ComicPublisher'], AlternateSearch=altnames)
logger.info('tmpv filecount: ' + str(tmpv['comiccount']))
logger.fdebug(module + 'tmpv filecount: ' + str(tmpv['comiccount']))
comiccnt += int(tmpv['comiccount'])
fca.append(tmpv)
else:
@ -712,199 +712,142 @@ def forceRescan(ComicID,archive=None,module=None):
break
temploc= tmpfc['JusttheDigits'].replace('_', ' ')
# temploc = tmpfc['ComicFilename'].replace('_', ' ')
temploc = re.sub('[\#\']', '', temploc)
logger.fdebug(module + ' temploc: ' + str(temploc))
if 'annual' not in temploc.lower():
#remove the extension here
extensions = ('.cbr','.cbz')
extensions = ('.cbr','.cbz','.cb7')
if temploc.lower().endswith(extensions):
logger.fdebug(module + ' Removed extension for issue: ' + str(temploc))
temploc = temploc[:-4]
# deccnt = str(temploc).count('.')
# if deccnt > 1:
#logger.fdebug('decimal counts are :' + str(deccnt))
#if the file is formatted with '.' in place of spaces we need to adjust.
#before replacing - check to see if digits on either side of decimal and if yes, DON'T REMOVE
# occur=1
# prevstart = 0
# digitfound = "no"
# decimalfound = "no"
# tempreconstruct = ''
# while (occur <= deccnt):
# n = occur
# start = temploc.find('.')
# while start >=0 and n > 1:
# start = temploc.find('.', start+len('.'))
# n-=1
# #logger.fdebug('occurance ' + str(occur) + ' of . at position: ' + str(start))
# if temploc[prevstart:start].isdigit():
# if digitfound == "yes":
# #logger.fdebug('this is a decimal, assuming decimal issue.')
# decimalfound = "yes"
# reconst = "." + temploc[prevstart:start] + " "
# else:
# #logger.fdebug('digit detected.')
# digitfound = "yes"
# reconst = temploc[prevstart:start]
# else:
# reconst = temploc[prevstart:start] + " "
# #logger.fdebug('word: ' + reconst)
# tempreconstruct = tempreconstruct + reconst
# #logger.fdebug('tempreconstruct is : ' + tempreconstruct)
# prevstart = (start+1)
# occur+=1
# #logger.fdebug('word: ' + temploc[prevstart:])
# tempreconstruct = tempreconstruct + " " + temploc[prevstart:]
# #logger.fdebug('final filename to use is : ' + str(tempreconstruct))
# temploc = tempreconstruct
#logger.fdebug("checking " + str(temploc))
#fcnew_b4 = shlex.split(str(temploc))
fcnew_af = re.findall('[^\()]+', temploc)
fcnew = shlex.split(fcnew_af[0])
fcn = len(fcnew)
n = 0
while (n <= iscnt):
som = 0
while True:
try:
reiss = reissues[n]
except IndexError:
break
# int_iss, iss_except = helpers.decimal_issue(reiss['Issue_Number'])
int_iss = helpers.issuedigits(reiss['Issue_Number'])
issyear = reiss['IssueDate'][:4]
old_status = reiss['Status']
issname = reiss['IssueName']
#logger.fdebug('integer_issue:' + str(int_iss) + ' ... status: ' + str(old_status))
#if comic in format of "SomeSeries 5(c2c)(2013).cbr" whatever...it'll die.
#can't distinguish the 5(c2c) to tell it's the issue #...
fnd_iss_except = 'None'
#print ("Issue, int_iss, iss_except: " + str(reiss['Issue_Number']) + "," + str(int_iss) + "," + str(iss_except))
while (som < fcn):
#counts get buggered up when the issue is the last field in the filename - ie. '50.cbr'
#logger.fdebug('checking word - ' + str(fcnew[som]))
if ".cbr" in fcnew[som].lower():
fcnew[som] = fcnew[som].replace(".cbr", "")
elif ".cbz" in fcnew[som].lower():
fcnew[som] = fcnew[som].replace(".cbz", "")
if "(c2c)" in fcnew[som].lower():
fcnew[som] = fcnew[som].replace("(c2c)", " ")
get_issue = shlex.split(str(fcnew[som]))
if fcnew[som] != " ":
fcnew[som] = get_issue[0]
if som+1 < len(fcnew) and len(fcnew[som+1]) == 2:
#print "fcnew[som+1]: " + str(fcnew[som+1])
#print "fcnew[som]: " + str(fcnew[som])
if 'au' in fcnew[som+1].lower():
#if the 'AU' is in 005AU vs 005 AU it will yield different results.
fcnew[som] = fcnew[som] + 'AU'
fcnew[som+1] = '93939999919190933'
logger.info(module + ' AU Detected seperate from issue - combining and continuing')
elif 'ai' in fcnew[som+1].lower():
#if the 'AI' is in 005AI vs 005 AI it will yield different results.
fcnew[som] = fcnew[som] + 'AI'
fcnew[som+1] = '93939999919190933'
logger.info(module + ' AI Detected seperate from issue - combining and continuing')
#sometimes scanners refuse to use spaces between () and lump the issue right at the start
#mylar assumes it's all one word in this case..let's dump the brackets.
fcdigit = helpers.issuedigits(fcnew[som])
fcdigit = helpers.issuedigits(temploc)
#logger.fdebug("fcdigit: " + str(fcdigit))
#logger.fdebug("int_iss: " + str(int_iss))
if int(fcdigit) == int_iss:
logger.fdebug(module + ' [' + str(reiss['IssueID']) + '] Issue match - fcdigit: ' + str(fcdigit) + ' ... int_iss: ' + str(int_iss))
if int(fcdigit) == int_iss:
logger.fdebug(module + ' [' + str(reiss['IssueID']) + '] Issue match - fcdigit: ' + str(fcdigit) + ' ... int_iss: ' + str(int_iss))
if '-' in temploc and temploc.find(reiss['Issue_Number']) > temploc.find('-'):
logger.fdebug(module + ' I have detected a possible Title in the filename')
logger.fdebug(module + ' the issue # has occured after the -, so I assume that it is part of the Title')
break
multiplechk = False
for d in issuedupechk:
if int(d['fcdigit']) == int(fcdigit):
if len(mc_issue) > 1:
logger.fdebug('[Initial Check] multiple check issue detected - more than one issue with identical numbering for series.')
for mi in mc_issue:
if (mi['IssueYear'] in tmpfc['ComicFilename']):# and (int(d['issueyear']) == int(mi['IssueYear'])) and (d['fcdigit'] == mi['Int_IssueNumber']):
logger.fdebug('[Initial Check] detected : ' + str(mi['IssueYear']) + ' within filename.')
multiplechk = False
issuedupe = "no"
break
else:
logger.fdebug('[Initial Check] ' + str(mi['Int_IssueNumber']) + ' - did not detect year within filename - expecting (' + str(mi['IssueYear']) + '). Assuming this is the identical numbered issue.')
multiplechk = True
if multiplechk == False: break
if multiplechk == True:
logger.fdebug(module + ' Duplicate issue detected - not counting this: ' + str(tmpfc['ComicFilename']))
#logger.fdebug(module + ' is a duplicate of ' + d['filename'])
#logger.fdebug('fcdigit:' + str(fcdigit) + ' === dupedigit: ' + str(d['fcdigit']))
issuedupe = "yes"
break
if issuedupe == "no":
foundchk = False
#make sure we are adding the correct issue.
for d in issuedupechk:
if int(d['fcdigit']) == int(fcdigit):
if len(mc_issue) > 1 and multiplechk == False:
#if len(mc_issue) > 1 and multiplechk != False and any d['Int_IssueNumber'] == int_iss for d in mc_issue):
for mi in mc_issue:
logger.fdebug('[DupeCheck]' + str(mi['IssueID']) + ' comparing to ' + str(d['issueid']))
if mi['IssueID'] != d['issueid'] and mi['IssueID'] == reiss['IssueID']:
logger.fdebug('Most likely, I should not be marking this as a dupe.')
if (mi['IssueYear'] in tmpfc['ComicFilename']) and (d['fcdigit'] == mi['Int_IssueNumber']):
logger.fdebug('[DupeCheck] detected : ' + str(mi['IssueYear']) + ' within filename. This is an issue that happens to have duplicate numbering and is acceptable')
foundchk = True
break
else:
logger.fdebug('[DupeCheck] ' + str(mi['Int_IssueNumber']) + ': did not detect year (' + str(mi['IssueYear']) + ').')
foundchk = False
else:
foundchk = True
if foundchk == True:
logger.fdebug('[DupeCheck] This is not a duplicate. foundchk is : ' + str(foundchk))
letitgo = True
break
if foundchk == False:
logger.fdebug(module + ' Matched...issue: ' + rescan['ComicName'] + '#' + reiss['Issue_Number'] + ' --- ' + str(int_iss))
havefiles+=1
haveissue = "yes"
isslocation = str(tmpfc['ComicFilename'])
issSize = str(tmpfc['ComicSize'])
logger.fdebug(module + ' .......filename: ' + str(isslocation))
logger.fdebug(module + ' .......filesize: ' + str(tmpfc['ComicSize']))
# to avoid duplicate issues which screws up the count...let's store the filename issues then
# compare earlier...
issuedupechk.append({'fcdigit': int(fcdigit),
'filename': tmpfc['ComicFilename'],
'issueyear': issyear,
'issueid': reiss['IssueID']})
break
#else:
# if the issue # matches, but there is no year present - still match.
# determine a way to match on year if present, or no year (currently).
if issuedupe == "yes":
logger.fdebug(module + ' I should break out here because of a dupe.')
if '-' in temploc and temploc.find(reiss['Issue_Number']) > temploc.find('-'):
logger.fdebug(module + ' I have detected a possible Title in the filename')
logger.fdebug(module + ' the issue # has occured after the -, so I assume that it is part of the Title')
break
som+=1
#baseline these to default to normal scanning
multiplechk = False
issuedupe = "no"
foundchk = False
#check here if multiple identically-numbered issues exist for the series
if len(mc_issue) > 1:
for mi in mc_issue:
if mi['Int_IssueNumber'] == int_iss:
if mi['IssueID'] == reiss['IssueID']:
logger.fdebug(module + ' IssueID matches to multiple issues : ' + str(mi['IssueID']) + '. Checking dupe.')
logger.fdebug(module + ' miISSUEYEAR: ' + str(mi['IssueYear']) + ' -- issyear : ' + str(issyear))
if any(mi['IssueID'] == d['issueid'] for d in issuedupechk):
logger.fdebug(module + ' IssueID already within dupe. Checking next if available.')
multiplechk = True
break
if (mi['IssueYear'] in tmpfc['ComicFilename']) and (issyear == mi['IssueYear']):
logger.fdebug(module + ' Matched to year within filename : ' + str(issyear))
multiplechk = False
break
else:
logger.fdebug(module + ' Did not match to year within filename : ' + str(issyear))
multiplechk = True
if multiplechk == True:
n+=1
continue
#this will detect duplicate filenames within the same directory.
for di in issuedupechk:
if di['fcdigit'] == fcdigit:
#base off of config - base duplication keep on filesize or file-type (or both)
logger.fdebug('[DUPECHECK] Duplicate issue detected [' + di['filename'] + '] [' + tmpfc['ComicFilename'] + ']')
# mylar.DUPECONSTRAINT = 'filesize' / 'filetype-cbr' / 'filetype-cbz'
logger.fdebug('[DUPECHECK] Based on duplication preferences I will retain based on : ' + mylar.DUPECONSTRAINT)
removedupe = False
if 'cbr' in mylar.DUPECONSTRAINT or 'cbz' in mylar.DUPECONSTRAINT:
if 'cbr' in mylar.DUPECONSTRAINT:
#this has to be configured in config - either retain cbr or cbz.
if tmpfc['ComicFilename'].endswith('.cbz'):
#keep di['filename']
logger.fdebug('[DUPECHECK-CBR PRIORITY] [#' + reiss['Issue_Number'] + '] Retaining currently scanned in file : ' + di['filename'])
issuedupe = "yes"
break
else:
#keep tmpfc['ComicFilename']
logger.fdebug('[DUPECHECK-CBR PRIORITY] [#' + reiss['Issue_Number'] + '] Retaining newly scanned in file : ' + tmpfc['ComicFilename'])
removedupe = True
elif 'cbz' in mylar.DUPECONSTRAINT:
if tmpfc['ComicFilename'].endswith('.cbr'):
#keep di['filename']
logger.fdebug('[DUPECHECK-CBZ PRIORITY] [#' + reiss['Issue_Number'] + '] Retaining currently scanned in filename : ' + di['filename'])
issuedupe = "yes"
break
else:
#keep tmpfc['ComicFilename']
logger.fdebug('[DUPECHECK-CBZ PRIORITY] [#' + reiss['Issue_Number'] + '] Retaining newly scanned in filename : ' + tmpfc['ComicFilename'])
removedupe = True
if mylar.DUPECONSTRAINT == 'filesize':
if tmpfc['ComicSize'] <= di['filesize']:
logger.fdebug('[DUPECHECK-FILESIZE PRIORITY] [#' + reiss['Issue_Number'] + '] Retaining currently scanned in filename : ' + di['filename'])
issuedupe = "yes"
break
else:
logger.fdebug('[DUPECHECK-FILESIZE PRIORITY] [#' + reiss['Issue_Number'] + '] Retaining newly scanned in filename : ' + tmpfc['ComicFilename'])
removedupe = True
if removedupe:
#need to remove the entry from issuedupechk so can add new one.
#tuple(y for y in x if y) for x in a
issuedupe_temp = []
for x in issuedupechk:
if x['filename'] != di['filename']:
issuedupe_temp.append(x)
issuedupechk = issuedupe_temp
foundchk = False
break
if issuedupe == "no":
if foundchk == False:
logger.fdebug(module + ' Matched...issue: ' + rescan['ComicName'] + '#' + reiss['Issue_Number'] + ' --- ' + str(int_iss))
havefiles+=1
haveissue = "yes"
isslocation = str(tmpfc['ComicFilename'])
issSize = str(tmpfc['ComicSize'])
logger.fdebug(module + ' .......filename: ' + str(isslocation))
logger.fdebug(module + ' .......filesize: ' + str(tmpfc['ComicSize']))
# to avoid duplicate issues, which screw up the count, store this file's details
# here so the dupe check earlier in the loop can compare against them...
issuedupechk.append({'fcdigit': fcdigit,
'filename': tmpfc['ComicFilename'],
'filesize': tmpfc['ComicSize'],
'issueyear': issyear,
'issueid': reiss['IssueID']})
break
if issuedupe == "yes":
logger.fdebug(module + ' I should break out here because of a dupe.')
break
if haveissue == "yes" or issuedupe == "yes": break
n+=1
else:

View File

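The duplicate handling above pivots on mylar.DUPECONSTRAINT ('filesize', 'filetype-cbr', or 'filetype-cbz'). A minimal standalone sketch of the retention rule, with the hypothetical retain_duplicate/existing/new names standing in for the scanner's di and tmpfc records:

    # hypothetical helper (not part of Mylar) mirroring the DUPECONSTRAINT logic above
    def retain_duplicate(existing, new, dupeconstraint):
        # existing / new: {'filename': ..., 'filesize': ...}
        if dupeconstraint == 'filetype-cbr':
            # prefer cbr: keep the already-stored file when the new scan is a cbz
            return existing['filename'] if new['filename'].endswith('.cbz') else new['filename']
        if dupeconstraint == 'filetype-cbz':
            # prefer cbz: keep the already-stored file when the new scan is a cbr
            return existing['filename'] if new['filename'].endswith('.cbr') else new['filename']
        # default 'filesize': keep whichever file is larger (ties go to the existing file)
        return existing['filename'] if new['filesize'] <= existing['filesize'] else new['filename']

For example, retain_duplicate({'filename': 'x.cbr', 'filesize': 9000000}, {'filename': 'x.cbz', 'filesize': 12000000}, 'filesize') keeps the larger cbz.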
@@ -1,3 +1,4 @@
# This file is part of Mylar.
#
# Mylar is free software: you can redistribute it and/or modify
@@ -320,12 +321,18 @@ class WebInterface(object):
raise cherrypy.HTTPRedirect("comicDetails?ComicID=%s" % comicid)
addComic.exposed = True
def addbyid(self,comicid):
def addbyid(self,comicid,calledby=None,imported=None,ogcname=None):
mismatch = "no"
logger.info('Attempting to add directly by ComicVineID: ' + str(comicid))
if comicid.startswith('4050-'): comicid = re.sub('4050-','', comicid)
threading.Thread(target=importer.addComictoDB, args=[comicid,mismatch,None]).start()
raise cherrypy.HTTPRedirect("comicDetails?ComicID=%s" % comicid)
threading.Thread(target=importer.addComictoDB, args=[comicid,mismatch,None,imported,ogcname]).start()
logger.fdebug('calledby: ' + str(calledby))
if calledby == True or calledby == 'True':
return
elif calledby == 'web-import':
raise cherrypy.HTTPRedirect("importResults")
else:
raise cherrypy.HTTPRedirect("comicDetails?ComicID=%s" % comicid)
addbyid.exposed = True
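The new calledby parameter turns addbyid() into a shared entry point; a sketch of the routing it implements (route_after_add and its return targets are illustrative only):

    # sketch: where addbyid() sends the caller after spawning the import thread
    def route_after_add(calledby, comicid):
        if calledby == True or calledby == 'True':
            return None                                  # internal caller (e.g. preSearchit) keeps control
        elif calledby == 'web-import':
            return "importResults"                       # back to the import results page
        else:
            return "comicDetails?ComicID=%s" % comicid   # default: the series detail page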
def addStoryArc(self, storyarcname, storyarcyear, storyarcpublisher, storyarcissues, arcid, arclist , desc, image):
@@ -512,6 +519,9 @@ class WebInterface(object):
myDB.upsert("readinglist", newVals, newCtrl)
#run the Search for Watchlist matches now.
logger.fdebug('Now searching your watchlist for matches belonging to this story arc.')
self.ArcWatchlist(storyarcid)
raise cherrypy.HTTPRedirect("detailReadlist?StoryArcID=%s&StoryArcName=%s" % (storyarcid, storyarcname))
addStoryArc.exposed = True
@@ -1177,12 +1187,21 @@ class WebInterface(object):
else:
logger.info(u"Story Arc : " + str(SARC) + " queueing selected issue...")
logger.info(u"IssueArcID : " + str(IssueArcID))
#try to load the issue dates - can now sideload issue details.
dateload = myDB.selectone('SELECT * FROM readinglist WHERE IssueArcID=?', [IssueArcID]).fetchone()
if dateload is None:
IssueDate = None
StoreDate = None
else:
IssueDate = dateload['IssueDate']
StoreDate = dateload['StoreDate']
if ComicYear is None: ComicYear = SeriesYear
logger.info(u"Marking " + ComicName + " " + ComicIssue + " as wanted...")
controlValueDict = {"IssueArcID": IssueArcID}
newStatus = {"Status": "Wanted"}
myDB.upsert("readinglist", newStatus, controlValueDict)
foundcom, prov = search.search_init(ComicName=ComicName, IssueNumber=ComicIssue, ComicYear=ComicYear, SeriesYear=None, Publisher=None, IssueDate=None, StoreDate=None, IssueID=None, AlternateSearch=None, UseFuzzy=None, ComicVersion=None, SARC=SARC, IssueArcID=IssueArcID)
foundcom, prov = search.search_init(ComicName=ComicName, IssueNumber=ComicIssue, ComicYear=ComicYear, SeriesYear=None, Publisher=None, IssueDate=IssueDate, StoreDate=StoreDate, IssueID=None, AlternateSearch=None, UseFuzzy=None, ComicVersion=None, SARC=SARC, IssueArcID=IssueArcID)
if foundcom == "yes":
logger.info(u"Downloaded " + ComicName + " #" + ComicIssue + " (" + str(ComicYear) + ")")
#raise cherrypy.HTTPRedirect("readlist")
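Side-loading the dates matters because search_init() previously received IssueDate=None/StoreDate=None for arc issues, which weakens date-based filtering against providers. A compact sketch of the lookup, assuming Mylar's myDB.selectone helper (load_arc_dates itself is hypothetical):

    def load_arc_dates(myDB, issue_arc_id):
        # returns (IssueDate, StoreDate), or (None, None) when the arc row is missing
        row = myDB.selectone('SELECT * FROM readinglist WHERE IssueArcID=?',
                             [issue_arc_id]).fetchone()
        if row is None:
            return None, None
        return row['IssueDate'], row['StoreDate']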
@@ -1383,6 +1402,15 @@ class WebInterface(object):
def pullist(self):
myDB = db.DBConnection()
autowants = myDB.select("SELECT * FROM futureupcoming WHERE Status='Wanted'")
autowant = []
if autowants:
for aw in autowants:
autowant.append({"ComicName": aw['ComicName'],
"IssueNumber": aw['IssueNumber'],
"Publisher": aw['Publisher'],
"Status": aw['Status'],
"DisplayComicName": aw['DisplayComicName']})
weeklyresults = []
wantedcount = 0
popit = myDB.select("SELECT * FROM sqlite_master WHERE name='weekly' and type='table'")
@@ -1397,12 +1425,32 @@ class WebInterface(object):
x = weekly['ISSUE']
if x is not None:
weeklyresults.append({
if not autowant:
weeklyresults.append({
"PUBLISHER" : weekly['PUBLISHER'],
"ISSUE" : weekly['ISSUE'],
"COMIC" : weekly['COMIC'],
"STATUS" : weekly['STATUS']
"STATUS" : weekly['STATUS'],
"AUTOWANT" : False
})
else:
if any(x['ComicName'].lower() == weekly['COMIC'].lower() for x in autowant):
weeklyresults.append({
"PUBLISHER" : weekly['PUBLISHER'],
"ISSUE" : weekly['ISSUE'],
"COMIC" : weekly['COMIC'],
"STATUS" : weekly['STATUS'],
"AUTOWANT" : True
})
else:
weeklyresults.append({
"PUBLISHER" : weekly['PUBLISHER'],
"ISSUE" : weekly['ISSUE'],
"COMIC" : weekly['COMIC'],
"STATUS" : weekly['STATUS'],
"AUTOWANT" : False
})
if weekly['STATUS'] == 'Wanted':
wantedcount +=1
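All three append branches build the same dict and differ only in the AUTOWANT flag, so the block reduces to a single case-insensitive membership test; a sketch (flag_autowant is a hypothetical name):

    def flag_autowant(weekly, autowant):
        # True when the pull-list title matches any auto-wanted future issue
        wanted = any(aw['ComicName'].lower() == weekly['COMIC'].lower() for aw in autowant)
        return {"PUBLISHER": weekly['PUBLISHER'],
                "ISSUE": weekly['ISSUE'],
                "COMIC": weekly['COMIC'],
                "STATUS": weekly['STATUS'],
                "AUTOWANT": wanted}

An empty autowant list yields AUTOWANT False, matching the `if not autowant` branch above.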
@@ -1811,7 +1859,7 @@ class WebInterface(object):
comic = myDB.selectone("SELECT * FROM comics WHERE ComicID=?", [comicid]).fetchone()
comicdir = comic['ComicLocation']
comicname = comic['ComicName']
extensions = ('.cbr', '.cbz')
extensions = ('.cbr', '.cbz', '.cb7')
issues = myDB.select("SELECT * FROM issues WHERE ComicID=?", [comicid])
if mylar.ANNUALS_ON:
issues += myDB.select("SELECT * FROM annuals WHERE ComicID=?", [comicid])
@@ -1986,6 +2034,9 @@ class WebInterface(object):
logger.info("Removed " + str(IssueID) + " from Reading List")
elif StoryArcID:
myDB.action('DELETE from readinglist WHERE StoryArcID=?', [StoryArcID])
stid = 'S' + str(StoryArcID) + '_%'
#delete from the nzblog so searches always find the most current download. nzblog keys on IssueID, but arc entries are prefixed with the ArcID
myDB.action('DELETE from nzblog WHERE IssueID LIKE ?', [stid])
logger.info("Removed " + str(StoryArcID) + " from Story Arcs.")
elif IssueArcID:
myDB.action('DELETE from readinglist WHERE IssueArcID=?', [IssueArcID])
@@ -2239,11 +2290,15 @@ class WebInterface(object):
#cycle through the story arcs here for matches on the watchlist
if sarc_title != arc['storyarc']:
dstloc = os.path.join(mylar.DESTINATION_DIR, 'StoryArcs', arc['storyarc'])
if mylar.STORYARCDIR:
dstloc = os.path.join(mylar.DESTINATION_DIR, 'StoryArcs', arc['storyarc'])
else:
dstloc = os.path.join(mylar.DESTINATION_DIR, mylar.GRABBAG_DIR)
if os.path.isdir(dstloc):
logger.info('Validating Directory (' + dstloc + '). Already exists! Continuing...')
else:
logger.fdebug('Updated Directory doesn not exist! - attempting to create now.')
logger.fdebug('Updated Directory does not exist! - attempting to create now.')
filechecker.validateAndCreateDirectory(dstloc, True)
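The same STORYARCDIR/GRABBAG_DIR branch appears three times in this commit with slight variations (here the grab-bag falls under DESTINATION_DIR; later sites use mylar.GRABBAG_DIR bare). A hypothetical helper that would centralize the rule:

    import os

    def arc_destination(storyarc, destination_dir, use_storyarcdir, grabbag_dir):
        # per-arc folder under StoryArcs when enabled, otherwise the grab-bag directory
        if use_storyarcdir:
            return os.path.join(destination_dir, 'StoryArcs', storyarc)
        return grabbag_dir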
@@ -2269,8 +2324,8 @@ class WebInterface(object):
isschk = myDB.selectone("SELECT * FROM issues WHERE Issue_Number=? AND ComicID=?", [str(GCDissue), comic['ComicID']]).fetchone()
else:
issue_int = helpers.issuedigits(arc['IssueNumber'])
logger.info('int_issue = ' + str(issue_int))
isschk = myDB.selectone("SELECT * FROM issues WHERE Int_IssueNumber=? AND ComicID=?", [issue_int, comic['ComicID']]).fetchone()
logger.fdebug('int_issue = ' + str(issue_int))
isschk = myDB.selectone("SELECT * FROM issues WHERE Int_IssueNumber=? AND ComicID=? AND STATUS !='Snatched'", [issue_int, comic['ComicID']]).fetchone()
if isschk is None:
logger.fdebug("we matched on name, but issue " + str(arc['IssueNumber']) + " doesn't exist for " + comic['ComicName'])
else:
@@ -2306,7 +2361,10 @@ class WebInterface(object):
"IssueNumber": arc['IssueNumber'],
"IssueYear": arc['IssueYear']})
dstloc = os.path.join(mylar.DESTINATION_DIR, 'StoryArcs', arc['storyarc'])
if mylar.STORYARCDIR:
dstloc = os.path.join(mylar.DESTINATION_DIR, 'StoryArcs', arc['storyarc'])
else:
dstloc = mylar.GRABBAG_DIR
logger.fdebug('destination location set to : ' + dstloc)
filechk = filechecker.listFiles(dstloc, arc['ComicName'], Publisher=None, sarc='true')
@@ -2368,27 +2426,32 @@ class WebInterface(object):
if issue['Status'] == 'Downloaded':
issloc = os.path.join(m_arc['match_filedirectory'], issue['Location'])
logger.fdebug('source location set to : ' + issloc)
dstloc = os.path.join(mylar.DESTINATION_DIR, 'StoryArcs', m_arc['match_storyarc'])
if mylar.STORYARCDIR:
dstloc = os.path.join(mylar.DESTINATION_DIR, 'StoryArcs', m_arc['match_storyarc'])
else:
dstloc = mylar.GRABBAG_DIR
logger.fdebug('destination location set to : ' + dstloc)
logger.fdebug('attempting to copy into StoryArc directory')
#copy into StoryArc directory...
if os.path.isfile(issloc):
if not os.path.isfile(dstloc):
if mylar.READ2FILENAME:
readorder = helpers.renamefile_readingorder(m_arc['match_readingorder'])
dfilename = str(readorder) + "-" + issue['Location']
if mylar.COPY2ARCDIR:
logger.fdebug('attempting to copy into StoryArc directory')
#copy into StoryArc directory...
if os.path.isfile(issloc):
if not os.path.isfile(dstloc):
if mylar.READ2FILENAME:
readorder = helpers.renamefile_readingorder(m_arc['match_readingorder'])
dfilename = str(readorder) + "-" + issue['Location']
else:
dfilename = issue['Location']
dstloc = os.path.join(dstloc, dfilename)
logger.fdebug('copying ' + issloc + ' to ' + dstloc)
shutil.copy(issloc, dstloc)
else:
dfilename = issue['Location']
dstloc = os.path.join(mylar.DESTINATION_DIR, 'StoryArcs', m_arc['match_storyarc'], dfilename)
logger.fdebug('copying ' + issloc + ' to ' + dstloc)
shutil.copy(issloc, dstloc)
logger.fdebug('destination file exists: ' + dstloc)
else:
logger.fdebug('destination file exists: ' + dstloc)
else:
logger.fdebug('source file does not exist: ' + issloc)
logger.fdebug('source file does not exist: ' + issloc)
else:
logger.fdebug("We don't have " + issue['ComicName'] + " :# " + str(issue['Issue_Number']))
@@ -2398,19 +2461,20 @@ class WebInterface(object):
myDB.upsert("readinglist",newVal,ctrlVal)
logger.info("Marked " + issue['ComicName'] + " :# " + str(issue['Issue_Number']) + " as Wanted.")
return
ArcWatchlist.exposed = True
def ReadGetWanted(self, StoryArcID):
# this will queue up (ie. make 'Wanted') issues in a given Story Arc that are 'Not Watched'
print StoryArcID
#print StoryArcID
stupdate = []
mode = 'story_arc'
myDB = db.DBConnection()
wantedlist = myDB.select("SELECT * FROM readinglist WHERE StoryArcID=? AND Status is Null", [StoryArcID])
if wantedlist is not None:
for want in wantedlist:
print want
#print want
issuechk = myDB.selectone("SELECT * FROM issues WHERE IssueID=?", [want['IssueArcID']]).fetchone()
SARC = want['StoryArc']
IssueArcID = want['IssueArcID']
@@ -2422,10 +2486,10 @@ class WebInterface(object):
issdate = want['IssueDate']
logger.info("-- NOT a watched series queue.")
logger.info(want['ComicName'] + " -- #" + str(want['IssueNumber']))
logger.info(u"Story Arc : " + str(SARC) + " queueing selected issue...")
logger.info(u"Story Arc : " + str(SARC) + " queueing the selected issue...")
logger.info(u"IssueArcID : " + str(IssueArcID))
logger.info('ComicID: ' + str(s_comicid) + ' --- IssueID: ' + str(s_issueid))
logger.info('StoreDate: ' + str(stdate) + ' --- IssueDate: ' + str(issdate))
logger.info(u"ComicID: " + s_comicid + " --- IssueID: " + s_issueid)
logger.info(u"StoreDate: " + str(stdate) + " --- IssueDate: " + str(issdate))
foundcom, prov = search.search_init(ComicName=want['ComicName'], IssueNumber=want['IssueNumber'], ComicYear=want['IssueYear'], SeriesYear=want['SeriesYear'], Publisher=want['Publisher'], IssueDate=issdate, StoreDate=stdate, IssueID=None, AlternateSearch=None, UseFuzzy=None, ComicVersion=None, SARC=SARC, IssueArcID=IssueArcID, mode=None, rsscheck=None, ComicID=None)
else:
# it's a watched series
@@ -2436,11 +2500,11 @@ class WebInterface(object):
foundcom, prov = search.search_init(ComicName=issuechk['ComicName'], IssueNumber=issuechk['Issue_Number'], ComicYear=issuechk['IssueYear'], SeriesYear=issuechk['SeriesYear'], Publisher=None, IssueDate=None, StoreDate=issuechk['ReleaseDate'], IssueID=issuechk['IssueID'], AlternateSearch=None, UseFuzzy=None, ComicVersion=None, SARC=SARC, IssueArcID=IssueArcID)
if foundcom == "yes":
print "sucessfully found."
logger.fdebug('successfully found.')
#update the status - this is necessary for torrents as they are in 'snatched' status.
updater.foundsearch(s_comicid, s_issueid, mode=mode, provider=prov, SARC=SARC, IssueArcID=IssueArcID)
else:
print "not sucessfully found."
logger.fdebug('not successfully found.')
stupdate.append({"Status": "Wanted",
"IssueArcID": IssueArcID,
"IssueID": "None"})
@@ -2448,7 +2512,7 @@ class WebInterface(object):
watchlistchk = myDB.select("SELECT * FROM readinglist WHERE StoryArcID=? AND Status='Wanted'", [StoryArcID])
if watchlistchk is not None:
for watchchk in watchlistchk:
print "Watchlist hit - " + str(watchchk['ComicName'])
logger.fdebug('Watchlist hit - ' + str(watchchk['ComicName']))
issuechk = myDB.selectone("SELECT * FROM issues WHERE IssueID=?", [watchchk['IssueArcID']]).fetchone()
SARC = watchchk['StoryArc']
IssueArcID = watchchk['IssueArcID']
@@ -2458,9 +2522,21 @@ class WebInterface(object):
s_issueid = None
logger.fdebug("-- NOT a watched series queue.")
logger.fdebug(watchchk['ComicName'] + " -- #" + str(watchchk['IssueNumber']))
logger.info(u"Story Arc : " + str(SARC) + " queueing selected issue...")
logger.info(u"Story Arc : " + str(SARC) + " queueing up the selected issue...")
logger.info(u"IssueArcID : " + str(IssueArcID))
foundcom, prov = search.search_init(ComicName=watchchk['ComicName'], IssueNumber=watchchk['IssueNumber'], ComicYear=watchchk['IssueYEAR'], SeriesYear=watchchk['SeriesYear'], Publisher=watchchk['ComicPublisher'], IssueDate=None, StoreDate=None, IssueID=None, AlternateSearch=None, UseFuzzy=None, ComicVersion=None, SARC=SARC, IssueArcID=IssueArcID, mode=None, rsscheck=None, ComicID=None)
try:
issueyear = watchchk['IssueYEAR']
logger.fdebug('issueYEAR : ' + issueyear)
except:
issueyear = watchchk['StoreDate'][:4]
logger.info('issueyear : ' + str(issueyear))
logger.info('comicname : ' + watchchk['ComicName'])
logger.info('issuenumber : ' + watchchk['IssueNumber'])
logger.info('comicyear : ' + watchchk['SeriesYear'])
logger.info('publisher : ' + watchchk['IssuePublisher'])
logger.info('SARC : ' + SARC)
logger.info('IssueArcID : ' + IssueArcID)
foundcom, prov = search.search_init(ComicName=watchchk['ComicName'], IssueNumber=watchchk['IssueNumber'], ComicYear=issueyear, SeriesYear=watchchk['SeriesYear'], Publisher=watchchk['IssuePublisher'], IssueDate=None, StoreDate=None, IssueID=None, AlternateSearch=None, UseFuzzy=None, ComicVersion=None, SARC=SARC, IssueArcID=IssueArcID, mode=None, rsscheck=None, ComicID=None)
else:
# it's a watched series
s_comicid = issuechk['ComicID']
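The try/except above relies on the string concatenation in the fdebug call raising when IssueYEAR is missing or None, which then triggers the StoreDate fallback. The same intent, spelled out explicitly (resolve_issue_year is a hypothetical helper):

    def resolve_issue_year(row):
        try:
            year = row['IssueYEAR']
        except (KeyError, IndexError):    # sqlite3.Row raises IndexError for unknown columns
            year = None
        if not year:
            year = row['StoreDate'][:4]   # fall back to the year portion of the store date
        return year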
@@ -2469,23 +2545,22 @@ class WebInterface(object):
logger.fdebug(issuechk['ComicName'] + " -- #" + str(issuechk['Issue_Number']))
foundcom,prov = search.search_init(ComicName=issuechk['ComicName'], IssueNumber=issuechk['Issue_Number'], ComicYear=issuechk['IssueYear'], SeriesYear=issuechk['SeriesYear'], Publisher=None, IssueDate=None, StoreDate=issuechk['ReleaseDate'], IssueID=issuechk['IssueID'], AlternateSearch=None, UseFuzzy=None, ComicVersion=None, SARC=SARC, IssueArcID=IssueArcID, mode=None, rsscheck=None, ComicID=None)
if foundcom == "yes":
print "sucessfully found."
updater.foundsearch(s_comicid, s_issueid, mode=mode, provider=prov, SARC=SARC, IssueArcID=IssueArcID)
else:
print "Watchlist issue not sucessfully found."
print "issuearcid: " + str(IssueArcID)
print "issueid: " + str(s_issueid)
logger.fdebug('Watchlist issue not successfully found')
logger.fdebug('issuearcid: ' + str(IssueArcID))
logger.fdebug('issueid: ' + str(s_issueid))
stupdate.append({"Status": "Wanted",
"IssueArcID": IssueArcID,
"IssueID": s_issueid})
if len(stupdate) > 0:
print str(len(stupdate)) + " issues need to get updated to Wanted Status"
logger.fdebug(str(len(stupdate)) + ' issues need to get updated to Wanted Status')
for st in stupdate:
ctrlVal = {'IssueArcID': st['IssueArcID']}
newVal = {'Status': st['Status']}
if st['IssueID']:
print "issueid:" + str(st['IssueID'])
logger.fdebug('issueid:' + str(st['IssueID']))
newVal['IssueID'] = st['IssueID']
myDB.upsert("readinglist", newVal, ctrlVal)
ReadGetWanted.exposed = True
@@ -2508,12 +2583,18 @@ class WebInterface(object):
ReadMassCopy.exposed = True
def importLog(self, ComicName):
def importLog(self, ComicName, SRID=None):
myDB = db.DBConnection()
impchk = myDB.selectone("SELECT * FROM importresults WHERE ComicName=?", [ComicName]).fetchone()
impchk = None
if SRID != 'None':
impchk = myDB.selectone("SELECT * FROM importresults WHERE SRID=?", [SRID]).fetchone()
if impchk is None:
logger.error('No associated log found for this ID : ' + SRID)
if impchk is None:
logger.error(u"No associated log found for this import : " + ComicName)
return
impchk = myDB.selectone("SELECT * FROM importresults WHERE ComicName=?", [ComicName]).fetchone()
if impchk is None:
logger.error('No associated log found for this ComicName : ' + ComicName)
return
implog = impchk['implog'].replace("\n","<br />\n")
return implog
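importLog() now resolves the log in two stages: by SRID when one is supplied, then by ComicName as a fallback. A condensed sketch of the lookup order (fetch_import_log is a hypothetical name):

    def fetch_import_log(myDB, comic_name, srid=None):
        row = None
        if srid and srid != 'None':
            row = myDB.selectone("SELECT * FROM importresults WHERE SRID=?", [srid]).fetchone()
        if row is None:
            row = myDB.selectone("SELECT * FROM importresults WHERE ComicName=?", [comic_name]).fetchone()
        if row is None:
            return None                                 # no log recorded for this import
        return row['implog'].replace("\n", "<br />\n")  # newline -> HTML for display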
@@ -2735,13 +2816,13 @@ class WebInterface(object):
deleteimport.exposed = True
def preSearchit(self, ComicName, comiclist=None, mimp=0, displaycomic=None):
implog = ''
implog = implog + "imp_rename:" + str(mylar.IMP_RENAME) + "\n"
implog = implog + "imp_move:" + str(mylar.IMP_MOVE) + "\n"
if mimp == 0:
comiclist = []
comiclist.append(ComicName)
for cl in comiclist:
implog = ''
implog = implog + "imp_rename:" + str(mylar.IMP_RENAME) + "\n"
implog = implog + "imp_move:" + str(mylar.IMP_MOVE) + "\n"
ComicName = cl
logger.info('comicname is :' + ComicName)
implog = implog + "comicName: " + str(ComicName) + "\n"
@@ -2800,20 +2881,21 @@ class WebInterface(object):
if (result['ComicYear'] not in yearRANGE) or (yearRANGE is None):
if result['ComicYear'] <> "0000":
implog = implog + "adding..." + str(result['ComicYear']) + "\n"
yearRANGE.append(result['ComicYear'])
yearRANGE.append(str(result['ComicYear']))
yearTOP = str(result['ComicYear'])
getiss_num = helpers.issuedigits(getiss)
miniss_num = helpers.issuedigits(str(minISSUE))
startiss_num = helpers.issuedigits(str(startISSUE))
if int(getiss_num) > int(miniss_num):
implog = implog + "issue now set to : " + str(getiss) + " ... it was : " + str(minISSUE) + "\n"
logger.fdebug('Minimum issue now set to : ' + str(getiss) + ' - it was : ' + str(minISSUE))
minISSUE = str(getiss)
if int(getiss_num) < int(startiss_num):
implog = implog + "issue now set to : " + str(getiss) + " ... it was : " + str(startISSUE) + "\n"
logger.fdebug('Start issue now set to : ' + str(getiss) + ' - it was : ' + str(startISSUE))
startISSUE = str(getiss)
if helpers.issuedigits(startISSUE) == 1000: # if it's an issue #1, get the year and assume that's the start.
starttheyear = result['ComicYear']
if helpers.issuedigits(startISSUE) == 1000: # if it's an issue #1, get the year and assume that's the start.
startyear = result['ComicYear']
#taking this outside of the transaction in an attempt to stop db locking.
if mylar.IMP_MOVE and movealreadyonlist == "yes":
@@ -2826,8 +2908,12 @@ class WebInterface(object):
#figure out # of issues and the year range allowable
if starttheyear is None:
if yearTOP > 0:
maxyear = int(yearTOP) - (int(minISSUE) / 12)
yearRANGE.append(str(maxyear))
if helpers.int_num(minISSUE) < 1000:
maxyear = int(yearTOP)
else:
maxyear = int(yearTOP) - (int(minISSUE) / 12)
if str(maxyear) not in yearRANGE:
yearRANGE.append(str(maxyear))
implog = implog + "there is a " + str(maxyear) + " year variation based on the 12 issues/year\n"
else:
implog = implog + "no year detected in any issues...Nulling the value\n"
@@ -2838,7 +2924,7 @@ class WebInterface(object):
#determine a best-guess to # of issues in series
#this needs to be reworked / refined ALOT more.
#minISSUE = highest issue #, startISSUE = lowest issue #
numissues = helpers.int_num(minISSUE) - helpers.int_num(startISSUE)
numissues = helpers.int_num(minISSUE) - helpers.int_num(startISSUE) +1 # add 1 so the count includes the first issue.
#normally minissue would work if the issue #'s started at #1.
implog = implog + "the years involved are : " + str(yearRANGE) + "\n"
implog = implog + "highest issue # is : " + str(minISSUE) + "\n"
@@ -2851,7 +2937,6 @@ class WebInterface(object):
#cnvers = cnsplit[cnwords-1]
ogcname = ComicName
for splitt in cnsplit:
print ("split")
if 'v' in str(splitt):
implog = implog + "possible versioning detected.\n"
if splitt[1:].isdigit():
@@ -2863,8 +2948,8 @@ class WebInterface(object):
mode='series'
displaycomic = helpers.filesafe(ComicName)
logger.info('displaycomic : ' + displaycomic)
logger.info('comicname : ' + ComicName)
logger.fdebug('displaycomic : ' + displaycomic)
logger.fdebug('comicname : ' + ComicName)
if yearRANGE is None:
sresults, explicit = mb.findComic(displaycomic, mode, issue=numissues, explicit='all') #ogcname, mode, issue=numissues, explicit='all') #ComicName, mode, issue=numissues)
else:
@@ -2874,55 +2959,80 @@ class WebInterface(object):
if len(sresults) == 1:
sr = sresults[0]
implog = implog + "only one result...automagik-mode enabled for " + displaycomic + " :: " + str(sr['comicid']) + "\n"
logger.fdebug("only one result...automagik-mode enabled for " + displaycomic + " :: " + str(sr['comicid']))
resultset = 1
# #need to move the files here.
elif len(sresults) == 0:
implog = implog + "no results, removing the year from the agenda and re-querying.\n"
logger.fdebug("no results, removing the year from the agenda and re-querying.")
sresults, explicit = mb.findComic(ogcname, mode, issue=numissues, explicit='all') #ComicName, mode, issue=numissues)
if len(sresults) == 1:
sr = sresults[0]
implog = implog + "only one result...automagik-mode enabled for " + displaycomic + " :: " + str(sr['comicid']) + "\n"
logger.fdebug("only one result...automagik-mode enabled for " + displaycomic + " :: " + str(sr['comicid']))
resultset = 1
else:
resultset = 0
else:
implog = implog + "returning results to screen - more than one possibility.\n"
logger.fdebug("Returning results to Select option - more than one possibility, manual intervention required.")
resultset = 0
#write implog to db here.
print "Writing import log to db for viewing pleasure."
ctrlVal = {"ComicName": ComicName}
newVal = {"implog": implog}
myDB.upsert("importresults", newVal, ctrlVal)
#generate random Search Results ID to allow for easier access for viewing logs / search results.
import random
SRID = str(random.randint(100000,999999))
logger.info('sresults: ' + str(sresults))
#write implog to db here.
ctrlVal = {"ComicName": ogcname} #{"ComicName": ComicName}
newVal = {"implog": implog,
"SRID": SRID}
myDB.upsert("importresults", newVal, ctrlVal)
# store the search results for series that returned more than one result, so the user can select later / whenever they want.
# the SRID generated above is the reference id used to pull these results back.
cVal = {"Series": ComicName}
for sr in sresults:
nVal = {"results": len(sresults),
cVal = {"SRID": SRID,
"comicid": sr['comicid']}
#should store ogcname in here somewhere to account for naming conversions above.
nVal = {"Series": ComicName,
"results": len(sresults),
"publisher": sr['publisher'],
"haveit": sr['haveit'],
"name": sr['name'],
"deck": sr['deck'],
"url": sr['url'],
"description": sr['description'],
"comicid": sr['comicid'],
"comicimage": sr['comicimage'],
"issues": sr['issues'],
"comicyear": sr['comicyear']}
myDB.upsert("searchresults", nVal, cVal)
if resultset == 1:
self.addbyid(sr['comicid'], calledby=True, imported='yes', ogcname=ogcname)
#implog = implog + "ogcname -- " + str(ogcname) + "\n"
cresults = self.addComic(comicid=sr['comicid'],comicname=sr['name'],comicyear=sr['comicyear'],comicpublisher=sr['publisher'],comicimage=sr['comicimage'],comicissues=sr['issues'],imported='yes',ogcname=ogcname) #imported=comicstoIMP,ogcname=ogcname)
return serve_template(templatename="searchfix.html", title="Error Check", comicname=sr['name'], comicid=sr['comicid'], comicyear=sr['comicyear'], comicimage=sr['comicimage'], comicissues=sr['issues'], cresults=cresults, imported='yes', ogcname=str(ogcname))
#cresults = self.addComic(comicid=sr['comicid'],comicname=sr['name'],comicyear=sr['comicyear'],comicpublisher=sr['publisher'],comicimage=sr['comicimage'],comicissues=sr['issues'],imported='yes',ogcname=ogcname) #imported=comicstoIMP,ogcname=ogcname)
#return serve_template(templatename="searchfix.html", title="Error Check", comicname=sr['name'], comicid=sr['comicid'], comicyear=sr['comicyear'], comicimage=sr['comicimage'], comicissues=sr['issues'], cresults=cresults, imported='yes', ogcname=str(ogcname))
#else:
# return serve_template(templatename="searchresults.html", title='Import Results for: "' + displaycomic + '"',searchresults=sresults, type=type, imported='yes', ogcname=ogcname, name=ogcname, explicit=explicit, serinfo=None) #imported=comicstoIMP, ogcname=ogcname)
#status update.
ctrlVal = {"ComicName": ComicName}
newVal = {"Status": 'Imported',
"SRID": SRID,
"ComicID": sr['comicid']}
myDB.upsert("importresults", newVal, ctrlVal)
preSearchit.exposed = True
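The SRID is what ties the pieces together: preSearchit() writes it to importresults alongside the implog, and keys each searchresults row on (SRID, comicid) so importresults_popup() below can pull the candidates back. A sketch of the id itself:

    import random

    def new_srid():
        # random six-digit Search Results ID; collisions are unlikely but not
        # guarded against across the 900000 possible values
        return str(random.randint(100000, 999999))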
def importresults_popup(self,SRID,ComicName,imported=None,ogcname=None):
myDB = db.DBConnection()
results = myDB.select("SELECT * FROM searchresults WHERE SRID=?", [SRID])
if results:
return serve_template(templatename="importresults_popup.html", title="results", searchtext=ComicName, searchresults=results)
else:
logger.warn('There are no search results to view for this entry ' + ComicName + ' [' + str(SRID) + ']. Something is probably wrong.')
return
importresults_popup.exposed = True
def pretty_git(self, br_history):
#in order to 'prettify' the history log for display, we need to break it down so it's line by line.
br_split = br_history.split("\n") #split it on each commit
@@ -2962,7 +3072,6 @@ class WebInterface(object):
pretty_git.exposed = True
#---
def config(self):
interface_dir = os.path.join(mylar.PROG_DIR, 'data/interfaces/')
interface_list = [ name for name in os.listdir(interface_dir) if os.path.isdir(os.path.join(interface_dir, name)) ]
#----
@@ -3058,7 +3167,7 @@ class WebInterface(object):
"enable_kat" : helpers.checked(mylar.ENABLE_KAT),
"enable_cbt" : helpers.checked(mylar.ENABLE_CBT),
"cbt_passkey" : mylar.CBT_PASSKEY,
"snatchedtorrent_notify" : mylar.SNATCHEDTORRENT_NOTIFY,
"snatchedtorrent_notify" : helpers.checked(mylar.SNATCHEDTORRENT_NOTIFY),
"destination_dir" : mylar.DESTINATION_DIR,
"create_folders" : helpers.checked(mylar.CREATE_FOLDERS),
"chmod_dir" : mylar.CHMOD_DIR,
@@ -3070,6 +3179,7 @@ class WebInterface(object):
"use_maxsize" : helpers.checked(mylar.USE_MAXSIZE),
"maxsize" : mylar.MAXSIZE,
"interface_list" : interface_list,
"dupeconstraint" : mylar.DUPECONSTRAINT,
"autowant_all" : helpers.checked(mylar.AUTOWANT_ALL),
"autowant_upcoming" : helpers.checked(mylar.AUTOWANT_UPCOMING),
"comic_cover_local" : helpers.checked(mylar.COMIC_COVER_LOCAL),
@@ -3278,14 +3388,18 @@ class WebInterface(object):
raise cherrypy.HTTPRedirect("comicDetails?ComicID=%s" % ComicID)
comic_config.exposed = True
def readOptions(self, StoryArcID=None, StoryArcName=None, read2filename=0, storyarcdir=0):
def readOptions(self, StoryArcID=None, StoryArcName=None, read2filename=0, storyarcdir=0, copy2arcdir=0):
logger.fdebug('initial values - read2filename: ' + str(mylar.READ2FILENAME) + ' / storyarcdir: ' + str(mylar.STORYARCDIR) + ' / copy2arcdir: ' + str(mylar.COPY2ARCDIR))
mylar.READ2FILENAME = int(read2filename)
mylar.STORYARCDIR = int(storyarcdir)
mylar.COPY2ARCDIR = int(copy2arcdir)
logger.fdebug('updated values - read2filename: ' + str(mylar.READ2FILENAME) + ' / storyarcdir: ' + str(mylar.STORYARCDIR) + ' / copy2arcdir: ' + str(mylar.COPY2ARCDIR))
mylar.config_write()
#force the check/creation of directory com_location here
@@ -3313,7 +3427,7 @@ class WebInterface(object):
prowl_enabled=0, prowl_onsnatch=0, prowl_keys=None, prowl_priority=None, nma_enabled=0, nma_apikey=None, nma_priority=0, nma_onsnatch=0, pushover_enabled=0, pushover_onsnatch=0, pushover_apikey=None, pushover_userkey=None, pushover_priority=None, boxcar_enabled=0, boxcar_onsnatch=0, boxcar_token=None,
pushbullet_enabled=0, pushbullet_apikey=None, pushbullet_deviceid=None, pushbullet_onsnatch=0,
preferred_quality=0, move_files=0, rename_files=0, add_to_csv=1, cvinfo=0, lowercase_filenames=0, folder_format=None, file_format=None, enable_extra_scripts=0, extra_scripts=None, enable_pre_scripts=0, pre_scripts=None, post_processing=0, syno_fix=0, search_delay=None, chmod_dir=0777, chmod_file=0660, cvapifix=0,
tsab=None, destination_dir=None, create_folders=1, replace_spaces=0, replace_char=None, use_minsize=0, minsize=None, use_maxsize=0, maxsize=None, autowant_all=0, autowant_upcoming=0, comic_cover_local=0, zero_level=0, zero_level_n=None, interface=None, **kwargs):
tsab=None, destination_dir=None, create_folders=1, replace_spaces=0, replace_char=None, use_minsize=0, minsize=None, use_maxsize=0, maxsize=None, autowant_all=0, autowant_upcoming=0, comic_cover_local=0, zero_level=0, zero_level_n=None, interface=None, dupeconstraint=None, **kwargs):
mylar.COMICVINE_API = comicvine_api
mylar.HTTP_HOST = http_host
mylar.HTTP_PORT = http_port
@@ -3431,6 +3545,7 @@ class WebInterface(object):
mylar.AUTOWANT_UPCOMING = autowant_upcoming
mylar.COMIC_COVER_LOCAL = comic_cover_local
mylar.INTERFACE = interface
mylar.DUPECONSTRAINT = dupeconstraint
mylar.ENABLE_EXTRA_SCRIPTS = enable_extra_scripts
mylar.EXTRA_SCRIPTS = extra_scripts
mylar.ENABLE_PRE_SCRIPTS = enable_pre_scripts