Torznab Name
Torznab Host
Verify SSL
Torznab API
Torznab Category
Enabled
");
var tortestButton = $("
");
var torremoveButton = $("
");
torremoveButton.click(function() {
@@ -2322,10 +2334,11 @@
$(".torznabtest").click(function () {
var torznab = this.attributes["name"].value.replace('torznab_test', '');
- var imagechk = document.getElementById("tornabstatus"+torznab);
+ var imagechk = document.getElementById("torznabstatus"+torznab);
var name = document.getElementById("torznab_name"+torznab).value;
var host = document.getElementById("torznab_host"+torznab).value;
- var apikey = document.getElementById("torznab_api"+torznab).value;
+ var ssl = document.getElementById("torznab_verify"+torznab).checked;
+ var apikey = document.getElementById("torznab_apikey"+torznab).value;
$.get("testtorznab",
{ name: name, host: host, ssl: ssl, apikey: apikey },
function(data){
diff --git a/data/interfaces/default/header.html b/data/interfaces/default/header.html
new file mode 100644
index 00000000..b45a7fd5
--- /dev/null
+++ b/data/interfaces/default/header.html
@@ -0,0 +1,16 @@
+<%page args="jscolor=False"/>
+
+
Mylar WebViewer
+
+
+
+
+
+
+
+
+
+ % if jscolor is True:
+
+ % endif
+
\ No newline at end of file
diff --git a/data/interfaces/default/images/readabook.png b/data/interfaces/default/images/readabook.png
new file mode 100644
index 00000000..441c2349
Binary files /dev/null and b/data/interfaces/default/images/readabook.png differ
diff --git a/data/interfaces/default/manage.html b/data/interfaces/default/manage.html
index 0b1044dc..d07b8d6b 100755
--- a/data/interfaces/default/manage.html
+++ b/data/interfaces/default/manage.html
@@ -32,17 +32,23 @@
%if mylar.IMPORT_STATUS == 'Import completed.':
%else:
%endif
%else:
%endif
@@ -277,7 +283,9 @@
};
function turnitoff() {
CheckEnabled = false;
- clearInterval(ImportTimer);
+ if (typeof ImportTimer !== 'undefined') {
+ clearInterval(ImportTimer);
+ };
};
function turniton() {
if (CheckEnabled == false) {
diff --git a/data/interfaces/default/read.html b/data/interfaces/default/read.html
new file mode 100644
index 00000000..316e6643
--- /dev/null
+++ b/data/interfaces/default/read.html
@@ -0,0 +1,114 @@
+<%!
+ import mylar
+%>
+<%
+ now_page = pages[current_page]
+%>
+
+
+ <%include file="header.html" />
+
+
+
+% if (current_page + 1) == 1:
+
help_outline
+% endif
+
+
+
+
+
+ arrow_back
+
+
+
+
+
+
+ Home
+
+
+ On Page ${current_page + 1} of ${nop} Pages
+
+
+ Close Book
+
+
+ Fit Comic to Height/Width/No Fit
+
+
+ Width
+ Height
+ No Fit
+
+
+
+
+
+ arrow_forward
+
+
+
+
+
+% if size == "wide":
+
+% elif size == "high":
+
+% elif size == "norm":
+
+% else:
+
+% endif
+
+
+
+
+
+
\ No newline at end of file
diff --git a/data/interfaces/default/weeklypull.html b/data/interfaces/default/weeklypull.html
index cc462308..f840f14b 100755
--- a/data/interfaces/default/weeklypull.html
+++ b/data/interfaces/default/weeklypull.html
@@ -369,6 +369,8 @@
"sInfoFiltered":"(filtered from _MAX_ total issues)",
"sSearch": ""},
"bStateSave": true,
+                "stateSave": true,
+                "stateDuration": 0,
"iDisplayLength": 25,
"sPaginationType": "full_numbers",
"aaSorting": [[0, 'asc']]
diff --git a/data/js/jscolor.min.js b/data/js/jscolor.min.js
new file mode 100644
index 00000000..2a7a788b
--- /dev/null
+++ b/data/js/jscolor.min.js
@@ -0,0 +1,10 @@
+/**
+ * jscolor - JavaScript Color Picker
+ *
+ * @link http://jscolor.com
+ * @license For open source use: GPLv3
+ * For commercial use: JSColor Commercial License
+ * @author Jan Odvarko
+ *
+ * See usage examples at http://jscolor.com/examples/
+ */"use strict";window.jscolor||(window.jscolor=function(){var e={register:function(){e.attachDOMReadyEvent(e.init),e.attachEvent(document,"mousedown",e.onDocumentMouseDown),e.attachEvent(document,"touchstart",e.onDocumentTouchStart),e.attachEvent(window,"resize",e.onWindowResize)},init:function(){e.jscolor.lookupClass&&e.jscolor.installByClassName(e.jscolor.lookupClass)},tryInstallOnElements:function(t,n){var r=new RegExp("(^|\\s)("+n+")(\\s*(\\{[^}]*\\})|\\s|$)","i");for(var i=0;i
s[u]?-r[u]+n[u]+i[u]/2>s[u]/2&&n[u]+i[u]-o[u]>=0?n[u]+i[u]-o[u]:n[u]:n[u],-r[a]+n[a]+i[a]+o[a]-l+l*f>s[a]?-r[a]+n[a]+i[a]/2>s[a]/2&&n[a]+i[a]-l-l*f>=0?n[a]+i[a]-l-l*f:n[a]+i[a]-l+l*f:n[a]+i[a]-l+l*f>=0?n[a]+i[a]-l+l*f:n[a]+i[a]-l-l*f];var h=c[u],p=c[a],d=t.fixed?"fixed":"absolute",v=(c[0]+o[0]>n[0]||c[0]2)switch(e.mode.charAt(2).toLowerCase()){case"s":return"s";case"v":return"v"}return null},onDocumentMouseDown:function(t){t||(t=window.event);var n=t.target||t.srcElement;n._jscLinkedInstance?n._jscLinkedInstance.showOnClick&&n._jscLinkedInstance.show():n._jscControlName?e.onControlPointerStart(t,n,n._jscControlName,"mouse"):e.picker&&e.picker.owner&&e.picker.owner.hide()},onDocumentTouchStart:function(t){t||(t=window.event);var n=t.target||t.srcElement;n._jscLinkedInstance?n._jscLinkedInstance.showOnClick&&n._jscLinkedInstance.show():n._jscControlName?e.onControlPointerStart(t,n,n._jscControlName,"touch"):e.picker&&e.picker.owner&&e.picker.owner.hide()},onWindowResize:function(t){e.redrawPosition()},onParentScroll:function(t){e.picker&&e.picker.owner&&e.picker.owner.hide()},_pointerMoveEvent:{mouse:"mousemove",touch:"touchmove"},_pointerEndEvent:{mouse:"mouseup",touch:"touchend"},_pointerOrigin:null,_capturedTarget:null,onControlPointerStart:function(t,n,r,i){var s=n._jscInstance;e.preventDefault(t),e.captureTarget(n);var o=function(s,o){e.attachGroupEvent("drag",s,e._pointerMoveEvent[i],e.onDocumentPointerMove(t,n,r,i,o)),e.attachGroupEvent("drag",s,e._pointerEndEvent[i],e.onDocumentPointerEnd(t,n,r,i))};o(document,[0,0]);if(window.parent&&window.frameElement){var u=window.frameElement.getBoundingClientRect(),a=[-u.left,-u.top];o(window.parent.window.document,a)}var 
f=e.getAbsPointerPos(t),l=e.getRelPointerPos(t);e._pointerOrigin={x:f.x-l.x,y:f.y-l.y};switch(r){case"pad":switch(e.getSliderComponent(s)){case"s":s.hsv[1]===0&&s.fromHSV(null,100,null);break;case"v":s.hsv[2]===0&&s.fromHSV(null,null,100)}e.setPad(s,t,0,0);break;case"sld":e.setSld(s,t,0)}e.dispatchFineChange(s)},onDocumentPointerMove:function(t,n,r,i,s){return function(t){var i=n._jscInstance;switch(r){case"pad":t||(t=window.event),e.setPad(i,t,s[0],s[1]),e.dispatchFineChange(i);break;case"sld":t||(t=window.event),e.setSld(i,t,s[1]),e.dispatchFineChange(i)}}},onDocumentPointerEnd:function(t,n,r,i){return function(t){var r=n._jscInstance;e.detachGroupEvents("drag"),e.releaseTarget(),e.dispatchChange(r)}},dispatchChange:function(t){t.valueElement&&e.isElementType(t.valueElement,"input")&&e.fireEvent(t.valueElement,"change")},dispatchFineChange:function(e){if(e.onFineChange){var t;typeof e.onFineChange=="string"?t=new Function(e.onFineChange):t=e.onFineChange,t.call(e)}},setPad:function(t,n,r,i){var s=e.getAbsPointerPos(n),o=r+s.x-e._pointerOrigin.x-t.padding-t.insetWidth,u=i+s.y-e._pointerOrigin.y-t.padding-t.insetWidth,a=o*(360/(t.width-1)),f=100-u*(100/(t.height-1));switch(e.getPadYComponent(t)){case"s":t.fromHSV(a,f,null,e.leaveSld);break;case"v":t.fromHSV(a,null,f,e.leaveSld)}},setSld:function(t,n,r){var i=e.getAbsPointerPos(n),s=r+i.y-e._pointerOrigin.y-t.padding-t.insetWidth,o=100-s*(100/(t.height-1));switch(e.getSliderComponent(t)){case"s":t.fromHSV(null,o,null,e.leavePad);break;case"v":t.fromHSV(null,null,o,e.leavePad)}},_vmlNS:"jsc_vml_",_vmlCSS:"jsc_vml_css_",_vmlReady:!1,initVML:function(){if(!e._vmlReady){var t=document;t.namespaces[e._vmlNS]||t.namespaces.add(e._vmlNS,"urn:schemas-microsoft-com:vml");if(!t.styleSheets[e._vmlCSS]){var 
n=["shape","shapetype","group","background","path","formulas","handles","fill","stroke","shadow","textbox","textpath","imagedata","line","polyline","curve","rect","roundrect","oval","arc","image"],r=t.createStyleSheet();r.owningElement.id=e._vmlCSS;for(var i=0;i=3&&(s=r[0].match(i))&&(o=r[1].match(i))&&(u=r[2].match(i))){var a=parseFloat((s[1]||"0")+(s[2]||"")),f=parseFloat((o[1]||"0")+(o[2]||"")),l=parseFloat((u[1]||"0")+(u[2]||""));return this.fromRGB(a,f,l,t),!0}}return!1},this.toString=function(){return(256|Math.round(this.rgb[0])).toString(16).substr(1)+(256|Math.round(this.rgb[1])).toString(16).substr(1)+(256|Math.round(this.rgb[2])).toString(16).substr(1)},this.toHEXString=function(){return"#"+this.toString().toUpperCase()},this.toRGBString=function(){return"rgb("+Math.round(this.rgb[0])+","+Math.round(this.rgb[1])+","+Math.round(this.rgb[2])+")"},this.isLight=function(){return.213*this.rgb[0]+.715*this.rgb[1]+.072*this.rgb[2]>127.5},this._processParentElementsInDOM=function(){if(this._linkedElementsProcessed)return;this._linkedElementsProcessed=!0;var t=this.targetElement;do{var n=e.getStyle(t);n&&n.position.toLowerCase()==="fixed"&&(this.fixed=!0),t!==this.targetElement&&(t._jscEventsAttached||(e.attachEvent(t,"scroll",e.onParentScroll),t._jscEventsAttached=!0))}while((t=t.parentNode)&&!e.isElementType(t,"body"))};if(typeof t=="string"){var h=t,p=document.getElementById(h);p?this.targetElement=p:e.warn("Could not find target element with ID '"+h+"'")}else t?this.targetElement=t:e.warn("Invalid target element: '"+t+"'");if(this.targetElement._jscLinkedInstance){e.warn("Cannot link jscolor twice to the same element. 
Skipping.");return}this.targetElement._jscLinkedInstance=this,this.valueElement=e.fetchElement(this.valueElement),this.styleElement=e.fetchElement(this.styleElement);var d=this,v=this.container?e.fetchElement(this.container):document.getElementsByTagName("body")[0],m=3;if(e.isElementType(this.targetElement,"button"))if(this.targetElement.onclick){var g=this.targetElement.onclick;this.targetElement.onclick=function(e){return g.call(this,e),!1}}else this.targetElement.onclick=function(){return!1};if(this.valueElement&&e.isElementType(this.valueElement,"input")){var y=function(){d.fromString(d.valueElement.value,e.leaveValue),e.dispatchFineChange(d)};e.attachEvent(this.valueElement,"keyup",y),e.attachEvent(this.valueElement,"input",y),e.attachEvent(this.valueElement,"blur",c),this.valueElement.setAttribute("autocomplete","off")}this.styleElement&&(this.styleElement._jscOrigStyle={backgroundImage:this.styleElement.style.backgroundImage,backgroundColor:this.styleElement.style.backgroundColor,color:this.styleElement.style.color}),this.value?this.fromString(this.value)||this.exportColor():this.importColor()}};return e.jscolor.lookupClass="jscolor",e.jscolor.installByClassName=function(t){var n=document.getElementsByTagName("input"),r=document.getElementsByTagName("button");e.tryInstallOnElements(n,t),e.tryInstallOnElements(r,t)},e.register(),e.jscolor}());
\ No newline at end of file
diff --git a/lib/pathlib.py b/lib/pathlib.py
new file mode 100644
index 00000000..65eb76b5
--- /dev/null
+++ b/lib/pathlib.py
@@ -0,0 +1,1279 @@
import fnmatch
import functools
import io
import ntpath
import os
import posixpath
import re
import sys
import time
from contextlib import contextmanager
from errno import EINVAL, ENOENT
from operator import attrgetter
from stat import S_ISDIR, S_ISLNK, S_ISREG, S_ISSOCK, S_ISBLK, S_ISCHR, S_ISFIFO
try:
    # collections.abc is the canonical home from Python 3.3 on (the alias in
    # plain `collections` was removed in 3.10); fall back for Python 2.
    from collections.abc import Sequence
except ImportError:
    from collections import Sequence
try:
    from urllib import quote as urlquote, quote as urlquote_from_bytes
except ImportError:
    from urllib.parse import quote as urlquote, quote_from_bytes as urlquote_from_bytes
+
+
+try:
+ intern = intern
+except NameError:
+ intern = sys.intern
+try:
+ basestring = basestring
+except NameError:
+ basestring = str
+
+supports_symlinks = True
+try:
+ import nt
+except ImportError:
+ nt = None
+else:
+ if sys.getwindowsversion()[:2] >= (6, 0) and sys.version_info >= (3, 2):
+ from nt import _getfinalpathname
+ else:
+ supports_symlinks = False
+ _getfinalpathname = None
+
+
+__all__ = [
+ "PurePath", "PurePosixPath", "PureWindowsPath",
+ "Path", "PosixPath", "WindowsPath",
+ ]
+
+#
+# Internals
+#
+
+_py2 = sys.version_info < (3,)
+_py2_fs_encoding = 'ascii'
+
def _py2_fsencode(parts):
    # py2 => minimal unicode support
    encoded = []
    for part in parts:
        if isinstance(part, unicode):
            encoded.append(part.encode(_py2_fs_encoding))
        else:
            encoded.append(part)
    return encoded
+
+def _is_wildcard_pattern(pat):
+ # Whether this pattern needs actual matching using fnmatch, or can
+ # be looked up directly as a file.
+ return "*" in pat or "?" in pat or "[" in pat
+
+
class _Flavour(object):
    """A flavour implements a particular (platform-specific) set of path
    semantics.

    Subclasses supply `sep`, `altsep`, `has_drv`, `pathmod` and the
    `splitroot`/`casefold`/`resolve`/`is_reserved`/`make_uri` methods.
    """

    def __init__(self):
        # `sep` is defined by the concrete subclass; bind the joiner once.
        self.join = self.sep.join

    def parse_parts(self, parts):
        # Canonicalize a sequence of path strings into (drive, root, parts).
        # Scanning runs right-to-left so the right-most anchored component
        # (one with a drive or root) wins and earlier parts are discarded.
        if _py2:
            parts = _py2_fsencode(parts)
        parsed = []
        sep = self.sep
        altsep = self.altsep
        drv = root = ''
        it = reversed(parts)
        for part in it:
            if not part:
                continue
            if altsep:
                # Normalize the alternate separator (e.g. '/' on Windows).
                part = part.replace(altsep, sep)
            drv, root, rel = self.splitroot(part)
            if sep in rel:
                for x in reversed(rel.split(sep)):
                    # Drop empty components and '.' entries; intern the rest
                    # so related paths share component strings.
                    if x and x != '.':
                        parsed.append(intern(x))
            else:
                if rel and rel != '.':
                    parsed.append(intern(rel))
            if drv or root:
                if not drv:
                    # If no drive is present, try to find one in the previous
                    # parts. This makes the result of parsing e.g.
                    # ("C:", "/", "a") reasonably intuitive.
                    for part in it:
                        drv = self.splitroot(part)[0]
                        if drv:
                            break
                break
        if drv or root:
            # The anchor is stored as parts[0] (after the reverse below).
            parsed.append(drv + root)
        parsed.reverse()
        return drv, root, parsed

    def join_parsed_parts(self, drv, root, parts, drv2, root2, parts2):
        """
        Join the two paths represented by the respective
        (drive, root, parts) tuples. Return a new (drive, root, parts) tuple.
        """
        if root2:
            if not drv2 and drv:
                # Rooted-but-driveless second path keeps the first's drive.
                return drv, root2, [drv + root2] + parts2[1:]
        elif drv2:
            if drv2 == drv or self.casefold(drv2) == self.casefold(drv):
                # Same drive => second path is relative to the first
                return drv, root, parts + parts2[1:]
        else:
            # Second path is non-anchored (common case)
            return drv, root, parts + parts2
        # Second path is anchored elsewhere: it replaces the first entirely.
        return drv2, root2, parts2
+
+
class _WindowsFlavour(_Flavour):
    # Reference for Windows paths can be found at
    # http://msdn.microsoft.com/en-us/library/aa365247%28v=vs.85%29.aspx

    sep = '\\'
    altsep = '/'
    has_drv = True
    pathmod = ntpath

    is_supported = (nt is not None)

    # All single-letter ASCII drive letters, both cases.
    drive_letters = (
        set(chr(x) for x in range(ord('a'), ord('z') + 1)) |
        set(chr(x) for x in range(ord('A'), ord('Z') + 1))
    )
    ext_namespace_prefix = '\\\\?\\'

    reserved_names = (
        set(['CON', 'PRN', 'AUX', 'NUL']) |
        set(['COM%d' % i for i in range(1, 10)]) |
        set(['LPT%d' % i for i in range(1, 10)])
    )

    # Interesting findings about extended paths:
    # - '\\?\c:\a', '//?/c:\a' and '//?/c:/a' are all supported
    #   but '\\?\c:/a' is not
    # - extended paths are always absolute; "relative" extended paths will
    #   fail.

    def splitroot(self, part, sep=sep):
        # Split `part` into (drive, root, rest), handling in order:
        # the '\\?\' extended prefix, UNC '\\server\share' paths, and
        # 'C:'-style drive letters.
        first = part[0:1]
        second = part[1:2]
        if (second == sep and first == sep):
            # XXX extended paths should also disable the collapsing of "."
            # components (according to MSDN docs).
            prefix, part = self._split_extended_path(part)
            first = part[0:1]
            second = part[1:2]
        else:
            prefix = ''
        third = part[2:3]
        if (second == sep and first == sep and third != sep):
            # is a UNC path:
            # vvvvvvvvvvvvvvvvvvvvv root
            # \\machine\mountpoint\directory\etc\...
            #            directory ^^^^^^^^^^^^^^
            index = part.find(sep, 2)
            if index != -1:
                index2 = part.find(sep, index + 1)
                # a UNC path can't have two slashes in a row
                # (after the initial two)
                if index2 != index + 1:
                    if index2 == -1:
                        index2 = len(part)
                    if prefix:
                        return prefix + part[1:index2], sep, part[index2+1:]
                    else:
                        return part[:index2], sep, part[index2+1:]
        drv = root = ''
        if second == ':' and first in self.drive_letters:
            drv = part[:2]
            part = part[2:]
            first = third
        if first == sep:
            root = first
            part = part.lstrip(sep)
        return prefix + drv, root, part

    def casefold(self, s):
        # Windows paths compare case-insensitively.
        return s.lower()

    def casefold_parts(self, parts):
        return [p.lower() for p in parts]

    def resolve(self, path):
        s = str(path)
        if not s:
            return os.getcwd()
        if _getfinalpathname is not None:
            # Resolve via the Win32 API, then strip the extended prefix.
            return self._ext_to_normal(_getfinalpathname(s))
        # Means fallback on absolute
        return None

    def _split_extended_path(self, s, ext_prefix=ext_namespace_prefix):
        # Strip a leading '\\?\' (or '\\?\UNC\') prefix, returning
        # (prefix, remainder) where remainder is a normal DOS-like path.
        prefix = ''
        if s.startswith(ext_prefix):
            prefix = s[:4]
            s = s[4:]
            if s.startswith('UNC\\'):
                prefix += s[:3]
                s = '\\' + s[3:]
        return prefix, s

    def _ext_to_normal(self, s):
        # Turn back an extended path into a normal DOS-like path
        return self._split_extended_path(s)[1]

    def is_reserved(self, parts):
        # NOTE: the rules for reserved names seem somewhat complicated
        # (e.g. r"..\NUL" is reserved but not r"foo\NUL").
        # We err on the side of caution and return True for paths which are
        # not considered reserved by Windows.
        if not parts:
            return False
        if parts[0].startswith('\\\\'):
            # UNC paths are never reserved
            return False
        return parts[-1].partition('.')[0].upper() in self.reserved_names

    def make_uri(self, path):
        # Under Windows, file URIs use the UTF-8 encoding.
        drive = path.drive
        if len(drive) == 2 and drive[1] == ':':
            # It's a path on a local drive => 'file:///c:/a/b'
            rest = path.as_posix()[2:].lstrip('/')
            return 'file:///%s/%s' % (
                drive, urlquote_from_bytes(rest.encode('utf-8')))
        else:
            # It's a path on a network drive => 'file://host/share/a/b'
            return 'file:' + urlquote_from_bytes(path.as_posix().encode('utf-8'))
+
+
class _PosixFlavour(_Flavour):
    sep = '/'
    altsep = ''
    has_drv = False
    pathmod = posixpath

    is_supported = (os.name != 'nt')

    def splitroot(self, part, sep=sep):
        # POSIX has no drives: split off only the root ('/' or the
        # implementation-defined '//') from the relative remainder.
        if part and part[0] == sep:
            stripped_part = part.lstrip(sep)
            # According to POSIX path resolution:
            # http://pubs.opengroup.org/onlinepubs/009695399/basedefs/xbd_chap04.html#tag_04_11
            # "A pathname that begins with two successive slashes may be
            # interpreted in an implementation-defined manner, although more
            # than two leading slashes shall be treated as a single slash".
            if len(part) - len(stripped_part) == 2:
                return '', sep * 2, stripped_part
            else:
                return '', sep, stripped_part
        else:
            return '', '', part

    def casefold(self, s):
        # POSIX paths are case-sensitive: identity.
        return s

    def casefold_parts(self, parts):
        return parts

    def resolve(self, path):
        # Resolve symlinks component by component. `seen` caches resolved
        # links and doubles as loop detection: a key mapped to None is a
        # link currently being resolved, so meeting it again is a loop.
        sep = self.sep
        accessor = path._accessor
        seen = {}
        def _resolve(path, rest):
            if rest.startswith(sep):
                path = ''

            for name in rest.split(sep):
                if not name or name == '.':
                    # current dir
                    continue
                if name == '..':
                    # parent dir
                    path, _, _ = path.rpartition(sep)
                    continue
                newpath = path + sep + name
                if newpath in seen:
                    # Already seen this path
                    path = seen[newpath]
                    if path is not None:
                        # use cached value
                        continue
                    # The symlink is not resolved, so we must have a symlink loop.
                    raise RuntimeError("Symlink loop from %r" % newpath)
                # Resolve the symbolic link
                try:
                    target = accessor.readlink(newpath)
                except OSError as e:
                    if e.errno != EINVAL:
                        raise
                    # Not a symlink
                    path = newpath
                else:
                    seen[newpath] = None # not resolved symlink
                    path = _resolve(path, target)
                    seen[newpath] = path # resolved symlink

            return path
        # NOTE: according to POSIX, getcwd() cannot contain path components
        # which are symlinks.
        base = '' if path.is_absolute() else os.getcwd()
        return _resolve(base, str(path)) or sep

    def is_reserved(self, parts):
        # No reserved names on POSIX.
        return False

    def make_uri(self, path):
        # We represent the path using the local filesystem encoding,
        # for portability to other applications.
        bpath = bytes(path)
        return 'file://' + urlquote_from_bytes(bpath)
+
+
+_windows_flavour = _WindowsFlavour()
+_posix_flavour = _PosixFlavour()
+
+
class _Accessor:
    """An accessor implements a particular (system-specific or not) way of
    accessing paths on the filesystem.

    Base class carrying no behavior of its own; see _NormalAccessor below.
    """
+
+
class _NormalAccessor(_Accessor):
    # Accessor that delegates directly to the os module.
    # The _wrap_* helpers below run at class-creation time (they are plain
    # functions called in the class body, not methods): they adapt an os
    # function taking string paths into a staticmethod taking path objects.

    def _wrap_strfunc(strfunc):
        @functools.wraps(strfunc)
        def wrapped(pathobj, *args):
            return strfunc(str(pathobj), *args)
        return staticmethod(wrapped)

    def _wrap_binary_strfunc(strfunc):
        # Same as _wrap_strfunc, but for os functions taking two paths.
        @functools.wraps(strfunc)
        def wrapped(pathobjA, pathobjB, *args):
            return strfunc(str(pathobjA), str(pathobjB), *args)
        return staticmethod(wrapped)

    stat = _wrap_strfunc(os.stat)

    lstat = _wrap_strfunc(os.lstat)

    open = _wrap_strfunc(os.open)

    listdir = _wrap_strfunc(os.listdir)

    chmod = _wrap_strfunc(os.chmod)

    if hasattr(os, "lchmod"):
        lchmod = _wrap_strfunc(os.lchmod)
    else:
        # os.lchmod is only available on some platforms (e.g. BSD/macOS).
        def lchmod(self, pathobj, mode):
            raise NotImplementedError("lchmod() not available on this system")

    mkdir = _wrap_strfunc(os.mkdir)

    unlink = _wrap_strfunc(os.unlink)

    rmdir = _wrap_strfunc(os.rmdir)

    rename = _wrap_binary_strfunc(os.rename)

    if sys.version_info >= (3, 3):
        # os.replace (atomic overwrite on Windows) appeared in 3.3.
        replace = _wrap_binary_strfunc(os.replace)

    if nt:
        if supports_symlinks:
            symlink = _wrap_binary_strfunc(os.symlink)
        else:
            def symlink(a, b, target_is_directory):
                raise NotImplementedError("symlink() not available on this system")
    else:
        # Under POSIX, os.symlink() takes two args
        @staticmethod
        def symlink(a, b, target_is_directory):
            return os.symlink(str(a), str(b))

    utime = _wrap_strfunc(os.utime)

    # Helper for resolve()
    def readlink(self, path):
        return os.readlink(path)
+
+
+_normal_accessor = _NormalAccessor()
+
+
+#
+# Globbing helpers
+#
+
+@contextmanager
+def _cached(func):
+ try:
+ func.__cached__
+ yield func
+ except AttributeError:
+ cache = {}
+ def wrapper(*args):
+ try:
+ return cache[args]
+ except KeyError:
+ value = cache[args] = func(*args)
+ return value
+ wrapper.__cached__ = True
+ try:
+ yield wrapper
+ finally:
+ cache.clear()
+
def _make_selector(pattern_parts):
    """Build the selector chain matching a tuple of glob pattern parts."""
    pat = pattern_parts[0]
    child_parts = pattern_parts[1:]
    if pat == '**':
        return _RecursiveWildcardSelector(pat, child_parts)
    if '**' in pat:
        raise ValueError("Invalid pattern: '**' can only be an entire path component")
    if _is_wildcard_pattern(pat):
        return _WildcardSelector(pat, child_parts)
    return _PreciseSelector(pat, child_parts)

# Memoize selector construction where lru_cache exists (Python >= 3.2).
if hasattr(functools, "lru_cache"):
    _make_selector = functools.lru_cache()(_make_selector)
+
+
class _Selector:
    """A selector matches a specific glob pattern part against the children
    of a given path."""

    def __init__(self, child_parts):
        self.child_parts = child_parts
        # Chain selectors: the successor handles the remaining pattern
        # parts, or terminates the chain when none are left.
        if not child_parts:
            self.successor = _TerminatingSelector()
        else:
            self.successor = _make_selector(child_parts)

    def select_from(self, parent_path):
        """Iterate over all child paths of `parent_path` matched by this
        selector. This can contain parent_path itself."""
        path_cls = type(parent_path)
        return self._select_from(parent_path,
                                 path_cls.is_dir,
                                 path_cls.exists,
                                 parent_path._accessor.listdir)
+
+
+class _TerminatingSelector:
+
+ def _select_from(self, parent_path, is_dir, exists, listdir):
+ yield parent_path
+
+
class _PreciseSelector(_Selector):
    # Matches one literal (non-wildcard) name component.

    def __init__(self, name, child_parts):
        self.name = name
        _Selector.__init__(self, child_parts)

    def _select_from(self, parent_path, is_dir, exists, listdir):
        if not is_dir(parent_path):
            return
        # A literal component needs no directory scan: build the child
        # path directly and check for existence.
        child = parent_path._make_child_relpath(self.name)
        if exists(child):
            for result in self.successor._select_from(child, is_dir, exists, listdir):
                yield result
+
+
class _WildcardSelector(_Selector):
    # Matches one fnmatch-style pattern component (e.g. "*.py").

    def __init__(self, pat, child_parts):
        self.pat = re.compile(fnmatch.translate(pat))
        _Selector.__init__(self, child_parts)

    def _select_from(self, parent_path, is_dir, exists, listdir):
        if not is_dir(parent_path):
            return
        casefold = parent_path._flavour.casefold
        for entry in listdir(parent_path):
            # Match on the casefolded name so case-insensitive flavours
            # behave like the underlying filesystem.
            if self.pat.match(casefold(entry)):
                child = parent_path._make_child_relpath(entry)
                for result in self.successor._select_from(child, is_dir, exists, listdir):
                    yield result
+
+
class _RecursiveWildcardSelector(_Selector):
    # Handles the '**' pattern component: matches this directory and every
    # directory below it, recursively.

    def __init__(self, pat, child_parts):
        _Selector.__init__(self, child_parts)

    def _iterate_directories(self, parent_path, is_dir, listdir):
        # Depth-first, pre-order walk over parent_path and its
        # subdirectories.
        yield parent_path
        for name in listdir(parent_path):
            path = parent_path._make_child_relpath(name)
            if is_dir(path):
                for p in self._iterate_directories(path, is_dir, listdir):
                    yield p

    def _select_from(self, parent_path, is_dir, exists, listdir):
        if not is_dir(parent_path):
            return
        # Cache listdir results for the duration of this selection (the
        # walk and the successor may list the same directory repeatedly).
        with _cached(listdir) as listdir:
            # `yielded` deduplicates results: the successor can match the
            # same path from several starting points.
            yielded = set()
            try:
                successor_select = self.successor._select_from
                for starting_point in self._iterate_directories(parent_path, is_dir, listdir):
                    for p in successor_select(starting_point, is_dir, exists, listdir):
                        if p not in yielded:
                            yield p
                            yielded.add(p)
            finally:
                yielded.clear()
+
+
+#
+# Public API
+#
+
+class _PathParents(Sequence):
+ """This object provides sequence-like access to the logical ancestors
+ of a path. Don't try to construct it yourself."""
+ __slots__ = ('_pathcls', '_drv', '_root', '_parts')
+
+ def __init__(self, path):
+ # We don't store the instance to avoid reference cycles
+ self._pathcls = type(path)
+ self._drv = path._drv
+ self._root = path._root
+ self._parts = path._parts
+
+ def __len__(self):
+ if self._drv or self._root:
+ return len(self._parts) - 1
+ else:
+ return len(self._parts)
+
+ def __getitem__(self, idx):
+ if idx < 0 or idx >= len(self):
+ raise IndexError(idx)
+ return self._pathcls._from_parsed_parts(self._drv, self._root,
+ self._parts[:-idx - 1])
+
+ def __repr__(self):
+ return "<{0}.parents>".format(self._pathcls.__name__)
+
+
+class PurePath(object):
+ """PurePath represents a filesystem path and offers operations which
+ don't imply any actual filesystem I/O. Depending on your system,
+ instantiating a PurePath will return either a PurePosixPath or a
+ PureWindowsPath object. You can also instantiate either of these classes
+ directly, regardless of your system.
+ """
+ __slots__ = (
+ '_drv', '_root', '_parts',
+ '_str', '_hash', '_pparts', '_cached_cparts',
+ )
+
    def __new__(cls, *args):
        """Construct a PurePath from one or several strings and or existing
        PurePath objects. The strings and path objects are combined so as
        to yield a canonicalized path, which is incorporated into the
        new PurePath object.
        """
        if cls is PurePath:
            # Instantiating the generic class picks the flavour matching
            # the running OS.
            cls = PureWindowsPath if os.name == 'nt' else PurePosixPath
        return cls._from_parts(args)

    def __reduce__(self):
        # Using the parts tuple helps share interned path parts
        # when pickling related paths.
        return (self.__class__, tuple(self._parts))
+
    @classmethod
    def _parse_args(cls, args):
        # This is useful when you don't want to create an instance, just
        # canonicalize some constructor arguments.
        # Accepts a mix of PurePath objects (their parts are reused) and
        # strings; anything else is a TypeError.
        parts = []
        for a in args:
            if isinstance(a, PurePath):
                parts += a._parts
            elif isinstance(a, basestring):
                parts.append(a)
            else:
                raise TypeError(
                    "argument should be a path or str object, not %r"
                    % type(a))
        return cls._flavour.parse_parts(parts)

    @classmethod
    def _from_parts(cls, args, init=True):
        # Build a path object from raw constructor arguments.
        # We need to call _parse_args on the instance, so as to get the
        # right flavour.
        self = object.__new__(cls)
        drv, root, parts = self._parse_args(args)
        self._drv = drv
        self._root = root
        self._parts = parts
        if init:
            self._init()
        return self

    @classmethod
    def _from_parsed_parts(cls, drv, root, parts, init=True):
        # Fast constructor for when (drv, root, parts) is already
        # canonical -- skips argument parsing entirely.
        self = object.__new__(cls)
        self._drv = drv
        self._root = root
        self._parts = parts
        if init:
            self._init()
        return self

    @classmethod
    def _format_parsed_parts(cls, drv, root, parts):
        # Render a parsed triple back into a string; for anchored paths,
        # parts[0] is the anchor itself so only parts[1:] are joined.
        if drv or root:
            return drv + root + cls._flavour.join(parts[1:])
        else:
            return cls._flavour.join(parts)
+
    def _init(self):
        # Overriden in concrete Path
        pass

    def _make_child(self, args):
        # Parse the extra arguments and join them onto this path; anchored
        # components in `args` may replace this path entirely.
        drv, root, parts = self._parse_args(args)
        drv, root, parts = self._flavour.join_parsed_parts(
            self._drv, self._root, self._parts, drv, root, parts)
        return self._from_parsed_parts(drv, root, parts)

    def __str__(self):
        """Return the string representation of the path, suitable for
        passing to system calls."""
        try:
            return self._str
        except AttributeError:
            # Cache the formatted string; an empty relative path renders
            # as '.'.
            self._str = self._format_parsed_parts(self._drv, self._root,
                                                  self._parts) or '.'
            return self._str
+
    def as_posix(self):
        """Return the string representation of the path with forward (/)
        slashes."""
        f = self._flavour
        return str(self).replace(f.sep, '/')

    def __bytes__(self):
        """Return the bytes representation of the path. This is only
        recommended to use under Unix."""
        if sys.version_info < (3, 2):
            # os.fsencode() only exists from Python 3.2 onwards.
            raise NotImplementedError("needs Python 3.2 or later")
        return os.fsencode(str(self))

    def __repr__(self):
        return "{0}({1!r})".format(self.__class__.__name__, self.as_posix())

    def as_uri(self):
        """Return the path as a 'file' URI.

        Raises ValueError for relative paths.
        """
        if not self.is_absolute():
            raise ValueError("relative path can't be expressed as a file URI")
        return self._flavour.make_uri(self)
+
    @property
    def _cparts(self):
        # Cached casefolded parts, for hashing and comparison
        try:
            return self._cached_cparts
        except AttributeError:
            self._cached_cparts = self._flavour.casefold_parts(self._parts)
            return self._cached_cparts

    def __eq__(self, other):
        # Equality holds only within the same flavour; the casefolded
        # parts make comparison case-insensitive on Windows.
        if not isinstance(other, PurePath):
            return NotImplemented
        return self._cparts == other._cparts and self._flavour is other._flavour

    def __ne__(self, other):
        return not self == other

    def __hash__(self):
        # Cached, and consistent with __eq__ via the casefolded parts.
        try:
            return self._hash
        except AttributeError:
            self._hash = hash(tuple(self._cparts))
            return self._hash

    def __lt__(self, other):
        # Ordering is only defined between paths of the same flavour.
        if not isinstance(other, PurePath) or self._flavour is not other._flavour:
            return NotImplemented
        return self._cparts < other._cparts

    def __le__(self, other):
        if not isinstance(other, PurePath) or self._flavour is not other._flavour:
            return NotImplemented
        return self._cparts <= other._cparts

    def __gt__(self, other):
        if not isinstance(other, PurePath) or self._flavour is not other._flavour:
            return NotImplemented
        return self._cparts > other._cparts

    def __ge__(self, other):
        if not isinstance(other, PurePath) or self._flavour is not other._flavour:
            return NotImplemented
        return self._cparts >= other._cparts
+
    drive = property(attrgetter('_drv'),
                     doc="""The drive prefix (letter or UNC path), if any.""")

    root = property(attrgetter('_root'),
                    doc="""The root of the path, if any.""")

    @property
    def anchor(self):
        """The concatenation of the drive and root, or ''."""
        anchor = self._drv + self._root
        return anchor

    @property
    def name(self):
        """The final path component, if any."""
        parts = self._parts
        # For an anchored path, parts[0] is the anchor itself, which does
        # not count as a name.
        if len(parts) == (1 if (self._drv or self._root) else 0):
            return ''
        return parts[-1]
+
    @property
    def suffix(self):
        """The final component's last suffix, if any."""
        name = self.name
        i = name.rfind('.')
        # A leading dot (hidden file) or a trailing dot is not a suffix.
        if 0 < i < len(name) - 1:
            return name[i:]
        else:
            return ''

    @property
    def suffixes(self):
        """A list of the final component's suffixes, if any."""
        name = self.name
        if name.endswith('.'):
            return []
        # Strip leading dots so hidden files don't produce a bogus suffix.
        name = name.lstrip('.')
        return ['.' + suffix for suffix in name.split('.')[1:]]

    @property
    def stem(self):
        """The final path component, minus its last suffix."""
        name = self.name
        i = name.rfind('.')
        if 0 < i < len(name) - 1:
            return name[:i]
        else:
            return name
+
    def with_name(self, name):
        """Return a new path with the file name changed.

        Raises ValueError if this path has no final component (e.g. '/').
        """
        if not self.name:
            raise ValueError("%r has an empty name" % (self,))
        return self._from_parsed_parts(self._drv, self._root,
                                       self._parts[:-1] + [name])

    def with_suffix(self, suffix):
        """Return a new path with the file suffix changed (or added, if none).

        The suffix must be a single '.'-prefixed component, e.g. '.txt'.
        """
        # XXX if suffix is None, should the current suffix be removed?
        # The suffix must parse as exactly one unanchored component.
        drv, root, parts = self._flavour.parse_parts((suffix,))
        if drv or root or len(parts) != 1:
            raise ValueError("Invalid suffix %r" % (suffix))
        suffix = parts[0]
        if not suffix.startswith('.'):
            raise ValueError("Invalid suffix %r" % (suffix))
        name = self.name
        if not name:
            raise ValueError("%r has an empty name" % (self,))
        old_suffix = self.suffix
        if not old_suffix:
            name = name + suffix
        else:
            name = name[:-len(old_suffix)] + suffix
        return self._from_parsed_parts(self._drv, self._root,
                                       self._parts[:-1] + [name])
+
+ def relative_to(self, *other):
+ """Return the relative path to another path identified by the passed
+ arguments. If the operation is not possible (because this is not
+ a subpath of the other path), raise ValueError.
+ """
+ # For the purpose of this method, drive and root are considered
+ # separate parts, i.e.:
+ # Path('c:/').relative_to('c:') gives Path('/')
+ # Path('c:/').relative_to('/') raise ValueError
+ if not other:
+ raise TypeError("need at least one argument")
+ parts = self._parts
+ drv = self._drv
+ root = self._root
+ if root:
+ abs_parts = [drv, root] + parts[1:]
+ else:
+ abs_parts = parts
+ to_drv, to_root, to_parts = self._parse_args(other)
+ if to_root:
+ to_abs_parts = [to_drv, to_root] + to_parts[1:]
+ else:
+ to_abs_parts = to_parts
+ n = len(to_abs_parts)
+ cf = self._flavour.casefold_parts
+ if (root or drv) if n == 0 else cf(abs_parts[:n]) != cf(to_abs_parts):
+ formatted = self._format_parsed_parts(to_drv, to_root, to_parts)
+ raise ValueError("{!r} does not start with {!r}"
+ .format(str(self), str(formatted)))
+ return self._from_parsed_parts('', root if n == 1 else '',
+ abs_parts[n:])
+
+ @property
+ def parts(self):
+ """An object providing sequence-like access to the
+ components in the filesystem path."""
+ # We cache the tuple to avoid building a new one each time .parts
+ # is accessed. XXX is this necessary?
+ try:
+ return self._pparts
+ except AttributeError:
+ self._pparts = tuple(self._parts)
+ return self._pparts
+
+ def joinpath(self, *args):
+ """Combine this path with one or several arguments, and return a
+ new path representing either a subpath (if all arguments are relative
+ paths) or a totally different path (if one of the arguments is
+ anchored).
+ """
+ return self._make_child(args)
+
+ def __truediv__(self, key):
+ return self._make_child((key,))
+
+ def __rtruediv__(self, key):
+ return self._from_parts([key] + self._parts)
+
+ if sys.version_info < (3,):
+ __div__ = __truediv__
+ __rdiv__ = __rtruediv__
+
+ @property
+ def parent(self):
+ """The logical parent of the path."""
+ drv = self._drv
+ root = self._root
+ parts = self._parts
+ if len(parts) == 1 and (drv or root):
+ return self
+ return self._from_parsed_parts(drv, root, parts[:-1])
+
+ @property
+ def parents(self):
+ """A sequence of this path's logical parents."""
+ return _PathParents(self)
+
+ def is_absolute(self):
+ """True if the path is absolute (has both a root and, if applicable,
+ a drive)."""
+ if not self._root:
+ return False
+ return not self._flavour.has_drv or bool(self._drv)
+
+ def is_reserved(self):
+ """Return True if the path contains one of the special names reserved
+ by the system, if any."""
+ return self._flavour.is_reserved(self._parts)
+
+ def match(self, path_pattern):
+ """
+ Return True if this path matches the given pattern.
+ """
+ cf = self._flavour.casefold
+ path_pattern = cf(path_pattern)
+ drv, root, pat_parts = self._flavour.parse_parts((path_pattern,))
+ if not pat_parts:
+ raise ValueError("empty pattern")
+ if drv and drv != cf(self._drv):
+ return False
+ if root and root != cf(self._root):
+ return False
+ parts = self._cparts
+ if drv or root:
+ if len(pat_parts) != len(parts):
+ return False
+ pat_parts = pat_parts[1:]
+ elif len(pat_parts) > len(parts):
+ return False
+ for part, pat in zip(reversed(parts), reversed(pat_parts)):
+ if not fnmatch.fnmatchcase(part, pat):
+ return False
+ return True
+
+
+class PurePosixPath(PurePath):
+ _flavour = _posix_flavour
+ __slots__ = ()
+
+
+class PureWindowsPath(PurePath):
+ _flavour = _windows_flavour
+ __slots__ = ()
+
+
+# Filesystem-accessing classes
+
+
+class Path(PurePath):
+ __slots__ = (
+ '_accessor',
+ )
+
+ def __new__(cls, *args, **kwargs):
+ if cls is Path:
+ cls = WindowsPath if os.name == 'nt' else PosixPath
+ self = cls._from_parts(args, init=False)
+ if not self._flavour.is_supported:
+ raise NotImplementedError("cannot instantiate %r on your system"
+ % (cls.__name__,))
+ self._init()
+ return self
+
+ def _init(self,
+ # Private non-constructor arguments
+ template=None,
+ ):
+ if template is not None:
+ self._accessor = template._accessor
+ else:
+ self._accessor = _normal_accessor
+
+ def _make_child_relpath(self, part):
+ # This is an optimization used for dir walking. `part` must be
+ # a single part relative to this path.
+ parts = self._parts + [part]
+ return self._from_parsed_parts(self._drv, self._root, parts)
+
+ def _opener(self, name, flags, mode=0o666):
+ # A stub for the opener argument to built-in open()
+ return self._accessor.open(self, flags, mode)
+
+ def _raw_open(self, flags, mode=0o777):
+ """
+ Open the file pointed by this path and return a file descriptor,
+ as os.open() does.
+ """
+ return self._accessor.open(self, flags, mode)
+
+ # Public API
+
+ @classmethod
+ def cwd(cls):
+ """Return a new path pointing to the current working directory
+ (as returned by os.getcwd()).
+ """
+ return cls(os.getcwd())
+
+ def iterdir(self):
+ """Iterate over the files in this directory. Does not yield any
+ result for the special paths '.' and '..'.
+ """
+ for name in self._accessor.listdir(self):
+ if name in ('.', '..'):
+ # Yielding a path object for these makes little sense
+ continue
+ yield self._make_child_relpath(name)
+
+ def glob(self, pattern):
+ """Iterate over this subtree and yield all existing files (of any
+ kind, including directories) matching the given pattern.
+ """
+ pattern = self._flavour.casefold(pattern)
+ drv, root, pattern_parts = self._flavour.parse_parts((pattern,))
+ if drv or root:
+ raise NotImplementedError("Non-relative patterns are unsupported")
+ selector = _make_selector(tuple(pattern_parts))
+ for p in selector.select_from(self):
+ yield p
+
+ def rglob(self, pattern):
+ """Recursively yield all existing files (of any kind, including
+ directories) matching the given pattern, anywhere in this subtree.
+ """
+ pattern = self._flavour.casefold(pattern)
+ drv, root, pattern_parts = self._flavour.parse_parts((pattern,))
+ if drv or root:
+ raise NotImplementedError("Non-relative patterns are unsupported")
+ selector = _make_selector(("**",) + tuple(pattern_parts))
+ for p in selector.select_from(self):
+ yield p
+
+ def absolute(self):
+ """Return an absolute version of this path. This function works
+ even if the path doesn't point to anything.
+
+ No normalization is done, i.e. all '.' and '..' will be kept along.
+ Use resolve() to get the canonical path to a file.
+ """
+ # XXX untested yet!
+ if self.is_absolute():
+ return self
+ # FIXME this must defer to the specific flavour (and, under Windows,
+ # use nt._getfullpathname())
+ obj = self._from_parts([os.getcwd()] + self._parts, init=False)
+ obj._init(template=self)
+ return obj
+
+ def resolve(self):
+ """
+ Make the path absolute, resolving all symlinks on the way and also
+ normalizing it (for example turning slashes into backslashes under
+ Windows).
+ """
+ s = self._flavour.resolve(self)
+ if s is None:
+ # No symlink resolution => for consistency, raise an error if
+ # the path doesn't exist or is forbidden
+ self.stat()
+ s = str(self.absolute())
+ # Now we have no symlinks in the path, it's safe to normalize it.
+ normed = self._flavour.pathmod.normpath(s)
+ obj = self._from_parts((normed,), init=False)
+ obj._init(template=self)
+ return obj
+
+ def stat(self):
+ """
+ Return the result of the stat() system call on this path, like
+ os.stat() does.
+ """
+ return self._accessor.stat(self)
+
+ def owner(self):
+ """
+ Return the login name of the file owner.
+ """
+ import pwd
+ return pwd.getpwuid(self.stat().st_uid).pw_name
+
+ def group(self):
+ """
+ Return the group name of the file gid.
+ """
+ import grp
+ return grp.getgrgid(self.stat().st_gid).gr_name
+
+ def open(self, mode='r', buffering=-1, encoding=None,
+ errors=None, newline=None):
+ """
+ Open the file pointed by this path and return a file object, as
+ the built-in open() function does.
+ """
+ if sys.version_info >= (3, 3):
+ return io.open(str(self), mode, buffering, encoding, errors, newline,
+ opener=self._opener)
+ else:
+ return io.open(str(self), mode, buffering, encoding, errors, newline)
+
+ def touch(self, mode=0o666, exist_ok=True):
+ """
+ Create this file with the given access mode, if it doesn't exist.
+ """
+ if exist_ok:
+ # First try to bump modification time
+ # Implementation note: GNU touch uses the UTIME_NOW option of
+ # the utimensat() / futimens() functions.
+ t = time.time()
+ try:
+ self._accessor.utime(self, (t, t))
+ except OSError:
+ # Avoid exception chaining
+ pass
+ else:
+ return
+ flags = os.O_CREAT | os.O_WRONLY
+ if not exist_ok:
+ flags |= os.O_EXCL
+ fd = self._raw_open(flags, mode)
+ os.close(fd)
+
+ def mkdir(self, mode=0o777, parents=False):
+ if not parents:
+ self._accessor.mkdir(self, mode)
+ else:
+ try:
+ self._accessor.mkdir(self, mode)
+ except OSError as e:
+ if e.errno != ENOENT:
+ raise
+ self.parent.mkdir(parents=True)
+ self._accessor.mkdir(self, mode)
+
+ def chmod(self, mode):
+ """
+ Change the permissions of the path, like os.chmod().
+ """
+ self._accessor.chmod(self, mode)
+
+ def lchmod(self, mode):
+ """
+ Like chmod(), except if the path points to a symlink, the symlink's
+ permissions are changed, rather than its target's.
+ """
+ self._accessor.lchmod(self, mode)
+
+ def unlink(self):
+ """
+ Remove this file or link.
+ If the path is a directory, use rmdir() instead.
+ """
+ self._accessor.unlink(self)
+
+ def rmdir(self):
+ """
+ Remove this directory. The directory must be empty.
+ """
+ self._accessor.rmdir(self)
+
+ def lstat(self):
+ """
+ Like stat(), except if the path points to a symlink, the symlink's
+ status information is returned, rather than its target's.
+ """
+ return self._accessor.lstat(self)
+
+ def rename(self, target):
+ """
+ Rename this path to the given path.
+ """
+ self._accessor.rename(self, target)
+
+ def replace(self, target):
+ """
+ Rename this path to the given path, clobbering the existing
+ destination if it exists.
+ """
+ if sys.version_info < (3, 3):
+ raise NotImplementedError("replace() is only available "
+ "with Python 3.3 and later")
+ self._accessor.replace(self, target)
+
+ def symlink_to(self, target, target_is_directory=False):
+ """
+ Make this path a symlink pointing to the given path.
+ Note the order of arguments (self, target) is the reverse of os.symlink's.
+ """
+ self._accessor.symlink(target, self, target_is_directory)
+
+ # Convenience functions for querying the stat results
+
+ def exists(self):
+ """
+ Whether this path exists.
+ """
+ try:
+ self.stat()
+ except OSError as e:
+ if e.errno != ENOENT:
+ raise
+ return False
+ return True
+
+ def is_dir(self):
+ """
+ Whether this path is a directory.
+ """
+ try:
+ return S_ISDIR(self.stat().st_mode)
+ except OSError as e:
+ if e.errno != ENOENT:
+ raise
+ # Path doesn't exist or is a broken symlink
+ # (see https://bitbucket.org/pitrou/pathlib/issue/12/)
+ return False
+
+ def is_file(self):
+ """
+ Whether this path is a regular file (also True for symlinks pointing
+ to regular files).
+ """
+ try:
+ return S_ISREG(self.stat().st_mode)
+ except OSError as e:
+ if e.errno != ENOENT:
+ raise
+ # Path doesn't exist or is a broken symlink
+ # (see https://bitbucket.org/pitrou/pathlib/issue/12/)
+ return False
+
+ def is_symlink(self):
+ """
+ Whether this path is a symbolic link.
+ """
+ try:
+ return S_ISLNK(self.lstat().st_mode)
+ except OSError as e:
+ if e.errno != ENOENT:
+ raise
+ # Path doesn't exist
+ return False
+
+ def is_block_device(self):
+ """
+ Whether this path is a block device.
+ """
+ try:
+ return S_ISBLK(self.stat().st_mode)
+ except OSError as e:
+ if e.errno != ENOENT:
+ raise
+ # Path doesn't exist or is a broken symlink
+ # (see https://bitbucket.org/pitrou/pathlib/issue/12/)
+ return False
+
+ def is_char_device(self):
+ """
+ Whether this path is a character device.
+ """
+ try:
+ return S_ISCHR(self.stat().st_mode)
+ except OSError as e:
+ if e.errno != ENOENT:
+ raise
+ # Path doesn't exist or is a broken symlink
+ # (see https://bitbucket.org/pitrou/pathlib/issue/12/)
+ return False
+
+ def is_fifo(self):
+ """
+ Whether this path is a FIFO.
+ """
+ try:
+ return S_ISFIFO(self.stat().st_mode)
+ except OSError as e:
+ if e.errno != ENOENT:
+ raise
+ # Path doesn't exist or is a broken symlink
+ # (see https://bitbucket.org/pitrou/pathlib/issue/12/)
+ return False
+
+ def is_socket(self):
+ """
+ Whether this path is a socket.
+ """
+ try:
+ return S_ISSOCK(self.stat().st_mode)
+ except OSError as e:
+ if e.errno != ENOENT:
+ raise
+ # Path doesn't exist or is a broken symlink
+ # (see https://bitbucket.org/pitrou/pathlib/issue/12/)
+ return False
+
+
+class PosixPath(Path, PurePosixPath):
+ __slots__ = ()
+
+class WindowsPath(Path, PureWindowsPath):
+ __slots__ = ()
diff --git a/mylar/PostProcessor.py b/mylar/PostProcessor.py
index a0b29ac9..0777d9eb 100755
--- a/mylar/PostProcessor.py
+++ b/mylar/PostProcessor.py
@@ -1942,8 +1942,12 @@ class PostProcessor(object):
logger.info('%s Post-Processing completed for: [ %s #%s ] %s' % (module, comicname, issuenumber, grab_dst))
self._log(u"Post Processing SUCCESSFUL! ")
+ imageUrl = myDB.select('SELECT ImageURL from issues WHERE IssueID=?', [issueid])
+ if imageUrl:
+ imageUrl = imageUrl[0][0]
+
try:
- self.sendnotify(comicname, issueyear=None, issuenumOG=issuenumber, annchk=annchk, module=module)
+ self.sendnotify(comicname, issueyear=None, issuenumOG=issuenumber, annchk=annchk, module=module, imageUrl=imageUrl)
except:
pass
@@ -2521,7 +2525,7 @@ class PostProcessor(object):
rem_issueid = nfilename[xyb+3:yyb]
logger.fdebug('issueid: %s' % rem_issueid)
nfilename = '%s %s'.strip() % (nfilename[:xyb], nfilename[yyb+3:])
- logger.fdebug('issueid information [%s] removed successsfully: %s' % (rem_issueid, nfilename))
+ logger.fdebug('issueid information [%s] removed successfully: %s' % (rem_issueid, nfilename))
self._log("New Filename: %s" % nfilename)
logger.fdebug('%s New Filename: %s' % (module, nfilename))
@@ -2771,7 +2775,11 @@ class PostProcessor(object):
# self.sendnotify(series, issueyear, dispiss, annchk, module)
# return self.queue.put(self.valreturn)
- self.sendnotify(series, issueyear, dispiss, annchk, module)
+ imageUrl = myDB.select('SELECT ImageURL from issues WHERE IssueID=?', [issueid])
+ if imageUrl:
+ imageUrl = imageUrl[0][0]
+
+ self.sendnotify(series, issueyear, dispiss, annchk, module, imageUrl)
logger.info('%s Post-Processing completed for: %s %s' % (module, series, dispiss))
self._log(u"Post Processing SUCCESSFUL! ")
@@ -2784,7 +2792,7 @@ class PostProcessor(object):
return self.queue.put(self.valreturn)
- def sendnotify(self, series, issueyear, issuenumOG, annchk, module):
+ def sendnotify(self, series, issueyear, issuenumOG, annchk, module, imageUrl):
if issueyear is None:
prline = '%s %s' % (series, issuenumOG)
@@ -2812,7 +2820,7 @@ class PostProcessor(object):
if mylar.CONFIG.TELEGRAM_ENABLED:
telegram = notifiers.TELEGRAM()
- telegram.notify(prline2)
+ telegram.notify(prline2, imageUrl)
if mylar.CONFIG.SLACK_ENABLED:
slack = notifiers.SLACK()
diff --git a/mylar/config.py b/mylar/config.py
index 41accffc..33361821 100644
--- a/mylar/config.py
+++ b/mylar/config.py
@@ -11,6 +11,7 @@ import re
import ConfigParser
import mylar
from mylar import logger, helpers, encrypted
+import errno
config = ConfigParser.SafeConfigParser()
@@ -368,6 +369,7 @@ _CONFIG_DEFINITIONS = OrderedDict({
'OPDS_USERNAME': (str, 'OPDS', None),
'OPDS_PASSWORD': (str, 'OPDS', None),
'OPDS_METAINFO': (bool, 'OPDS', False),
+ 'OPDS_PAGESIZE': (int, 'OPDS', 30),
})
@@ -555,7 +557,7 @@ class Config(object):
if self.CONFIG_VERSION < 8:
print('Checking for existing torznab configuration...')
if not any([self.TORZNAB_NAME is None, self.TORZNAB_HOST is None, self.TORZNAB_APIKEY is None, self.TORZNAB_CATEGORY is None]):
- torznabs =[(self.TORZNAB_NAME, self.TORZNAB_HOST, self.TORZNAB_APIKEY, self.TORZNAB_CATEGORY, str(int(self.ENABLE_TORZNAB)))]
+ torznabs =[(self.TORZNAB_NAME, self.TORZNAB_HOST, self.TORZNAB_VERIFY, self.TORZNAB_APIKEY, self.TORZNAB_CATEGORY, str(int(self.ENABLE_TORZNAB)))]
setattr(self, 'EXTRA_TORZNABS', torznabs)
config.set('Torznab', 'EXTRA_TORZNABS', str(torznabs))
print('Successfully converted existing torznab for multiple configuration allowance. Removing old references.')
@@ -563,9 +565,9 @@ class Config(object):
print('No existing torznab configuration found. Just removing config references at this point..')
config.remove_option('Torznab', 'torznab_name')
config.remove_option('Torznab', 'torznab_host')
+ config.remove_option('Torznab', 'torznab_verify')
config.remove_option('Torznab', 'torznab_apikey')
config.remove_option('Torznab', 'torznab_category')
- config.remove_option('Torznab', 'torznab_verify')
print('Successfully removed outdated config entries.')
if self.newconfig < 9:
#rejig rtorrent settings due to change.
@@ -1111,7 +1113,7 @@ class Config(object):
return extra_newznabs
def get_extra_torznabs(self):
- extra_torznabs = zip(*[iter(self.EXTRA_TORZNABS.split(', '))]*5)
+ extra_torznabs = zip(*[iter(self.EXTRA_TORZNABS.split(', '))]*6)
return extra_torznabs
def provider_sequence(self):
@@ -1154,7 +1156,7 @@ class Config(object):
if self.ENABLE_TORZNAB:
for ets in self.EXTRA_TORZNABS:
- if str(ets[4]) == '1': # if torznabs are enabled
+ if str(ets[5]) == '1': # if torznabs are enabled
if ets[0] == "":
et_name = ets[1]
else:
diff --git a/mylar/filechecker.py b/mylar/filechecker.py
index 0fcffe67..85dab904 100755
--- a/mylar/filechecker.py
+++ b/mylar/filechecker.py
@@ -272,6 +272,11 @@ class FileChecker(object):
logger.fdebug('[SARC] Removed Reading Order sequence from subname. Now set to : %s' % modfilename)
#make sure all the brackets are properly spaced apart
+        if modfilename.find(' ') == -1:
+ #if no spaces exist, assume decimals being used as spacers (ie. nzb name)
+ modspacer = '.'
+ else:
+ modspacer = ' '
m = re.findall('[^()]+', modfilename)
cnt = 1
#2019-12-24----fixed to accomodate naming convention like Amazing Mary Jane (2019) 002.cbr, and to account for brackets properly
@@ -279,10 +284,10 @@ class FileChecker(object):
while cnt < len(m):
#logger.fdebug('[m=%s] modfilename.find: %s' % (m[cnt], modfilename[modfilename.find('('+m[cnt]+')')+len(m[cnt])+2]))
#logger.fdebug('mod_1: %s' % modfilename.find('('+m[cnt]+')'))
- if modfilename[modfilename.find('('+m[cnt]+')')-1] != ' ' and modfilename.find('('+m[cnt]+')') != -1:
+ if modfilename[modfilename.find('('+m[cnt]+')')-1] != modspacer and modfilename.find('('+m[cnt]+')') != -1:
#logger.fdebug('before_space: %s' % modfilename[modfilename.find('('+m[cnt]+')')-1])
#logger.fdebug('after_space: %s' % modfilename[modfilename.find('('+m[cnt]+')')+len(m[cnt])+2])
- modfilename = '%s%s%s' % (modfilename[:modfilename.find('('+m[cnt]+')')], ' ', modfilename[modfilename.find('('+m[cnt]+')'):])
+ modfilename = '%s%s%s' % (modfilename[:modfilename.find('('+m[cnt]+')')], modspacer, modfilename[modfilename.find('('+m[cnt]+')'):])
cnt+=1
except Exception as e:
#logger.warn('[ERROR] %s' % e)
@@ -335,7 +340,7 @@ class FileChecker(object):
issueid = modfilename[x+3:y]
logger.fdebug('issueid: %s' % issueid)
modfilename = '%s %s'.strip() % (modfilename[:x], modfilename[y+3:])
- logger.fdebug('issueid %s removed successsfully: %s' % (issueid, modfilename))
+ logger.fdebug('issueid %s removed successfully: %s' % (issueid, modfilename))
#here we take a snapshot of the current modfilename, the intent is that we will remove characters that match
#as we discover them - namely volume, issue #, years, etc
@@ -373,13 +378,14 @@ class FileChecker(object):
ret_sf1 = ' '.join(sf)
#here we should account for some characters that get stripped out due to the regex's
- #namely, unique characters - known so far: +, &
+ #namely, unique characters - known so far: +, &, @
#c11 = '\+'
#f11 = '\&'
#g11 = '\''
ret_sf1 = re.sub('\+', 'c11', ret_sf1).strip()
ret_sf1 = re.sub('\&', 'f11', ret_sf1).strip()
ret_sf1 = re.sub('\'', 'g11', ret_sf1).strip()
+ ret_sf1 = re.sub('\@', 'h11', ret_sf1).strip()
#split_file = re.findall('(?imu)\([\w\s-]+\)|[-+]?\d*\.\d+|\d+[\s]COVERS+|\d{4}-\d{2}-\d{2}|\d+[(th|nd|rd|st)]+|\d+|[\w-]+|#?\d\.\d+|#[\.-]\w+|#[\d*\.\d+|\w+\d+]+|#(? 1:
# if we matched on more than one series above, just save those results instead of the entire search result set.
for sres in search_matches:
+ if type(sres['haveit']) == dict:
+ imp_cid = sres['haveit']['comicid']
+ else:
+ imp_cid = sres['haveit']
cVal = {"SRID": SRID,
"comicid": sres['comicid']}
#should store ogcname in here somewhere to account for naming conversions above.
nVal = {"Series": ComicName,
"results": len(search_matches),
"publisher": sres['publisher'],
- "haveit": sres['haveit'],
+ "haveit": imp_cid,
"name": sres['name'],
"deck": sres['deck'],
"url": sres['url'],
@@ -4707,6 +4711,7 @@ class WebInterface(object):
"issues": sres['issues'],
"ogcname": ogcname,
"comicyear": sres['comicyear']}
+ #logger.fdebug('search_values: [%s]/%s' % (cVal, nVal))
myDB.upsert("searchresults", nVal, cVal)
logger.info('[IMPORT] There is more than one result that might be valid - normally this is due to the filename(s) not having enough information for me to use (ie. no volume label/year). Manual intervention is required.')
#force the status here just in case
@@ -4718,13 +4723,17 @@ class WebInterface(object):
# store the search results for series that returned more than one result for user to select later / when they want.
# should probably assign some random numeric for an id to reference back at some point.
for sres in sresults:
+ if type(sres['haveit']) == dict:
+ imp_cid = sres['haveit']['comicid']
+ else:
+ imp_cid = sres['haveit']
cVal = {"SRID": SRID,
"comicid": sres['comicid']}
#should store ogcname in here somewhere to account for naming conversions above.
nVal = {"Series": ComicName,
"results": len(sresults),
"publisher": sres['publisher'],
- "haveit": sres['haveit'],
+ "haveit": imp_cid,
"name": sres['name'],
"deck": sres['deck'],
"url": sres['url'],
@@ -5008,7 +5017,7 @@ class WebInterface(object):
"dognzb_verify": helpers.checked(mylar.CONFIG.DOGNZB_VERIFY),
"experimental": helpers.checked(mylar.CONFIG.EXPERIMENTAL),
"enable_torznab": helpers.checked(mylar.CONFIG.ENABLE_TORZNAB),
- "extra_torznabs": sorted(mylar.CONFIG.EXTRA_TORZNABS, key=itemgetter(4), reverse=True),
+ "extra_torznabs": sorted(mylar.CONFIG.EXTRA_TORZNABS, key=itemgetter(5), reverse=True),
"newznab": helpers.checked(mylar.CONFIG.NEWZNAB),
"extra_newznabs": sorted(mylar.CONFIG.EXTRA_NEWZNABS, key=itemgetter(5), reverse=True),
"enable_ddl": helpers.checked(mylar.CONFIG.ENABLE_DDL),
@@ -5140,6 +5149,7 @@ class WebInterface(object):
"opds_username": mylar.CONFIG.OPDS_USERNAME,
"opds_password": mylar.CONFIG.OPDS_PASSWORD,
"opds_metainfo": helpers.checked(mylar.CONFIG.OPDS_METAINFO),
+ "opds_pagesize": mylar.CONFIG.OPDS_PAGESIZE,
"dlstats": dlprovstats,
"dltotals": freq_tot,
"alphaindex": mylar.CONFIG.ALPHAINDEX
@@ -5286,28 +5296,41 @@ class WebInterface(object):
newValues['AlternateFileName'] = str(alt_filename)
#force the check/creation of directory com_location here
+ updatedir = True
if any([mylar.CONFIG.CREATE_FOLDERS is True, os.path.isdir(orig_location)]):
if os.path.isdir(str(com_location)):
logger.info(u"Validating Directory (" + str(com_location) + "). Already exists! Continuing...")
else:
- if orig_location != com_location:
+ if orig_location != com_location and os.path.isdir(orig_location) is True:
logger.fdebug('Renaming existing location [%s] to new location: %s' % (orig_location, com_location))
try:
os.rename(orig_location, com_location)
except Exception as e:
- logger.warn('Unable to rename existing directory: %s' % e)
- return
+ if 'No such file or directory' in e:
+ checkdirectory = filechecker.validateAndCreateDirectory(com_location, True)
+ if not checkdirectory:
+ logger.warn('Error trying to validate/create directory. Aborting this process at this time.')
+ updatedir = False
+ else:
+ logger.warn('Unable to rename existing directory: %s' % e)
+ updatedir = False
else:
- logger.fdebug("Updated Directory doesn't exist! - attempting to create now.")
+ if orig_location != com_location and os.path.isdir(orig_location) is False:
+ logger.fdebug("Original Directory (%s) doesn't exist! - attempting to create new directory (%s)" % (orig_location, com_location))
+ else:
+ logger.fdebug("Updated Directory doesn't exist! - attempting to create now.")
checkdirectory = filechecker.validateAndCreateDirectory(com_location, True)
if not checkdirectory:
logger.warn('Error trying to validate/create directory. Aborting this process at this time.')
- return
+ updatedir = False
- newValues['ComicLocation'] = com_location
+ else:
+ logger.info('[Create directories False] Not creating physical directory, but updating series location in dB to: %s' % com_location)
+ if updatedir is True:
+ newValues['ComicLocation'] = com_location
- myDB.upsert("comics", newValues, controlValueDict)
- logger.fdebug('Updated Series options!')
+ myDB.upsert("comics", newValues, controlValueDict)
+ logger.fdebug('Updated Series options!')
raise cherrypy.HTTPRedirect("comicDetails?ComicID=%s" % ComicID)
comic_config.exposed = True
@@ -5377,7 +5400,7 @@ class WebInterface(object):
'lowercase_filenames', 'autowant_upcoming', 'autowant_all', 'comic_cover_local', 'alternate_latest_series_covers', 'cvinfo', 'snatchedtorrent_notify',
'prowl_enabled', 'prowl_onsnatch', 'pushover_enabled', 'pushover_onsnatch', 'boxcar_enabled',
'boxcar_onsnatch', 'pushbullet_enabled', 'pushbullet_onsnatch', 'telegram_enabled', 'telegram_onsnatch', 'slack_enabled', 'slack_onsnatch',
- 'email_enabled', 'email_enc', 'email_ongrab', 'email_onpost', 'opds_enable', 'opds_authentication', 'opds_metainfo', 'enable_ddl', 'deluge_pause'] #enable_public
+ 'email_enabled', 'email_enc', 'email_ongrab', 'email_onpost', 'opds_enable', 'opds_authentication', 'opds_metainfo', 'opds_pagesize', 'enable_ddl', 'deluge_pause'] #enable_public
for checked_config in checked_configs:
if checked_config not in kwargs:
@@ -5426,6 +5449,10 @@ class WebInterface(object):
if torznab_name == "":
continue
torznab_host = helpers.clean_url(kwargs['torznab_host' + torznab_number])
+ try:
+ torznab_verify = kwargs['torznab_verify' + torznab_number]
+ except:
+ torznab_verify = 0
torznab_api = kwargs['torznab_apikey' + torznab_number]
torznab_category = kwargs['torznab_category' + torznab_number]
try:
@@ -5435,7 +5462,7 @@ class WebInterface(object):
del kwargs[kwarg]
- mylar.CONFIG.EXTRA_TORZNABS.append((torznab_name, torznab_host, torznab_api, torznab_category, torznab_enabled))
+ mylar.CONFIG.EXTRA_TORZNABS.append((torznab_name, torznab_host, torznab_verify, torznab_api, torznab_category, torznab_enabled))
mylar.CONFIG.process_kwargs(kwargs)
@@ -6006,6 +6033,24 @@ class WebInterface(object):
return 'Error - failed running test for %s' % name
testnewznab.exposed = True
+ def testtorznab(self, name, host, ssl, apikey):
+ logger.fdebug('ssl/verify: %s' % ssl)
+        if ssl == '0' or ssl == '1':
+ ssl = bool(int(ssl))
+ else:
+ if ssl == 'false':
+ ssl = False
+ else:
+ ssl = True
+ result = helpers.torznab_test(name, host, ssl, apikey)
+ if result is True:
+ logger.info('Successfully tested %s [%s] - valid api response received' % (name, host))
+ return 'Successfully tested %s!' % name
+ else:
+            logger.fdebug('torznab test response: %s' % result)
+ logger.warn('Testing failed to %s [HOST:%s][SSL:%s]' % (name, host, bool(ssl)))
+ return 'Error - failed running test for %s' % name
+ testtorznab.exposed = True
def orderThis(self, **kwargs):
return
@@ -6425,3 +6470,12 @@ class WebInterface(object):
download_specific_release.exposed = True
+ def read_comic(self, ish_id, page_num, size):
+ from mylar.webviewer import WebViewer
+ wv = WebViewer()
+ page_num = int(page_num)
+ #cherrypy.session['ishid'] = ish_id
+ data = wv.read_comic(ish_id, page_num, size)
+ #data = wv.read_comic(ish_id)
+ return data
+ read_comic.exposed = True
diff --git a/mylar/webviewer.py b/mylar/webviewer.py
new file mode 100644
index 00000000..e94c8821
--- /dev/null
+++ b/mylar/webviewer.py
@@ -0,0 +1,168 @@
+import os
+import re
+import cherrypy
+import stat
+import zipfile
+from lib.rarfile import rarfile
+
+import mylar
+
+try:
+    # Pillow is optional: a module-level 'return' here was a SyntaxError and
+    # 'logger' was not yet imported; record availability for callers instead.
+    from PIL import Image
+    PIL_AVAILABLE = True
+except ImportError:
+    PIL_AVAILABLE = False
+from mylar import logger, db, importer, mb, search, filechecker, helpers, updater, parseit, weeklypull, librarysync, moveit, Failed, readinglist, config
+from mylar.webserve import serve_template
+class WebViewer(object):
+    """Serves comic pages to the in-browser reader."""
+
+    def __init__(self):
+        self.ish_id = None
+        self.page_num = None
+        self.kwargs = None
+        self.data = None
+
+        # File-backed sessions keep reader state across restarts.
+        if not os.path.exists(os.path.join(mylar.DATA_DIR, 'sessions')):
+            os.makedirs(os.path.abspath(os.path.join(mylar.DATA_DIR, 'sessions')))
+
+        updatecherrypyconf = {
+            'tools.gzip.on': True,
+            'tools.gzip.mime_types': ['text/*', 'application/*', 'image/*'],
+            'tools.sessions.timeout': 1440,
+            'tools.sessions.storage_class': cherrypy.lib.sessions.FileSession,
+            'tools.sessions.storage_path': os.path.join(mylar.DATA_DIR, "sessions"),
+            'request.show_tracebacks': False,
+            'engine.timeout_monitor.on': False,
+        }
+        if mylar.CONFIG.HTTP_PASSWORD is None:
+            updatecherrypyconf.update({
+                'tools.sessions.on': True,
+            })
+
+        cherrypy.config.update(updatecherrypyconf)
+        cherrypy.engine.signals.subscribe()
+
+    def read_comic(self, ish_id=None, page_num=None, size=None):
+        """Unpack the issue's archive if needed and render the reader template."""
+        logger.debug("WebReader Requested, looking for ish_id %s and page_num %s" % (ish_id, page_num))
+        # Default to the 'wide' layout when no size preference was given.
+        user_size_pref = 'wide' if size is None else size
+
+        if ish_id is None:
+            logger.warn("WebReader: ish_id not set!")
+
+        # Look up the on-disk location of the requested issue.
+        myDB = db.DBConnection()
+        comic = myDB.selectone('select comics.ComicLocation, issues.Location from comics, issues where comics.comicid = issues.comicid and issues.issueid = ?' , [ish_id]).fetchone()
+        if comic is None:
+            logger.warn("WebReader: ish_id %s requested but not in the database!" % ish_id)
+            raise cherrypy.HTTPRedirect("home")
+        comic_path = os.path.join(comic['ComicLocation'], comic['Location'])
+        logger.debug("WebReader found ish_id %s at %s" % (ish_id, comic_path))
+
+        scanner = ComicScanner()
+        image_list = scanner.reading_images(ish_id)
+        logger.debug("Image list contains %s pages" % (len(image_list)))
+        if len(image_list) == 0:
+            logger.debug("Unpacking ish_id %s from comic_path %s" % (ish_id, comic_path))
+            scanner.user_unpack_comic(ish_id, comic_path)
+            # Re-scan: the first scan ran before the archive was extracted.
+            image_list = scanner.reading_images(ish_id)
+        else:
+            logger.debug("ish_id %s already unpacked." % ish_id)
+
+        num_pages = len(image_list)
+        logger.debug("Found %s pages for ish_id %s from comic_path %s" % (num_pages, ish_id, comic_path))
+
+        # Fall back to a placeholder image when nothing could be extracted.
+        if num_pages == 0:
+            image_list = ['images/skipped_icon.png']
+
+        # A per-comic cookie remembers the last page read for this issue.
+        cookie_comic = re.sub(r'\W+', '', comic_path)
+        cookie_comic = "wv_" + cookie_comic.decode('unicode_escape')
+        logger.debug("about to drop a cookie for " + cookie_comic + " which represents " + comic_path)
+        cookie_check = cherrypy.request.cookie
+        if cookie_comic not in cookie_check:
+            logger.debug("Cookie Creation")
+            cookie_path = '/'
+            cookie_maxage = '2419200'
+            cookie_set = cherrypy.response.cookie
+            # Key the cookie by the per-comic name, not the literal string,
+            # so the existence check above can ever succeed.
+            cookie_set[cookie_comic] = 0
+            cookie_set[cookie_comic]['path'] = cookie_path.decode('unicode_escape')
+            cookie_set[cookie_comic]['max-age'] = cookie_maxage.decode('unicode_escape')
+            next_page = page_num + 1
+            prev_page = page_num - 1
+        else:
+            logger.debug("Cookie Read")
+            page_num = int(cherrypy.request.cookie[cookie_comic].value)
+            logger.debug("Cookie Set To %d" % page_num)
+            next_page = page_num + 1
+            prev_page = page_num - 1
+
+        logger.info("Reader Served")
+        logger.debug("Serving comic " + comic['Location'] + " page number " + str(page_num))
+
+        # read.html drives paging via np/pp and the cookie name in cc.
+        return serve_template(templatename="read.html", pages=image_list, current_page=page_num, np=next_page, pp=prev_page, nop=num_pages, size=user_size_pref, cc=cookie_comic, comicpath=comic_path, ish_id=ish_id)
+
+    def up_size_pref(self, pref):
+        """Persist the user's preferred page size in the session."""
+        cherrypy.session.load()
+        cherrypy.session['sizepref'] = pref
+        cherrypy.session.save()
+        return
+
+class ComicScanner(object):
+
+    # This method will handle scanning the directories and returning a list of them all.
+    def dir_scan(self):
+        logger.debug("Dir Scan Requested")
+        full_paths = []
+        full_paths.append(mylar.CONFIG.DESTINATION_DIR)
+        for root, dirs, files in os.walk(mylar.CONFIG.DESTINATION_DIR):
+            full_paths.extend(os.path.join(root, d) for d in dirs)
+
+        logger.info("Dir Scan Completed")
+        logger.info("%i Dirs Found" % (len(full_paths)))
+        return full_paths
+
+    def user_unpack_comic(self, ish_id, comic_path):
+        """Clear any stale extraction for ish_id, then unpack the archive."""
+        logger.info("%s unpack requested" % comic_path)
+        unpack_dir = os.path.join(mylar.CONFIG.CACHE_DIR, "webviewer", ish_id)
+        # Remove files first, then directories (topdown=False yields leaves first).
+        for root, dirs, files in os.walk(unpack_dir, topdown=False):
+            for f in files:
+                os.chmod(os.path.join(root, f), stat.S_IRWXU | stat.S_IRWXG | stat.S_IRWXO)  # 0777
+                os.remove(os.path.join(root, f))
+        for root, dirs, files in os.walk(unpack_dir, topdown=False):
+            for d in dirs:
+                os.chmod(os.path.join(root, d), stat.S_IRWXU | stat.S_IRWXG | stat.S_IRWXO)  # 0777
+                os.rmdir(os.path.join(root, d))
+        if comic_path.endswith(".cbr"):
+            opened_rar = rarfile.RarFile(comic_path)
+            opened_rar.extractall(unpack_dir)
+        elif comic_path.endswith(".cbz"):
+            opened_zip = zipfile.ZipFile(comic_path)
+            opened_zip.extractall(unpack_dir)
+        return
+
+    # This method will return a list of image files in reading order for the reader view.
+    def reading_images(self, ish_id):
+        logger.debug("Image List Requested")
+        image_list = []
+        image_src = os.path.join(mylar.CONFIG.CACHE_DIR, "webviewer", ish_id)
+        image_loc = os.path.join(mylar.CONFIG.HTTP_ROOT, 'cache', "webviewer", ish_id)
+        for root, dirs, files in os.walk(image_src):
+            for f in files:
+                if f.endswith((".png", ".gif", ".bmp", ".dib", ".jpg", ".jpeg", ".jpe", ".jif", ".jfif", ".jfi", ".tiff", ".tif")):
+                    image_list.append(os.path.join(image_loc, f))
+        # Natural sort so page_10 follows page_9 instead of page_1.
+        image_list.sort(key=lambda p: [int(t) if t.isdigit() else t for t in re.split(r'(\d+)', p)])
+        logger.debug("Image List Created")
+        return image_list