Updated Mako template system to the latest version (1.0.6)

This commit is contained in:
Roberto Pastor 2016-12-16 12:59:19 +01:00 committed by evilhero
parent 47445452a7
commit 8a10181d32
25 changed files with 3714 additions and 2522 deletions

View File

@ -1,9 +1,8 @@
# mako/__init__.py # mako/__init__.py
# Copyright (C) 2006-2011 the Mako authors and contributors <see AUTHORS file> # Copyright (C) 2006-2016 the Mako authors and contributors <see AUTHORS file>
# #
# This module is part of Mako and is released under # This module is part of Mako and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php # the MIT License: http://www.opensource.org/licenses/mit-license.php
__version__ = '0.4.1' __version__ = '1.0.6'

View File

@ -1,5 +1,5 @@
# mako/_ast_util.py # mako/_ast_util.py
# Copyright (C) 2006-2011 the Mako authors and contributors <see AUTHORS file> # Copyright (C) 2006-2016 the Mako authors and contributors <see AUTHORS file>
# #
# This module is part of Mako and is released under # This module is part of Mako and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php # the MIT License: http://www.opensource.org/licenses/mit-license.php
@ -30,46 +30,46 @@
:copyright: Copyright 2008 by Armin Ronacher. :copyright: Copyright 2008 by Armin Ronacher.
:license: Python License. :license: Python License.
""" """
from _ast import * from _ast import * # noqa
from mako.compat import arg_stringname
BOOLOP_SYMBOLS = { BOOLOP_SYMBOLS = {
And: 'and', And: 'and',
Or: 'or' Or: 'or'
} }
BINOP_SYMBOLS = { BINOP_SYMBOLS = {
Add: '+', Add: '+',
Sub: '-', Sub: '-',
Mult: '*', Mult: '*',
Div: '/', Div: '/',
FloorDiv: '//', FloorDiv: '//',
Mod: '%', Mod: '%',
LShift: '<<', LShift: '<<',
RShift: '>>', RShift: '>>',
BitOr: '|', BitOr: '|',
BitAnd: '&', BitAnd: '&',
BitXor: '^' BitXor: '^'
} }
CMPOP_SYMBOLS = { CMPOP_SYMBOLS = {
Eq: '==', Eq: '==',
Gt: '>', Gt: '>',
GtE: '>=', GtE: '>=',
In: 'in', In: 'in',
Is: 'is', Is: 'is',
IsNot: 'is not', IsNot: 'is not',
Lt: '<', Lt: '<',
LtE: '<=', LtE: '<=',
NotEq: '!=', NotEq: '!=',
NotIn: 'not in' NotIn: 'not in'
} }
UNARYOP_SYMBOLS = { UNARYOP_SYMBOLS = {
Invert: '~', Invert: '~',
Not: 'not', Not: 'not',
UAdd: '+', UAdd: '+',
USub: '-' USub: '-'
} }
ALL_SYMBOLS = {} ALL_SYMBOLS = {}
@ -215,8 +215,8 @@ def get_compile_mode(node):
if not isinstance(node, mod): if not isinstance(node, mod):
raise TypeError('expected mod node, got %r' % node.__class__.__name__) raise TypeError('expected mod node, got %r' % node.__class__.__name__)
return { return {
Expression: 'eval', Expression: 'eval',
Interactive: 'single' Interactive: 'single'
}.get(node.__class__, 'expr') }.get(node.__class__, 'expr')
@ -246,6 +246,7 @@ def walk(node):
class NodeVisitor(object): class NodeVisitor(object):
""" """
Walks the abstract syntax tree and call visitor functions for every node Walks the abstract syntax tree and call visitor functions for every node
found. The visitor functions may return values which will be forwarded found. The visitor functions may return values which will be forwarded
@ -290,6 +291,7 @@ class NodeVisitor(object):
class NodeTransformer(NodeVisitor): class NodeTransformer(NodeVisitor):
""" """
Walks the abstract syntax tree and allows modifications of nodes. Walks the abstract syntax tree and allows modifications of nodes.
@ -349,6 +351,7 @@ class NodeTransformer(NodeVisitor):
class SourceGenerator(NodeVisitor): class SourceGenerator(NodeVisitor):
""" """
This visitor is able to transform a well formed syntax tree into python This visitor is able to transform a well formed syntax tree into python
sourcecode. For more details have a look at the docstring of the sourcecode. For more details have a look at the docstring of the
@ -388,6 +391,7 @@ class SourceGenerator(NodeVisitor):
def signature(self, node): def signature(self, node):
want_comma = [] want_comma = []
def write_comma(): def write_comma():
if want_comma: if want_comma:
self.write(', ') self.write(', ')
@ -403,10 +407,10 @@ class SourceGenerator(NodeVisitor):
self.visit(default) self.visit(default)
if node.vararg is not None: if node.vararg is not None:
write_comma() write_comma()
self.write('*' + node.vararg) self.write('*' + arg_stringname(node.vararg))
if node.kwarg is not None: if node.kwarg is not None:
write_comma() write_comma()
self.write('**' + node.kwarg) self.write('**' + arg_stringname(node.kwarg))
def decorators(self, node): def decorators(self, node):
for decorator in node.decorator_list: for decorator in node.decorator_list:
@ -460,6 +464,7 @@ class SourceGenerator(NodeVisitor):
def visit_ClassDef(self, node): def visit_ClassDef(self, node):
have_args = [] have_args = []
def paren_or_comma(): def paren_or_comma():
if have_args: if have_args:
self.write(', ') self.write(', ')
@ -481,11 +486,11 @@ class SourceGenerator(NodeVisitor):
paren_or_comma() paren_or_comma()
self.write(keyword.arg + '=') self.write(keyword.arg + '=')
self.visit(keyword.value) self.visit(keyword.value)
if node.starargs is not None: if getattr(node, "starargs", None):
paren_or_comma() paren_or_comma()
self.write('*') self.write('*')
self.visit(node.starargs) self.visit(node.starargs)
if node.kwargs is not None: if getattr(node, "kwargs", None):
paren_or_comma() paren_or_comma()
self.write('**') self.write('**')
self.visit(node.kwargs) self.visit(node.kwargs)
@ -631,6 +636,7 @@ class SourceGenerator(NodeVisitor):
def visit_Call(self, node): def visit_Call(self, node):
want_comma = [] want_comma = []
def write_comma(): def write_comma():
if want_comma: if want_comma:
self.write(', ') self.write(', ')
@ -646,11 +652,11 @@ class SourceGenerator(NodeVisitor):
write_comma() write_comma()
self.write(keyword.arg + '=') self.write(keyword.arg + '=')
self.visit(keyword.value) self.visit(keyword.value)
if node.starargs is not None: if getattr(node, "starargs", None):
write_comma() write_comma()
self.write('*') self.write('*')
self.visit(node.starargs) self.visit(node.starargs)
if node.kwargs is not None: if getattr(node, "kwargs", None):
write_comma() write_comma()
self.write('**') self.write('**')
self.visit(node.kwargs) self.visit(node.kwargs)
@ -659,6 +665,12 @@ class SourceGenerator(NodeVisitor):
def visit_Name(self, node): def visit_Name(self, node):
self.write(node.id) self.write(node.id)
def visit_NameConstant(self, node):
self.write(str(node.value))
def visit_arg(self, node):
self.write(node.arg)
def visit_Str(self, node): def visit_Str(self, node):
self.write(repr(node.s)) self.write(repr(node.s))

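A side note on the arg_stringname() calls added above (the helper comes from the new mako/compat.py later in this commit): on Python 3.4+ a function's *args/**kwargs appear in the AST as ast.arg nodes rather than plain strings, so the old "'*' + node.vararg" concatenation breaks. A small illustration, not part of the diff:

import ast

tree = ast.parse("def f(a, *extra, **options): pass")
fn = tree.body[0]
print(type(fn.args.vararg).__name__)   # 'arg' on Python 3.4+ (a plain string on older versions)
print(fn.args.vararg.arg)              # 'extra', the name arg_stringname() extracts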
View File

@ -1,36 +1,42 @@
# mako/ast.py # mako/ast.py
# Copyright (C) 2006-2011 the Mako authors and contributors <see AUTHORS file> # Copyright (C) 2006-2016 the Mako authors and contributors <see AUTHORS file>
# #
# This module is part of Mako and is released under # This module is part of Mako and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php # the MIT License: http://www.opensource.org/licenses/mit-license.php
"""utilities for analyzing expressions and blocks of Python """utilities for analyzing expressions and blocks of Python
code, as well as generating Python from AST nodes""" code, as well as generating Python from AST nodes"""
from mako import exceptions, pyparser, util from mako import exceptions, pyparser, compat
import re import re
class PythonCode(object): class PythonCode(object):
"""represents information about a string containing Python code""" """represents information about a string containing Python code"""
def __init__(self, code, **exception_kwargs): def __init__(self, code, **exception_kwargs):
self.code = code self.code = code
# represents all identifiers which are assigned to at some point in
# the code
self.declared_identifiers = set()
# represents all identifiers which are referenced before their
# assignment, if any
self.undeclared_identifiers = set()
# note that an identifier can be in both the undeclared and declared
# lists.
# using AST to parse instead of using code.co_varnames,
# code.co_names has several advantages: # code.co_names has several advantages:
# - we can locate an identifier as "undeclared" even if # - we can locate an identifier as "undeclared" even if
# its declared later in the same block of code # its declared later in the same block of code
# - AST is less likely to break with version changes # - AST is less likely to break with version changes
# (for example, the behavior of co_names changed a little bit # (for example, the behavior of co_names changed a little bit
# in python version 2.5) # in python version 2.5)
if isinstance(code, basestring): if isinstance(code, compat.string_types):
expr = pyparser.parse(code.lstrip(), "exec", **exception_kwargs) expr = pyparser.parse(code.lstrip(), "exec", **exception_kwargs)
else: else:
expr = code expr = code
@ -38,44 +44,51 @@ class PythonCode(object):
f = pyparser.FindIdentifiers(self, **exception_kwargs) f = pyparser.FindIdentifiers(self, **exception_kwargs)
f.visit(expr) f.visit(expr)
class ArgumentList(object): class ArgumentList(object):
"""parses a fragment of code as a comma-separated list of expressions""" """parses a fragment of code as a comma-separated list of expressions"""
def __init__(self, code, **exception_kwargs): def __init__(self, code, **exception_kwargs):
self.codeargs = [] self.codeargs = []
self.args = [] self.args = []
self.declared_identifiers = set() self.declared_identifiers = set()
self.undeclared_identifiers = set() self.undeclared_identifiers = set()
if isinstance(code, basestring): if isinstance(code, compat.string_types):
if re.match(r"\S", code) and not re.match(r",\s*$", code): if re.match(r"\S", code) and not re.match(r",\s*$", code):
# if theres text and no trailing comma, insure its parsed # if theres text and no trailing comma, insure its parsed
# as a tuple by adding a trailing comma # as a tuple by adding a trailing comma
code += "," code += ","
expr = pyparser.parse(code, "exec", **exception_kwargs) expr = pyparser.parse(code, "exec", **exception_kwargs)
else: else:
expr = code expr = code
f = pyparser.FindTuple(self, PythonCode, **exception_kwargs) f = pyparser.FindTuple(self, PythonCode, **exception_kwargs)
f.visit(expr) f.visit(expr)
class PythonFragment(PythonCode): class PythonFragment(PythonCode):
"""extends PythonCode to provide identifier lookups in partial control
statements
e.g.
for x in 5: for x in 5:
elif y==9: elif y==9:
except (MyException, e): except (MyException, e):
etc. etc.
""" """
def __init__(self, code, **exception_kwargs): def __init__(self, code, **exception_kwargs):
m = re.match(r'^(\w+)(?:\s+(.*?))?:\s*(#|$)', code.strip(), re.S) m = re.match(r'^(\w+)(?:\s+(.*?))?:\s*(#|$)', code.strip(), re.S)
if not m: if not m:
raise exceptions.CompileException( raise exceptions.CompileException(
"Fragment '%s' is not a partial control statement" % "Fragment '%s' is not a partial control statement" %
code, **exception_kwargs) code, **exception_kwargs)
if m.group(3): if m.group(3):
code = code[:m.start(3)] code = code[:m.start(3)]
(keyword, expr) = m.group(1,2) (keyword, expr) = m.group(1, 2)
if keyword in ['for','if', 'while']: if keyword in ['for', 'if', 'while']:
code = code + "pass" code = code + "pass"
elif keyword == 'try': elif keyword == 'try':
code = code + "pass\nexcept:pass" code = code + "pass\nexcept:pass"
@ -83,61 +96,96 @@ class PythonFragment(PythonCode):
code = "if False:pass\n" + code + "pass" code = "if False:pass\n" + code + "pass"
elif keyword == 'except': elif keyword == 'except':
code = "try:pass\n" + code + "pass" code = "try:pass\n" + code + "pass"
elif keyword == 'with':
code = code + "pass"
else: else:
raise exceptions.CompileException( raise exceptions.CompileException(
"Unsupported control keyword: '%s'" % "Unsupported control keyword: '%s'" %
keyword, **exception_kwargs) keyword, **exception_kwargs)
super(PythonFragment, self).__init__(code, **exception_kwargs) super(PythonFragment, self).__init__(code, **exception_kwargs)
class FunctionDecl(object): class FunctionDecl(object):
"""function declaration""" """function declaration"""
def __init__(self, code, allow_kwargs=True, **exception_kwargs): def __init__(self, code, allow_kwargs=True, **exception_kwargs):
self.code = code self.code = code
expr = pyparser.parse(code, "exec", **exception_kwargs) expr = pyparser.parse(code, "exec", **exception_kwargs)
f = pyparser.ParseFunc(self, **exception_kwargs) f = pyparser.ParseFunc(self, **exception_kwargs)
f.visit(expr) f.visit(expr)
if not hasattr(self, 'funcname'): if not hasattr(self, 'funcname'):
raise exceptions.CompileException( raise exceptions.CompileException(
"Code '%s' is not a function declaration" % code, "Code '%s' is not a function declaration" % code,
**exception_kwargs) **exception_kwargs)
if not allow_kwargs and self.kwargs: if not allow_kwargs and self.kwargs:
raise exceptions.CompileException( raise exceptions.CompileException(
"'**%s' keyword argument not allowed here" % "'**%s' keyword argument not allowed here" %
self.argnames[-1], **exception_kwargs) self.kwargnames[-1], **exception_kwargs)
def get_argument_expressions(self, include_defaults=True): def get_argument_expressions(self, as_call=False):
"""return the argument declarations of this FunctionDecl as a printable list.""" """Return the argument declarations of this FunctionDecl as a printable
list.
By default the return value is appropriate for writing in a ``def``;
set `as_call` to true to build arguments to be passed to the function
instead (assuming locals with the same names as the arguments exist).
"""
namedecls = [] namedecls = []
defaults = [d for d in self.defaults]
kwargs = self.kwargs # Build in reverse order, since defaults and slurpy args come last
varargs = self.varargs argnames = self.argnames[::-1]
argnames = [f for f in self.argnames] kwargnames = self.kwargnames[::-1]
argnames.reverse() defaults = self.defaults[::-1]
for arg in argnames: kwdefaults = self.kwdefaults[::-1]
default = None
if kwargs: # Named arguments
arg = "**" + arg if self.kwargs:
kwargs = False namedecls.append("**" + kwargnames.pop(0))
elif varargs:
arg = "*" + arg for name in kwargnames:
varargs = False # Keyword-only arguments must always be used by name, so even if
# this is a call, print out `foo=foo`
if as_call:
namedecls.append("%s=%s" % (name, name))
elif kwdefaults:
default = kwdefaults.pop(0)
if default is None:
# The AST always gives kwargs a default, since you can do
# `def foo(*, a=1, b, c=3)`
namedecls.append(name)
else:
namedecls.append("%s=%s" % (
name, pyparser.ExpressionGenerator(default).value()))
else: else:
default = len(defaults) and defaults.pop() or None namedecls.append(name)
if include_defaults and default:
namedecls.insert(0, "%s=%s" % # Positional arguments
(arg, if self.varargs:
pyparser.ExpressionGenerator(default).value() namedecls.append("*" + argnames.pop(0))
)
) for name in argnames:
if as_call or not defaults:
namedecls.append(name)
else: else:
namedecls.insert(0, arg) default = defaults.pop(0)
namedecls.append("%s=%s" % (
name, pyparser.ExpressionGenerator(default).value()))
namedecls.reverse()
return namedecls return namedecls
@property
def allargnames(self):
return tuple(self.argnames) + tuple(self.kwargnames)
class FunctionArgs(FunctionDecl): class FunctionArgs(FunctionDecl):
"""the argument portion of a function declaration""" """the argument portion of a function declaration"""
def __init__(self, code, **kwargs): def __init__(self, code, **kwargs):
super(FunctionArgs, self).__init__("def ANON(%s):pass" % code, **kwargs) super(FunctionArgs, self).__init__("def ANON(%s):pass" % code,
**kwargs)

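For reference, a hedged sketch (not part of the diff) of how the reworked get_argument_expressions() behaves. Inside Mako the **exception_kwargs (source, lineno, pos, filename) are supplied by the compiler; they are only needed when parsing fails, so they are omitted here:

from mako.ast import FunctionArgs

# FunctionArgs parses the argument list of a <%def>; as_call=True rebuilds
# the names as they would be written at a call site instead of in a def.
decl = FunctionArgs("x, y=5, *args, **kw")
print(decl.get_argument_expressions())              # ['x', 'y=5', '*args', '**kw']
print(decl.get_argument_expressions(as_call=True))  # ['x', 'y', '*args', '**kw']
print(decl.allargnames)                             # ('x', 'y', 'args', 'kw')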
View File

@ -1,124 +1,240 @@
# mako/cache.py # mako/cache.py
# Copyright (C) 2006-2011 the Mako authors and contributors <see AUTHORS file> # Copyright (C) 2006-2016 the Mako authors and contributors <see AUTHORS file>
# #
# This module is part of Mako and is released under # This module is part of Mako and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php # the MIT License: http://www.opensource.org/licenses/mit-license.php
from mako import exceptions from mako import compat, util
cache = None _cache_plugins = util.PluginLoader("mako.cache")
register_plugin = _cache_plugins.register
register_plugin("beaker", "mako.ext.beaker_cache", "BeakerCacheImpl")
class BeakerMissing(object):
def get_cache(self, name, **kwargs):
raise exceptions.RuntimeException("the Beaker package is required to use cache functionality.")
class Cache(object): class Cache(object):
"""Represents a data content cache made available to the module
space of a :class:`.Template` object.
:class:`.Cache` is a wrapper on top of a Beaker CacheManager object.
This object in turn references any number of "containers", each of
which defines its own backend (i.e. file, memory, memcached, etc.)
independently of the rest.
"""
def __init__(self, id, starttime):
self.id = id
self.starttime = starttime
self.def_regions = {}
def put(self, key, value, **kwargs):
"""Place a value in the cache.
:param key: the value's key.
:param value: the value
:param \**kwargs: cache configuration arguments. The
backend is configured using these arguments upon first request.
Subsequent requests that use the same series of configuration
values will use that same backend.
"""
defname = kwargs.pop('defname', None)
expiretime = kwargs.pop('expiretime', None)
createfunc = kwargs.pop('createfunc', None)
self._get_cache(defname, **kwargs).put_value(key, starttime=self.starttime, expiretime=expiretime)
def get(self, key, **kwargs):
"""Retrieve a value from the cache.
:param key: the value's key.
:param \**kwargs: cache configuration arguments. The
backend is configured using these arguments upon first request.
Subsequent requests that use the same series of configuration
values will use that same backend.
"""
defname = kwargs.pop('defname', None)
expiretime = kwargs.pop('expiretime', None)
createfunc = kwargs.pop('createfunc', None)
return self._get_cache(defname, **kwargs).get_value(key, starttime=self.starttime, expiretime=expiretime, createfunc=createfunc)
def invalidate(self, key, **kwargs):
"""Invalidate a value in the cache.
:param key: the value's key.
:param \**kwargs: cache configuration arguments. The
backend is configured using these arguments upon first request.
Subsequent requests that use the same series of configuration
values will use that same backend.
"""
defname = kwargs.pop('defname', None)
expiretime = kwargs.pop('expiretime', None)
createfunc = kwargs.pop('createfunc', None)
self._get_cache(defname, **kwargs).remove_value(key, starttime=self.starttime, expiretime=expiretime)
def invalidate_body(self):
"""Invalidate the cached content of the "body" method for this template.
"""
self.invalidate('render_body', defname='render_body')
def invalidate_def(self, name):
"""Invalidate the cached content of a particular <%def> within this template."""
self.invalidate('render_%s' % name, defname='render_%s' % name)
def invalidate_closure(self, name):
"""Invalidate a nested <%def> within this template.
Caching of nested defs is a blunt tool as there is no
management of scope - nested defs that use cache tags
need to have names unique of all other nested defs in the
template, else their content will be overwritten by
each other.
"""
self.invalidate(name, defname=name)
def _get_cache(self, defname, type=None, **kw):
global cache
if not cache:
try:
from beaker import cache as beaker_cache
cache = beaker_cache.CacheManager()
except ImportError:
# keep a fake cache around so subsequent
# calls don't attempt to re-import
cache = BeakerMissing()
if type == 'memcached':
type = 'ext:memcached'
if not type:
(type, kw) = self.def_regions.get(defname, ('memory', {}))
"""Represents a data content cache made available to the module
space of a specific :class:`.Template` object.
.. versionadded:: 0.6
:class:`.Cache` by itself is mostly a
container for a :class:`.CacheImpl` object, which implements
a fixed API to provide caching services; specific subclasses exist to
implement different
caching strategies. Mako includes a backend that works with
the Beaker caching system. Beaker itself then supports
a number of backends (i.e. file, memory, memcached, etc.)
The construction of a :class:`.Cache` is part of the mechanics
of a :class:`.Template`, and programmatic access to this
cache is typically via the :attr:`.Template.cache` attribute.
"""
impl = None
"""Provide the :class:`.CacheImpl` in use by this :class:`.Cache`.
This accessor allows a :class:`.CacheImpl` with additional
methods beyond that of :class:`.Cache` to be used programmatically.
"""
id = None
"""Return the 'id' that identifies this cache.
This is a value that should be globally unique to the
:class:`.Template` associated with this cache, and can
be used by a caching system to name a local container
for data specific to this template.
"""
starttime = None
"""Epochal time value for when the owning :class:`.Template` was
first compiled.
A cache implementation may wish to invalidate data earlier than
this timestamp; this has the effect of the cache for a specific
:class:`.Template` starting clean any time the :class:`.Template`
is recompiled, such as when the original template file changed on
the filesystem.
"""
def __init__(self, template, *args):
# check for a stale template calling the
# constructor
if isinstance(template, compat.string_types) and args:
return
self.template = template
self.id = template.module.__name__
self.starttime = template.module._modified_time
self._def_regions = {}
self.impl = self._load_impl(self.template.cache_impl)
def _load_impl(self, name):
return _cache_plugins.load(name)(self)
def get_or_create(self, key, creation_function, **kw):
"""Retrieve a value from the cache, using the given creation function
to generate a new value."""
return self._ctx_get_or_create(key, creation_function, None, **kw)
def _ctx_get_or_create(self, key, creation_function, context, **kw):
"""Retrieve a value from the cache, using the given creation function
to generate a new value."""
if not self.template.cache_enabled:
return creation_function()
return self.impl.get_or_create(
key,
creation_function,
**self._get_cache_kw(kw, context))
def set(self, key, value, **kw):
"""Place a value in the cache.
:param key: the value's key.
:param value: the value.
:param \**kw: cache configuration arguments.
"""
self.impl.set(key, value, **self._get_cache_kw(kw, None))
put = set
"""A synonym for :meth:`.Cache.set`.
This is here for backwards compatibility.
"""
def get(self, key, **kw):
"""Retrieve a value from the cache.
:param key: the value's key.
:param \**kw: cache configuration arguments. The
backend is configured using these arguments upon first request.
Subsequent requests that use the same series of configuration
values will use that same backend.
"""
return self.impl.get(key, **self._get_cache_kw(kw, None))
def invalidate(self, key, **kw):
"""Invalidate a value in the cache.
:param key: the value's key.
:param \**kw: cache configuration arguments. The
backend is configured using these arguments upon first request.
Subsequent requests that use the same series of configuration
values will use that same backend.
"""
self.impl.invalidate(key, **self._get_cache_kw(kw, None))
def invalidate_body(self):
"""Invalidate the cached content of the "body" method for this
template.
"""
self.invalidate('render_body', __M_defname='render_body')
def invalidate_def(self, name):
"""Invalidate the cached content of a particular ``<%def>`` within this
template.
"""
self.invalidate('render_%s' % name, __M_defname='render_%s' % name)
def invalidate_closure(self, name):
"""Invalidate a nested ``<%def>`` within this template.
Caching of nested defs is a blunt tool as there is no
management of scope -- nested defs that use cache tags
need to have names unique of all other nested defs in the
template, else their content will be overwritten by
each other.
"""
self.invalidate(name, __M_defname=name)
def _get_cache_kw(self, kw, context):
defname = kw.pop('__M_defname', None)
if not defname:
tmpl_kw = self.template.cache_args.copy()
tmpl_kw.update(kw)
elif defname in self._def_regions:
tmpl_kw = self._def_regions[defname]
else: else:
self.def_regions[defname] = (type, kw) tmpl_kw = self.template.cache_args.copy()
return cache.get_cache(self.id, type=type, **kw) tmpl_kw.update(kw)
self._def_regions[defname] = tmpl_kw
if context and self.impl.pass_context:
tmpl_kw = tmpl_kw.copy()
tmpl_kw.setdefault('context', context)
return tmpl_kw
class CacheImpl(object):
"""Provide a cache implementation for use by :class:`.Cache`."""
def __init__(self, cache):
self.cache = cache
pass_context = False
"""If ``True``, the :class:`.Context` will be passed to
:meth:`get_or_create <.CacheImpl.get_or_create>` as the name ``'context'``.
"""
def get_or_create(self, key, creation_function, **kw):
"""Retrieve a value from the cache, using the given creation function
to generate a new value.
This function *must* return a value, either from
the cache, or via the given creation function.
If the creation function is called, the newly
created value should be populated into the cache
under the given key before being returned.
:param key: the value's key.
:param creation_function: function that when called generates
a new value.
:param \**kw: cache configuration arguments.
"""
raise NotImplementedError()
def set(self, key, value, **kw):
"""Place a value in the cache.
:param key: the value's key.
:param value: the value.
:param \**kw: cache configuration arguments.
"""
raise NotImplementedError()
def get(self, key, **kw):
"""Retrieve a value from the cache.
:param key: the value's key.
:param \**kw: cache configuration arguments.
"""
raise NotImplementedError()
def invalidate(self, key, **kw):
"""Invalidate a value in the cache.
:param key: the value's key.
:param \**kw: cache configuration arguments.
"""
raise NotImplementedError()

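The PluginLoader/register_plugin hooks above replace the old hard-wired Beaker import. A minimal sketch of plugging in a custom backend (illustrative only, not part of the commit; DictCacheImpl is a made-up toy class, and register_plugin() takes a dotted module path, so __name__ works as long as the defining module is importable):

from mako.cache import CacheImpl, register_plugin
from mako.template import Template

class DictCacheImpl(CacheImpl):
    """Toy backend: a process-wide dict that ignores all cache arguments."""
    _store = {}

    def get_or_create(self, key, creation_function, **kw):
        if key not in self._store:
            self._store[key] = creation_function()
        return self._store[key]

    def set(self, key, value, **kw):
        self._store[key] = value

    def get(self, key, **kw):
        return self._store.get(key)

    def invalidate(self, key, **kw):
        self._store.pop(key, None)

# register under a name, then select it per template via cache_impl
register_plugin("dict", __name__, "DictCacheImpl")
t = Template('<%page cached="True"/>rendered once', cache_impl="dict")
print(t.render())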
lib/mako/cmd.py (new executable file, 67 lines)
View File

@ -0,0 +1,67 @@
# mako/cmd.py
# Copyright (C) 2006-2016 the Mako authors and contributors <see AUTHORS file>
#
# This module is part of Mako and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
from argparse import ArgumentParser
from os.path import isfile, dirname
import sys
from mako.template import Template
from mako.lookup import TemplateLookup
from mako import exceptions
def varsplit(var):
if "=" not in var:
return (var, "")
return var.split("=", 1)
def _exit():
sys.stderr.write(exceptions.text_error_template().render())
sys.exit(1)
def cmdline(argv=None):
parser = ArgumentParser("usage: %prog [FILENAME]")
parser.add_argument(
"--var", default=[], action="append",
help="variable (can be used multiple times, use name=value)")
parser.add_argument(
"--template-dir", default=[], action="append",
help="Directory to use for template lookup (multiple "
"directories may be provided). If not given then if the "
"template is read from stdin, the value defaults to be "
"the current directory, otherwise it defaults to be the "
"parent directory of the file provided.")
parser.add_argument('input', nargs='?', default='-')
options = parser.parse_args(argv)
if options.input == '-':
lookup_dirs = options.template_dir or ["."]
lookup = TemplateLookup(lookup_dirs)
try:
template = Template(sys.stdin.read(), lookup=lookup)
except:
_exit()
else:
filename = options.input
if not isfile(filename):
raise SystemExit("error: can't find %s" % filename)
lookup_dirs = options.template_dir or [dirname(filename)]
lookup = TemplateLookup(lookup_dirs)
try:
template = Template(filename=filename, lookup=lookup)
except:
_exit()
kw = dict([varsplit(var) for var in options.var])
try:
print(template.render(**kw))
except:
_exit()
if __name__ == "__main__":
cmdline()

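mako/cmd.py is new in this release and backs Mako's mako-render console script; the __main__ guard above also makes it runnable as python -m mako.cmd. A hedged illustration of driving it programmatically (Python 3 shown), equivalent to: echo 'hello, ${name}!' | python -m mako.cmd --var name=world

import io
import sys
from mako import cmd

sys.stdin = io.StringIO("hello, ${name}!")   # template arrives on stdin when input is '-'
cmd.cmdline(["--var", "name=world"])         # prints: hello, world!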
File diff suppressed because it is too large

lib/mako/compat.py (new file, 201 lines)
View File

@ -0,0 +1,201 @@
import sys
import time
py3k = sys.version_info >= (3, 0)
py33 = sys.version_info >= (3, 3)
py2k = sys.version_info < (3,)
py26 = sys.version_info >= (2, 6)
py27 = sys.version_info >= (2, 7)
jython = sys.platform.startswith('java')
win32 = sys.platform.startswith('win')
pypy = hasattr(sys, 'pypy_version_info')
if py3k:
# create a "getargspec" from getfullargspec(), which is not deprecated
# in Py3K; getargspec() has started to emit warnings as of Py3.5.
# As of Py3.4, now they are trying to move from getfullargspec()
# to "signature()", but getfullargspec() is not deprecated, so stick
# with that for now.
import collections
ArgSpec = collections.namedtuple(
"ArgSpec",
["args", "varargs", "keywords", "defaults"])
from inspect import getfullargspec as inspect_getfullargspec
def inspect_getargspec(func):
return ArgSpec(
*inspect_getfullargspec(func)[0:4]
)
else:
from inspect import getargspec as inspect_getargspec # noqa
if py3k:
from io import StringIO
import builtins as compat_builtins
from urllib.parse import quote_plus, unquote_plus
from html.entities import codepoint2name, name2codepoint
string_types = str,
binary_type = bytes
text_type = str
from io import BytesIO as byte_buffer
def u(s):
return s
def b(s):
return s.encode("latin-1")
def octal(lit):
return eval("0o" + lit)
else:
import __builtin__ as compat_builtins # noqa
try:
from cStringIO import StringIO
except:
from StringIO import StringIO
byte_buffer = StringIO
from urllib import quote_plus, unquote_plus # noqa
from htmlentitydefs import codepoint2name, name2codepoint # noqa
string_types = basestring, # noqa
binary_type = str
text_type = unicode # noqa
def u(s):
return unicode(s, "utf-8") # noqa
def b(s):
return s
def octal(lit):
return eval("0" + lit)
if py33:
from importlib import machinery
def load_module(module_id, path):
return machinery.SourceFileLoader(module_id, path).load_module()
else:
import imp
def load_module(module_id, path):
fp = open(path, 'rb')
try:
return imp.load_source(module_id, path, fp)
finally:
fp.close()
if py3k:
def reraise(tp, value, tb=None, cause=None):
if cause is not None:
value.__cause__ = cause
if value.__traceback__ is not tb:
raise value.with_traceback(tb)
raise value
else:
exec("def reraise(tp, value, tb=None, cause=None):\n"
" raise tp, value, tb\n")
def exception_as():
return sys.exc_info()[1]
try:
import threading
if py3k:
import _thread as thread
else:
import thread
except ImportError:
import dummy_threading as threading # noqa
if py3k:
import _dummy_thread as thread
else:
import dummy_thread as thread # noqa
if win32 or jython:
time_func = time.clock
else:
time_func = time.time
try:
from functools import partial
except:
def partial(func, *args, **keywords):
def newfunc(*fargs, **fkeywords):
newkeywords = keywords.copy()
newkeywords.update(fkeywords)
return func(*(args + fargs), **newkeywords)
return newfunc
all = all
import json # noqa
def exception_name(exc):
return exc.__class__.__name__
try:
from inspect import CO_VARKEYWORDS, CO_VARARGS
def inspect_func_args(fn):
if py3k:
co = fn.__code__
else:
co = fn.func_code
nargs = co.co_argcount
names = co.co_varnames
args = list(names[:nargs])
varargs = None
if co.co_flags & CO_VARARGS:
varargs = co.co_varnames[nargs]
nargs = nargs + 1
varkw = None
if co.co_flags & CO_VARKEYWORDS:
varkw = co.co_varnames[nargs]
if py3k:
return args, varargs, varkw, fn.__defaults__
else:
return args, varargs, varkw, fn.func_defaults
except ImportError:
import inspect
def inspect_func_args(fn):
return inspect.getargspec(fn)
if py3k:
def callable(fn):
return hasattr(fn, '__call__')
else:
callable = callable
################################################
# cross-compatible metaclass implementation
# Copyright (c) 2010-2012 Benjamin Peterson
def with_metaclass(meta, base=object):
"""Create a base class with a metaclass."""
return meta("%sBase" % meta.__name__, (base,), {})
################################################
def arg_stringname(func_arg):
"""Gets the string name of a kwarg or vararg
In Python3.4 a function's args are
of _ast.arg type not _ast.name
"""
if hasattr(func_arg, 'arg'):
return func_arg.arg
else:
return str(func_arg)

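mako/compat.py centralises the Python 2/3 shims (string_types, text_type, inspect_getargspec, arg_stringname, ...) that the rest of this diff switches the codebase over to. A quick illustration, not part of the commit:

from mako import compat

print(isinstance("abc", compat.string_types))   # True on Python 2 and 3

def sample(a, b=1, *rest, **opts):
    pass

spec = compat.inspect_getargspec(sample)
print(spec.args, spec.varargs, spec.keywords, spec.defaults)
# ['a', 'b'] rest opts (1,)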
View File

@ -1,90 +1,112 @@
# mako/exceptions.py # mako/exceptions.py
# Copyright (C) 2006-2011 the Mako authors and contributors <see AUTHORS file> # Copyright (C) 2006-2016 the Mako authors and contributors <see AUTHORS file>
# #
# This module is part of Mako and is released under # This module is part of Mako and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php # the MIT License: http://www.opensource.org/licenses/mit-license.php
"""exception classes""" """exception classes"""
import traceback, sys, re import traceback
from mako import util import sys
from mako import util, compat
class MakoException(Exception): class MakoException(Exception):
pass pass
class RuntimeException(MakoException): class RuntimeException(MakoException):
pass pass
def _format_filepos(lineno, pos, filename): def _format_filepos(lineno, pos, filename):
if filename is None: if filename is None:
return " at line: %d char: %d" % (lineno, pos) return " at line: %d char: %d" % (lineno, pos)
else: else:
return " in file '%s' at line: %d char: %d" % (filename, lineno, pos) return " in file '%s' at line: %d char: %d" % (filename, lineno, pos)
class CompileException(MakoException): class CompileException(MakoException):
def __init__(self, message, source, lineno, pos, filename): def __init__(self, message, source, lineno, pos, filename):
MakoException.__init__(self, message + _format_filepos(lineno, pos, filename)) MakoException.__init__(
self.lineno =lineno self,
self.pos = pos message + _format_filepos(lineno, pos, filename))
self.filename = filename self.lineno = lineno
self.source = source
class SyntaxException(MakoException):
def __init__(self, message, source, lineno, pos, filename):
MakoException.__init__(self, message + _format_filepos(lineno, pos, filename))
self.lineno =lineno
self.pos = pos self.pos = pos
self.filename = filename self.filename = filename
self.source = source self.source = source
class SyntaxException(MakoException):
def __init__(self, message, source, lineno, pos, filename):
MakoException.__init__(
self,
message + _format_filepos(lineno, pos, filename))
self.lineno = lineno
self.pos = pos
self.filename = filename
self.source = source
class UnsupportedError(MakoException): class UnsupportedError(MakoException):
"""raised when a retired feature is used.""" """raised when a retired feature is used."""
class NameConflictError(MakoException):
"""raised when a reserved word is used inappropriately"""
class TemplateLookupException(MakoException): class TemplateLookupException(MakoException):
pass pass
class TopLevelLookupException(TemplateLookupException): class TopLevelLookupException(TemplateLookupException):
pass pass
class RichTraceback(object): class RichTraceback(object):
"""Pulls the current exception from the sys traceback and extracts
"""Pull the current exception from the ``sys`` traceback and extracts
Mako-specific template information. Mako-specific template information.
See the usage examples in :ref:`handling_exceptions`. See the usage examples in :ref:`handling_exceptions`.
""" """
def __init__(self, error=None, traceback=None): def __init__(self, error=None, traceback=None):
self.source, self.lineno = "", 0 self.source, self.lineno = "", 0
if error is None or traceback is None: if error is None or traceback is None:
t, value, tback = sys.exc_info() t, value, tback = sys.exc_info()
if error is None: if error is None:
error = value or t error = value or t
if traceback is None: if traceback is None:
traceback = tback traceback = tback
self.error = error self.error = error
self.records = self._init(traceback) self.records = self._init(traceback)
if isinstance(self.error, (CompileException, SyntaxException)): if isinstance(self.error, (CompileException, SyntaxException)):
import mako.template
self.source = self.error.source self.source = self.error.source
self.lineno = self.error.lineno self.lineno = self.error.lineno
self._has_source = True self._has_source = True
self._init_message() self._init_message()
@property @property
def errorname(self): def errorname(self):
return util.exception_name(self.error) return compat.exception_name(self.error)
def _init_message(self): def _init_message(self):
"""Find a unicode representation of self.error""" """Find a unicode representation of self.error"""
try: try:
self.message = unicode(self.error) self.message = compat.text_type(self.error)
except UnicodeError: except UnicodeError:
try: try:
self.message = str(self.error) self.message = str(self.error)
@ -92,8 +114,8 @@ class RichTraceback(object):
# Fallback to args as neither unicode nor # Fallback to args as neither unicode nor
# str(Exception(u'\xe6')) work in Python < 2.6 # str(Exception(u'\xe6')) work in Python < 2.6
self.message = self.error.args[0] self.message = self.error.args[0]
if not isinstance(self.message, unicode): if not isinstance(self.message, compat.text_type):
self.message = unicode(self.message, 'ascii', 'replace') self.message = compat.text_type(self.message, 'ascii', 'replace')
def _get_reformatted_records(self, records): def _get_reformatted_records(self, records):
for rec in records: for rec in records:
@ -101,25 +123,25 @@ class RichTraceback(object):
yield (rec[4], rec[5], rec[2], rec[6]) yield (rec[4], rec[5], rec[2], rec[6])
else: else:
yield tuple(rec[0:4]) yield tuple(rec[0:4])
@property @property
def traceback(self): def traceback(self):
"""return a list of 4-tuple traceback records (i.e. normal python """Return a list of 4-tuple traceback records (i.e. normal python
format) with template-corresponding lines remapped to the originating format) with template-corresponding lines remapped to the originating
template. template.
""" """
return list(self._get_reformatted_records(self.records)) return list(self._get_reformatted_records(self.records))
@property @property
def reverse_records(self): def reverse_records(self):
return reversed(self.records) return reversed(self.records)
@property @property
def reverse_traceback(self): def reverse_traceback(self):
"""return the same data as traceback, except in reverse order. """Return the same data as traceback, except in reverse order.
""" """
return list(self._get_reformatted_records(self.reverse_records)) return list(self._get_reformatted_records(self.reverse_records))
def _init(self, trcback): def _init(self, trcback):
@ -145,7 +167,7 @@ class RichTraceback(object):
template_filename = info.template_filename or filename template_filename = info.template_filename or filename
except KeyError: except KeyError:
# A normal .py file (not a Template) # A normal .py file (not a Template)
if not util.py3k: if not compat.py3k:
try: try:
fp = open(filename, 'rb') fp = open(filename, 'rb')
encoding = util.parse_encoding(fp) encoding = util.parse_encoding(fp)
@ -156,32 +178,32 @@ class RichTraceback(object):
line = line.decode(encoding) line = line.decode(encoding)
else: else:
line = line.decode('ascii', 'replace') line = line.decode('ascii', 'replace')
new_trcback.append((filename, lineno, function, line, new_trcback.append((filename, lineno, function, line,
None, None, None, None)) None, None, None, None))
continue continue
template_ln = module_ln = 1 template_ln = 1
line_map = {}
for line in module_source.split("\n"): source_map = mako.template.ModuleInfo.\
match = re.match(r'\s*# SOURCE LINE (\d+)', line) get_module_source_metadata(
if match: module_source, full_line_map=True)
template_ln = int(match.group(1)) line_map = source_map['full_line_map']
module_ln += 1
line_map[module_ln] = template_ln template_lines = [line_ for line_ in
template_lines = [line for line in template_source.split("\n")]
template_source.split("\n")]
mods[filename] = (line_map, template_lines) mods[filename] = (line_map, template_lines)
template_ln = line_map[lineno] template_ln = line_map[lineno - 1]
if template_ln <= len(template_lines): if template_ln <= len(template_lines):
template_line = template_lines[template_ln - 1] template_line = template_lines[template_ln - 1]
else: else:
template_line = None template_line = None
new_trcback.append((filename, lineno, function, new_trcback.append((filename, lineno, function,
line, template_filename, template_ln, line, template_filename, template_ln,
template_line, template_source)) template_line, template_source))
if not self.source: if not self.source:
for l in range(len(new_trcback)-1, 0, -1): for l in range(len(new_trcback) - 1, 0, -1):
if new_trcback[l][5]: if new_trcback[l][5]:
self.source = new_trcback[l][7] self.source = new_trcback[l][7]
self.lineno = new_trcback[l][5] self.lineno = new_trcback[l][5]
@ -202,13 +224,13 @@ class RichTraceback(object):
self.lineno = new_trcback[-1][1] self.lineno = new_trcback[-1][1]
return new_trcback return new_trcback
def text_error_template(lookup=None): def text_error_template(lookup=None):
"""Provides a template that renders a stack trace in a similar format to """Provides a template that renders a stack trace in a similar format to
the Python interpreter, substituting source template filenames, line the Python interpreter, substituting source template filenames, line
numbers and code for that of the originating source template, as numbers and code for that of the originating source template, as
applicable. applicable.
""" """
import mako.template import mako.template
return mako.template.Template(r""" return mako.template.Template(r"""
@ -227,22 +249,48 @@ Traceback (most recent call last):
${tback.errorname}: ${tback.message} ${tback.errorname}: ${tback.message}
""") """)
def _install_pygments():
global syntax_highlight, pygments_html_formatter
from mako.ext.pygmentplugin import syntax_highlight # noqa
from mako.ext.pygmentplugin import pygments_html_formatter # noqa
def _install_fallback():
global syntax_highlight, pygments_html_formatter
from mako.filters import html_escape
pygments_html_formatter = None
def syntax_highlight(filename='', language=None):
return html_escape
def _install_highlighting():
try:
_install_pygments()
except ImportError:
_install_fallback()
_install_highlighting()
def html_error_template(): def html_error_template():
"""Provides a template that renders a stack trace in an HTML format, """Provides a template that renders a stack trace in an HTML format,
providing an excerpt of code as well as substituting source template providing an excerpt of code as well as substituting source template
filenames, line numbers and code for that of the originating source filenames, line numbers and code for that of the originating source
template, as applicable. template, as applicable.
The template's default ``encoding_errors`` value is
``'htmlentityreplace'``. The template has two options. With the
``full`` option disabled, only a section of an HTML document is
returned. With the ``css`` option disabled, the default stylesheet
won't be included.
""" """
import mako.template import mako.template
return mako.template.Template(r""" return mako.template.Template(r"""
<%! <%!
from mako.exceptions import RichTraceback from mako.exceptions import RichTraceback, syntax_highlight,\
pygments_html_formatter
%> %>
<%page args="full=True, css=True, error=None, traceback=None"/> <%page args="full=True, css=True, error=None, traceback=None"/>
% if full: % if full:
@ -256,10 +304,29 @@ def html_error_template():
.stacktrace { margin:5px 5px 5px 5px; } .stacktrace { margin:5px 5px 5px 5px; }
.highlight { padding:0px 10px 0px 10px; background-color:#9F9FDF; } .highlight { padding:0px 10px 0px 10px; background-color:#9F9FDF; }
.nonhighlight { padding:0px; background-color:#DFDFDF; } .nonhighlight { padding:0px; background-color:#DFDFDF; }
.sample { padding:10px; margin:10px 10px 10px 10px; font-family:monospace; } .sample { padding:10px; margin:10px 10px 10px 10px;
font-family:monospace; }
.sampleline { padding:0px 10px 0px 10px; } .sampleline { padding:0px 10px 0px 10px; }
.sourceline { margin:5px 5px 10px 5px; font-family:monospace;} .sourceline { margin:5px 5px 10px 5px; font-family:monospace;}
.location { font-size:80%; } .location { font-size:80%; }
.highlight { white-space:pre; }
.sampleline { white-space:pre; }
% if pygments_html_formatter:
${pygments_html_formatter.get_style_defs()}
.linenos { min-width: 2.5em; text-align: right; }
pre { margin: 0; }
.syntax-highlighted { padding: 0 10px; }
.syntax-highlightedtable { border-spacing: 1px; }
.nonhighlight { border-top: 1px solid #DFDFDF;
border-bottom: 1px solid #DFDFDF; }
.stacktrace .nonhighlight { margin: 5px 15px 10px; }
.sourceline { margin: 0 0; font-family:monospace; }
.code { background-color: #F8F8F8; width: 100%; }
.error .code { background-color: #FFBDBD; }
.error .syntax-highlighted { background-color: #FFBDBD; }
% endif
</style> </style>
% endif % endif
% if full: % if full:
@ -277,16 +344,29 @@ def html_error_template():
else: else:
lines = None lines = None
%> %>
<h3>${tback.errorname}: ${tback.message}</h3> <h3>${tback.errorname}: ${tback.message|h}</h3>
% if lines: % if lines:
<div class="sample"> <div class="sample">
<div class="nonhighlight"> <div class="nonhighlight">
% for index in range(max(0, line-4),min(len(lines), line+5)): % for index in range(max(0, line-4),min(len(lines), line+5)):
<%
if pygments_html_formatter:
pygments_html_formatter.linenostart = index + 1
%>
% if index + 1 == line: % if index + 1 == line:
<div class="highlight">${index + 1} ${lines[index] | h}</div> <%
if pygments_html_formatter:
old_cssclass = pygments_html_formatter.cssclass
pygments_html_formatter.cssclass = 'error ' + old_cssclass
%>
${lines[index] | syntax_highlight(language='mako')}
<%
if pygments_html_formatter:
pygments_html_formatter.cssclass = old_cssclass
%>
% else: % else:
<div class="sampleline">${index + 1} ${lines[index] | h}</div> ${lines[index] | syntax_highlight(language='mako')}
% endif % endif
% endfor % endfor
</div> </div>
@ -296,7 +376,13 @@ def html_error_template():
<div class="stacktrace"> <div class="stacktrace">
% for (filename, lineno, function, line) in tback.reverse_traceback: % for (filename, lineno, function, line) in tback.reverse_traceback:
<div class="location">${filename}, line ${lineno}:</div> <div class="location">${filename}, line ${lineno}:</div>
<div class="sourceline">${line | h}</div> <div class="nonhighlight">
<%
if pygments_html_formatter:
pygments_html_formatter.linenostart = lineno
%>
<div class="sourceline">${line | syntax_highlight(filename)}</div>
</div>
% endfor % endfor
</div> </div>
@ -304,4 +390,5 @@ def html_error_template():
</body> </body>
</html> </html>
% endif % endif
""", output_encoding=sys.getdefaultencoding(), encoding_errors='htmlentityreplace') """, output_encoding=sys.getdefaultencoding(),
encoding_errors='htmlentityreplace')

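The RichTraceback docstring above points at the handling_exceptions section of the docs; the documented pattern looks roughly like this (a sketch, not part of the diff), and the same data feeds html_error_template() shown above:

from mako.template import Template
from mako import exceptions

try:
    Template("hello ${person.name}").render(person=None)
except Exception:
    tb = exceptions.RichTraceback()
    for (filename, lineno, function, line) in tb.traceback:
        print("File %s, line %s, in %s" % (filename, lineno, function))
        print("    %s" % line)
    print("%s: %s" % (tb.errorname, tb.message))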
View File

@ -1,5 +1,5 @@
# ext/autohandler.py # ext/autohandler.py
# Copyright (C) 2006-2011 the Mako authors and contributors <see AUTHORS file> # Copyright (C) 2006-2016 the Mako authors and contributors <see AUTHORS file>
# #
# This module is part of Mako and is released under # This module is part of Mako and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php # the MIT License: http://www.opensource.org/licenses/mit-license.php
@ -25,7 +25,10 @@ or with custom autohandler filename:
""" """
import posixpath, os, re import posixpath
import os
import re
def autohandler(template, context, name='autohandler'): def autohandler(template, context, name='autohandler'):
lookup = context.lookup lookup = context.lookup
@ -42,24 +45,24 @@ def autohandler(template, context, name='autohandler'):
if path != _template_uri and _file_exists(lookup, path): if path != _template_uri and _file_exists(lookup, path):
if not lookup.filesystem_checks: if not lookup.filesystem_checks:
return lookup._uri_cache.setdefault( return lookup._uri_cache.setdefault(
(autohandler, _template_uri, name), path) (autohandler, _template_uri, name), path)
else: else:
return path return path
if len(tokens) == 1: if len(tokens) == 1:
break break
tokens[-2:] = [name] tokens[-2:] = [name]
if not lookup.filesystem_checks: if not lookup.filesystem_checks:
return lookup._uri_cache.setdefault( return lookup._uri_cache.setdefault(
(autohandler, _template_uri, name), None) (autohandler, _template_uri, name), None)
else: else:
return None return None
def _file_exists(lookup, path): def _file_exists(lookup, path):
psub = re.sub(r'^/', '',path) psub = re.sub(r'^/', '', path)
for d in lookup.directories: for d in lookup.directories:
if os.path.exists(d + '/' + psub): if os.path.exists(d + '/' + psub):
return True return True
else: else:
return False return False

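For context, the module docstring (only partially visible in this hunk) documents usage along these lines: a template opts in by inheriting from whatever autohandler() resolves, and passing name='somefilename.html' selects a custom autohandler filename.

<%!
    from mako.ext.autohandler import autohandler
%>
<%inherit file="${autohandler(template, context)}"/>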
View File

@ -1,15 +1,37 @@
# ext/babelplugin.py # ext/babelplugin.py
# Copyright (C) 2006-2011 the Mako authors and contributors <see AUTHORS file> # Copyright (C) 2006-2016 the Mako authors and contributors <see AUTHORS file>
# #
# This module is part of Mako and is released under # This module is part of Mako and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php # the MIT License: http://www.opensource.org/licenses/mit-license.php
"""gettext message extraction via Babel: http://babel.edgewall.org/""" """gettext message extraction via Babel: http://babel.edgewall.org/"""
from StringIO import StringIO
from babel.messages.extract import extract_python from babel.messages.extract import extract_python
from mako.ext.extract import MessageExtractor
class BabelMakoExtractor(MessageExtractor):
def __init__(self, keywords, comment_tags, options):
self.keywords = keywords
self.options = options
self.config = {
'comment-tags': u' '.join(comment_tags),
'encoding': options.get('input_encoding',
options.get('encoding', None)),
}
super(BabelMakoExtractor, self).__init__()
def __call__(self, fileobj):
return self.process_file(fileobj)
def process_python(self, code, code_lineno, translator_strings):
comment_tags = self.config['comment-tags']
for lineno, funcname, messages, python_translator_comments \
in extract_python(code,
self.keywords, comment_tags, self.options):
yield (code_lineno + (lineno - 1), funcname, messages,
translator_strings + python_translator_comments)
from mako import lexer, parsetree
def extract(fileobj, keywords, comment_tags, options): def extract(fileobj, keywords, comment_tags, options):
"""Extract messages from Mako templates. """Extract messages from Mako templates.
@ -23,107 +45,6 @@ def extract(fileobj, keywords, comment_tags, options):
:return: an iterator over ``(lineno, funcname, message, comments)`` tuples :return: an iterator over ``(lineno, funcname, message, comments)`` tuples
:rtype: ``iterator`` :rtype: ``iterator``
""" """
encoding = options.get('input_encoding', options.get('encoding', None)) extractor = BabelMakoExtractor(keywords, comment_tags, options)
for message in extractor(fileobj):
template_node = lexer.Lexer(fileobj.read(), yield message
input_encoding=encoding).parse()
for extracted in extract_nodes(template_node.get_children(),
keywords, comment_tags, options):
yield extracted
def extract_nodes(nodes, keywords, comment_tags, options):
"""Extract messages from Mako's lexer node objects
:param nodes: an iterable of Mako parsetree.Node objects to extract from
:param keywords: a list of keywords (i.e. function names) that should be
recognized as translation functions
:param comment_tags: a list of translator tags to search for and include
in the results
:param options: a dictionary of additional options (optional)
:return: an iterator over ``(lineno, funcname, message, comments)`` tuples
:rtype: ``iterator``
"""
translator_comments = []
in_translator_comments = False
for node in nodes:
child_nodes = None
if in_translator_comments and isinstance(node, parsetree.Text) and \
not node.content.strip():
# Ignore whitespace within translator comments
continue
if isinstance(node, parsetree.Comment):
value = node.text.strip()
if in_translator_comments:
translator_comments.extend(_split_comment(node.lineno, value))
continue
for comment_tag in comment_tags:
if value.startswith(comment_tag):
in_translator_comments = True
translator_comments.extend(_split_comment(node.lineno,
value))
continue
if isinstance(node, parsetree.DefTag):
code = node.function_decl.code
child_nodes = node.nodes
elif isinstance(node, parsetree.CallTag):
code = node.code.code
child_nodes = node.nodes
elif isinstance(node, parsetree.PageTag):
code = node.body_decl.code
elif isinstance(node, parsetree.CallNamespaceTag):
attribs = ', '.join(['%s=%s' % (key, val)
for key, val in node.attributes.iteritems()])
code = '{%s}' % attribs
child_nodes = node.nodes
elif isinstance(node, parsetree.ControlLine):
if node.isend:
translator_comments = []
in_translator_comments = False
continue
code = node.text
elif isinstance(node, parsetree.Code):
# <% and <%! blocks would provide their own translator comments
translator_comments = []
in_translator_comments = False
code = node.code.code
elif isinstance(node, parsetree.Expression):
code = node.code.code
else:
translator_comments = []
in_translator_comments = False
continue
# Comments don't apply unless they immediately preceed the message
if translator_comments and \
translator_comments[-1][0] < node.lineno - 1:
translator_comments = []
else:
translator_comments = \
[comment[1] for comment in translator_comments]
if isinstance(code, unicode):
code = code.encode('ascii', 'backslashreplace')
code = StringIO(code)
for lineno, funcname, messages, python_translator_comments \
in extract_python(code, keywords, comment_tags, options):
yield (node.lineno + (lineno - 1), funcname, messages,
translator_comments + python_translator_comments)
translator_comments = []
in_translator_comments = False
if child_nodes:
for extracted in extract_nodes(child_nodes, keywords, comment_tags,
options):
yield extracted
def _split_comment(lineno, comment):
"""Return the multiline comment at lineno split into a list of comment line
numbers and the accompanying comment line"""
return [(lineno + index, line) for index, line in
enumerate(comment.splitlines())]

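The node-walking logic removed here now lives in the shared mako.ext.extract.MessageExtractor (added later in this commit); babelplugin keeps only the Babel-specific process_python() step. A hedged sketch of calling the extract() entry point directly, assuming Babel is installed:

from io import BytesIO
from mako.ext.babelplugin import extract

template = BytesIO(b'<p>${_(u"Hello, world")}</p>')
for lineno, funcname, message, comments in extract(template, ['_'], [], {}):
    print(lineno, funcname, message, comments)
# roughly: 1 _ Hello, world []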
View File

@ -0,0 +1,76 @@
"""Provide a :class:`.CacheImpl` for the Beaker caching system."""
from mako import exceptions
from mako.cache import CacheImpl
try:
from beaker import cache as beaker_cache
except:
has_beaker = False
else:
has_beaker = True
_beaker_cache = None
class BeakerCacheImpl(CacheImpl):
"""A :class:`.CacheImpl` provided for the Beaker caching system.
This plugin is used by default, based on the default
value of ``'beaker'`` for the ``cache_impl`` parameter of the
:class:`.Template` or :class:`.TemplateLookup` classes.
"""
def __init__(self, cache):
if not has_beaker:
raise exceptions.RuntimeException(
"Can't initialize Beaker plugin; Beaker is not installed.")
global _beaker_cache
if _beaker_cache is None:
if 'manager' in cache.template.cache_args:
_beaker_cache = cache.template.cache_args['manager']
else:
_beaker_cache = beaker_cache.CacheManager()
super(BeakerCacheImpl, self).__init__(cache)
def _get_cache(self, **kw):
expiretime = kw.pop('timeout', None)
if 'dir' in kw:
kw['data_dir'] = kw.pop('dir')
elif self.cache.template.module_directory:
kw['data_dir'] = self.cache.template.module_directory
if 'manager' in kw:
kw.pop('manager')
if kw.get('type') == 'memcached':
kw['type'] = 'ext:memcached'
if 'region' in kw:
region = kw.pop('region')
cache = _beaker_cache.get_cache_region(self.cache.id, region, **kw)
else:
cache = _beaker_cache.get_cache(self.cache.id, **kw)
cache_args = {'starttime': self.cache.starttime}
if expiretime:
cache_args['expiretime'] = expiretime
return cache, cache_args
def get_or_create(self, key, creation_function, **kw):
cache, kw = self._get_cache(**kw)
return cache.get(key, createfunc=creation_function, **kw)
def put(self, key, value, **kw):
cache, kw = self._get_cache(**kw)
cache.put(key, value, **kw)
def get(self, key, **kw):
cache, kw = self._get_cache(**kw)
return cache.get(key, **kw)
def invalidate(self, key, **kw):
cache, kw = self._get_cache(**kw)
cache.remove_value(key, **kw)

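Nothing changes for templates that relied on the old behaviour: 'beaker' remains the default cache_impl, and the argument mapping shown in _get_cache() above (timeout to expiretime, type='memcached' to 'ext:memcached', dir to data_dir) is fed from Template cache_args or per-def cache_* attributes. A hedged sketch; rendering it requires Beaker to be installed:

from mako.template import Template

t = Template(
    "<%page cached='True'/>expensive body",
    cache_impl='beaker',                           # the default, shown explicitly
    cache_args={'type': 'memory', 'timeout': 60},  # becomes type=/expiretime= for Beaker
)
print(t.render())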
lib/mako/ext/extract.py (new file, 108 lines)
View File

@ -0,0 +1,108 @@
import re
from mako import compat
from mako import lexer
from mako import parsetree
class MessageExtractor(object):
def process_file(self, fileobj):
template_node = lexer.Lexer(
fileobj.read(),
input_encoding=self.config['encoding']).parse()
for extracted in self.extract_nodes(template_node.get_children()):
yield extracted
def extract_nodes(self, nodes):
translator_comments = []
in_translator_comments = False
input_encoding = self.config['encoding'] or 'ascii'
comment_tags = list(
filter(None, re.split(r'\s+', self.config['comment-tags'])))
for node in nodes:
child_nodes = None
if in_translator_comments and \
isinstance(node, parsetree.Text) and \
not node.content.strip():
# Ignore whitespace within translator comments
continue
if isinstance(node, parsetree.Comment):
value = node.text.strip()
if in_translator_comments:
translator_comments.extend(
self._split_comment(node.lineno, value))
continue
for comment_tag in comment_tags:
if value.startswith(comment_tag):
in_translator_comments = True
translator_comments.extend(
self._split_comment(node.lineno, value))
continue
if isinstance(node, parsetree.DefTag):
code = node.function_decl.code
child_nodes = node.nodes
elif isinstance(node, parsetree.BlockTag):
code = node.body_decl.code
child_nodes = node.nodes
elif isinstance(node, parsetree.CallTag):
code = node.code.code
child_nodes = node.nodes
elif isinstance(node, parsetree.PageTag):
code = node.body_decl.code
elif isinstance(node, parsetree.CallNamespaceTag):
code = node.expression
child_nodes = node.nodes
elif isinstance(node, parsetree.ControlLine):
if node.isend:
in_translator_comments = False
continue
code = node.text
elif isinstance(node, parsetree.Code):
in_translator_comments = False
code = node.code.code
elif isinstance(node, parsetree.Expression):
code = node.code.code
else:
continue
# Comments don't apply unless they immediately precede the message
if translator_comments and \
translator_comments[-1][0] < node.lineno - 1:
translator_comments = []
translator_strings = [
comment[1] for comment in translator_comments]
if isinstance(code, compat.text_type):
code = code.encode(input_encoding, 'backslashreplace')
used_translator_comments = False
# We add extra newline to work around a pybabel bug
# (see python-babel/babel#274, parse_encoding dies if the first
# line of the input is non-ascii)
# Also, because we added it, we have to subtract one from
# node.lineno
code = compat.byte_buffer(compat.b('\n') + code)
for message in self.process_python(
code, node.lineno - 1, translator_strings):
yield message
used_translator_comments = True
if used_translator_comments:
translator_comments = []
in_translator_comments = False
if child_nodes:
for extracted in self.extract_nodes(child_nodes):
yield extracted
@staticmethod
def _split_comment(lineno, comment):
"""Return the multiline comment at lineno split into a list of
comment line numbers and the accompanying comment line"""
return [(lineno + index, line) for index, line in
enumerate(comment.splitlines())]
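
(The class is driven entirely by ``self.config`` and a ``process_python()``
hook supplied by a concrete plugin. A minimal, hypothetical subclass, not
one of the shipped plugins, might look like this.)

import io
from mako.ext.extract import MessageExtractor

class EchoExtractor(MessageExtractor):
    # config keys read by process_file() / extract_nodes() above
    config = {'encoding': 'utf-8', 'comment-tags': 'TRANSLATORS:'}

    def process_python(self, code, code_lineno, translator_strings):
        # ``code`` is a byte buffer holding one embedded Python fragment;
        # a real plugin would hand it to a Python message extractor here
        yield code_lineno, code.getvalue(), translator_strings

for found in EchoExtractor().process_file(
        io.BytesIO(b"## TRANSLATORS: page title\n${_('Hello')}\n")):
    print(found)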

View File

@ -0,0 +1,43 @@
import io
from lingua.extractors import Extractor
from lingua.extractors import Message
from lingua.extractors import get_extractor
from mako.ext.extract import MessageExtractor
from mako import compat
class LinguaMakoExtractor(Extractor, MessageExtractor):
'''Mako templates'''
extensions = ['.mako']
default_config = {
'encoding': 'utf-8',
'comment-tags': '',
}
def __call__(self, filename, options, fileobj=None):
self.options = options
self.filename = filename
self.python_extractor = get_extractor('x.py')
if fileobj is None:
fileobj = open(filename, 'rb')
return self.process_file(fileobj)
def process_python(self, code, code_lineno, translator_strings):
source = code.getvalue().strip()
if source.endswith(compat.b(':')):
if source in (compat.b('try:'), compat.b('else:')) or source.startswith(compat.b('except')):
source = compat.b('') # Ignore try/except and else
elif source.startswith(compat.b('elif')):
source = source[2:] # Replace "elif" with "if"
source += compat.b('pass')
code = io.BytesIO(source)
for msg in self.python_extractor(
self.filename, self.options, code, code_lineno -1):
if translator_strings:
msg = Message(msg.msgctxt, msg.msgid, msg.msgid_plural,
msg.flags,
compat.u(' ').join(
translator_strings + [msg.comment]),
msg.tcomment, msg.location)
yield msg

View File

@ -1,20 +1,20 @@
# ext/preprocessors.py # ext/preprocessors.py
# Copyright (C) 2006-2011 the Mako authors and contributors <see AUTHORS file> # Copyright (C) 2006-2016 the Mako authors and contributors <see AUTHORS file>
# #
# This module is part of Mako and is released under # This module is part of Mako and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php # the MIT License: http://www.opensource.org/licenses/mit-license.php
"""preprocessing functions, used with the 'preprocessor' """preprocessing functions, used with the 'preprocessor'
argument on Template, TemplateLookup""" argument on Template, TemplateLookup"""
import re import re
def convert_comments(text): def convert_comments(text):
"""preprocess old style comments. """preprocess old style comments.
example:
from mako.ext.preprocessors import convert_comments
t = Template(..., preprocessor=preprocess_comments)"""
return re.sub(r'(?<=\n)\s*#[^#]', "##", text)
example:
from mako.ext.preprocessors import convert_comments
t = Template(..., preprocessor=convert_comments)"""
return re.sub(r'(?<=\n)\s*#[^#]', "##", text)
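
(A small illustrative run of the preprocessor; the template text is a
made-up example.)

from mako.ext.preprocessors import convert_comments

source = "Hello\n# legacy single-hash comment\nWorld\n"
print(convert_comments(source))
# the '#' line now starts with '##', Mako's comment marker, so the
# compiled template treats it as a comment rather than literal output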

View File

@ -1,23 +1,20 @@
# ext/pygmentplugin.py # ext/pygmentplugin.py
# Copyright (C) 2006-2011 the Mako authors and contributors <see AUTHORS file> # Copyright (C) 2006-2016 the Mako authors and contributors <see AUTHORS file>
# #
# This module is part of Mako and is released under # This module is part of Mako and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php # the MIT License: http://www.opensource.org/licenses/mit-license.php
import re
try:
set
except NameError:
from sets import Set as set
from pygments.lexers.web import \ from pygments.lexers.web import \
HtmlLexer, XmlLexer, JavascriptLexer, CssLexer HtmlLexer, XmlLexer, JavascriptLexer, CssLexer
from pygments.lexers.agile import PythonLexer from pygments.lexers.agile import PythonLexer, Python3Lexer
from pygments.lexer import Lexer, DelegatingLexer, RegexLexer, bygroups, \ from pygments.lexer import DelegatingLexer, RegexLexer, bygroups, \
include, using, this include, using
from pygments.token import Error, Punctuation, \ from pygments.token import \
Text, Comment, Operator, Keyword, Name, String, Number, Other, Literal Text, Comment, Operator, Keyword, Name, String, Other
from pygments.util import html_doctype_matches, looks_like_xml from pygments.formatters.html import HtmlFormatter
from pygments import highlight
from mako import compat
class MakoLexer(RegexLexer): class MakoLexer(RegexLexer):
name = 'Mako' name = 'Mako'
@ -30,13 +27,16 @@ class MakoLexer(RegexLexer):
bygroups(Text, Comment.Preproc, Keyword, Other)), bygroups(Text, Comment.Preproc, Keyword, Other)),
(r'(\s*)(\%(?!%))([^\n]*)(\n|\Z)', (r'(\s*)(\%(?!%))([^\n]*)(\n|\Z)',
bygroups(Text, Comment.Preproc, using(PythonLexer), Other)), bygroups(Text, Comment.Preproc, using(PythonLexer), Other)),
(r'(\s*)(##[^\n]*)(\n|\Z)', (r'(\s*)(##[^\n]*)(\n|\Z)',
bygroups(Text, Comment.Preproc, Other)), bygroups(Text, Comment.Preproc, Other)),
(r'''(?s)<%doc>.*?</%doc>''', Comment.Preproc), (r'''(?s)<%doc>.*?</%doc>''', Comment.Preproc),
(r'(<%)([\w\.\:]+)', bygroups(Comment.Preproc, Name.Builtin), 'tag'), (r'(<%)([\w\.\:]+)',
(r'(</%)([\w\.\:]+)(>)', bygroups(Comment.Preproc, Name.Builtin, Comment.Preproc)), bygroups(Comment.Preproc, Name.Builtin), 'tag'),
(r'(</%)([\w\.\:]+)(>)',
bygroups(Comment.Preproc, Name.Builtin, Comment.Preproc)),
(r'<%(?=([\w\.\:]+))', Comment.Preproc, 'ondeftags'), (r'<%(?=([\w\.\:]+))', Comment.Preproc, 'ondeftags'),
(r'(<%(?:!?))(.*?)(%>)(?s)', bygroups(Comment.Preproc, using(PythonLexer), Comment.Preproc)), (r'(<%(?:!?))(.*?)(%>)(?s)',
bygroups(Comment.Preproc, using(PythonLexer), Comment.Preproc)),
(r'(\$\{)(.*?)(\})', (r'(\$\{)(.*?)(\})',
bygroups(Comment.Preproc, using(PythonLexer), Comment.Preproc)), bygroups(Comment.Preproc, using(PythonLexer), Comment.Preproc)),
(r'''(?sx) (r'''(?sx)
@ -80,7 +80,8 @@ class MakoHtmlLexer(DelegatingLexer):
def __init__(self, **options): def __init__(self, **options):
super(MakoHtmlLexer, self).__init__(HtmlLexer, MakoLexer, super(MakoHtmlLexer, self).__init__(HtmlLexer, MakoLexer,
**options) **options)
class MakoXmlLexer(DelegatingLexer): class MakoXmlLexer(DelegatingLexer):
name = 'XML+Mako' name = 'XML+Mako'
@ -88,7 +89,8 @@ class MakoXmlLexer(DelegatingLexer):
def __init__(self, **options): def __init__(self, **options):
super(MakoXmlLexer, self).__init__(XmlLexer, MakoLexer, super(MakoXmlLexer, self).__init__(XmlLexer, MakoLexer,
**options) **options)
class MakoJavascriptLexer(DelegatingLexer): class MakoJavascriptLexer(DelegatingLexer):
name = 'JavaScript+Mako' name = 'JavaScript+Mako'
@ -96,7 +98,8 @@ class MakoJavascriptLexer(DelegatingLexer):
def __init__(self, **options): def __init__(self, **options):
super(MakoJavascriptLexer, self).__init__(JavascriptLexer, super(MakoJavascriptLexer, self).__init__(JavascriptLexer,
MakoLexer, **options) MakoLexer, **options)
class MakoCssLexer(DelegatingLexer): class MakoCssLexer(DelegatingLexer):
name = 'CSS+Mako' name = 'CSS+Mako'
@ -104,4 +107,21 @@ class MakoCssLexer(DelegatingLexer):
def __init__(self, **options): def __init__(self, **options):
super(MakoCssLexer, self).__init__(CssLexer, MakoLexer, super(MakoCssLexer, self).__init__(CssLexer, MakoLexer,
**options) **options)
pygments_html_formatter = HtmlFormatter(cssclass='syntax-highlighted',
linenos=True)
def syntax_highlight(filename='', language=None):
mako_lexer = MakoLexer()
if compat.py3k:
python_lexer = Python3Lexer()
else:
python_lexer = PythonLexer()
if filename.startswith('memory:') or language == 'mako':
return lambda string: highlight(string, mako_lexer,
pygments_html_formatter)
return lambda string: highlight(string, python_lexer,
pygments_html_formatter)
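
(A minimal sketch of the syntax_highlight() helper added above; it requires
Pygments, and the template string is an illustrative assumption.)

from mako.ext.pygmentplugin import syntax_highlight

highlighter = syntax_highlight(language='mako')
html = highlighter("% for i in range(3):\n  ${i}\n% endfor\n")
# an HTML fragment using the 'syntax-highlighted' CSS class with line
# numbers, as configured on pygments_html_formatter above
print(html)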

View File

@ -1,14 +1,16 @@
# ext/turbogears.py # ext/turbogears.py
# Copyright (C) 2006-2011 the Mako authors and contributors <see AUTHORS file> # Copyright (C) 2006-2016 the Mako authors and contributors <see AUTHORS file>
# #
# This module is part of Mako and is released under # This module is part of Mako and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php # the MIT License: http://www.opensource.org/licenses/mit-license.php
import re, inspect from mako import compat
from mako.lookup import TemplateLookup from mako.lookup import TemplateLookup
from mako.template import Template from mako.template import Template
class TGPlugin(object): class TGPlugin(object):
"""TurboGears compatible Template Plugin.""" """TurboGears compatible Template Plugin."""
def __init__(self, extra_vars_func=None, options=None, extension='mak'): def __init__(self, extra_vars_func=None, options=None, extension='mak'):
@ -19,17 +21,17 @@ class TGPlugin(object):
# Pull the options out and initialize the lookup # Pull the options out and initialize the lookup
lookup_options = {} lookup_options = {}
for k, v in options.iteritems(): for k, v in options.items():
if k.startswith('mako.'): if k.startswith('mako.'):
lookup_options[k[5:]] = v lookup_options[k[5:]] = v
elif k in ['directories', 'filesystem_checks', 'module_directory']: elif k in ['directories', 'filesystem_checks', 'module_directory']:
lookup_options[k] = v lookup_options[k] = v
self.lookup = TemplateLookup(**lookup_options) self.lookup = TemplateLookup(**lookup_options)
self.tmpl_options = {} self.tmpl_options = {}
# transfer lookup args to template args, based on those available # transfer lookup args to template args, based on those available
# in getargspec # in getargspec
for kw in inspect.getargspec(Template.__init__)[0]: for kw in compat.inspect_getargspec(Template.__init__)[0]:
if kw in lookup_options: if kw in lookup_options:
self.tmpl_options[kw] = lookup_options[kw] self.tmpl_options[kw] = lookup_options[kw]
@ -39,13 +41,14 @@ class TGPlugin(object):
return Template(template_string, **self.tmpl_options) return Template(template_string, **self.tmpl_options)
# Translate TG dot notation to normal / template path # Translate TG dot notation to normal / template path
if '/' not in templatename: if '/' not in templatename:
templatename = '/' + templatename.replace('.', '/') + '.' + self.extension templatename = '/' + templatename.replace('.', '/') + '.' +\
self.extension
# Lookup template # Lookup template
return self.lookup.get_template(templatename) return self.lookup.get_template(templatename)
def render(self, info, format="html", fragment=False, template=None): def render(self, info, format="html", fragment=False, template=None):
if isinstance(template, basestring): if isinstance(template, compat.string_types):
template = self.load_template(template) template = self.load_template(template)
# Load extra vars func if provided # Load extra vars func if provided
@ -53,4 +56,3 @@ class TGPlugin(object):
info.update(self.extra_vars_func()) info.update(self.extra_vars_func())
return template.render(**info) return template.render(**info)
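
(A minimal sketch of using the plugin outside of a TurboGears application;
the directory path and template name are illustrative assumptions.)

from mako.ext.turbogears import TGPlugin

plugin = TGPlugin(options={
    'mako.directories': ['/path/to/templates'],
    'mako.output_encoding': 'utf-8',
})
# dot notation resolves to '/path/to/templates/site/index.mak'
print(plugin.render({'title': 'Hello'}, template='site.index'))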

View File

@ -1,29 +1,39 @@
# mako/filters.py # mako/filters.py
# Copyright (C) 2006-2011 the Mako authors and contributors <see AUTHORS file> # Copyright (C) 2006-2016 the Mako authors and contributors <see AUTHORS file>
# #
# This module is part of Mako and is released under # This module is part of Mako and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php # the MIT License: http://www.opensource.org/licenses/mit-license.php
import re, urllib, htmlentitydefs, codecs import re
from StringIO import StringIO import codecs
from mako import util
from mako.compat import quote_plus, unquote_plus, codepoint2name, \
name2codepoint
from mako import compat
xml_escapes = { xml_escapes = {
'&' : '&amp;', '&': '&amp;',
'>' : '&gt;', '>': '&gt;',
'<' : '&lt;', '<': '&lt;',
'"' : '&#34;', # also &quot; in html-only '"': '&#34;', # also &quot; in html-only
"'" : '&#39;' # also &apos; in html-only "'": '&#39;' # also &apos; in html-only
} }
# XXX: &quot; is valid in HTML and XML # XXX: &quot; is valid in HTML and XML
# &apos; is not valid HTML, but is valid XML # &apos; is not valid HTML, but is valid XML
def legacy_html_escape(string):
"""legacy HTML escape for non-unicode mode."""
return re.sub(r'([&<"\'>])', lambda m: xml_escapes[m.group()], string) def legacy_html_escape(s):
"""legacy HTML escape for non-unicode mode."""
s = s.replace("&", "&amp;")
s = s.replace(">", "&gt;")
s = s.replace("<", "&lt;")
s = s.replace('"', "&#34;")
s = s.replace("'", "&#39;")
return s
try: try:
import markupsafe import markupsafe
@ -31,49 +41,61 @@ try:
except ImportError: except ImportError:
html_escape = legacy_html_escape html_escape = legacy_html_escape
def xml_escape(string): def xml_escape(string):
return re.sub(r'([&<"\'>])', lambda m: xml_escapes[m.group()], string) return re.sub(r'([&<"\'>])', lambda m: xml_escapes[m.group()], string)
def url_escape(string): def url_escape(string):
# convert into a list of octets # convert into a list of octets
string = string.encode("utf8") string = string.encode("utf8")
return urllib.quote_plus(string) return quote_plus(string)
def legacy_url_escape(string):
# convert into a list of octets
return quote_plus(string)
def url_unescape(string): def url_unescape(string):
text = urllib.unquote_plus(string) text = unquote_plus(string)
if not is_ascii_str(text): if not is_ascii_str(text):
text = text.decode("utf8") text = text.decode("utf8")
return text return text
def trim(string): def trim(string):
return string.strip() return string.strip()
class Decode(object): class Decode(object):
def __getattr__(self, key): def __getattr__(self, key):
def decode(x): def decode(x):
if isinstance(x, unicode): if isinstance(x, compat.text_type):
return x return x
elif not isinstance(x, str): elif not isinstance(x, compat.binary_type):
return unicode(str(x), encoding=key) return decode(str(x))
else: else:
return unicode(x, encoding=key) return compat.text_type(x, encoding=key)
return decode return decode
decode = Decode() decode = Decode()
_ASCII_re = re.compile(r'\A[\x00-\x7f]*\Z') _ASCII_re = re.compile(r'\A[\x00-\x7f]*\Z')
def is_ascii_str(text): def is_ascii_str(text):
return isinstance(text, str) and _ASCII_re.match(text) return isinstance(text, str) and _ASCII_re.match(text)
################################################################ ################################################################
class XMLEntityEscaper(object): class XMLEntityEscaper(object):
def __init__(self, codepoint2name, name2codepoint): def __init__(self, codepoint2name, name2codepoint):
self.codepoint2entity = dict([(c, u'&%s;' % n) self.codepoint2entity = dict([(c, compat.text_type('&%s;' % n))
for c,n in codepoint2name.iteritems()]) for c, n in codepoint2name.items()])
self.name2codepoint = name2codepoint self.name2codepoint = name2codepoint
def escape_entities(self, text): def escape_entities(self, text):
@ -81,7 +103,7 @@ class XMLEntityEscaper(object):
Only characters corresponding to a named entity are replaced. Only characters corresponding to a named entity are replaced.
""" """
return unicode(text).translate(self.codepoint2entity) return compat.text_type(text).translate(self.codepoint2entity)
def __escape(self, m): def __escape(self, m):
codepoint = ord(m.group()) codepoint = ord(m.group())
@ -90,7 +112,6 @@ class XMLEntityEscaper(object):
except (KeyError, IndexError): except (KeyError, IndexError):
return '&#x%X;' % codepoint return '&#x%X;' % codepoint
__escapable = re.compile(r'["&<>]|[^\x00-\x7f]') __escapable = re.compile(r'["&<>]|[^\x00-\x7f]')
def escape(self, text): def escape(self, text):
@ -102,7 +123,7 @@ class XMLEntityEscaper(object):
The return value is guaranteed to be ASCII. The return value is guaranteed to be ASCII.
""" """
return self.__escapable.sub(self.__escape, unicode(text) return self.__escapable.sub(self.__escape, compat.text_type(text)
).encode('ascii') ).encode('ascii')
# XXX: This regexp will not match all valid XML entity names__. # XXX: This regexp will not match all valid XML entity names__.
@ -115,7 +136,7 @@ class XMLEntityEscaper(object):
| ( (?!\d) [:\w] [-.:\w]+ ) | ( (?!\d) [:\w] [-.:\w]+ )
) ;''', ) ;''',
re.X | re.UNICODE) re.X | re.UNICODE)
def __unescape(self, m): def __unescape(self, m):
dval, hval, name = m.groups() dval, hval, name = m.groups()
if dval: if dval:
@ -127,8 +148,8 @@ class XMLEntityEscaper(object):
# U+FFFD = "REPLACEMENT CHARACTER" # U+FFFD = "REPLACEMENT CHARACTER"
if codepoint < 128: if codepoint < 128:
return chr(codepoint) return chr(codepoint)
return unichr(codepoint) return chr(codepoint)
def unescape(self, text): def unescape(self, text):
"""Unescape character references. """Unescape character references.
@ -138,8 +159,7 @@ class XMLEntityEscaper(object):
return self.__characterrefs.sub(self.__unescape, text) return self.__characterrefs.sub(self.__unescape, text)
_html_entities_escaper = XMLEntityEscaper(htmlentitydefs.codepoint2name, _html_entities_escaper = XMLEntityEscaper(codepoint2name, name2codepoint)
htmlentitydefs.name2codepoint)
html_entities_escape = _html_entities_escaper.escape_entities html_entities_escape = _html_entities_escaper.escape_entities
html_entities_unescape = _html_entities_escaper.unescape html_entities_unescape = _html_entities_escaper.unescape
@ -159,30 +179,31 @@ def htmlentityreplace_errors(ex):
# Handle encoding errors # Handle encoding errors
bad_text = ex.object[ex.start:ex.end] bad_text = ex.object[ex.start:ex.end]
text = _html_entities_escaper.escape(bad_text) text = _html_entities_escaper.escape(bad_text)
return (unicode(text), ex.end) return (compat.text_type(text), ex.end)
raise ex raise ex
codecs.register_error('htmlentityreplace', htmlentityreplace_errors) codecs.register_error('htmlentityreplace', htmlentityreplace_errors)
# TODO: options to make this dynamic per-compilation will be added in a later release # TODO: options to make this dynamic per-compilation will be added in a later
# release
DEFAULT_ESCAPES = { DEFAULT_ESCAPES = {
'x':'filters.xml_escape', 'x': 'filters.xml_escape',
'h':'filters.html_escape', 'h': 'filters.html_escape',
'u':'filters.url_escape', 'u': 'filters.url_escape',
'trim':'filters.trim', 'trim': 'filters.trim',
'entity':'filters.html_entities_escape', 'entity': 'filters.html_entities_escape',
'unicode':'unicode', 'unicode': 'unicode',
'decode':'decode', 'decode': 'decode',
'str':'str', 'str': 'str',
'n':'n' 'n': 'n'
} }
if util.py3k: if compat.py3k:
DEFAULT_ESCAPES.update({ DEFAULT_ESCAPES.update({
'unicode':'str' 'unicode': 'str'
}) })
NON_UNICODE_ESCAPES = DEFAULT_ESCAPES.copy() NON_UNICODE_ESCAPES = DEFAULT_ESCAPES.copy()
NON_UNICODE_ESCAPES['h'] = 'filters.legacy_html_escape' NON_UNICODE_ESCAPES['h'] = 'filters.legacy_html_escape'
NON_UNICODE_ESCAPES['u'] = 'filters.legacy_url_escape'
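
(A short sketch of the default expression filters wired up in
DEFAULT_ESCAPES; the template text is an illustrative assumption.)

from mako.template import Template

t = Template("${'<b>fish & chips</b>' | h} / ${'fish & chips' | u}")
print(t.render())
# -> &lt;b&gt;fish &amp; chips&lt;/b&gt; / fish+%26+chips
# 'h' routes to html_escape (markupsafe when available, otherwise
# legacy_html_escape); 'u' routes to url_escape / quote_plus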

View File

@ -1,21 +1,24 @@
# mako/lexer.py # mako/lexer.py
# Copyright (C) 2006-2011 the Mako authors and contributors <see AUTHORS file> # Copyright (C) 2006-2016 the Mako authors and contributors <see AUTHORS file>
# #
# This module is part of Mako and is released under # This module is part of Mako and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php # the MIT License: http://www.opensource.org/licenses/mit-license.php
"""provides the Lexer class for parsing template strings into parse trees.""" """provides the Lexer class for parsing template strings into parse trees."""
import re, codecs import re
from mako import parsetree, exceptions, util import codecs
from mako import parsetree, exceptions, compat
from mako.pygen import adjust_whitespace from mako.pygen import adjust_whitespace
_regexp_cache = {} _regexp_cache = {}
class Lexer(object): class Lexer(object):
def __init__(self, text, filename=None,
disable_unicode=False, def __init__(self, text, filename=None,
input_encoding=None, preprocessor=None): disable_unicode=False,
input_encoding=None, preprocessor=None):
self.text = text self.text = text
self.filename = filename self.filename = filename
self.template = parsetree.TemplateNode(self.filename) self.template = parsetree.TemplateNode(self.filename)
@ -25,31 +28,32 @@ class Lexer(object):
self.match_position = 0 self.match_position = 0
self.tag = [] self.tag = []
self.control_line = [] self.control_line = []
self.ternary_stack = []
self.disable_unicode = disable_unicode self.disable_unicode = disable_unicode
self.encoding = input_encoding self.encoding = input_encoding
if util.py3k and disable_unicode: if compat.py3k and disable_unicode:
raise exceptions.UnsupportedError( raise exceptions.UnsupportedError(
"Mako for Python 3 does not " "Mako for Python 3 does not "
"support disabling Unicode") "support disabling Unicode")
if preprocessor is None: if preprocessor is None:
self.preprocessor = [] self.preprocessor = []
elif not hasattr(preprocessor, '__iter__'): elif not hasattr(preprocessor, '__iter__'):
self.preprocessor = [preprocessor] self.preprocessor = [preprocessor]
else: else:
self.preprocessor = preprocessor self.preprocessor = preprocessor
@property @property
def exception_kwargs(self): def exception_kwargs(self):
return {'source':self.text, return {'source': self.text,
'lineno':self.matched_lineno, 'lineno': self.matched_lineno,
'pos':self.matched_charpos, 'pos': self.matched_charpos,
'filename':self.filename} 'filename': self.filename}
def match(self, regexp, flags=None): def match(self, regexp, flags=None):
"""compile the given regexp, cache the reg, and call match_reg().""" """compile the given regexp, cache the reg, and call match_reg()."""
try: try:
reg = _regexp_cache[(regexp, flags)] reg = _regexp_cache[(regexp, flags)]
except KeyError: except KeyError:
@ -58,14 +62,15 @@ class Lexer(object):
else: else:
reg = re.compile(regexp) reg = re.compile(regexp)
_regexp_cache[(regexp, flags)] = reg _regexp_cache[(regexp, flags)] = reg
return self.match_reg(reg) return self.match_reg(reg)
def match_reg(self, reg): def match_reg(self, reg):
"""match the given regular expression object to the current text position. """match the given regular expression object to the current text
position.
if a match occurs, update the current text and line position. if a match occurs, update the current text and line position.
""" """
mp = self.match_position mp = self.match_position
@ -80,43 +85,53 @@ class Lexer(object):
self.matched_lineno = self.lineno self.matched_lineno = self.lineno
lines = re.findall(r"\n", self.text[mp:self.match_position]) lines = re.findall(r"\n", self.text[mp:self.match_position])
cp = mp - 1 cp = mp - 1
while (cp >= 0 and cp<self.textlength and self.text[cp] != '\n'): while (cp >= 0 and cp < self.textlength and self.text[cp] != '\n'):
cp -=1 cp -= 1
self.matched_charpos = mp - cp self.matched_charpos = mp - cp
self.lineno += len(lines) self.lineno += len(lines)
#print "MATCHED:", match.group(0), "LINE START:", # print "MATCHED:", match.group(0), "LINE START:",
# self.matched_lineno, "LINE END:", self.lineno # self.matched_lineno, "LINE END:", self.lineno
#print "MATCH:", regexp, "\n", self.text[mp : mp + 15], (match and "TRUE" or "FALSE") # print "MATCH:", regexp, "\n", self.text[mp : mp + 15], \
# (match and "TRUE" or "FALSE")
return match return match
def parse_until_text(self, *text): def parse_until_text(self, watch_nesting, *text):
startpos = self.match_position startpos = self.match_position
text_re = r'|'.join(text)
brace_level = 0
paren_level = 0
bracket_level = 0
while True: while True:
match = self.match(r'#.*\n') match = self.match(r'#.*\n')
if match: if match:
continue continue
match = self.match(r'(\"\"\"|\'\'\'|\"|\')') match = self.match(r'(\"\"\"|\'\'\'|\"|\')[^\\]*?(\\.[^\\]*?)*\1',
re.S)
if match: if match:
m = self.match(r'.*?%s' % match.group(1), re.S) continue
if not m: match = self.match(r'(%s)' % text_re)
raise exceptions.SyntaxException( if match and not (watch_nesting
"Unmatched '%s'" % and (brace_level > 0 or paren_level > 0
match.group(1), or bracket_level > 0)):
**self.exception_kwargs) return \
else: self.text[startpos:
match = self.match(r'(%s)' % r'|'.join(text)) self.match_position - len(match.group(1))],\
if match: match.group(1)
return \ elif not match:
self.text[startpos:self.match_position-len(match.group(1))],\ match = self.match(r"(.*?)(?=\"|\'|#|%s)" % text_re, re.S)
match.group(1) if match:
else: brace_level += match.group(1).count('{')
match = self.match(r".*?(?=\"|\'|#|%s)" % r'|'.join(text), re.S) brace_level -= match.group(1).count('}')
if not match: paren_level += match.group(1).count('(')
raise exceptions.SyntaxException( paren_level -= match.group(1).count(')')
"Expected: %s" % bracket_level += match.group(1).count('[')
','.join(text), bracket_level -= match.group(1).count(']')
**self.exception_kwargs) continue
raise exceptions.SyntaxException(
"Expected: %s" %
','.join(text),
**self.exception_kwargs)
def append_node(self, nodecls, *args, **kwargs): def append_node(self, nodecls, *args, **kwargs):
kwargs.setdefault('source', self.text) kwargs.setdefault('source', self.text)
kwargs.setdefault('lineno', self.matched_lineno) kwargs.setdefault('lineno', self.matched_lineno)
@ -127,6 +142,17 @@ class Lexer(object):
self.tag[-1].nodes.append(node) self.tag[-1].nodes.append(node)
else: else:
self.template.nodes.append(node) self.template.nodes.append(node)
# build a set of child nodes for the control line
# (used for loop variable detection)
# also build a set of child nodes on ternary control lines
            # (used for determining if a pass needs to be auto-inserted)
if self.control_line:
control_frame = self.control_line[-1]
control_frame.nodes.append(node)
if not (isinstance(node, parsetree.ControlLine) and
control_frame.is_ternary(node.keyword)):
if self.ternary_stack and self.ternary_stack[-1]:
self.ternary_stack[-1][-1].nodes.append(node)
if isinstance(node, parsetree.Tag): if isinstance(node, parsetree.Tag):
if len(self.tag): if len(self.tag):
node.parent = self.tag[-1] node.parent = self.tag[-1]
@ -134,14 +160,19 @@ class Lexer(object):
elif isinstance(node, parsetree.ControlLine): elif isinstance(node, parsetree.ControlLine):
if node.isend: if node.isend:
self.control_line.pop() self.control_line.pop()
self.ternary_stack.pop()
elif node.is_primary: elif node.is_primary:
self.control_line.append(node) self.control_line.append(node)
elif len(self.control_line) and \ self.ternary_stack.append([])
elif self.control_line and \
self.control_line[-1].is_ternary(node.keyword):
self.ternary_stack[-1].append(node)
elif self.control_line and \
not self.control_line[-1].is_ternary(node.keyword): not self.control_line[-1].is_ternary(node.keyword):
raise exceptions.SyntaxException( raise exceptions.SyntaxException(
"Keyword '%s' not a legal ternary for keyword '%s'" % "Keyword '%s' not a legal ternary for keyword '%s'" %
(node.keyword, self.control_line[-1].keyword), (node.keyword, self.control_line[-1].keyword),
**self.exception_kwargs) **self.exception_kwargs)
_coding_re = re.compile(r'#.*coding[:=]\s*([-\w.]+).*\r?\n') _coding_re = re.compile(r'#.*coding[:=]\s*([-\w.]+).*\r?\n')
@ -151,7 +182,7 @@ class Lexer(object):
or raw if decode_raw=False or raw if decode_raw=False
""" """
if isinstance(text, unicode): if isinstance(text, compat.text_type):
m = self._coding_re.match(text) m = self._coding_re.match(text)
encoding = m and m.group(1) or known_encoding or 'ascii' encoding = m and m.group(1) or known_encoding or 'ascii'
return encoding, text return encoding, text
@ -162,10 +193,10 @@ class Lexer(object):
m = self._coding_re.match(text.decode('utf-8', 'ignore')) m = self._coding_re.match(text.decode('utf-8', 'ignore'))
if m is not None and m.group(1) != 'utf-8': if m is not None and m.group(1) != 'utf-8':
raise exceptions.CompileException( raise exceptions.CompileException(
"Found utf-8 BOM in file, with conflicting " "Found utf-8 BOM in file, with conflicting "
"magic encoding comment of '%s'" % m.group(1), "magic encoding comment of '%s'" % m.group(1),
text.decode('utf-8', 'ignore'), text.decode('utf-8', 'ignore'),
0, 0, filename) 0, 0, filename)
else: else:
m = self._coding_re.match(text.decode('utf-8', 'ignore')) m = self._coding_re.match(text.decode('utf-8', 'ignore'))
if m: if m:
@ -176,34 +207,35 @@ class Lexer(object):
if decode_raw: if decode_raw:
try: try:
text = text.decode(parsed_encoding) text = text.decode(parsed_encoding)
except UnicodeDecodeError, e: except UnicodeDecodeError:
raise exceptions.CompileException( raise exceptions.CompileException(
"Unicode decode operation of encoding '%s' failed" % "Unicode decode operation of encoding '%s' failed" %
parsed_encoding, parsed_encoding,
text.decode('utf-8', 'ignore'), text.decode('utf-8', 'ignore'),
0, 0, filename) 0, 0, filename)
return parsed_encoding, text return parsed_encoding, text
def parse(self): def parse(self):
self.encoding, self.text = self.decode_raw_stream(self.text, self.encoding, self.text = self.decode_raw_stream(
not self.disable_unicode, self.text,
self.encoding, not self.disable_unicode,
self.filename,) self.encoding,
self.filename)
for preproc in self.preprocessor: for preproc in self.preprocessor:
self.text = preproc(self.text) self.text = preproc(self.text)
# push the match marker past the # push the match marker past the
# encoding comment. # encoding comment.
self.match_reg(self._coding_re) self.match_reg(self._coding_re)
self.textlength = len(self.text) self.textlength = len(self.text)
while (True): while (True):
if self.match_position > self.textlength: if self.match_position > self.textlength:
break break
if self.match_end(): if self.match_end():
break break
if self.match_expression(): if self.match_expression():
@ -212,53 +244,56 @@ class Lexer(object):
continue continue
if self.match_comment(): if self.match_comment():
continue continue
if self.match_tag_start(): if self.match_tag_start():
continue continue
if self.match_tag_end(): if self.match_tag_end():
continue continue
if self.match_python_block(): if self.match_python_block():
continue continue
if self.match_text(): if self.match_text():
continue continue
if self.match_position > self.textlength: if self.match_position > self.textlength:
break break
raise exceptions.CompileException("assertion failed") raise exceptions.CompileException("assertion failed")
if len(self.tag): if len(self.tag):
raise exceptions.SyntaxException("Unclosed tag: <%%%s>" % raise exceptions.SyntaxException("Unclosed tag: <%%%s>" %
self.tag[-1].keyword, self.tag[-1].keyword,
**self.exception_kwargs) **self.exception_kwargs)
if len(self.control_line): if len(self.control_line):
raise exceptions.SyntaxException("Unterminated control keyword: '%s'" % raise exceptions.SyntaxException(
self.control_line[-1].keyword, "Unterminated control keyword: '%s'" %
self.text, self.control_line[-1].keyword,
self.control_line[-1].lineno, self.text,
self.control_line[-1].pos, self.filename) self.control_line[-1].lineno,
self.control_line[-1].pos, self.filename)
return self.template return self.template
def match_tag_start(self): def match_tag_start(self):
match = self.match(r''' match = self.match(r'''
\<% # opening tag \<% # opening tag
([\w\.\:]+) # keyword ([\w\.\:]+) # keyword
((?:\s+\w+|\s*=\s*|".*?"|'.*?')*) # attrname, = sign, string expression ((?:\s+\w+|\s*=\s*|".*?"|'.*?')*) # attrname, = \
# sign, string expression
\s* # more whitespace \s* # more whitespace
(/)?> # closing (/)?> # closing
''', ''',
re.I | re.S | re.X) re.I | re.S | re.X)
if match: if match:
keyword, attr, isend = match.group(1), match.group(2), match.group(3) keyword, attr, isend = match.groups()
self.keyword = keyword self.keyword = keyword
attributes = {} attributes = {}
if attr: if attr:
for att in re.findall(r"\s*(\w+)\s*=\s*(?:'([^']*)'|\"([^\"]*)\")", attr): for att in re.findall(
r"\s*(\w+)\s*=\s*(?:'([^']*)'|\"([^\"]*)\")", attr):
key, val1, val2 = att key, val1, val2 = att
text = val1 or val2 text = val1 or val2
text = text.replace('\r\n', '\n') text = text.replace('\r\n', '\n')
@ -268,36 +303,36 @@ class Lexer(object):
self.tag.pop() self.tag.pop()
else: else:
if keyword == 'text': if keyword == 'text':
match = self.match(r'(.*?)(?=\</%text>)', re.S) match = self.match(r'(.*?)(?=\</%text>)', re.S)
if not match: if not match:
raise exceptions.SyntaxException( raise exceptions.SyntaxException(
"Unclosed tag: <%%%s>" % "Unclosed tag: <%%%s>" %
self.tag[-1].keyword, self.tag[-1].keyword,
**self.exception_kwargs) **self.exception_kwargs)
self.append_node(parsetree.Text, match.group(1)) self.append_node(parsetree.Text, match.group(1))
return self.match_tag_end() return self.match_tag_end()
return True return True
else: else:
return False return False
def match_tag_end(self): def match_tag_end(self):
match = self.match(r'\</%[\t ]*(.+?)[\t ]*>') match = self.match(r'\</%[\t ]*(.+?)[\t ]*>')
if match: if match:
if not len(self.tag): if not len(self.tag):
raise exceptions.SyntaxException( raise exceptions.SyntaxException(
"Closing tag without opening tag: </%%%s>" % "Closing tag without opening tag: </%%%s>" %
match.group(1), match.group(1),
**self.exception_kwargs) **self.exception_kwargs)
elif self.tag[-1].keyword != match.group(1): elif self.tag[-1].keyword != match.group(1):
raise exceptions.SyntaxException( raise exceptions.SyntaxException(
"Closing tag </%%%s> does not match tag: <%%%s>" % "Closing tag </%%%s> does not match tag: <%%%s>" %
(match.group(1), self.tag[-1].keyword), (match.group(1), self.tag[-1].keyword),
**self.exception_kwargs) **self.exception_kwargs)
self.tag.pop() self.tag.pop()
return True return True
else: else:
return False return False
def match_end(self): def match_end(self):
match = self.match(r'\Z', re.S) match = self.match(r'\Z', re.S)
if match: if match:
@ -308,19 +343,17 @@ class Lexer(object):
return True return True
else: else:
return False return False
def match_text(self): def match_text(self):
match = self.match(r""" match = self.match(r"""
(.*?) # anything, followed by: (.*?) # anything, followed by:
( (
(?<=\n)(?=[ \t]*(?=%|\#\#)) # an eval or line-based (?<=\n)(?=[ \t]*(?=%|\#\#)) # an eval or line-based
# comment preceded by a # comment preceded by a
# consumed newline and whitespace # consumed newline and whitespace
| |
(?=\${) # an expression (?=\${) # an expression
| |
(?=\#\*) # multiline comment
|
(?=</?[%&]) # a substitution or block or call start or end (?=</?[%&]) # a substitution or block or call start or end
# - don't consume # - don't consume
| |
@ -328,7 +361,7 @@ class Lexer(object):
| |
\Z # end of string \Z # end of string
)""", re.X | re.S) )""", re.X | re.S)
if match: if match:
text = match.group(1) text = match.group(1)
if text: if text:
@ -336,43 +369,45 @@ class Lexer(object):
return True return True
else: else:
return False return False
def match_python_block(self): def match_python_block(self):
match = self.match(r"<%(!)?") match = self.match(r"<%(!)?")
if match: if match:
line, pos = self.matched_lineno, self.matched_charpos line, pos = self.matched_lineno, self.matched_charpos
text, end = self.parse_until_text(r'%>') text, end = self.parse_until_text(False, r'%>')
# the trailing newline helps # the trailing newline helps
# compiler.parse() not complain about indentation # compiler.parse() not complain about indentation
text = adjust_whitespace(text) + "\n" text = adjust_whitespace(text) + "\n"
self.append_node( self.append_node(
parsetree.Code, parsetree.Code,
text, text,
match.group(1)=='!', lineno=line, pos=pos) match.group(1) == '!', lineno=line, pos=pos)
return True return True
else: else:
return False return False
def match_expression(self): def match_expression(self):
match = self.match(r"\${") match = self.match(r"\${")
if match: if match:
line, pos = self.matched_lineno, self.matched_charpos line, pos = self.matched_lineno, self.matched_charpos
text, end = self.parse_until_text(r'\|', r'}') text, end = self.parse_until_text(True, r'\|', r'}')
if end == '|': if end == '|':
escapes, end = self.parse_until_text(r'}') escapes, end = self.parse_until_text(True, r'}')
else: else:
escapes = "" escapes = ""
text = text.replace('\r\n', '\n') text = text.replace('\r\n', '\n')
self.append_node( self.append_node(
parsetree.Expression, parsetree.Expression,
text, escapes.strip(), text, escapes.strip(),
lineno=line, pos=pos) lineno=line, pos=pos)
return True return True
else: else:
return False return False
def match_control_line(self): def match_control_line(self):
match = self.match(r"(?<=^)[\t ]*(%(?!%)|##)[\t ]*((?:(?:\\r?\n)|[^\r\n])*)(?:\r?\n|\Z)", re.M) match = self.match(
r"(?<=^)[\t ]*(%(?!%)|##)[\t ]*((?:(?:\\r?\n)|[^\r\n])*)"
r"(?:\r?\n|\Z)", re.M)
if match: if match:
operator = match.group(1) operator = match.group(1)
text = match.group(2) text = match.group(2)
@ -380,23 +415,23 @@ class Lexer(object):
m2 = re.match(r'(end)?(\w+)\s*(.*)', text) m2 = re.match(r'(end)?(\w+)\s*(.*)', text)
if not m2: if not m2:
raise exceptions.SyntaxException( raise exceptions.SyntaxException(
"Invalid control line: '%s'" % "Invalid control line: '%s'" %
text, text,
**self.exception_kwargs) **self.exception_kwargs)
isend, keyword = m2.group(1, 2) isend, keyword = m2.group(1, 2)
isend = (isend is not None) isend = (isend is not None)
if isend: if isend:
if not len(self.control_line): if not len(self.control_line):
raise exceptions.SyntaxException( raise exceptions.SyntaxException(
"No starting keyword '%s' for '%s'" % "No starting keyword '%s' for '%s'" %
(keyword, text), (keyword, text),
**self.exception_kwargs) **self.exception_kwargs)
elif self.control_line[-1].keyword != keyword: elif self.control_line[-1].keyword != keyword:
raise exceptions.SyntaxException( raise exceptions.SyntaxException(
"Keyword '%s' doesn't match keyword '%s'" % "Keyword '%s' doesn't match keyword '%s'" %
(text, self.control_line[-1].keyword), (text, self.control_line[-1].keyword),
**self.exception_kwargs) **self.exception_kwargs)
self.append_node(parsetree.ControlLine, keyword, isend, text) self.append_node(parsetree.ControlLine, keyword, isend, text)
else: else:
self.append_node(parsetree.Comment, text) self.append_node(parsetree.Comment, text)
@ -412,4 +447,3 @@ class Lexer(object):
return True return True
else: else:
return False return False
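
(Two behavioural changes in this rewrite are easy to demonstrate: the
nesting-aware parse_until_text() means a '}' inside an expression no longer
terminates it prematurely, and ternary control lines are tracked on
ternary_stack. The template strings below are illustrative assumptions.)

from mako.template import Template

# nested braces inside ${...} parse correctly
print(Template("${ {'a': 1, 'b': 2}['b'] }").render())   # -> 2

# elif/else lines are matched against their primary keyword
print(Template(
    "% if x > 0:\npos\n% elif x == 0:\nzero\n% else:\nneg\n% endif\n"
).render(x=0))                                            # -> zero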

View File

@ -1,10 +1,13 @@
# mako/lookup.py # mako/lookup.py
# Copyright (C) 2006-2011 the Mako authors and contributors <see AUTHORS file> # Copyright (C) 2006-2016 the Mako authors and contributors <see AUTHORS file>
# #
# This module is part of Mako and is released under # This module is part of Mako and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php # the MIT License: http://www.opensource.org/licenses/mit-license.php
import os, stat, posixpath, re import os
import stat
import posixpath
import re
from mako import exceptions, util from mako import exceptions, util
from mako.template import Template from mako.template import Template
@ -12,30 +15,32 @@ try:
import threading import threading
except: except:
import dummy_threading as threading import dummy_threading as threading
class TemplateCollection(object): class TemplateCollection(object):
"""Represent a collection of :class:`.Template` objects,
identifiable via uri. """Represent a collection of :class:`.Template` objects,
identifiable via URI.
A :class:`.TemplateCollection` is linked to the usage of A :class:`.TemplateCollection` is linked to the usage of
all template tags that address other templates, such all template tags that address other templates, such
as ``<%include>``, ``<%namespace>``, and ``<%inherit>``. as ``<%include>``, ``<%namespace>``, and ``<%inherit>``.
The ``file`` attribute of each of those tags refers The ``file`` attribute of each of those tags refers
to a string URI that is passed to that :class:`.Template` to a string URI that is passed to that :class:`.Template`
object's :class:`.TemplateCollection` for resolution. object's :class:`.TemplateCollection` for resolution.
:class:`.TemplateCollection` is an abstract class, :class:`.TemplateCollection` is an abstract class,
with the usual default implementation being :class:`.TemplateLookup`. with the usual default implementation being :class:`.TemplateLookup`.
""" """
def has_template(self, uri): def has_template(self, uri):
"""Return ``True`` if this :class:`.TemplateLookup` is """Return ``True`` if this :class:`.TemplateLookup` is
capable of returning a :class:`.Template` object for the capable of returning a :class:`.Template` object for the
given URL. given ``uri``.
:param uri: String URI of the template to be resolved.
:param uri: String uri of the template to be resolved.
""" """
try: try:
self.get_template(uri) self.get_template(uri)
@ -44,124 +49,140 @@ class TemplateCollection(object):
return False return False
def get_template(self, uri, relativeto=None): def get_template(self, uri, relativeto=None):
"""Return a :class:`.Template` object corresponding to the given """Return a :class:`.Template` object corresponding to the given
URL. ``uri``.
The default implementation raises The default implementation raises
:class:`.NotImplementedError`. Implementations should :class:`.NotImplementedError`. Implementations should
raise :class:`.TemplateLookupException` if the given uri raise :class:`.TemplateLookupException` if the given ``uri``
cannot be resolved. cannot be resolved.
:param uri: String uri of the template to be resolved. :param uri: String URI of the template to be resolved.
:param relativeto: if present, the given URI is assumed to :param relativeto: if present, the given ``uri`` is assumed to
be relative to this uri. be relative to this URI.
""" """
raise NotImplementedError() raise NotImplementedError()
def filename_to_uri(self, uri, filename): def filename_to_uri(self, uri, filename):
"""Convert the given filename to a uri relative to """Convert the given ``filename`` to a URI relative to
this TemplateCollection.""" this :class:`.TemplateCollection`."""
return uri return uri
def adjust_uri(self, uri, filename): def adjust_uri(self, uri, filename):
"""Adjust the given uri based on the calling filename. """Adjust the given ``uri`` based on the calling ``filename``.
When this method is called from the runtime, the When this method is called from the runtime, the
'filename' parameter is taken directly to the 'filename' ``filename`` parameter is taken directly to the ``filename``
attribute of the calling template. Therefore a custom attribute of the calling template. Therefore a custom
TemplateCollection subclass can place any string :class:`.TemplateCollection` subclass can place any string
identifier desired in the "filename" parameter of the identifier desired in the ``filename`` parameter of the
Template objects it constructs and have them come back :class:`.Template` objects it constructs and have them come back
here. here.
""" """
return uri return uri
class TemplateLookup(TemplateCollection): class TemplateLookup(TemplateCollection):
"""Represent a collection of templates that locates template source files """Represent a collection of templates that locates template source files
from the local filesystem. from the local filesystem.
The primary argument is the ``directories`` argument, the list of The primary argument is the ``directories`` argument, the list of
directories to search:: directories to search:
.. sourcecode:: python
lookup = TemplateLookup(["/path/to/templates"]) lookup = TemplateLookup(["/path/to/templates"])
some_template = lookup.get_template("/index.html") some_template = lookup.get_template("/index.html")
The :class:`.TemplateLookup` can also be given :class:`.Template` objects The :class:`.TemplateLookup` can also be given :class:`.Template` objects
programmatically using :meth:`.put_string` or :meth:`.put_template`:: programmatically using :meth:`.put_string` or :meth:`.put_template`:
.. sourcecode:: python
lookup = TemplateLookup() lookup = TemplateLookup()
lookup.put_string("base.html", ''' lookup.put_string("base.html", '''
<html><body>${self.next()}</body></html> <html><body>${self.next()}</body></html>
''') ''')
lookup.put_string("hello.html", ''' lookup.put_string("hello.html", '''
<%include file='base.html'/> <%include file='base.html'/>
Hello, world ! Hello, world !
''') ''')
:param directories: A list of directory names which will be :param directories: A list of directory names which will be
searched for a particular template URI. The URI is appended searched for a particular template URI. The URI is appended
to each directory and the filesystem checked. to each directory and the filesystem checked.
:param collection_size: Approximate size of the collection used :param collection_size: Approximate size of the collection used
to store templates. If left at its default of -1, the size to store templates. If left at its default of ``-1``, the size
is unbounded, and a plain Python dictionary is used to is unbounded, and a plain Python dictionary is used to
relate URI strings to :class:`.Template` instances. relate URI strings to :class:`.Template` instances.
Otherwise, a least-recently-used cache object is used which Otherwise, a least-recently-used cache object is used which
will maintain the size of the collection approximately to will maintain the size of the collection approximately to
the number given. the number given.
:param filesystem_checks: When at its default value of ``True``, :param filesystem_checks: When at its default value of ``True``,
each call to :meth:`TemplateLookup.get_template()` will each call to :meth:`.TemplateLookup.get_template()` will
compare the filesystem last modified time to the time in compare the filesystem last modified time to the time in
which an existing :class:`.Template` object was created. which an existing :class:`.Template` object was created.
This allows the :class:`.TemplateLookup` to regenerate a This allows the :class:`.TemplateLookup` to regenerate a
new :class:`.Template` whenever the original source has new :class:`.Template` whenever the original source has
been updated. Set this to ``False`` for a very minor been updated. Set this to ``False`` for a very minor
performance increase. performance increase.
:param modulename_callable: A callable which, when present, :param modulename_callable: A callable which, when present,
is passed the path of the source file as well as the is passed the path of the source file as well as the
requested URI, and then returns the full path of the requested URI, and then returns the full path of the
generated Python module file. This is used to inject generated Python module file. This is used to inject
alternate schemes for Pyhton module location. If left at alternate schemes for Python module location. If left at
its default of ``None``, the built in system of generation its default of ``None``, the built in system of generation
based on ``module_directory`` plus ``uri`` is used. based on ``module_directory`` plus ``uri`` is used.
All other keyword parameters available for All other keyword parameters available for
:class:`.Template` are mirrored here. When new :class:`.Template` are mirrored here. When new
:class:`.Template` objects are created, the keywords :class:`.Template` objects are created, the keywords
established with this :class:`.TemplateLookup` are passed on established with this :class:`.TemplateLookup` are passed on
to each new :class:`.Template`. to each new :class:`.Template`.
""" """
def __init__(self, def __init__(self,
directories=None, directories=None,
module_directory=None, module_directory=None,
filesystem_checks=True, filesystem_checks=True,
collection_size=-1, collection_size=-1,
format_exceptions=False, format_exceptions=False,
error_handler=None, error_handler=None,
disable_unicode=False, disable_unicode=False,
bytestring_passthrough=False, bytestring_passthrough=False,
output_encoding=None, output_encoding=None,
encoding_errors='strict', encoding_errors='strict',
cache_type=None,
cache_dir=None, cache_url=None, cache_args=None,
cache_enabled=True, cache_impl='beaker',
modulename_callable=None, cache_enabled=True,
default_filters=None, cache_type=None,
buffer_filters=(), cache_dir=None,
strict_undefined=False, cache_url=None,
imports=None,
input_encoding=None, modulename_callable=None,
preprocessor=None): module_writer=None,
default_filters=None,
buffer_filters=(),
strict_undefined=False,
imports=None,
future_imports=None,
enable_loop=True,
input_encoding=None,
preprocessor=None,
lexer_cls=None,
include_error_handler=None):
self.directories = [posixpath.normpath(d) for d in self.directories = [posixpath.normpath(d) for d in
util.to_list(directories, ()) util.to_list(directories, ())
] ]
@ -170,24 +191,39 @@ class TemplateLookup(TemplateCollection):
self.filesystem_checks = filesystem_checks self.filesystem_checks = filesystem_checks
self.collection_size = collection_size self.collection_size = collection_size
if cache_args is None:
cache_args = {}
# transfer deprecated cache_* args
if cache_dir:
cache_args.setdefault('dir', cache_dir)
if cache_url:
cache_args.setdefault('url', cache_url)
if cache_type:
cache_args.setdefault('type', cache_type)
self.template_args = { self.template_args = {
'format_exceptions':format_exceptions, 'format_exceptions': format_exceptions,
'error_handler':error_handler, 'error_handler': error_handler,
'disable_unicode':disable_unicode, 'include_error_handler': include_error_handler,
'bytestring_passthrough':bytestring_passthrough, 'disable_unicode': disable_unicode,
'output_encoding':output_encoding, 'bytestring_passthrough': bytestring_passthrough,
'encoding_errors':encoding_errors, 'output_encoding': output_encoding,
'input_encoding':input_encoding, 'cache_impl': cache_impl,
'module_directory':module_directory, 'encoding_errors': encoding_errors,
'cache_type':cache_type, 'input_encoding': input_encoding,
'cache_dir':cache_dir or module_directory, 'module_directory': module_directory,
'cache_url':cache_url, 'module_writer': module_writer,
'cache_enabled':cache_enabled, 'cache_args': cache_args,
'default_filters':default_filters, 'cache_enabled': cache_enabled,
'buffer_filters':buffer_filters, 'default_filters': default_filters,
'strict_undefined':strict_undefined, 'buffer_filters': buffer_filters,
'imports':imports, 'strict_undefined': strict_undefined,
'preprocessor':preprocessor} 'imports': imports,
'future_imports': future_imports,
'enable_loop': enable_loop,
'preprocessor': preprocessor,
'lexer_cls': lexer_cls
}
if collection_size == -1: if collection_size == -1:
self._collection = {} self._collection = {}
@ -196,15 +232,16 @@ class TemplateLookup(TemplateCollection):
self._collection = util.LRUCache(collection_size) self._collection = util.LRUCache(collection_size)
self._uri_cache = util.LRUCache(collection_size) self._uri_cache = util.LRUCache(collection_size)
self._mutex = threading.Lock() self._mutex = threading.Lock()
def get_template(self, uri): def get_template(self, uri):
"""Return a :class:`.Template` object corresponding to the given """Return a :class:`.Template` object corresponding to the given
URL. ``uri``.
Note the "relativeto" argument is not supported here at the moment. .. note:: The ``relativeto`` argument is not supported here at
the moment.
""" """
try: try:
if self.filesystem_checks: if self.filesystem_checks:
return self._check(uri, self._collection[uri]) return self._check(uri, self._collection[uri])
@ -213,33 +250,36 @@ class TemplateLookup(TemplateCollection):
except KeyError: except KeyError:
u = re.sub(r'^\/+', '', uri) u = re.sub(r'^\/+', '', uri)
for dir in self.directories: for dir in self.directories:
# make sure the path separators are posix - os.altsep is empty
# on POSIX and cannot be used.
dir = dir.replace(os.path.sep, posixpath.sep)
srcfile = posixpath.normpath(posixpath.join(dir, u)) srcfile = posixpath.normpath(posixpath.join(dir, u))
if os.path.isfile(srcfile): if os.path.isfile(srcfile):
return self._load(srcfile, uri) return self._load(srcfile, uri)
else: else:
raise exceptions.TopLevelLookupException( raise exceptions.TopLevelLookupException(
"Cant locate template for uri %r" % uri) "Cant locate template for uri %r" % uri)
def adjust_uri(self, uri, relativeto): def adjust_uri(self, uri, relativeto):
"""adjust the given uri based on the given relative uri.""" """Adjust the given ``uri`` based on the given relative URI."""
key = (uri, relativeto) key = (uri, relativeto)
if key in self._uri_cache: if key in self._uri_cache:
return self._uri_cache[key] return self._uri_cache[key]
if uri[0] != '/': if uri[0] != '/':
if relativeto is not None: if relativeto is not None:
v = self._uri_cache[key] = posixpath.join(posixpath.dirname(relativeto), uri) v = self._uri_cache[key] = posixpath.join(
posixpath.dirname(relativeto), uri)
else: else:
v = self._uri_cache[key] = '/' + uri v = self._uri_cache[key] = '/' + uri
else: else:
v = self._uri_cache[key] = uri v = self._uri_cache[key] = uri
return v return v
def filename_to_uri(self, filename): def filename_to_uri(self, filename):
"""Convert the given filename to a uri relative to """Convert the given ``filename`` to a URI relative to
this TemplateCollection.""" this :class:`.TemplateCollection`."""
try: try:
return self._uri_cache[filename] return self._uri_cache[filename]
@ -247,25 +287,25 @@ class TemplateLookup(TemplateCollection):
value = self._relativeize(filename) value = self._relativeize(filename)
self._uri_cache[filename] = value self._uri_cache[filename] = value
return value return value
def _relativeize(self, filename): def _relativeize(self, filename):
"""Return the portion of a filename that is 'relative' """Return the portion of a filename that is 'relative'
to the directories in this lookup. to the directories in this lookup.
""" """
filename = posixpath.normpath(filename) filename = posixpath.normpath(filename)
for dir in self.directories: for dir in self.directories:
if filename[0:len(dir)] == dir: if filename[0:len(dir)] == dir:
return filename[len(dir):] return filename[len(dir):]
else: else:
return None return None
def _load(self, filename, uri): def _load(self, filename, uri):
self._mutex.acquire() self._mutex.acquire()
try: try:
try: try:
# try returning from collection one # try returning from collection one
# more time in case concurrent thread already loaded # more time in case concurrent thread already loaded
return self._collection[uri] return self._collection[uri]
except KeyError: except KeyError:
@ -276,21 +316,21 @@ class TemplateLookup(TemplateCollection):
else: else:
module_filename = None module_filename = None
self._collection[uri] = template = Template( self._collection[uri] = template = Template(
uri=uri, uri=uri,
filename=posixpath.normpath(filename), filename=posixpath.normpath(filename),
lookup=self, lookup=self,
module_filename=module_filename, module_filename=module_filename,
**self.template_args) **self.template_args)
return template return template
except: except:
# if compilation fails etc, ensure # if compilation fails etc, ensure
# template is removed from collection, # template is removed from collection,
# re-raise # re-raise
self._collection.pop(uri, None) self._collection.pop(uri, None)
raise raise
finally: finally:
self._mutex.release() self._mutex.release()
def _check(self, uri, template): def _check(self, uri, template):
if template.filename is None: if template.filename is None:
return template return template
@ -298,7 +338,7 @@ class TemplateLookup(TemplateCollection):
try: try:
template_stat = os.stat(template.filename) template_stat = os.stat(template.filename)
if template.module._modified_time < \ if template.module._modified_time < \
template_stat[stat.ST_MTIME]: template_stat[stat.ST_MTIME]:
self._collection.pop(uri, None) self._collection.pop(uri, None)
return self._load(template.filename, uri) return self._load(template.filename, uri)
else: else:
@ -306,26 +346,24 @@ class TemplateLookup(TemplateCollection):
except OSError: except OSError:
self._collection.pop(uri, None) self._collection.pop(uri, None)
raise exceptions.TemplateLookupException( raise exceptions.TemplateLookupException(
"Cant locate template for uri %r" % uri) "Cant locate template for uri %r" % uri)
def put_string(self, uri, text): def put_string(self, uri, text):
"""Place a new :class:`.Template` object into this """Place a new :class:`.Template` object into this
:class:`.TemplateLookup`, based on the given string of :class:`.TemplateLookup`, based on the given string of
text. ``text``.
""" """
self._collection[uri] = Template( self._collection[uri] = Template(
text, text,
lookup=self, lookup=self,
uri=uri, uri=uri,
**self.template_args) **self.template_args)
def put_template(self, uri, template): def put_template(self, uri, template):
"""Place a new :class:`.Template` object into this """Place a new :class:`.Template` object into this
:class:`.TemplateLookup`, based on the given :class:`.TemplateLookup`, based on the given
:class:`.Template` object. :class:`.Template` object.
""" """
self._collection[uri] = template self._collection[uri] = template
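Together with get_template, these methods cover both filesystem templates and programmatically registered ones. A minimal usage sketch (the directory path and template text are placeholders):

    from mako.lookup import TemplateLookup

    lookup = TemplateLookup(directories=['/path/to/templates'])

    # register an in-memory template under a URI, then render it
    lookup.put_string('/hello.txt', 'hello, ${name}!')
    print(lookup.get_template('/hello.txt').render(name='world'))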

View File

@ -1,127 +1,145 @@
# mako/parsetree.py # mako/parsetree.py
# Copyright (C) 2006-2011 the Mako authors and contributors <see AUTHORS file> # Copyright (C) 2006-2016 the Mako authors and contributors <see AUTHORS file>
# #
# This module is part of Mako and is released under # This module is part of Mako and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php # the MIT License: http://www.opensource.org/licenses/mit-license.php
"""defines the parse tree components for Mako templates.""" """defines the parse tree components for Mako templates."""
from mako import exceptions, ast, util, filters from mako import exceptions, ast, util, filters, compat
import re import re
class Node(object): class Node(object):
"""base class for a Node in the parse tree.""" """base class for a Node in the parse tree."""
def __init__(self, source, lineno, pos, filename): def __init__(self, source, lineno, pos, filename):
self.source = source self.source = source
self.lineno = lineno self.lineno = lineno
self.pos = pos self.pos = pos
self.filename = filename self.filename = filename
@property @property
def exception_kwargs(self): def exception_kwargs(self):
return {'source':self.source, 'lineno':self.lineno, return {'source': self.source, 'lineno': self.lineno,
'pos':self.pos, 'filename':self.filename} 'pos': self.pos, 'filename': self.filename}
def get_children(self): def get_children(self):
return [] return []
def accept_visitor(self, visitor): def accept_visitor(self, visitor):
def traverse(node): def traverse(node):
for n in node.get_children(): for n in node.get_children():
n.accept_visitor(visitor) n.accept_visitor(visitor)
method = getattr(visitor, "visit" + self.__class__.__name__, traverse) method = getattr(visitor, "visit" + self.__class__.__name__, traverse)
method(self) method(self)
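accept_visitor dispatches to a visit<ClassName> method when the visitor defines one and otherwise recurses into the children, so a visitor only has to implement the node types it cares about. A minimal sketch (the TextCounter class is illustrative, not part of Mako):

    class TextCounter(object):
        """count Text nodes anywhere below the node it is handed to."""
        def __init__(self):
            self.count = 0

        def visitText(self, node):
            self.count += 1

    # counter = TextCounter()
    # template_node.accept_visitor(counter)   # template_node: a parsed TemplateNode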
class TemplateNode(Node): class TemplateNode(Node):
"""a 'container' node that stores the overall collection of nodes.""" """a 'container' node that stores the overall collection of nodes."""
def __init__(self, filename): def __init__(self, filename):
super(TemplateNode, self).__init__('', 0, 0, filename) super(TemplateNode, self).__init__('', 0, 0, filename)
self.nodes = [] self.nodes = []
self.page_attributes = {} self.page_attributes = {}
def get_children(self): def get_children(self):
return self.nodes return self.nodes
def __repr__(self): def __repr__(self):
return "TemplateNode(%s, %r)" % ( return "TemplateNode(%s, %r)" % (
util.sorted_dict_repr(self.page_attributes), util.sorted_dict_repr(self.page_attributes),
self.nodes) self.nodes)
class ControlLine(Node): class ControlLine(Node):
"""defines a control line, a line-oriented python line or end tag. """defines a control line, a line-oriented python line or end tag.
e.g.:: e.g.::
% if foo: % if foo:
(markup) (markup)
% endif % endif
""" """
has_loop_context = False
def __init__(self, keyword, isend, text, **kwargs): def __init__(self, keyword, isend, text, **kwargs):
super(ControlLine, self).__init__(**kwargs) super(ControlLine, self).__init__(**kwargs)
self.text = text self.text = text
self.keyword = keyword self.keyword = keyword
self.isend = isend self.isend = isend
self.is_primary = keyword in ['for','if', 'while', 'try'] self.is_primary = keyword in ['for', 'if', 'while', 'try', 'with']
self.nodes = []
if self.isend: if self.isend:
self._declared_identifiers = [] self._declared_identifiers = []
self._undeclared_identifiers = [] self._undeclared_identifiers = []
else: else:
code = ast.PythonFragment(text, **self.exception_kwargs) code = ast.PythonFragment(text, **self.exception_kwargs)
self._declared_identifiers = code.declared_identifiers self._declared_identifiers = code.declared_identifiers
self._undeclared_identifiers = code.undeclared_identifiers self._undeclared_identifiers = code.undeclared_identifiers
def get_children(self):
return self.nodes
def declared_identifiers(self): def declared_identifiers(self):
return self._declared_identifiers return self._declared_identifiers
def undeclared_identifiers(self): def undeclared_identifiers(self):
return self._undeclared_identifiers return self._undeclared_identifiers
def is_ternary(self, keyword): def is_ternary(self, keyword):
"""return true if the given keyword is a ternary keyword """return true if the given keyword is a ternary keyword
for this ControlLine""" for this ControlLine"""
return keyword in { return keyword in {
'if':set(['else', 'elif']), 'if': set(['else', 'elif']),
'try':set(['except', 'finally']), 'try': set(['except', 'finally']),
'for':set(['else']) 'for': set(['else'])
}.get(self.keyword, []) }.get(self.keyword, [])
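is_ternary is what lets the parser treat a "% else:" or "% elif ...:" line as a continuation of an enclosing "% if:" rather than a new block. A quick check of that intent (construction arguments are illustrative):

    from mako import parsetree

    kw = dict(source='', lineno=1, pos=1, filename='<memory>')
    line = parsetree.ControlLine('if', False, 'if x > 0:', **kw)
    assert line.is_ternary('else') and line.is_ternary('elif')
    assert not line.is_ternary('except')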
def __repr__(self): def __repr__(self):
return "ControlLine(%r, %r, %r, %r)" % ( return "ControlLine(%r, %r, %r, %r)" % (
self.keyword, self.keyword,
self.text, self.text,
self.isend, self.isend,
(self.lineno, self.pos) (self.lineno, self.pos)
) )
class Text(Node): class Text(Node):
"""defines plain text in the template.""" """defines plain text in the template."""
def __init__(self, content, **kwargs): def __init__(self, content, **kwargs):
super(Text, self).__init__(**kwargs) super(Text, self).__init__(**kwargs)
self.content = content self.content = content
def __repr__(self): def __repr__(self):
return "Text(%r, %r)" % (self.content, (self.lineno, self.pos)) return "Text(%r, %r)" % (self.content, (self.lineno, self.pos))
class Code(Node): class Code(Node):
"""defines a Python code block, either inline or module level. """defines a Python code block, either inline or module level.
e.g.:: e.g.::
inline: inline:
<% <%
x = 12 x = 12
%> %>
module level: module level:
<%! <%!
import logger import logger
%> %>
""" """
def __init__(self, text, ismodule, **kwargs): def __init__(self, text, ismodule, **kwargs):
@ -138,32 +156,36 @@ class Code(Node):
def __repr__(self): def __repr__(self):
return "Code(%r, %r, %r)" % ( return "Code(%r, %r, %r)" % (
self.text, self.text,
self.ismodule, self.ismodule,
(self.lineno, self.pos) (self.lineno, self.pos)
) )
class Comment(Node): class Comment(Node):
"""defines a comment line. """defines a comment line.
# this is a comment # this is a comment
""" """
def __init__(self, text, **kwargs): def __init__(self, text, **kwargs):
super(Comment, self).__init__(**kwargs) super(Comment, self).__init__(**kwargs)
self.text = text self.text = text
def __repr__(self): def __repr__(self):
return "Comment(%r, %r)" % (self.text, (self.lineno, self.pos)) return "Comment(%r, %r)" % (self.text, (self.lineno, self.pos))
class Expression(Node): class Expression(Node):
"""defines an inline expression. """defines an inline expression.
${x+y} ${x+y}
""" """
def __init__(self, text, escapes, **kwargs): def __init__(self, text, escapes, **kwargs):
super(Expression, self).__init__(**kwargs) super(Expression, self).__init__(**kwargs)
self.text = text self.text = text
@ -177,81 +199,83 @@ class Expression(Node):
def undeclared_identifiers(self): def undeclared_identifiers(self):
# TODO: make the "filter" shortcut list configurable at parse/gen time # TODO: make the "filter" shortcut list configurable at parse/gen time
return self.code.undeclared_identifiers.union( return self.code.undeclared_identifiers.union(
self.escapes_code.undeclared_identifiers.difference( self.escapes_code.undeclared_identifiers.difference(
set(filters.DEFAULT_ESCAPES.keys()) set(filters.DEFAULT_ESCAPES.keys())
) )
).difference(self.code.declared_identifiers) ).difference(self.code.declared_identifiers)
def __repr__(self): def __repr__(self):
return "Expression(%r, %r, %r)" % ( return "Expression(%r, %r, %r)" % (
self.text, self.text,
self.escapes_code.args, self.escapes_code.args,
(self.lineno, self.pos) (self.lineno, self.pos)
) )
class _TagMeta(type): class _TagMeta(type):
"""metaclass to allow Tag to produce a subclass according to """metaclass to allow Tag to produce a subclass according to
its keyword""" its keyword"""
_classmap = {} _classmap = {}
def __init__(cls, clsname, bases, dict): def __init__(cls, clsname, bases, dict):
if cls.__keyword__ is not None: if getattr(cls, '__keyword__', None) is not None:
cls._classmap[cls.__keyword__] = cls cls._classmap[cls.__keyword__] = cls
super(_TagMeta, cls).__init__(clsname, bases, dict) super(_TagMeta, cls).__init__(clsname, bases, dict)
def __call__(cls, keyword, attributes, **kwargs): def __call__(cls, keyword, attributes, **kwargs):
if ":" in keyword: if ":" in keyword:
ns, defname = keyword.split(':') ns, defname = keyword.split(':')
return type.__call__(CallNamespaceTag, ns, defname, return type.__call__(CallNamespaceTag, ns, defname,
attributes, **kwargs) attributes, **kwargs)
try: try:
cls = _TagMeta._classmap[keyword] cls = _TagMeta._classmap[keyword]
except KeyError: except KeyError:
raise exceptions.CompileException( raise exceptions.CompileException(
"No such tag: '%s'" % keyword, "No such tag: '%s'" % keyword,
source=kwargs['source'], source=kwargs['source'],
lineno=kwargs['lineno'], lineno=kwargs['lineno'],
pos=kwargs['pos'], pos=kwargs['pos'],
filename=kwargs['filename'] filename=kwargs['filename']
) )
return type.__call__(cls, keyword, attributes, **kwargs) return type.__call__(cls, keyword, attributes, **kwargs)
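In effect the metaclass builds a keyword-to-class registry, so instantiating Tag with a keyword returns the matching subclass defined later in this module, and a "ns:defname" keyword is routed to CallNamespaceTag. Illustration only (the attribute values and filename are made up):

    from mako import parsetree

    kw = dict(source='', lineno=1, pos=1, filename='<memory>')
    tag = parsetree.Tag('include', {'file': 'header.html'}, **kw)
    assert isinstance(tag, parsetree.IncludeTag)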
class Tag(Node):
class Tag(compat.with_metaclass(_TagMeta, Node)):
"""abstract base class for tags. """abstract base class for tags.
<%sometag/> <%sometag/>
<%someothertag> <%someothertag>
stuff stuff
</%someothertag> </%someothertag>
""" """
__metaclass__ = _TagMeta
__keyword__ = None __keyword__ = None
def __init__(self, keyword, attributes, expressions, def __init__(self, keyword, attributes, expressions,
nonexpressions, required, **kwargs): nonexpressions, required, **kwargs):
"""construct a new Tag instance. """construct a new Tag instance.
            this constructor is not called directly, and is only called this constructor is not called directly, and is only called
by subclasses. by subclasses.
:param keyword: the tag keyword :param keyword: the tag keyword
:param attributes: raw dictionary of attribute key/value pairs :param attributes: raw dictionary of attribute key/value pairs
:param expressions: a set of identifiers that are legal attributes, :param expressions: a set of identifiers that are legal attributes,
which can also contain embedded expressions which can also contain embedded expressions
:param nonexpressions: a set of identifiers that are legal :param nonexpressions: a set of identifiers that are legal
attributes, which cannot contain embedded expressions attributes, which cannot contain embedded expressions
:param \**kwargs: :param \**kwargs:
other arguments passed to the Node superclass (lineno, pos) other arguments passed to the Node superclass (lineno, pos)
""" """
super(Tag, self).__init__(**kwargs) super(Tag, self).__init__(**kwargs)
self.keyword = keyword self.keyword = keyword
@ -260,18 +284,18 @@ class Tag(Node):
missing = [r for r in required if r not in self.parsed_attributes] missing = [r for r in required if r not in self.parsed_attributes]
if len(missing): if len(missing):
raise exceptions.CompileException( raise exceptions.CompileException(
"Missing attribute(s): %s" % "Missing attribute(s): %s" %
",".join([repr(m) for m in missing]), ",".join([repr(m) for m in missing]),
**self.exception_kwargs) **self.exception_kwargs)
self.parent = None self.parent = None
self.nodes = [] self.nodes = []
def is_root(self): def is_root(self):
return self.parent is None return self.parent is None
def get_children(self): def get_children(self):
return self.nodes return self.nodes
def _parse_attributes(self, expressions, nonexpressions): def _parse_attributes(self, expressions, nonexpressions):
undeclared_identifiers = set() undeclared_identifiers = set()
self.parsed_attributes = {} self.parsed_attributes = {}
@ -283,14 +307,14 @@ class Tag(Node):
m = re.compile(r'^\${(.+?)}$', re.S).match(x) m = re.compile(r'^\${(.+?)}$', re.S).match(x)
if m: if m:
code = ast.PythonCode(m.group(1).rstrip(), code = ast.PythonCode(m.group(1).rstrip(),
**self.exception_kwargs) **self.exception_kwargs)
# we aren't discarding "declared_identifiers" here, # we aren't discarding "declared_identifiers" here,
# which we do so that list comprehension-declared # which we do so that list comprehension-declared
# variables aren't counted. As yet can't find a # variables aren't counted. As yet can't find a
# condition that requires it here. # condition that requires it here.
undeclared_identifiers = \ undeclared_identifiers = \
undeclared_identifiers.union( undeclared_identifiers.union(
code.undeclared_identifiers) code.undeclared_identifiers)
expr.append('(%s)' % m.group(1)) expr.append('(%s)' % m.group(1))
else: else:
if x: if x:
@ -299,15 +323,15 @@ class Tag(Node):
elif key in nonexpressions: elif key in nonexpressions:
if re.search(r'\${.+?}', self.attributes[key]): if re.search(r'\${.+?}', self.attributes[key]):
raise exceptions.CompileException( raise exceptions.CompileException(
"Attibute '%s' in tag '%s' does not allow embedded " "Attibute '%s' in tag '%s' does not allow embedded "
"expressions" % (key, self.keyword), "expressions" % (key, self.keyword),
**self.exception_kwargs) **self.exception_kwargs)
self.parsed_attributes[key] = repr(self.attributes[key]) self.parsed_attributes[key] = repr(self.attributes[key])
else: else:
raise exceptions.CompileException( raise exceptions.CompileException(
"Invalid attribute for tag '%s': '%s'" % "Invalid attribute for tag '%s': '%s'" %
(self.keyword, key), (self.keyword, key),
**self.exception_kwargs) **self.exception_kwargs)
self.expression_undeclared_identifiers = undeclared_identifiers self.expression_undeclared_identifiers = undeclared_identifiers
def declared_identifiers(self): def declared_identifiers(self):
@ -317,49 +341,51 @@ class Tag(Node):
return self.expression_undeclared_identifiers return self.expression_undeclared_identifiers
def __repr__(self): def __repr__(self):
return "%s(%r, %s, %r, %r)" % (self.__class__.__name__, return "%s(%r, %s, %r, %r)" % (self.__class__.__name__,
self.keyword, self.keyword,
util.sorted_dict_repr(self.attributes), util.sorted_dict_repr(self.attributes),
(self.lineno, self.pos), (self.lineno, self.pos),
self.nodes self.nodes
) )
class IncludeTag(Tag): class IncludeTag(Tag):
__keyword__ = 'include' __keyword__ = 'include'
def __init__(self, keyword, attributes, **kwargs): def __init__(self, keyword, attributes, **kwargs):
super(IncludeTag, self).__init__( super(IncludeTag, self).__init__(
keyword, keyword,
attributes, attributes,
('file', 'import', 'args'), ('file', 'import', 'args'),
(), ('file',), **kwargs) (), ('file',), **kwargs)
self.page_args = ast.PythonCode( self.page_args = ast.PythonCode(
"__DUMMY(%s)" % attributes.get('args', ''), "__DUMMY(%s)" % attributes.get('args', ''),
**self.exception_kwargs) **self.exception_kwargs)
def declared_identifiers(self): def declared_identifiers(self):
return [] return []
def undeclared_identifiers(self): def undeclared_identifiers(self):
identifiers = self.page_args.undeclared_identifiers.\ identifiers = self.page_args.undeclared_identifiers.\
difference(set(["__DUMMY"])).\ difference(set(["__DUMMY"])).\
difference(self.page_args.declared_identifiers) difference(self.page_args.declared_identifiers)
return identifiers.union(super(IncludeTag, self). return identifiers.union(super(IncludeTag, self).
undeclared_identifiers()) undeclared_identifiers())
class NamespaceTag(Tag): class NamespaceTag(Tag):
__keyword__ = 'namespace' __keyword__ = 'namespace'
def __init__(self, keyword, attributes, **kwargs): def __init__(self, keyword, attributes, **kwargs):
super(NamespaceTag, self).__init__( super(NamespaceTag, self).__init__(
keyword, attributes, keyword, attributes,
('file',), ('file',),
('name','inheritable', ('name', 'inheritable',
'import','module'), 'import', 'module'),
(), **kwargs) (), **kwargs)
self.name = attributes.get('name', '__anon_%s' % hex(abs(id(self)))) self.name = attributes.get('name', '__anon_%s' % hex(abs(id(self))))
if not 'name' in attributes and not 'import' in attributes: if 'name' not in attributes and 'import' not in attributes:
raise exceptions.CompileException( raise exceptions.CompileException(
"'name' and/or 'import' attributes are required " "'name' and/or 'import' attributes are required "
"for <%namespace>", "for <%namespace>",
@ -373,42 +399,53 @@ class NamespaceTag(Tag):
def declared_identifiers(self): def declared_identifiers(self):
return [] return []
class TextTag(Tag): class TextTag(Tag):
__keyword__ = 'text' __keyword__ = 'text'
def __init__(self, keyword, attributes, **kwargs): def __init__(self, keyword, attributes, **kwargs):
super(TextTag, self).__init__( super(TextTag, self).__init__(
keyword, keyword,
attributes, (), attributes, (),
('filter'), (), **kwargs) ('filter'), (), **kwargs)
self.filter_args = ast.ArgumentList( self.filter_args = ast.ArgumentList(
attributes.get('filter', ''), attributes.get('filter', ''),
**self.exception_kwargs) **self.exception_kwargs)
def undeclared_identifiers(self):
return self.filter_args.\
undeclared_identifiers.\
difference(filters.DEFAULT_ESCAPES.keys()).union(
self.expression_undeclared_identifiers
)
class DefTag(Tag): class DefTag(Tag):
__keyword__ = 'def' __keyword__ = 'def'
def __init__(self, keyword, attributes, **kwargs): def __init__(self, keyword, attributes, **kwargs):
expressions = ['buffered', 'cached'] + [
c for c in attributes if c.startswith('cache_')]
super(DefTag, self).__init__( super(DefTag, self).__init__(
keyword, keyword,
attributes, attributes,
('buffered', 'cached', 'cache_key', 'cache_timeout', expressions,
'cache_type', 'cache_dir', 'cache_url'), ('name', 'filter', 'decorator'),
('name','filter', 'decorator'), ('name',),
('name',), **kwargs)
**kwargs)
name = attributes['name'] name = attributes['name']
if re.match(r'^[\w_]+$',name): if re.match(r'^[\w_]+$', name):
raise exceptions.CompileException( raise exceptions.CompileException(
"Missing parenthesis in %def", "Missing parenthesis in %def",
**self.exception_kwargs) **self.exception_kwargs)
self.function_decl = ast.FunctionDecl("def " + name + ":pass", self.function_decl = ast.FunctionDecl("def " + name + ":pass",
**self.exception_kwargs) **self.exception_kwargs)
self.name = self.function_decl.funcname self.name = self.function_decl.funcname
self.decorator = attributes.get('decorator', '') self.decorator = attributes.get('decorator', '')
self.filter_args = ast.ArgumentList( self.filter_args = ast.ArgumentList(
attributes.get('filter', ''), attributes.get('filter', ''),
**self.exception_kwargs) **self.exception_kwargs)
is_anonymous = False is_anonymous = False
is_block = False is_block = False
@ -421,49 +458,56 @@ class DefTag(Tag):
return self.function_decl.get_argument_expressions(**kw) return self.function_decl.get_argument_expressions(**kw)
def declared_identifiers(self): def declared_identifiers(self):
return self.function_decl.argnames return self.function_decl.allargnames
def undeclared_identifiers(self): def undeclared_identifiers(self):
res = [] res = []
for c in self.function_decl.defaults: for c in self.function_decl.defaults:
res += list(ast.PythonCode(c, **self.exception_kwargs). res += list(ast.PythonCode(c, **self.exception_kwargs).
undeclared_identifiers) undeclared_identifiers)
return res + list(self.filter_args.\ return set(res).union(
undeclared_identifiers.\ self.filter_args.
difference(filters.DEFAULT_ESCAPES.keys()) undeclared_identifiers.
) difference(filters.DEFAULT_ESCAPES.keys())
).union(
self.expression_undeclared_identifiers
).difference(
self.function_decl.allargnames
)
class BlockTag(Tag): class BlockTag(Tag):
__keyword__ = 'block' __keyword__ = 'block'
def __init__(self, keyword, attributes, **kwargs): def __init__(self, keyword, attributes, **kwargs):
expressions = ['buffered', 'cached', 'args'] + [
c for c in attributes if c.startswith('cache_')]
super(BlockTag, self).__init__( super(BlockTag, self).__init__(
keyword, keyword,
attributes, attributes,
('buffered', 'cached', 'cache_key', 'cache_timeout', expressions,
'cache_type', 'cache_dir', 'cache_url', 'args'), ('name', 'filter', 'decorator'),
('name','filter', 'decorator'), (),
(), **kwargs)
**kwargs)
name = attributes.get('name') name = attributes.get('name')
if name and not re.match(r'^[\w_]+$',name): if name and not re.match(r'^[\w_]+$', name):
raise exceptions.CompileException( raise exceptions.CompileException(
"%block may not specify an argument signature", "%block may not specify an argument signature",
**self.exception_kwargs) **self.exception_kwargs)
if not name and attributes.get('args', None): if not name and attributes.get('args', None):
raise exceptions.CompileException( raise exceptions.CompileException(
"Only named %blocks may specify args", "Only named %blocks may specify args",
**self.exception_kwargs **self.exception_kwargs
) )
self.body_decl = ast.FunctionArgs(attributes.get('args', ''), self.body_decl = ast.FunctionArgs(attributes.get('args', ''),
**self.exception_kwargs) **self.exception_kwargs)
self.name = name self.name = name
self.decorator = attributes.get('decorator', '') self.decorator = attributes.get('decorator', '')
self.filter_args = ast.ArgumentList( self.filter_args = ast.ArgumentList(
attributes.get('filter', ''), attributes.get('filter', ''),
**self.exception_kwargs) **self.exception_kwargs)
is_block = True is_block = True
@ -479,87 +523,94 @@ class BlockTag(Tag):
return self.body_decl.get_argument_expressions(**kw) return self.body_decl.get_argument_expressions(**kw)
def declared_identifiers(self): def declared_identifiers(self):
return self.body_decl.argnames return self.body_decl.allargnames
def undeclared_identifiers(self): def undeclared_identifiers(self):
return [] return (self.filter_args.
undeclared_identifiers.
difference(filters.DEFAULT_ESCAPES.keys())
).union(self.expression_undeclared_identifiers)
class CallTag(Tag): class CallTag(Tag):
__keyword__ = 'call' __keyword__ = 'call'
def __init__(self, keyword, attributes, **kwargs): def __init__(self, keyword, attributes, **kwargs):
super(CallTag, self).__init__(keyword, attributes, super(CallTag, self).__init__(keyword, attributes,
('args'), ('expr',), ('expr',), **kwargs) ('args'), ('expr',), ('expr',), **kwargs)
self.expression = attributes['expr'] self.expression = attributes['expr']
self.code = ast.PythonCode(self.expression, **self.exception_kwargs) self.code = ast.PythonCode(self.expression, **self.exception_kwargs)
self.body_decl = ast.FunctionArgs(attributes.get('args', ''), self.body_decl = ast.FunctionArgs(attributes.get('args', ''),
**self.exception_kwargs) **self.exception_kwargs)
def declared_identifiers(self): def declared_identifiers(self):
return self.code.declared_identifiers.union(self.body_decl.argnames) return self.code.declared_identifiers.union(self.body_decl.allargnames)
def undeclared_identifiers(self): def undeclared_identifiers(self):
return self.code.undeclared_identifiers.\ return self.code.undeclared_identifiers.\
difference(self.code.declared_identifiers) difference(self.code.declared_identifiers)
class CallNamespaceTag(Tag): class CallNamespaceTag(Tag):
def __init__(self, namespace, defname, attributes, **kwargs): def __init__(self, namespace, defname, attributes, **kwargs):
super(CallNamespaceTag, self).__init__( super(CallNamespaceTag, self).__init__(
namespace + ":" + defname, namespace + ":" + defname,
attributes, attributes,
tuple(attributes.keys()) + ('args', ), tuple(attributes.keys()) + ('args', ),
(), (),
(), (),
**kwargs) **kwargs)
self.expression = "%s.%s(%s)" % ( self.expression = "%s.%s(%s)" % (
namespace, namespace,
defname, defname,
",".join(["%s=%s" % (k, v) for k, v in ",".join(["%s=%s" % (k, v) for k, v in
self.parsed_attributes.iteritems() self.parsed_attributes.items()
if k != 'args']) if k != 'args'])
) )
self.code = ast.PythonCode(self.expression, **self.exception_kwargs) self.code = ast.PythonCode(self.expression, **self.exception_kwargs)
self.body_decl = ast.FunctionArgs( self.body_decl = ast.FunctionArgs(
attributes.get('args', ''), attributes.get('args', ''),
**self.exception_kwargs) **self.exception_kwargs)
def declared_identifiers(self): def declared_identifiers(self):
return self.code.declared_identifiers.union(self.body_decl.argnames) return self.code.declared_identifiers.union(self.body_decl.allargnames)
def undeclared_identifiers(self): def undeclared_identifiers(self):
return self.code.undeclared_identifiers.\ return self.code.undeclared_identifiers.\
difference(self.code.declared_identifiers) difference(self.code.declared_identifiers)
class InheritTag(Tag): class InheritTag(Tag):
__keyword__ = 'inherit' __keyword__ = 'inherit'
def __init__(self, keyword, attributes, **kwargs): def __init__(self, keyword, attributes, **kwargs):
super(InheritTag, self).__init__( super(InheritTag, self).__init__(
keyword, attributes, keyword, attributes,
('file',), (), ('file',), **kwargs) ('file',), (), ('file',), **kwargs)
class PageTag(Tag): class PageTag(Tag):
__keyword__ = 'page' __keyword__ = 'page'
def __init__(self, keyword, attributes, **kwargs): def __init__(self, keyword, attributes, **kwargs):
expressions = \
['cached', 'args', 'expression_filter', 'enable_loop'] + \
[c for c in attributes if c.startswith('cache_')]
super(PageTag, self).__init__( super(PageTag, self).__init__(
keyword, keyword,
attributes, attributes,
('cached', 'cache_key', 'cache_timeout', expressions,
'cache_type', 'cache_dir', 'cache_url', (),
'args', 'expression_filter'), (),
(), **kwargs)
(), self.body_decl = ast.FunctionArgs(attributes.get('args', ''),
**kwargs) **self.exception_kwargs)
self.body_decl = ast.FunctionArgs(attributes.get('args', ''),
**self.exception_kwargs)
self.filter_args = ast.ArgumentList( self.filter_args = ast.ArgumentList(
attributes.get('expression_filter', ''), attributes.get('expression_filter', ''),
**self.exception_kwargs) **self.exception_kwargs)
def declared_identifiers(self): def declared_identifiers(self):
return self.body_decl.argnames return self.body_decl.allargnames

View File

@ -1,61 +1,78 @@
# mako/pygen.py # mako/pygen.py
# Copyright (C) 2006-2011 the Mako authors and contributors <see AUTHORS file> # Copyright (C) 2006-2016 the Mako authors and contributors <see AUTHORS file>
# #
# This module is part of Mako and is released under # This module is part of Mako and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php # the MIT License: http://www.opensource.org/licenses/mit-license.php
"""utilities for generating and formatting literal Python code.""" """utilities for generating and formatting literal Python code."""
import re, string import re
from StringIO import StringIO
from mako import exceptions from mako import exceptions
class PythonPrinter(object): class PythonPrinter(object):
def __init__(self, stream): def __init__(self, stream):
# indentation counter # indentation counter
self.indent = 0 self.indent = 0
# a stack storing information about why we incremented # a stack storing information about why we incremented
# the indentation counter, to help us determine if we # the indentation counter, to help us determine if we
# should decrement it # should decrement it
self.indent_detail = [] self.indent_detail = []
# the string of whitespace multiplied by the indent # the string of whitespace multiplied by the indent
# counter to produce a line # counter to produce a line
self.indentstring = " " self.indentstring = " "
# the stream we are writing to # the stream we are writing to
self.stream = stream self.stream = stream
# current line number
self.lineno = 1
# a list of lines that represents a buffered "block" of code, # a list of lines that represents a buffered "block" of code,
# which can be later printed relative to an indent level # which can be later printed relative to an indent level
self.line_buffer = [] self.line_buffer = []
self.in_indent_lines = False self.in_indent_lines = False
self._reset_multi_line_flags() self._reset_multi_line_flags()
def write(self, text): # mapping of generated python lines to template
self.stream.write(text) # source lines
self.source_map = {}
def _update_lineno(self, num):
self.lineno += num
def start_source(self, lineno):
if self.lineno not in self.source_map:
self.source_map[self.lineno] = lineno
def write_blanks(self, num):
self.stream.write("\n" * num)
self._update_lineno(num)
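The new lineno counter and source_map give the code generator a mapping from generated Python lines back to template lines. A small self-contained check of that bookkeeping (the _Buf class and the written line are made up; PythonPrinter is the class shown here):

    from mako.pygen import PythonPrinter

    class _Buf(object):
        """minimal write-only stream standing in for the codegen buffer."""
        def __init__(self):
            self.data = []
        def write(self, text):
            self.data.append(text)

    printer = PythonPrinter(_Buf())
    printer.start_source(3)          # next generated line comes from template line 3
    printer.writeline("x = context.get('x')")
    assert printer.source_map == {1: 3}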
def write_indented_block(self, block): def write_indented_block(self, block):
"""print a line or lines of python which already contain indentation. """print a line or lines of python which already contain indentation.
The indentation of the total block of lines will be adjusted to that of The indentation of the total block of lines will be adjusted to that of
the current indent level.""" the current indent level."""
self.in_indent_lines = False self.in_indent_lines = False
for l in re.split(r'\r?\n', block): for l in re.split(r'\r?\n', block):
self.line_buffer.append(l) self.line_buffer.append(l)
self._update_lineno(1)
def writelines(self, *lines): def writelines(self, *lines):
"""print a series of lines of python.""" """print a series of lines of python."""
for line in lines: for line in lines:
self.writeline(line) self.writeline(line)
def writeline(self, line): def writeline(self, line):
"""print a line of python, indenting it according to the current """print a line of python, indenting it according to the current
indent level. indent level.
this also adjusts the indentation counter according to the this also adjusts the indentation counter according to the
content of the line. content of the line.
@ -65,42 +82,42 @@ class PythonPrinter(object):
self._flush_adjusted_lines() self._flush_adjusted_lines()
self.in_indent_lines = True self.in_indent_lines = True
decreased_indent = False if (
line is None or
if (line is None or re.match(r"^\s*#", line) or
re.match(r"^\s*#",line) or
re.match(r"^\s*$", line) re.match(r"^\s*$", line)
): ):
hastext = False hastext = False
else: else:
hastext = True hastext = True
is_comment = line and len(line) and line[0] == '#' is_comment = line and len(line) and line[0] == '#'
# see if this line should decrease the indentation level # see if this line should decrease the indentation level
if (not decreased_indent and if (
not is_comment and not is_comment and
(not hastext or self._is_unindentor(line)) (not hastext or self._is_unindentor(line))
): ):
if self.indent > 0: if self.indent > 0:
self.indent -=1 self.indent -= 1
# if the indent_detail stack is empty, the user # if the indent_detail stack is empty, the user
# probably put extra closures - the resulting # probably put extra closures - the resulting
                    # module won't compile. # module won't compile.
if len(self.indent_detail) == 0: if len(self.indent_detail) == 0:
raise exceptions.SyntaxException( raise exceptions.SyntaxException(
"Too many whitespace closures") "Too many whitespace closures")
self.indent_detail.pop() self.indent_detail.pop()
if line is None: if line is None:
return return
# write the line # write the line
self.stream.write(self._indent_line(line) + "\n") self.stream.write(self._indent_line(line) + "\n")
self._update_lineno(len(line.split("\n")))
# see if this line should increase the indentation level. # see if this line should increase the indentation level.
        # see if this line should increase the indentation level. # see if this line should increase the indentation level.
        # note that a line can both decrease (before printing) and # note that a line can both decrease (before printing) and
# then increase (after printing) the indentation level. # then increase (after printing) the indentation level.
if re.search(r":[ \t]*(?:#.*)?$", line): if re.search(r":[ \t]*(?:#.*)?$", line):
@ -108,18 +125,19 @@ class PythonPrinter(object):
# keep track of what the keyword was that indented us, # keep track of what the keyword was that indented us,
# if it is a python compound statement keyword # if it is a python compound statement keyword
# where we might have to look for an "unindent" keyword # where we might have to look for an "unindent" keyword
match = re.match(r"^\s*(if|try|elif|while|for)", line) match = re.match(r"^\s*(if|try|elif|while|for|with)", line)
if match: if match:
# its a "compound" keyword, so we will check for "unindentors" # its a "compound" keyword, so we will check for "unindentors"
indentor = match.group(1) indentor = match.group(1)
self.indent +=1 self.indent += 1
self.indent_detail.append(indentor) self.indent_detail.append(indentor)
else: else:
indentor = None indentor = None
# its not a "compound" keyword. but lets also # its not a "compound" keyword. but lets also
# test for valid Python keywords that might be indenting us, # test for valid Python keywords that might be indenting us,
# else assume its a non-indenting line # else assume its a non-indenting line
m2 = re.match(r"^\s*(def|class|else|elif|except|finally)", line) m2 = re.match(r"^\s*(def|class|else|elif|except|finally)",
line)
if m2: if m2:
self.indent += 1 self.indent += 1
self.indent_detail.append(indentor) self.indent_detail.append(indentor)
@ -127,53 +145,53 @@ class PythonPrinter(object):
def close(self): def close(self):
"""close this printer, flushing any remaining lines.""" """close this printer, flushing any remaining lines."""
self._flush_adjusted_lines() self._flush_adjusted_lines()
def _is_unindentor(self, line): def _is_unindentor(self, line):
"""return true if the given line is an 'unindentor', """return true if the given line is an 'unindentor',
relative to the last 'indent' event received. relative to the last 'indent' event received.
""" """
# no indentation detail has been pushed on; return False # no indentation detail has been pushed on; return False
if len(self.indent_detail) == 0: if len(self.indent_detail) == 0:
return False return False
indentor = self.indent_detail[-1] indentor = self.indent_detail[-1]
# the last indent keyword we grabbed is not a # the last indent keyword we grabbed is not a
# compound statement keyword; return False # compound statement keyword; return False
if indentor is None: if indentor is None:
return False return False
        # if the current line doesn't have one of the "unindentor" keywords, # if the current line doesn't have one of the "unindentor" keywords,
# return False # return False
match = re.match(r"^\s*(else|elif|except|finally).*\:", line) match = re.match(r"^\s*(else|elif|except|finally).*\:", line)
if not match: if not match:
return False return False
# whitespace matches up, we have a compound indentor, # whitespace matches up, we have a compound indentor,
# and this line has an unindentor, this # and this line has an unindentor, this
# is probably good enough # is probably good enough
return True return True
        # should we decide that it's not good enough, here's # should we decide that it's not good enough, here's
# more stuff to check. # more stuff to check.
#keyword = match.group(1) # keyword = match.group(1)
# match the original indent keyword # match the original indent keyword
#for crit in [ # for crit in [
# (r'if|elif', r'else|elif'), # (r'if|elif', r'else|elif'),
# (r'try', r'except|finally|else'), # (r'try', r'except|finally|else'),
# (r'while|for', r'else'), # (r'while|for', r'else'),
#]: # ]:
# if re.match(crit[0], indentor) and re.match(crit[1], keyword): # if re.match(crit[0], indentor) and re.match(crit[1], keyword):
# return True # return True
#return False # return False
def _indent_line(self, line, stripspace=''): def _indent_line(self, line, stripspace=''):
"""indent the given line according to the current indent level. """indent the given line according to the current indent level.
stripspace is a string of space that will be truncated from the stripspace is a string of space that will be truncated from the
start of the line before indenting.""" start of the line before indenting."""
@ -185,7 +203,7 @@ class PythonPrinter(object):
or triple-quoted section.""" or triple-quoted section."""
self.backslashed, self.triplequoted = False, False self.backslashed, self.triplequoted = False, False
def _in_multi_line(self, line): def _in_multi_line(self, line):
"""return true if the given line is part of a multi-line block, """return true if the given line is part of a multi-line block,
via backslash or triple-quote.""" via backslash or triple-quote."""
@ -195,24 +213,24 @@ class PythonPrinter(object):
# guard against the possibility of modifying the space inside of # guard against the possibility of modifying the space inside of
# a literal multiline string with unfortunately placed # a literal multiline string with unfortunately placed
# whitespace # whitespace
current_state = (self.backslashed or self.triplequoted) current_state = (self.backslashed or self.triplequoted)
if re.search(r"\\$", line): if re.search(r"\\$", line):
self.backslashed = True self.backslashed = True
else: else:
self.backslashed = False self.backslashed = False
triples = len(re.findall(r"\"\"\"|\'\'\'", line)) triples = len(re.findall(r"\"\"\"|\'\'\'", line))
if triples == 1 or triples % 2 != 0: if triples == 1 or triples % 2 != 0:
self.triplequoted = not self.triplequoted self.triplequoted = not self.triplequoted
return current_state return current_state
def _flush_adjusted_lines(self): def _flush_adjusted_lines(self):
stripspace = None stripspace = None
self._reset_multi_line_flags() self._reset_multi_line_flags()
for entry in self.line_buffer: for entry in self.line_buffer:
if self._in_multi_line(entry): if self._in_multi_line(entry):
self.stream.write(entry + "\n") self.stream.write(entry + "\n")
@ -221,32 +239,32 @@ class PythonPrinter(object):
if stripspace is None and re.search(r"^[ \t]*[^# \t]", entry): if stripspace is None and re.search(r"^[ \t]*[^# \t]", entry):
stripspace = re.match(r"^([ \t]*)", entry).group(1) stripspace = re.match(r"^([ \t]*)", entry).group(1)
self.stream.write(self._indent_line(entry, stripspace) + "\n") self.stream.write(self._indent_line(entry, stripspace) + "\n")
self.line_buffer = [] self.line_buffer = []
self._reset_multi_line_flags() self._reset_multi_line_flags()
def adjust_whitespace(text): def adjust_whitespace(text):
"""remove the left-whitespace margin of a block of Python code.""" """remove the left-whitespace margin of a block of Python code."""
state = [False, False] state = [False, False]
(backslashed, triplequoted) = (0, 1) (backslashed, triplequoted) = (0, 1)
def in_multi_line(line): def in_multi_line(line):
start_state = (state[backslashed] or state[triplequoted]) start_state = (state[backslashed] or state[triplequoted])
if re.search(r"\\$", line): if re.search(r"\\$", line):
state[backslashed] = True state[backslashed] = True
else: else:
state[backslashed] = False state[backslashed] = False
def match(reg, t): def match(reg, t):
m = re.match(reg, t) m = re.match(reg, t)
if m: if m:
return m, t[len(m.group(0)):] return m, t[len(m.group(0)):]
else: else:
return None, t return None, t
while line: while line:
if state[triplequoted]: if state[triplequoted]:
m, line = match(r"%s" % state[triplequoted], line) m, line = match(r"%s" % state[triplequoted], line)
@ -258,17 +276,17 @@ def adjust_whitespace(text):
m, line = match(r'#', line) m, line = match(r'#', line)
if m: if m:
return start_state return start_state
m, line = match(r"\"\"\"|\'\'\'", line) m, line = match(r"\"\"\"|\'\'\'", line)
if m: if m:
state[triplequoted] = m.group(0) state[triplequoted] = m.group(0)
continue continue
m, line = match(r".*?(?=\"\"\"|\'\'\'|#|$)", line) m, line = match(r".*?(?=\"\"\"|\'\'\'|#|$)", line)
return start_state return start_state
def _indent_line(line, stripspace = ''): def _indent_line(line, stripspace=''):
return re.sub(r"^%s" % stripspace, '', line) return re.sub(r"^%s" % stripspace, '', line)
lines = [] lines = []

View File

@ -1,5 +1,5 @@
# mako/pyparser.py # mako/pyparser.py
# Copyright (C) 2006-2011 the Mako authors and contributors <see AUTHORS file> # Copyright (C) 2006-2016 the Mako authors and contributors <see AUTHORS file>
# #
# This module is part of Mako and is released under # This module is part of Mako and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php # the MIT License: http://www.opensource.org/licenses/mit-license.php
@ -10,524 +10,224 @@ Parsing to AST is done via _ast on Python > 2.5, otherwise the compiler
module is used. module is used.
""" """
from StringIO import StringIO from mako import exceptions, util, compat
from mako import exceptions, util from mako.compat import arg_stringname
import operator import operator
if util.py3k: if compat.py3k:
# words that cannot be assigned to (notably # words that cannot be assigned to (notably
# smaller than the total keys in __builtins__) # smaller than the total keys in __builtins__)
reserved = set(['True', 'False', 'None', 'print']) reserved = set(['True', 'False', 'None', 'print'])
# the "id" attribute on a function node # the "id" attribute on a function node
arg_id = operator.attrgetter('arg') arg_id = operator.attrgetter('arg')
else: else:
# words that cannot be assigned to (notably # words that cannot be assigned to (notably
# smaller than the total keys in __builtins__) # smaller than the total keys in __builtins__)
reserved = set(['True', 'False', 'None']) reserved = set(['True', 'False', 'None'])
# the "id" attribute on a function node # the "id" attribute on a function node
arg_id = operator.attrgetter('id') arg_id = operator.attrgetter('id')
import _ast
try: util.restore__ast(_ast)
import _ast from mako import _ast_util
util.restore__ast(_ast)
import _ast_util
except ImportError:
_ast = None
from compiler import parse as compiler_parse
from compiler import visitor
def parse(code, mode='exec', **exception_kwargs): def parse(code, mode='exec', **exception_kwargs):
"""Parse an expression into AST""" """Parse an expression into AST"""
try: try:
if _ast: return _ast_util.parse(code, '<unknown>', mode)
return _ast_util.parse(code, '<unknown>', mode) except Exception:
else:
if isinstance(code, unicode):
code = code.encode('ascii', 'backslashreplace')
return compiler_parse(code, mode)
except Exception, e:
raise exceptions.SyntaxException( raise exceptions.SyntaxException(
"(%s) %s (%r)" % ( "(%s) %s (%r)" % (
e.__class__.__name__, compat.exception_as().__class__.__name__,
e, compat.exception_as(),
code[0:50] code[0:50]
), **exception_kwargs) ), **exception_kwargs)
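With the old compiler-module fallback gone, parse always goes through _ast_util and wraps any failure in a Mako SyntaxException carrying the template position. For example (the source/lineno/pos/filename values are illustrative):

    from mako import pyparser, exceptions

    node = pyparser.parse("x + y", mode='eval')      # returns an _ast node

    try:
        pyparser.parse("x +", mode='eval',
                       source="${x +}", lineno=1, pos=1, filename='<memory>')
    except exceptions.SyntaxException:
        pass                                         # position info comes from the kwargs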
if _ast: class FindIdentifiers(_ast_util.NodeVisitor):
class FindIdentifiers(_ast_util.NodeVisitor):
def __init__(self, listener, **exception_kwargs): def __init__(self, listener, **exception_kwargs):
self.in_function = False self.in_function = False
self.in_assign_targets = False self.in_assign_targets = False
self.local_ident_stack = {} self.local_ident_stack = set()
self.listener = listener self.listener = listener
self.exception_kwargs = exception_kwargs self.exception_kwargs = exception_kwargs
def _add_declared(self, name): def _add_declared(self, name):
if not self.in_function: if not self.in_function:
self.listener.declared_identifiers.add(name) self.listener.declared_identifiers.add(name)
else:
self.local_ident_stack.add(name)
def visit_ClassDef(self, node): def visit_ClassDef(self, node):
self._add_declared(node.name) self._add_declared(node.name)
def visit_Assign(self, node): def visit_Assign(self, node):
# flip around the visiting of Assign so the expression gets # flip around the visiting of Assign so the expression gets
# evaluated first, in the case of a clause like "x=x+5" (x # evaluated first, in the case of a clause like "x=x+5" (x
# is undeclared) # is undeclared)
self.visit(node.value) self.visit(node.value)
in_a = self.in_assign_targets in_a = self.in_assign_targets
self.in_assign_targets = True self.in_assign_targets = True
for n in node.targets: for n in node.targets:
self.visit(n) self.visit(n)
self.in_assign_targets = in_a self.in_assign_targets = in_a
if util.py3k: if compat.py3k:
# ExceptHandler is in Python 2, but this block only works in # ExceptHandler is in Python 2, but this block only works in
# Python 3 (and is required there) # Python 3 (and is required there)
def visit_ExceptHandler(self, node): def visit_ExceptHandler(self, node):
if node.name is not None: if node.name is not None:
self._add_declared(node.name) self._add_declared(node.name)
if node.type is not None: if node.type is not None:
self.listener.undeclared_identifiers.add(node.type.id) self.visit(node.type)
for statement in node.body:
self.visit(statement)
def visit_Lambda(self, node, *args):
self._visit_function(node, True)
def visit_FunctionDef(self, node):
self._add_declared(node.name)
self._visit_function(node, False)
def _visit_function(self, node, islambda):
# push function state onto stack. dont log any more
# identifiers as "declared" until outside of the function,
# but keep logging identifiers as "undeclared". track
# argument names in each function header so they arent
# counted as "undeclared"
saved = {}
inf = self.in_function
self.in_function = True
for arg in node.args.args:
if arg_id(arg) in self.local_ident_stack:
saved[arg_id(arg)] = True
else:
self.local_ident_stack[arg_id(arg)] = True
if islambda:
self.visit(node.body)
else:
for n in node.body:
self.visit(n)
self.in_function = inf
for arg in node.args.args:
if arg_id(arg) not in saved:
del self.local_ident_stack[arg_id(arg)]
def visit_For(self, node):
# flip around visit
self.visit(node.iter)
self.visit(node.target)
for statement in node.body: for statement in node.body:
self.visit(statement) self.visit(statement)
for statement in node.orelse:
self.visit(statement)
def visit_Name(self, node): def visit_Lambda(self, node, *args):
if isinstance(node.ctx, _ast.Store): self._visit_function(node, True)
self._add_declared(node.id)
if node.id not in reserved and node.id \ def visit_FunctionDef(self, node):
not in self.listener.declared_identifiers and node.id \ self._add_declared(node.name)
self._visit_function(node, False)
def _expand_tuples(self, args):
for arg in args:
if isinstance(arg, _ast.Tuple):
for n in arg.elts:
yield n
else:
yield arg
def _visit_function(self, node, islambda):
# push function state onto stack. dont log any more
# identifiers as "declared" until outside of the function,
# but keep logging identifiers as "undeclared". track
# argument names in each function header so they arent
# counted as "undeclared"
inf = self.in_function
self.in_function = True
local_ident_stack = self.local_ident_stack
self.local_ident_stack = local_ident_stack.union([
arg_id(arg) for arg in self._expand_tuples(node.args.args)
])
if islambda:
self.visit(node.body)
else:
for n in node.body:
self.visit(n)
self.in_function = inf
self.local_ident_stack = local_ident_stack
def visit_For(self, node):
# flip around visit
self.visit(node.iter)
self.visit(node.target)
for statement in node.body:
self.visit(statement)
for statement in node.orelse:
self.visit(statement)
def visit_Name(self, node):
if isinstance(node.ctx, _ast.Store):
                # this is equivalent to visit_AssName in
# compiler
self._add_declared(node.id)
elif node.id not in reserved and node.id \
not in self.listener.declared_identifiers and node.id \
not in self.local_ident_stack: not in self.local_ident_stack:
self.listener.undeclared_identifiers.add(node.id) self.listener.undeclared_identifiers.add(node.id)
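FindIdentifiers feeds the declared/undeclared sets that mako.ast.PythonCode exposes; names bound in a Store context count as declared, and anything unresolved counts as undeclared. A quick check of that contract (the code string is arbitrary):

    from mako import ast

    code = ast.PythonCode("y = x + 5",
                          source='', lineno=1, pos=1, filename='<memory>')
    assert code.declared_identifiers == set(['y'])
    assert code.undeclared_identifiers == set(['x'])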
def visit_Import(self, node): def visit_Import(self, node):
for name in node.names: for name in node.names:
if name.asname is not None: if name.asname is not None:
self._add_declared(name.asname) self._add_declared(name.asname)
else: else:
self._add_declared(name.name.split('.')[0]) self._add_declared(name.name.split('.')[0])
def visit_ImportFrom(self, node): def visit_ImportFrom(self, node):
for name in node.names: for name in node.names:
if name.asname is not None: if name.asname is not None:
self._add_declared(name.asname) self._add_declared(name.asname)
else: else:
if name.name == '*': if name.name == '*':
raise exceptions.CompileException( raise exceptions.CompileException(
"'import *' is not supported, since all identifier "
"names must be explicitly declared. Please use the "
"form 'from <modulename> import <name1>, <name2>, "
"...' instead.", **self.exception_kwargs)
self._add_declared(name.name)
class FindTuple(_ast_util.NodeVisitor):
def __init__(self, listener, code_factory, **exception_kwargs):
self.listener = listener
self.exception_kwargs = exception_kwargs
self.code_factory = code_factory
def visit_Tuple(self, node):
for n in node.elts:
p = self.code_factory(n, **self.exception_kwargs)
self.listener.codeargs.append(p)
self.listener.args.append(ExpressionGenerator(n).value())
self.listener.declared_identifiers = \
self.listener.declared_identifiers.union(
p.declared_identifiers)
self.listener.undeclared_identifiers = \
self.listener.undeclared_identifiers.union(
p.undeclared_identifiers)
class ParseFunc(_ast_util.NodeVisitor):
def __init__(self, listener, **exception_kwargs):
self.listener = listener
self.exception_kwargs = exception_kwargs
def visit_FunctionDef(self, node):
self.listener.funcname = node.name
argnames = [arg_id(arg) for arg in node.args.args]
if node.args.vararg:
argnames.append(node.args.vararg)
if node.args.kwarg:
argnames.append(node.args.kwarg)
self.listener.argnames = argnames
self.listener.defaults = node.args.defaults # ast
self.listener.varargs = node.args.vararg
self.listener.kwargs = node.args.kwarg
class ExpressionGenerator(object):
def __init__(self, astnode):
self.generator = _ast_util.SourceGenerator(' ' * 4)
self.generator.visit(astnode)
def value(self):
return ''.join(self.generator.result)
else:
class FindIdentifiers(object):
def __init__(self, listener, **exception_kwargs):
self.in_function = False
self.local_ident_stack = {}
self.listener = listener
self.exception_kwargs = exception_kwargs
def _add_declared(self, name):
if not self.in_function:
self.listener.declared_identifiers.add(name)
def visitClass(self, node, *args):
self._add_declared(node.name)
def visitAssName(self, node, *args):
self._add_declared(node.name)
def visitAssign(self, node, *args):
# flip around the visiting of Assign so the expression gets
# evaluated first, in the case of a clause like "x=x+5" (x
# is undeclared)
self.visit(node.expr, *args)
for n in node.nodes:
self.visit(n, *args)
def visitLambda(self, node, *args):
self._visit_function(node, args)
def visitFunction(self, node, *args):
self._add_declared(node.name)
self._visit_function(node, args)
def _visit_function(self, node, args):
# push function state onto stack. dont log any more
# identifiers as "declared" until outside of the function,
# but keep logging identifiers as "undeclared". track
# argument names in each function header so they arent
# counted as "undeclared"
saved = {}
inf = self.in_function
self.in_function = True
for arg in node.argnames:
if arg in self.local_ident_stack:
saved[arg] = True
else:
self.local_ident_stack[arg] = True
for n in node.getChildNodes():
self.visit(n, *args)
self.in_function = inf
for arg in node.argnames:
if arg not in saved:
del self.local_ident_stack[arg]
def visitFor(self, node, *args):
# flip around visit
self.visit(node.list, *args)
self.visit(node.assign, *args)
self.visit(node.body, *args)
def visitName(self, node, *args):
if node.name not in reserved and node.name \
not in self.listener.declared_identifiers and node.name \
not in self.local_ident_stack:
self.listener.undeclared_identifiers.add(node.name)
def visitImport(self, node, *args):
for mod, alias in node.names:
if alias is not None:
self._add_declared(alias)
else:
self._add_declared(mod.split('.')[0])
def visitFrom(self, node, *args):
for mod, alias in node.names:
if alias is not None:
self._add_declared(alias)
else:
if mod == '*':
raise exceptions.CompileException(
"'import *' is not supported, since all identifier " "'import *' is not supported, since all identifier "
"names must be explicitly declared. Please use the " "names must be explicitly declared. Please use the "
"form 'from <modulename> import <name1>, <name2>, " "form 'from <modulename> import <name1>, <name2>, "
"...' instead.", **self.exception_kwargs) "...' instead.", **self.exception_kwargs)
self._add_declared(mod) self._add_declared(name.name)
def visit(self, expr):
visitor.walk(expr, self) # , walker=walker())
class FindTuple(object): class FindTuple(_ast_util.NodeVisitor):
def __init__(self, listener, code_factory, **exception_kwargs): def __init__(self, listener, code_factory, **exception_kwargs):
self.listener = listener self.listener = listener
self.exception_kwargs = exception_kwargs self.exception_kwargs = exception_kwargs
self.code_factory = code_factory self.code_factory = code_factory
def visitTuple(self, node, *args): def visit_Tuple(self, node):
for n in node.nodes: for n in node.elts:
p = self.code_factory(n, **self.exception_kwargs) p = self.code_factory(n, **self.exception_kwargs)
self.listener.codeargs.append(p) self.listener.codeargs.append(p)
self.listener.args.append(ExpressionGenerator(n).value()) self.listener.args.append(ExpressionGenerator(n).value())
self.listener.declared_identifiers = \ self.listener.declared_identifiers = \
self.listener.declared_identifiers.union(p.declared_identifiers) self.listener.declared_identifiers.union(
self.listener.undeclared_identifiers = \ p.declared_identifiers)
self.listener.undeclared_identifiers.union(p.undeclared_identifiers) self.listener.undeclared_identifiers = \
self.listener.undeclared_identifiers.union(
def visit(self, expr): p.undeclared_identifiers)
visitor.walk(expr, self) # , walker=walker())
class ParseFunc(object): class ParseFunc(_ast_util.NodeVisitor):
def __init__(self, listener, **exception_kwargs): def __init__(self, listener, **exception_kwargs):
self.listener = listener self.listener = listener
self.exception_kwargs = exception_kwargs self.exception_kwargs = exception_kwargs
def visitFunction(self, node, *args): def visit_FunctionDef(self, node):
self.listener.funcname = node.name self.listener.funcname = node.name
self.listener.argnames = node.argnames
self.listener.defaults = node.defaults
self.listener.varargs = node.varargs
self.listener.kwargs = node.kwargs
def visit(self, expr): argnames = [arg_id(arg) for arg in node.args.args]
visitor.walk(expr, self) if node.args.vararg:
argnames.append(arg_stringname(node.args.vararg))
if compat.py2k:
# kw-only args don't exist in Python 2
kwargnames = []
else:
kwargnames = [arg_id(arg) for arg in node.args.kwonlyargs]
if node.args.kwarg:
kwargnames.append(arg_stringname(node.args.kwarg))
self.listener.argnames = argnames
self.listener.defaults = node.args.defaults # ast
self.listener.kwargnames = kwargnames
if compat.py2k:
self.listener.kwdefaults = []
else:
self.listener.kwdefaults = node.args.kw_defaults
self.listener.varargs = node.args.vararg
self.listener.kwargs = node.args.kwarg
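ParseFunc now records keyword-only argument names and defaults separately (kwargnames/kwdefaults), which is what the allargnames properties used by the parse tree rely on. A small check via mako.ast.FunctionDecl, which drives this visitor (the signature is just an example):

    from mako import ast

    decl = ast.FunctionDecl("def render_body(context, x, y=7, **pageargs): pass",
                            source='', lineno=1, pos=1, filename='<memory>')
    assert decl.funcname == 'render_body'
    assert set(decl.allargnames) == set(['context', 'x', 'y', 'pageargs'])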
class ExpressionGenerator(object):
    """given an AST node, generates an equivalent literal Python
    expression."""

    def __init__(self, astnode):
        self.buf = StringIO()
        visitor.walk(astnode, self)  # , walker=walker())

    def value(self):
        return self.buf.getvalue()
def operator(self, op, node, *args):
self.buf.write('(')
self.visit(node.left, *args)
self.buf.write(' %s ' % op)
self.visit(node.right, *args)
self.buf.write(')')
def booleanop(self, op, node, *args):
self.visit(node.nodes[0])
for n in node.nodes[1:]:
self.buf.write(' ' + op + ' ')
self.visit(n, *args)
def visitConst(self, node, *args):
self.buf.write(repr(node.value))
def visitAssName(self, node, *args):
# TODO: figure out OP_ASSIGN, other OP_s
self.buf.write(node.name)
def visitName(self, node, *args):
self.buf.write(node.name)
def visitMul(self, node, *args):
self.operator('*', node, *args)
def visitAnd(self, node, *args):
self.booleanop('and', node, *args)
def visitOr(self, node, *args):
self.booleanop('or', node, *args)
def visitBitand(self, node, *args):
self.booleanop('&', node, *args)
def visitBitor(self, node, *args):
self.booleanop('|', node, *args)
def visitBitxor(self, node, *args):
self.booleanop('^', node, *args)
def visitAdd(self, node, *args):
self.operator('+', node, *args)
def visitGetattr(self, node, *args):
self.visit(node.expr, *args)
self.buf.write('.%s' % node.attrname)
def visitSub(self, node, *args):
self.operator('-', node, *args)
def visitNot(self, node, *args):
self.buf.write('not ')
self.visit(node.expr)
def visitDiv(self, node, *args):
self.operator('/', node, *args)
def visitFloorDiv(self, node, *args):
self.operator('//', node, *args)
def visitSubscript(self, node, *args):
self.visit(node.expr)
self.buf.write('[')
[self.visit(x) for x in node.subs]
self.buf.write(']')
def visitUnarySub(self, node, *args):
self.buf.write('-')
self.visit(node.expr)
def visitUnaryAdd(self, node, *args):
self.buf.write('-')
self.visit(node.expr)
def visitSlice(self, node, *args):
self.visit(node.expr)
self.buf.write('[')
if node.lower is not None:
self.visit(node.lower)
self.buf.write(':')
if node.upper is not None:
self.visit(node.upper)
self.buf.write(']')
def visitDict(self, node):
self.buf.write('{')
c = node.getChildren()
for i in range(0, len(c), 2):
self.visit(c[i])
self.buf.write(': ')
self.visit(c[i + 1])
if i < len(c) - 2:
self.buf.write(', ')
self.buf.write('}')
def visitTuple(self, node):
self.buf.write('(')
c = node.getChildren()
for i in range(0, len(c)):
self.visit(c[i])
if i < len(c) - 1:
self.buf.write(', ')
self.buf.write(')')
def visitList(self, node):
self.buf.write('[')
c = node.getChildren()
for i in range(0, len(c)):
self.visit(c[i])
if i < len(c) - 1:
self.buf.write(', ')
self.buf.write(']')
def visitListComp(self, node):
self.buf.write('[')
self.visit(node.expr)
self.buf.write(' ')
for n in node.quals:
self.visit(n)
self.buf.write(']')
def visitListCompFor(self, node):
self.buf.write(' for ')
self.visit(node.assign)
self.buf.write(' in ')
self.visit(node.list)
for n in node.ifs:
self.visit(n)
def visitListCompIf(self, node):
self.buf.write(' if ')
self.visit(node.test)
def visitCompare(self, node):
self.visit(node.expr)
for tup in node.ops:
self.buf.write(tup[0])
self.visit(tup[1])
def visitCallFunc(self, node, *args):
self.visit(node.node)
self.buf.write('(')
if len(node.args):
self.visit(node.args[0])
for a in node.args[1:]:
self.buf.write(', ')
self.visit(a)
self.buf.write(')')
class walker(visitor.ASTVisitor):
def dispatch(self, node, *args):
print 'Node:', str(node)
# print "dir:", dir(node)
return visitor.ASTVisitor.dispatch(self, node, *args)



@ -1,5 +1,5 @@
# mako/template.py
# Copyright (C) 2006-2016 the Mako authors and contributors <see AUTHORS file>
#
# This module is part of Mako and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
@ -8,57 +8,82 @@
template strings, as well as template runtime operations."""

from mako.lexer import Lexer
from mako import runtime, util, exceptions, codegen, cache, compat
import os
import re
import shutil
import stat
import sys
import tempfile
import types
import weakref
class Template(object):

    """Represents a compiled template.

    :class:`.Template` includes a reference to the original
    template source (via the :attr:`.source` attribute)
    as well as the source code of the
    generated Python module (i.e. the :attr:`.code` attribute),
    as well as a reference to an actual Python module.

    :class:`.Template` is constructed using either a literal string
    representing the template text, or a filename representing a filesystem
    path to a source file.

    :param text: textual template source.  This argument is mutually
     exclusive versus the ``filename`` parameter.

    :param filename: filename of the source template.  This argument is
     mutually exclusive versus the ``text`` parameter.
    :param buffer_filters: string list of filters to be applied
     to the output of ``%def``\ s which are buffered, cached, or otherwise
     filtered, after all filters
     defined with the ``%def`` itself have been applied. Allows the
     creation of default expression filters that let the output
     of return-valued ``%def``\ s "opt out" of that filtering via
     passing special attributes or objects.
    :param bytestring_passthrough: When ``True``, and ``output_encoding`` is
     set to ``None``, and :meth:`.Template.render` is used to render,
     the `StringIO` or `cStringIO` buffer will be used instead of the
     default "fast" buffer.   This allows raw bytestrings in the
     output stream, such as in expressions, to pass straight
     through to the buffer.  This flag is forced
     to ``True`` if ``disable_unicode`` is also configured.

     .. versionadded:: 0.4
        Added to provide the same behavior as that of the previous series.

    :param cache_args: Dictionary of cache configuration arguments that
     will be passed to the :class:`.CacheImpl`.   See :ref:`caching_toplevel`.

    :param cache_dir:

     .. deprecated:: 0.6
        Use the ``'dir'`` argument in the ``cache_args`` dictionary.
        See :ref:`caching_toplevel`.
    :param cache_enabled: Boolean flag which enables caching of this
     template.  See :ref:`caching_toplevel`.

    :param cache_impl: String name of a :class:`.CacheImpl` caching
     implementation to use.  Defaults to ``'beaker'``.

    :param cache_type:

     .. deprecated:: 0.6
        Use the ``'type'`` argument in the ``cache_args`` dictionary.
        See :ref:`caching_toplevel`.

    :param cache_url:

     .. deprecated:: 0.6
        Use the ``'url'`` argument in the ``cache_args`` dictionary.
        See :ref:`caching_toplevel`.

    :param default_filters: List of string filter names that will
     be applied to all expressions.  See :ref:`filtering_default_filters`.
@ -66,9 +91,16 @@ class Template(object):
    :param disable_unicode: Disables all awareness of Python Unicode
     objects. See :ref:`unicode_disabled`.
:param enable_loop: When ``True``, enable the ``loop`` context variable.
This can be set to ``False`` to support templates that may
be making usage of the name "``loop``". Individual templates can
re-enable the "loop" context by placing the directive
``enable_loop="True"`` inside the ``<%page>`` tag -- see
:ref:`migrating_loop`.
    :param encoding_errors: Error parameter passed to ``encode()`` when
     string encoding is performed. See :ref:`usage_unicode`.

    :param error_handler: Python callable which is called whenever
     compile or runtime exceptions occur. The callable is passed
     the current context as well as the exception. If the
@ -76,82 +108,158 @@ class Template(object):
     be handled, else it is re-raised after the function
     completes.   Is used to provide custom error-rendering
     functions.

     .. seealso::

        :paramref:`.Template.include_error_handler` - include-specific
        error handler function

    :param format_exceptions: if ``True``, exceptions which occur during
     the render phase of this template will be caught and
     formatted into an HTML error page, which then becomes the
     rendered result of the :meth:`.render` call. Otherwise,
     runtime exceptions are propagated outwards.

    :param imports: String list of Python statements, typically individual
     "import" lines, which will be placed into the module level
     preamble of all generated Python modules. See the example
     in :ref:`filtering_default_filters`.
:param future_imports: String list of names to import from `__future__`.
These will be concatenated into a comma-separated string and inserted
     into the beginning of the template, e.g. ``future_imports=['FOO',
'BAR']`` results in ``from __future__ import FOO, BAR``. If you're
interested in using features like the new division operator, you must
use future_imports to convey that to the renderer, as otherwise the
import will not appear as the first executed statement in the generated
code and will therefore not have the desired effect.
:param include_error_handler: An error handler that runs when this template
is included within another one via the ``<%include>`` tag, and raises an
error. Compare to the :paramref:`.Template.error_handler` option.
.. versionadded:: 1.0.6
.. seealso::
:paramref:`.Template.error_handler` - top-level error handler function
    :param input_encoding: Encoding of the template's source code.  Can
     be used in lieu of the coding comment. See
     :ref:`usage_unicode` as well as :ref:`unicode_toplevel` for
     details on source encoding.

    :param lookup: a :class:`.TemplateLookup` instance that will be used
     for all file lookups via the ``<%namespace>``,
     ``<%include>``, and ``<%inherit>`` tags. See
     :ref:`usage_templatelookup`.

    :param module_directory: Filesystem location where generated
     Python module files will be placed.

    :param module_filename: Overrides the filename of the generated
     Python module file. For advanced usage only.
    :param module_writer: A callable which overrides how the Python
     module is written entirely.  The callable is passed the
encoded source content of the module and the destination
path to be written to. The default behavior of module writing
uses a tempfile in conjunction with a file move in order
to make the operation atomic. So a user-defined module
writing function that mimics the default behavior would be:
.. sourcecode:: python
import tempfile
import os
import shutil
def module_writer(source, outputpath):
(dest, name) = \\
tempfile.mkstemp(
dir=os.path.dirname(outputpath)
)
os.write(dest, source)
os.close(dest)
shutil.move(name, outputpath)
from mako.template import Template
mytemplate = Template(
filename="index.html",
module_directory="/path/to/modules",
module_writer=module_writer
)
The function is provided for unusual configurations where
certain platform-specific permissions or other special
steps are needed.
:param output_encoding: The encoding to use when :meth:`.render`
is called.
     See :ref:`usage_unicode` as well as :ref:`unicode_toplevel`.

    :param preprocessor: Python callable which will be passed
     the full template source before it is parsed. The return
     result of the callable will be used as the template source
     code.
    :param lexer_cls: A :class:`.Lexer` class used to parse
     the template.   The :class:`.Lexer` class is used by
     default.

     .. versionadded:: 0.7.4

    :param strict_undefined: Replaces the automatic usage of
     ``UNDEFINED`` for any undeclared variables not located in
     the :class:`.Context` with an immediate raise of
     ``NameError``. The advantage is immediate reporting of
     missing variables which include the name.

     .. versionadded:: 0.3.6

    :param uri: string URI or other identifier for this template.
     If not provided, the ``uri`` is generated from the filesystem
     path, or from the in-memory identity of a non-file-based
     template. The primary usage of the ``uri`` is to provide a key
     within :class:`.TemplateLookup`, as well as to generate the
     file path of the generated Python module file, if
     ``module_directory`` is specified.
""" """
    lexer_cls = Lexer

    def __init__(self,
                 text=None,
                 filename=None,
                 uri=None,
                 format_exceptions=False,
                 error_handler=None,
                 lookup=None,
                 output_encoding=None,
                 encoding_errors='strict',
                 module_directory=None,
                 cache_args=None,
                 cache_impl='beaker',
                 cache_enabled=True,
                 cache_type=None,
                 cache_dir=None,
                 cache_url=None,
                 module_filename=None,
                 input_encoding=None,
                 disable_unicode=False,
                 module_writer=None,
                 bytestring_passthrough=False,
                 default_filters=None,
                 buffer_filters=(),
                 strict_undefined=False,
                 imports=None,
                 future_imports=None,
                 enable_loop=True,
                 preprocessor=None,
                 lexer_cls=None,
                 include_error_handler=None):
        if uri:
            self.module_id = re.sub(r'\W', "_", uri)
            self.uri = uri
@ -163,34 +271,50 @@ class Template(object):
        else:
            self.module_id = "memory:" + hex(id(self))
            self.uri = self.module_id
u_norm = self.uri
if u_norm.startswith("/"):
u_norm = u_norm[1:]
u_norm = os.path.normpath(u_norm)
if u_norm.startswith(".."):
raise exceptions.TemplateLookupException(
"Template uri \"%s\" is invalid - "
"it cannot be relative outside "
"of the root path." % self.uri)
        self.input_encoding = input_encoding
        self.output_encoding = output_encoding
        self.encoding_errors = encoding_errors
        self.disable_unicode = disable_unicode
        self.bytestring_passthrough = bytestring_passthrough or disable_unicode
        self.enable_loop = enable_loop
        self.strict_undefined = strict_undefined
        self.module_writer = module_writer

        if compat.py3k and disable_unicode:
            raise exceptions.UnsupportedError(
                "Mako for Python 3 does not "
                "support disabling Unicode")
        elif output_encoding and disable_unicode:
            raise exceptions.UnsupportedError(
                "output_encoding must be set to "
                "None when disable_unicode is used.")
        if default_filters is None:
            if compat.py3k or self.disable_unicode:
                self.default_filters = ['str']
            else:
                self.default_filters = ['unicode']
        else:
            self.default_filters = default_filters
        self.buffer_filters = buffer_filters

        self.imports = imports
self.future_imports = future_imports
        self.preprocessor = preprocessor

        if lexer_cls is not None:
            self.lexer_cls = lexer_cls

        # if plain text, compile code in memory only
        if text is not None:
            (code, module) = _compile_text(self, text, filename)
@ -203,171 +327,226 @@ class Template(object):
            if module_filename is not None:
                path = module_filename
            elif module_directory is not None:
                path = os.path.abspath(
                    os.path.join(
                        os.path.normpath(module_directory),
                        u_norm + ".py"
                    )
                )
            else:
                path = None
            module = self._compile_from_file(path, filename)
        else:
            raise exceptions.RuntimeException(
                "Template requires text or filename")

        self.module = module
        self.filename = filename
        self.callable_ = self.module.render_body
        self.format_exceptions = format_exceptions
        self.error_handler = error_handler
        self.include_error_handler = include_error_handler
        self.lookup = lookup

        self.module_directory = module_directory

        self._setup_cache_args(
            cache_impl, cache_enabled, cache_args,
            cache_type, cache_dir, cache_url
        )
@util.memoized_property
def reserved_names(self):
if self.enable_loop:
return codegen.RESERVED_NAMES
else:
return codegen.RESERVED_NAMES.difference(['loop'])
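As a rough illustration of the ``enable_loop`` flag and the ``reserved_names``
property above (the template text is made up), disabling the loop context
frees the name ``loop`` for ordinary use:

.. sourcecode:: python

    from mako.template import Template

    # with enable_loop=False, "loop" is not a reserved name and can be
    # supplied like any other context variable.
    t = Template("value is ${loop}", enable_loop=False)
    print(t.render(loop=42))  # value is 42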
def _setup_cache_args(self,
cache_impl, cache_enabled, cache_args,
cache_type, cache_dir, cache_url):
self.cache_impl = cache_impl
        self.cache_enabled = cache_enabled
if cache_args:
self.cache_args = cache_args
else:
self.cache_args = {}
# transfer deprecated cache_* args
if cache_type:
self.cache_args['type'] = cache_type
if cache_dir:
self.cache_args['dir'] = cache_dir
if cache_url:
self.cache_args['url'] = cache_url
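The deprecated ``cache_type``/``cache_dir``/``cache_url`` keywords are simply
folded into ``cache_args`` by the method above, so the two spellings in this
sketch are expected to produce the same configuration (the directory path is
arbitrary):

.. sourcecode:: python

    from mako.template import Template

    # legacy spelling
    t1 = Template("hi", cache_type='file', cache_dir='/tmp/mako_cache')

    # equivalent cache_args spelling
    t2 = Template("hi", cache_args={'type': 'file', 'dir': '/tmp/mako_cache'})

    assert t1.cache_args == t2.cache_args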
    def _compile_from_file(self, path, filename):
        if path is not None:
            util.verify_directory(os.path.dirname(path))
            filemtime = os.stat(filename)[stat.ST_MTIME]
            if not os.path.exists(path) or \
                    os.stat(path)[stat.ST_MTIME] < filemtime:
                data = util.read_file(filename)
                _compile_module_file(
                    self,
                    data,
                    filename,
                    path,
                    self.module_writer)
            module = compat.load_module(self.module_id, path)
            del sys.modules[self.module_id]
            if module._magic_number != codegen.MAGIC_NUMBER:
                data = util.read_file(filename)
                _compile_module_file(
                    self,
                    data,
                    filename,
                    path,
                    self.module_writer)
                module = compat.load_module(self.module_id, path)
                del sys.modules[self.module_id]
            ModuleInfo(module, path, self, filename, None, None)
        else:
            # template filename and no module directory, compile code
            # in memory
            data = util.read_file(filename)
            code, module = _compile_text(
                self,
                data,
                filename)
            self._source = None
            self._code = code
            ModuleInfo(module, None, self, filename, code, None)
        return module
    @property
    def source(self):
        """Return the template source code for this :class:`.Template`."""

        return _get_module_info_from_callable(self.callable_).source

    @property
    def code(self):
        """Return the module source code for this :class:`.Template`."""

        return _get_module_info_from_callable(self.callable_).code

    @util.memoized_property
    def cache(self):
        return cache.Cache(self)
@property
def cache_dir(self):
return self.cache_args['dir']
@property
def cache_url(self):
return self.cache_args['url']
@property
def cache_type(self):
return self.cache_args['type']
    def render(self, *args, **data):
        """Render the output of this template as a string.

        If the template specifies an output encoding, the string
        will be encoded accordingly, else the output is raw (raw
        output uses `cStringIO` and can't handle multibyte
        characters). A :class:`.Context` object is created corresponding
        to the given data. Arguments that are explicitly declared
        by this template's internal rendering method are also
        pulled from the given ``*args``, ``**data`` members.

        """
        return runtime._render(self, self.callable_, args, data)
    def render_unicode(self, *args, **data):
        """Render the output of this template as a unicode object."""

        return runtime._render(self,
                               self.callable_,
                               args,
                               data,
                               as_unicode=True)
    def render_context(self, context, *args, **kwargs):
        """Render this :class:`.Template` with the given context.

        The data is written to the context's buffer.

        """
        if getattr(context, '_with_template', None) is None:
            context._set_with_template(self)
        runtime._render_context(self,
                                self.callable_,
                                context,
                                *args,
                                **kwargs)
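A short sketch of the three render entry points above (template text and
context values are illustrative):

.. sourcecode:: python

    from io import StringIO

    from mako.runtime import Context
    from mako.template import Template

    t = Template("hello, ${name}!")

    print(t.render(name="render"))           # string (bytes if encoded)
    print(t.render_unicode(name="unicode"))  # always a text result

    buf = StringIO()
    t.render_context(Context(buf, name="context"))
    print(buf.getvalue())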
    def has_def(self, name):
        return hasattr(self.module, "render_%s" % name)

    def get_def(self, name):
        """Return a def of this template as a :class:`.DefTemplate`."""

        return DefTemplate(self, getattr(self.module, "render_%s" % name))
def list_defs(self):
"""return a list of defs in the template.
.. versionadded:: 1.0.4
"""
return [i[7:] for i in dir(self.module) if i[:7] == 'render_']
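A sketch of the def-related helpers above (the template body is made up):

.. sourcecode:: python

    from mako.template import Template

    t = Template("""
    <%def name="greet(name)">hello ${name}</%def>
    body text
    """)

    print(t.list_defs())                         # e.g. ['body', 'greet']
    if t.has_def('greet'):
        print(t.get_def('greet').render(name='ed'))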
    def _get_def_callable(self, name):
        return getattr(self.module, "render_%s" % name)

    @property
    def last_modified(self):
        return self.module._modified_time
class ModuleTemplate(Template):

    """A Template which is constructed given an existing Python module.

    e.g.::

        t = Template("this is a template")
        f = file("mymodule.py", "w")
        f.write(t.code)
        f.close()

        import mymodule

        t = ModuleTemplate(mymodule)
        print t.render()

    """
    def __init__(self, module,
                 module_filename=None,
                 template=None,
                 template_filename=None,
                 module_source=None,
                 template_source=None,
                 output_encoding=None,
                 encoding_errors='strict',
                 disable_unicode=False,
                 bytestring_passthrough=False,
                 format_exceptions=False,
                 error_handler=None,
                 lookup=None,
                 cache_args=None,
                 cache_impl='beaker',
                 cache_enabled=True,
                 cache_type=None,
                 cache_dir=None,
                 cache_url=None,
                 include_error_handler=None,
                 ):
        self.module_id = re.sub(r'\W', "_", module._template_uri)
        self.uri = module._template_uri
        self.input_encoding = module._source_encoding
@ -375,38 +554,42 @@ class ModuleTemplate(Template):
        self.encoding_errors = encoding_errors
        self.disable_unicode = disable_unicode
        self.bytestring_passthrough = bytestring_passthrough or disable_unicode
        self.enable_loop = module._enable_loop

        if compat.py3k and disable_unicode:
            raise exceptions.UnsupportedError(
                "Mako for Python 3 does not "
                "support disabling Unicode")
        elif output_encoding and disable_unicode:
            raise exceptions.UnsupportedError(
                "output_encoding must be set to "
                "None when disable_unicode is used.")

        self.module = module
        self.filename = template_filename
        ModuleInfo(module,
                   module_filename,
                   self,
                   template_filename,
                   module_source,
                   template_source)

        self.callable_ = self.module.render_body
        self.format_exceptions = format_exceptions
        self.error_handler = error_handler
        self.include_error_handler = include_error_handler
        self.lookup = lookup
        self._setup_cache_args(
            cache_impl, cache_enabled, cache_args,
            cache_type, cache_dir, cache_url
        )
class DefTemplate(Template):

    """A :class:`.Template` which represents a callable def in a parent
    template."""

    def __init__(self, parent, callable_):
        self.parent = parent
        self.callable_ = callable_
@ -415,27 +598,31 @@ class DefTemplate(Template):
        self.encoding_errors = parent.encoding_errors
        self.format_exceptions = parent.format_exceptions
        self.error_handler = parent.error_handler
        self.include_error_handler = parent.include_error_handler
        self.enable_loop = parent.enable_loop
        self.lookup = parent.lookup
        self.bytestring_passthrough = parent.bytestring_passthrough

    def get_def(self, name):
        return self.parent.get_def(name)
class ModuleInfo(object):

    """Stores information about a module currently loaded into
    memory, provides reverse lookups of template source, module
    source code based on a module's identifier.

    """
    _modules = weakref.WeakValueDictionary()

    def __init__(self,
                 module,
                 module_filename,
                 template,
                 template_filename,
                 module_source,
                 template_source):
        self.module = module
        self.module_filename = module_filename
        self.template_filename = template_filename
@ -444,93 +631,116 @@ class ModuleInfo(object):
        self._modules[module.__name__] = template._mmarker = self
        if module_filename:
            self._modules[module_filename] = self
@classmethod
def get_module_source_metadata(cls, module_source, full_line_map=False):
source_map = re.search(
r"__M_BEGIN_METADATA(.+?)__M_END_METADATA",
module_source, re.S).group(1)
source_map = compat.json.loads(source_map)
source_map['line_map'] = dict(
(int(k), int(v))
for k, v in source_map['line_map'].items())
if full_line_map:
f_line_map = source_map['full_line_map'] = []
line_map = source_map['line_map']
curr_templ_line = 1
for mod_line in range(1, max(line_map)):
if mod_line in line_map:
curr_templ_line = line_map[mod_line]
f_line_map.append(curr_templ_line)
return source_map
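The metadata block parsed above is embedded in every generated module; a
rough sketch of pulling the line map back out of a compiled template (keys
other than ``line_map``/``full_line_map`` are not shown):

.. sourcecode:: python

    from mako.template import ModuleInfo, Template

    t = Template("hello ${name}\ngoodbye ${name}\n")

    # maps generated-module line numbers back to template line numbers
    meta = ModuleInfo.get_module_source_metadata(t.code, full_line_map=True)
    print(meta['line_map'])
    print(meta['full_line_map'])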
    @property
    def code(self):
        if self.module_source is not None:
            return self.module_source
        else:
            return util.read_python_file(self.module_filename)

    @property
    def source(self):
        if self.template_source is not None:
            if self.module._source_encoding and \
                    not isinstance(self.template_source, compat.text_type):
                return self.template_source.decode(
                    self.module._source_encoding)
            else:
                return self.template_source
        else:
            data = util.read_file(self.template_filename)
            if self.module._source_encoding:
                return data.decode(self.module._source_encoding)
            else:
                return data
def _compile(template, text, filename, generate_magic_comment):
lexer = template.lexer_cls(text,
filename,
disable_unicode=template.disable_unicode,
input_encoding=template.input_encoding,
preprocessor=template.preprocessor)
node = lexer.parse()
source = codegen.compile(node,
template.uri,
filename,
default_filters=template.default_filters,
buffer_filters=template.buffer_filters,
imports=template.imports,
future_imports=template.future_imports,
source_encoding=lexer.encoding,
generate_magic_comment=generate_magic_comment,
disable_unicode=template.disable_unicode,
strict_undefined=template.strict_undefined,
enable_loop=template.enable_loop,
reserved_names=template.reserved_names)
return source, lexer
def _compile_text(template, text, filename):
    identifier = template.module_id
    source, lexer = _compile(template, text, filename,
                             generate_magic_comment=template.disable_unicode)

    cid = identifier
    if not compat.py3k and isinstance(cid, compat.text_type):
        cid = cid.encode()
    module = types.ModuleType(cid)
    code = compile(source, cid, 'exec')

    # this exec() works for 2.4->3.3.
    exec(code, module.__dict__, module.__dict__)
    return (source, module)
def _compile_module_file(template, text, filename, outputpath, module_writer):
    source, lexer = _compile(template, text, filename,
                             generate_magic_comment=True)

    if isinstance(source, compat.text_type):
        source = source.encode(lexer.encoding or 'ascii')

    if module_writer:
        module_writer(source, outputpath)
    else:
# make tempfiles in the same location as the ultimate
# location. this ensures they're on the same filesystem,
# avoiding synchronization issues.
(dest, name) = tempfile.mkstemp(dir=os.path.dirname(outputpath))
os.write(dest, source)
os.close(dest)
shutil.move(name, outputpath)
def _get_module_info_from_callable(callable_):
    if compat.py3k:
        return _get_module_info(callable_.__globals__['__name__'])
    else:
        return _get_module_info(callable_.func_globals['__name__'])
def _get_module_info(filename):
    return ModuleInfo._modules[filename]


@ -1,83 +1,68 @@
# mako/util.py
# Copyright (C) 2006-2016 the Mako authors and contributors <see AUTHORS file>
#
# This module is part of Mako and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
import sys
py3k = getattr(sys, 'py3kwarning', False) or sys.version_info >= (3, 0)
py24 = sys.version_info >= (2, 4) and sys.version_info < (2, 5)
jython = sys.platform.startswith('java')
win32 = sys.platform.startswith('win')

if py3k:
    from io import StringIO
else:
    try:
        from cStringIO import StringIO
    except:
        from StringIO import StringIO

import codecs, re, weakref, os, time, operator
import collections

try:
    import threading
    import thread
except ImportError:
    import dummy_threading as threading
    import dummy_thread as thread

if win32 or jython:
    time_func = time.clock
else:
    time_func = time.time

def function_named(fn, name):
    """Return a function with a given __name__.

    Will assign to __name__ and return the original function if possible on
    the Python implementation, otherwise a new function will be constructed.

    """
    fn.__name__ = name
    return fn

import re
import collections
import codecs
import os
from mako import compat
import operator


def update_wrapper(decorated, fn):
    decorated.__wrapped__ = fn
    decorated.__name__ = fn.__name__
    return decorated


class PluginLoader(object):

    def __init__(self, group):
self.group = group
self.impls = {}
def load(self, name):
if name in self.impls:
return self.impls[name]()
else:
import pkg_resources
for impl in pkg_resources.iter_entry_points(
self.group,
name):
self.impls[name] = impl.load
return impl.load()
else:
from mako import exceptions
raise exceptions.RuntimeException(
"Can't load plugin %s %s" %
(self.group, name))
def register(self, name, modulepath, objname):
def load():
mod = __import__(modulepath)
for token in modulepath.split(".")[1:]:
mod = getattr(mod, token)
return getattr(mod, objname)
self.impls[name] = load
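PluginLoader resolves named implementations either from a pkg_resources
entry point or from an explicit registration; a small sketch of the
mechanics (``json.JSONDecoder`` stands in here for a real cache
implementation):

.. sourcecode:: python

    from mako.util import PluginLoader

    loader = PluginLoader("mako.cache")

    # register() maps a name onto any importable attribute without
    # needing an entry point; load() imports it lazily.
    loader.register("demo", "json", "JSONDecoder")
    print(loader.load("demo"))  # <class 'json.decoder.JSONDecoder'>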
try:
from functools import partial
except:
def partial(func, *args, **keywords):
def newfunc(*fargs, **fkeywords):
newkeywords = keywords.copy()
newkeywords.update(fkeywords)
return func(*(args + fargs), **newkeywords)
return newfunc
if py24:
def exception_name(exc):
try:
return exc.__class__.__name__
except AttributeError:
return exc.__name__
else:
def exception_name(exc):
return exc.__class__.__name__
def verify_directory(dir):
    """create and/or verify a filesystem directory."""

    tries = 0

    while not os.path.exists(dir):
        try:
            tries += 1
            os.makedirs(dir, compat.octal("0775"))
        except:
            if tries > 5:
                raise
def to_list(x, default=None):
    if x is None:
        return default
@ -88,7 +73,9 @@ def to_list(x, default=None):
class memoized_property(object):

    """A read-only @property that is only evaluated once."""

    def __init__(self, fget, doc=None):
        self.fget = fget
        self.__doc__ = doc or fget.__doc__
@ -100,77 +87,118 @@ class memoized_property(object):
        obj.__dict__[self.__name__] = result = self.fget(obj)
        return result
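A small sketch of the descriptor above: the wrapped method runs once per
instance, after which the computed value sits in the instance ``__dict__``
and shadows the property (the class and attribute names are made up):

.. sourcecode:: python

    from mako.util import memoized_property

    class Config(object):

        @memoized_property
        def settings(self):
            print("computing...")
            return {"debug": True}

    c = Config()
    c.settings   # prints "computing..." once and caches the dict
    c.settings   # second access is served straight from c.__dict__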
class memoized_instancemethod(object):
"""Decorate a method memoize its return value.
Best applied to no-arg methods: memoization is not sensitive to
argument values, and will always return the same value even when
called with different arguments.
"""
def __init__(self, fget, doc=None):
self.fget = fget
self.__doc__ = doc or fget.__doc__
self.__name__ = fget.__name__
def __get__(self, obj, cls):
if obj is None:
return self
def oneshot(*args, **kw):
result = self.fget(obj, *args, **kw)
memo = lambda *a, **kw: result
memo.__name__ = self.__name__
memo.__doc__ = self.__doc__
obj.__dict__[self.__name__] = memo
return result
oneshot.__name__ = self.__name__
oneshot.__doc__ = self.__doc__
return oneshot
class SetLikeDict(dict):

    """a dictionary that has some setlike methods on it"""

    def union(self, other):
        """produce a 'union' of this dict and another (at the key level).

        values in the second dict take precedence over that of the first"""
        x = SetLikeDict(**self)
        x.update(other)
        return x
class FastEncodingBuffer(object):

    """a very rudimentary buffer that is faster than StringIO,
    but doesn't crash on unicode data like cStringIO."""

    def __init__(self, encoding=None, errors='strict', as_unicode=False):
        self.data = collections.deque()
        self.encoding = encoding
        if as_unicode:
            self.delim = compat.u('')
        else:
            self.delim = ''
        self.as_unicode = as_unicode
        self.errors = errors
        self.write = self.data.append

    def truncate(self):
        self.data = collections.deque()
        self.write = self.data.append

    def getvalue(self):
        if self.encoding:
            return self.delim.join(self.data).encode(self.encoding,
                                                     self.errors)
        else:
            return self.delim.join(self.data)
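A rough sketch of the buffer above: writes are collected in a deque and
joined (and optionally encoded) only when ``getvalue()`` is called:

.. sourcecode:: python

    from mako.util import FastEncodingBuffer

    buf = FastEncodingBuffer(encoding='utf-8')
    buf.write(u"caf")
    buf.write(u"\u00e9")
    print(buf.getvalue())    # b'caf\xc3\xa9' - joined, then encoded

    plain = FastEncodingBuffer(as_unicode=True)
    plain.write(u"no ")
    plain.write(u"encoding")
    print(plain.getvalue())  # u'no encoding'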
class LRUCache(dict):

    """A dictionary-like object that stores a limited number of items,
    discarding lesser used items periodically.

    this is a rewrite of LRUCache from Myghty to use a periodic timestamp-based
    paradigm so that synchronization is not really needed.  the size management
    is inexact.
    """

    class _Item(object):

        def __init__(self, key, value):
            self.key = key
            self.value = value
            self.timestamp = compat.time_func()

        def __repr__(self):
            return repr(self.value)

    def __init__(self, capacity, threshold=.5):
        self.capacity = capacity
        self.threshold = threshold

    def __getitem__(self, key):
        item = dict.__getitem__(self, key)
        item.timestamp = compat.time_func()
        return item.value

    def values(self):
        return [i.value for i in dict.values(self)]

    def setdefault(self, key, value):
        if key in self:
            return self[key]
        else:
            self[key] = value
            return value

    def __setitem__(self, key, value):
        item = dict.get(self, key)
        if item is None:
@ -179,17 +207,17 @@ class LRUCache(dict):
        else:
            item.value = value
        self._manage_size()

    def _manage_size(self):
        while len(self) > self.capacity + self.capacity * self.threshold:
            bytime = sorted(dict.values(self),
                            key=operator.attrgetter('timestamp'), reverse=True)
            for item in bytime[self.capacity:]:
                try:
                    del self[item.key]
                except KeyError:
                    # if we couldn't find a key, most likely some other thread
                    # broke in on us. loop around and try again
                    break
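A sketch of the behaviour described above: once the size passes
``capacity + capacity * threshold``, the next write trims the least
recently touched entries back down to ``capacity``:

.. sourcecode:: python

    import time

    from mako.util import LRUCache

    cache = LRUCache(capacity=2, threshold=.5)
    for key, value in [("a", 1), ("b", 2), ("c", 3), ("d", 4)]:
        cache[key] = value
        time.sleep(0.01)          # keep per-item timestamps distinct

    # inserting "d" pushed len(cache) past 2 + 2 * .5 == 3, so
    # _manage_size() dropped the least recently used items.
    print(len(cache))             # 2
    print(sorted(cache.keys()))   # ['c', 'd']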
# Regexp to match python magic encoding line # Regexp to match python magic encoding line
@ -197,8 +225,10 @@ _PYTHON_MAGIC_COMMENT_re = re.compile(
    r'[ \t\f]* \# .* coding[=:][ \t]*([-\w.]+)',
    re.VERBOSE)
def parse_encoding(fp):
    """Deduce the encoding of a Python source file (binary mode) from magic
    comment.

    It does this in the same way as the `Python interpreter`__
@ -227,13 +257,14 @@ def parse_encoding(fp):
            pass
        else:
            line2 = fp.readline()
            m = _PYTHON_MAGIC_COMMENT_re.match(
                line2.decode('ascii', 'ignore'))

        if has_bom:
            if m:
                raise SyntaxError(
                    "python refuses to compile code with both a UTF8"
                    " byte-order-mark and a magic encoding comment")
            return 'utf_8'
        elif m:
            return m.group(1)
@ -242,16 +273,18 @@ def parse_encoding(fp):
    finally:
        fp.seek(pos)


def sorted_dict_repr(d):
    """repr() a dictionary with the keys in order.

    Used by the lexer unit test to compare parse trees based on strings.

    """
    keys = list(d.keys())
    keys.sort()
    return "{" + ", ".join(["%r: %r" % (k, d[k]) for k in keys]) + "}"
def restore__ast(_ast):
    """Attempt to restore the required classes to the _ast module if it
    appears to be missing them
@ -328,25 +361,22 @@ mako in baz not in mako""", '<unknown>', 'exec', _ast.PyCF_ONLY_AST)
    _ast.NotIn = type(m.body[12].value.ops[1])
try:
    from inspect import CO_VARKEYWORDS, CO_VARARGS

    def inspect_func_args(fn):
        co = fn.func_code

        nargs = co.co_argcount
        names = co.co_varnames
        args = list(names[:nargs])

        varargs = None
        if co.co_flags & CO_VARARGS:
            varargs = co.co_varnames[nargs]
            nargs = nargs + 1
        varkw = None
        if co.co_flags & CO_VARKEYWORDS:
            varkw = co.co_varnames[nargs]

        return args, varargs, varkw, fn.func_defaults
except ImportError:
    import inspect

    def inspect_func_args(fn):
        return inspect.getargspec(fn)


def read_file(path, mode='rb'):
    fp = open(path, mode)
    try:
        data = fp.read()
        return data
    finally:
        fp.close()


def read_python_file(path):
    fp = open(path, "rb")
    try:
        encoding = parse_encoding(fp)
        data = fp.read()
        if encoding:
            data = data.decode(encoding)
        return data
    finally:
        fp.close()
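A sketch of the two readers above; ``read_python_file()`` honours a PEP 263
magic encoding comment when decoding (the file written here is purely for
demonstration):

.. sourcecode:: python

    from mako.util import read_file, read_python_file

    # write a tiny module with a magic encoding comment
    with open("demo_mod.py", "wb") as f:
        f.write(b"# -*- coding: utf-8 -*-\nname = 'caf\xc3\xa9'\n")

    raw = read_file("demo_mod.py")          # bytes, exactly as stored
    text = read_python_file("demo_mod.py")  # decoded via the magic comment
    print(type(raw), type(text))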