Updated Mako template system to the latest version (1.0.6)

Roberto Pastor 2016-12-16 12:59:19 +01:00 committed by evilhero
parent 47445452a7
commit 8a10181d32
25 changed files with 3714 additions and 2522 deletions


@ -1,9 +1,8 @@
# mako/__init__.py
# Copyright (C) 2006-2011 the Mako authors and contributors <see AUTHORS file>
# Copyright (C) 2006-2016 the Mako authors and contributors <see AUTHORS file>
#
# This module is part of Mako and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
__version__ = '0.4.1'
__version__ = '1.0.6'


@ -1,5 +1,5 @@
# mako/_ast_util.py
# Copyright (C) 2006-2011 the Mako authors and contributors <see AUTHORS file>
# Copyright (C) 2006-2016 the Mako authors and contributors <see AUTHORS file>
#
# This module is part of Mako and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
@ -30,46 +30,46 @@
:copyright: Copyright 2008 by Armin Ronacher.
:license: Python License.
"""
from _ast import *
from _ast import * # noqa
from mako.compat import arg_stringname
BOOLOP_SYMBOLS = {
And: 'and',
Or: 'or'
And: 'and',
Or: 'or'
}
BINOP_SYMBOLS = {
Add: '+',
Sub: '-',
Mult: '*',
Div: '/',
FloorDiv: '//',
Mod: '%',
LShift: '<<',
RShift: '>>',
BitOr: '|',
BitAnd: '&',
BitXor: '^'
Add: '+',
Sub: '-',
Mult: '*',
Div: '/',
FloorDiv: '//',
Mod: '%',
LShift: '<<',
RShift: '>>',
BitOr: '|',
BitAnd: '&',
BitXor: '^'
}
CMPOP_SYMBOLS = {
Eq: '==',
Gt: '>',
GtE: '>=',
In: 'in',
Is: 'is',
IsNot: 'is not',
Lt: '<',
LtE: '<=',
NotEq: '!=',
NotIn: 'not in'
Eq: '==',
Gt: '>',
GtE: '>=',
In: 'in',
Is: 'is',
IsNot: 'is not',
Lt: '<',
LtE: '<=',
NotEq: '!=',
NotIn: 'not in'
}
UNARYOP_SYMBOLS = {
Invert: '~',
Not: 'not',
UAdd: '+',
USub: '-'
Invert: '~',
Not: 'not',
UAdd: '+',
USub: '-'
}
ALL_SYMBOLS = {}
@ -215,8 +215,8 @@ def get_compile_mode(node):
if not isinstance(node, mod):
raise TypeError('expected mod node, got %r' % node.__class__.__name__)
return {
Expression: 'eval',
Interactive: 'single'
Expression: 'eval',
Interactive: 'single'
}.get(node.__class__, 'expr')
@ -246,6 +246,7 @@ def walk(node):
class NodeVisitor(object):
"""
Walks the abstract syntax tree and call visitor functions for every node
found. The visitor functions may return values which will be forwarded
@ -290,6 +291,7 @@ class NodeVisitor(object):
class NodeTransformer(NodeVisitor):
"""
Walks the abstract syntax tree and allows modifications of nodes.
@ -349,6 +351,7 @@ class NodeTransformer(NodeVisitor):
class SourceGenerator(NodeVisitor):
"""
This visitor is able to transform a well formed syntax tree into python
sourcecode. For more details have a look at the docstring of the
@ -388,6 +391,7 @@ class SourceGenerator(NodeVisitor):
def signature(self, node):
want_comma = []
def write_comma():
if want_comma:
self.write(', ')
@ -403,10 +407,10 @@ class SourceGenerator(NodeVisitor):
self.visit(default)
if node.vararg is not None:
write_comma()
self.write('*' + node.vararg)
self.write('*' + arg_stringname(node.vararg))
if node.kwarg is not None:
write_comma()
self.write('**' + node.kwarg)
self.write('**' + arg_stringname(node.kwarg))
def decorators(self, node):
for decorator in node.decorator_list:
@ -460,6 +464,7 @@ class SourceGenerator(NodeVisitor):
def visit_ClassDef(self, node):
have_args = []
def paren_or_comma():
if have_args:
self.write(', ')
@ -481,11 +486,11 @@ class SourceGenerator(NodeVisitor):
paren_or_comma()
self.write(keyword.arg + '=')
self.visit(keyword.value)
if node.starargs is not None:
if getattr(node, "starargs", None):
paren_or_comma()
self.write('*')
self.visit(node.starargs)
if node.kwargs is not None:
if getattr(node, "kwargs", None):
paren_or_comma()
self.write('**')
self.visit(node.kwargs)
@ -631,6 +636,7 @@ class SourceGenerator(NodeVisitor):
def visit_Call(self, node):
want_comma = []
def write_comma():
if want_comma:
self.write(', ')
@ -646,11 +652,11 @@ class SourceGenerator(NodeVisitor):
write_comma()
self.write(keyword.arg + '=')
self.visit(keyword.value)
if node.starargs is not None:
if getattr(node, "starargs", None):
write_comma()
self.write('*')
self.visit(node.starargs)
if node.kwargs is not None:
if getattr(node, "kwargs", None):
write_comma()
self.write('**')
self.visit(node.kwargs)
@ -659,6 +665,12 @@ class SourceGenerator(NodeVisitor):
def visit_Name(self, node):
self.write(node.id)
def visit_NameConstant(self, node):
self.write(str(node.value))
def visit_arg(self, node):
self.write(node.arg)
def visit_Str(self, node):
self.write(repr(node.s))


@ -1,36 +1,42 @@
# mako/ast.py
# Copyright (C) 2006-2011 the Mako authors and contributors <see AUTHORS file>
# Copyright (C) 2006-2016 the Mako authors and contributors <see AUTHORS file>
#
# This module is part of Mako and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
"""utilities for analyzing expressions and blocks of Python
"""utilities for analyzing expressions and blocks of Python
code, as well as generating Python from AST nodes"""
from mako import exceptions, pyparser, util
from mako import exceptions, pyparser, compat
import re
class PythonCode(object):
"""represents information about a string containing Python code"""
def __init__(self, code, **exception_kwargs):
self.code = code
# represents all identifiers which are assigned to at some point in the code
self.declared_identifiers = set()
# represents all identifiers which are referenced before their assignment, if any
self.undeclared_identifiers = set()
# note that an identifier can be in both the undeclared and declared lists.
# using AST to parse instead of using code.co_varnames,
# represents all identifiers which are assigned to at some point in
# the code
self.declared_identifiers = set()
# represents all identifiers which are referenced before their
# assignment, if any
self.undeclared_identifiers = set()
# note that an identifier can be in both the undeclared and declared
# lists.
# using AST to parse instead of using code.co_varnames,
# code.co_names has several advantages:
# - we can locate an identifier as "undeclared" even if
# - we can locate an identifier as "undeclared" even if
# its declared later in the same block of code
# - AST is less likely to break with version changes
# - AST is less likely to break with version changes
# (for example, the behavior of co_names changed a little bit
# in python version 2.5)
if isinstance(code, basestring):
if isinstance(code, compat.string_types):
expr = pyparser.parse(code.lstrip(), "exec", **exception_kwargs)
else:
expr = code
@ -38,44 +44,51 @@ class PythonCode(object):
f = pyparser.FindIdentifiers(self, **exception_kwargs)
f.visit(expr)
class ArgumentList(object):
"""parses a fragment of code as a comma-separated list of expressions"""
def __init__(self, code, **exception_kwargs):
self.codeargs = []
self.args = []
self.declared_identifiers = set()
self.undeclared_identifiers = set()
if isinstance(code, basestring):
if isinstance(code, compat.string_types):
if re.match(r"\S", code) and not re.match(r",\s*$", code):
# if theres text and no trailing comma, insure its parsed
# as a tuple by adding a trailing comma
code += ","
code += ","
expr = pyparser.parse(code, "exec", **exception_kwargs)
else:
expr = code
f = pyparser.FindTuple(self, PythonCode, **exception_kwargs)
f.visit(expr)
class PythonFragment(PythonCode):
"""extends PythonCode to provide identifier lookups in partial control statements
e.g.
"""extends PythonCode to provide identifier lookups in partial control
statements
e.g.
for x in 5:
elif y==9:
except (MyException, e):
etc.
"""
def __init__(self, code, **exception_kwargs):
m = re.match(r'^(\w+)(?:\s+(.*?))?:\s*(#|$)', code.strip(), re.S)
if not m:
raise exceptions.CompileException(
"Fragment '%s' is not a partial control statement" %
code, **exception_kwargs)
"Fragment '%s' is not a partial control statement" %
code, **exception_kwargs)
if m.group(3):
code = code[:m.start(3)]
(keyword, expr) = m.group(1,2)
if keyword in ['for','if', 'while']:
(keyword, expr) = m.group(1, 2)
if keyword in ['for', 'if', 'while']:
code = code + "pass"
elif keyword == 'try':
code = code + "pass\nexcept:pass"
@ -83,61 +96,96 @@ class PythonFragment(PythonCode):
code = "if False:pass\n" + code + "pass"
elif keyword == 'except':
code = "try:pass\n" + code + "pass"
elif keyword == 'with':
code = code + "pass"
else:
raise exceptions.CompileException(
"Unsupported control keyword: '%s'" %
keyword, **exception_kwargs)
"Unsupported control keyword: '%s'" %
keyword, **exception_kwargs)
super(PythonFragment, self).__init__(code, **exception_kwargs)
class FunctionDecl(object):
"""function declaration"""
def __init__(self, code, allow_kwargs=True, **exception_kwargs):
self.code = code
expr = pyparser.parse(code, "exec", **exception_kwargs)
f = pyparser.ParseFunc(self, **exception_kwargs)
f.visit(expr)
if not hasattr(self, 'funcname'):
raise exceptions.CompileException(
"Code '%s' is not a function declaration" % code,
**exception_kwargs)
"Code '%s' is not a function declaration" % code,
**exception_kwargs)
if not allow_kwargs and self.kwargs:
raise exceptions.CompileException(
"'**%s' keyword argument not allowed here" %
self.argnames[-1], **exception_kwargs)
def get_argument_expressions(self, include_defaults=True):
"""return the argument declarations of this FunctionDecl as a printable list."""
"'**%s' keyword argument not allowed here" %
self.kwargnames[-1], **exception_kwargs)
def get_argument_expressions(self, as_call=False):
"""Return the argument declarations of this FunctionDecl as a printable
list.
By default the return value is appropriate for writing in a ``def``;
set `as_call` to true to build arguments to be passed to the function
instead (assuming locals with the same names as the arguments exist).
"""
namedecls = []
defaults = [d for d in self.defaults]
kwargs = self.kwargs
varargs = self.varargs
argnames = [f for f in self.argnames]
argnames.reverse()
for arg in argnames:
default = None
if kwargs:
arg = "**" + arg
kwargs = False
elif varargs:
arg = "*" + arg
varargs = False
# Build in reverse order, since defaults and slurpy args come last
argnames = self.argnames[::-1]
kwargnames = self.kwargnames[::-1]
defaults = self.defaults[::-1]
kwdefaults = self.kwdefaults[::-1]
# Named arguments
if self.kwargs:
namedecls.append("**" + kwargnames.pop(0))
for name in kwargnames:
# Keyword-only arguments must always be used by name, so even if
# this is a call, print out `foo=foo`
if as_call:
namedecls.append("%s=%s" % (name, name))
elif kwdefaults:
default = kwdefaults.pop(0)
if default is None:
# The AST always gives kwargs a default, since you can do
# `def foo(*, a=1, b, c=3)`
namedecls.append(name)
else:
namedecls.append("%s=%s" % (
name, pyparser.ExpressionGenerator(default).value()))
else:
default = len(defaults) and defaults.pop() or None
if include_defaults and default:
namedecls.insert(0, "%s=%s" %
(arg,
pyparser.ExpressionGenerator(default).value()
)
)
namedecls.append(name)
# Positional arguments
if self.varargs:
namedecls.append("*" + argnames.pop(0))
for name in argnames:
if as_call or not defaults:
namedecls.append(name)
else:
namedecls.insert(0, arg)
default = defaults.pop(0)
namedecls.append("%s=%s" % (
name, pyparser.ExpressionGenerator(default).value()))
namedecls.reverse()
return namedecls
@property
def allargnames(self):
return tuple(self.argnames) + tuple(self.kwargnames)
class FunctionArgs(FunctionDecl):
"""the argument portion of a function declaration"""
def __init__(self, code, **kwargs):
super(FunctionArgs, self).__init__("def ANON(%s):pass" % code, **kwargs)
super(FunctionArgs, self).__init__("def ANON(%s):pass" % code,
**kwargs)

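The reworked get_argument_expressions() above builds the list back-to-front so that defaults, *args and keyword-only arguments are handled uniformly, and the new as_call flag renders a call-site form instead of a def-site form. A minimal sketch of this internal API with a made-up signature; the commented results follow from the logic shown above:

from mako import ast as mako_ast

decl = mako_ast.FunctionDecl("def render(context, x, y=7, *args, **kw): pass")
print(decl.funcname)                      # 'render'
print(decl.get_argument_expressions())    # ['context', 'x', 'y=7', '*args', '**kw']
print(decl.get_argument_expressions(as_call=True))
                                          # ['context', 'x', 'y', '*args', '**kw']
print(decl.allargnames)                   # ('context', 'x', 'y', 'args', 'kw')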

@ -1,124 +1,240 @@
# mako/cache.py
# Copyright (C) 2006-2011 the Mako authors and contributors <see AUTHORS file>
# Copyright (C) 2006-2016 the Mako authors and contributors <see AUTHORS file>
#
# This module is part of Mako and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
from mako import exceptions
from mako import compat, util
cache = None
_cache_plugins = util.PluginLoader("mako.cache")
register_plugin = _cache_plugins.register
register_plugin("beaker", "mako.ext.beaker_cache", "BeakerCacheImpl")
class BeakerMissing(object):
def get_cache(self, name, **kwargs):
raise exceptions.RuntimeException("the Beaker package is required to use cache functionality.")
class Cache(object):
"""Represents a data content cache made available to the module
space of a :class:`.Template` object.
:class:`.Cache` is a wrapper on top of a Beaker CacheManager object.
This object in turn references any number of "containers", each of
which defines its own backend (i.e. file, memory, memcached, etc.)
independently of the rest.
"""
def __init__(self, id, starttime):
self.id = id
self.starttime = starttime
self.def_regions = {}
def put(self, key, value, **kwargs):
"""Place a value in the cache.
:param key: the value's key.
:param value: the value
:param \**kwargs: cache configuration arguments. The
backend is configured using these arguments upon first request.
Subsequent requests that use the same series of configuration
values will use that same backend.
"""
defname = kwargs.pop('defname', None)
expiretime = kwargs.pop('expiretime', None)
createfunc = kwargs.pop('createfunc', None)
self._get_cache(defname, **kwargs).put_value(key, starttime=self.starttime, expiretime=expiretime)
def get(self, key, **kwargs):
"""Retrieve a value from the cache.
:param key: the value's key.
:param \**kwargs: cache configuration arguments. The
backend is configured using these arguments upon first request.
Subsequent requests that use the same series of configuration
values will use that same backend.
"""
defname = kwargs.pop('defname', None)
expiretime = kwargs.pop('expiretime', None)
createfunc = kwargs.pop('createfunc', None)
return self._get_cache(defname, **kwargs).get_value(key, starttime=self.starttime, expiretime=expiretime, createfunc=createfunc)
def invalidate(self, key, **kwargs):
"""Invalidate a value in the cache.
:param key: the value's key.
:param \**kwargs: cache configuration arguments. The
backend is configured using these arguments upon first request.
Subsequent requests that use the same series of configuration
values will use that same backend.
"""
defname = kwargs.pop('defname', None)
expiretime = kwargs.pop('expiretime', None)
createfunc = kwargs.pop('createfunc', None)
self._get_cache(defname, **kwargs).remove_value(key, starttime=self.starttime, expiretime=expiretime)
def invalidate_body(self):
"""Invalidate the cached content of the "body" method for this template.
"""
self.invalidate('render_body', defname='render_body')
def invalidate_def(self, name):
"""Invalidate the cached content of a particular <%def> within this template."""
self.invalidate('render_%s' % name, defname='render_%s' % name)
def invalidate_closure(self, name):
"""Invalidate a nested <%def> within this template.
Caching of nested defs is a blunt tool as there is no
management of scope - nested defs that use cache tags
need to have names unique of all other nested defs in the
template, else their content will be overwritten by
each other.
"""
self.invalidate(name, defname=name)
def _get_cache(self, defname, type=None, **kw):
global cache
if not cache:
try:
from beaker import cache as beaker_cache
cache = beaker_cache.CacheManager()
except ImportError:
# keep a fake cache around so subsequent
# calls don't attempt to re-import
cache = BeakerMissing()
if type == 'memcached':
type = 'ext:memcached'
if not type:
(type, kw) = self.def_regions.get(defname, ('memory', {}))
"""Represents a data content cache made available to the module
space of a specific :class:`.Template` object.
.. versionadded:: 0.6
:class:`.Cache` by itself is mostly a
container for a :class:`.CacheImpl` object, which implements
a fixed API to provide caching services; specific subclasses exist to
implement different
caching strategies. Mako includes a backend that works with
the Beaker caching system. Beaker itself then supports
a number of backends (i.e. file, memory, memcached, etc.)
The construction of a :class:`.Cache` is part of the mechanics
of a :class:`.Template`, and programmatic access to this
cache is typically via the :attr:`.Template.cache` attribute.
"""
impl = None
"""Provide the :class:`.CacheImpl` in use by this :class:`.Cache`.
This accessor allows a :class:`.CacheImpl` with additional
methods beyond that of :class:`.Cache` to be used programmatically.
"""
id = None
"""Return the 'id' that identifies this cache.
This is a value that should be globally unique to the
:class:`.Template` associated with this cache, and can
be used by a caching system to name a local container
for data specific to this template.
"""
starttime = None
"""Epochal time value for when the owning :class:`.Template` was
first compiled.
A cache implementation may wish to invalidate data earlier than
this timestamp; this has the effect of the cache for a specific
:class:`.Template` starting clean any time the :class:`.Template`
is recompiled, such as when the original template file changed on
the filesystem.
"""
def __init__(self, template, *args):
# check for a stale template calling the
# constructor
if isinstance(template, compat.string_types) and args:
return
self.template = template
self.id = template.module.__name__
self.starttime = template.module._modified_time
self._def_regions = {}
self.impl = self._load_impl(self.template.cache_impl)
def _load_impl(self, name):
return _cache_plugins.load(name)(self)
def get_or_create(self, key, creation_function, **kw):
"""Retrieve a value from the cache, using the given creation function
to generate a new value."""
return self._ctx_get_or_create(key, creation_function, None, **kw)
def _ctx_get_or_create(self, key, creation_function, context, **kw):
"""Retrieve a value from the cache, using the given creation function
to generate a new value."""
if not self.template.cache_enabled:
return creation_function()
return self.impl.get_or_create(
key,
creation_function,
**self._get_cache_kw(kw, context))
def set(self, key, value, **kw):
"""Place a value in the cache.
:param key: the value's key.
:param value: the value.
:param \**kw: cache configuration arguments.
"""
self.impl.set(key, value, **self._get_cache_kw(kw, None))
put = set
"""A synonym for :meth:`.Cache.set`.
This is here for backwards compatibility.
"""
def get(self, key, **kw):
"""Retrieve a value from the cache.
:param key: the value's key.
:param \**kw: cache configuration arguments. The
backend is configured using these arguments upon first request.
Subsequent requests that use the same series of configuration
values will use that same backend.
"""
return self.impl.get(key, **self._get_cache_kw(kw, None))
def invalidate(self, key, **kw):
"""Invalidate a value in the cache.
:param key: the value's key.
:param \**kw: cache configuration arguments. The
backend is configured using these arguments upon first request.
Subsequent requests that use the same series of configuration
values will use that same backend.
"""
self.impl.invalidate(key, **self._get_cache_kw(kw, None))
def invalidate_body(self):
"""Invalidate the cached content of the "body" method for this
template.
"""
self.invalidate('render_body', __M_defname='render_body')
def invalidate_def(self, name):
"""Invalidate the cached content of a particular ``<%def>`` within this
template.
"""
self.invalidate('render_%s' % name, __M_defname='render_%s' % name)
def invalidate_closure(self, name):
"""Invalidate a nested ``<%def>`` within this template.
Caching of nested defs is a blunt tool as there is no
management of scope -- nested defs that use cache tags
need to have names unique of all other nested defs in the
template, else their content will be overwritten by
each other.
"""
self.invalidate(name, __M_defname=name)
def _get_cache_kw(self, kw, context):
defname = kw.pop('__M_defname', None)
if not defname:
tmpl_kw = self.template.cache_args.copy()
tmpl_kw.update(kw)
elif defname in self._def_regions:
tmpl_kw = self._def_regions[defname]
else:
self.def_regions[defname] = (type, kw)
return cache.get_cache(self.id, type=type, **kw)
tmpl_kw = self.template.cache_args.copy()
tmpl_kw.update(kw)
self._def_regions[defname] = tmpl_kw
if context and self.impl.pass_context:
tmpl_kw = tmpl_kw.copy()
tmpl_kw.setdefault('context', context)
return tmpl_kw
class CacheImpl(object):
"""Provide a cache implementation for use by :class:`.Cache`."""
def __init__(self, cache):
self.cache = cache
pass_context = False
"""If ``True``, the :class:`.Context` will be passed to
:meth:`get_or_create <.CacheImpl.get_or_create>` as the name ``'context'``.
"""
def get_or_create(self, key, creation_function, **kw):
"""Retrieve a value from the cache, using the given creation function
to generate a new value.
This function *must* return a value, either from
the cache, or via the given creation function.
If the creation function is called, the newly
created value should be populated into the cache
under the given key before being returned.
:param key: the value's key.
:param creation_function: function that when called generates
a new value.
:param \**kw: cache configuration arguments.
"""
raise NotImplementedError()
def set(self, key, value, **kw):
"""Place a value in the cache.
:param key: the value's key.
:param value: the value.
:param \**kw: cache configuration arguments.
"""
raise NotImplementedError()
def get(self, key, **kw):
"""Retrieve a value from the cache.
:param key: the value's key.
:param \**kw: cache configuration arguments.
"""
raise NotImplementedError()
def invalidate(self, key, **kw):
"""Invalidate a value in the cache.
:param key: the value's key.
:param \**kw: cache configuration arguments.
"""
raise NotImplementedError()

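With the Cache/CacheImpl split above, a caching backend is simply a CacheImpl subclass registered through the same plugin mechanism as the 'beaker' entry at the top of the module. A minimal dict-backed sketch, assuming it lives in an importable module named simple_cache (a hypothetical name):

from mako.cache import CacheImpl, register_plugin

class SimpleCacheImpl(CacheImpl):
    def __init__(self, cache):
        super(SimpleCacheImpl, self).__init__(cache)
        self._data = {}

    def get_or_create(self, key, creation_function, **kw):
        # must return a value either way; populate the cache on a miss
        if key not in self._data:
            self._data[key] = creation_function()
        return self._data[key]

    def set(self, key, value, **kw):
        self._data[key] = value

    def get(self, key, **kw):
        return self._data.get(key)

    def invalidate(self, key, **kw):
        self._data.pop(key, None)

# makes Template(..., cache_impl="simple") resolve to this class
register_plugin("simple", "simple_cache", "SimpleCacheImpl")

A template constructed with cache_impl="simple" then routes template.cache.get_or_create(), set(), get() and invalidate() through this implementation.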
lib/mako/cmd.py (new executable file, 67 lines added)

@ -0,0 +1,67 @@
# mako/cmd.py
# Copyright (C) 2006-2016 the Mako authors and contributors <see AUTHORS file>
#
# This module is part of Mako and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
from argparse import ArgumentParser
from os.path import isfile, dirname
import sys
from mako.template import Template
from mako.lookup import TemplateLookup
from mako import exceptions
def varsplit(var):
if "=" not in var:
return (var, "")
return var.split("=", 1)
def _exit():
sys.stderr.write(exceptions.text_error_template().render())
sys.exit(1)
def cmdline(argv=None):
parser = ArgumentParser("usage: %prog [FILENAME]")
parser.add_argument(
"--var", default=[], action="append",
help="variable (can be used multiple times, use name=value)")
parser.add_argument(
"--template-dir", default=[], action="append",
help="Directory to use for template lookup (multiple "
"directories may be provided). If not given then if the "
"template is read from stdin, the value defaults to be "
"the current directory, otherwise it defaults to be the "
"parent directory of the file provided.")
parser.add_argument('input', nargs='?', default='-')
options = parser.parse_args(argv)
if options.input == '-':
lookup_dirs = options.template_dir or ["."]
lookup = TemplateLookup(lookup_dirs)
try:
template = Template(sys.stdin.read(), lookup=lookup)
except:
_exit()
else:
filename = options.input
if not isfile(filename):
raise SystemExit("error: can't find %s" % filename)
lookup_dirs = options.template_dir or [dirname(filename)]
lookup = TemplateLookup(lookup_dirs)
try:
template = Template(filename=filename, lookup=lookup)
except:
_exit()
kw = dict([varsplit(var) for var in options.var])
try:
print(template.render(**kw))
except:
_exit()
if __name__ == "__main__":
cmdline()

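The new mako/cmd.py is a small command-line front end: a positional template filename (or "-" for stdin), repeatable --var name=value pairs, and optional --template-dir lookup directories. A minimal sketch of driving it from Python, where greeting.mako is a hypothetical template containing hello, ${name}:

from mako.cmd import cmdline

# renders greeting.mako with name="world" and prints the result to stdout
cmdline(["--var", "name=world", "greeting.mako"])

The same invocation can also be run from a shell as python -m mako.cmd --var name=world greeting.mako; Mako's packaging normally exposes it as the mako-render console script as well.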
File diff suppressed because it is too large.

lib/mako/compat.py (new file, 201 lines added)

@ -0,0 +1,201 @@
import sys
import time
py3k = sys.version_info >= (3, 0)
py33 = sys.version_info >= (3, 3)
py2k = sys.version_info < (3,)
py26 = sys.version_info >= (2, 6)
py27 = sys.version_info >= (2, 7)
jython = sys.platform.startswith('java')
win32 = sys.platform.startswith('win')
pypy = hasattr(sys, 'pypy_version_info')
if py3k:
# create a "getargspec" from getfullargspec(), which is not deprecated
# in Py3K; getargspec() has started to emit warnings as of Py3.5.
# As of Py3.4, now they are trying to move from getfullargspec()
# to "signature()", but getfullargspec() is not deprecated, so stick
# with that for now.
import collections
ArgSpec = collections.namedtuple(
"ArgSpec",
["args", "varargs", "keywords", "defaults"])
from inspect import getfullargspec as inspect_getfullargspec
def inspect_getargspec(func):
return ArgSpec(
*inspect_getfullargspec(func)[0:4]
)
else:
from inspect import getargspec as inspect_getargspec # noqa
if py3k:
from io import StringIO
import builtins as compat_builtins
from urllib.parse import quote_plus, unquote_plus
from html.entities import codepoint2name, name2codepoint
string_types = str,
binary_type = bytes
text_type = str
from io import BytesIO as byte_buffer
def u(s):
return s
def b(s):
return s.encode("latin-1")
def octal(lit):
return eval("0o" + lit)
else:
import __builtin__ as compat_builtins # noqa
try:
from cStringIO import StringIO
except:
from StringIO import StringIO
byte_buffer = StringIO
from urllib import quote_plus, unquote_plus # noqa
from htmlentitydefs import codepoint2name, name2codepoint # noqa
string_types = basestring, # noqa
binary_type = str
text_type = unicode # noqa
def u(s):
return unicode(s, "utf-8") # noqa
def b(s):
return s
def octal(lit):
return eval("0" + lit)
if py33:
from importlib import machinery
def load_module(module_id, path):
return machinery.SourceFileLoader(module_id, path).load_module()
else:
import imp
def load_module(module_id, path):
fp = open(path, 'rb')
try:
return imp.load_source(module_id, path, fp)
finally:
fp.close()
if py3k:
def reraise(tp, value, tb=None, cause=None):
if cause is not None:
value.__cause__ = cause
if value.__traceback__ is not tb:
raise value.with_traceback(tb)
raise value
else:
exec("def reraise(tp, value, tb=None, cause=None):\n"
" raise tp, value, tb\n")
def exception_as():
return sys.exc_info()[1]
try:
import threading
if py3k:
import _thread as thread
else:
import thread
except ImportError:
import dummy_threading as threading # noqa
if py3k:
import _dummy_thread as thread
else:
import dummy_thread as thread # noqa
if win32 or jython:
time_func = time.clock
else:
time_func = time.time
try:
from functools import partial
except:
def partial(func, *args, **keywords):
def newfunc(*fargs, **fkeywords):
newkeywords = keywords.copy()
newkeywords.update(fkeywords)
return func(*(args + fargs), **newkeywords)
return newfunc
all = all
import json # noqa
def exception_name(exc):
return exc.__class__.__name__
try:
from inspect import CO_VARKEYWORDS, CO_VARARGS
def inspect_func_args(fn):
if py3k:
co = fn.__code__
else:
co = fn.func_code
nargs = co.co_argcount
names = co.co_varnames
args = list(names[:nargs])
varargs = None
if co.co_flags & CO_VARARGS:
varargs = co.co_varnames[nargs]
nargs = nargs + 1
varkw = None
if co.co_flags & CO_VARKEYWORDS:
varkw = co.co_varnames[nargs]
if py3k:
return args, varargs, varkw, fn.__defaults__
else:
return args, varargs, varkw, fn.func_defaults
except ImportError:
import inspect
def inspect_func_args(fn):
return inspect.getargspec(fn)
if py3k:
def callable(fn):
return hasattr(fn, '__call__')
else:
callable = callable
################################################
# cross-compatible metaclass implementation
# Copyright (c) 2010-2012 Benjamin Peterson
def with_metaclass(meta, base=object):
"""Create a base class with a metaclass."""
return meta("%sBase" % meta.__name__, (base,), {})
################################################
def arg_stringname(func_arg):
"""Gets the string name of a kwarg or vararg
In Python3.4 a function's args are
of _ast.arg type not _ast.name
"""
if hasattr(func_arg, 'arg'):
return func_arg.arg
else:
return str(func_arg)

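Most of compat.py is routine 2/3 shimming; the inspect_getargspec() wrapper is the piece other modules lean on (ext/turbogears.py below, for instance), since inspect.getargspec() began emitting deprecation warnings on Python 3.5. A minimal sketch of what it returns, using a throwaway function:

from mako import compat

def greet(name, punctuation="!"):
    return name + punctuation

spec = compat.inspect_getargspec(greet)
print(spec.args)      # ['name', 'punctuation']
print(spec.defaults)  # ('!',)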

@ -1,90 +1,112 @@
# mako/exceptions.py
# Copyright (C) 2006-2011 the Mako authors and contributors <see AUTHORS file>
# Copyright (C) 2006-2016 the Mako authors and contributors <see AUTHORS file>
#
# This module is part of Mako and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
"""exception classes"""
import traceback, sys, re
from mako import util
import traceback
import sys
from mako import util, compat
class MakoException(Exception):
pass
class RuntimeException(MakoException):
pass
def _format_filepos(lineno, pos, filename):
if filename is None:
return " at line: %d char: %d" % (lineno, pos)
else:
return " in file '%s' at line: %d char: %d" % (filename, lineno, pos)
class CompileException(MakoException):
def __init__(self, message, source, lineno, pos, filename):
MakoException.__init__(self, message + _format_filepos(lineno, pos, filename))
self.lineno =lineno
self.pos = pos
self.filename = filename
self.source = source
class SyntaxException(MakoException):
def __init__(self, message, source, lineno, pos, filename):
MakoException.__init__(self, message + _format_filepos(lineno, pos, filename))
self.lineno =lineno
MakoException.__init__(
self,
message + _format_filepos(lineno, pos, filename))
self.lineno = lineno
self.pos = pos
self.filename = filename
self.source = source
class SyntaxException(MakoException):
def __init__(self, message, source, lineno, pos, filename):
MakoException.__init__(
self,
message + _format_filepos(lineno, pos, filename))
self.lineno = lineno
self.pos = pos
self.filename = filename
self.source = source
class UnsupportedError(MakoException):
"""raised when a retired feature is used."""
class NameConflictError(MakoException):
"""raised when a reserved word is used inappropriately"""
class TemplateLookupException(MakoException):
pass
class TopLevelLookupException(TemplateLookupException):
pass
class RichTraceback(object):
"""Pulls the current exception from the sys traceback and extracts
"""Pull the current exception from the ``sys`` traceback and extracts
Mako-specific template information.
See the usage examples in :ref:`handling_exceptions`.
"""
def __init__(self, error=None, traceback=None):
self.source, self.lineno = "", 0
if error is None or traceback is None:
t, value, tback = sys.exc_info()
if error is None:
error = value or t
if traceback is None:
traceback = tback
self.error = error
self.records = self._init(traceback)
if isinstance(self.error, (CompileException, SyntaxException)):
import mako.template
self.source = self.error.source
self.lineno = self.error.lineno
self._has_source = True
self._init_message()
@property
def errorname(self):
return util.exception_name(self.error)
return compat.exception_name(self.error)
def _init_message(self):
"""Find a unicode representation of self.error"""
try:
self.message = unicode(self.error)
self.message = compat.text_type(self.error)
except UnicodeError:
try:
self.message = str(self.error)
@ -92,8 +114,8 @@ class RichTraceback(object):
# Fallback to args as neither unicode nor
# str(Exception(u'\xe6')) work in Python < 2.6
self.message = self.error.args[0]
if not isinstance(self.message, unicode):
self.message = unicode(self.message, 'ascii', 'replace')
if not isinstance(self.message, compat.text_type):
self.message = compat.text_type(self.message, 'ascii', 'replace')
def _get_reformatted_records(self, records):
for rec in records:
@ -101,25 +123,25 @@ class RichTraceback(object):
yield (rec[4], rec[5], rec[2], rec[6])
else:
yield tuple(rec[0:4])
@property
def traceback(self):
"""return a list of 4-tuple traceback records (i.e. normal python
"""Return a list of 4-tuple traceback records (i.e. normal python
format) with template-corresponding lines remapped to the originating
template.
"""
return list(self._get_reformatted_records(self.records))
@property
def reverse_records(self):
return reversed(self.records)
@property
def reverse_traceback(self):
"""return the same data as traceback, except in reverse order.
"""Return the same data as traceback, except in reverse order.
"""
return list(self._get_reformatted_records(self.reverse_records))
def _init(self, trcback):
@ -145,7 +167,7 @@ class RichTraceback(object):
template_filename = info.template_filename or filename
except KeyError:
# A normal .py file (not a Template)
if not util.py3k:
if not compat.py3k:
try:
fp = open(filename, 'rb')
encoding = util.parse_encoding(fp)
@ -156,32 +178,32 @@ class RichTraceback(object):
line = line.decode(encoding)
else:
line = line.decode('ascii', 'replace')
new_trcback.append((filename, lineno, function, line,
None, None, None, None))
new_trcback.append((filename, lineno, function, line,
None, None, None, None))
continue
template_ln = module_ln = 1
line_map = {}
for line in module_source.split("\n"):
match = re.match(r'\s*# SOURCE LINE (\d+)', line)
if match:
template_ln = int(match.group(1))
module_ln += 1
line_map[module_ln] = template_ln
template_lines = [line for line in
template_source.split("\n")]
template_ln = 1
source_map = mako.template.ModuleInfo.\
get_module_source_metadata(
module_source, full_line_map=True)
line_map = source_map['full_line_map']
template_lines = [line_ for line_ in
template_source.split("\n")]
mods[filename] = (line_map, template_lines)
template_ln = line_map[lineno]
template_ln = line_map[lineno - 1]
if template_ln <= len(template_lines):
template_line = template_lines[template_ln - 1]
else:
template_line = None
new_trcback.append((filename, lineno, function,
line, template_filename, template_ln,
new_trcback.append((filename, lineno, function,
line, template_filename, template_ln,
template_line, template_source))
if not self.source:
for l in range(len(new_trcback)-1, 0, -1):
for l in range(len(new_trcback) - 1, 0, -1):
if new_trcback[l][5]:
self.source = new_trcback[l][7]
self.lineno = new_trcback[l][5]
@ -202,13 +224,13 @@ class RichTraceback(object):
self.lineno = new_trcback[-1][1]
return new_trcback
def text_error_template(lookup=None):
"""Provides a template that renders a stack trace in a similar format to
the Python interpreter, substituting source template filenames, line
numbers and code for that of the originating source template, as
applicable.
"""
import mako.template
return mako.template.Template(r"""
@ -227,22 +249,48 @@ Traceback (most recent call last):
${tback.errorname}: ${tback.message}
""")
def _install_pygments():
global syntax_highlight, pygments_html_formatter
from mako.ext.pygmentplugin import syntax_highlight # noqa
from mako.ext.pygmentplugin import pygments_html_formatter # noqa
def _install_fallback():
global syntax_highlight, pygments_html_formatter
from mako.filters import html_escape
pygments_html_formatter = None
def syntax_highlight(filename='', language=None):
return html_escape
def _install_highlighting():
try:
_install_pygments()
except ImportError:
_install_fallback()
_install_highlighting()
def html_error_template():
"""Provides a template that renders a stack trace in an HTML format,
providing an excerpt of code as well as substituting source template
filenames, line numbers and code for that of the originating source
template, as applicable.
The template's default encoding_errors value is 'htmlentityreplace'. the
template has two options. With the full option disabled, only a section of
an HTML document is returned. with the css option disabled, the default
stylesheet won't be included.
The template's default ``encoding_errors`` value is
``'htmlentityreplace'``. The template has two options. With the
``full`` option disabled, only a section of an HTML document is
returned. With the ``css`` option disabled, the default stylesheet
won't be included.
"""
import mako.template
return mako.template.Template(r"""
<%!
from mako.exceptions import RichTraceback
from mako.exceptions import RichTraceback, syntax_highlight,\
pygments_html_formatter
%>
<%page args="full=True, css=True, error=None, traceback=None"/>
% if full:
@ -256,10 +304,29 @@ def html_error_template():
.stacktrace { margin:5px 5px 5px 5px; }
.highlight { padding:0px 10px 0px 10px; background-color:#9F9FDF; }
.nonhighlight { padding:0px; background-color:#DFDFDF; }
.sample { padding:10px; margin:10px 10px 10px 10px; font-family:monospace; }
.sample { padding:10px; margin:10px 10px 10px 10px;
font-family:monospace; }
.sampleline { padding:0px 10px 0px 10px; }
.sourceline { margin:5px 5px 10px 5px; font-family:monospace;}
.location { font-size:80%; }
.highlight { white-space:pre; }
.sampleline { white-space:pre; }
% if pygments_html_formatter:
${pygments_html_formatter.get_style_defs()}
.linenos { min-width: 2.5em; text-align: right; }
pre { margin: 0; }
.syntax-highlighted { padding: 0 10px; }
.syntax-highlightedtable { border-spacing: 1px; }
.nonhighlight { border-top: 1px solid #DFDFDF;
border-bottom: 1px solid #DFDFDF; }
.stacktrace .nonhighlight { margin: 5px 15px 10px; }
.sourceline { margin: 0 0; font-family:monospace; }
.code { background-color: #F8F8F8; width: 100%; }
.error .code { background-color: #FFBDBD; }
.error .syntax-highlighted { background-color: #FFBDBD; }
% endif
</style>
% endif
% if full:
@ -277,16 +344,29 @@ def html_error_template():
else:
lines = None
%>
<h3>${tback.errorname}: ${tback.message}</h3>
<h3>${tback.errorname}: ${tback.message|h}</h3>
% if lines:
<div class="sample">
<div class="nonhighlight">
% for index in range(max(0, line-4),min(len(lines), line+5)):
<%
if pygments_html_formatter:
pygments_html_formatter.linenostart = index + 1
%>
% if index + 1 == line:
<div class="highlight">${index + 1} ${lines[index] | h}</div>
<%
if pygments_html_formatter:
old_cssclass = pygments_html_formatter.cssclass
pygments_html_formatter.cssclass = 'error ' + old_cssclass
%>
${lines[index] | syntax_highlight(language='mako')}
<%
if pygments_html_formatter:
pygments_html_formatter.cssclass = old_cssclass
%>
% else:
<div class="sampleline">${index + 1} ${lines[index] | h}</div>
${lines[index] | syntax_highlight(language='mako')}
% endif
% endfor
</div>
@ -296,7 +376,13 @@ def html_error_template():
<div class="stacktrace">
% for (filename, lineno, function, line) in tback.reverse_traceback:
<div class="location">${filename}, line ${lineno}:</div>
<div class="sourceline">${line | h}</div>
<div class="nonhighlight">
<%
if pygments_html_formatter:
pygments_html_formatter.linenostart = lineno
%>
<div class="sourceline">${line | syntax_highlight(filename)}</div>
</div>
% endfor
</div>
@ -304,4 +390,5 @@ def html_error_template():
</body>
</html>
% endif
""", output_encoding=sys.getdefaultencoding(), encoding_errors='htmlentityreplace')
""", output_encoding=sys.getdefaultencoding(),
encoding_errors='htmlentityreplace')

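RichTraceback and the two error templates are the user-facing part of this module: the traceback is remapped from compiled-module line numbers back to the originating template (now via the module's embedded source map rather than the old "# SOURCE LINE" comments), and text_error_template() / html_error_template() render that remapped trace, the latter with optional Pygments highlighting. A minimal sketch of the usual handling pattern; the broken expression is deliberate:

from mako.template import Template
from mako import exceptions

try:
    Template("hello, ${name + }").render()
except Exception:
    # plain-text traceback with template filenames/lines substituted in
    print(exceptions.text_error_template().render())

    # or walk the remapped records directly
    tb = exceptions.RichTraceback()
    for filename, lineno, function, line in tb.traceback:
        print("File %s, line %s, in %s" % (filename, lineno, function))
        print("    %s" % line)
    print("%s: %s" % (tb.errorname, tb.message))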

@ -1,5 +1,5 @@
# ext/autohandler.py
# Copyright (C) 2006-2011 the Mako authors and contributors <see AUTHORS file>
# Copyright (C) 2006-2016 the Mako authors and contributors <see AUTHORS file>
#
# This module is part of Mako and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
@ -25,7 +25,10 @@ or with custom autohandler filename:
"""
import posixpath, os, re
import posixpath
import os
import re
def autohandler(template, context, name='autohandler'):
lookup = context.lookup
@ -42,24 +45,24 @@ def autohandler(template, context, name='autohandler'):
if path != _template_uri and _file_exists(lookup, path):
if not lookup.filesystem_checks:
return lookup._uri_cache.setdefault(
(autohandler, _template_uri, name), path)
(autohandler, _template_uri, name), path)
else:
return path
if len(tokens) == 1:
break
tokens[-2:] = [name]
if not lookup.filesystem_checks:
return lookup._uri_cache.setdefault(
(autohandler, _template_uri, name), None)
(autohandler, _template_uri, name), None)
else:
return None
def _file_exists(lookup, path):
psub = re.sub(r'^/', '',path)
psub = re.sub(r'^/', '', path)
for d in lookup.directories:
if os.path.exists(d + '/' + psub):
return True
else:
return False


@ -1,15 +1,37 @@
# ext/babelplugin.py
# Copyright (C) 2006-2011 the Mako authors and contributors <see AUTHORS file>
# Copyright (C) 2006-2016 the Mako authors and contributors <see AUTHORS file>
#
# This module is part of Mako and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
"""gettext message extraction via Babel: http://babel.edgewall.org/"""
from StringIO import StringIO
from babel.messages.extract import extract_python
from mako.ext.extract import MessageExtractor
class BabelMakoExtractor(MessageExtractor):
def __init__(self, keywords, comment_tags, options):
self.keywords = keywords
self.options = options
self.config = {
'comment-tags': u' '.join(comment_tags),
'encoding': options.get('input_encoding',
options.get('encoding', None)),
}
super(BabelMakoExtractor, self).__init__()
def __call__(self, fileobj):
return self.process_file(fileobj)
def process_python(self, code, code_lineno, translator_strings):
comment_tags = self.config['comment-tags']
for lineno, funcname, messages, python_translator_comments \
in extract_python(code,
self.keywords, comment_tags, self.options):
yield (code_lineno + (lineno - 1), funcname, messages,
translator_strings + python_translator_comments)
from mako import lexer, parsetree
def extract(fileobj, keywords, comment_tags, options):
"""Extract messages from Mako templates.
@ -23,107 +45,6 @@ def extract(fileobj, keywords, comment_tags, options):
:return: an iterator over ``(lineno, funcname, message, comments)`` tuples
:rtype: ``iterator``
"""
encoding = options.get('input_encoding', options.get('encoding', None))
template_node = lexer.Lexer(fileobj.read(),
input_encoding=encoding).parse()
for extracted in extract_nodes(template_node.get_children(),
keywords, comment_tags, options):
yield extracted
def extract_nodes(nodes, keywords, comment_tags, options):
"""Extract messages from Mako's lexer node objects
:param nodes: an iterable of Mako parsetree.Node objects to extract from
:param keywords: a list of keywords (i.e. function names) that should be
recognized as translation functions
:param comment_tags: a list of translator tags to search for and include
in the results
:param options: a dictionary of additional options (optional)
:return: an iterator over ``(lineno, funcname, message, comments)`` tuples
:rtype: ``iterator``
"""
translator_comments = []
in_translator_comments = False
for node in nodes:
child_nodes = None
if in_translator_comments and isinstance(node, parsetree.Text) and \
not node.content.strip():
# Ignore whitespace within translator comments
continue
if isinstance(node, parsetree.Comment):
value = node.text.strip()
if in_translator_comments:
translator_comments.extend(_split_comment(node.lineno, value))
continue
for comment_tag in comment_tags:
if value.startswith(comment_tag):
in_translator_comments = True
translator_comments.extend(_split_comment(node.lineno,
value))
continue
if isinstance(node, parsetree.DefTag):
code = node.function_decl.code
child_nodes = node.nodes
elif isinstance(node, parsetree.CallTag):
code = node.code.code
child_nodes = node.nodes
elif isinstance(node, parsetree.PageTag):
code = node.body_decl.code
elif isinstance(node, parsetree.CallNamespaceTag):
attribs = ', '.join(['%s=%s' % (key, val)
for key, val in node.attributes.iteritems()])
code = '{%s}' % attribs
child_nodes = node.nodes
elif isinstance(node, parsetree.ControlLine):
if node.isend:
translator_comments = []
in_translator_comments = False
continue
code = node.text
elif isinstance(node, parsetree.Code):
# <% and <%! blocks would provide their own translator comments
translator_comments = []
in_translator_comments = False
code = node.code.code
elif isinstance(node, parsetree.Expression):
code = node.code.code
else:
translator_comments = []
in_translator_comments = False
continue
# Comments don't apply unless they immediately preceed the message
if translator_comments and \
translator_comments[-1][0] < node.lineno - 1:
translator_comments = []
else:
translator_comments = \
[comment[1] for comment in translator_comments]
if isinstance(code, unicode):
code = code.encode('ascii', 'backslashreplace')
code = StringIO(code)
for lineno, funcname, messages, python_translator_comments \
in extract_python(code, keywords, comment_tags, options):
yield (node.lineno + (lineno - 1), funcname, messages,
translator_comments + python_translator_comments)
translator_comments = []
in_translator_comments = False
if child_nodes:
for extracted in extract_nodes(child_nodes, keywords, comment_tags,
options):
yield extracted
def _split_comment(lineno, comment):
"""Return the multiline comment at lineno split into a list of comment line
numbers and the accompanying comment line"""
return [(lineno + index, line) for index, line in
enumerate(comment.splitlines())]
extractor = BabelMakoExtractor(keywords, comment_tags, options)
for message in extractor(fileobj):
yield message


@ -0,0 +1,76 @@
"""Provide a :class:`.CacheImpl` for the Beaker caching system."""
from mako import exceptions
from mako.cache import CacheImpl
try:
from beaker import cache as beaker_cache
except:
has_beaker = False
else:
has_beaker = True
_beaker_cache = None
class BeakerCacheImpl(CacheImpl):
"""A :class:`.CacheImpl` provided for the Beaker caching system.
This plugin is used by default, based on the default
value of ``'beaker'`` for the ``cache_impl`` parameter of the
:class:`.Template` or :class:`.TemplateLookup` classes.
"""
def __init__(self, cache):
if not has_beaker:
raise exceptions.RuntimeException(
"Can't initialize Beaker plugin; Beaker is not installed.")
global _beaker_cache
if _beaker_cache is None:
if 'manager' in cache.template.cache_args:
_beaker_cache = cache.template.cache_args['manager']
else:
_beaker_cache = beaker_cache.CacheManager()
super(BeakerCacheImpl, self).__init__(cache)
def _get_cache(self, **kw):
expiretime = kw.pop('timeout', None)
if 'dir' in kw:
kw['data_dir'] = kw.pop('dir')
elif self.cache.template.module_directory:
kw['data_dir'] = self.cache.template.module_directory
if 'manager' in kw:
kw.pop('manager')
if kw.get('type') == 'memcached':
kw['type'] = 'ext:memcached'
if 'region' in kw:
region = kw.pop('region')
cache = _beaker_cache.get_cache_region(self.cache.id, region, **kw)
else:
cache = _beaker_cache.get_cache(self.cache.id, **kw)
cache_args = {'starttime': self.cache.starttime}
if expiretime:
cache_args['expiretime'] = expiretime
return cache, cache_args
def get_or_create(self, key, creation_function, **kw):
cache, kw = self._get_cache(**kw)
return cache.get(key, createfunc=creation_function, **kw)
def put(self, key, value, **kw):
cache, kw = self._get_cache(**kw)
cache.put(key, value, **kw)
def get(self, key, **kw):
cache, kw = self._get_cache(**kw)
return cache.get(key, **kw)
def invalidate(self, key, **kw):
cache, kw = self._get_cache(**kw)
cache.remove_value(key, **kw)

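BeakerCacheImpl is selected through the same plugin mechanism, via the default cache_impl='beaker' on Template and TemplateLookup, and its keyword arguments ('type', 'region', 'dir', 'timeout', 'manager') arrive through cache_args or per-def cache arguments. A minimal sketch, assuming Beaker is installed and that hello.mako is an existing template file:

from mako.template import Template

t = Template(
    filename="hello.mako",           # hypothetical template file
    cache_impl="beaker",             # the default, spelled out for clarity
    cache_args={"type": "memory"},   # forwarded to the Beaker backend
)

# programmatic access goes through the template's Cache object
value = t.cache.get_or_create("greeting", lambda: "expensive result")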
lib/mako/ext/extract.py (new file, 108 lines added)

@ -0,0 +1,108 @@
import re
from mako import compat
from mako import lexer
from mako import parsetree
class MessageExtractor(object):
def process_file(self, fileobj):
template_node = lexer.Lexer(
fileobj.read(),
input_encoding=self.config['encoding']).parse()
for extracted in self.extract_nodes(template_node.get_children()):
yield extracted
def extract_nodes(self, nodes):
translator_comments = []
in_translator_comments = False
input_encoding = self.config['encoding'] or 'ascii'
comment_tags = list(
filter(None, re.split(r'\s+', self.config['comment-tags'])))
for node in nodes:
child_nodes = None
if in_translator_comments and \
isinstance(node, parsetree.Text) and \
not node.content.strip():
# Ignore whitespace within translator comments
continue
if isinstance(node, parsetree.Comment):
value = node.text.strip()
if in_translator_comments:
translator_comments.extend(
self._split_comment(node.lineno, value))
continue
for comment_tag in comment_tags:
if value.startswith(comment_tag):
in_translator_comments = True
translator_comments.extend(
self._split_comment(node.lineno, value))
continue
if isinstance(node, parsetree.DefTag):
code = node.function_decl.code
child_nodes = node.nodes
elif isinstance(node, parsetree.BlockTag):
code = node.body_decl.code
child_nodes = node.nodes
elif isinstance(node, parsetree.CallTag):
code = node.code.code
child_nodes = node.nodes
elif isinstance(node, parsetree.PageTag):
code = node.body_decl.code
elif isinstance(node, parsetree.CallNamespaceTag):
code = node.expression
child_nodes = node.nodes
elif isinstance(node, parsetree.ControlLine):
if node.isend:
in_translator_comments = False
continue
code = node.text
elif isinstance(node, parsetree.Code):
in_translator_comments = False
code = node.code.code
elif isinstance(node, parsetree.Expression):
code = node.code.code
else:
continue
# Comments don't apply unless they immediately preceed the message
if translator_comments and \
translator_comments[-1][0] < node.lineno - 1:
translator_comments = []
translator_strings = [
comment[1] for comment in translator_comments]
if isinstance(code, compat.text_type):
code = code.encode(input_encoding, 'backslashreplace')
used_translator_comments = False
# We add extra newline to work around a pybabel bug
# (see python-babel/babel#274, parse_encoding dies if the first
# input string of the input is non-ascii)
# Also, because we added it, we have to subtract one from
# node.lineno
code = compat.byte_buffer(compat.b('\n') + code)
for message in self.process_python(
code, node.lineno - 1, translator_strings):
yield message
used_translator_comments = True
if used_translator_comments:
translator_comments = []
in_translator_comments = False
if child_nodes:
for extracted in self.extract_nodes(child_nodes):
yield extracted
@staticmethod
def _split_comment(lineno, comment):
"""Return the multiline comment at lineno split into a list of
comment line numbers and the accompanying comment line"""
return [(lineno + index, line) for index, line in
enumerate(comment.splitlines())]


@ -0,0 +1,43 @@
import io
from lingua.extractors import Extractor
from lingua.extractors import Message
from lingua.extractors import get_extractor
from mako.ext.extract import MessageExtractor
from mako import compat
class LinguaMakoExtractor(Extractor, MessageExtractor):
'''Mako templates'''
extensions = ['.mako']
default_config = {
'encoding': 'utf-8',
'comment-tags': '',
}
def __call__(self, filename, options, fileobj=None):
self.options = options
self.filename = filename
self.python_extractor = get_extractor('x.py')
if fileobj is None:
fileobj = open(filename, 'rb')
return self.process_file(fileobj)
def process_python(self, code, code_lineno, translator_strings):
source = code.getvalue().strip()
if source.endswith(compat.b(':')):
if source in (compat.b('try:'), compat.b('else:')) or source.startswith(compat.b('except')):
source = compat.b('') # Ignore try/except and else
elif source.startswith(compat.b('elif')):
source = source[2:] # Replace "elif" with "if"
source += compat.b('pass')
code = io.BytesIO(source)
for msg in self.python_extractor(
self.filename, self.options, code, code_lineno -1):
if translator_strings:
msg = Message(msg.msgctxt, msg.msgid, msg.msgid_plural,
msg.flags,
compat.u(' ').join(
translator_strings + [msg.comment]),
msg.tcomment, msg.location)
yield msg


@ -1,20 +1,20 @@
# ext/preprocessors.py
# Copyright (C) 2006-2011 the Mako authors and contributors <see AUTHORS file>
# Copyright (C) 2006-2016 the Mako authors and contributors <see AUTHORS file>
#
# This module is part of Mako and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
"""preprocessing functions, used with the 'preprocessor'
"""preprocessing functions, used with the 'preprocessor'
argument on Template, TemplateLookup"""
import re
def convert_comments(text):
"""preprocess old style comments.
example:
from mako.ext.preprocessors import convert_comments
t = Template(..., preprocessor=preprocess_comments)"""
return re.sub(r'(?<=\n)\s*#[^#]', "##", text)
example:
from mako.ext.preprocessors import convert_comments
t = Template(..., preprocessor=convert_comments)"""
return re.sub(r'(?<=\n)\s*#[^#]', "##", text)


@ -1,23 +1,20 @@
# ext/pygmentplugin.py
# Copyright (C) 2006-2011 the Mako authors and contributors <see AUTHORS file>
# Copyright (C) 2006-2016 the Mako authors and contributors <see AUTHORS file>
#
# This module is part of Mako and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
import re
try:
set
except NameError:
from sets import Set as set
from pygments.lexers.web import \
HtmlLexer, XmlLexer, JavascriptLexer, CssLexer
from pygments.lexers.agile import PythonLexer
from pygments.lexer import Lexer, DelegatingLexer, RegexLexer, bygroups, \
include, using, this
from pygments.token import Error, Punctuation, \
Text, Comment, Operator, Keyword, Name, String, Number, Other, Literal
from pygments.util import html_doctype_matches, looks_like_xml
HtmlLexer, XmlLexer, JavascriptLexer, CssLexer
from pygments.lexers.agile import PythonLexer, Python3Lexer
from pygments.lexer import DelegatingLexer, RegexLexer, bygroups, \
include, using
from pygments.token import \
Text, Comment, Operator, Keyword, Name, String, Other
from pygments.formatters.html import HtmlFormatter
from pygments import highlight
from mako import compat
class MakoLexer(RegexLexer):
name = 'Mako'
@ -30,13 +27,16 @@ class MakoLexer(RegexLexer):
bygroups(Text, Comment.Preproc, Keyword, Other)),
(r'(\s*)(\%(?!%))([^\n]*)(\n|\Z)',
bygroups(Text, Comment.Preproc, using(PythonLexer), Other)),
(r'(\s*)(##[^\n]*)(\n|\Z)',
bygroups(Text, Comment.Preproc, Other)),
(r'''(?s)<%doc>.*?</%doc>''', Comment.Preproc),
(r'(<%)([\w\.\:]+)', bygroups(Comment.Preproc, Name.Builtin), 'tag'),
(r'(</%)([\w\.\:]+)(>)', bygroups(Comment.Preproc, Name.Builtin, Comment.Preproc)),
(r'(\s*)(##[^\n]*)(\n|\Z)',
bygroups(Text, Comment.Preproc, Other)),
(r'''(?s)<%doc>.*?</%doc>''', Comment.Preproc),
(r'(<%)([\w\.\:]+)',
bygroups(Comment.Preproc, Name.Builtin), 'tag'),
(r'(</%)([\w\.\:]+)(>)',
bygroups(Comment.Preproc, Name.Builtin, Comment.Preproc)),
(r'<%(?=([\w\.\:]+))', Comment.Preproc, 'ondeftags'),
(r'(<%(?:!?))(.*?)(%>)(?s)', bygroups(Comment.Preproc, using(PythonLexer), Comment.Preproc)),
(r'(<%(?:!?))(.*?)(%>)(?s)',
bygroups(Comment.Preproc, using(PythonLexer), Comment.Preproc)),
(r'(\$\{)(.*?)(\})',
bygroups(Comment.Preproc, using(PythonLexer), Comment.Preproc)),
(r'''(?sx)
@ -80,7 +80,8 @@ class MakoHtmlLexer(DelegatingLexer):
def __init__(self, **options):
super(MakoHtmlLexer, self).__init__(HtmlLexer, MakoLexer,
**options)
**options)
class MakoXmlLexer(DelegatingLexer):
name = 'XML+Mako'
@ -88,7 +89,8 @@ class MakoXmlLexer(DelegatingLexer):
def __init__(self, **options):
super(MakoXmlLexer, self).__init__(XmlLexer, MakoLexer,
**options)
**options)
class MakoJavascriptLexer(DelegatingLexer):
name = 'JavaScript+Mako'
@ -96,7 +98,8 @@ class MakoJavascriptLexer(DelegatingLexer):
def __init__(self, **options):
super(MakoJavascriptLexer, self).__init__(JavascriptLexer,
MakoLexer, **options)
MakoLexer, **options)
class MakoCssLexer(DelegatingLexer):
name = 'CSS+Mako'
@ -104,4 +107,21 @@ class MakoCssLexer(DelegatingLexer):
def __init__(self, **options):
super(MakoCssLexer, self).__init__(CssLexer, MakoLexer,
**options)
**options)
pygments_html_formatter = HtmlFormatter(cssclass='syntax-highlighted',
linenos=True)
def syntax_highlight(filename='', language=None):
mako_lexer = MakoLexer()
if compat.py3k:
python_lexer = Python3Lexer()
else:
python_lexer = PythonLexer()
if filename.startswith('memory:') or language == 'mako':
return lambda string: highlight(string, mako_lexer,
pygments_html_formatter)
return lambda string: highlight(string, python_lexer,
pygments_html_formatter)

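syntax_highlight() is what html_error_template() now imports: it returns a filter callable that runs a string through either the Mako lexer or the appropriate Python lexer, using the shared HtmlFormatter with line numbers enabled. A minimal sketch, assuming Pygments is installed:

from mako.ext.pygmentplugin import syntax_highlight

highlight_mako = syntax_highlight(language='mako')
print(highlight_mako(u"hello, ${name}"))        # HTML markup for the template source

highlight_py = syntax_highlight("module.py")    # hypothetical filename, falls back to Python
print(highlight_py(u"def f(x): return x * 2"))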

@ -1,14 +1,16 @@
# ext/turbogears.py
# Copyright (C) 2006-2011 the Mako authors and contributors <see AUTHORS file>
# Copyright (C) 2006-2016 the Mako authors and contributors <see AUTHORS file>
#
# This module is part of Mako and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
import re, inspect
from mako import compat
from mako.lookup import TemplateLookup
from mako.template import Template
class TGPlugin(object):
"""TurboGears compatible Template Plugin."""
def __init__(self, extra_vars_func=None, options=None, extension='mak'):
@ -19,17 +21,17 @@ class TGPlugin(object):
# Pull the options out and initialize the lookup
lookup_options = {}
for k, v in options.iteritems():
for k, v in options.items():
if k.startswith('mako.'):
lookup_options[k[5:]] = v
elif k in ['directories', 'filesystem_checks', 'module_directory']:
lookup_options[k] = v
self.lookup = TemplateLookup(**lookup_options)
self.tmpl_options = {}
# transfer lookup args to template args, based on those available
# in getargspec
for kw in inspect.getargspec(Template.__init__)[0]:
for kw in compat.inspect_getargspec(Template.__init__)[0]:
if kw in lookup_options:
self.tmpl_options[kw] = lookup_options[kw]
@ -39,13 +41,14 @@ class TGPlugin(object):
return Template(template_string, **self.tmpl_options)
# Translate TG dot notation to normal / template path
if '/' not in templatename:
templatename = '/' + templatename.replace('.', '/') + '.' + self.extension
templatename = '/' + templatename.replace('.', '/') + '.' +\
self.extension
# Lookup template
return self.lookup.get_template(templatename)
def render(self, info, format="html", fragment=False, template=None):
if isinstance(template, basestring):
if isinstance(template, compat.string_types):
template = self.load_template(template)
# Load extra vars func if provided
@ -53,4 +56,3 @@ class TGPlugin(object):
info.update(self.extra_vars_func())
return template.render(**info)
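A small sketch of how TGPlugin might be driven (illustrative only; the directory path is a placeholder):

from mako.ext.turbogears import TGPlugin

plugin = TGPlugin(options={
    'directories': ['/path/to/templates'],   # placeholder path
    'mako.input_encoding': 'utf-8',          # 'mako.'-prefixed keys feed the lookup
})

# a template supplied as a string bypasses the lookup entirely
tmpl = plugin.load_template('inline', template_string='Hello ${name}!')
print(tmpl.render(name='TurboGears'))

# dotted names such as 'pages.index' resolve to /pages/index.mak via the lookup
# print(plugin.render({'name': 'TurboGears'}, template='pages.index'))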

View File

@ -1,29 +1,39 @@
# mako/filters.py
# Copyright (C) 2006-2011 the Mako authors and contributors <see AUTHORS file>
# Copyright (C) 2006-2016 the Mako authors and contributors <see AUTHORS file>
#
# This module is part of Mako and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
import re, urllib, htmlentitydefs, codecs
from StringIO import StringIO
from mako import util
import re
import codecs
from mako.compat import quote_plus, unquote_plus, codepoint2name, \
name2codepoint
from mako import compat
xml_escapes = {
'&' : '&amp;',
'>' : '&gt;',
'<' : '&lt;',
'"' : '&#34;', # also &quot; in html-only
"'" : '&#39;' # also &apos; in html-only
'&': '&amp;',
'>': '&gt;',
'<': '&lt;',
'"': '&#34;', # also &quot; in html-only
"'": '&#39;' # also &apos; in html-only
}
# XXX: &quot; is valid in HTML and XML
# &apos; is not valid HTML, but is valid XML
def legacy_html_escape(string):
"""legacy HTML escape for non-unicode mode."""
return re.sub(r'([&<"\'>])', lambda m: xml_escapes[m.group()], string)
def legacy_html_escape(s):
"""legacy HTML escape for non-unicode mode."""
s = s.replace("&", "&amp;")
s = s.replace(">", "&gt;")
s = s.replace("<", "&lt;")
s = s.replace('"', "&#34;")
s = s.replace("'", "&#39;")
return s
try:
import markupsafe
@ -31,49 +41,61 @@ try:
except ImportError:
html_escape = legacy_html_escape
def xml_escape(string):
return re.sub(r'([&<"\'>])', lambda m: xml_escapes[m.group()], string)
def url_escape(string):
# convert into a list of octets
string = string.encode("utf8")
return urllib.quote_plus(string)
return quote_plus(string)
def legacy_url_escape(string):
# convert into a list of octets
return quote_plus(string)
def url_unescape(string):
text = urllib.unquote_plus(string)
text = unquote_plus(string)
if not is_ascii_str(text):
text = text.decode("utf8")
return text
def trim(string):
return string.strip()
class Decode(object):
def __getattr__(self, key):
def decode(x):
if isinstance(x, unicode):
if isinstance(x, compat.text_type):
return x
elif not isinstance(x, str):
return unicode(str(x), encoding=key)
elif not isinstance(x, compat.binary_type):
return decode(str(x))
else:
return unicode(x, encoding=key)
return compat.text_type(x, encoding=key)
return decode
decode = Decode()
_ASCII_re = re.compile(r'\A[\x00-\x7f]*\Z')
def is_ascii_str(text):
return isinstance(text, str) and _ASCII_re.match(text)
################################################################
################################################################
class XMLEntityEscaper(object):
def __init__(self, codepoint2name, name2codepoint):
self.codepoint2entity = dict([(c, u'&%s;' % n)
for c,n in codepoint2name.iteritems()])
self.codepoint2entity = dict([(c, compat.text_type('&%s;' % n))
for c, n in codepoint2name.items()])
self.name2codepoint = name2codepoint
def escape_entities(self, text):
@ -81,7 +103,7 @@ class XMLEntityEscaper(object):
Only characters corresponding to a named entity are replaced.
"""
return unicode(text).translate(self.codepoint2entity)
return compat.text_type(text).translate(self.codepoint2entity)
def __escape(self, m):
codepoint = ord(m.group())
@ -90,7 +112,6 @@ class XMLEntityEscaper(object):
except (KeyError, IndexError):
return '&#x%X;' % codepoint
__escapable = re.compile(r'["&<>]|[^\x00-\x7f]')
def escape(self, text):
@ -102,7 +123,7 @@ class XMLEntityEscaper(object):
The return value is guaranteed to be ASCII.
"""
return self.__escapable.sub(self.__escape, unicode(text)
return self.__escapable.sub(self.__escape, compat.text_type(text)
).encode('ascii')
# XXX: This regexp will not match all valid XML entity names__.
@ -115,7 +136,7 @@ class XMLEntityEscaper(object):
| ( (?!\d) [:\w] [-.:\w]+ )
) ;''',
re.X | re.UNICODE)
def __unescape(self, m):
dval, hval, name = m.groups()
if dval:
@ -127,8 +148,8 @@ class XMLEntityEscaper(object):
# U+FFFD = "REPLACEMENT CHARACTER"
if codepoint < 128:
return chr(codepoint)
return unichr(codepoint)
return chr(codepoint)
def unescape(self, text):
"""Unescape character references.
@ -138,8 +159,7 @@ class XMLEntityEscaper(object):
return self.__characterrefs.sub(self.__unescape, text)
_html_entities_escaper = XMLEntityEscaper(htmlentitydefs.codepoint2name,
htmlentitydefs.name2codepoint)
_html_entities_escaper = XMLEntityEscaper(codepoint2name, name2codepoint)
html_entities_escape = _html_entities_escaper.escape_entities
html_entities_unescape = _html_entities_escaper.unescape
@ -159,30 +179,31 @@ def htmlentityreplace_errors(ex):
# Handle encoding errors
bad_text = ex.object[ex.start:ex.end]
text = _html_entities_escaper.escape(bad_text)
return (unicode(text), ex.end)
return (compat.text_type(text), ex.end)
raise ex
codecs.register_error('htmlentityreplace', htmlentityreplace_errors)
# TODO: options to make this dynamic per-compilation will be added in a later release
# TODO: options to make this dynamic per-compilation will be added in a later
# release
DEFAULT_ESCAPES = {
'x':'filters.xml_escape',
'h':'filters.html_escape',
'u':'filters.url_escape',
'trim':'filters.trim',
'entity':'filters.html_entities_escape',
'unicode':'unicode',
'decode':'decode',
'str':'str',
'n':'n'
'x': 'filters.xml_escape',
'h': 'filters.html_escape',
'u': 'filters.url_escape',
'trim': 'filters.trim',
'entity': 'filters.html_entities_escape',
'unicode': 'unicode',
'decode': 'decode',
'str': 'str',
'n': 'n'
}
if util.py3k:
if compat.py3k:
DEFAULT_ESCAPES.update({
'unicode':'str'
'unicode': 'str'
})
NON_UNICODE_ESCAPES = DEFAULT_ESCAPES.copy()
NON_UNICODE_ESCAPES['h'] = 'filters.legacy_html_escape'
NON_UNICODE_ESCAPES['u'] = 'filters.legacy_url_escape'
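The DEFAULT_ESCAPES shortcuts above are what the ``|`` filter syntax resolves to at render time; a quick illustration:

from mako.template import Template

# 'h' -> filters.html_escape, 'trim' -> filters.trim; filters apply left to right
t = Template("${greeting | h, trim}")
print(t.render(greeting="  <b>hello</b>  "))   # -> &lt;b&gt;hello&lt;/b&gt;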

View File

@ -1,21 +1,24 @@
# mako/lexer.py
# Copyright (C) 2006-2011 the Mako authors and contributors <see AUTHORS file>
# Copyright (C) 2006-2016 the Mako authors and contributors <see AUTHORS file>
#
# This module is part of Mako and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
"""provides the Lexer class for parsing template strings into parse trees."""
import re, codecs
from mako import parsetree, exceptions, util
import re
import codecs
from mako import parsetree, exceptions, compat
from mako.pygen import adjust_whitespace
_regexp_cache = {}
class Lexer(object):
def __init__(self, text, filename=None,
disable_unicode=False,
input_encoding=None, preprocessor=None):
def __init__(self, text, filename=None,
disable_unicode=False,
input_encoding=None, preprocessor=None):
self.text = text
self.filename = filename
self.template = parsetree.TemplateNode(self.filename)
@ -25,31 +28,32 @@ class Lexer(object):
self.match_position = 0
self.tag = []
self.control_line = []
self.ternary_stack = []
self.disable_unicode = disable_unicode
self.encoding = input_encoding
if util.py3k and disable_unicode:
if compat.py3k and disable_unicode:
raise exceptions.UnsupportedError(
"Mako for Python 3 does not "
"support disabling Unicode")
"Mako for Python 3 does not "
"support disabling Unicode")
if preprocessor is None:
self.preprocessor = []
elif not hasattr(preprocessor, '__iter__'):
self.preprocessor = [preprocessor]
else:
self.preprocessor = preprocessor
@property
def exception_kwargs(self):
return {'source':self.text,
'lineno':self.matched_lineno,
'pos':self.matched_charpos,
'filename':self.filename}
return {'source': self.text,
'lineno': self.matched_lineno,
'pos': self.matched_charpos,
'filename': self.filename}
def match(self, regexp, flags=None):
"""compile the given regexp, cache the reg, and call match_reg()."""
try:
reg = _regexp_cache[(regexp, flags)]
except KeyError:
@ -58,14 +62,15 @@ class Lexer(object):
else:
reg = re.compile(regexp)
_regexp_cache[(regexp, flags)] = reg
return self.match_reg(reg)
def match_reg(self, reg):
"""match the given regular expression object to the current text position.
"""match the given regular expression object to the current text
position.
if a match occurs, update the current text and line position.
"""
mp = self.match_position
@ -80,43 +85,53 @@ class Lexer(object):
self.matched_lineno = self.lineno
lines = re.findall(r"\n", self.text[mp:self.match_position])
cp = mp - 1
while (cp >= 0 and cp<self.textlength and self.text[cp] != '\n'):
cp -=1
while (cp >= 0 and cp < self.textlength and self.text[cp] != '\n'):
cp -= 1
self.matched_charpos = mp - cp
self.lineno += len(lines)
#print "MATCHED:", match.group(0), "LINE START:",
# print "MATCHED:", match.group(0), "LINE START:",
# self.matched_lineno, "LINE END:", self.lineno
#print "MATCH:", regexp, "\n", self.text[mp : mp + 15], (match and "TRUE" or "FALSE")
# print "MATCH:", regexp, "\n", self.text[mp : mp + 15], \
# (match and "TRUE" or "FALSE")
return match
def parse_until_text(self, *text):
def parse_until_text(self, watch_nesting, *text):
startpos = self.match_position
text_re = r'|'.join(text)
brace_level = 0
paren_level = 0
bracket_level = 0
while True:
match = self.match(r'#.*\n')
if match:
continue
match = self.match(r'(\"\"\"|\'\'\'|\"|\')')
match = self.match(r'(\"\"\"|\'\'\'|\"|\')[^\\]*?(\\.[^\\]*?)*\1',
re.S)
if match:
m = self.match(r'.*?%s' % match.group(1), re.S)
if not m:
raise exceptions.SyntaxException(
"Unmatched '%s'" %
match.group(1),
**self.exception_kwargs)
else:
match = self.match(r'(%s)' % r'|'.join(text))
if match:
return \
self.text[startpos:self.match_position-len(match.group(1))],\
match.group(1)
else:
match = self.match(r".*?(?=\"|\'|#|%s)" % r'|'.join(text), re.S)
if not match:
raise exceptions.SyntaxException(
"Expected: %s" %
','.join(text),
**self.exception_kwargs)
continue
match = self.match(r'(%s)' % text_re)
if match and not (watch_nesting
and (brace_level > 0 or paren_level > 0
or bracket_level > 0)):
return \
self.text[startpos:
self.match_position - len(match.group(1))],\
match.group(1)
elif not match:
match = self.match(r"(.*?)(?=\"|\'|#|%s)" % text_re, re.S)
if match:
brace_level += match.group(1).count('{')
brace_level -= match.group(1).count('}')
paren_level += match.group(1).count('(')
paren_level -= match.group(1).count(')')
bracket_level += match.group(1).count('[')
bracket_level -= match.group(1).count(']')
continue
raise exceptions.SyntaxException(
"Expected: %s" %
','.join(text),
**self.exception_kwargs)
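The new ``watch_nesting`` bookkeeping above lets closing braces, parens and brackets inside Python literals survive within ``${...}``; a minimal sketch of the effect:

from mako.template import Template

# the inner '}' no longer terminates the expression prematurely
print(Template("${ {'key': 'value'}['key'] }").render())   # -> value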
def append_node(self, nodecls, *args, **kwargs):
kwargs.setdefault('source', self.text)
kwargs.setdefault('lineno', self.matched_lineno)
@ -127,6 +142,17 @@ class Lexer(object):
self.tag[-1].nodes.append(node)
else:
self.template.nodes.append(node)
# build a set of child nodes for the control line
# (used for loop variable detection)
# also build a set of child nodes on ternary control lines
# (used for determining if a pass needs to be auto-inserted)
if self.control_line:
control_frame = self.control_line[-1]
control_frame.nodes.append(node)
if not (isinstance(node, parsetree.ControlLine) and
control_frame.is_ternary(node.keyword)):
if self.ternary_stack and self.ternary_stack[-1]:
self.ternary_stack[-1][-1].nodes.append(node)
if isinstance(node, parsetree.Tag):
if len(self.tag):
node.parent = self.tag[-1]
@ -134,14 +160,19 @@ class Lexer(object):
elif isinstance(node, parsetree.ControlLine):
if node.isend:
self.control_line.pop()
self.ternary_stack.pop()
elif node.is_primary:
self.control_line.append(node)
elif len(self.control_line) and \
self.ternary_stack.append([])
elif self.control_line and \
self.control_line[-1].is_ternary(node.keyword):
self.ternary_stack[-1].append(node)
elif self.control_line and \
not self.control_line[-1].is_ternary(node.keyword):
raise exceptions.SyntaxException(
"Keyword '%s' not a legal ternary for keyword '%s'" %
(node.keyword, self.control_line[-1].keyword),
**self.exception_kwargs)
"Keyword '%s' not a legal ternary for keyword '%s'" %
(node.keyword, self.control_line[-1].keyword),
**self.exception_kwargs)
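The ``ternary_stack`` added above tracks ``elif``/``else``/``except``/``finally`` branches and their children, so that branches with no content can still compile; an illustrative template:

from mako.template import Template

tmpl = """\
% if items:
%     for item in items:
${item}
%     endfor
% else:
% endif
"""
# the empty '% else:' branch is legal; the code generator emits 'pass' for it
print(Template(tmpl).render(items=['a', 'b']))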
_coding_re = re.compile(r'#.*coding[:=]\s*([-\w.]+).*\r?\n')
@ -151,7 +182,7 @@ class Lexer(object):
or raw if decode_raw=False
"""
if isinstance(text, unicode):
if isinstance(text, compat.text_type):
m = self._coding_re.match(text)
encoding = m and m.group(1) or known_encoding or 'ascii'
return encoding, text
@ -162,10 +193,10 @@ class Lexer(object):
m = self._coding_re.match(text.decode('utf-8', 'ignore'))
if m is not None and m.group(1) != 'utf-8':
raise exceptions.CompileException(
"Found utf-8 BOM in file, with conflicting "
"magic encoding comment of '%s'" % m.group(1),
text.decode('utf-8', 'ignore'),
0, 0, filename)
"Found utf-8 BOM in file, with conflicting "
"magic encoding comment of '%s'" % m.group(1),
text.decode('utf-8', 'ignore'),
0, 0, filename)
else:
m = self._coding_re.match(text.decode('utf-8', 'ignore'))
if m:
@ -176,34 +207,35 @@ class Lexer(object):
if decode_raw:
try:
text = text.decode(parsed_encoding)
except UnicodeDecodeError, e:
except UnicodeDecodeError:
raise exceptions.CompileException(
"Unicode decode operation of encoding '%s' failed" %
parsed_encoding,
text.decode('utf-8', 'ignore'),
0, 0, filename)
"Unicode decode operation of encoding '%s' failed" %
parsed_encoding,
text.decode('utf-8', 'ignore'),
0, 0, filename)
return parsed_encoding, text
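``decode_raw_stream`` above is what honors Mako's magic encoding comment on byte input; a small sketch (output encoding left at its default, so render() returns text):

from mako.template import Template

# byte input carrying a magic comment is decoded before lexing
source = u"## -*- coding: utf-8 -*-\nCaf\u00e9 ${name}".encode('utf-8')
print(Template(source).render(name=u'menu'))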
def parse(self):
self.encoding, self.text = self.decode_raw_stream(self.text,
not self.disable_unicode,
self.encoding,
self.filename,)
self.encoding, self.text = self.decode_raw_stream(
self.text,
not self.disable_unicode,
self.encoding,
self.filename)
for preproc in self.preprocessor:
self.text = preproc(self.text)
# push the match marker past the
# push the match marker past the
# encoding comment.
self.match_reg(self._coding_re)
self.textlength = len(self.text)
while (True):
if self.match_position > self.textlength:
if self.match_position > self.textlength:
break
if self.match_end():
break
if self.match_expression():
@ -212,53 +244,56 @@ class Lexer(object):
continue
if self.match_comment():
continue
if self.match_tag_start():
if self.match_tag_start():
continue
if self.match_tag_end():
continue
if self.match_python_block():
continue
if self.match_text():
if self.match_text():
continue
if self.match_position > self.textlength:
if self.match_position > self.textlength:
break
raise exceptions.CompileException("assertion failed")
if len(self.tag):
raise exceptions.SyntaxException("Unclosed tag: <%%%s>" %
self.tag[-1].keyword,
**self.exception_kwargs)
raise exceptions.SyntaxException("Unclosed tag: <%%%s>" %
self.tag[-1].keyword,
**self.exception_kwargs)
if len(self.control_line):
raise exceptions.SyntaxException("Unterminated control keyword: '%s'" %
self.control_line[-1].keyword,
self.text,
self.control_line[-1].lineno,
self.control_line[-1].pos, self.filename)
raise exceptions.SyntaxException(
"Unterminated control keyword: '%s'" %
self.control_line[-1].keyword,
self.text,
self.control_line[-1].lineno,
self.control_line[-1].pos, self.filename)
return self.template
def match_tag_start(self):
match = self.match(r'''
\<% # opening tag
([\w\.\:]+) # keyword
((?:\s+\w+|\s*=\s*|".*?"|'.*?')*) # attrname, = sign, string expression
((?:\s+\w+|\s*=\s*|".*?"|'.*?')*) # attrname, = \
# sign, string expression
\s* # more whitespace
(/)?> # closing
''',
re.I | re.S | re.X)
''',
re.I | re.S | re.X)
if match:
keyword, attr, isend = match.group(1), match.group(2), match.group(3)
keyword, attr, isend = match.groups()
self.keyword = keyword
attributes = {}
if attr:
for att in re.findall(r"\s*(\w+)\s*=\s*(?:'([^']*)'|\"([^\"]*)\")", attr):
for att in re.findall(
r"\s*(\w+)\s*=\s*(?:'([^']*)'|\"([^\"]*)\")", attr):
key, val1, val2 = att
text = val1 or val2
text = text.replace('\r\n', '\n')
@ -268,36 +303,36 @@ class Lexer(object):
self.tag.pop()
else:
if keyword == 'text':
match = self.match(r'(.*?)(?=\</%text>)', re.S)
match = self.match(r'(.*?)(?=\</%text>)', re.S)
if not match:
raise exceptions.SyntaxException(
"Unclosed tag: <%%%s>" %
self.tag[-1].keyword,
**self.exception_kwargs)
"Unclosed tag: <%%%s>" %
self.tag[-1].keyword,
**self.exception_kwargs)
self.append_node(parsetree.Text, match.group(1))
return self.match_tag_end()
return True
else:
else:
return False
def match_tag_end(self):
match = self.match(r'\</%[\t ]*(.+?)[\t ]*>')
if match:
if not len(self.tag):
raise exceptions.SyntaxException(
"Closing tag without opening tag: </%%%s>" %
match.group(1),
**self.exception_kwargs)
"Closing tag without opening tag: </%%%s>" %
match.group(1),
**self.exception_kwargs)
elif self.tag[-1].keyword != match.group(1):
raise exceptions.SyntaxException(
"Closing tag </%%%s> does not match tag: <%%%s>" %
(match.group(1), self.tag[-1].keyword),
**self.exception_kwargs)
"Closing tag </%%%s> does not match tag: <%%%s>" %
(match.group(1), self.tag[-1].keyword),
**self.exception_kwargs)
self.tag.pop()
return True
else:
return False
def match_end(self):
match = self.match(r'\Z', re.S)
if match:
@ -308,19 +343,17 @@ class Lexer(object):
return True
else:
return False
def match_text(self):
match = self.match(r"""
(.*?) # anything, followed by:
(
(?<=\n)(?=[ \t]*(?=%|\#\#)) # an eval or line-based
# comment preceded by a
(?<=\n)(?=[ \t]*(?=%|\#\#)) # an eval or line-based
# comment preceded by a
# consumed newline and whitespace
|
(?=\${) # an expression
|
(?=\#\*) # multiline comment
|
(?=</?[%&]) # a substitution or block or call start or end
# - don't consume
|
@ -328,7 +361,7 @@ class Lexer(object):
|
\Z # end of string
)""", re.X | re.S)
if match:
text = match.group(1)
if text:
@ -336,43 +369,45 @@ class Lexer(object):
return True
else:
return False
def match_python_block(self):
match = self.match(r"<%(!)?")
if match:
line, pos = self.matched_lineno, self.matched_charpos
text, end = self.parse_until_text(r'%>')
# the trailing newline helps
text, end = self.parse_until_text(False, r'%>')
# the trailing newline helps
# compiler.parse() not complain about indentation
text = adjust_whitespace(text) + "\n"
text = adjust_whitespace(text) + "\n"
self.append_node(
parsetree.Code,
text,
match.group(1)=='!', lineno=line, pos=pos)
parsetree.Code,
text,
match.group(1) == '!', lineno=line, pos=pos)
return True
else:
return False
def match_expression(self):
match = self.match(r"\${")
if match:
line, pos = self.matched_lineno, self.matched_charpos
text, end = self.parse_until_text(r'\|', r'}')
text, end = self.parse_until_text(True, r'\|', r'}')
if end == '|':
escapes, end = self.parse_until_text(r'}')
escapes, end = self.parse_until_text(True, r'}')
else:
escapes = ""
text = text.replace('\r\n', '\n')
self.append_node(
parsetree.Expression,
text, escapes.strip(),
lineno=line, pos=pos)
parsetree.Expression,
text, escapes.strip(),
lineno=line, pos=pos)
return True
else:
return False
def match_control_line(self):
match = self.match(r"(?<=^)[\t ]*(%(?!%)|##)[\t ]*((?:(?:\\r?\n)|[^\r\n])*)(?:\r?\n|\Z)", re.M)
match = self.match(
r"(?<=^)[\t ]*(%(?!%)|##)[\t ]*((?:(?:\\r?\n)|[^\r\n])*)"
r"(?:\r?\n|\Z)", re.M)
if match:
operator = match.group(1)
text = match.group(2)
@ -380,23 +415,23 @@ class Lexer(object):
m2 = re.match(r'(end)?(\w+)\s*(.*)', text)
if not m2:
raise exceptions.SyntaxException(
"Invalid control line: '%s'" %
text,
**self.exception_kwargs)
"Invalid control line: '%s'" %
text,
**self.exception_kwargs)
isend, keyword = m2.group(1, 2)
isend = (isend is not None)
if isend:
if not len(self.control_line):
raise exceptions.SyntaxException(
"No starting keyword '%s' for '%s'" %
(keyword, text),
**self.exception_kwargs)
"No starting keyword '%s' for '%s'" %
(keyword, text),
**self.exception_kwargs)
elif self.control_line[-1].keyword != keyword:
raise exceptions.SyntaxException(
"Keyword '%s' doesn't match keyword '%s'" %
(text, self.control_line[-1].keyword),
**self.exception_kwargs)
"Keyword '%s' doesn't match keyword '%s'" %
(text, self.control_line[-1].keyword),
**self.exception_kwargs)
self.append_node(parsetree.ControlLine, keyword, isend, text)
else:
self.append_node(parsetree.Comment, text)
@ -412,4 +447,3 @@ class Lexer(object):
return True
else:
return False

View File

@ -1,10 +1,13 @@
# mako/lookup.py
# Copyright (C) 2006-2011 the Mako authors and contributors <see AUTHORS file>
# Copyright (C) 2006-2016 the Mako authors and contributors <see AUTHORS file>
#
# This module is part of Mako and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
import os, stat, posixpath, re
import os
import stat
import posixpath
import re
from mako import exceptions, util
from mako.template import Template
@ -12,30 +15,32 @@ try:
import threading
except:
import dummy_threading as threading
class TemplateCollection(object):
"""Represent a collection of :class:`.Template` objects,
identifiable via uri.
"""Represent a collection of :class:`.Template` objects,
identifiable via URI.
A :class:`.TemplateCollection` is linked to the usage of
all template tags that address other templates, such
all template tags that address other templates, such
as ``<%include>``, ``<%namespace>``, and ``<%inherit>``.
The ``file`` attribute of each of those tags refers
to a string URI that is passed to that :class:`.Template`
object's :class:`.TemplateCollection` for resolution.
:class:`.TemplateCollection` is an abstract class,
with the usual default implementation being :class:`.TemplateLookup`.
"""
def has_template(self, uri):
"""Return ``True`` if this :class:`.TemplateLookup` is
capable of returning a :class:`.Template` object for the
given URL.
given ``uri``.
:param uri: String URI of the template to be resolved.
:param uri: String uri of the template to be resolved.
"""
try:
self.get_template(uri)
@ -44,124 +49,140 @@ class TemplateCollection(object):
return False
def get_template(self, uri, relativeto=None):
"""Return a :class:`.Template` object corresponding to the given
URL.
"""Return a :class:`.Template` object corresponding to the given
``uri``.
The default implementation raises
:class:`.NotImplementedError`. Implementations should
raise :class:`.TemplateLookupException` if the given uri
raise :class:`.TemplateLookupException` if the given ``uri``
cannot be resolved.
:param uri: String uri of the template to be resolved.
:param relativeto: if present, the given URI is assumed to
be relative to this uri.
:param uri: String URI of the template to be resolved.
:param relativeto: if present, the given ``uri`` is assumed to
be relative to this URI.
"""
raise NotImplementedError()
def filename_to_uri(self, uri, filename):
"""Convert the given filename to a uri relative to
this TemplateCollection."""
"""Convert the given ``filename`` to a URI relative to
this :class:`.TemplateCollection`."""
return uri
def adjust_uri(self, uri, filename):
"""Adjust the given uri based on the calling filename.
"""Adjust the given ``uri`` based on the calling ``filename``.
When this method is called from the runtime, the
'filename' parameter is taken directly to the 'filename'
``filename`` parameter is taken directly to the ``filename``
attribute of the calling template. Therefore a custom
TemplateCollection subclass can place any string
identifier desired in the "filename" parameter of the
Template objects it constructs and have them come back
:class:`.TemplateCollection` subclass can place any string
identifier desired in the ``filename`` parameter of the
:class:`.Template` objects it constructs and have them come back
here.
"""
return uri
class TemplateLookup(TemplateCollection):
"""Represent a collection of templates that locates template source files
from the local filesystem.
The primary argument is the ``directories`` argument, the list of
directories to search::
directories to search:
.. sourcecode:: python
lookup = TemplateLookup(["/path/to/templates"])
some_template = lookup.get_template("/index.html")
The :class:`.TemplateLookup` can also be given :class:`.Template` objects
programmatically using :meth:`.put_string` or :meth:`.put_template`::
programmatically using :meth:`.put_string` or :meth:`.put_template`:
.. sourcecode:: python
lookup = TemplateLookup()
lookup.put_string("base.html", '''
<html><body>${self.next()}</body></html>
''')
lookup.put_string("hello.html", '''
<%include file='base.html'/>
Hello, world !
''')
:param directories: A list of directory names which will be
:param directories: A list of directory names which will be
searched for a particular template URI. The URI is appended
to each directory and the filesystem checked.
:param collection_size: Approximate size of the collection used
to store templates. If left at its default of -1, the size
:param collection_size: Approximate size of the collection used
to store templates. If left at its default of ``-1``, the size
is unbounded, and a plain Python dictionary is used to
relate URI strings to :class:`.Template` instances.
Otherwise, a least-recently-used cache object is used which
will maintain the size of the collection approximately to
the number given.
:param filesystem_checks: When at its default value of ``True``,
each call to :meth:`TemplateLookup.get_template()` will
:param filesystem_checks: When at its default value of ``True``,
each call to :meth:`.TemplateLookup.get_template()` will
compare the filesystem last modified time to the time in
which an existing :class:`.Template` object was created.
This allows the :class:`.TemplateLookup` to regenerate a
new :class:`.Template` whenever the original source has
been updated. Set this to ``False`` for a very minor
performance increase.
:param modulename_callable: A callable which, when present,
:param modulename_callable: A callable which, when present,
is passed the path of the source file as well as the
requested URI, and then returns the full path of the
generated Python module file. This is used to inject
alternate schemes for Pyhton module location. If left at
alternate schemes for Python module location. If left at
its default of ``None``, the built in system of generation
based on ``module_directory`` plus ``uri`` is used.
All other keyword parameters available for
:class:`.Template` are mirrored here. When new
:class:`.Template` objects are created, the keywords
established with this :class:`.TemplateLookup` are passed on
to each new :class:`.Template`.
"""
def __init__(self,
directories=None,
module_directory=None,
filesystem_checks=True,
collection_size=-1,
format_exceptions=False,
error_handler=None,
disable_unicode=False,
bytestring_passthrough=False,
output_encoding=None,
encoding_errors='strict',
cache_type=None,
cache_dir=None, cache_url=None,
cache_enabled=True,
modulename_callable=None,
default_filters=None,
buffer_filters=(),
strict_undefined=False,
imports=None,
input_encoding=None,
preprocessor=None):
def __init__(self,
directories=None,
module_directory=None,
filesystem_checks=True,
collection_size=-1,
format_exceptions=False,
error_handler=None,
disable_unicode=False,
bytestring_passthrough=False,
output_encoding=None,
encoding_errors='strict',
cache_args=None,
cache_impl='beaker',
cache_enabled=True,
cache_type=None,
cache_dir=None,
cache_url=None,
modulename_callable=None,
module_writer=None,
default_filters=None,
buffer_filters=(),
strict_undefined=False,
imports=None,
future_imports=None,
enable_loop=True,
input_encoding=None,
preprocessor=None,
lexer_cls=None,
include_error_handler=None):
self.directories = [posixpath.normpath(d) for d in
util.to_list(directories, ())
]
@ -170,24 +191,39 @@ class TemplateLookup(TemplateCollection):
self.filesystem_checks = filesystem_checks
self.collection_size = collection_size
if cache_args is None:
cache_args = {}
# transfer deprecated cache_* args
if cache_dir:
cache_args.setdefault('dir', cache_dir)
if cache_url:
cache_args.setdefault('url', cache_url)
if cache_type:
cache_args.setdefault('type', cache_type)
self.template_args = {
'format_exceptions':format_exceptions,
'error_handler':error_handler,
'disable_unicode':disable_unicode,
'bytestring_passthrough':bytestring_passthrough,
'output_encoding':output_encoding,
'encoding_errors':encoding_errors,
'input_encoding':input_encoding,
'module_directory':module_directory,
'cache_type':cache_type,
'cache_dir':cache_dir or module_directory,
'cache_url':cache_url,
'cache_enabled':cache_enabled,
'default_filters':default_filters,
'buffer_filters':buffer_filters,
'strict_undefined':strict_undefined,
'imports':imports,
'preprocessor':preprocessor}
'format_exceptions': format_exceptions,
'error_handler': error_handler,
'include_error_handler': include_error_handler,
'disable_unicode': disable_unicode,
'bytestring_passthrough': bytestring_passthrough,
'output_encoding': output_encoding,
'cache_impl': cache_impl,
'encoding_errors': encoding_errors,
'input_encoding': input_encoding,
'module_directory': module_directory,
'module_writer': module_writer,
'cache_args': cache_args,
'cache_enabled': cache_enabled,
'default_filters': default_filters,
'buffer_filters': buffer_filters,
'strict_undefined': strict_undefined,
'imports': imports,
'future_imports': future_imports,
'enable_loop': enable_loop,
'preprocessor': preprocessor,
'lexer_cls': lexer_cls
}
if collection_size == -1:
self._collection = {}
@ -196,15 +232,16 @@ class TemplateLookup(TemplateCollection):
self._collection = util.LRUCache(collection_size)
self._uri_cache = util.LRUCache(collection_size)
self._mutex = threading.Lock()
def get_template(self, uri):
"""Return a :class:`.Template` object corresponding to the given
URL.
Note the "relativeto" argument is not supported here at the moment.
"""Return a :class:`.Template` object corresponding to the given
``uri``.
.. note:: The ``relativeto`` argument is not supported here at
the moment.
"""
try:
if self.filesystem_checks:
return self._check(uri, self._collection[uri])
@ -213,33 +250,36 @@ class TemplateLookup(TemplateCollection):
except KeyError:
u = re.sub(r'^\/+', '', uri)
for dir in self.directories:
# make sure the path separators are posix - os.altsep is empty
# on POSIX and cannot be used.
dir = dir.replace(os.path.sep, posixpath.sep)
srcfile = posixpath.normpath(posixpath.join(dir, u))
if os.path.isfile(srcfile):
return self._load(srcfile, uri)
else:
raise exceptions.TopLevelLookupException(
"Cant locate template for uri %r" % uri)
"Cant locate template for uri %r" % uri)
def adjust_uri(self, uri, relativeto):
"""adjust the given uri based on the given relative uri."""
"""Adjust the given ``uri`` based on the given relative URI."""
key = (uri, relativeto)
if key in self._uri_cache:
return self._uri_cache[key]
if uri[0] != '/':
if relativeto is not None:
v = self._uri_cache[key] = posixpath.join(posixpath.dirname(relativeto), uri)
v = self._uri_cache[key] = posixpath.join(
posixpath.dirname(relativeto), uri)
else:
v = self._uri_cache[key] = '/' + uri
else:
v = self._uri_cache[key] = uri
return v
def filename_to_uri(self, filename):
"""Convert the given filename to a uri relative to
this TemplateCollection."""
"""Convert the given ``filename`` to a URI relative to
this :class:`.TemplateCollection`."""
try:
return self._uri_cache[filename]
@ -247,25 +287,25 @@ class TemplateLookup(TemplateCollection):
value = self._relativeize(filename)
self._uri_cache[filename] = value
return value
def _relativeize(self, filename):
"""Return the portion of a filename that is 'relative'
"""Return the portion of a filename that is 'relative'
to the directories in this lookup.
"""
filename = posixpath.normpath(filename)
for dir in self.directories:
if filename[0:len(dir)] == dir:
return filename[len(dir):]
else:
return None
def _load(self, filename, uri):
self._mutex.acquire()
try:
try:
# try returning from collection one
# try returning from collection one
# more time in case concurrent thread already loaded
return self._collection[uri]
except KeyError:
@ -276,21 +316,21 @@ class TemplateLookup(TemplateCollection):
else:
module_filename = None
self._collection[uri] = template = Template(
uri=uri,
filename=posixpath.normpath(filename),
lookup=self,
module_filename=module_filename,
**self.template_args)
uri=uri,
filename=posixpath.normpath(filename),
lookup=self,
module_filename=module_filename,
**self.template_args)
return template
except:
# if compilation fails etc, ensure
# if compilation fails etc, ensure
# template is removed from collection,
# re-raise
self._collection.pop(uri, None)
raise
finally:
self._mutex.release()
def _check(self, uri, template):
if template.filename is None:
return template
@ -298,7 +338,7 @@ class TemplateLookup(TemplateCollection):
try:
template_stat = os.stat(template.filename)
if template.module._modified_time < \
template_stat[stat.ST_MTIME]:
template_stat[stat.ST_MTIME]:
self._collection.pop(uri, None)
return self._load(template.filename, uri)
else:
@ -306,26 +346,24 @@ class TemplateLookup(TemplateCollection):
except OSError:
self._collection.pop(uri, None)
raise exceptions.TemplateLookupException(
"Cant locate template for uri %r" % uri)
"Cant locate template for uri %r" % uri)
def put_string(self, uri, text):
"""Place a new :class:`.Template` object into this
:class:`.TemplateLookup`, based on the given string of
text.
``text``.
"""
self._collection[uri] = Template(
text,
lookup=self,
uri=uri,
**self.template_args)
text,
lookup=self,
uri=uri,
**self.template_args)
def put_template(self, uri, template):
"""Place a new :class:`.Template` object into this
:class:`.TemplateLookup`, based on the given
:class:`.Template` object.
"""
self._collection[uri] = template
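To round out the lookup changes above, a runnable sketch of the collection in use; put_string() avoids needing template files on disk:

from mako.lookup import TemplateLookup

# an LRU-bounded collection of templates keyed by URI
lookup = TemplateLookup(collection_size=500, input_encoding='utf-8')
lookup.put_string("base.txt", "== ${self.body()} ==")
lookup.put_string("hello.txt", "<%inherit file='base.txt'/>Hello, ${name}!")
print(lookup.get_template("hello.txt").render(name="lookup"))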

View File

@ -1,127 +1,145 @@
# mako/parsetree.py
# Copyright (C) 2006-2011 the Mako authors and contributors <see AUTHORS file>
# Copyright (C) 2006-2016 the Mako authors and contributors <see AUTHORS file>
#
# This module is part of Mako and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
"""defines the parse tree components for Mako templates."""
from mako import exceptions, ast, util, filters
from mako import exceptions, ast, util, filters, compat
import re
class Node(object):
"""base class for a Node in the parse tree."""
def __init__(self, source, lineno, pos, filename):
self.source = source
self.lineno = lineno
self.pos = pos
self.filename = filename
@property
def exception_kwargs(self):
return {'source':self.source, 'lineno':self.lineno,
'pos':self.pos, 'filename':self.filename}
return {'source': self.source, 'lineno': self.lineno,
'pos': self.pos, 'filename': self.filename}
def get_children(self):
return []
def accept_visitor(self, visitor):
def traverse(node):
for n in node.get_children():
n.accept_visitor(visitor)
method = getattr(visitor, "visit" + self.__class__.__name__, traverse)
method(self)
class TemplateNode(Node):
"""a 'container' node that stores the overall collection of nodes."""
def __init__(self, filename):
super(TemplateNode, self).__init__('', 0, 0, filename)
self.nodes = []
self.page_attributes = {}
def get_children(self):
return self.nodes
def __repr__(self):
return "TemplateNode(%s, %r)" % (
util.sorted_dict_repr(self.page_attributes),
self.nodes)
util.sorted_dict_repr(self.page_attributes),
self.nodes)
class ControlLine(Node):
"""defines a control line, a line-oriented python line or end tag.
e.g.::
% if foo:
(markup)
% endif
"""
has_loop_context = False
def __init__(self, keyword, isend, text, **kwargs):
super(ControlLine, self).__init__(**kwargs)
self.text = text
self.keyword = keyword
self.isend = isend
self.is_primary = keyword in ['for','if', 'while', 'try']
self.is_primary = keyword in ['for', 'if', 'while', 'try', 'with']
self.nodes = []
if self.isend:
self._declared_identifiers = []
self._undeclared_identifiers = []
else:
code = ast.PythonFragment(text, **self.exception_kwargs)
self._declared_identifiers = code.declared_identifiers
self._declared_identifiers = code.declared_identifiers
self._undeclared_identifiers = code.undeclared_identifiers
def get_children(self):
return self.nodes
def declared_identifiers(self):
return self._declared_identifiers
def undeclared_identifiers(self):
return self._undeclared_identifiers
def is_ternary(self, keyword):
"""return true if the given keyword is a ternary keyword
for this ControlLine"""
return keyword in {
'if':set(['else', 'elif']),
'try':set(['except', 'finally']),
'for':set(['else'])
'if': set(['else', 'elif']),
'try': set(['except', 'finally']),
'for': set(['else'])
}.get(self.keyword, [])
def __repr__(self):
return "ControlLine(%r, %r, %r, %r)" % (
self.keyword,
self.text,
self.isend,
self.keyword,
self.text,
self.isend,
(self.lineno, self.pos)
)
class Text(Node):
"""defines plain text in the template."""
def __init__(self, content, **kwargs):
super(Text, self).__init__(**kwargs)
self.content = content
def __repr__(self):
return "Text(%r, %r)" % (self.content, (self.lineno, self.pos))
class Code(Node):
"""defines a Python code block, either inline or module level.
e.g.::
inline:
<%
x = 12
%>
module level:
<%!
import logger
%>
"""
def __init__(self, text, ismodule, **kwargs):
@ -138,32 +156,36 @@ class Code(Node):
def __repr__(self):
return "Code(%r, %r, %r)" % (
self.text,
self.ismodule,
self.text,
self.ismodule,
(self.lineno, self.pos)
)
class Comment(Node):
"""defines a comment line.
# this is a comment
"""
def __init__(self, text, **kwargs):
super(Comment, self).__init__(**kwargs)
self.text = text
def __repr__(self):
return "Comment(%r, %r)" % (self.text, (self.lineno, self.pos))
class Expression(Node):
"""defines an inline expression.
${x+y}
"""
def __init__(self, text, escapes, **kwargs):
super(Expression, self).__init__(**kwargs)
self.text = text
@ -177,81 +199,83 @@ class Expression(Node):
def undeclared_identifiers(self):
# TODO: make the "filter" shortcut list configurable at parse/gen time
return self.code.undeclared_identifiers.union(
self.escapes_code.undeclared_identifiers.difference(
set(filters.DEFAULT_ESCAPES.keys())
)
).difference(self.code.declared_identifiers)
self.escapes_code.undeclared_identifiers.difference(
set(filters.DEFAULT_ESCAPES.keys())
)
).difference(self.code.declared_identifiers)
def __repr__(self):
return "Expression(%r, %r, %r)" % (
self.text,
self.escapes_code.args,
self.text,
self.escapes_code.args,
(self.lineno, self.pos)
)
class _TagMeta(type):
"""metaclass to allow Tag to produce a subclass according to
its keyword"""
_classmap = {}
def __init__(cls, clsname, bases, dict):
if cls.__keyword__ is not None:
if getattr(cls, '__keyword__', None) is not None:
cls._classmap[cls.__keyword__] = cls
super(_TagMeta, cls).__init__(clsname, bases, dict)
super(_TagMeta, cls).__init__(clsname, bases, dict)
def __call__(cls, keyword, attributes, **kwargs):
if ":" in keyword:
ns, defname = keyword.split(':')
return type.__call__(CallNamespaceTag, ns, defname,
attributes, **kwargs)
return type.__call__(CallNamespaceTag, ns, defname,
attributes, **kwargs)
try:
cls = _TagMeta._classmap[keyword]
except KeyError:
raise exceptions.CompileException(
"No such tag: '%s'" % keyword,
source=kwargs['source'],
lineno=kwargs['lineno'],
pos=kwargs['pos'],
"No such tag: '%s'" % keyword,
source=kwargs['source'],
lineno=kwargs['lineno'],
pos=kwargs['pos'],
filename=kwargs['filename']
)
return type.__call__(cls, keyword, attributes, **kwargs)
class Tag(Node):
class Tag(compat.with_metaclass(_TagMeta, Node)):
"""abstract base class for tags.
<%sometag/>
<%someothertag>
stuff
</%someothertag>
"""
__metaclass__ = _TagMeta
__keyword__ = None
def __init__(self, keyword, attributes, expressions,
nonexpressions, required, **kwargs):
def __init__(self, keyword, attributes, expressions,
nonexpressions, required, **kwargs):
"""construct a new Tag instance.
this constructor is not called directly, and is only called
by subclasses.
:param keyword: the tag keyword
:param attributes: raw dictionary of attribute key/value pairs
:param expressions: a set of identifiers that are legal attributes,
:param expressions: a set of identifiers that are legal attributes,
which can also contain embedded expressions
:param nonexpressions: a set of identifiers that are legal
:param nonexpressions: a set of identifiers that are legal
attributes, which cannot contain embedded expressions
:param \**kwargs:
other arguments passed to the Node superclass (lineno, pos)
"""
super(Tag, self).__init__(**kwargs)
self.keyword = keyword
@ -260,18 +284,18 @@ class Tag(Node):
missing = [r for r in required if r not in self.parsed_attributes]
if len(missing):
raise exceptions.CompileException(
"Missing attribute(s): %s" %
",".join([repr(m) for m in missing]),
"Missing attribute(s): %s" %
",".join([repr(m) for m in missing]),
**self.exception_kwargs)
self.parent = None
self.nodes = []
def is_root(self):
return self.parent is None
def get_children(self):
return self.nodes
def _parse_attributes(self, expressions, nonexpressions):
undeclared_identifiers = set()
self.parsed_attributes = {}
@ -283,14 +307,14 @@ class Tag(Node):
m = re.compile(r'^\${(.+?)}$', re.S).match(x)
if m:
code = ast.PythonCode(m.group(1).rstrip(),
**self.exception_kwargs)
**self.exception_kwargs)
# we aren't discarding "declared_identifiers" here,
# which we do so that list comprehension-declared
# variables aren't counted. As yet can't find a
# which we do so that list comprehension-declared
# variables aren't counted. As yet can't find a
# condition that requires it here.
undeclared_identifiers = \
undeclared_identifiers.union(
code.undeclared_identifiers)
code.undeclared_identifiers)
expr.append('(%s)' % m.group(1))
else:
if x:
@ -299,15 +323,15 @@ class Tag(Node):
elif key in nonexpressions:
if re.search(r'\${.+?}', self.attributes[key]):
raise exceptions.CompileException(
"Attibute '%s' in tag '%s' does not allow embedded "
"expressions" % (key, self.keyword),
**self.exception_kwargs)
"Attibute '%s' in tag '%s' does not allow embedded "
"expressions" % (key, self.keyword),
**self.exception_kwargs)
self.parsed_attributes[key] = repr(self.attributes[key])
else:
raise exceptions.CompileException(
"Invalid attribute for tag '%s': '%s'" %
(self.keyword, key),
**self.exception_kwargs)
"Invalid attribute for tag '%s': '%s'" %
(self.keyword, key),
**self.exception_kwargs)
self.expression_undeclared_identifiers = undeclared_identifiers
def declared_identifiers(self):
@ -317,49 +341,51 @@ class Tag(Node):
return self.expression_undeclared_identifiers
def __repr__(self):
return "%s(%r, %s, %r, %r)" % (self.__class__.__name__,
self.keyword,
util.sorted_dict_repr(self.attributes),
(self.lineno, self.pos),
self.nodes
)
return "%s(%r, %s, %r, %r)" % (self.__class__.__name__,
self.keyword,
util.sorted_dict_repr(self.attributes),
(self.lineno, self.pos),
self.nodes
)
class IncludeTag(Tag):
__keyword__ = 'include'
def __init__(self, keyword, attributes, **kwargs):
super(IncludeTag, self).__init__(
keyword,
attributes,
('file', 'import', 'args'),
(), ('file',), **kwargs)
keyword,
attributes,
('file', 'import', 'args'),
(), ('file',), **kwargs)
self.page_args = ast.PythonCode(
"__DUMMY(%s)" % attributes.get('args', ''),
**self.exception_kwargs)
"__DUMMY(%s)" % attributes.get('args', ''),
**self.exception_kwargs)
def declared_identifiers(self):
return []
def undeclared_identifiers(self):
identifiers = self.page_args.undeclared_identifiers.\
difference(set(["__DUMMY"])).\
difference(self.page_args.declared_identifiers)
difference(set(["__DUMMY"])).\
difference(self.page_args.declared_identifiers)
return identifiers.union(super(IncludeTag, self).
undeclared_identifiers())
undeclared_identifiers())
class NamespaceTag(Tag):
__keyword__ = 'namespace'
def __init__(self, keyword, attributes, **kwargs):
super(NamespaceTag, self).__init__(
keyword, attributes,
('file',),
('name','inheritable',
'import','module'),
(), **kwargs)
keyword, attributes,
('file',),
('name', 'inheritable',
'import', 'module'),
(), **kwargs)
self.name = attributes.get('name', '__anon_%s' % hex(abs(id(self))))
if not 'name' in attributes and not 'import' in attributes:
if 'name' not in attributes and 'import' not in attributes:
raise exceptions.CompileException(
"'name' and/or 'import' attributes are required "
"for <%namespace>",
@ -373,42 +399,53 @@ class NamespaceTag(Tag):
def declared_identifiers(self):
return []
class TextTag(Tag):
__keyword__ = 'text'
def __init__(self, keyword, attributes, **kwargs):
super(TextTag, self).__init__(
keyword,
attributes, (),
('filter'), (), **kwargs)
keyword,
attributes, (),
('filter'), (), **kwargs)
self.filter_args = ast.ArgumentList(
attributes.get('filter', ''),
**self.exception_kwargs)
attributes.get('filter', ''),
**self.exception_kwargs)
def undeclared_identifiers(self):
return self.filter_args.\
undeclared_identifiers.\
difference(filters.DEFAULT_ESCAPES.keys()).union(
self.expression_undeclared_identifiers
)
class DefTag(Tag):
__keyword__ = 'def'
def __init__(self, keyword, attributes, **kwargs):
expressions = ['buffered', 'cached'] + [
c for c in attributes if c.startswith('cache_')]
super(DefTag, self).__init__(
keyword,
attributes,
('buffered', 'cached', 'cache_key', 'cache_timeout',
'cache_type', 'cache_dir', 'cache_url'),
('name','filter', 'decorator'),
('name',),
**kwargs)
keyword,
attributes,
expressions,
('name', 'filter', 'decorator'),
('name',),
**kwargs)
name = attributes['name']
if re.match(r'^[\w_]+$',name):
if re.match(r'^[\w_]+$', name):
raise exceptions.CompileException(
"Missing parenthesis in %def",
**self.exception_kwargs)
self.function_decl = ast.FunctionDecl("def " + name + ":pass",
**self.exception_kwargs)
"Missing parenthesis in %def",
**self.exception_kwargs)
self.function_decl = ast.FunctionDecl("def " + name + ":pass",
**self.exception_kwargs)
self.name = self.function_decl.funcname
self.decorator = attributes.get('decorator', '')
self.filter_args = ast.ArgumentList(
attributes.get('filter', ''),
**self.exception_kwargs)
attributes.get('filter', ''),
**self.exception_kwargs)
is_anonymous = False
is_block = False
@ -421,49 +458,56 @@ class DefTag(Tag):
return self.function_decl.get_argument_expressions(**kw)
def declared_identifiers(self):
return self.function_decl.argnames
return self.function_decl.allargnames
def undeclared_identifiers(self):
res = []
for c in self.function_decl.defaults:
res += list(ast.PythonCode(c, **self.exception_kwargs).
undeclared_identifiers)
return res + list(self.filter_args.\
undeclared_identifiers.\
difference(filters.DEFAULT_ESCAPES.keys())
)
undeclared_identifiers)
return set(res).union(
self.filter_args.
undeclared_identifiers.
difference(filters.DEFAULT_ESCAPES.keys())
).union(
self.expression_undeclared_identifiers
).difference(
self.function_decl.allargnames
)
class BlockTag(Tag):
__keyword__ = 'block'
def __init__(self, keyword, attributes, **kwargs):
expressions = ['buffered', 'cached', 'args'] + [
c for c in attributes if c.startswith('cache_')]
super(BlockTag, self).__init__(
keyword,
attributes,
('buffered', 'cached', 'cache_key', 'cache_timeout',
'cache_type', 'cache_dir', 'cache_url', 'args'),
('name','filter', 'decorator'),
(),
**kwargs)
keyword,
attributes,
expressions,
('name', 'filter', 'decorator'),
(),
**kwargs)
name = attributes.get('name')
if name and not re.match(r'^[\w_]+$',name):
if name and not re.match(r'^[\w_]+$', name):
raise exceptions.CompileException(
"%block may not specify an argument signature",
**self.exception_kwargs)
"%block may not specify an argument signature",
**self.exception_kwargs)
if not name and attributes.get('args', None):
raise exceptions.CompileException(
"Only named %blocks may specify args",
**self.exception_kwargs
)
self.body_decl = ast.FunctionArgs(attributes.get('args', ''),
**self.exception_kwargs)
"Only named %blocks may specify args",
**self.exception_kwargs
)
self.body_decl = ast.FunctionArgs(attributes.get('args', ''),
**self.exception_kwargs)
self.name = name
self.decorator = attributes.get('decorator', '')
self.filter_args = ast.ArgumentList(
attributes.get('filter', ''),
**self.exception_kwargs)
attributes.get('filter', ''),
**self.exception_kwargs)
is_block = True
@ -479,87 +523,94 @@ class BlockTag(Tag):
return self.body_decl.get_argument_expressions(**kw)
def declared_identifiers(self):
return self.body_decl.argnames
return self.body_decl.allargnames
def undeclared_identifiers(self):
return []
return (self.filter_args.
undeclared_identifiers.
difference(filters.DEFAULT_ESCAPES.keys())
).union(self.expression_undeclared_identifiers)
class CallTag(Tag):
__keyword__ = 'call'
def __init__(self, keyword, attributes, **kwargs):
super(CallTag, self).__init__(keyword, attributes,
('args'), ('expr',), ('expr',), **kwargs)
super(CallTag, self).__init__(keyword, attributes,
('args'), ('expr',), ('expr',), **kwargs)
self.expression = attributes['expr']
self.code = ast.PythonCode(self.expression, **self.exception_kwargs)
self.body_decl = ast.FunctionArgs(attributes.get('args', ''),
**self.exception_kwargs)
self.body_decl = ast.FunctionArgs(attributes.get('args', ''),
**self.exception_kwargs)
def declared_identifiers(self):
return self.code.declared_identifiers.union(self.body_decl.argnames)
return self.code.declared_identifiers.union(self.body_decl.allargnames)
def undeclared_identifiers(self):
return self.code.undeclared_identifiers.\
difference(self.code.declared_identifiers)
difference(self.code.declared_identifiers)
class CallNamespaceTag(Tag):
def __init__(self, namespace, defname, attributes, **kwargs):
super(CallNamespaceTag, self).__init__(
namespace + ":" + defname,
attributes,
tuple(attributes.keys()) + ('args', ),
(),
(),
**kwargs)
namespace + ":" + defname,
attributes,
tuple(attributes.keys()) + ('args', ),
(),
(),
**kwargs)
self.expression = "%s.%s(%s)" % (
namespace,
defname,
",".join(["%s=%s" % (k, v) for k, v in
self.parsed_attributes.iteritems()
if k != 'args'])
)
namespace,
defname,
",".join(["%s=%s" % (k, v) for k, v in
self.parsed_attributes.items()
if k != 'args'])
)
self.code = ast.PythonCode(self.expression, **self.exception_kwargs)
self.body_decl = ast.FunctionArgs(
attributes.get('args', ''),
**self.exception_kwargs)
attributes.get('args', ''),
**self.exception_kwargs)
def declared_identifiers(self):
return self.code.declared_identifiers.union(self.body_decl.argnames)
return self.code.declared_identifiers.union(self.body_decl.allargnames)
def undeclared_identifiers(self):
return self.code.undeclared_identifiers.\
difference(self.code.declared_identifiers)
difference(self.code.declared_identifiers)
class InheritTag(Tag):
__keyword__ = 'inherit'
def __init__(self, keyword, attributes, **kwargs):
super(InheritTag, self).__init__(
keyword, attributes,
('file',), (), ('file',), **kwargs)
keyword, attributes,
('file',), (), ('file',), **kwargs)
class PageTag(Tag):
__keyword__ = 'page'
def __init__(self, keyword, attributes, **kwargs):
expressions = \
['cached', 'args', 'expression_filter', 'enable_loop'] + \
[c for c in attributes if c.startswith('cache_')]
super(PageTag, self).__init__(
keyword,
attributes,
('cached', 'cache_key', 'cache_timeout',
'cache_type', 'cache_dir', 'cache_url',
'args', 'expression_filter'),
(),
(),
**kwargs)
self.body_decl = ast.FunctionArgs(attributes.get('args', ''),
**self.exception_kwargs)
keyword,
attributes,
expressions,
(),
(),
**kwargs)
self.body_decl = ast.FunctionArgs(attributes.get('args', ''),
**self.exception_kwargs)
self.filter_args = ast.ArgumentList(
attributes.get('expression_filter', ''),
**self.exception_kwargs)
attributes.get('expression_filter', ''),
**self.exception_kwargs)
def declared_identifiers(self):
return self.body_decl.argnames
return self.body_decl.allargnames
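The node classes defined in this file are what ``Lexer.parse()`` returns; a quick inspection sketch:

from mako.lexer import Lexer

tree = Lexer("Hello ${name | h}!\n% if name:\nhi\n% endif\n").parse()
for node in tree.get_children():
    # Text, Expression and ControlLine instances, each with line/position info
    print(node)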

View File

@ -1,61 +1,78 @@
# mako/pygen.py
# Copyright (C) 2006-2011 the Mako authors and contributors <see AUTHORS file>
# Copyright (C) 2006-2016 the Mako authors and contributors <see AUTHORS file>
#
# This module is part of Mako and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
"""utilities for generating and formatting literal Python code."""
import re, string
from StringIO import StringIO
import re
from mako import exceptions
class PythonPrinter(object):
def __init__(self, stream):
# indentation counter
self.indent = 0
# a stack storing information about why we incremented
# a stack storing information about why we incremented
# the indentation counter, to help us determine if we
# should decrement it
self.indent_detail = []
# the string of whitespace multiplied by the indent
# counter to produce a line
self.indentstring = " "
# the stream we are writing to
self.stream = stream
# current line number
self.lineno = 1
# a list of lines that represents a buffered "block" of code,
# which can be later printed relative to an indent level
# which can be later printed relative to an indent level
self.line_buffer = []
self.in_indent_lines = False
self._reset_multi_line_flags()
def write(self, text):
self.stream.write(text)
# mapping of generated python lines to template
# source lines
self.source_map = {}
def _update_lineno(self, num):
self.lineno += num
def start_source(self, lineno):
if self.lineno not in self.source_map:
self.source_map[self.lineno] = lineno
def write_blanks(self, num):
self.stream.write("\n" * num)
self._update_lineno(num)
def write_indented_block(self, block):
"""print a line or lines of python which already contain indentation.
The indentation of the total block of lines will be adjusted to that of
the current indent level."""
the current indent level."""
self.in_indent_lines = False
for l in re.split(r'\r?\n', block):
self.line_buffer.append(l)
self._update_lineno(1)
def writelines(self, *lines):
"""print a series of lines of python."""
for line in lines:
self.writeline(line)
def writeline(self, line):
"""print a line of python, indenting it according to the current
indent level.
this also adjusts the indentation counter according to the
content of the line.
@ -65,42 +82,42 @@ class PythonPrinter(object):
self._flush_adjusted_lines()
self.in_indent_lines = True
decreased_indent = False
if (line is None or
re.match(r"^\s*#",line) or
if (
line is None or
re.match(r"^\s*#", line) or
re.match(r"^\s*$", line)
):
):
hastext = False
else:
hastext = True
is_comment = line and len(line) and line[0] == '#'
# see if this line should decrease the indentation level
if (not decreased_indent and
not is_comment and
if (
not is_comment and
(not hastext or self._is_unindentor(line))
):
if self.indent > 0:
self.indent -=1
):
if self.indent > 0:
self.indent -= 1
# if the indent_detail stack is empty, the user
# probably put extra closures - the resulting
# module wont compile.
if len(self.indent_detail) == 0:
# module wont compile.
if len(self.indent_detail) == 0:
raise exceptions.SyntaxException(
"Too many whitespace closures")
"Too many whitespace closures")
self.indent_detail.pop()
if line is None:
return
# write the line
self.stream.write(self._indent_line(line) + "\n")
self._update_lineno(len(line.split("\n")))
# see if this line should increase the indentation level.
# note that a line can both decrease (before printing) and
# note that a line can both decrease (before printing) and
# then increase (after printing) the indentation level.
if re.search(r":[ \t]*(?:#.*)?$", line):
@ -108,18 +125,19 @@ class PythonPrinter(object):
# keep track of what the keyword was that indented us,
# if it is a python compound statement keyword
# where we might have to look for an "unindent" keyword
match = re.match(r"^\s*(if|try|elif|while|for)", line)
match = re.match(r"^\s*(if|try|elif|while|for|with)", line)
if match:
# its a "compound" keyword, so we will check for "unindentors"
indentor = match.group(1)
self.indent +=1
self.indent += 1
self.indent_detail.append(indentor)
else:
indentor = None
# its not a "compound" keyword. but lets also
# test for valid Python keywords that might be indenting us,
# else assume its a non-indenting line
m2 = re.match(r"^\s*(def|class|else|elif|except|finally)", line)
m2 = re.match(r"^\s*(def|class|else|elif|except|finally)",
line)
if m2:
self.indent += 1
self.indent_detail.append(indentor)
@ -127,53 +145,53 @@ class PythonPrinter(object):
def close(self):
"""close this printer, flushing any remaining lines."""
self._flush_adjusted_lines()
def _is_unindentor(self, line):
"""return true if the given line is an 'unindentor',
"""return true if the given line is an 'unindentor',
relative to the last 'indent' event received.
"""
# no indentation detail has been pushed on; return False
if len(self.indent_detail) == 0:
if len(self.indent_detail) == 0:
return False
indentor = self.indent_detail[-1]
# the last indent keyword we grabbed is not a
# the last indent keyword we grabbed is not a
# compound statement keyword; return False
if indentor is None:
if indentor is None:
return False
# if the current line doesnt have one of the "unindentor" keywords,
# return False
match = re.match(r"^\s*(else|elif|except|finally).*\:", line)
if not match:
if not match:
return False
# whitespace matches up, we have a compound indentor,
# and this line has an unindentor, this
# is probably good enough
return True
        # should we decide that it's not good enough, here's
# more stuff to check.
#keyword = match.group(1)
# match the original indent keyword
#for crit in [
# keyword = match.group(1)
# match the original indent keyword
# for crit in [
# (r'if|elif', r'else|elif'),
# (r'try', r'except|finally|else'),
# (r'while|for', r'else'),
#]:
# if re.match(crit[0], indentor) and re.match(crit[1], keyword):
# ]:
# if re.match(crit[0], indentor) and re.match(crit[1], keyword):
# return True
#return False
# return False
def _indent_line(self, line, stripspace=''):
"""indent the given line according to the current indent level.
stripspace is a string of space that will be truncated from the
start of the line before indenting."""
@@ -185,7 +203,7 @@ class PythonPrinter(object):
or triple-quoted section."""
self.backslashed, self.triplequoted = False, False
def _in_multi_line(self, line):
"""return true if the given line is part of a multi-line block,
via backslash or triple-quote."""
@@ -195,24 +213,24 @@ class PythonPrinter(object):
# guard against the possibility of modifying the space inside of
# a literal multiline string with unfortunately placed
# whitespace
current_state = (self.backslashed or self.triplequoted)
current_state = (self.backslashed or self.triplequoted)
if re.search(r"\\$", line):
self.backslashed = True
else:
self.backslashed = False
triples = len(re.findall(r"\"\"\"|\'\'\'", line))
if triples == 1 or triples % 2 != 0:
self.triplequoted = not self.triplequoted
return current_state
def _flush_adjusted_lines(self):
stripspace = None
self._reset_multi_line_flags()
for entry in self.line_buffer:
if self._in_multi_line(entry):
self.stream.write(entry + "\n")
@@ -221,32 +239,32 @@ class PythonPrinter(object):
if stripspace is None and re.search(r"^[ \t]*[^# \t]", entry):
stripspace = re.match(r"^([ \t]*)", entry).group(1)
self.stream.write(self._indent_line(entry, stripspace) + "\n")
self.line_buffer = []
self._reset_multi_line_flags()
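# Illustrative sketch of how the printer above is driven (assuming
# mako.pygen.PythonPrinter as defined in this file): a line ending in ":"
# pushes the indent level, and writeline(None) pops it.
from mako.pygen import PythonPrinter
import sys

printer = PythonPrinter(sys.stdout)
printer.writeline("def greet(name):")           # trailing colon bumps the indent level
printer.writeline("return 'hello, %s' % name")  # emitted one level deeper
printer.writeline(None)                         # None unindents without writing anything
printer.close()                                 # flushes any buffered writelines()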
def adjust_whitespace(text):
"""remove the left-whitespace margin of a block of Python code."""
state = [False, False]
(backslashed, triplequoted) = (0, 1)
def in_multi_line(line):
start_state = (state[backslashed] or state[triplequoted])
if re.search(r"\\$", line):
state[backslashed] = True
else:
state[backslashed] = False
def match(reg, t):
m = re.match(reg, t)
if m:
return m, t[len(m.group(0)):]
else:
return None, t
while line:
if state[triplequoted]:
m, line = match(r"%s" % state[triplequoted], line)
@@ -258,17 +276,17 @@ def adjust_whitespace(text):
m, line = match(r'#', line)
if m:
return start_state
m, line = match(r"\"\"\"|\'\'\'", line)
if m:
state[triplequoted] = m.group(0)
continue
m, line = match(r".*?(?=\"\"\"|\'\'\'|#|$)", line)
return start_state
def _indent_line(line, stripspace = ''):
def _indent_line(line, stripspace=''):
return re.sub(r"^%s" % stripspace, '', line)
lines = []
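# Illustrative sketch: adjust_whitespace() strips the common left margin so an
# indented block of Python captured from a template compiles at module level.
from mako.pygen import adjust_whitespace

block = """
    x = 10
    if x > 5:
        print(x)
"""
print(adjust_whitespace(block))
# output (roughly):
# x = 10
# if x > 5:
#     print(x)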

View File

@@ -1,5 +1,5 @@
# mako/pyparser.py
# Copyright (C) 2006-2011 the Mako authors and contributors <see AUTHORS file>
# Copyright (C) 2006-2016 the Mako authors and contributors <see AUTHORS file>
#
# This module is part of Mako and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
@@ -10,524 +10,224 @@ Parsing to AST is done via _ast on Python > 2.5, otherwise the compiler
module is used.
"""
from StringIO import StringIO
from mako import exceptions, util
from mako import exceptions, util, compat
from mako.compat import arg_stringname
import operator
if util.py3k:
# words that cannot be assigned to (notably
if compat.py3k:
# words that cannot be assigned to (notably
# smaller than the total keys in __builtins__)
reserved = set(['True', 'False', 'None', 'print'])
# the "id" attribute on a function node
arg_id = operator.attrgetter('arg')
else:
# words that cannot be assigned to (notably
# words that cannot be assigned to (notably
# smaller than the total keys in __builtins__)
reserved = set(['True', 'False', 'None'])
# the "id" attribute on a function node
arg_id = operator.attrgetter('id')
try:
import _ast
util.restore__ast(_ast)
import _ast_util
except ImportError:
_ast = None
from compiler import parse as compiler_parse
from compiler import visitor
import _ast
util.restore__ast(_ast)
from mako import _ast_util
def parse(code, mode='exec', **exception_kwargs):
"""Parse an expression into AST"""
try:
if _ast:
return _ast_util.parse(code, '<unknown>', mode)
else:
if isinstance(code, unicode):
code = code.encode('ascii', 'backslashreplace')
return compiler_parse(code, mode)
except Exception, e:
return _ast_util.parse(code, '<unknown>', mode)
except Exception:
raise exceptions.SyntaxException(
"(%s) %s (%r)" % (
e.__class__.__name__,
e,
code[0:50]
), **exception_kwargs)
"(%s) %s (%r)" % (
compat.exception_as().__class__.__name__,
compat.exception_as(),
code[0:50]
), **exception_kwargs)
if _ast:
class FindIdentifiers(_ast_util.NodeVisitor):
class FindIdentifiers(_ast_util.NodeVisitor):
def __init__(self, listener, **exception_kwargs):
self.in_function = False
self.in_assign_targets = False
self.local_ident_stack = {}
self.listener = listener
self.exception_kwargs = exception_kwargs
def __init__(self, listener, **exception_kwargs):
self.in_function = False
self.in_assign_targets = False
self.local_ident_stack = set()
self.listener = listener
self.exception_kwargs = exception_kwargs
def _add_declared(self, name):
if not self.in_function:
self.listener.declared_identifiers.add(name)
def _add_declared(self, name):
if not self.in_function:
self.listener.declared_identifiers.add(name)
else:
self.local_ident_stack.add(name)
def visit_ClassDef(self, node):
self._add_declared(node.name)
def visit_ClassDef(self, node):
self._add_declared(node.name)
def visit_Assign(self, node):
def visit_Assign(self, node):
# flip around the visiting of Assign so the expression gets
# evaluated first, in the case of a clause like "x=x+5" (x
# is undeclared)
# flip around the visiting of Assign so the expression gets
# evaluated first, in the case of a clause like "x=x+5" (x
# is undeclared)
self.visit(node.value)
in_a = self.in_assign_targets
self.in_assign_targets = True
for n in node.targets:
self.visit(n)
self.in_assign_targets = in_a
self.visit(node.value)
in_a = self.in_assign_targets
self.in_assign_targets = True
for n in node.targets:
self.visit(n)
self.in_assign_targets = in_a
if util.py3k:
if compat.py3k:
# ExceptHandler is in Python 2, but this block only works in
# Python 3 (and is required there)
# ExceptHandler is in Python 2, but this block only works in
# Python 3 (and is required there)
def visit_ExceptHandler(self, node):
if node.name is not None:
self._add_declared(node.name)
if node.type is not None:
self.listener.undeclared_identifiers.add(node.type.id)
for statement in node.body:
self.visit(statement)
def visit_Lambda(self, node, *args):
self._visit_function(node, True)
def visit_FunctionDef(self, node):
self._add_declared(node.name)
self._visit_function(node, False)
def _visit_function(self, node, islambda):
# push function state onto stack. dont log any more
# identifiers as "declared" until outside of the function,
# but keep logging identifiers as "undeclared". track
# argument names in each function header so they arent
# counted as "undeclared"
saved = {}
inf = self.in_function
self.in_function = True
for arg in node.args.args:
if arg_id(arg) in self.local_ident_stack:
saved[arg_id(arg)] = True
else:
self.local_ident_stack[arg_id(arg)] = True
if islambda:
self.visit(node.body)
else:
for n in node.body:
self.visit(n)
self.in_function = inf
for arg in node.args.args:
if arg_id(arg) not in saved:
del self.local_ident_stack[arg_id(arg)]
def visit_For(self, node):
# flip around visit
self.visit(node.iter)
self.visit(node.target)
def visit_ExceptHandler(self, node):
if node.name is not None:
self._add_declared(node.name)
if node.type is not None:
self.visit(node.type)
for statement in node.body:
self.visit(statement)
for statement in node.orelse:
self.visit(statement)
def visit_Name(self, node):
if isinstance(node.ctx, _ast.Store):
self._add_declared(node.id)
if node.id not in reserved and node.id \
not in self.listener.declared_identifiers and node.id \
def visit_Lambda(self, node, *args):
self._visit_function(node, True)
def visit_FunctionDef(self, node):
self._add_declared(node.name)
self._visit_function(node, False)
def _expand_tuples(self, args):
for arg in args:
if isinstance(arg, _ast.Tuple):
for n in arg.elts:
yield n
else:
yield arg
def _visit_function(self, node, islambda):
# push function state onto stack. dont log any more
# identifiers as "declared" until outside of the function,
# but keep logging identifiers as "undeclared". track
# argument names in each function header so they arent
# counted as "undeclared"
inf = self.in_function
self.in_function = True
local_ident_stack = self.local_ident_stack
self.local_ident_stack = local_ident_stack.union([
arg_id(arg) for arg in self._expand_tuples(node.args.args)
])
if islambda:
self.visit(node.body)
else:
for n in node.body:
self.visit(n)
self.in_function = inf
self.local_ident_stack = local_ident_stack
def visit_For(self, node):
# flip around visit
self.visit(node.iter)
self.visit(node.target)
for statement in node.body:
self.visit(statement)
for statement in node.orelse:
self.visit(statement)
def visit_Name(self, node):
if isinstance(node.ctx, _ast.Store):
            # this is equivalent to visit_AssName in
# compiler
self._add_declared(node.id)
elif node.id not in reserved and node.id \
not in self.listener.declared_identifiers and node.id \
not in self.local_ident_stack:
self.listener.undeclared_identifiers.add(node.id)
self.listener.undeclared_identifiers.add(node.id)
def visit_Import(self, node):
for name in node.names:
if name.asname is not None:
self._add_declared(name.asname)
else:
self._add_declared(name.name.split('.')[0])
def visit_Import(self, node):
for name in node.names:
if name.asname is not None:
self._add_declared(name.asname)
else:
self._add_declared(name.name.split('.')[0])
def visit_ImportFrom(self, node):
for name in node.names:
if name.asname is not None:
self._add_declared(name.asname)
else:
if name.name == '*':
raise exceptions.CompileException(
"'import *' is not supported, since all identifier "
"names must be explicitly declared. Please use the "
"form 'from <modulename> import <name1>, <name2>, "
"...' instead.", **self.exception_kwargs)
self._add_declared(name.name)
class FindTuple(_ast_util.NodeVisitor):
def __init__(self, listener, code_factory, **exception_kwargs):
self.listener = listener
self.exception_kwargs = exception_kwargs
self.code_factory = code_factory
def visit_Tuple(self, node):
for n in node.elts:
p = self.code_factory(n, **self.exception_kwargs)
self.listener.codeargs.append(p)
self.listener.args.append(ExpressionGenerator(n).value())
self.listener.declared_identifiers = \
self.listener.declared_identifiers.union(
p.declared_identifiers)
self.listener.undeclared_identifiers = \
self.listener.undeclared_identifiers.union(
p.undeclared_identifiers)
class ParseFunc(_ast_util.NodeVisitor):
def __init__(self, listener, **exception_kwargs):
self.listener = listener
self.exception_kwargs = exception_kwargs
def visit_FunctionDef(self, node):
self.listener.funcname = node.name
argnames = [arg_id(arg) for arg in node.args.args]
if node.args.vararg:
argnames.append(node.args.vararg)
if node.args.kwarg:
argnames.append(node.args.kwarg)
self.listener.argnames = argnames
self.listener.defaults = node.args.defaults # ast
self.listener.varargs = node.args.vararg
self.listener.kwargs = node.args.kwarg
class ExpressionGenerator(object):
def __init__(self, astnode):
self.generator = _ast_util.SourceGenerator(' ' * 4)
self.generator.visit(astnode)
def value(self):
return ''.join(self.generator.result)
else:
class FindIdentifiers(object):
def __init__(self, listener, **exception_kwargs):
self.in_function = False
self.local_ident_stack = {}
self.listener = listener
self.exception_kwargs = exception_kwargs
def _add_declared(self, name):
if not self.in_function:
self.listener.declared_identifiers.add(name)
def visitClass(self, node, *args):
self._add_declared(node.name)
def visitAssName(self, node, *args):
self._add_declared(node.name)
def visitAssign(self, node, *args):
# flip around the visiting of Assign so the expression gets
# evaluated first, in the case of a clause like "x=x+5" (x
# is undeclared)
self.visit(node.expr, *args)
for n in node.nodes:
self.visit(n, *args)
def visitLambda(self, node, *args):
self._visit_function(node, args)
def visitFunction(self, node, *args):
self._add_declared(node.name)
self._visit_function(node, args)
def _visit_function(self, node, args):
# push function state onto stack. dont log any more
# identifiers as "declared" until outside of the function,
# but keep logging identifiers as "undeclared". track
# argument names in each function header so they arent
# counted as "undeclared"
saved = {}
inf = self.in_function
self.in_function = True
for arg in node.argnames:
if arg in self.local_ident_stack:
saved[arg] = True
else:
self.local_ident_stack[arg] = True
for n in node.getChildNodes():
self.visit(n, *args)
self.in_function = inf
for arg in node.argnames:
if arg not in saved:
del self.local_ident_stack[arg]
def visitFor(self, node, *args):
# flip around visit
self.visit(node.list, *args)
self.visit(node.assign, *args)
self.visit(node.body, *args)
def visitName(self, node, *args):
if node.name not in reserved and node.name \
not in self.listener.declared_identifiers and node.name \
not in self.local_ident_stack:
self.listener.undeclared_identifiers.add(node.name)
def visitImport(self, node, *args):
for mod, alias in node.names:
if alias is not None:
self._add_declared(alias)
else:
self._add_declared(mod.split('.')[0])
def visitFrom(self, node, *args):
for mod, alias in node.names:
if alias is not None:
self._add_declared(alias)
else:
if mod == '*':
raise exceptions.CompileException(
def visit_ImportFrom(self, node):
for name in node.names:
if name.asname is not None:
self._add_declared(name.asname)
else:
if name.name == '*':
raise exceptions.CompileException(
"'import *' is not supported, since all identifier "
"names must be explicitly declared. Please use the "
"form 'from <modulename> import <name1>, <name2>, "
"...' instead.", **self.exception_kwargs)
self._add_declared(mod)
def visit(self, expr):
visitor.walk(expr, self) # , walker=walker())
self._add_declared(name.name)
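# Illustrative sketch of how the visitor above is driven: FindIdentifiers
# expects a listener exposing declared_identifiers / undeclared_identifiers
# sets; the _Listener class below is a stand-in for the code objects in
# mako.ast, not part of Mako itself.
from mako import pyparser

class _Listener(object):
    def __init__(self):
        self.declared_identifiers = set()
        self.undeclared_identifiers = set()

node = pyparser.parse("x = y + 5")
listener = _Listener()
pyparser.FindIdentifiers(listener).visit(node)
print(listener.declared_identifiers)    # {'x'}
print(listener.undeclared_identifiers)  # {'y'}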
class FindTuple(object):
class FindTuple(_ast_util.NodeVisitor):
def __init__(self, listener, code_factory, **exception_kwargs):
self.listener = listener
self.exception_kwargs = exception_kwargs
self.code_factory = code_factory
def __init__(self, listener, code_factory, **exception_kwargs):
self.listener = listener
self.exception_kwargs = exception_kwargs
self.code_factory = code_factory
def visitTuple(self, node, *args):
for n in node.nodes:
p = self.code_factory(n, **self.exception_kwargs)
self.listener.codeargs.append(p)
self.listener.args.append(ExpressionGenerator(n).value())
self.listener.declared_identifiers = \
self.listener.declared_identifiers.union(p.declared_identifiers)
self.listener.undeclared_identifiers = \
self.listener.undeclared_identifiers.union(p.undeclared_identifiers)
def visit(self, expr):
visitor.walk(expr, self) # , walker=walker())
def visit_Tuple(self, node):
for n in node.elts:
p = self.code_factory(n, **self.exception_kwargs)
self.listener.codeargs.append(p)
self.listener.args.append(ExpressionGenerator(n).value())
self.listener.declared_identifiers = \
self.listener.declared_identifiers.union(
p.declared_identifiers)
self.listener.undeclared_identifiers = \
self.listener.undeclared_identifiers.union(
p.undeclared_identifiers)
class ParseFunc(object):
class ParseFunc(_ast_util.NodeVisitor):
def __init__(self, listener, **exception_kwargs):
self.listener = listener
self.exception_kwargs = exception_kwargs
def __init__(self, listener, **exception_kwargs):
self.listener = listener
self.exception_kwargs = exception_kwargs
def visitFunction(self, node, *args):
self.listener.funcname = node.name
self.listener.argnames = node.argnames
self.listener.defaults = node.defaults
self.listener.varargs = node.varargs
self.listener.kwargs = node.kwargs
def visit_FunctionDef(self, node):
self.listener.funcname = node.name
def visit(self, expr):
visitor.walk(expr, self)
argnames = [arg_id(arg) for arg in node.args.args]
if node.args.vararg:
argnames.append(arg_stringname(node.args.vararg))
if compat.py2k:
# kw-only args don't exist in Python 2
kwargnames = []
else:
kwargnames = [arg_id(arg) for arg in node.args.kwonlyargs]
if node.args.kwarg:
kwargnames.append(arg_stringname(node.args.kwarg))
self.listener.argnames = argnames
self.listener.defaults = node.args.defaults # ast
self.listener.kwargnames = kwargnames
if compat.py2k:
self.listener.kwdefaults = []
else:
self.listener.kwdefaults = node.args.kw_defaults
self.listener.varargs = node.args.vararg
self.listener.kwargs = node.args.kwarg
class ExpressionGenerator(object):
class ExpressionGenerator(object):
"""given an AST node, generates an equivalent literal Python
expression."""
def __init__(self, astnode):
self.generator = _ast_util.SourceGenerator(' ' * 4)
self.generator.visit(astnode)
def __init__(self, astnode):
self.buf = StringIO()
visitor.walk(astnode, self) # , walker=walker())
def value(self):
return self.buf.getvalue()
def operator(self, op, node, *args):
self.buf.write('(')
self.visit(node.left, *args)
self.buf.write(' %s ' % op)
self.visit(node.right, *args)
self.buf.write(')')
def booleanop(self, op, node, *args):
self.visit(node.nodes[0])
for n in node.nodes[1:]:
self.buf.write(' ' + op + ' ')
self.visit(n, *args)
def visitConst(self, node, *args):
self.buf.write(repr(node.value))
def visitAssName(self, node, *args):
# TODO: figure out OP_ASSIGN, other OP_s
self.buf.write(node.name)
def visitName(self, node, *args):
self.buf.write(node.name)
def visitMul(self, node, *args):
self.operator('*', node, *args)
def visitAnd(self, node, *args):
self.booleanop('and', node, *args)
def visitOr(self, node, *args):
self.booleanop('or', node, *args)
def visitBitand(self, node, *args):
self.booleanop('&', node, *args)
def visitBitor(self, node, *args):
self.booleanop('|', node, *args)
def visitBitxor(self, node, *args):
self.booleanop('^', node, *args)
def visitAdd(self, node, *args):
self.operator('+', node, *args)
def visitGetattr(self, node, *args):
self.visit(node.expr, *args)
self.buf.write('.%s' % node.attrname)
def visitSub(self, node, *args):
self.operator('-', node, *args)
def visitNot(self, node, *args):
self.buf.write('not ')
self.visit(node.expr)
def visitDiv(self, node, *args):
self.operator('/', node, *args)
def visitFloorDiv(self, node, *args):
self.operator('//', node, *args)
def visitSubscript(self, node, *args):
self.visit(node.expr)
self.buf.write('[')
[self.visit(x) for x in node.subs]
self.buf.write(']')
def visitUnarySub(self, node, *args):
self.buf.write('-')
self.visit(node.expr)
def visitUnaryAdd(self, node, *args):
self.buf.write('-')
self.visit(node.expr)
def visitSlice(self, node, *args):
self.visit(node.expr)
self.buf.write('[')
if node.lower is not None:
self.visit(node.lower)
self.buf.write(':')
if node.upper is not None:
self.visit(node.upper)
self.buf.write(']')
def visitDict(self, node):
self.buf.write('{')
c = node.getChildren()
for i in range(0, len(c), 2):
self.visit(c[i])
self.buf.write(': ')
self.visit(c[i + 1])
if i < len(c) - 2:
self.buf.write(', ')
self.buf.write('}')
def visitTuple(self, node):
self.buf.write('(')
c = node.getChildren()
for i in range(0, len(c)):
self.visit(c[i])
if i < len(c) - 1:
self.buf.write(', ')
self.buf.write(')')
def visitList(self, node):
self.buf.write('[')
c = node.getChildren()
for i in range(0, len(c)):
self.visit(c[i])
if i < len(c) - 1:
self.buf.write(', ')
self.buf.write(']')
def visitListComp(self, node):
self.buf.write('[')
self.visit(node.expr)
self.buf.write(' ')
for n in node.quals:
self.visit(n)
self.buf.write(']')
def visitListCompFor(self, node):
self.buf.write(' for ')
self.visit(node.assign)
self.buf.write(' in ')
self.visit(node.list)
for n in node.ifs:
self.visit(n)
def visitListCompIf(self, node):
self.buf.write(' if ')
self.visit(node.test)
def visitCompare(self, node):
self.visit(node.expr)
for tup in node.ops:
self.buf.write(tup[0])
self.visit(tup[1])
def visitCallFunc(self, node, *args):
self.visit(node.node)
self.buf.write('(')
if len(node.args):
self.visit(node.args[0])
for a in node.args[1:]:
self.buf.write(', ')
self.visit(a)
self.buf.write(')')
class walker(visitor.ASTVisitor):
def dispatch(self, node, *args):
print 'Node:', str(node)
# print "dir:", dir(node)
return visitor.ASTVisitor.dispatch(self, node, *args)
def value(self):
return ''.join(self.generator.result)
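# Illustrative sketch: ExpressionGenerator turns an expression node back into
# source text by way of _ast_util.SourceGenerator.
from mako import pyparser

expr_node = pyparser.parse("a + b * 2", "eval")   # an ast.Expression node
print(pyparser.ExpressionGenerator(expr_node.body).value())
# prints something equivalent to: a + b * 2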

File diff suppressed because it is too large

View File

@@ -1,5 +1,5 @@
# mako/template.py
# Copyright (C) 2006-2011 the Mako authors and contributors <see AUTHORS file>
# Copyright (C) 2006-2016 the Mako authors and contributors <see AUTHORS file>
#
# This module is part of Mako and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
@@ -8,57 +8,82 @@
template strings, as well as template runtime operations."""
from mako.lexer import Lexer
from mako import runtime, util, exceptions, codegen
import imp, os, re, shutil, stat, sys, tempfile, time, types, weakref
from mako import runtime, util, exceptions, codegen, cache, compat
import os
import re
import shutil
import stat
import sys
import tempfile
import types
import weakref
class Template(object):
"""Represents a compiled template.
:class:`.Template` includes a reference to the original
template source (via the ``.source`` attribute)
template source (via the :attr:`.source` attribute)
as well as the source code of the
generated Python module (i.e. the ``.code`` attribute),
generated Python module (i.e. the :attr:`.code` attribute),
as well as a reference to an actual Python module.
:class:`.Template` is constructed using either a literal string
representing the template text, or a filename representing a filesystem
path to a source file.
:param text: textual template source. This argument is mutually
exclusive versus the "filename" parameter.
:param filename: filename of the source template. This argument is
mutually exclusive versus the "text" parameter.
:param text: textual template source. This argument is mutually
exclusive versus the ``filename`` parameter.
:param filename: filename of the source template. This argument is
mutually exclusive versus the ``text`` parameter.
:param buffer_filters: string list of filters to be applied
to the output of %defs which are buffered, cached, or otherwise
to the output of ``%def``\ s which are buffered, cached, or otherwise
filtered, after all filters
defined with the %def itself have been applied. Allows the
defined with the ``%def`` itself have been applied. Allows the
creation of default expression filters that let the output
of return-valued %defs "opt out" of that filtering via
of return-valued ``%def``\ s "opt out" of that filtering via
passing special attributes or objects.
:param bytestring_passthrough: When True, and output_encoding is
set to None, and :meth:`.Template.render` is used to render,
the StringIO or cStringIO buffer will be used instead of the
:param bytestring_passthrough: When ``True``, and ``output_encoding`` is
set to ``None``, and :meth:`.Template.render` is used to render,
the `StringIO` or `cStringIO` buffer will be used instead of the
default "fast" buffer. This allows raw bytestrings in the
output stream, such as in expressions, to pass straight
through to the buffer. New in 0.4 to provide the same
behavior as that of the previous series. This flag is forced
to True if disable_unicode is also configured.
through to the buffer. This flag is forced
to ``True`` if ``disable_unicode`` is also configured.
:param cache_dir: Filesystem directory where cache files will be
placed. See :ref:`caching_toplevel`.
.. versionadded:: 0.4
Added to provide the same behavior as that of the previous series.
:param cache_args: Dictionary of cache configuration arguments that
will be passed to the :class:`.CacheImpl`. See :ref:`caching_toplevel`.
:param cache_dir:
.. deprecated:: 0.6
Use the ``'dir'`` argument in the ``cache_args`` dictionary.
See :ref:`caching_toplevel`.
:param cache_enabled: Boolean flag which enables caching of this
template. See :ref:`caching_toplevel`.
:param cache_type: Type of Beaker caching to be applied to the
template. See :ref:`caching_toplevel`.
:param cache_url: URL of a memcached server with which to use
for caching. See :ref:`caching_toplevel`.
:param cache_impl: String name of a :class:`.CacheImpl` caching
implementation to use. Defaults to ``'beaker'``.
:param cache_type:
.. deprecated:: 0.6
Use the ``'type'`` argument in the ``cache_args`` dictionary.
See :ref:`caching_toplevel`.
:param cache_url:
.. deprecated:: 0.6
Use the ``'url'`` argument in the ``cache_args`` dictionary.
See :ref:`caching_toplevel`.
:param default_filters: List of string filter names that will
be applied to all expressions. See :ref:`filtering_default_filters`.
@@ -66,9 +91,16 @@ class Template(object):
:param disable_unicode: Disables all awareness of Python Unicode
objects. See :ref:`unicode_disabled`.
:param enable_loop: When ``True``, enable the ``loop`` context variable.
This can be set to ``False`` to support templates that may
be making usage of the name "``loop``". Individual templates can
re-enable the "loop" context by placing the directive
``enable_loop="True"`` inside the ``<%page>`` tag -- see
:ref:`migrating_loop`.
:param encoding_errors: Error parameter passed to ``encode()`` when
string encoding is performed. See :ref:`usage_unicode`.
:param error_handler: Python callable which is called whenever
compile or runtime exceptions occur. The callable is passed
the current context as well as the exception. If the
@@ -76,82 +108,158 @@ class Template(object):
be handled, else it is re-raised after the function
completes. Is used to provide custom error-rendering
functions.
.. seealso::
:paramref:`.Template.include_error_handler` - include-specific
error handler function
:param format_exceptions: if ``True``, exceptions which occur during
the render phase of this template will be caught and
formatted into an HTML error page, which then becomes the
rendered result of the :meth:`render` call. Otherwise,
rendered result of the :meth:`.render` call. Otherwise,
runtime exceptions are propagated outwards.
:param imports: String list of Python statements, typically individual
"import" lines, which will be placed into the module level
preamble of all generated Python modules. See the example
in :ref:`filtering_default_filters`.
:param future_imports: String list of names to import from `__future__`.
These will be concatenated into a comma-separated string and inserted
        into the beginning of the template, e.g. ``future_imports=['FOO',
'BAR']`` results in ``from __future__ import FOO, BAR``. If you're
interested in using features like the new division operator, you must
use future_imports to convey that to the renderer, as otherwise the
import will not appear as the first executed statement in the generated
code and will therefore not have the desired effect.
:param include_error_handler: An error handler that runs when this template
is included within another one via the ``<%include>`` tag, and raises an
error. Compare to the :paramref:`.Template.error_handler` option.
.. versionadded:: 1.0.6
.. seealso::
:paramref:`.Template.error_handler` - top-level error handler function
:param input_encoding: Encoding of the template's source code. Can
be used in lieu of the coding comment. See
:ref:`usage_unicode` as well as :ref:`unicode_toplevel` for
details on source encoding.
:param lookup: a :class:`.TemplateLookup` instance that will be used
for all file lookups via the ``<%namespace>``,
``<%include>``, and ``<%inherit>`` tags. See
:ref:`usage_templatelookup`.
:param module_directory: Filesystem location where generated
:param module_directory: Filesystem location where generated
Python module files will be placed.
:param module_filename: Overrides the filename of the generated
:param module_filename: Overrides the filename of the generated
Python module file. For advanced usage only.
:param output_encoding: The encoding to use when :meth:`.render`
is called.
:param module_writer: A callable which overrides how the Python
module is written entirely. The callable is passed the
encoded source content of the module and the destination
path to be written to. The default behavior of module writing
uses a tempfile in conjunction with a file move in order
to make the operation atomic. So a user-defined module
writing function that mimics the default behavior would be:
.. sourcecode:: python
import tempfile
import os
import shutil
def module_writer(source, outputpath):
(dest, name) = \\
tempfile.mkstemp(
dir=os.path.dirname(outputpath)
)
os.write(dest, source)
os.close(dest)
shutil.move(name, outputpath)
from mako.template import Template
mytemplate = Template(
filename="index.html",
module_directory="/path/to/modules",
module_writer=module_writer
)
The function is provided for unusual configurations where
certain platform-specific permissions or other special
steps are needed.
:param output_encoding: The encoding to use when :meth:`.render`
is called.
See :ref:`usage_unicode` as well as :ref:`unicode_toplevel`.
:param preprocessor: Python callable which will be passed
:param preprocessor: Python callable which will be passed
the full template source before it is parsed. The return
result of the callable will be used as the template source
code.
:param strict_undefined: Replaces the automatic usage of
:param lexer_cls: A :class:`.Lexer` class used to parse
the template. The :class:`.Lexer` class is used by
default.
.. versionadded:: 0.7.4
:param strict_undefined: Replaces the automatic usage of
``UNDEFINED`` for any undeclared variables not located in
the :class:`.Context` with an immediate raise of
``NameError``. The advantage is immediate reporting of
missing variables which include the name. New in 0.3.6.
:param uri: string uri or other identifier for this template.
If not provided, the uri is generated from the filesystem
missing variables which include the name.
.. versionadded:: 0.3.6
:param uri: string URI or other identifier for this template.
If not provided, the ``uri`` is generated from the filesystem
path, or from the in-memory identity of a non-file-based
template. The primary usage of the uri is to provide a key
template. The primary usage of the ``uri`` is to provide a key
within :class:`.TemplateLookup`, as well as to generate the
file path of the generated Python module file, if
``module_directory`` is specified.
"""
def __init__(self,
text=None,
filename=None,
uri=None,
format_exceptions=False,
error_handler=None,
lookup=None,
output_encoding=None,
encoding_errors='strict',
module_directory=None,
cache_type=None,
cache_dir=None,
cache_url=None,
module_filename=None,
input_encoding=None,
disable_unicode=False,
bytestring_passthrough=False,
default_filters=None,
buffer_filters=(),
strict_undefined=False,
imports=None,
preprocessor=None,
cache_enabled=True):
lexer_cls = Lexer
def __init__(self,
text=None,
filename=None,
uri=None,
format_exceptions=False,
error_handler=None,
lookup=None,
output_encoding=None,
encoding_errors='strict',
module_directory=None,
cache_args=None,
cache_impl='beaker',
cache_enabled=True,
cache_type=None,
cache_dir=None,
cache_url=None,
module_filename=None,
input_encoding=None,
disable_unicode=False,
module_writer=None,
bytestring_passthrough=False,
default_filters=None,
buffer_filters=(),
strict_undefined=False,
imports=None,
future_imports=None,
enable_loop=True,
preprocessor=None,
lexer_cls=None,
include_error_handler=None):
if uri:
self.module_id = re.sub(r'\W', "_", uri)
self.uri = uri
@@ -163,34 +271,50 @@ class Template(object):
else:
self.module_id = "memory:" + hex(id(self))
self.uri = self.module_id
u_norm = self.uri
if u_norm.startswith("/"):
u_norm = u_norm[1:]
u_norm = os.path.normpath(u_norm)
if u_norm.startswith(".."):
raise exceptions.TemplateLookupException(
"Template uri \"%s\" is invalid - "
"it cannot be relative outside "
"of the root path." % self.uri)
self.input_encoding = input_encoding
self.output_encoding = output_encoding
self.encoding_errors = encoding_errors
self.disable_unicode = disable_unicode
self.bytestring_passthrough = bytestring_passthrough or disable_unicode
self.enable_loop = enable_loop
self.strict_undefined = strict_undefined
self.module_writer = module_writer
if util.py3k and disable_unicode:
if compat.py3k and disable_unicode:
raise exceptions.UnsupportedError(
"Mako for Python 3 does not "
"support disabling Unicode")
"Mako for Python 3 does not "
"support disabling Unicode")
elif output_encoding and disable_unicode:
raise exceptions.UnsupportedError(
"output_encoding must be set to "
"None when disable_unicode is used.")
"output_encoding must be set to "
"None when disable_unicode is used.")
if default_filters is None:
if util.py3k or self.disable_unicode:
if compat.py3k or self.disable_unicode:
self.default_filters = ['str']
else:
self.default_filters = ['unicode']
else:
self.default_filters = default_filters
self.buffer_filters = buffer_filters
self.imports = imports
self.future_imports = future_imports
self.preprocessor = preprocessor
if lexer_cls is not None:
self.lexer_cls = lexer_cls
# if plain text, compile code in memory only
if text is not None:
(code, module) = _compile_text(self, text, filename)
@@ -203,171 +327,226 @@ class Template(object):
if module_filename is not None:
path = module_filename
elif module_directory is not None:
u = self.uri
if u[0] == '/':
u = u[1:]
path = os.path.abspath(
os.path.join(
os.path.normpath(module_directory),
os.path.normpath(u) + ".py"
)
)
os.path.join(
os.path.normpath(module_directory),
u_norm + ".py"
)
)
else:
path = None
module = self._compile_from_file(path, filename)
else:
raise exceptions.RuntimeException(
"Template requires text or filename")
"Template requires text or filename")
self.module = module
self.filename = filename
self.callable_ = self.module.render_body
self.format_exceptions = format_exceptions
self.error_handler = error_handler
self.include_error_handler = include_error_handler
self.lookup = lookup
self.cache_type = cache_type
self.cache_dir = cache_dir
self.cache_url = cache_url
self.module_directory = module_directory
self._setup_cache_args(
cache_impl, cache_enabled, cache_args,
cache_type, cache_dir, cache_url
)
@util.memoized_property
def reserved_names(self):
if self.enable_loop:
return codegen.RESERVED_NAMES
else:
return codegen.RESERVED_NAMES.difference(['loop'])
def _setup_cache_args(self,
cache_impl, cache_enabled, cache_args,
cache_type, cache_dir, cache_url):
self.cache_impl = cache_impl
self.cache_enabled = cache_enabled
if cache_args:
self.cache_args = cache_args
else:
self.cache_args = {}
# transfer deprecated cache_* args
if cache_type:
self.cache_args['type'] = cache_type
if cache_dir:
self.cache_args['dir'] = cache_dir
if cache_url:
self.cache_args['url'] = cache_url
def _compile_from_file(self, path, filename):
if path is not None:
util.verify_directory(os.path.dirname(path))
filemtime = os.stat(filename)[stat.ST_MTIME]
if not os.path.exists(path) or \
os.stat(path)[stat.ST_MTIME] < filemtime:
os.stat(path)[stat.ST_MTIME] < filemtime:
data = util.read_file(filename)
_compile_module_file(
self,
open(filename, 'rb').read(),
filename,
path)
module = imp.load_source(self.module_id, path, open(path, 'rb'))
self,
data,
filename,
path,
self.module_writer)
module = compat.load_module(self.module_id, path)
del sys.modules[self.module_id]
if module._magic_number != codegen.MAGIC_NUMBER:
data = util.read_file(filename)
_compile_module_file(
self,
open(filename, 'rb').read(),
filename,
path)
module = imp.load_source(self.module_id, path, open(path, 'rb'))
self,
data,
filename,
path,
self.module_writer)
module = compat.load_module(self.module_id, path)
del sys.modules[self.module_id]
ModuleInfo(module, path, self, filename, None, None)
else:
# template filename and no module directory, compile code
# in memory
data = util.read_file(filename)
code, module = _compile_text(
self,
open(filename, 'rb').read(),
filename)
self,
data,
filename)
self._source = None
self._code = code
ModuleInfo(module, None, self, filename, code, None)
return module
@property
def source(self):
"""return the template source code for this Template."""
"""Return the template source code for this :class:`.Template`."""
return _get_module_info_from_callable(self.callable_).source
@property
def code(self):
"""return the module source code for this Template"""
"""Return the module source code for this :class:`.Template`."""
return _get_module_info_from_callable(self.callable_).code
@property
@util.memoized_property
def cache(self):
return self.module._template_cache
return cache.Cache(self)
@property
def cache_dir(self):
return self.cache_args['dir']
@property
def cache_url(self):
return self.cache_args['url']
@property
def cache_type(self):
return self.cache_args['type']
def render(self, *args, **data):
"""Render the output of this template as a string.
if the template specifies an output encoding, the string
If the template specifies an output encoding, the string
will be encoded accordingly, else the output is raw (raw
output uses cStringIO and can't handle multibyte
characters). a Context object is created corresponding
to the given data. Arguments that are explictly declared
output uses `cStringIO` and can't handle multibyte
characters). A :class:`.Context` object is created corresponding
to the given data. Arguments that are explicitly declared
by this template's internal rendering method are also
pulled from the given \*args, \**data members.
pulled from the given ``*args``, ``**data`` members.
"""
return runtime._render(self, self.callable_, args, data)
def render_unicode(self, *args, **data):
"""render the output of this template as a unicode object."""
return runtime._render(self,
self.callable_,
args,
data,
as_unicode=True)
"""Render the output of this template as a unicode object."""
return runtime._render(self,
self.callable_,
args,
data,
as_unicode=True)
def render_context(self, context, *args, **kwargs):
"""Render this Template with the given context.
the data is written to the context's buffer.
"""Render this :class:`.Template` with the given context.
The data is written to the context's buffer.
"""
if getattr(context, '_with_template', None) is None:
context._with_template = self
runtime._render_context(self,
self.callable_,
context,
*args,
context._set_with_template(self)
runtime._render_context(self,
self.callable_,
context,
*args,
**kwargs)
def has_def(self, name):
return hasattr(self.module, "render_%s" % name)
def get_def(self, name):
"""Return a def of this template as a :class:`.DefTemplate`."""
return DefTemplate(self, getattr(self.module, "render_%s" % name))
def list_defs(self):
"""return a list of defs in the template.
.. versionadded:: 1.0.4
"""
return [i[7:] for i in dir(self.module) if i[:7] == 'render_']
def _get_def_callable(self, name):
return getattr(self.module, "render_%s" % name)
@property
def last_modified(self):
return self.module._modified_time
def last_modified(self):
return self.module._modified_time
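# Illustrative sketch tying together a few of the constructor parameters
# documented in the class docstring above with the def accessors just defined
# (output in the comments is approximate, and def ordering from list_defs()
# is not guaranteed):
from mako.template import Template

t = Template(
    "${x / y}",
    future_imports=['division'],   # "from __future__ import division" in the generated module
    strict_undefined=True,         # missing names raise NameError instead of rendering UNDEFINED
)
print(t.render(x=7, y=2))          # 3.5 under true division

defs_t = Template("<%def name='greet(name)'>hello ${name}</%def>body text")
print(defs_t.list_defs())                           # e.g. ['body', 'greet']
print(defs_t.get_def('greet').render(name='mako'))  # hello mako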
class ModuleTemplate(Template):
"""A Template which is constructed given an existing Python module.
e.g.::
t = Template("this is a template")
f = file("mymodule.py", "w")
f.write(t.code)
f.close()
import mymodule
t = ModuleTemplate(mymodule)
print t.render()
"""
def __init__(self, module,
module_filename=None,
template=None,
template_filename=None,
module_source=None,
template_source=None,
output_encoding=None,
encoding_errors='strict',
disable_unicode=False,
bytestring_passthrough=False,
format_exceptions=False,
error_handler=None,
lookup=None,
cache_type=None,
cache_dir=None,
cache_url=None,
cache_enabled=True
):
def __init__(self, module,
module_filename=None,
template=None,
template_filename=None,
module_source=None,
template_source=None,
output_encoding=None,
encoding_errors='strict',
disable_unicode=False,
bytestring_passthrough=False,
format_exceptions=False,
error_handler=None,
lookup=None,
cache_args=None,
cache_impl='beaker',
cache_enabled=True,
cache_type=None,
cache_dir=None,
cache_url=None,
include_error_handler=None,
):
self.module_id = re.sub(r'\W', "_", module._template_uri)
self.uri = module._template_uri
self.input_encoding = module._source_encoding
@@ -375,38 +554,42 @@ class ModuleTemplate(Template):
self.encoding_errors = encoding_errors
self.disable_unicode = disable_unicode
self.bytestring_passthrough = bytestring_passthrough or disable_unicode
self.enable_loop = module._enable_loop
if util.py3k and disable_unicode:
if compat.py3k and disable_unicode:
raise exceptions.UnsupportedError(
"Mako for Python 3 does not "
"support disabling Unicode")
"Mako for Python 3 does not "
"support disabling Unicode")
elif output_encoding and disable_unicode:
raise exceptions.UnsupportedError(
"output_encoding must be set to "
"None when disable_unicode is used.")
"output_encoding must be set to "
"None when disable_unicode is used.")
self.module = module
self.filename = template_filename
ModuleInfo(module,
module_filename,
self,
template_filename,
module_source,
template_source)
ModuleInfo(module,
module_filename,
self,
template_filename,
module_source,
template_source)
self.callable_ = self.module.render_body
self.format_exceptions = format_exceptions
self.error_handler = error_handler
self.include_error_handler = include_error_handler
self.lookup = lookup
self.cache_type = cache_type
self.cache_dir = cache_dir
self.cache_url = cache_url
self.cache_enabled = cache_enabled
self._setup_cache_args(
cache_impl, cache_enabled, cache_args,
cache_type, cache_dir, cache_url
)
class DefTemplate(Template):
"""a Template which represents a callable def in a parent
"""A :class:`.Template` which represents a callable def in a parent
template."""
def __init__(self, parent, callable_):
self.parent = parent
self.callable_ = callable_
@@ -415,27 +598,31 @@ class DefTemplate(Template):
self.encoding_errors = parent.encoding_errors
self.format_exceptions = parent.format_exceptions
self.error_handler = parent.error_handler
self.include_error_handler = parent.include_error_handler
self.enable_loop = parent.enable_loop
self.lookup = parent.lookup
self.bytestring_passthrough = parent.bytestring_passthrough
def get_def(self, name):
return self.parent.get_def(name)
class ModuleInfo(object):
"""Stores information about a module currently loaded into
memory, provides reverse lookups of template source, module
source code based on a module's identifier.
"""
_modules = weakref.WeakValueDictionary()
def __init__(self,
module,
module_filename,
template,
template_filename,
module_source,
template_source):
def __init__(self,
module,
module_filename,
template,
template_filename,
module_source,
template_source):
self.module = module
self.module_filename = module_filename
self.template_filename = template_filename
@@ -444,93 +631,116 @@ class ModuleInfo(object):
self._modules[module.__name__] = template._mmarker = self
if module_filename:
self._modules[module_filename] = self
@classmethod
def get_module_source_metadata(cls, module_source, full_line_map=False):
source_map = re.search(
r"__M_BEGIN_METADATA(.+?)__M_END_METADATA",
module_source, re.S).group(1)
source_map = compat.json.loads(source_map)
source_map['line_map'] = dict(
(int(k), int(v))
for k, v in source_map['line_map'].items())
if full_line_map:
f_line_map = source_map['full_line_map'] = []
line_map = source_map['line_map']
curr_templ_line = 1
for mod_line in range(1, max(line_map)):
if mod_line in line_map:
curr_templ_line = line_map[mod_line]
f_line_map.append(curr_templ_line)
return source_map
@property
def code(self):
if self.module_source is not None:
return self.module_source
else:
return open(self.module_filename).read()
return util.read_python_file(self.module_filename)
@property
def source(self):
if self.template_source is not None:
if self.module._source_encoding and \
not isinstance(self.template_source, unicode):
not isinstance(self.template_source, compat.text_type):
return self.template_source.decode(
self.module._source_encoding)
self.module._source_encoding)
else:
return self.template_source
else:
data = util.read_file(self.template_filename)
if self.module._source_encoding:
return open(self.template_filename, 'rb').read().\
decode(self.module._source_encoding)
return data.decode(self.module._source_encoding)
else:
return open(self.template_filename).read()
return data
def _compile(template, text, filename, generate_magic_comment):
lexer = template.lexer_cls(text,
filename,
disable_unicode=template.disable_unicode,
input_encoding=template.input_encoding,
preprocessor=template.preprocessor)
node = lexer.parse()
source = codegen.compile(node,
template.uri,
filename,
default_filters=template.default_filters,
buffer_filters=template.buffer_filters,
imports=template.imports,
future_imports=template.future_imports,
source_encoding=lexer.encoding,
generate_magic_comment=generate_magic_comment,
disable_unicode=template.disable_unicode,
strict_undefined=template.strict_undefined,
enable_loop=template.enable_loop,
reserved_names=template.reserved_names)
return source, lexer
def _compile_text(template, text, filename):
identifier = template.module_id
lexer = Lexer(text,
filename,
disable_unicode=template.disable_unicode,
input_encoding=template.input_encoding,
preprocessor=template.preprocessor)
node = lexer.parse()
source = codegen.compile(node,
template.uri,
filename,
default_filters=template.default_filters,
buffer_filters=template.buffer_filters,
imports=template.imports,
source_encoding=lexer.encoding,
generate_magic_comment=template.disable_unicode,
disable_unicode=template.disable_unicode,
strict_undefined=template.strict_undefined)
source, lexer = _compile(template, text, filename,
generate_magic_comment=template.disable_unicode)
cid = identifier
if not util.py3k and isinstance(cid, unicode):
if not compat.py3k and isinstance(cid, compat.text_type):
cid = cid.encode()
module = types.ModuleType(cid)
code = compile(source, cid, 'exec')
exec code in module.__dict__, module.__dict__
# this exec() works for 2.4->3.3.
exec(code, module.__dict__, module.__dict__)
return (source, module)
def _compile_module_file(template, text, filename, outputpath):
identifier = template.module_id
lexer = Lexer(text,
filename,
disable_unicode=template.disable_unicode,
input_encoding=template.input_encoding,
preprocessor=template.preprocessor)
node = lexer.parse()
source = codegen.compile(node,
template.uri,
filename,
default_filters=template.default_filters,
buffer_filters=template.buffer_filters,
imports=template.imports,
source_encoding=lexer.encoding,
generate_magic_comment=True,
disable_unicode=template.disable_unicode,
strict_undefined=template.strict_undefined)
# make tempfiles in the same location as the ultimate
# location. this ensures they're on the same filesystem,
# avoiding synchronization issues.
(dest, name) = tempfile.mkstemp(dir=os.path.dirname(outputpath))
if isinstance(source, unicode):
def _compile_module_file(template, text, filename, outputpath, module_writer):
source, lexer = _compile(template, text, filename,
generate_magic_comment=True)
if isinstance(source, compat.text_type):
source = source.encode(lexer.encoding or 'ascii')
os.write(dest, source)
os.close(dest)
shutil.move(name, outputpath)
if module_writer:
module_writer(source, outputpath)
else:
# make tempfiles in the same location as the ultimate
# location. this ensures they're on the same filesystem,
# avoiding synchronization issues.
(dest, name) = tempfile.mkstemp(dir=os.path.dirname(outputpath))
os.write(dest, source)
os.close(dest)
shutil.move(name, outputpath)
def _get_module_info_from_callable(callable_):
return _get_module_info(callable_.func_globals['__name__'])
if compat.py3k:
return _get_module_info(callable_.__globals__['__name__'])
else:
return _get_module_info(callable_.func_globals['__name__'])
def _get_module_info(filename):
return ModuleInfo._modules[filename]

View File

@@ -1,83 +1,68 @@
# mako/util.py
# Copyright (C) 2006-2011 the Mako authors and contributors <see AUTHORS file>
# Copyright (C) 2006-2016 the Mako authors and contributors <see AUTHORS file>
#
# This module is part of Mako and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
import sys
py3k = getattr(sys, 'py3kwarning', False) or sys.version_info >= (3, 0)
py24 = sys.version_info >= (2, 4) and sys.version_info < (2, 5)
jython = sys.platform.startswith('java')
win32 = sys.platform.startswith('win')
if py3k:
from io import StringIO
else:
try:
from cStringIO import StringIO
except:
from StringIO import StringIO
import codecs, re, weakref, os, time, operator
import re
import collections
import codecs
import os
from mako import compat
import operator
try:
import threading
import thread
except ImportError:
import dummy_threading as threading
import dummy_thread as thread
if win32 or jython:
time_func = time.clock
else:
time_func = time.time
def function_named(fn, name):
"""Return a function with a given __name__.
def update_wrapper(decorated, fn):
decorated.__wrapped__ = fn
decorated.__name__ = fn.__name__
return decorated
Will assign to __name__ and return the original function if possible on
the Python implementation, otherwise a new function will be constructed.
"""
fn.__name__ = name
return fn
class PluginLoader(object):
def __init__(self, group):
self.group = group
self.impls = {}
def load(self, name):
if name in self.impls:
return self.impls[name]()
else:
import pkg_resources
for impl in pkg_resources.iter_entry_points(
self.group,
name):
self.impls[name] = impl.load
return impl.load()
else:
from mako import exceptions
raise exceptions.RuntimeException(
"Can't load plugin %s %s" %
(self.group, name))
def register(self, name, modulepath, objname):
def load():
mod = __import__(modulepath)
for token in modulepath.split(".")[1:]:
mod = getattr(mod, token)
return getattr(mod, objname)
self.impls[name] = load
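# Illustrative sketch of the loader above: register() maps a name to a module
# path plus attribute; the stdlib target here is purely for demonstration.
# Entry-point lookup via pkg_resources works the same way, but needs an
# installed distribution advertising that entry point.
from mako import util

loader = util.PluginLoader('demo.group')      # group name is arbitrary for register()
loader.register('joiner', 'os.path', 'join')
join_func = loader.load('joiner')             # imports os.path and returns os.path.join
print(join_func('a', 'b'))                    # a/b (a\b on Windows)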
try:
from functools import partial
except:
def partial(func, *args, **keywords):
def newfunc(*fargs, **fkeywords):
newkeywords = keywords.copy()
newkeywords.update(fkeywords)
return func(*(args + fargs), **newkeywords)
return newfunc
if py24:
def exception_name(exc):
try:
return exc.__class__.__name__
except AttributeError:
return exc.__name__
else:
def exception_name(exc):
return exc.__class__.__name__
def verify_directory(dir):
"""create and/or verify a filesystem directory."""
tries = 0
while not os.path.exists(dir):
try:
tries += 1
os.makedirs(dir, 0775)
os.makedirs(dir, compat.octal("0775"))
except:
if tries > 5:
raise
def to_list(x, default=None):
if x is None:
return default
@@ -88,7 +73,9 @@ def to_list(x, default=None):
class memoized_property(object):
"""A read-only @property that is only evaluated once."""
def __init__(self, fget, doc=None):
self.fget = fget
self.__doc__ = doc or fget.__doc__
@@ -100,77 +87,118 @@ class memoized_property(object):
obj.__dict__[self.__name__] = result = self.fget(obj)
return result
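# Illustrative sketch: the computed value replaces the descriptor in the
# instance __dict__, so the getter runs only once per instance.
from mako import util

class Settings(object):
    @util.memoized_property
    def options(self):
        print("computing options")
        return {"cache": True}

s = Settings()
s.options   # prints "computing options" and returns the dict
s.options   # served straight from s.__dict__; no recomputation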
class memoized_instancemethod(object):
"""Decorate a method memoize its return value.
Best applied to no-arg methods: memoization is not sensitive to
argument values, and will always return the same value even when
called with different arguments.
"""
def __init__(self, fget, doc=None):
self.fget = fget
self.__doc__ = doc or fget.__doc__
self.__name__ = fget.__name__
def __get__(self, obj, cls):
if obj is None:
return self
def oneshot(*args, **kw):
result = self.fget(obj, *args, **kw)
memo = lambda *a, **kw: result
memo.__name__ = self.__name__
memo.__doc__ = self.__doc__
obj.__dict__[self.__name__] = memo
return result
oneshot.__name__ = self.__name__
oneshot.__doc__ = self.__doc__
return oneshot
class SetLikeDict(dict):
"""a dictionary that has some setlike methods on it"""
def union(self, other):
"""produce a 'union' of this dict and another (at the key level).
values in the second dict take precedence over that of the first"""
x = SetLikeDict(**self)
x.update(other)
return x
class FastEncodingBuffer(object):
"""a very rudimentary buffer that is faster than StringIO,
"""a very rudimentary buffer that is faster than StringIO,
but doesn't crash on unicode data like cStringIO."""
def __init__(self, encoding=None, errors='strict', unicode=False):
def __init__(self, encoding=None, errors='strict', as_unicode=False):
self.data = collections.deque()
self.encoding = encoding
if unicode:
self.delim = u''
if as_unicode:
self.delim = compat.u('')
else:
self.delim = ''
self.unicode = unicode
self.as_unicode = as_unicode
self.errors = errors
self.write = self.data.append
def truncate(self):
self.data = collections.deque()
self.write = self.data.append
def getvalue(self):
if self.encoding:
return self.delim.join(self.data).encode(self.encoding, self.errors)
return self.delim.join(self.data).encode(self.encoding,
self.errors)
else:
return self.delim.join(self.data)
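# Illustrative sketch of the buffer above: writes are appended to a deque and
# only joined (and encoded, if an output encoding was given) when getvalue()
# is called.
from mako import util

buf = util.FastEncodingBuffer(encoding='utf-8')
buf.write(u'hello ')
buf.write(u'world')
print(buf.getvalue())   # b'hello world'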
class LRUCache(dict):
"""A dictionary-like object that stores a limited number of items, discarding
lesser used items periodically.
"""A dictionary-like object that stores a limited number of items,
discarding lesser used items periodically.
this is a rewrite of LRUCache from Myghty to use a periodic timestamp-based
paradigm so that synchronization is not really needed. the size management
paradigm so that synchronization is not really needed. the size management
is inexact.
"""
class _Item(object):
def __init__(self, key, value):
self.key = key
self.value = value
self.timestamp = time_func()
self.timestamp = compat.time_func()
def __repr__(self):
return repr(self.value)
def __init__(self, capacity, threshold=.5):
self.capacity = capacity
self.threshold = threshold
def __getitem__(self, key):
item = dict.__getitem__(self, key)
item.timestamp = time_func()
item.timestamp = compat.time_func()
return item.value
def values(self):
return [i.value for i in dict.values(self)]
def setdefault(self, key, value):
if key in self:
return self[key]
else:
self[key] = value
return value
def __setitem__(self, key, value):
item = dict.get(self, key)
if item is None:
@@ -179,17 +207,17 @@ class LRUCache(dict):
else:
item.value = value
self._manage_size()
def _manage_size(self):
while len(self) > self.capacity + self.capacity * self.threshold:
bytime = sorted(dict.values(self),
bytime = sorted(dict.values(self),
key=operator.attrgetter('timestamp'), reverse=True)
for item in bytime[self.capacity:]:
try:
del self[item.key]
except KeyError:
# if we couldnt find a key, most likely some other thread broke in
# on us. loop around and try again
# if we couldn't find a key, most likely some other thread
# broke in on us. loop around and try again
break
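# Illustrative sketch of the cache above (pruning is deliberately approximate,
# so treat the comments as typical rather than exact behaviour):
from mako import util

lru = util.LRUCache(2)   # capacity 2, default threshold .5 -> prune once size exceeds 3
lru['a'] = 1
lru['b'] = 2
lru['c'] = 3             # size 3: still within capacity + capacity * threshold
lru['d'] = 4             # size 4: least recently used entries are dropped
print('a' in lru)        # typically False by now; 'c' and 'd' survive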
# Regexp to match python magic encoding line
@@ -197,8 +225,10 @@ _PYTHON_MAGIC_COMMENT_re = re.compile(
r'[ \t\f]* \# .* coding[=:][ \t]*([-\w.]+)',
re.VERBOSE)
def parse_encoding(fp):
"""Deduce the encoding of a Python source file (binary mode) from magic comment.
"""Deduce the encoding of a Python source file (binary mode) from magic
comment.
It does this in the same way as the `Python interpreter`__
@@ -227,13 +257,14 @@ def parse_encoding(fp):
pass
else:
line2 = fp.readline()
m = _PYTHON_MAGIC_COMMENT_re.match(line2.decode('ascii', 'ignore'))
m = _PYTHON_MAGIC_COMMENT_re.match(
line2.decode('ascii', 'ignore'))
if has_bom:
if m:
raise SyntaxError, \
"python refuses to compile code with both a UTF8" \
" byte-order-mark and a magic encoding comment"
raise SyntaxError(
"python refuses to compile code with both a UTF8"
" byte-order-mark and a magic encoding comment")
return 'utf_8'
elif m:
return m.group(1)
@@ -242,16 +273,18 @@ def parse_encoding(fp):
finally:
fp.seek(pos)
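# Illustrative sketch ('mymodule.py' is a hypothetical path); the file must be
# opened in binary mode, as the docstring above says.
from mako import util

fp = open('mymodule.py', 'rb')
try:
    print(util.parse_encoding(fp))   # e.g. 'utf-8', or None if no magic comment/BOM
finally:
    fp.close()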
def sorted_dict_repr(d):
"""repr() a dictionary with the keys in order.
Used by the lexer unit test to compare parse trees based on strings.
"""
keys = d.keys()
keys = list(d.keys())
keys.sort()
return "{" + ", ".join(["%r: %r" % (k, d[k]) for k in keys]) + "}"
def restore__ast(_ast):
"""Attempt to restore the required classes to the _ast module if it
appears to be missing them
@@ -328,25 +361,22 @@ mako in baz not in mako""", '<unknown>', 'exec', _ast.PyCF_ONLY_AST)
_ast.NotIn = type(m.body[12].value.ops[1])
try:
from inspect import CO_VARKEYWORDS, CO_VARARGS
def inspect_func_args(fn):
co = fn.func_code
def read_file(path, mode='rb'):
fp = open(path, mode)
try:
data = fp.read()
return data
finally:
fp.close()
nargs = co.co_argcount
names = co.co_varnames
args = list(names[:nargs])
varargs = None
if co.co_flags & CO_VARARGS:
varargs = co.co_varnames[nargs]
nargs = nargs + 1
varkw = None
if co.co_flags & CO_VARKEYWORDS:
varkw = co.co_varnames[nargs]
return args, varargs, varkw, fn.func_defaults
except ImportError:
import inspect
def inspect_func_args(fn):
return inspect.getargspec(fn)
def read_python_file(path):
fp = open(path, "rb")
try:
encoding = parse_encoding(fp)
data = fp.read()
if encoding:
data = data.decode(encoding)
return data
finally:
fp.close()
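# Illustrative sketch of the two readers above ('mymodule.py' again being a
# hypothetical path):
from mako import util

raw_bytes = util.read_file('mymodule.py')            # bytes, opened with mode='rb'
source_text = util.read_python_file('mymodule.py')   # decoded per its magic encoding comment, if any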