"""Extension API for adding custom tags and behavior."""
|
|
|
|
import pprint
|
2019-12-02 21:46:54 +00:00
|
|
|
import re
|
2022-01-24 04:07:52 +00:00
|
|
|
import typing as t
|
|
|
|
|
|
|
|
from markupsafe import Markup
|
|
|
|
|
|
|
|
from . import defaults
|
|
|
|
from . import nodes
|
|
|
|
from .environment import Environment
|
|
|
|
from .exceptions import TemplateAssertionError
|
|
|
|
from .exceptions import TemplateSyntaxError
|
|
|
|
from .runtime import concat # type: ignore
|
|
|
|
from .runtime import Context
|
|
|
|
from .runtime import Undefined
|
|
|
|
from .utils import import_string
|
|
|
|
from .utils import pass_context
|
|
|
|
|
|
|
|
if t.TYPE_CHECKING:
|
|
|
|
import typing_extensions as te
|
|
|
|
from .lexer import Token
|
|
|
|
from .lexer import TokenStream
|
|
|
|
from .parser import Parser
|
|
|
|
|
|
|
|
class _TranslationsBasic(te.Protocol):
|
|
|
|
def gettext(self, message: str) -> str:
|
|
|
|
...
|
|
|
|
|
|
|
|
def ngettext(self, singular: str, plural: str, n: int) -> str:
|
|
|
|
pass
|
|
|
|
|
|
|
|
class _TranslationsContext(_TranslationsBasic):
|
|
|
|
def pgettext(self, context: str, message: str) -> str:
|
|
|
|
...
|
|
|
|
|
|
|
|
def npgettext(self, context: str, singular: str, plural: str, n: int) -> str:
|
|
|
|
...
|
|
|
|
|
|
|
|
_SupportedTranslations = t.Union[_TranslationsBasic, _TranslationsContext]
|
|
|
|
|
|
|
|
|
|
|
|
# I18N functions available in Jinja templates. If the I18N library
|
|
|
|
# provides ugettext, it will be assigned to gettext.
|
|
|
|
GETTEXT_FUNCTIONS: t.Tuple[str, ...] = (
|
|
|
|
"_",
|
|
|
|
"gettext",
|
|
|
|
"ngettext",
|
|
|
|
"pgettext",
|
|
|
|
"npgettext",
|
|
|
|
)
|
|
|
|
_ws_re = re.compile(r"\s*\n\s*")
|
|
|
|
|
|
|
|
|
|
|
|
class Extension:
|
2019-12-02 21:46:54 +00:00
|
|
|
"""Extensions can be used to add extra functionality to the Jinja template
|
|
|
|
system at the parser level. Custom extensions are bound to an environment
|
|
|
|
but may not store environment specific data on `self`. The reason for
|
|
|
|
this is that an extension can be bound to another environment (for
|
|
|
|
overlays) by creating a copy and reassigning the `environment` attribute.
|
|
|
|
|
|
|
|
As extensions are created by the environment they cannot accept any
|
|
|
|
arguments for configuration. One may want to work around that by using
|
|
|
|
a factory function, but that is not possible as extensions are identified
|
|
|
|
by their import name. The correct way to configure the extension is
|
|
|
|
storing the configuration values on the environment. Because this way the
|
|
|
|
environment ends up acting as central configuration storage the
|
|
|
|
attributes may clash which is why extensions have to ensure that the names
|
|
|
|
they choose for configuration are not too generic. ``prefix`` for example
|
|
|
|
is a terrible name, ``fragment_cache_prefix`` on the other hand is a good
|
|
|
|
name as includes the name of the extension (fragment cache).
|
|
|
|
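
    For example, a hypothetical fragment cache extension could store its
    settings on the environment instead of on itself (``fragment_cache`` and
    ``fragment_cache_prefix`` are illustrative names; this is only a
    sketch)::

        class FragmentCacheExtension(Extension):
            tags = {"cache"}

            def __init__(self, environment):
                super().__init__(environment)
                environment.extend(
                    fragment_cache_prefix="",
                    fragment_cache=None,
                )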
"""
|
|
|
|
|
2022-01-24 04:07:52 +00:00
|
|
|
identifier: t.ClassVar[str]
|
|
|
|
|
|
|
|
def __init_subclass__(cls) -> None:
|
|
|
|
cls.identifier = f"{cls.__module__}.{cls.__name__}"
|
|
|
|
|
2019-12-02 21:46:54 +00:00
|
|
|
#: if this extension parses this is the list of tags it's listening to.
|
2022-01-24 04:07:52 +00:00
|
|
|
tags: t.Set[str] = set()
|
2019-12-02 21:46:54 +00:00
|
|
|
|
|
|
|
#: the priority of that extension. This is especially useful for
|
|
|
|
#: extensions that preprocess values. A lower value means higher
|
|
|
|
#: priority.
|
|
|
|
#:
|
|
|
|
#: .. versionadded:: 2.4
|
|
|
|
priority = 100
|
|
|
|
|
2022-01-24 04:07:52 +00:00
|
|
|
def __init__(self, environment: Environment) -> None:
|
2019-12-02 21:46:54 +00:00
|
|
|
self.environment = environment
|
|
|
|
|
2022-01-24 04:07:52 +00:00
|
|
|
def bind(self, environment: Environment) -> "Extension":
|
2019-12-02 21:46:54 +00:00
|
|
|
"""Create a copy of this extension bound to another environment."""
|
2022-11-07 18:06:49 +00:00
|
|
|
rv = object.__new__(self.__class__)
|
2019-12-02 21:46:54 +00:00
|
|
|
rv.__dict__.update(self.__dict__)
|
|
|
|
rv.environment = environment
|
|
|
|
return rv
|
|
|
|
|
2022-01-24 04:07:52 +00:00
|
|
|
def preprocess(
|
|
|
|
self, source: str, name: t.Optional[str], filename: t.Optional[str] = None
|
|
|
|
) -> str:
|
2019-12-02 21:46:54 +00:00
|
|
|
"""This method is called before the actual lexing and can be used to
|
|
|
|
preprocess the source. The `filename` is optional. The return value
|
|
|
|
must be the preprocessed source.
|
|
|
|
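
        For example, an override could translate an alternative delimiter
        syntax into regular Jinja syntax before lexing (the ``<%``/``%>``
        delimiters are purely illustrative)::

            def preprocess(self, source, name, filename=None):
                return source.replace("<%", "{%").replace("%>", "%}")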
"""
|
|
|
|
return source
|
|
|
|
|
2022-01-24 04:07:52 +00:00
|
|
|
def filter_stream(
|
|
|
|
self, stream: "TokenStream"
|
|
|
|
) -> t.Union["TokenStream", t.Iterable["Token"]]:
|
2019-12-02 21:46:54 +00:00
|
|
|
"""It's passed a :class:`~jinja2.lexer.TokenStream` that can be used
|
|
|
|
to filter tokens returned. This method has to return an iterable of
|
|
|
|
:class:`~jinja2.lexer.Token`\\s, but it doesn't have to return a
|
|
|
|
:class:`~jinja2.lexer.TokenStream`.
|
|
|
|
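
        For example, an override could rewrite the static template data while
        passing every other token through unchanged (a sketch that builds new
        :class:`~jinja2.lexer.Token` instances)::

            def filter_stream(self, stream):
                for token in stream:
                    if token.type == "data":
                        yield Token(token.lineno, "data", token.value.upper())
                    else:
                        yield token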
"""
|
|
|
|
return stream
|
|
|
|
|
2022-01-24 04:07:52 +00:00
|
|
|
def parse(self, parser: "Parser") -> t.Union[nodes.Node, t.List[nodes.Node]]:
|
2019-12-02 21:46:54 +00:00
|
|
|
"""If any of the :attr:`tags` matched this method is called with the
|
|
|
|
parser as first argument. The token the parser stream is pointing at
|
|
|
|
is the name token that matched. This method has to return one or a
|
|
|
|
list of multiple nodes.
|
|
|
|
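
        A sketch for a hypothetical ``{% cache %}...{% endcache %}`` tag,
        where ``_cache_support`` would be a method defined on the
        extension::

            def parse(self, parser):
                lineno = next(parser.stream).lineno
                args = [parser.parse_expression()]
                body = parser.parse_statements(("name:endcache",), drop_needle=True)
                call = self.call_method("_cache_support", args)
                return nodes.CallBlock(call, [], [], body).set_lineno(lineno)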
"""
|
|
|
|
raise NotImplementedError()
|
|
|
|
|
2022-01-24 04:07:52 +00:00
|
|
|
def attr(
|
|
|
|
self, name: str, lineno: t.Optional[int] = None
|
|
|
|
) -> nodes.ExtensionAttribute:
|
2019-12-02 21:46:54 +00:00
|
|
|
"""Return an attribute node for the current extension. This is useful
|
|
|
|
to pass constants on extensions to generated template code.
|
|
|
|
|
|
|
|
::
|
|
|
|
|
|
|
|
self.attr('_my_attribute', lineno=lineno)
|
|
|
|
"""
|
|
|
|
return nodes.ExtensionAttribute(self.identifier, name, lineno=lineno)
|
|
|
|
|
2022-01-24 04:07:52 +00:00
|
|
|
def call_method(
|
|
|
|
self,
|
|
|
|
name: str,
|
|
|
|
args: t.Optional[t.List[nodes.Expr]] = None,
|
|
|
|
kwargs: t.Optional[t.List[nodes.Keyword]] = None,
|
|
|
|
dyn_args: t.Optional[nodes.Expr] = None,
|
|
|
|
dyn_kwargs: t.Optional[nodes.Expr] = None,
|
|
|
|
lineno: t.Optional[int] = None,
|
|
|
|
) -> nodes.Call:
|
2019-12-02 21:46:54 +00:00
|
|
|
"""Call a method of the extension. This is a shortcut for
|
|
|
|
:meth:`attr` + :class:`jinja2.nodes.Call`.
|
|
|
|
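
        For example, :class:`DebugExtension` uses it to emit a call back into
        the extension from the compiled template::

            self.call_method("_render", [context], lineno=lineno)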
"""
|
|
|
|
if args is None:
|
|
|
|
args = []
|
|
|
|
if kwargs is None:
|
|
|
|
kwargs = []
|
2022-01-24 04:07:52 +00:00
|
|
|
return nodes.Call(
|
|
|
|
self.attr(name, lineno=lineno),
|
|
|
|
args,
|
|
|
|
kwargs,
|
|
|
|
dyn_args,
|
|
|
|
dyn_kwargs,
|
|
|
|
lineno=lineno,
|
|
|
|
)
|
2019-12-02 21:46:54 +00:00
|
|
|
|
|
|
|
|
2022-01-24 04:07:52 +00:00
|
|
|
@pass_context
|
|
|
|
def _gettext_alias(
|
|
|
|
__context: Context, *args: t.Any, **kwargs: t.Any
|
|
|
|
) -> t.Union[t.Any, Undefined]:
|
|
|
|
return __context.call(__context.resolve("gettext"), *args, **kwargs)
|
2019-12-02 21:46:54 +00:00
|
|
|
|
|
|
|
|
2022-01-24 04:07:52 +00:00
|
|
|
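

# The factories below wrap plain gettext callables for "newstyle" gettext:
# the returned wrappers always apply ``%``-style interpolation using the
# keyword arguments passed from the template. A sketch of template usage,
# assuming translations have been installed:
#
#   {{ gettext('Hello %(name)s!', name=user.name) }}
#   {{ ngettext('%(num)d apple', '%(num)d apples', apples | count) }}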
def _make_new_gettext(func: t.Callable[[str], str]) -> t.Callable[..., str]:
    @pass_context
    def gettext(__context: Context, __string: str, **variables: t.Any) -> str:
        rv = __context.call(func, __string)
        if __context.eval_ctx.autoescape:
            rv = Markup(rv)
        # Always treat as a format string, even if there are no
        # variables. This makes translation strings more consistent
        # and predictable. This requires escaping literal ``%`` as
        # ``%%`` in the translated text.
        return rv % variables  # type: ignore

    return gettext


def _make_new_ngettext(func: t.Callable[[str, str, int], str]) -> t.Callable[..., str]:
    @pass_context
    def ngettext(
        __context: Context,
        __singular: str,
        __plural: str,
        __num: int,
        **variables: t.Any,
    ) -> str:
        variables.setdefault("num", __num)
        rv = __context.call(func, __singular, __plural, __num)
        if __context.eval_ctx.autoescape:
            rv = Markup(rv)
        # Always treat as a format string, see gettext comment above.
        return rv % variables  # type: ignore

    return ngettext


def _make_new_pgettext(func: t.Callable[[str, str], str]) -> t.Callable[..., str]:
    @pass_context
    def pgettext(
        __context: Context, __string_ctx: str, __string: str, **variables: t.Any
    ) -> str:
        variables.setdefault("context", __string_ctx)
        rv = __context.call(func, __string_ctx, __string)

        if __context.eval_ctx.autoescape:
            rv = Markup(rv)

        # Always treat as a format string, see gettext comment above.
        return rv % variables  # type: ignore

    return pgettext


def _make_new_npgettext(
    func: t.Callable[[str, str, str, int], str]
) -> t.Callable[..., str]:
    @pass_context
    def npgettext(
        __context: Context,
        __string_ctx: str,
        __singular: str,
        __plural: str,
        __num: int,
        **variables: t.Any,
    ) -> str:
        variables.setdefault("context", __string_ctx)
        variables.setdefault("num", __num)
        rv = __context.call(func, __string_ctx, __singular, __plural, __num)

        if __context.eval_ctx.autoescape:
            rv = Markup(rv)

        # Always treat as a format string, see gettext comment above.
        return rv % variables  # type: ignore

    return npgettext


class InternationalizationExtension(Extension):
    """This extension adds gettext support to Jinja."""

    tags = {"trans"}

    # TODO: the i18n extension is currently reevaluating values in a few
    # situations. Take this example:
    #   {% trans count=something() %}{{ count }} foo{% pluralize
    #     %}{{ count }} fooss{% endtrans %}
    # something is called twice here. One time for the gettext value and
    # the other time for the n-parameter of the ngettext function.

    def __init__(self, environment: Environment) -> None:
        super().__init__(environment)
        environment.globals["_"] = _gettext_alias
        environment.extend(
            install_gettext_translations=self._install,
            install_null_translations=self._install_null,
            install_gettext_callables=self._install_callables,
            uninstall_gettext_translations=self._uninstall,
            extract_translations=self._extract,
            newstyle_gettext=False,
        )

    def _install(
        self, translations: "_SupportedTranslations", newstyle: t.Optional[bool] = None
    ) -> None:
        # ugettext and ungettext are preferred in case the I18N library
        # is providing compatibility with older Python versions.
        gettext = getattr(translations, "ugettext", None)
        if gettext is None:
            gettext = translations.gettext
        ngettext = getattr(translations, "ungettext", None)
        if ngettext is None:
            ngettext = translations.ngettext

        pgettext = getattr(translations, "pgettext", None)
        npgettext = getattr(translations, "npgettext", None)
        self._install_callables(
            gettext, ngettext, newstyle=newstyle, pgettext=pgettext, npgettext=npgettext
        )

    def _install_null(self, newstyle: t.Optional[bool] = None) -> None:
        import gettext

        translations = gettext.NullTranslations()

        if hasattr(translations, "pgettext"):
            # Python >= 3.8
            pgettext = translations.pgettext  # type: ignore
        else:

            def pgettext(c: str, s: str) -> str:
                return s

        if hasattr(translations, "npgettext"):
            npgettext = translations.npgettext  # type: ignore
        else:

            def npgettext(c: str, s: str, p: str, n: int) -> str:
                return s if n == 1 else p

        self._install_callables(
            gettext=translations.gettext,
            ngettext=translations.ngettext,
            newstyle=newstyle,
            pgettext=pgettext,
            npgettext=npgettext,
        )

    def _install_callables(
        self,
        gettext: t.Callable[[str], str],
        ngettext: t.Callable[[str, str, int], str],
        newstyle: t.Optional[bool] = None,
        pgettext: t.Optional[t.Callable[[str, str], str]] = None,
        npgettext: t.Optional[t.Callable[[str, str, str, int], str]] = None,
    ) -> None:
        if newstyle is not None:
            self.environment.newstyle_gettext = newstyle  # type: ignore
        if self.environment.newstyle_gettext:  # type: ignore
            gettext = _make_new_gettext(gettext)
            ngettext = _make_new_ngettext(ngettext)

            if pgettext is not None:
                pgettext = _make_new_pgettext(pgettext)

            if npgettext is not None:
                npgettext = _make_new_npgettext(npgettext)

        self.environment.globals.update(
            gettext=gettext, ngettext=ngettext, pgettext=pgettext, npgettext=npgettext
        )

    def _uninstall(self, translations: "_SupportedTranslations") -> None:
        for key in ("gettext", "ngettext", "pgettext", "npgettext"):
            self.environment.globals.pop(key, None)

    def _extract(
        self,
        source: t.Union[str, nodes.Template],
        gettext_functions: t.Sequence[str] = GETTEXT_FUNCTIONS,
    ) -> t.Iterator[
        t.Tuple[int, str, t.Union[t.Optional[str], t.Tuple[t.Optional[str], ...]]]
    ]:
        if isinstance(source, str):
            source = self.environment.parse(source)
        return extract_from_ast(source, gettext_functions)

    def parse(self, parser: "Parser") -> t.Union[nodes.Node, t.List[nodes.Node]]:
        """Parse a translatable tag."""
        lineno = next(parser.stream).lineno

        context = None
        context_token = parser.stream.next_if("string")

        if context_token is not None:
            context = context_token.value

        # find all the variables referenced. Additionally a variable can be
        # defined in the body of the trans block too, but this is checked at
        # a later state.
        plural_expr: t.Optional[nodes.Expr] = None
        plural_expr_assignment: t.Optional[nodes.Assign] = None
        num_called_num = False
        variables: t.Dict[str, nodes.Expr] = {}
        trimmed = None
        while parser.stream.current.type != "block_end":
            if variables:
                parser.stream.expect("comma")

            # skip colon for python compatibility
            if parser.stream.skip_if("colon"):
                break

            token = parser.stream.expect("name")
            if token.value in variables:
                parser.fail(
                    f"translatable variable {token.value!r} defined twice.",
                    token.lineno,
                    exc=TemplateAssertionError,
                )

            # expressions
            if parser.stream.current.type == "assign":
                next(parser.stream)
                variables[token.value] = var = parser.parse_expression()
            elif trimmed is None and token.value in ("trimmed", "notrimmed"):
                trimmed = token.value == "trimmed"
                continue
            else:
                variables[token.value] = var = nodes.Name(token.value, "load")

            if plural_expr is None:
                if isinstance(var, nodes.Call):
                    plural_expr = nodes.Name("_trans", "load")
                    variables[token.value] = plural_expr
                    plural_expr_assignment = nodes.Assign(
                        nodes.Name("_trans", "store"), var
                    )
                else:
                    plural_expr = var
                num_called_num = token.value == "num"

        parser.stream.expect("block_end")

        plural = None
        have_plural = False
        referenced = set()

        # now parse until endtrans or pluralize
        singular_names, singular = self._parse_block(parser, True)
        if singular_names:
            referenced.update(singular_names)
            if plural_expr is None:
                plural_expr = nodes.Name(singular_names[0], "load")
                num_called_num = singular_names[0] == "num"

        # if we have a pluralize block, we parse that too
        if parser.stream.current.test("name:pluralize"):
            have_plural = True
            next(parser.stream)
            if parser.stream.current.type != "block_end":
                token = parser.stream.expect("name")
                if token.value not in variables:
                    parser.fail(
                        f"unknown variable {token.value!r} for pluralization",
                        token.lineno,
                        exc=TemplateAssertionError,
                    )
                plural_expr = variables[token.value]
                num_called_num = token.value == "num"
            parser.stream.expect("block_end")
            plural_names, plural = self._parse_block(parser, False)
            next(parser.stream)
            referenced.update(plural_names)
        else:
            next(parser.stream)

        # register free names as simple name expressions
        for name in referenced:
            if name not in variables:
                variables[name] = nodes.Name(name, "load")

        if not have_plural:
            plural_expr = None
        elif plural_expr is None:
            parser.fail("pluralize without variables", lineno)

        if trimmed is None:
            trimmed = self.environment.policies["ext.i18n.trimmed"]
        if trimmed:
            singular = self._trim_whitespace(singular)
            if plural:
                plural = self._trim_whitespace(plural)

        node = self._make_node(
            singular,
            plural,
            context,
            variables,
            plural_expr,
            bool(referenced),
            num_called_num and have_plural,
        )
        node.set_lineno(lineno)
        if plural_expr_assignment is not None:
            return [plural_expr_assignment, node]
        else:
            return node

    def _trim_whitespace(self, string: str, _ws_re: t.Pattern[str] = _ws_re) -> str:
        return _ws_re.sub(" ", string.strip())

    def _parse_block(
        self, parser: "Parser", allow_pluralize: bool
    ) -> t.Tuple[t.List[str], str]:
        """Parse until the next block tag with a given name."""
        referenced = []
        buf = []

        while True:
            if parser.stream.current.type == "data":
                buf.append(parser.stream.current.value.replace("%", "%%"))
                next(parser.stream)
            elif parser.stream.current.type == "variable_begin":
                next(parser.stream)
                name = parser.stream.expect("name").value
                referenced.append(name)
                buf.append(f"%({name})s")
                parser.stream.expect("variable_end")
            elif parser.stream.current.type == "block_begin":
                next(parser.stream)
                if parser.stream.current.test("name:endtrans"):
                    break
                elif parser.stream.current.test("name:pluralize"):
                    if allow_pluralize:
                        break
                    parser.fail(
                        "a translatable section can have only one pluralize section"
                    )
                parser.fail(
                    "control structures in translatable sections are not allowed"
                )
            elif parser.stream.eos:
                parser.fail("unclosed translation block")
            else:
                raise RuntimeError("internal parser error")

        return referenced, concat(buf)

    def _make_node(
        self,
        singular: str,
        plural: t.Optional[str],
        context: t.Optional[str],
        variables: t.Dict[str, nodes.Expr],
        plural_expr: t.Optional[nodes.Expr],
        vars_referenced: bool,
        num_called_num: bool,
    ) -> nodes.Output:
        """Generates a useful node from the data provided."""
        newstyle = self.environment.newstyle_gettext  # type: ignore
        node: nodes.Expr

        # no variables referenced? no need to escape for old style
        # gettext invocations only if there are vars.
        if not vars_referenced and not newstyle:
            singular = singular.replace("%%", "%")
            if plural:
                plural = plural.replace("%%", "%")

        func_name = "gettext"
        func_args: t.List[nodes.Expr] = [nodes.Const(singular)]

        if context is not None:
            func_args.insert(0, nodes.Const(context))
            func_name = f"p{func_name}"

        if plural_expr is not None:
            func_name = f"n{func_name}"
            func_args.extend((nodes.Const(plural), plural_expr))

        node = nodes.Call(nodes.Name(func_name, "load"), func_args, [], None, None)

        # in case newstyle gettext is used, the method is powerful
        # enough to handle the variable expansion and autoescape
        # handling itself
        if newstyle:
            for key, value in variables.items():
                # the function adds that later anyways in case num was
                # called num, so just skip it.
                if num_called_num and key == "num":
                    continue

                node.kwargs.append(nodes.Keyword(key, value))

        # otherwise do that here
        else:
            # mark the return value as safe if we are in an
            # environment with autoescaping turned on
            node = nodes.MarkSafeIfAutoescape(node)
            if variables:
                node = nodes.Mod(
                    node,
                    nodes.Dict(
                        [
                            nodes.Pair(nodes.Const(key), value)
                            for key, value in variables.items()
                        ]
                    ),
                )
        return nodes.Output([node])


class ExprStmtExtension(Extension):
    """Adds a `do` tag to Jinja that works like the print statement, except
    that it doesn't print the return value.
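
    For example, with a ``navigation`` list available in the context::

        {% do navigation.append('a string') %}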
"""
|
|
|
|
|
2022-01-24 04:07:52 +00:00
|
|
|
tags = {"do"}
|
|
|
|
|
|
|
|
def parse(self, parser: "Parser") -> nodes.ExprStmt:
|
2019-12-02 21:46:54 +00:00
|
|
|
node = nodes.ExprStmt(lineno=next(parser.stream).lineno)
|
|
|
|
node.node = parser.parse_tuple()
|
|
|
|
return node
|
|
|
|
|
|
|
|
|
|
|
|
class LoopControlExtension(Extension):
|
|
|
|
"""Adds break and continue to the template engine."""
|
|
|
|
|
2022-01-24 04:07:52 +00:00
|
|
|
tags = {"break", "continue"}
|
|
|
|
|
|
|
|
def parse(self, parser: "Parser") -> t.Union[nodes.Break, nodes.Continue]:
|
2019-12-02 21:46:54 +00:00
|
|
|
token = next(parser.stream)
|
2022-01-24 04:07:52 +00:00
|
|
|
if token.value == "break":
|
2019-12-02 21:46:54 +00:00
|
|
|
return nodes.Break(lineno=token.lineno)
|
|
|
|
return nodes.Continue(lineno=token.lineno)
|
|
|
|
|
|
|
|
|
2022-01-24 04:07:52 +00:00
|
|
|
class DebugExtension(Extension):
|
|
|
|
"""A ``{% debug %}`` tag that dumps the available variables,
|
|
|
|
filters, and tests.
|
|
|
|
|
|
|
|
.. code-block:: html+jinja
|
2019-12-02 21:46:54 +00:00
|
|
|
|
2022-01-24 04:07:52 +00:00
|
|
|
<pre>{% debug %}</pre>
|
2019-12-02 21:46:54 +00:00
|
|
|
|
2022-01-24 04:07:52 +00:00
|
|
|
.. code-block:: text
|
|
|
|
|
|
|
|
{'context': {'cycler': <class 'jinja2.utils.Cycler'>,
|
|
|
|
...,
|
|
|
|
'namespace': <class 'jinja2.utils.Namespace'>},
|
|
|
|
'filters': ['abs', 'attr', 'batch', 'capitalize', 'center', 'count', 'd',
|
|
|
|
..., 'urlencode', 'urlize', 'wordcount', 'wordwrap', 'xmlattr'],
|
|
|
|
'tests': ['!=', '<', '<=', '==', '>', '>=', 'callable', 'defined',
|
|
|
|
..., 'odd', 'sameas', 'sequence', 'string', 'undefined', 'upper']}
|
|
|
|
|
|
|
|
.. versionadded:: 2.11.0
|
|
|
|
"""
|
|
|
|
|
|
|
|
tags = {"debug"}
|
|
|
|
|
|
|
|
def parse(self, parser: "Parser") -> nodes.Output:
|
|
|
|
lineno = parser.stream.expect("name:debug").lineno
|
|
|
|
context = nodes.ContextReference()
|
|
|
|
result = self.call_method("_render", [context], lineno=lineno)
|
|
|
|
return nodes.Output([result], lineno=lineno)
|
|
|
|
|
|
|
|
def _render(self, context: Context) -> str:
|
|
|
|
result = {
|
|
|
|
"context": context.get_all(),
|
|
|
|
"filters": sorted(self.environment.filters.keys()),
|
|
|
|
"tests": sorted(self.environment.tests.keys()),
|
|
|
|
}
|
|
|
|
|
|
|
|
# Set the depth since the intent is to show the top few names.
|
|
|
|
return pprint.pformat(result, depth=3, compact=True)
|
|
|
|
|
|
|
|
|
|
|
|
def extract_from_ast(
|
|
|
|
ast: nodes.Template,
|
|
|
|
gettext_functions: t.Sequence[str] = GETTEXT_FUNCTIONS,
|
|
|
|
babel_style: bool = True,
|
|
|
|
) -> t.Iterator[
|
|
|
|
t.Tuple[int, str, t.Union[t.Optional[str], t.Tuple[t.Optional[str], ...]]]
|
|
|
|
]:
    """Extract localizable strings from the given template node. By default
    this function returns matches in Babel style, meaning non-string
    parameters as well as keyword arguments are returned as `None`. This
    allows Babel to figure out what you really meant if you are using
    gettext functions that allow keyword arguments for placeholder expansion.
    If you don't want that behavior, set the `babel_style` parameter to
    `False`, which causes only strings to be returned and parameters to
    always be stored in tuples. As a consequence, invalid gettext calls
    (calls without a single string parameter or string parameters after
    non-string parameters) are skipped.

    This example explains the behavior:

    >>> from jinja2 import Environment
    >>> env = Environment()
    >>> node = env.parse('{{ (_("foo"), _(), ngettext("foo", "bar", 42)) }}')
    >>> list(extract_from_ast(node))
    [(1, '_', 'foo'), (1, '_', ()), (1, 'ngettext', ('foo', 'bar', None))]
    >>> list(extract_from_ast(node, babel_style=False))
    [(1, '_', ('foo',)), (1, 'ngettext', ('foo', 'bar'))]

    For every string found this function yields a ``(lineno, function,
    message)`` tuple, where:

    * ``lineno`` is the number of the line on which the string was found,
    * ``function`` is the name of the ``gettext`` function used (if the
      string was extracted from embedded Python code), and
    * ``message`` is the string, or a tuple of strings for functions
      with multiple string arguments.

    This extraction function operates on the AST and is therefore unable
    to extract any comments. For comment support you have to use the babel
    extraction interface or extract comments yourself.
    """
    out: t.Union[t.Optional[str], t.Tuple[t.Optional[str], ...]]

    for node in ast.find_all(nodes.Call):
        if (
            not isinstance(node.node, nodes.Name)
            or node.node.name not in gettext_functions
        ):
            continue

        strings: t.List[t.Optional[str]] = []

        for arg in node.args:
            if isinstance(arg, nodes.Const) and isinstance(arg.value, str):
                strings.append(arg.value)
            else:
                strings.append(None)

        for _ in node.kwargs:
            strings.append(None)
        if node.dyn_args is not None:
            strings.append(None)
        if node.dyn_kwargs is not None:
            strings.append(None)

        if not babel_style:
            out = tuple(x for x in strings if x is not None)

            if not out:
                continue
        else:
            if len(strings) == 1:
                out = strings[0]
            else:
                out = tuple(strings)

        yield node.lineno, node.node.name, out


class _CommentFinder:
    """Helper class to find comments in a token stream. Can only
    find comments for gettext calls forwards. Once the comment
    from line 4 is found, a comment for line 1 will not return a
    usable value.
    """

    def __init__(
        self, tokens: t.Sequence[t.Tuple[int, str, str]], comment_tags: t.Sequence[str]
    ) -> None:
        self.tokens = tokens
        self.comment_tags = comment_tags
        self.offset = 0
        self.last_lineno = 0

    def find_backwards(self, offset: int) -> t.List[str]:
        try:
            for _, token_type, token_value in reversed(
                self.tokens[self.offset : offset]
            ):
                if token_type in ("comment", "linecomment"):
                    try:
                        prefix, comment = token_value.split(None, 1)
                    except ValueError:
                        continue
                    if prefix in self.comment_tags:
                        return [comment.rstrip()]
            return []
        finally:
            self.offset = offset

    def find_comments(self, lineno: int) -> t.List[str]:
        if not self.comment_tags or self.last_lineno > lineno:
            return []
        for idx, (token_lineno, _, _) in enumerate(self.tokens[self.offset :]):
            if token_lineno > lineno:
                return self.find_backwards(self.offset + idx)
        return self.find_backwards(len(self.tokens))


def babel_extract(
    fileobj: t.BinaryIO,
    keywords: t.Sequence[str],
    comment_tags: t.Sequence[str],
    options: t.Dict[str, t.Any],
) -> t.Iterator[
    t.Tuple[
        int, str, t.Union[t.Optional[str], t.Tuple[t.Optional[str], ...]], t.List[str]
    ]
]:
    """Babel extraction method for Jinja templates.

    .. versionchanged:: 2.3
       Basic support for translation comments was added. If `comment_tags`
       is now set to a list of keywords for extraction, the extractor will
       try to find the best preceding comment that begins with one of the
       keywords. For best results, make sure to not have more than one
       gettext call in one line of code and the matching comment in the
       same line or the line before.

    .. versionchanged:: 2.5.1
       The `newstyle_gettext` flag can be set to `True` to enable newstyle
       gettext calls.

    .. versionchanged:: 2.7
       A `silent` option can now be provided. If set to `False` template
       syntax errors are propagated instead of being ignored.

    :param fileobj: the file-like object the messages should be extracted from
    :param keywords: a list of keywords (i.e. function names) that should be
                     recognized as translation functions
    :param comment_tags: a list of translator tags to search for and include
                         in the results.
    :param options: a dictionary of additional options (optional)
    :return: an iterator over ``(lineno, funcname, message, comments)`` tuples.
             (comments will be empty currently)
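
    The options usually come from a Babel extraction mapping file. A minimal
    sketch, assuming the extractor is registered under the ``jinja2`` entry
    point name as this package does::

        [jinja2: **/templates/**.html]
        encoding = utf-8
        trimmed = true
        newstyle_gettext = true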
"""
|
2022-01-24 04:07:52 +00:00
|
|
|
extensions: t.Dict[t.Type[Extension], None] = {}
|
|
|
|
|
|
|
|
for extension_name in options.get("extensions", "").split(","):
|
|
|
|
extension_name = extension_name.strip()
|
|
|
|
|
|
|
|
if not extension_name:
|
2019-12-02 21:46:54 +00:00
|
|
|
continue
|
2022-01-24 04:07:52 +00:00
|
|
|
|
|
|
|
extensions[import_string(extension_name)] = None
|
|
|
|
|
2019-12-02 21:46:54 +00:00
|
|
|
if InternationalizationExtension not in extensions:
|
2022-01-24 04:07:52 +00:00
|
|
|
extensions[InternationalizationExtension] = None
|
2019-12-02 21:46:54 +00:00
|
|
|
|
2022-01-24 04:07:52 +00:00
|
|
|
def getbool(options: t.Mapping[str, str], key: str, default: bool = False) -> bool:
|
|
|
|
return options.get(key, str(default)).lower() in {"1", "on", "yes", "true"}
|
2019-12-02 21:46:54 +00:00
|
|
|
|
2022-01-24 04:07:52 +00:00
|
|
|
silent = getbool(options, "silent", True)
|
2019-12-02 21:46:54 +00:00
|
|
|
environment = Environment(
|
2022-01-24 04:07:52 +00:00
|
|
|
options.get("block_start_string", defaults.BLOCK_START_STRING),
|
|
|
|
options.get("block_end_string", defaults.BLOCK_END_STRING),
|
|
|
|
options.get("variable_start_string", defaults.VARIABLE_START_STRING),
|
|
|
|
options.get("variable_end_string", defaults.VARIABLE_END_STRING),
|
|
|
|
options.get("comment_start_string", defaults.COMMENT_START_STRING),
|
|
|
|
options.get("comment_end_string", defaults.COMMENT_END_STRING),
|
|
|
|
options.get("line_statement_prefix") or defaults.LINE_STATEMENT_PREFIX,
|
|
|
|
options.get("line_comment_prefix") or defaults.LINE_COMMENT_PREFIX,
|
|
|
|
getbool(options, "trim_blocks", defaults.TRIM_BLOCKS),
|
|
|
|
getbool(options, "lstrip_blocks", defaults.LSTRIP_BLOCKS),
|
|
|
|
defaults.NEWLINE_SEQUENCE,
|
|
|
|
getbool(options, "keep_trailing_newline", defaults.KEEP_TRAILING_NEWLINE),
|
|
|
|
tuple(extensions),
|
2019-12-02 21:46:54 +00:00
|
|
|
cache_size=0,
|
2022-01-24 04:07:52 +00:00
|
|
|
auto_reload=False,
|
2019-12-02 21:46:54 +00:00
|
|
|
)
|
|
|
|
|
2022-01-24 04:07:52 +00:00
|
|
|
if getbool(options, "trimmed"):
|
|
|
|
environment.policies["ext.i18n.trimmed"] = True
|
|
|
|
if getbool(options, "newstyle_gettext"):
|
|
|
|
environment.newstyle_gettext = True # type: ignore
|
2019-12-02 21:46:54 +00:00
|
|
|
|
2022-01-24 04:07:52 +00:00
|
|
|
source = fileobj.read().decode(options.get("encoding", "utf-8"))
|
2019-12-02 21:46:54 +00:00
|
|
|
try:
|
|
|
|
node = environment.parse(source)
|
|
|
|
tokens = list(environment.lex(environment.preprocess(source)))
|
2022-01-24 04:07:52 +00:00
|
|
|
except TemplateSyntaxError:
|
2019-12-02 21:46:54 +00:00
|
|
|
if not silent:
|
|
|
|
raise
|
|
|
|
# skip templates with syntax errors
|
|
|
|
return
|
|
|
|
|
|
|
|
finder = _CommentFinder(tokens, comment_tags)
|
|
|
|
for lineno, func, message in extract_from_ast(node, keywords):
|
|
|
|
yield lineno, func, message, finder.find_comments(lineno)
|
|
|
|
|
|
|
|
|
|
|
|
#: nicer import names
|
|
|
|
i18n = InternationalizationExtension
|
|
|
|
do = ExprStmtExtension
|
|
|
|
loopcontrols = LoopControlExtension
|
2022-01-24 04:07:52 +00:00
|
|
|
debug = DebugExtension
|